gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
import re
from collections import namedtuple, defaultdict
import sublime
from sublime_plugin import WindowCommand, TextCommand
from ..git_command import GitCommand
from ...common import util
# One parsed line of `git blame --porcelain` output: the line's text, the
# commit that introduced it, its line number in that commit (orig_lineno),
# and its line number in the current file (final_lineno).
BlamedLine = namedtuple("BlamedLine", ("contents", "commit_hash", "orig_lineno", "final_lineno"))

# The all-zeros hash git-blame reports for lines not yet committed.
NOT_COMMITED_HASH = "0000000000000000000000000000000000000000"

# View-tab title; formatted with the file's repo-relative path.
BLAME_TITLE = "BLAME: {}"
class GsBlameCommand(WindowCommand, GitCommand):

    """Prompt for blame options, then open a scratch view showing git-blame
    output for the current (or given) file."""

    @util.view.single_cursor_coords
    def run(self, coords, file_path=None, repo_path=None):
        self._coords = coords
        self.__file_path = file_path or self.file_path
        self.__repo_path = repo_path or self.repo_path
        sublime.set_timeout_async(self.run_async)

    def run_async(self):
        options = [
            "Default",
            "Ignore whitespace",
            "Detect moved or copied lines within same file",
            "Detect moved or copied lines within same commit",
            "Detect moved or copied lines across all commits",
        ]
        self.window.show_quick_panel(options, self.on_option_selection)

    def on_option_selection(self, index):
        # -1 means the quick panel was dismissed without a selection.
        if index < 0:
            return

        # Index 0 is a plain blame; index 1 adds -w; indices 2-4 enable
        # increasingly aggressive move/copy detection.
        ignore_whitespace = None if index == 0 else "-w"
        detect_move_or_copy = (None, None, "-M", "-C", "-CCC")[index]

        view = self.window.new_file()
        view.set_syntax_file("Packages/GitSavvy/syntax/blame.tmLanguage")

        settings = view.settings()
        for key, value in (
                ("git_savvy.blame_view", True),
                ("git_savvy.repo_path", self.__repo_path),
                ("git_savvy.file_path", self.__file_path),
                ("word_wrap", False),
                ("line_numbers", False),
                ("indent_guide_options", [])):
            settings.set(key, value)

        view.set_name(BLAME_TITLE.format(self.get_rel_path(self.__file_path)))
        view.set_scratch(True)
        view.run_command("gs_blame_initialize_view", {
            "coords": self._coords,
            "ignore_whitespace": ignore_whitespace,
            "detect_move_or_copy": detect_move_or_copy,
        })
class GsBlameInitializeViewCommand(TextCommand, GitCommand):

    """Populate a blame view with rendered `git blame -p` output and
    optionally scroll to the line the cursor was on."""

    def run(self, edit, coords=None, ignore_whitespace=None, detect_move_or_copy=None):
        content = self.get_content(
            ignore_whitespace=ignore_whitespace,
            detect_move_or_copy=detect_move_or_copy
        )
        self.view.sel().clear()
        self.view.set_read_only(False)
        self.view.replace(edit, sublime.Region(0, self.view.size()), content)
        self.view.set_read_only(True)
        if coords is not None:
            self.scroll_to(coords)

    def get_content(self, ignore_whitespace=None, detect_move_or_copy=None):
        """Run `git blame -p` on the file and render the output as two
        aligned columns (commit info | code), one section per hunk.
        """
        blame_porcelain = self.git(
            "blame", "-p", ignore_whitespace, detect_move_or_copy, self.file_path
        )
        blamed_lines, commits = self.parse_blame(blame_porcelain.splitlines())

        commit_infos = {
            commit_hash: self.short_commit_info(commit)
            for commit_hash, commit in commits.items()
        }

        partitions = tuple(self.partition(blamed_lines))

        # Width of the left (commit-info) column.
        longest_commit_line = max(
            (line
             for commit_info in commit_infos.values()
             for line in commit_info),
            key=len)

        # Width of the right (code) column.
        longest_code_line = max(
            (line.contents for partition in partitions for line in partition),
            key=len
        )

        partitions_with_commits_iter = self.couple_partitions_and_commits(
            partitions=partitions,
            commit_infos=commit_infos,
            left_pad=len(longest_commit_line)
        )

        # Horizontal separator drawn between hunks; the right side reserves
        # 5 extra columns for the 4-digit line number and a space.
        spacer = (
            "-" * len(longest_commit_line) +
            " | " +
            "-" * (5 + len(longest_code_line)) +
            "\n"
        )

        return spacer.join(partitions_with_commits_iter)

    def parse_blame(self, blame_porcelain):
        """Parse `git blame -p` output.

        Returns a two-tuple: a list of BlamedLine in file order, and a dict
        mapping full commit hash -> dict of that commit's header fields.
        """
        lines_iter = iter(blame_porcelain)

        blamed_lines = []
        commits = defaultdict(lambda: defaultdict(str))

        for line in lines_iter:
            commit_hash, orig_lineno, final_lineno, _ = \
                re.match(r"([0-9a-f]{40}) (\d+) (\d+)( \d+)?", line).groups()
            commits[commit_hash]["short_hash"] = commit_hash[:12]
            commits[commit_hash]["long_hash"] = commit_hash

            next_line = next(lines_iter)
            while not next_line.startswith("\t"):
                # Iterate through header keys and values.
                try:
                    k, v = re.match(r"([^ ]+) (.+)", next_line).groups()
                except AttributeError:
                    # Sometimes git-blame includes keys without values;
                    # since we don't care about these, simply discard.
                    # BUGFIX: only assign on a successful match -- previously
                    # the stale/unbound (k, v) pair was still stored here.
                    print("Skipping blame line: " + repr(next_line))
                else:
                    commits[commit_hash][k] = v
                next_line = next(lines_iter)

            # If `next_line` starts with a tab (and breaks out of the above
            # while loop), it is an actual line of code. The line following
            # that will be a new header or the end of the file.
            blamed_lines.append(BlamedLine(
                # Strip tab character.
                contents=next_line[1:],
                commit_hash=commit_hash,
                orig_lineno=orig_lineno,
                final_lineno=final_lineno))

        return blamed_lines, commits

    @staticmethod
    def partition(blamed_lines):
        """Yield consecutive runs of BlamedLines sharing a commit hash."""
        prev_line = None
        current_hunk = []
        for line in blamed_lines:
            if prev_line and line.commit_hash != prev_line.commit_hash:
                yield current_hunk
                current_hunk = []
            prev_line = line
            current_hunk.append(line)
        yield current_hunk

    @staticmethod
    def short_commit_info(commit):
        """Return the tuple of text lines shown in the left column for a
        commit: (summary, short hash, author, fuzzy timestamp)."""
        if commit["long_hash"] == NOT_COMMITED_HASH:
            return ("Not committed yet.", )

        summary = commit["summary"]
        if len(summary) > 40:
            summary = summary[:36] + " ..."
        author_info = commit["author"] + " " + commit["author-mail"]
        time_stamp = util.dates.fuzzy(commit["author-time"]) if commit["author-time"] else ""

        return (summary, commit["short_hash"], author_info, time_stamp)

    @staticmethod
    def couple_partitions_and_commits(partitions, commit_infos, left_pad):
        """Yield one rendered text section per hunk, pairing commit-info
        lines (left) with code lines (right)."""
        left_fallback = " " * left_pad
        right_fallback = ""

        for partition in partitions:
            output = ""
            commit_info = commit_infos[partition[0].commit_hash]

            left_len = len(commit_info)
            right_len = len(partition)
            # A hunk spans however many rows the taller column needs.
            # (Removed a redundant second computation of the same value.)
            total_lines = max(left_len, right_len)

            for i in range(total_lines):
                left = commit_info[i] if i < left_len else left_fallback
                right = partition[i].contents if i < right_len else right_fallback
                lineno = partition[i].final_lineno if i < right_len else right_fallback

                output += "{left: <{left_pad}} | {lineno: >4} {right}\n".format(
                    left=left,
                    left_pad=left_pad,
                    lineno=lineno,
                    right=right)

            yield output

    def scroll_to(self, coords):
        """Move the cursor/viewport to the blame row for source line coords[0].

        NOTE(review): the pattern assumes the left column is exactly 40
        characters wide -- this only matches when left_pad happens to be 40;
        confirm against couple_partitions_and_commits.
        """
        pattern = r".{{40}} \| {lineno: >4} ".format(lineno=coords[0] + 1)
        corresponding_region = self.view.find(pattern, 0)
        blame_view_pt = corresponding_region.b

        self.view.sel().add(sublime.Region(blame_view_pt, blame_view_pt))

        sublime.set_timeout_async(lambda: self.view.show_at_center(blame_view_pt), 0)
class GsBlameOpenCommitCommand(TextCommand):

    """Open a show-commit view for the commit under the cursor in a blame view."""

    @util.view.single_cursor_pt
    def run(self, cursor_pt, edit):
        # Find the hunk separator ("---- | ----") above the cursor; the short
        # hash is rendered two rows below it (separator, summary, hash).
        hunk_start = util.view.get_instance_before_pt(
            self.view, cursor_pt, r"^\-+ \| \-+")
        if hunk_start is None:
            short_hash_row = 1
        else:
            row, _ = self.view.rowcol(hunk_start)
            short_hash_row = row + 2

        hash_pt = self.view.text_point(short_hash_row, 0)
        short_hash = self.view.substr(sublime.Region(hash_pt, hash_pt + 12))

        # Uncommitted blocks.
        if not short_hash.strip():
            return

        self.view.window().run_command(
            "gs_show_commit", {"commit_hash": short_hash})
| |
#!/usr/bin/env python
# Copyright (c) 2013 Tom Steele, Dan Kottmann, FishNet Security
# See the file license.txt for copying permission
import xml.etree.ElementTree as et
import re
import copy
from lairdrone import drone_models as models
from lairdrone import helper
# Weight recorded on OS fingerprint entries produced by this parser.
OS_WEIGHT = 75
# Tool name stamped on every model (notes, plugins, commands) we create.
TOOL = "nessus"
def parse(project, nessus_file, include_informational=False, min_note_sev=2):
    """Parses a Nessus XMLv2 file and updates the Hive database

    :param project: The project id
    :param nessus_file: The Nessus xml file to be parsed
    :param include_informational: Whether to include info findings in data. Default False
    :param min_note_sev: The minimum severity of notes that will be saved. Default 2
    """
    # Strip "CVE-"/"CAN-" prefixes so only the bare id is stored.
    cve_pattern = re.compile(r'(CVE-|CAN-)')
    # Nessus marks guessed (unconfirmed) service names with a trailing '?'.
    false_udp_pattern = re.compile(r'.*\?$')

    tree = et.parse(nessus_file)
    root = tree.getroot()
    note_id = 1

    # Create the project dictionary which acts as foundation of document
    project_dict = dict(models.project_model)
    project_dict['commands'] = list()
    project_dict['vulnerabilities'] = list()
    project_dict['project_id'] = project

    # Used to maintain a running list of host:port vulnerabilities by plugin
    vuln_host_map = dict()

    for host in root.iter('ReportHost'):
        temp_ip = host.attrib['name']

        host_dict = dict(models.host_model)
        host_dict['os'] = list()
        host_dict['ports'] = list()
        host_dict['hostnames'] = list()

        # Tags contain host-specific information
        for tag in host.iter('tag'):

            # Operating system tag
            if tag.attrib['name'] == 'operating-system':
                os_dict = dict(models.os_model)
                os_dict['tool'] = TOOL
                os_dict['weight'] = OS_WEIGHT
                os_dict['fingerprint'] = tag.text
                host_dict['os'].append(os_dict)

            # IP address tag
            if tag.attrib['name'] == 'host-ip':
                host_dict['string_addr'] = tag.text
                host_dict['long_addr'] = helper.ip2long(tag.text)

            # MAC address tag
            if tag.attrib['name'] == 'mac-address':
                host_dict['mac_addr'] = tag.text

            # Hostname tag
            if tag.attrib['name'] == 'host-fqdn':
                host_dict['hostnames'].append(tag.text)

            # NetBIOS name tag
            if tag.attrib['name'] == 'netbios-name':
                host_dict['hostnames'].append(tag.text)

        # Track the unique port/protocol combos for a host so we don't
        # add duplicate entries
        ports_processed = dict()

        # Process each 'ReportItem'
        for item in host.findall('ReportItem'):
            plugin_id = item.attrib['pluginID']
            plugin_family = item.attrib['pluginFamily']
            severity = int(item.attrib['severity'])
            title = item.attrib['pluginName']
            port = int(item.attrib['port'])
            protocol = item.attrib['protocol']
            service = item.attrib['svc_name']
            evidence = item.find('plugin_output')

            # Ignore false positive UDP services
            if protocol == "udp" and false_udp_pattern.match(service):
                continue

            # Create a port model and temporarily store it in the dict
            # for tracking purposes. The ports_processed dict is used
            # later to add ports to the host so that no duplicates are
            # present. This is necessary due to the format of the Nessus
            # XML files.
            if '{0}:{1}'.format(port, protocol) not in ports_processed:
                port_dict = copy.deepcopy(models.port_model)
                port_dict['port'] = port
                port_dict['protocol'] = protocol
                port_dict['service'] = service
                ports_processed['{0}:{1}'.format(port, protocol)] = port_dict

            # Set the evidence as a port note if it exists
            if evidence is not None and \
                    severity >= min_note_sev and \
                    plugin_family != 'Port scanners' and \
                    plugin_family != 'Service detection':
                note_dict = copy.deepcopy(models.note_model)
                note_dict['title'] = "{0} (ID{1})".format(title, str(note_id))
                e = evidence.text.strip()
                for line in e.split("\n"):
                    line = line.strip()
                    if line:
                        note_dict['content'] += "    " + line + "\n"
                note_dict['last_modified_by'] = TOOL
                ports_processed['{0}:{1}'.format(port, protocol)]['notes'].append(note_dict)
                note_id += 1

            # This plugin is general scan info...use it for 'command' element
            if plugin_id == '19506':
                command = item.find('plugin_output')

                command_dict = dict(models.command_model)
                command_dict['tool'] = TOOL

                if command is not None:
                    command_dict['command'] = command.text

                if not project_dict['commands']:
                    project_dict['commands'].append(command_dict)

                continue

            # Check if this vulnerability has been seen in this file for
            # another host. If not, create a new vulnerability_model and
            # maintain a mapping between plugin-id and vulnerability as
            # well as a mapping between plugin-id and host. These mappings
            # are later used to complete the Hive schema such that host
            # IP and port information are embedded within each vulnerability
            # while ensuring no duplicate data exists.
            if plugin_id not in vuln_host_map:
                v = copy.deepcopy(models.vulnerability_model)
                v['cves'] = list()
                v['plugin_ids'] = list()
                v['identified_by'] = list()
                v['hosts'] = list()

                # Set the title
                v['title'] = title

                # Set the description
                description = item.find('description')
                if description is not None:
                    v['description'] = description.text

                # Set the solution
                solution = item.find('solution')
                if solution is not None:
                    v['solution'] = solution.text

                # Set the evidence
                if evidence is not None:
                    v['evidence'] = evidence.text

                # Set the vulnerability flag if exploit exists
                exploit = item.find('exploit_available')
                if exploit is not None:
                    v['flag'] = exploit.text == 'true'

                # Grab Metasploit details
                exploit_detail = item.find('exploit_framework_metasploit')
                if exploit_detail is not None and \
                        exploit_detail.text == 'true':
                    note_dict = copy.deepcopy(models.note_model)
                    note_dict['title'] = 'Metasploit Exploit'
                    note_dict['content'] = 'Exploit exists. Details unknown.'
                    module = item.find('metasploit_name')
                    if module is not None:
                        note_dict['content'] = module.text
                    note_dict['last_modified_by'] = TOOL
                    v['notes'].append(note_dict)

                # Grab Canvas details
                exploit_detail = item.find('exploit_framework_canvas')
                if exploit_detail is not None and \
                        exploit_detail.text == 'true':
                    note_dict = copy.deepcopy(models.note_model)
                    note_dict['title'] = 'Canvas Exploit'
                    note_dict['content'] = 'Exploit exists. Details unknown.'
                    module = item.find('canvas_package')
                    if module is not None:
                        note_dict['content'] = module.text
                    note_dict['last_modified_by'] = TOOL
                    v['notes'].append(note_dict)

                # Grab Core Impact details
                exploit_detail = item.find('exploit_framework_core')
                if exploit_detail is not None and \
                        exploit_detail.text == 'true':
                    note_dict = copy.deepcopy(models.note_model)
                    note_dict['title'] = 'Core Impact Exploit'
                    note_dict['content'] = 'Exploit exists. Details unknown.'
                    module = item.find('core_name')
                    if module is not None:
                        note_dict['content'] = module.text
                    note_dict['last_modified_by'] = TOOL
                    v['notes'].append(note_dict)

                # Grab ExploitHub SKUs
                exploit_detail = item.find('exploit_framework_exploithub')
                if exploit_detail is not None and \
                        exploit_detail.text == 'true':
                    note_dict = copy.deepcopy(models.note_model)
                    note_dict['title'] = 'Exploit Hub Exploit'
                    note_dict['content'] = 'Exploit exists. Details unknown.'
                    module = item.find('exploithub_sku')
                    if module is not None:
                        note_dict['content'] = module.text
                    note_dict['last_modified_by'] = TOOL
                    v['notes'].append(note_dict)

                # Grab any and all ExploitDB IDs.
                # BUGFIX: Element.iter() always returns an iterator (never
                # None), so the former `if details is not None` guard was
                # dead code; iterate directly.
                for module in item.iter('edb-id'):
                    note_dict = copy.deepcopy(models.note_model)
                    note_dict['title'] = 'Exploit-DB Exploit ' \
                                         '({0})'.format(module.text)
                    note_dict['content'] = module.text
                    note_dict['last_modified_by'] = TOOL
                    v['notes'].append(note_dict)

                # Set the CVSS score
                cvss = item.find('cvss_base_score')
                if cvss is not None:
                    v['cvss'] = float(cvss.text)
                else:
                    # No numeric score available; map the qualitative risk
                    # factor onto a representative CVSS value.
                    risk_factor = item.find('risk_factor')
                    if risk_factor is not None:
                        rf = risk_factor.text
                        if rf == "Low":
                            v['cvss'] = 3.0
                        elif rf == "Medium":
                            v['cvss'] = 5.0
                        elif rf == "High":
                            v['cvss'] = 7.5
                        elif rf == "Critical":
                            v['cvss'] = 10.0

                # Set the CVE(s)
                for cve in item.findall('cve'):
                    c = cve_pattern.sub('', cve.text)
                    v['cves'].append(c)

                # Set the plugin information
                plugin_dict = dict(models.plugin_id_model)
                plugin_dict['tool'] = TOOL
                plugin_dict['id'] = plugin_id
                v['plugin_ids'].append(plugin_dict)

                # Set the identified by information
                identified_dict = dict(models.identified_by_model)
                identified_dict['tool'] = TOOL
                identified_dict['id'] = plugin_id
                v['identified_by'].append(identified_dict)

                # By default, don't include informational findings unless
                # explicitly told to do so.
                if v['cvss'] == 0 and not include_informational:
                    continue

                vuln_host_map[plugin_id] = dict()
                vuln_host_map[plugin_id]['hosts'] = set()
                vuln_host_map[plugin_id]['vuln'] = v

            # Informational findings may have been skipped above, so the
            # plugin id is not guaranteed to be present.
            if plugin_id in vuln_host_map:
                vuln_host_map[plugin_id]['hosts'].add(
                    "{0}:{1}:{2}".format(
                        host_dict['string_addr'],
                        str(port),
                        protocol
                    )
                )

        # In the event no IP was found, use the 'name' attribute of
        # the 'ReportHost' element
        if not host_dict['string_addr']:
            host_dict['string_addr'] = temp_ip
            host_dict['long_addr'] = helper.ip2long(temp_ip)

        # Add all encountered ports to the host
        host_dict['ports'].extend(ports_processed.values())

        project_dict['hosts'].append(host_dict)

    # This code block uses the plugin/host/vuln mapping to associate
    # all vulnerable hosts to their vulnerability data within the
    # context of the expected Hive schema structure.
    for plugin_id, data in vuln_host_map.items():

        # Build list of host and ports affected by vulnerability and
        # assign that list to the vulnerability model
        for key in data['hosts']:
            (string_addr, port, protocol) = key.split(':')

            host_key_dict = dict(models.host_key_model)
            host_key_dict['string_addr'] = string_addr
            host_key_dict['port'] = int(port)
            host_key_dict['protocol'] = protocol
            data['vuln']['hosts'].append(host_key_dict)

        project_dict['vulnerabilities'].append(data['vuln'])

    if not project_dict['commands']:
        # Adds a dummy 'command' in the event the Nessus plugin used
        # to populate the data was not run. The Lair API expects it to
        # contain a value.
        command = copy.deepcopy(models.command_model)
        command['tool'] = TOOL
        command['command'] = "Nessus scan - command unknown"
        project_dict['commands'].append(command)

    return project_dict
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorBoard WSGI Application Logic.
TensorBoardApplication constructs TensorBoard as a WSGI application.
It handles serving static assets, and implements TensorBoard data APIs.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import imghdr
import mimetypes
import os
import re
import threading
import time
import six
from six import StringIO
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
from six.moves.urllib import parse as urlparse
from werkzeug import wrappers
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import tf_logging as logging
from tensorflow.tensorboard.backend import http_util
from tensorflow.tensorboard.backend import process_graph
from tensorflow.tensorboard.backend.event_processing import event_accumulator
from tensorflow.tensorboard.backend.event_processing import event_multiplexer
# Per-type size guidance handed to the EventMultiplexer (presumably caps how
# many events of each type are retained per run -- see event_accumulator).
DEFAULT_SIZE_GUIDANCE = {
    event_accumulator.COMPRESSED_HISTOGRAMS: 500,
    event_accumulator.IMAGES: 10,
    event_accumulator.AUDIO: 10,
    event_accumulator.SCALARS: 1000,
    event_accumulator.HEALTH_PILLS: 100,
    event_accumulator.HISTOGRAMS: 50,
}

# URL path fragments used to assemble the data-serving routes registered in
# TensorBoardWSGIApp.__init__.
DATA_PREFIX = '/data'
LOGDIR_ROUTE = '/logdir'
RUNS_ROUTE = '/runs'
PLUGIN_PREFIX = '/plugin'
PLUGINS_LISTING_ROUTE = '/plugins_listing'
SCALARS_ROUTE = '/' + event_accumulator.SCALARS
IMAGES_ROUTE = '/' + event_accumulator.IMAGES
AUDIO_ROUTE = '/' + event_accumulator.AUDIO
HISTOGRAMS_ROUTE = '/' + event_accumulator.HISTOGRAMS
COMPRESSED_HISTOGRAMS_ROUTE = '/' + event_accumulator.COMPRESSED_HISTOGRAMS
INDIVIDUAL_IMAGE_ROUTE = '/individualImage'
INDIVIDUAL_AUDIO_ROUTE = '/individualAudio'
GRAPH_ROUTE = '/' + event_accumulator.GRAPH
RUN_METADATA_ROUTE = '/' + event_accumulator.RUN_METADATA

# Frontend tab paths.
TAB_ROUTES = ['', '/events', '/images', '/audio', '/graphs', '/histograms']

# Maps the image type detected by imghdr to the MIME type served for it.
_IMGHDR_TO_MIMETYPE = {
    'bmp': 'image/bmp',
    'gif': 'image/gif',
    'jpeg': 'image/jpeg',
    'png': 'image/png'
}
# Fallback MIME type when the image format cannot be detected.
_DEFAULT_IMAGE_MIMETYPE = 'application/octet-stream'
def _content_type_for_image(encoded_image_string):
    """Return the MIME type for an encoded image, or octet-stream if unknown."""
    detected_type = imghdr.what(None, encoded_image_string)
    return _IMGHDR_TO_MIMETYPE.get(detected_type, _DEFAULT_IMAGE_MIMETYPE)
class _OutputFormat(object):
    """An enum used to list the valid output formats for API calls.

    Not all API calls support all formats (for example, only scalars and
    compressed histograms support CSV).
    """
    # Value compared against the request's `format` query parameter.
    JSON = 'json'
    CSV = 'csv'
def standard_tensorboard_wsgi(
    logdir,
    purge_orphaned_data,
    reload_interval,
    plugins):
  """Construct a TensorBoardWSGIApp with standard plugins and multiplexer.

  Args:
    logdir: The path to the directory containing events files.
    purge_orphaned_data: Whether to purge orphaned data.
    reload_interval: The interval at which the backend reloads more data in
      seconds.
    plugins: A list of plugins for TensorBoard to initialize.

  Returns:
    The new TensorBoard WSGI application.
  """
  # Build the multiplexer first, then hand it to the app along with the
  # plugin list and reload cadence.
  multiplexer = event_multiplexer.EventMultiplexer(
      size_guidance=DEFAULT_SIZE_GUIDANCE,
      purge_orphaned_data=purge_orphaned_data)
  app = TensorBoardWSGIApp(logdir, plugins, multiplexer, reload_interval)
  return app
class TensorBoardWSGIApp(object):
  """The TensorBoard application, conforming to WSGI spec."""

  # How many samples to include in sampling API calls by default.
  DEFAULT_SAMPLE_COUNT = 10

  # NOTE TO MAINTAINERS: An accurate Content-Length MUST be specified on all
  # responses using send_header.
  protocol_version = 'HTTP/1.1'
  def __init__(self, logdir, plugins, multiplexer, reload_interval):
    """Constructs the TensorBoard application.

    Args:
      logdir: the logdir spec that describes where data will be loaded.
        may be a directory, or comma-separated list of directories, or colons
        can be used to provide named directories
      plugins: List of plugins that extend tensorboard.plugins.BasePlugin
      multiplexer: The EventMultiplexer with TensorBoard data to serve
      reload_interval: How often (in seconds) to reload the Multiplexer

    Returns:
      A WSGI application that implements the TensorBoard backend.

    Raises:
      ValueError: If some plugin has no plugin_name
      ValueError: If two plugins have the same plugin_name
    """
    self._logdir = logdir
    self._plugins = plugins
    self._multiplexer = multiplexer
    self.tag = get_tensorboard_tag()

    path_to_run = parse_event_files_spec(self._logdir)
    # A zero/None reload_interval means load once synchronously instead of
    # reloading in the background.
    if reload_interval:
      start_reloading_multiplexer(self._multiplexer, path_to_run,
                                  reload_interval)
    else:
      reload_multiplexer(self._multiplexer, path_to_run)

    # Route table mapping URL paths to the WSGI sub-applications below.
    self.data_applications = {
        '/app.js':
            self._serve_js,
        DATA_PREFIX + AUDIO_ROUTE:
            self._serve_audio,
        DATA_PREFIX + COMPRESSED_HISTOGRAMS_ROUTE:
            self._serve_compressed_histograms,
        DATA_PREFIX + GRAPH_ROUTE:
            self._serve_graph,
        DATA_PREFIX + HISTOGRAMS_ROUTE:
            self._serve_histograms,
        DATA_PREFIX + IMAGES_ROUTE:
            self._serve_images,
        DATA_PREFIX + INDIVIDUAL_AUDIO_ROUTE:
            self._serve_individual_audio,
        DATA_PREFIX + INDIVIDUAL_IMAGE_ROUTE:
            self._serve_image,
        DATA_PREFIX + LOGDIR_ROUTE:
            self._serve_logdir,
        # TODO(chizeng): Delete this RPC once we have skylark rules that obviate
        # the need for the frontend to determine which plugins are active.
        DATA_PREFIX + PLUGINS_LISTING_ROUTE:
            self._serve_plugins_listing,
        DATA_PREFIX + RUN_METADATA_ROUTE:
            self._serve_run_metadata,
        DATA_PREFIX + RUNS_ROUTE:
            self._serve_runs,
        DATA_PREFIX + SCALARS_ROUTE:
            self._serve_scalars,
    }

    # Serve the routes from the registered plugins using their name as the route
    # prefix. For example if plugin z has two routes /a and /b, they will be
    # served as /data/plugin/z/a and /data/plugin/z/b.
    plugin_names_encountered = set()
    for plugin in self._plugins:
      if plugin.plugin_name is None:
        raise ValueError('Plugin %s has no plugin_name' % plugin)
      if plugin.plugin_name in plugin_names_encountered:
        raise ValueError('Duplicate plugins for name %s' % plugin.plugin_name)
      plugin_names_encountered.add(plugin.plugin_name)

      try:
        plugin_apps = plugin.get_plugin_apps(self._multiplexer, self._logdir)
      except Exception as e:  # pylint: disable=broad-except
        # A misbehaving plugin must not take down the whole backend; log and
        # continue with the remaining plugins.
        logging.warning('Plugin %s failed. Exception: %s', plugin.plugin_name,
                        str(e))
        continue
      for route, app in plugin_apps.items():
        path = DATA_PREFIX + PLUGIN_PREFIX + '/' + plugin.plugin_name + route
        self.data_applications[path] = app
# We use underscore_names for consistency with inherited methods.
def _image_response_for_run(self, run_images, run, tag):
"""Builds a JSON-serializable object with information about run_images.
Args:
run_images: A list of event_accumulator.ImageValueEvent objects.
run: The name of the run.
tag: The name of the tag the images all belong to.
Returns:
A list of dictionaries containing the wall time, step, URL, width, and
height for each image.
"""
response = []
for index, run_image in enumerate(run_images):
response.append({
'wall_time': run_image.wall_time,
'step': run_image.step,
# We include the size so that the frontend can add that to the <img>
# tag so that the page layout doesn't change when the image loads.
'width': run_image.width,
'height': run_image.height,
'query': self._query_for_individual_image(run, tag, index)
})
return response
def _audio_response_for_run(self, run_audio, run, tag):
"""Builds a JSON-serializable object with information about run_audio.
Args:
run_audio: A list of event_accumulator.AudioValueEvent objects.
run: The name of the run.
tag: The name of the tag the images all belong to.
Returns:
A list of dictionaries containing the wall time, step, URL, and
content_type for each audio clip.
"""
response = []
for index, run_audio_clip in enumerate(run_audio):
response.append({
'wall_time': run_audio_clip.wall_time,
'step': run_audio_clip.step,
'content_type': run_audio_clip.content_type,
'query': self._query_for_individual_audio(run, tag, index)
})
return response
def _path_is_safe(self, path):
"""Check path is safe (stays within current directory).
This is for preventing directory-traversal attacks.
Args:
path: The path to check for safety.
Returns:
True if the given path stays within the current directory, and false
if it would escape to a higher directory. E.g. _path_is_safe('index.html')
returns true, but _path_is_safe('../../../etc/password') returns false.
"""
base = os.path.abspath(os.curdir)
absolute_path = os.path.abspath(path)
prefix = os.path.commonprefix([base, absolute_path])
return prefix == base
@wrappers.Request.application
def _serve_logdir(self, request):
"""Respond with a JSON object containing this TensorBoard's logdir."""
return http_util.Respond(
request, {'logdir': self._logdir}, 'application/json')
@wrappers.Request.application
def _serve_scalars(self, request):
"""Given a tag and single run, return array of ScalarEvents."""
# TODO(cassandrax): return HTTP status code for malformed requests
tag = request.args.get('tag')
run = request.args.get('run')
values = self._multiplexer.Scalars(run, tag)
if request.args.get('format') == _OutputFormat.CSV:
string_io = StringIO()
writer = csv.writer(string_io)
writer.writerow(['Wall time', 'Step', 'Value'])
writer.writerows(values)
return http_util.Respond(request, string_io.getvalue(), 'text/csv')
else:
return http_util.Respond(request, values, 'application/json')
@wrappers.Request.application
def _serve_graph(self, request):
"""Given a single run, return the graph definition in json format."""
run = request.args.get('run', None)
if run is None:
return http_util.Respond(
request, 'query parameter "run" is required', 'text/plain', 400)
try:
graph = self._multiplexer.Graph(run)
except ValueError:
return http_util.Respond(
request, '404 Not Found', 'text/plain; charset=UTF-8', code=404)
limit_attr_size = request.args.get('limit_attr_size', None)
if limit_attr_size is not None:
try:
limit_attr_size = int(limit_attr_size)
except ValueError:
return http_util.Respond(
request, 'query parameter `limit_attr_size` must be integer',
'text/plain', 400)
large_attrs_key = request.args.get('large_attrs_key', None)
try:
process_graph.prepare_graph_for_ui(graph, limit_attr_size,
large_attrs_key)
except ValueError as e:
return http_util.Respond(request, e.message, 'text/plain', 400)
return http_util.Respond(request, str(graph), 'text/x-protobuf') # pbtxt
@wrappers.Request.application
def _serve_run_metadata(self, request):
"""Given a tag and a TensorFlow run, return the session.run() metadata."""
tag = request.args.get('tag', None)
run = request.args.get('run', None)
if tag is None:
return http_util.Respond(
request, 'query parameter "tag" is required', 'text/plain', 400)
if run is None:
return http_util.Respond(
request, 'query parameter "run" is required', 'text/plain', 400)
try:
run_metadata = self._multiplexer.RunMetadata(run, tag)
except ValueError:
return http_util.Respond(
request, '404 Not Found', 'text/plain; charset=UTF-8', code=404)
return http_util.Respond(
request, str(run_metadata), 'text/x-protobuf') # pbtxt
@wrappers.Request.application
def _serve_histograms(self, request):
"""Given a tag and single run, return an array of histogram values."""
tag = request.args.get('tag')
run = request.args.get('run')
values = self._multiplexer.Histograms(run, tag)
return http_util.Respond(request, values, 'application/json')
@wrappers.Request.application
def _serve_compressed_histograms(self, request):
"""Given a tag and single run, return an array of compressed histograms."""
tag = request.args.get('tag')
run = request.args.get('run')
compressed_histograms = self._multiplexer.CompressedHistograms(run, tag)
if request.args.get('format') == _OutputFormat.CSV:
string_io = StringIO()
writer = csv.writer(string_io)
# Build the headers; we have two columns for timing and two columns for
# each compressed histogram bucket.
headers = ['Wall time', 'Step']
if compressed_histograms:
bucket_count = len(compressed_histograms[0].compressed_histogram_values)
for i in xrange(bucket_count):
headers += ['Edge %d basis points' % i, 'Edge %d value' % i]
writer.writerow(headers)
for compressed_histogram in compressed_histograms:
row = [compressed_histogram.wall_time, compressed_histogram.step]
for value in compressed_histogram.compressed_histogram_values:
row += [value.rank_in_bps, value.value]
writer.writerow(row)
return http_util.Respond(request, string_io.getvalue(), 'text/csv')
else:
return http_util.Respond(
request, compressed_histograms, 'application/json')
@wrappers.Request.application
def _serve_images(self, request):
"""Given a tag and list of runs, serve a list of images.
Note that the images themselves are not sent; instead, we respond with URLs
to the images. The frontend should treat these URLs as opaque and should not
try to parse information about them or generate them itself, as the format
may change.
Args:
request: A werkzeug.wrappers.Request object.
Returns:
A werkzeug.Response application.
"""
tag = request.args.get('tag')
run = request.args.get('run')
images = self._multiplexer.Images(run, tag)
response = self._image_response_for_run(images, run, tag)
return http_util.Respond(request, response, 'application/json')
@wrappers.Request.application
def _serve_image(self, request):
"""Serves an individual image."""
tag = request.args.get('tag')
run = request.args.get('run')
index = int(request.args.get('index'))
image = self._multiplexer.Images(run, tag)[index]
encoded_image_string = image.encoded_image_string
content_type = _content_type_for_image(encoded_image_string)
return http_util.Respond(request, encoded_image_string, content_type)
def _query_for_individual_image(self, run, tag, index):
"""Builds a URL for accessing the specified image.
This should be kept in sync with _serve_image. Note that the URL is *not*
guaranteed to always return the same image, since images may be unloaded
from the reservoir as new images come in.
Args:
run: The name of the run.
tag: The tag.
index: The index of the image. Negative values are OK.
Returns:
A string representation of a URL that will load the index-th
sampled image in the given run with the given tag.
"""
query_string = urllib.parse.urlencode({
'run': run,
'tag': tag,
'index': index
})
return query_string
@wrappers.Request.application
def _serve_audio(self, request):
"""Given a tag and list of runs, serve a list of audio.
Note that the audio clips themselves are not sent; instead, we respond with
URLs to the audio. The frontend should treat these URLs as opaque and should
not try to parse information about them or generate them itself, as the
format may change.
Args:
request: A werkzeug.wrappers.Request object.
Returns:
A werkzeug.Response application.
"""
tag = request.args.get('tag')
run = request.args.get('run')
audio_list = self._multiplexer.Audio(run, tag)
response = self._audio_response_for_run(audio_list, run, tag)
return http_util.Respond(request, response, 'application/json')
@wrappers.Request.application
def _serve_individual_audio(self, request):
"""Serves an individual audio clip."""
tag = request.args.get('tag')
run = request.args.get('run')
index = int(request.args.get('index'))
audio = self._multiplexer.Audio(run, tag)[index]
return http_util.Respond(
request, audio.encoded_audio_string, audio.content_type)
def _query_for_individual_audio(self, run, tag, index):
"""Builds a URL for accessing the specified audio.
This should be kept in sync with _serve_individual_audio. Note that the URL
is *not* guaranteed to always return the same audio, since audio may be
unloaded from the reservoir as new audio comes in.
Args:
run: The name of the run.
tag: The tag.
index: The index of the audio. Negative values are OK.
Returns:
A string representation of a URL that will load the index-th
sampled audio in the given run with the given tag.
"""
query_string = urllib.parse.urlencode({
'run': run,
'tag': tag,
'index': index
})
return query_string
  @wrappers.Request.application
  def _serve_plugins_listing(self, request):
    """Serves an object mapping plugin name to whether it is enabled.

    Args:
      request: The werkzeug.Request object.

    Returns:
      A werkzeug.Response object whose JSON body maps each registered
      plugin's name to the result of its is_active() check.
    """
    return http_util.Respond(
        request,
        {plugin.plugin_name: plugin.is_active() for plugin in self._plugins},
        'application/json')
  @wrappers.Request.application
  def _serve_runs(self, request):
    """WSGI app serving a JSON object about runs and tags.

    Returns a mapping from runs to tagType to list of tags for that run.

    Args:
      request: A werkzeug request

    Returns:
      A werkzeug Response with the following content:
      {runName: {images: [tag1, tag2, tag3],
                 audio: [tag4, tag5, tag6],
                 scalars: [tagA, tagB, tagC],
                 histograms: [tagX, tagY, tagZ],
                 firstEventTimestamp: 123456.789}}
    """
    runs = self._multiplexer.Runs()
    for run_name, run_data in runs.items():
      try:
        run_data['firstEventTimestamp'] = self._multiplexer.FirstEventTimestamp(
            run_name)
      except ValueError:
        # NOTE(review): presumably raised when the run has no events yet —
        # serve null for this run rather than failing the whole listing.
        logging.warning('Unable to get first event timestamp for run %s',
                        run_name)
        run_data['firstEventTimestamp'] = None
    return http_util.Respond(request, runs, 'application/json')
  @wrappers.Request.application
  def _serve_index(self, request):
    """Serves the index page (i.e., the tensorboard app itself).

    Delegates to _serve_static_file for the bundled /dist/index.html.
    """
    return self._serve_static_file(request, '/dist/index.html')
  @wrappers.Request.application
  def _serve_js(self, request):
    """Serves the JavaScript for the index page.

    Delegates to _serve_static_file for the bundled /dist/app.js.
    """
    return self._serve_static_file(request, '/dist/app.js')
  def _serve_static_file(self, request, path):
    """Serves the static file located at the given path.

    Args:
      request: A werkzeug Request
      path: The path of the static file, relative to the tensorboard/
        directory.

    Returns:
      A werkzeug.Response application: 400 for an unsafe path, 404 when
      the resource cannot be found, otherwise the file contents with a
      guessed mimetype and a one-hour expiry.
    """
    # Strip off the leading forward slash.
    orig_path = path.lstrip('/')
    # Reject traversal-style paths before touching the resource loader.
    if not self._path_is_safe(orig_path):
      logging.warning('path not safe: %s', orig_path)
      return http_util.Respond(request, 'Naughty naughty!', 'text/plain', 400)
    # Resource loader wants a path relative to //WORKSPACE/tensorflow.
    path = os.path.join('tensorboard', orig_path)
    # Open the file and read it.
    try:
      contents = resource_loader.load_resource(path)
    except IOError:
      # For compatibility with latest version of Bazel, we renamed bower
      # packages to use '_' rather than '-' in their package name.
      # This means that the directory structure is changed too.
      # So that all our recursive imports work, we need to modify incoming
      # requests to map onto the new directory structure.
      path = orig_path
      components = path.split('/')
      components[0] = components[0].replace('-', '_')
      path = ('/').join(components)
      # Bazel keeps all the external dependencies in //WORKSPACE/external.
      # and resource loader wants a path relative to //WORKSPACE/tensorflow/.
      path = os.path.join('../external', path)
      try:
        contents = resource_loader.load_resource(path)
      except IOError:
        logging.warning('path %s not found, sending 404', path)
        return http_util.Respond(request, 'Not found', 'text/plain', code=404)
    # Guess the content type from the (possibly rewritten) path; fall back
    # to opaque bytes when the extension is unknown.
    mimetype, content_encoding = mimetypes.guess_type(path)
    mimetype = mimetype or 'application/octet-stream'
    return http_util.Respond(
        request,
        contents,
        mimetype,
        expires=3600,
        content_encoding=content_encoding)
  def __call__(self, environ, start_response):  # pylint: disable=invalid-name
    """Central entry point for the TensorBoard application.

    This method handles routing to sub-applications by dictionary lookup
    on the cleaned request path: registered data applications first, then
    client-side tab routes (which all serve the app shell), and finally
    static files.

    This __call__ method conforms to the WSGI spec, so that instances of
    this class are WSGI applications.

    Args:
      environ: See WSGI spec.
      start_response: See WSGI spec.

    Returns:
      A werkzeug Response.
    """
    request = wrappers.Request(environ)
    parsed_url = urlparse.urlparse(request.path)
    # Remove a trailing slash, if present.
    clean_path = parsed_url.path
    if clean_path.endswith('/'):
      clean_path = clean_path[:-1]
    # pylint: disable=too-many-function-args
    if clean_path in self.data_applications:
      return self.data_applications[clean_path](environ, start_response)
    elif clean_path in TAB_ROUTES:
      # Tab routes are handled client-side: serve the index page and let
      # the frontend route the tab itself.
      return self._serve_index(environ, start_response)
    else:
      return self._serve_static_file(request, clean_path)(environ,
                                                          start_response)
    # pylint: enable=too-many-function-args
def parse_event_files_spec(logdir):
  """Parses `logdir` into a map from paths to run group names.

  The events files flag format is a comma-separated list of path
  specifications. A path specification either looks like
  'group_name:/path/to/directory' or '/path/to/directory'; in the latter
  case, the group is unnamed. Group names cannot start with a forward
  slash: /foo:bar/baz will be interpreted as a spec with no name and path
  '/foo:bar/baz'. Globs are not supported.

  Args:
    logdir: A comma-separated list of run specifications, or None.

  Returns:
    A dict mapping directory paths to names like
    {'/path/to/directory': 'name'}. Groups without an explicit name map to
    None. If logdir is None, returns an empty dict, which is helpful for
    testing things that don't require any valid runs.
  """
  if logdir is None:
    return {}
  # Make sure keeping consistent with ParseURI in core/lib/io/path.cc:
  # anything of the form scheme://... is a URI, not a group spec.
  uri_re = re.compile('[a-zA-Z][0-9a-zA-Z.]*://.*')
  path_to_name = {}
  for spec in logdir.split(','):
    # A leading "name:" marks a group, unless the whole spec is a URI or
    # starts with '/', in which case the colon belongs to the path itself
    # (e.g. /foo:bar/baz).
    has_group = (uri_re.match(spec) is None
                 and ':' in spec
                 and not spec.startswith('/'))
    if has_group:
      # partition splits at the first colon only, so
      # run_name:/path:with/a/colon still works.
      name, _, path = spec.partition(':')
    else:
      name, path = None, spec
    # Canonicalize filesystem paths; leave URI paths untouched.
    if uri_re.match(path) is None:
      path = os.path.realpath(path)
    path_to_name[path] = name
  return path_to_name
def reload_multiplexer(multiplexer, path_to_run):
  """Loads all runs into the multiplexer.

  Registers every directory with the multiplexer first, then performs a
  single Reload() pass over all of them. Timing is logged for diagnostics.

  Args:
    multiplexer: The `EventMultiplexer` to add runs to and reload.
    path_to_run: A dict mapping from paths to run names, where `None` as
      the run name is interpreted as a run name equal to the path.
  """
  start = time.time()
  logging.info('TensorBoard reload process beginning')
  # six.iteritems keeps this loop Python 2 compatible.
  for (path, name) in six.iteritems(path_to_run):
    multiplexer.AddRunsFromDirectory(path, name)
  logging.info('TensorBoard reload process: Reload the whole Multiplexer')
  multiplexer.Reload()
  duration = time.time() - start
  logging.info('TensorBoard done reloading. Load took %0.3f secs', duration)
def start_reloading_multiplexer(multiplexer, path_to_run, load_interval):
  """Starts a thread to automatically reload the given multiplexer.

  The thread will reload the multiplexer by calling `ReloadMultiplexer`
  every `load_interval` seconds, starting immediately.

  Args:
    multiplexer: The `EventMultiplexer` to add runs to and reload.
    path_to_run: A dict mapping from paths to run names, where `None` as
      the run name is interpreted as a run name equal to the path.
    load_interval: How many seconds to wait after one load before starting
      the next load.

  Returns:
    A started `threading.Thread` that reloads the multiplexer.
  """
  # We don't call multiplexer.Reload() here because that would make
  # AddRunsFromDirectory block until the runs have all loaded.
  def _reload_forever():
    # Loop forever; the daemon flag below lets process exit end the loop.
    while True:
      reload_multiplexer(multiplexer, path_to_run)
      time.sleep(load_interval)
  thread = threading.Thread(target=_reload_forever)
  # Daemonize so this thread never prevents interpreter shutdown.
  thread.daemon = True
  thread.start()
  return thread
def get_tensorboard_tag():
  """Read the TensorBoard TAG number, and return it or an empty string."""
  # Load the bundled TAG resource and trim surrounding whitespace/newline.
  return resource_loader.load_resource('tensorboard/TAG').strip()
| |
#!/usr/bin/env python2.7
#
# Copyright (C) 2013 Permabit, Inc.
# Copyright (C) 2013--2014 Zulip, Inc.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# The following is a table showing which kinds of messages are handled by the
# mirror in each mode:
#
# Message origin/type --> | Jabber | Zulip
# Mode/sender-, +-----+----+--------+----
# V | MUC | PM | stream | PM
# --------------+-------------+-----+----+--------+----
# | other sender| | x | |
# personal mode +-------------+-----+----+--------+----
# | self sender | | x | x | x
# ------------- +-------------+-----+----+--------+----
# | other sender| x | | |
# public mode +-------------+-----+----+--------+----
# | self sender | | | |
import logging
import threading
import optparse
from sleekxmpp import ClientXMPP, InvalidJID, JID
from sleekxmpp.exceptions import IqError, IqTimeout
from six.moves.configparser import SafeConfigParser
import os, sys, zulip, getpass
import re
__version__ = "1.1"
def room_to_stream(room):
    """Map a Jabber MUC room name to its mirrored Zulip stream name."""
    return "%s/xmpp" % (room,)
def stream_to_room(stream):
    """Inverse of room_to_stream: recover the MUC room from a stream name."""
    # Lowercase first, then drop everything from the last "/xmpp" marker on.
    lowered = stream.lower()
    room, _sep, _tail = lowered.rpartition("/xmpp")
    return room
def jid_to_zulip(jid):
    """Map a Jabber JID to the corresponding Zulip e-mail address."""
    # Bot accounts never carry the configured e-mail suffix.
    if jid.username.endswith("-bot"):
        suffix = ''
    else:
        suffix = options.zulip_email_suffix
    return "%s%s@%s" % (jid.username, suffix, options.zulip_domain)
def zulip_to_jid(email, jabber_domain):
    """Map a Zulip e-mail address to a Jabber JID on jabber_domain."""
    jid = JID(email, domain=jabber_domain)
    suffix = options.zulip_email_suffix
    # Strip the configured e-mail suffix back off; bots never carry it.
    if suffix and suffix in jid.username and not jid.username.endswith("-bot"):
        jid.username = jid.username.rpartition(suffix)[0]
    return jid
class JabberToZulipBot(ClientXMPP):
    """XMPP client that forwards Jabber traffic into Zulip.

    Joins the given MUC rooms and mirrors group chats (public mode) or
    private messages (personal mode) into Zulip via the API client
    installed with set_zulip_client().
    """

    def __init__(self, jid, password, rooms):
        # Prefer an explicit JID resource as the MUC nickname; otherwise
        # use the username and claim the "zulip" resource for ourselves.
        if jid.resource:
            self.nick = jid.resource
        else:
            self.nick = jid.username
            jid.resource = "zulip"
        ClientXMPP.__init__(self, jid, password)
        self.rooms = set()
        self.rooms_to_join = rooms
        self.add_event_handler("session_start", self.session_start)
        self.add_event_handler("message", self.message)
        self.zulip = None
        self.use_ipv6 = False

        self.register_plugin('xep_0045') # Jabber chatrooms
        self.register_plugin('xep_0199') # XMPP Ping

    def set_zulip_client(self, client):
        # Wire in the Zulip API client used to forward messages.
        self.zulip = client

    def session_start(self, event):
        """XMPP session is up: fetch roster, announce presence, join MUCs."""
        self.get_roster()
        self.send_presence()
        for room in self.rooms_to_join:
            self.join_muc(room)

    def join_muc(self, room):
        """Join (and configure) the MUC for `room`; no-op if already joined."""
        if room in self.rooms:
            return
        logging.debug("Joining " + room)
        self.rooms.add(room)
        muc_jid = JID(local=room, domain=options.conference_domain)
        xep0045 = self.plugin['xep_0045']
        try:
            xep0045.joinMUC(muc_jid, self.nick, wait=True)
        except InvalidJID:
            logging.error("Could not join room: " + str(muc_jid))
            return

        # Configure the room.  Really, we should only do this if the room is
        # newly created.
        form = None
        try:
            form = xep0045.getRoomConfig(muc_jid)
        except ValueError:
            pass
        if form:
            xep0045.configureRoom(muc_jid, form)
        else:
            logging.error("Could not configure room: " + str(muc_jid))

    def leave_muc(self, room):
        """Leave the MUC for `room`; no-op if we are not in it."""
        if room not in self.rooms:
            return
        logging.debug("Leaving " + room)
        self.rooms.remove(room)
        muc_jid = JID(local=room, domain=options.conference_domain)
        self.plugin['xep_0045'].leaveMUC(muc_jid, self.nick)

    def message(self, msg):
        """Dispatch an incoming Jabber stanza to group() or private()."""
        try:
            if msg["type"] == "groupchat":
                return self.group(msg)
            elif msg["type"] == "chat":
                return self.private(msg)
            else:
                logging.warning("Got unexpected message type")
                logging.warning(msg)
        except Exception:
            logging.exception("Error forwarding Jabber => Zulip")

    def private(self, msg):
        """Mirror a Jabber private message to a Zulip private message."""
        # The special thread id marks messages this mirror itself sent to
        # Jabber (see ZulipToJabberBot); skip them to avoid echo loops.
        if options.mode == 'public' or msg['thread'] == u'\u1FFFE':
            return
        sender = jid_to_zulip(msg["from"])
        recipient = jid_to_zulip(msg["to"])

        zulip_message = dict(
            sender = sender,
            type = "private",
            to = recipient,
            content = msg["body"],
            )
        ret = self.zulip.client.send_message(zulip_message)
        if ret.get("result") != "success":
            logging.error(ret)

    def group(self, msg):
        """Mirror a Jabber MUC message to the corresponding Zulip stream."""
        # Skip our own mirrored messages (special thread id) and all group
        # traffic when running in personal mode.
        if options.mode == 'personal' or msg["thread"] == u'\u1FFFE':
            return

        subject = msg["subject"]
        if len(subject) == 0:
            subject = "(no topic)"
        stream = room_to_stream(msg['from'].local)
        sender_nick = msg.get_mucnick()
        if not sender_nick:
            # Messages from the room itself have no nickname. We should not try
            # to mirror these
            return
        jid = self.nickname_to_jid(msg.get_mucroom(), sender_nick)
        sender = jid_to_zulip(jid)
        zulip_message = dict(
            forged = "yes",
            sender = sender,
            type = "stream",
            subject = subject,
            to = stream,
            content = msg["body"],
            )
        ret = self.zulip.client.send_message(zulip_message)
        if ret.get("result") != "success":
            logging.error(ret)

    def nickname_to_jid(self, room, nick):
        """Resolve a MUC nickname to a JID, synthesizing one if unknown."""
        jid = self.plugin['xep_0045'].getJidProperty(room, nick, "jid")
        if (jid is None or jid == ''):
            # Room does not expose real JIDs; fabricate one from the nick.
            return JID(local=nick.replace(' ', ''), domain=self.boundjid.domain)
        else:
            return jid
class ZulipToJabberBot(object):
    """Forwards a Zulip user's events (messages, subscriptions) to Jabber.

    The companion JabberToZulipBot is installed via set_jabber_client().
    Outgoing Jabber messages are stamped with a sentinel thread id so the
    Jabber side can recognize and skip mirrored traffic.
    """

    def __init__(self, zulip_client):
        self.client = zulip_client
        self.jabber = None

    def set_jabber_client(self, client):
        # Wire in the Jabber client used for outgoing mirrored messages.
        self.jabber = client

    def process_event(self, event):
        """Handle one event from the Zulip event queue."""
        if event['type'] == 'message':
            message = event["message"]
            # Only mirror messages sent by this mirror's own user.
            if message['sender_email'] != self.client.email:
                return
            try:
                if message['type'] == 'stream':
                    self.stream_message(message)
                elif message['type'] == 'private':
                    self.private_message(message)
            except Exception:
                # Was a bare `except:`, which would also swallow SystemExit
                # and KeyboardInterrupt. Keep the mirror alive on forwarding
                # errors, but let real interrupts propagate (this matches
                # JabberToZulipBot.message above).
                logging.exception("Exception forwarding Zulip => Jabber")
        elif event['type'] == 'subscription':
            self.process_subscription(event)
        elif event['type'] == 'stream':
            self.process_stream(event)

    def stream_message(self, msg):
        """Mirror a Zulip stream message into the matching Jabber MUC."""
        stream = msg['display_recipient']
        # Only "<room>/xmpp" streams are mirrored.
        if not stream.endswith("/xmpp"):
            return

        room = stream_to_room(stream)
        jabber_recipient = JID(local=room, domain=options.conference_domain)
        outgoing = self.jabber.make_message(
            mto   = jabber_recipient,
            mbody = msg['content'],
            mtype = 'groupchat')
        # Sentinel thread id: tells the Jabber side this is mirrored traffic.
        outgoing['thread'] = u'\u1FFFE'
        outgoing.send()

    def private_message(self, msg):
        """Mirror a Zulip private message to its mirror-dummy recipients."""
        for recipient in msg['display_recipient']:
            if recipient["email"] == self.client.email:
                continue
            if not recipient["is_mirror_dummy"]:
                continue
            recip_email = recipient['email']
            jabber_recipient = zulip_to_jid(recip_email, self.jabber.boundjid.domain)
            outgoing = self.jabber.make_message(
                mto   = jabber_recipient,
                mbody = msg['content'],
                mtype = 'chat')
            # Sentinel thread id: tells the Jabber side this is mirrored traffic.
            outgoing['thread'] = u'\u1FFFE'
            outgoing.send()

    def process_subscription(self, event):
        """Join/leave MUCs as our Zulip /xmpp subscriptions change."""
        if event['op'] == 'add':
            streams = [s['name'].lower() for s in event['subscriptions']]
            streams = [s for s in streams if s.endswith("/xmpp")]
            for stream in streams:
                self.jabber.join_muc(stream_to_room(stream))
        if event['op'] == 'remove':
            streams = [s['name'].lower() for s in event['subscriptions']]
            streams = [s for s in streams if s.endswith("/xmpp")]
            for stream in streams:
                self.jabber.leave_muc(stream_to_room(stream))

    def process_stream(self, event):
        """Join/leave MUCs as /xmpp streams are occupied or vacated."""
        if event['op'] == 'occupy':
            streams = [s['name'].lower() for s in event['streams']]
            streams = [s for s in streams if s.endswith("/xmpp")]
            for stream in streams:
                self.jabber.join_muc(stream_to_room(stream))
        if event['op'] == 'vacate':
            streams = [s['name'].lower() for s in event['streams']]
            streams = [s for s in streams if s.endswith("/xmpp")]
            for stream in streams:
                self.jabber.leave_muc(stream_to_room(stream))
def get_rooms(zulip):
    """Return the MUC room names for every visible "<room>/xmpp" stream."""
    def fetch(key, method):
        # A failed initial Zulip query is fatal for the mirror.
        ret = method()
        if ret.get("result") != "success":
            logging.error(ret)
            sys.exit("Could not get initial list of Zulip %s" % (key,))
        return ret[key]

    # Public mode mirrors all streams; personal mode only our subscriptions.
    if options.mode == 'public':
        stream_infos = fetch("streams", zulip.client.get_streams)
    else:
        stream_infos = fetch("subscriptions", zulip.client.list_subscriptions)

    return [stream_to_room(info['name'])
            for info in stream_infos
            if info['name'].endswith("/xmpp")]
def config_error(msg):
    """Report a fatal configuration problem on stderr and exit with status 2."""
    sys.stderr.write(msg + "\n")
    sys.exit(2)
if __name__ == '__main__':
    # ---- Command-line / config-file option parsing -------------------------
    parser = optparse.OptionParser(epilog=
'''Most general and Jabber configuration options may also be specified in the
zulip configuration file under the jabber_mirror section (exceptions are noted
in their help sections). Keys have the same name as options with hyphens
replaced with underscores. Zulip configuration options go in the api section,
as normal.'''.replace("\n", " ")
    )
    parser.add_option('--mode',
                      default=None,
                      action='store',
                      help= \
'''Which mode to run in. Valid options are "personal" and "public". In
"personal" mode, the mirror uses an individual users' credentials and mirrors
all messages they send on Zulip to Jabber and all private Jabber messages to
Zulip. In "public" mode, the mirror uses the credentials for a dedicated mirror
user and mirrors messages sent to Jabber rooms to Zulip. Defaults to
"personal"'''.replace("\n", " "))
    parser.add_option('--zulip-email-suffix',
                      default=None,
                      action='store',
                      help= \
'''Add the specified suffix to the local part of email addresses constructed
from JIDs and nicks before sending requests to the Zulip server, and remove the
suffix before sending requests to the Jabber server. For example, specifying
"+foo" will cause messages that are sent to the "bar" room by nickname "qux" to
be mirrored to the "bar/xmpp" stream in Zulip by user "qux+foo@example.com". This
option does not affect login credentials.'''.replace("\n", " "))
    parser.add_option('-d', '--debug',
                      help='set logging to DEBUG. Can not be set via config file.',
                      action='store_const',
                      dest='log_level',
                      const=logging.DEBUG,
                      default=logging.INFO)

    jabber_group = optparse.OptionGroup(parser, "Jabber configuration")
    jabber_group.add_option('--jid',
                            default=None,
                            action='store',
                            help="Your Jabber JID. If a resource is specified, "
                            + "it will be used as the nickname when joining MUCs. "
                            + "Specifying the nickname is mostly useful if you want "
                            + "to run the public mirror from a regular user instead of "
                            + "from a dedicated account.")
    jabber_group.add_option('--jabber-password',
                            default=None,
                            action='store',
                            help="Your Jabber password")
    jabber_group.add_option('--conference-domain',
                            default=None,
                            action='store',
                            help="Your Jabber conference domain (E.g. conference.jabber.example.com). "
                            + "If not specifed, \"conference.\" will be prepended to your JID's domain.")
    jabber_group.add_option('--no-use-tls',
                            default=None,
                            action='store_true')
    jabber_group.add_option('--jabber-server-address',
                            default=None,
                            action='store',
                            help="The hostname of your Jabber server. This is only needed if "
                            "your server is missing SRV records")
    jabber_group.add_option('--jabber-server-port',
                            default='5222',
                            action='store',
                            help="The port of your Jabber server. This is only needed if "
                            "your server is missing SRV records")

    parser.add_option_group(jabber_group)
    parser.add_option_group(zulip.generate_option_group(parser, "zulip-"))
    (options, args) = parser.parse_args()

    logging.basicConfig(level=options.log_level,
                        format='%(levelname)-8s %(message)s')

    if options.zulip_config_file is None:
        config_file = zulip.get_default_config_filename()
    else:
        config_file = options.zulip_config_file

    config = SafeConfigParser()
    try:
        # NOTE: `file()` is Python 2 only; this script targets 2.7 per its
        # shebang. A missing config file is fine — defaults apply below.
        with file(config_file, 'r') as f:
            config.readfp(f, config_file)
    except IOError:
        pass

    # Fill any option not set on the command line from the [jabber_mirror]
    # section of the config file.
    for option in ("jid", "jabber_password", "conference_domain", "mode", "zulip_email_suffix", "jabber_server_address", "jabber_server_port"):
        if (getattr(options, option) is None
            and config.has_option("jabber_mirror", option)):
            setattr(options, option, config.get("jabber_mirror", option))

    # Boolean options need getboolean() and an explicit False default.
    for option in ("no_use_tls",):
        if getattr(options, option) is None:
            if config.has_option("jabber_mirror", option):
                setattr(options, option, config.getboolean("jabber_mirror", option))
            else:
                setattr(options, option, False)

    if options.mode is None:
        options.mode = "personal"
    if options.zulip_email_suffix is None:
        options.zulip_email_suffix = ''

    if options.mode not in ('public', 'personal'):
        config_error("Bad value for --mode: must be one of 'public' or 'personal'")

    if None in (options.jid, options.jabber_password):
        config_error("You must specify your Jabber JID and Jabber password either "
                     + "in the Zulip configuration file or on the commandline")

    # ---- Wire up the two halves of the mirror ------------------------------
    # NOTE: this rebinds the name `zulip` from the module to the bot wrapper;
    # the module is not referenced again after this point.
    zulip = ZulipToJabberBot(zulip.init_from_options(options, "JabberMirror/" + __version__))
    # This won't work for open realms that don't have a consistent domain
    options.zulip_domain = zulip.client.email.partition('@')[-1]

    try:
        jid = JID(options.jid)
    except InvalidJID as e:
        # `e.message` is Python 2 only, consistent with the rest of the script.
        config_error("Bad JID: %s: %s" % (options.jid, e.message))

    if options.conference_domain is None:
        options.conference_domain = "conference.%s" % (jid.domain,)

    xmpp = JabberToZulipBot(jid, options.jabber_password, get_rooms(zulip))

    address = None
    if options.jabber_server_address:
        address = (options.jabber_server_address, options.jabber_server_port)

    if not xmpp.connect(use_tls=not options.no_use_tls, address=address):
        sys.exit("Unable to connect to Jabber server")

    xmpp.set_zulip_client(zulip)
    zulip.set_jabber_client(xmpp)

    # Run the XMPP side in the background; the Zulip event loop blocks below.
    xmpp.process(block=False)
    if options.mode == 'public':
        event_types = ['stream']
    else:
        event_types = ['message', 'subscription']

    try:
        logging.info("Connecting to Zulip.")
        zulip.client.call_on_each_event(zulip.process_event,
                                        event_types=event_types)
    except BaseException as e:
        logging.exception("Exception in main loop")
        xmpp.abort()
        sys.exit(1)
| |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
import pyspark.pandas as ps
from pyspark.testing.pandasutils import PandasOnSparkTestCase, TestUtils
class CategoricalTest(PandasOnSparkTestCase, TestUtils):
    @property
    def pdf(self):
        # Fresh pandas fixture: column "a" with inferred integer categories,
        # column "b" with an explicit order including the unused category "d".
        return pd.DataFrame(
            {
                "a": pd.Categorical([1, 2, 3, 1, 2, 3]),
                "b": pd.Categorical(
                    ["b", "a", "c", "c", "b", "a"], categories=["c", "b", "d", "a"]
                ),
            },
        )
    @property
    def psdf(self):
        # pandas-on-Spark mirror of `pdf`; rebuilt on every access.
        return ps.from_pandas(self.pdf)
    @property
    def df_pair(self):
        # Convenience (pandas, pandas-on-Spark) pair for comparison tests.
        return self.pdf, self.psdf
def test_categorical_frame(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.a, pdf.a)
self.assert_eq(psdf.b, pdf.b)
self.assert_eq(psdf.index, pdf.index)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psdf.sort_values("b"), pdf.sort_values("b"))
def test_categorical_series(self):
pser = pd.Series([1, 2, 3], dtype="category")
psser = ps.Series([1, 2, 3], dtype="category")
self.assert_eq(psser, pser)
self.assert_eq(psser.cat.categories, pser.cat.categories)
self.assert_eq(psser.cat.codes, pser.cat.codes)
self.assert_eq(psser.cat.ordered, pser.cat.ordered)
    def test_categories_setter(self):
        """Assigning .cat.categories renames the categories in place."""
        pdf, psdf = self.df_pair

        pser = pdf.a
        psser = psdf.a

        pser.cat.categories = ["z", "y", "x"]
        psser.cat.categories = ["z", "y", "x"]
        if LooseVersion(pd.__version__) >= LooseVersion("1.3"):
            # TODO(SPARK-36367): Fix the behavior to follow pandas >= 1.3
            pass
        else:
            self.assert_eq(pser, psser)
            self.assert_eq(pdf, psdf)

        # A length mismatch with the existing categories must raise.
        with self.assertRaises(ValueError):
            psser.cat.categories = [1, 2, 3, 4]
    def test_add_categories(self):
        """cat.add_categories matches pandas for scalar, list, and inplace."""
        pdf, psdf = self.df_pair

        pser = pdf.a
        psser = psdf.a

        self.assert_eq(pser.cat.add_categories(4), psser.cat.add_categories(4))
        self.assert_eq(pser.cat.add_categories([4, 5]), psser.cat.add_categories([4, 5]))
        self.assert_eq(pser.cat.add_categories([]), psser.cat.add_categories([]))

        pser.cat.add_categories(4, inplace=True)
        psser.cat.add_categories(4, inplace=True)
        if LooseVersion(pd.__version__) >= LooseVersion("1.3"):
            # TODO(SPARK-36367): Fix the behavior to follow pandas >= 1.3
            pass
        else:
            self.assert_eq(pser, psser)
            self.assert_eq(pdf, psdf)

        # Adding an existing category or duplicates must raise.
        self.assertRaises(ValueError, lambda: psser.cat.add_categories(4))
        self.assertRaises(ValueError, lambda: psser.cat.add_categories([5, 5]))
    def test_remove_categories(self):
        """cat.remove_categories matches pandas, including None and inplace."""
        pdf, psdf = self.df_pair

        pser = pdf.a
        psser = psdf.a

        self.assert_eq(pser.cat.remove_categories(2), psser.cat.remove_categories(2))
        self.assert_eq(pser.cat.remove_categories([1, 3]), psser.cat.remove_categories([1, 3]))
        self.assert_eq(pser.cat.remove_categories([]), psser.cat.remove_categories([]))
        self.assert_eq(pser.cat.remove_categories([2, 2]), psser.cat.remove_categories([2, 2]))
        self.assert_eq(
            pser.cat.remove_categories([1, 2, 3]), psser.cat.remove_categories([1, 2, 3])
        )
        self.assert_eq(pser.cat.remove_categories(None), psser.cat.remove_categories(None))
        self.assert_eq(pser.cat.remove_categories([None]), psser.cat.remove_categories([None]))

        pser.cat.remove_categories(2, inplace=True)
        psser.cat.remove_categories(2, inplace=True)
        if LooseVersion(pd.__version__) >= LooseVersion("1.3"):
            # TODO(SPARK-36367): Fix the behavior to follow pandas >= 1.3
            pass
        else:
            self.assert_eq(pser, psser)
            self.assert_eq(pdf, psdf)

        # Removing a category that does not exist must raise.
        self.assertRaises(ValueError, lambda: psser.cat.remove_categories(4))
        self.assertRaises(ValueError, lambda: psser.cat.remove_categories([4, None]))
    def test_remove_unused_categories(self):
        """cat.remove_unused_categories matches pandas, also after mutation."""
        pdf, psdf = self.df_pair

        pser = pdf.a
        psser = psdf.a

        self.assert_eq(pser.cat.remove_unused_categories(), psser.cat.remove_unused_categories())

        # Create unused categories on both sides: add 4, then drop 2.
        pser.cat.add_categories(4, inplace=True)
        pser.cat.remove_categories(2, inplace=True)
        psser.cat.add_categories(4, inplace=True)
        psser.cat.remove_categories(2, inplace=True)

        self.assert_eq(pser.cat.remove_unused_categories(), psser.cat.remove_unused_categories())

        pser.cat.remove_unused_categories(inplace=True)
        psser.cat.remove_unused_categories(inplace=True)
        if LooseVersion(pd.__version__) >= LooseVersion("1.3"):
            # TODO(SPARK-36367): Fix the behavior to follow pandas >= 1.3
            pass
        else:
            self.assert_eq(pser, psser)
            self.assert_eq(pdf, psdf)
    def test_reorder_categories(self):
        """cat.reorder_categories matches pandas for ordered and inplace."""
        pdf, psdf = self.df_pair

        pser = pdf.a
        psser = psdf.a

        self.assert_eq(
            pser.cat.reorder_categories([1, 2, 3]), psser.cat.reorder_categories([1, 2, 3])
        )
        self.assert_eq(
            pser.cat.reorder_categories([1, 2, 3], ordered=True),
            psser.cat.reorder_categories([1, 2, 3], ordered=True),
        )
        self.assert_eq(
            pser.cat.reorder_categories([3, 2, 1]), psser.cat.reorder_categories([3, 2, 1])
        )
        self.assert_eq(
            pser.cat.reorder_categories([3, 2, 1], ordered=True),
            psser.cat.reorder_categories([3, 2, 1], ordered=True),
        )

        pser.cat.reorder_categories([1, 2, 3], inplace=True)
        psser.cat.reorder_categories([1, 2, 3], inplace=True)
        if LooseVersion(pd.__version__) >= LooseVersion("1.3"):
            # TODO(SPARK-36367): Fix the behavior to follow pandas >= 1.3
            pass
        else:
            self.assert_eq(pser, psser)
            self.assert_eq(pdf, psdf)

        pser.cat.reorder_categories([3, 2, 1], ordered=True, inplace=True)
        psser.cat.reorder_categories([3, 2, 1], ordered=True, inplace=True)
        if LooseVersion(pd.__version__) >= LooseVersion("1.3"):
            # TODO(SPARK-36367): Fix the behavior to follow pandas >= 1.3
            pass
        else:
            self.assert_eq(pser, psser)
            self.assert_eq(pdf, psdf)

        # The new order must be a permutation of the existing categories.
        self.assertRaises(ValueError, lambda: psser.cat.reorder_categories([1, 2]))
        self.assertRaises(ValueError, lambda: psser.cat.reorder_categories([1, 2, 4]))
        self.assertRaises(ValueError, lambda: psser.cat.reorder_categories([1, 2, 2]))
        self.assertRaises(TypeError, lambda: psser.cat.reorder_categories(1))
        self.assertRaises(TypeError, lambda: psdf.b.cat.reorder_categories("abcd"))
    def test_as_ordered_unordered(self):
        """cat.as_ordered / cat.as_unordered match pandas, incl. inplace."""
        pdf, psdf = self.df_pair

        pser = pdf.a
        psser = psdf.a

        # as_ordered
        self.assert_eq(pser.cat.as_ordered(), psser.cat.as_ordered())

        pser.cat.as_ordered(inplace=True)
        psser.cat.as_ordered(inplace=True)
        if LooseVersion(pd.__version__) >= LooseVersion("1.3"):
            # TODO(SPARK-36367): Fix the behavior to follow pandas >= 1.3
            pass
        else:
            self.assert_eq(pser, psser)
            self.assert_eq(pdf, psdf)

        # as_unordered
        self.assert_eq(pser.cat.as_unordered(), psser.cat.as_unordered())

        pser.cat.as_unordered(inplace=True)
        psser.cat.as_unordered(inplace=True)
        self.assert_eq(pser, psser)
        self.assert_eq(pdf, psdf)
    def test_astype(self):
        """astype to/from categorical dtypes matches pandas per version."""
        pser = pd.Series(["a", "b", "c"])
        psser = ps.from_pandas(pser)

        self.assert_eq(psser.astype("category"), pser.astype("category"))
        self.assert_eq(
            psser.astype(CategoricalDtype(["c", "a", "b"])),
            pser.astype(CategoricalDtype(["c", "a", "b"])),
        )

        pcser = pser.astype(CategoricalDtype(["c", "a", "b"]))
        kcser = psser.astype(CategoricalDtype(["c", "a", "b"]))

        self.assert_eq(kcser.astype("category"), pcser.astype("category"))

        # Recasting categorical -> categorical changed in pandas 1.2 / 1.3,
        # so the expected value depends on the installed pandas version.
        if LooseVersion(pd.__version__) >= LooseVersion("1.3"):
            # TODO(SPARK-36367): Fix the behavior to follow pandas >= 1.3
            pass
        elif LooseVersion(pd.__version__) >= LooseVersion("1.2"):
            self.assert_eq(
                kcser.astype(CategoricalDtype(["b", "c", "a"])),
                pcser.astype(CategoricalDtype(["b", "c", "a"])),
            )
        else:
            self.assert_eq(
                kcser.astype(CategoricalDtype(["b", "c", "a"])),
                pser.astype(CategoricalDtype(["b", "c", "a"])),
            )

        self.assert_eq(kcser.astype(str), pcser.astype(str))
def test_factorize(self):
pser = pd.Series(["a", "b", "c", None], dtype=CategoricalDtype(["c", "a", "d", "b"]))
psser = ps.from_pandas(pser)
pcodes, puniques = pser.factorize()
kcodes, kuniques = psser.factorize()
self.assert_eq(kcodes.tolist(), pcodes.tolist())
self.assert_eq(kuniques, puniques)
pcodes, puniques = pser.factorize(na_sentinel=-2)
kcodes, kuniques = psser.factorize(na_sentinel=-2)
self.assert_eq(kcodes.tolist(), pcodes.tolist())
self.assert_eq(kuniques, puniques)
def test_frame_apply(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.apply(lambda x: x).sort_index(), pdf.apply(lambda x: x).sort_index())
self.assert_eq(
psdf.apply(lambda x: x, axis=1).sort_index(),
pdf.apply(lambda x: x, axis=1).sort_index(),
)
    def test_frame_apply_without_shortcut(self):
        """Same as test_frame_apply, but forcing the non-shortcut code path."""
        with ps.option_context("compute.shortcut_limit", 0):
            self.test_frame_apply()

        pdf = pd.DataFrame(
            {"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
        )
        psdf = ps.from_pandas(pdf)

        dtype = CategoricalDtype(categories=["a", "b", "c"])

        # The return-type annotation carries the categorical dtype through to
        # pandas-on-Spark's schema inference.
        def categorize(ser) -> ps.Series[dtype]:
            return ser.astype(dtype)

        self.assert_eq(
            psdf.apply(categorize).sort_values(["a", "b"]).reset_index(drop=True),
            pdf.apply(categorize).sort_values(["a", "b"]).reset_index(drop=True),
        )
    def test_frame_transform(self):
        """DataFrame.transform over categorical columns matches pandas."""
        pdf, psdf = self.df_pair

        self.assert_eq(psdf.transform(lambda x: x), pdf.transform(lambda x: x))
        self.assert_eq(psdf.transform(lambda x: x.cat.codes), pdf.transform(lambda x: x.cat.codes))

        pdf = pd.DataFrame(
            {"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
        )
        psdf = ps.from_pandas(pdf)

        # Cast plain strings to a categorical dtype with an unused category.
        dtype = CategoricalDtype(categories=["a", "b", "c", "d"])

        self.assert_eq(
            psdf.transform(lambda x: x.astype(dtype)).sort_index(),
            pdf.transform(lambda x: x.astype(dtype)).sort_index(),
        )
    def test_frame_transform_without_shortcut(self):
        """Same as test_frame_transform, but forcing the non-shortcut path."""
        with ps.option_context("compute.shortcut_limit", 0):
            self.test_frame_transform()

        pdf, psdf = self.df_pair

        # Annotated helper: the return annotation declares the int8 codes type.
        def codes(pser) -> ps.Series[np.int8]:
            return pser.cat.codes

        self.assert_eq(psdf.transform(codes), pdf.transform(codes))

        pdf = pd.DataFrame(
            {"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
        )
        psdf = ps.from_pandas(pdf)

        dtype = CategoricalDtype(categories=["a", "b", "c", "d"])

        # The return-type annotation carries the categorical dtype through to
        # pandas-on-Spark's schema inference.
        def to_category(pser) -> ps.Series[dtype]:
            return pser.astype(dtype)

        self.assert_eq(
            psdf.transform(to_category).sort_index(), pdf.transform(to_category).sort_index()
        )
def test_series_apply(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.a.apply(lambda x: x).sort_index(), pdf.a.apply(lambda x: x).sort_index()
)
    def test_series_apply_without_shortcut(self):
        """Re-run the Series.apply test with the pandas shortcut disabled."""
        with ps.option_context("compute.shortcut_limit", 0):
            self.test_series_apply()

        pdf, psdf = self.df_pair

        # Use the input's own dtype as the return-type annotation so the
        # categorical dtype survives the round trip.
        ret = psdf.a.dtype

        def identity(pser) -> ret:
            return pser

        self.assert_eq(psdf.a.apply(identity).sort_index(), pdf.a.apply(identity).sort_index())

        # TODO: The return type is still category.
        # def to_str(x) -> str:
        #     return str(x)
        #
        # self.assert_eq(
        #     psdf.a.apply(to_str).sort_index(), pdf.a.apply(to_str).sort_index()
        # )
    def test_groupby_apply(self):
        """GroupBy.apply on categorical columns should match pandas."""
        pdf, psdf = self.df_pair

        # Identity apply grouped by one column, the other column, and both.
        self.assert_eq(
            psdf.groupby("a").apply(lambda df: df).sort_index(),
            pdf.groupby("a").apply(lambda df: df).sort_index(),
        )
        self.assert_eq(
            psdf.groupby("b").apply(lambda df: df[["a"]]).sort_index(),
            pdf.groupby("b").apply(lambda df: df[["a"]]).sort_index(),
        )
        self.assert_eq(
            psdf.groupby(["a", "b"]).apply(lambda df: df).sort_index(),
            pdf.groupby(["a", "b"]).apply(lambda df: df).sort_index(),
        )

        # Functions returning the category codes, for DataFrameGroupBy and
        # SeriesGroupBy respectively.
        self.assert_eq(
            psdf.groupby("a").apply(lambda df: df.b.cat.codes).sort_index(),
            pdf.groupby("a").apply(lambda df: df.b.cat.codes).sort_index(),
        )
        self.assert_eq(
            psdf.groupby("a")["b"].apply(lambda b: b.cat.codes).sort_index(),
            pdf.groupby("a")["b"].apply(lambda b: b.cat.codes).sort_index(),
        )

        # TODO: grouping by a categorical type sometimes preserves unused categories.
        # self.assert_eq(
        #     psdf.groupby("a").apply(len).sort_index(), pdf.groupby("a").apply(len).sort_index(),
        # )
    def test_groupby_apply_without_shortcut(self):
        """Re-run the groupby-apply tests with the pandas shortcut disabled."""
        with ps.option_context("compute.shortcut_limit", 0):
            self.test_groupby_apply()

        pdf, psdf = self.df_pair

        # Build the return-type annotation from the frame's own schema so the
        # categorical dtypes are preserved through the distributed path.
        def identity(df) -> ps.DataFrame[zip(psdf.columns, psdf.dtypes)]:
            return df

        self.assert_eq(
            psdf.groupby("a").apply(identity).sort_values(["a", "b"]).reset_index(drop=True),
            pdf.groupby("a").apply(identity).sort_values(["a", "b"]).reset_index(drop=True),
        )
def test_groupby_transform(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.groupby("a").transform(lambda x: x).sort_index(),
pdf.groupby("a").transform(lambda x: x).sort_index(),
)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
self.assert_eq(
psdf.groupby("a").transform(lambda x: x.astype(dtype)).sort_index(),
pdf.groupby("a").transform(lambda x: x.astype(dtype)).sort_index(),
)
    def test_groupby_transform_without_shortcut(self):
        """Re-run the groupby-transform tests with the pandas shortcut disabled."""
        with ps.option_context("compute.shortcut_limit", 0):
            self.test_groupby_transform()

        pdf, psdf = self.df_pair

        def identity(x) -> ps.Series[psdf.b.dtype]:  # type: ignore
            return x

        self.assert_eq(
            psdf.groupby("a").transform(identity).sort_values("b").reset_index(drop=True),
            pdf.groupby("a").transform(identity).sort_values("b").reset_index(drop=True),
        )

        dtype = CategoricalDtype(categories=["a", "b", "c", "d"])

        def astype(x) -> ps.Series[dtype]:
            return x.astype(dtype)

        if LooseVersion(pd.__version__) >= LooseVersion("1.3"):
            # TODO(SPARK-36367): Fix the behavior to follow pandas >= 1.3
            pass
        elif LooseVersion(pd.__version__) >= LooseVersion("1.2"):
            # On pandas >= 1.2 the pandas result is compared directly.
            self.assert_eq(
                psdf.groupby("a").transform(astype).sort_values("b").reset_index(drop=True),
                pdf.groupby("a").transform(astype).sort_values("b").reset_index(drop=True),
            )
        else:
            # Older pandas needs its expected frame rebuilt so the extended
            # categorical dtype matches the pandas-on-Spark output.
            expected = pdf.groupby("a").transform(astype)
            expected["b"] = dtype.categories.take(expected["b"].cat.codes).astype(dtype)
            self.assert_eq(
                psdf.groupby("a").transform(astype).sort_values("b").reset_index(drop=True),
                expected.sort_values("b").reset_index(drop=True),
            )
def test_frame_apply_batch(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda pdf: pdf.astype(str)).sort_index(),
pdf.astype(str).sort_index(),
)
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda pdf: pdf.astype(dtype)).sort_index(),
pdf.astype(dtype).sort_index(),
)
    def test_frame_apply_batch_without_shortcut(self):
        """Re-run the apply_batch tests with the pandas shortcut disabled."""
        with ps.option_context("compute.shortcut_limit", 0):
            self.test_frame_apply_batch()

        pdf, psdf = self.df_pair

        # The output schema is declared via the string slice annotation.
        def to_str(pdf) -> 'ps.DataFrame["a":str, "b":str]':  # noqa: F405
            return pdf.astype(str)

        self.assert_eq(
            psdf.pandas_on_spark.apply_batch(to_str).sort_values(["a", "b"]).reset_index(drop=True),
            to_str(pdf).sort_values(["a", "b"]).reset_index(drop=True),
        )

        pdf = pd.DataFrame(
            {"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
        )
        psdf = ps.from_pandas(pdf)

        dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
        # A prebuilt DataFrame type hint can also serve as the annotation.
        ret = ps.DataFrame["a":dtype, "b":dtype]

        def to_category(pdf) -> ret:
            return pdf.astype(dtype)

        self.assert_eq(
            psdf.pandas_on_spark.apply_batch(to_category)
            .sort_values(["a", "b"])
            .reset_index(drop=True),
            to_category(pdf).sort_values(["a", "b"]).reset_index(drop=True),
        )
def test_frame_transform_batch(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.astype(str)).sort_index(),
pdf.astype(str).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.b.cat.codes).sort_index(),
pdf.b.cat.codes.sort_index(),
)
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.astype(dtype)).sort_index(),
pdf.astype(dtype).sort_index(),
)
self.assert_eq(
psdf.pandas_on_spark.transform_batch(lambda pdf: pdf.b.astype(dtype)).sort_index(),
pdf.b.astype(dtype).sort_index(),
)
    def test_frame_transform_batch_without_shortcut(self):
        """Re-run the transform_batch tests with the pandas shortcut disabled."""
        with ps.option_context("compute.shortcut_limit", 0):
            self.test_frame_transform_batch()

        pdf, psdf = self.df_pair

        def to_str(pdf) -> 'ps.DataFrame["a":str, "b":str]':  # noqa: F405
            return pdf.astype(str)

        self.assert_eq(
            psdf.pandas_on_spark.transform_batch(to_str).sort_index(),
            to_str(pdf).sort_index(),
        )

        def to_codes(pdf) -> ps.Series[np.int8]:
            return pdf.b.cat.codes

        self.assert_eq(
            psdf.pandas_on_spark.transform_batch(to_codes).sort_index(),
            to_codes(pdf).sort_index(),
        )

        pdf = pd.DataFrame(
            {"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
        )
        psdf = ps.from_pandas(pdf)

        dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
        ret = ps.DataFrame["a":dtype, "b":dtype]

        def to_category(pdf) -> ret:
            return pdf.astype(dtype)

        self.assert_eq(
            psdf.pandas_on_spark.transform_batch(to_category).sort_index(),
            to_category(pdf).sort_index(),
        )

        # Intentionally shadows the previous to_category: this variant
        # returns a single Series rather than a whole frame.
        def to_category(pdf) -> ps.Series[dtype]:
            return pdf.b.astype(dtype)

        self.assert_eq(
            psdf.pandas_on_spark.transform_batch(to_category).sort_index(),
            to_category(pdf).rename().sort_index(),
        )
def test_series_transform_batch(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.a.pandas_on_spark.transform_batch(lambda pser: pser.astype(str)).sort_index(),
pdf.a.astype(str).sort_index(),
)
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
self.assert_eq(
psdf.a.pandas_on_spark.transform_batch(lambda pser: pser.astype(dtype)).sort_index(),
pdf.a.astype(dtype).sort_index(),
)
    def test_series_transform_batch_without_shortcut(self):
        """Re-run the Series transform_batch tests with the pandas shortcut disabled."""
        with ps.option_context("compute.shortcut_limit", 0):
            self.test_series_transform_batch()

        pdf, psdf = self.df_pair

        def to_str(pser) -> ps.Series[str]:
            return pser.astype(str)

        self.assert_eq(
            psdf.a.pandas_on_spark.transform_batch(to_str).sort_index(), to_str(pdf.a).sort_index()
        )

        pdf = pd.DataFrame(
            {"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
        )
        psdf = ps.from_pandas(pdf)

        dtype = CategoricalDtype(categories=["a", "b", "c", "d"])

        # The return-type annotation carries the categorical dtype.
        def to_category(pser) -> ps.Series[dtype]:
            return pser.astype(dtype)

        self.assert_eq(
            psdf.a.pandas_on_spark.transform_batch(to_category).sort_index(),
            to_category(pdf.a).sort_index(),
        )
def test_unstack(self):
pdf = self.pdf
index = pd.MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("x", "c"), ("y", "a"), ("y", "b"), ("y", "d")]
)
pdf.index = index
psdf = ps.from_pandas(pdf)
self.assert_eq(psdf.a.unstack().sort_index(), pdf.a.unstack().sort_index())
self.assert_eq(psdf.b.unstack().sort_index(), pdf.b.unstack().sort_index())
    def test_rename_categories(self):
        """Test CategoricalAccessor.rename_categories against pandas.

        Covers list-like, dict-like and callable arguments, in-place
        renaming, and the errors raised for invalid inputs.
        """
        pdf, psdf = self.df_pair

        pser = pdf.b
        psser = psdf.b

        self.assert_eq(
            pser.cat.rename_categories([0, 1, 3, 2]), psser.cat.rename_categories([0, 1, 3, 2])
        )
        self.assert_eq(
            pser.cat.rename_categories({"a": "A", "c": "C"}),
            psser.cat.rename_categories({"a": "A", "c": "C"}),
        )
        self.assert_eq(
            pser.cat.rename_categories(lambda x: x.upper()),
            psser.cat.rename_categories(lambda x: x.upper()),
        )

        # In-place renames mutate both series; the results are only compared
        # on pandas < 1.3 (see SPARK-36367 below).
        pser.cat.rename_categories({"a": "A", "c": "C"}, inplace=True)
        psser.cat.rename_categories({"a": "A", "c": "C"}, inplace=True)
        if LooseVersion(pd.__version__) >= LooseVersion("1.3"):
            # TODO(SPARK-36367): Fix the behavior to follow pandas >= 1.3
            pass
        else:
            self.assert_eq(pser, psser)
            self.assert_eq(pdf, psdf)

        pser.cat.rename_categories(lambda x: x.upper(), inplace=True)
        psser.cat.rename_categories(lambda x: x.upper(), inplace=True)
        if LooseVersion(pd.__version__) >= LooseVersion("1.3"):
            # TODO(SPARK-36367): Fix the behavior to follow pandas >= 1.3
            pass
        else:
            self.assert_eq(pser, psser)
            self.assert_eq(pdf, psdf)

        pser.cat.rename_categories([0, 1, 3, 2], inplace=True)
        psser.cat.rename_categories([0, 1, 3, 2], inplace=True)
        if LooseVersion(pd.__version__) >= LooseVersion("1.3"):
            # TODO(SPARK-36367): Fix the behavior to follow pandas >= 1.3
            pass
        else:
            self.assert_eq(pser, psser)
            self.assert_eq(pdf, psdf)

        # Invalid arguments: wrong length, or non-list-like scalars.
        self.assertRaisesRegex(
            ValueError,
            "new categories need to have the same number of items as the old categories",
            lambda: psser.cat.rename_categories([0, 1, 2]),
        )
        self.assertRaises(
            TypeError,
            lambda: psser.cat.rename_categories(None),
        )
        self.assertRaises(
            TypeError,
            lambda: psser.cat.rename_categories(1),
        )
        self.assertRaises(
            TypeError,
            lambda: psser.cat.rename_categories("x"),
        )
def test_set_categories(self):
pdf, psdf = self.df_pair
pser = pdf.b
psser = psdf.b
self.assert_eq(
pser.cat.set_categories(["a", "c", "b", "o"]),
psser.cat.set_categories(["a", "c", "b", "o"]),
)
self.assert_eq(
pser.cat.set_categories(["a", "c", "b"]),
psser.cat.set_categories(["a", "c", "b"]),
)
self.assert_eq(
pser.cat.set_categories(["a", "c", "b", "d", "e"]),
psser.cat.set_categories(["a", "c", "b", "d", "e"]),
)
self.assert_eq(
pser.cat.set_categories([0, 1, 3, 2], rename=True),
psser.cat.set_categories([0, 1, 3, 2], rename=True),
)
self.assert_eq(
pser.cat.set_categories([0, 1, 3], rename=True),
psser.cat.set_categories([0, 1, 3], rename=True),
)
self.assert_eq(
pser.cat.set_categories([0, 1, 3, 2, 4], rename=True),
psser.cat.set_categories([0, 1, 3, 2, 4], rename=True),
)
self.assert_eq(
pser.cat.set_categories(["a", "c", "b", "o"], ordered=True),
psser.cat.set_categories(["a", "c", "b", "o"], ordered=True),
)
self.assert_eq(
pser.cat.set_categories(["a", "c", "b"], ordered=True),
psser.cat.set_categories(["a", "c", "b"], ordered=True),
)
self.assert_eq(
pser.cat.set_categories(["a", "c", "b", "d", "e"], ordered=True),
psser.cat.set_categories(["a", "c", "b", "d", "e"], ordered=True),
)
self.assert_eq(
pser.cat.set_categories(["a", "c", "b", "o"], inplace=True, rename=True),
psser.cat.set_categories(["a", "c", "b", "o"], inplace=True, rename=True),
)
if LooseVersion(pd.__version__) >= LooseVersion("1.3"):
# TODO(SPARK-36367): Fix the behavior to follow pandas >= 1.3
pass
else:
self.assert_eq(pser, psser)
self.assert_eq(pdf, psdf)
pser.cat.set_categories([2, 3, 1, 0], inplace=True, rename=False),
psser.cat.set_categories([2, 3, 1, 0], inplace=True, rename=False),
if LooseVersion(pd.__version__) >= LooseVersion("1.3"):
# TODO(SPARK-36367): Fix the behavior to follow pandas >= 1.3
pass
else:
self.assert_eq(pser, psser)
self.assert_eq(pdf, psdf)
self.assertRaisesRegex(
TypeError,
"Parameter 'new_categories' must be list-like, was",
lambda: psser.cat.set_categories(None),
)
if __name__ == "__main__":
    import unittest

    # Re-export the test cases so unittest discovers them when this module
    # is executed directly.
    from pyspark.pandas.tests.test_categorical import *  # noqa: F401

    try:
        import xmlrunner  # type: ignore[import]

        # Emit JUnit-style XML reports (used by CI) when xmlrunner exists.
        testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
| |
"""Kernel Principal Components Analysis"""
# Author: Mathieu Blondel <mathieu@mblondel.org>
# License: BSD 3 clause
import numpy as np
from scipy import linalg
from scipy.sparse.linalg import eigsh
from ..utils import check_random_state
from ..utils.validation import check_is_fitted, check_array
from ..exceptions import NotFittedError
from ..base import BaseEstimator, TransformerMixin
from ..preprocessing import KernelCenterer
from ..metrics.pairwise import pairwise_kernels
class KernelPCA(BaseEstimator, TransformerMixin):
    """Kernel Principal component analysis (KPCA)

    Non-linear dimensionality reduction through the use of kernels (see
    :ref:`metrics`).

    Read more in the :ref:`User Guide <kernel_PCA>`.

    Parameters
    ----------
    n_components : int, default=None
        Number of components. If None, all non-zero components are kept.

    kernel : "linear" | "poly" | "rbf" | "sigmoid" | "cosine" | "precomputed"
        Kernel. Default="linear".

    gamma : float, default=1/n_features
        Kernel coefficient for rbf, poly and sigmoid kernels. Ignored by other
        kernels.

    degree : int, default=3
        Degree for poly kernels. Ignored by other kernels.

    coef0 : float, default=1
        Independent term in poly and sigmoid kernels.
        Ignored by other kernels.

    kernel_params : mapping of string to any, default=None
        Parameters (keyword arguments) and values for kernel passed as
        callable object. Ignored by other kernels.

    alpha : int, default=1.0
        Hyperparameter of the ridge regression that learns the
        inverse transform (when fit_inverse_transform=True).

    fit_inverse_transform : bool, default=False
        Learn the inverse transform for non-precomputed kernels.
        (i.e. learn to find the pre-image of a point)

    eigen_solver : string ['auto'|'dense'|'arpack'], default='auto'
        Select eigensolver to use. If n_components is much less than
        the number of training samples, arpack may be more efficient
        than the dense eigensolver.

    tol : float, default=0
        Convergence tolerance for arpack.
        If 0, optimal value will be chosen by arpack.

    max_iter : int, default=None
        Maximum number of iterations for arpack.
        If None, optimal value will be chosen by arpack.

    remove_zero_eig : boolean, default=False
        If True, then all components with zero eigenvalues are removed, so
        that the number of components in the output may be < n_components
        (and sometimes even zero due to numerical instability).
        When n_components is None, this parameter is ignored and components
        with zero eigenvalues are removed regardless.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`. Used when ``eigen_solver`` == 'arpack'.

        .. versionadded:: 0.18

    copy_X : boolean, default=True
        If True, input X is copied and stored by the model in the `X_fit_`
        attribute. If no further changes will be done to X, setting
        `copy_X=False` saves memory by storing a reference.

        .. versionadded:: 0.18

    n_jobs : int, default=1
        The number of parallel jobs to run.
        If `-1`, then the number of jobs is set to the number of CPU cores.

        .. versionadded:: 0.18

    Attributes
    ----------
    lambdas_ : array, (n_components,)
        Eigenvalues of the centered kernel matrix in decreasing order.
        If `n_components` and `remove_zero_eig` are not set,
        then all values are stored.

    alphas_ : array, (n_samples, n_components)
        Eigenvectors of the centered kernel matrix. If `n_components` and
        `remove_zero_eig` are not set, then all components are stored.

    dual_coef_ : array, (n_samples, n_features)
        Inverse transform matrix. Set if `fit_inverse_transform` is True.

    X_transformed_fit_ : array, (n_samples, n_components)
        Projection of the fitted data on the kernel principal components.

    X_fit_ : (n_samples, n_features)
        The data used to fit the model. If `copy_X=False`, then `X_fit_` is
        a reference. This attribute is used for the calls to transform.

    References
    ----------
    Kernel PCA was introduced in:
        Bernhard Schoelkopf, Alexander J. Smola,
        and Klaus-Robert Mueller. 1999. Kernel principal
        component analysis. In Advances in kernel methods,
        MIT Press, Cambridge, MA, USA 327-352.
    """

    def __init__(self, n_components=None, kernel="linear",
                 gamma=None, degree=3, coef0=1, kernel_params=None,
                 alpha=1.0, fit_inverse_transform=False, eigen_solver='auto',
                 tol=0, max_iter=None, remove_zero_eig=False,
                 random_state=None, copy_X=True, n_jobs=1):
        if fit_inverse_transform and kernel == 'precomputed':
            # A precomputed kernel gives no access to the original feature
            # space, so a pre-image cannot be learned.
            raise ValueError(
                "Cannot fit_inverse_transform with a precomputed kernel.")
        self.n_components = n_components
        self.kernel = kernel
        self.kernel_params = kernel_params
        self.gamma = gamma
        self.degree = degree
        self.coef0 = coef0
        self.alpha = alpha
        self.fit_inverse_transform = fit_inverse_transform
        self.eigen_solver = eigen_solver
        self.remove_zero_eig = remove_zero_eig
        self.tol = tol
        self.max_iter = max_iter
        self._centerer = KernelCenterer()
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.copy_X = copy_X

    @property
    def _pairwise(self):
        # Signals to scikit-learn utilities that X is a kernel (Gram) matrix
        # when the kernel is precomputed.
        return self.kernel == "precomputed"

    def _get_kernel(self, X, Y=None):
        """Compute the kernel matrix between X and Y (or X with itself)."""
        if callable(self.kernel):
            params = self.kernel_params or {}
        else:
            # filter_params=True drops parameters the chosen metric ignores.
            params = {"gamma": self.gamma,
                      "degree": self.degree,
                      "coef0": self.coef0}
        return pairwise_kernels(X, Y, metric=self.kernel,
                                filter_params=True, n_jobs=self.n_jobs,
                                **params)

    def _fit_transform(self, K):
        """ Fit's using kernel K"""
        # center kernel
        K = self._centerer.fit_transform(K)

        if self.n_components is None:
            n_components = K.shape[0]
        else:
            # Cannot extract more components than there are samples.
            n_components = min(K.shape[0], self.n_components)

        # compute eigenvectors
        if self.eigen_solver == 'auto':
            # Heuristic: ARPACK pays off only for few components on a
            # reasonably large kernel matrix.
            if K.shape[0] > 200 and n_components < 10:
                eigen_solver = 'arpack'
            else:
                eigen_solver = 'dense'
        else:
            eigen_solver = self.eigen_solver

        if eigen_solver == 'dense':
            # eigvals selects the n_components largest eigenvalues (eigh
            # returns them in ascending order).
            self.lambdas_, self.alphas_ = linalg.eigh(
                K, eigvals=(K.shape[0] - n_components, K.shape[0] - 1))
        elif eigen_solver == 'arpack':
            random_state = check_random_state(self.random_state)
            # initialize with [-1,1] as in ARPACK
            v0 = random_state.uniform(-1, 1, K.shape[0])
            self.lambdas_, self.alphas_ = eigsh(K, n_components,
                                                which="LA",
                                                tol=self.tol,
                                                maxiter=self.max_iter,
                                                v0=v0)

        # sort eigenvectors in descending order
        indices = self.lambdas_.argsort()[::-1]
        self.lambdas_ = self.lambdas_[indices]
        self.alphas_ = self.alphas_[:, indices]

        # remove eigenvectors with a zero eigenvalue
        if self.remove_zero_eig or self.n_components is None:
            self.alphas_ = self.alphas_[:, self.lambdas_ > 0]
            self.lambdas_ = self.lambdas_[self.lambdas_ > 0]

        return K

    def _fit_inverse_transform(self, X_transformed, X):
        """Learn the ridge-regression pre-image map from projections to X."""
        if hasattr(X, "tocsr"):
            raise NotImplementedError("Inverse transform not implemented for "
                                      "sparse matrices!")

        n_samples = X_transformed.shape[0]
        K = self._get_kernel(X_transformed)
        # Ridge regularization: add alpha to the diagonal in place.
        K.flat[::n_samples + 1] += self.alpha
        self.dual_coef_ = linalg.solve(K, X, sym_pos=True, overwrite_a=True)
        self.X_transformed_fit_ = X_transformed

    def fit(self, X, y=None):
        """Fit the model from data in X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples in the number of samples
            and n_features is the number of features.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        X = check_array(X, accept_sparse='csr', copy=self.copy_X)
        K = self._get_kernel(X)
        self._fit_transform(K)

        if self.fit_inverse_transform:
            # Project the training data, then learn the pre-image map.
            sqrt_lambdas = np.diag(np.sqrt(self.lambdas_))
            X_transformed = np.dot(self.alphas_, sqrt_lambdas)
            self._fit_inverse_transform(X_transformed, X)

        self.X_fit_ = X
        return self

    def fit_transform(self, X, y=None, **params):
        """Fit the model from data in X and transform X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Training vector, where n_samples in the number of samples
            and n_features is the number of features.

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        self.fit(X, **params)

        # Equivalent to alphas_ @ diag(sqrt(lambdas_)) via broadcasting.
        X_transformed = self.alphas_ * np.sqrt(self.lambdas_)

        if self.fit_inverse_transform:
            self._fit_inverse_transform(X_transformed, X)

        return X_transformed

    def transform(self, X):
        """Transform X.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)

        Returns
        -------
        X_new : array-like, shape (n_samples, n_components)
        """
        check_is_fitted(self, 'X_fit_')

        # Center the test kernel with the training statistics, then project
        # onto the (scaled) eigenvectors.
        K = self._centerer.transform(self._get_kernel(X, self.X_fit_))
        return np.dot(K, self.alphas_ / np.sqrt(self.lambdas_))

    def inverse_transform(self, X):
        """Transform X back to original space.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_components)

        Returns
        -------
        X_new : array-like, shape (n_samples, n_features)

        References
        ----------
        "Learning to Find Pre-Images", G BakIr et al, 2004.
        """
        if not self.fit_inverse_transform:
            raise NotFittedError("The fit_inverse_transform parameter was not"
                                 " set to True when instantiating and hence "
                                 "the inverse transform is not available.")

        K = self._get_kernel(X, self.X_transformed_fit_)

        return np.dot(K, self.dual_coef_)
| |
"""Network Authentication Helpers
Contains interface (MultiDomainBasicAuth) and associated glue code for
providing credentials in the context of network requests.
"""
import logging
import urllib.parse
from typing import Any, Dict, List, Optional, Tuple
from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth
from pip._vendor.requests.models import Request, Response
from pip._vendor.requests.utils import get_netrc_auth
from pip._internal.utils.misc import (
ask,
ask_input,
ask_password,
remove_auth_from_url,
split_auth_netloc_from_url,
)
from pip._internal.vcs.versioncontrol import AuthInfo
logger = logging.getLogger(__name__)

# (netloc, username, password) triple handed to keyring.set_password.
Credentials = Tuple[str, str, str]

try:
    import keyring
except ImportError:
    keyring = None
except Exception as exc:
    # keyring can raise arbitrary errors on import (e.g. broken backends);
    # degrade to "no keyring" instead of breaking pip entirely.
    logger.warning(
        "Keyring is skipped due to an exception: %s", str(exc),
    )
    keyring = None
def get_keyring_auth(url, username):
    # type: (Optional[str], Optional[str]) -> Optional[AuthInfo]
    """Return the tuple auth for a given url from keyring."""
    global keyring
    if not url or not keyring:
        return None

    try:
        try:
            get_credential = keyring.get_credential
        except AttributeError:
            # Older keyring releases lack get_credential; fall through to
            # the get_password path below.
            pass
        else:
            logger.debug("Getting credentials from keyring for %s", url)
            cred = get_credential(url, username)
            if cred is not None:
                return cred.username, cred.password
            return None

        if username:
            logger.debug("Getting password from keyring for %s", url)
            password = keyring.get_password(url, username)
            if password:
                return username, password

    except Exception as exc:
        # Any keyring failure disables keyring lookups for the rest of
        # this process (module-global is cleared).
        logger.warning(
            "Keyring is skipped due to an exception: %s", str(exc),
        )
        keyring = None
    return None
class MultiDomainBasicAuth(AuthBase):
    """Basic-auth handler that keeps separate credentials per netloc.

    Credentials are resolved, in order, from: the URL itself, a matching
    index URL, netrc, keyring, and finally by prompting the user after a
    401 response.
    """

    def __init__(self, prompting=True, index_urls=None):
        # type: (bool, Optional[List[str]]) -> None
        self.prompting = prompting
        self.index_urls = index_urls
        # Credentials cache, keyed by netloc.
        self.passwords = {}  # type: Dict[str, AuthInfo]
        # When the user is prompted to enter credentials and keyring is
        # available, we will offer to save them. If the user accepts,
        # this value is set to the credentials they entered. After the
        # request authenticates, the caller should call
        # ``save_credentials`` to save these.
        self._credentials_to_save = None  # type: Optional[Credentials]

    def _get_index_url(self, url):
        # type: (str) -> Optional[str]
        """Return the original index URL matching the requested URL.

        Cached or dynamically generated credentials may work against
        the original index URL rather than just the netloc.

        The provided url should have had its username and password
        removed already. If the original index url had credentials then
        they will be included in the return value.

        Returns None if no matching index was found, or if --no-index
        was specified by the user.
        """
        if not url or not self.index_urls:
            return None
        for u in self.index_urls:
            prefix = remove_auth_from_url(u).rstrip("/") + "/"
            if url.startswith(prefix):
                return u
        return None

    def _get_new_credentials(self, original_url, allow_netrc=True,
                             allow_keyring=False):
        # type: (str, bool, bool) -> AuthInfo
        """Find and return credentials for the specified URL."""
        # Split the credentials and netloc from the url.
        url, netloc, url_user_password = split_auth_netloc_from_url(
            original_url,
        )

        # Start with the credentials embedded in the url
        username, password = url_user_password
        if username is not None and password is not None:
            logger.debug("Found credentials in url for %s", netloc)
            return url_user_password

        # Find a matching index url for this request
        index_url = self._get_index_url(url)
        if index_url:
            # Split the credentials from the url.
            index_info = split_auth_netloc_from_url(index_url)
            if index_info:
                index_url, _, index_url_user_password = index_info
                logger.debug("Found index url %s", index_url)

        # If an index URL was found, try its embedded credentials
        if index_url and index_url_user_password[0] is not None:
            username, password = index_url_user_password
            if username is not None and password is not None:
                logger.debug("Found credentials in index url for %s", netloc)
                return index_url_user_password

        # Get creds from netrc if we still don't have them
        if allow_netrc:
            netrc_auth = get_netrc_auth(original_url)
            if netrc_auth:
                logger.debug("Found credentials in netrc for %s", netloc)
                return netrc_auth

        # If we don't have a password and keyring is available, use it.
        if allow_keyring:
            # The index url is more specific than the netloc, so try it first
            kr_auth = (
                get_keyring_auth(index_url, username) or
                get_keyring_auth(netloc, username)
            )
            if kr_auth:
                logger.debug("Found credentials in keyring for %s", netloc)
                return kr_auth

        return username, password

    def _get_url_and_credentials(self, original_url):
        # type: (str) -> Tuple[str, Optional[str], Optional[str]]
        """Return the credentials to use for the provided URL.

        If allowed, netrc and keyring may be used to obtain the
        correct credentials.

        Returns (url_without_credentials, username, password). Note
        that even if the original URL contains credentials, this
        function may return a different username and password.
        """
        url, netloc, _ = split_auth_netloc_from_url(original_url)

        # Use any stored credentials that we have for this netloc
        username, password = self.passwords.get(netloc, (None, None))

        if username is None and password is None:
            # No stored credentials. Acquire new credentials without prompting
            # the user. (e.g. from netrc, keyring, or the URL itself)
            username, password = self._get_new_credentials(original_url)

        if username is not None or password is not None:
            # Convert the username and password if they're None, so that
            # this netloc will show up as "cached" in the conditional above.
            # Further, HTTPBasicAuth doesn't accept None, so it makes sense to
            # cache the value that is going to be used.
            username = username or ""
            password = password or ""

            # Store any acquired credentials.
            self.passwords[netloc] = (username, password)

        assert (
            # Credentials were found
            (username is not None and password is not None) or
            # Credentials were not found
            (username is None and password is None)
        ), f"Could not load credentials from url: {original_url}"

        return url, username, password

    def __call__(self, req):
        # type: (Request) -> Request
        # Get credentials for this request
        url, username, password = self._get_url_and_credentials(req.url)

        # Set the url of the request to the url without any credentials
        req.url = url

        if username is not None and password is not None:
            # Send the basic auth with this request
            req = HTTPBasicAuth(username, password)(req)

        # Attach a hook to handle 401 responses
        req.register_hook("response", self.handle_401)

        return req

    # Factored out to allow for easy patching in tests
    def _prompt_for_password(self, netloc):
        # type: (str) -> Tuple[Optional[str], Optional[str], bool]
        username = ask_input(f"User for {netloc}: ")
        if not username:
            return None, None, False
        # If keyring already knows this user's password, don't re-ask.
        auth = get_keyring_auth(netloc, username)
        if auth and auth[0] is not None and auth[1] is not None:
            return auth[0], auth[1], False
        password = ask_password("Password: ")
        return username, password, True

    # Factored out to allow for easy patching in tests
    def _should_save_password_to_keyring(self):
        # type: () -> bool
        if not keyring:
            return False
        return ask("Save credentials to keyring [y/N]: ", ["y", "n"]) == "y"

    def handle_401(self, resp, **kwargs):
        # type: (Response, **Any) -> Response
        """Response hook: retry a 401 with fresh (possibly prompted) creds."""
        # We only care about 401 responses, anything else we want to just
        #   pass through the actual response
        if resp.status_code != 401:
            return resp

        # We are not able to prompt the user so simply return the response
        if not self.prompting:
            return resp

        parsed = urllib.parse.urlparse(resp.url)

        # Query the keyring for credentials:
        username, password = self._get_new_credentials(resp.url,
                                                       allow_netrc=False,
                                                       allow_keyring=True)

        # Prompt the user for a new username and password
        save = False
        if not username and not password:
            username, password, save = self._prompt_for_password(parsed.netloc)

        # Store the new username and password to use for future requests
        self._credentials_to_save = None
        if username is not None and password is not None:
            self.passwords[parsed.netloc] = (username, password)

            # Prompt to save the password to keyring
            if save and self._should_save_password_to_keyring():
                self._credentials_to_save = (parsed.netloc, username, password)

        # Consume content and release the original connection to allow our new
        #   request to reuse the same one.
        resp.content
        resp.raw.release_conn()

        # Add our new username and password to the request
        req = HTTPBasicAuth(username or "", password or "")(resp.request)
        req.register_hook("response", self.warn_on_401)

        # On successful request, save the credentials that were used to
        # keyring. (Note that if the user responded "no" above, this member
        # is not set and nothing will be saved.)
        if self._credentials_to_save:
            req.register_hook("response", self.save_credentials)

        # Send our new request
        new_resp = resp.connection.send(req, **kwargs)
        new_resp.history.append(resp)

        return new_resp

    def warn_on_401(self, resp, **kwargs):
        # type: (Response, **Any) -> None
        """Response callback to warn about incorrect credentials."""
        if resp.status_code == 401:
            logger.warning(
                '401 Error, Credentials not correct for %s', resp.request.url,
            )

    def save_credentials(self, resp, **kwargs):
        # type: (Response, **Any) -> None
        """Response callback to save credentials on success."""
        assert keyring is not None, "should never reach here without keyring"
        if not keyring:
            return

        # Clear the pending credentials whether or not saving succeeds.
        creds = self._credentials_to_save
        self._credentials_to_save = None
        if creds and resp.status_code < 400:
            try:
                logger.info('Saving credentials to keyring')
                keyring.set_password(*creds)
            except Exception:
                logger.exception('Failed to save credentials')
| |
import itertools
import json
import logging
import os.path
from abc import ABCMeta, abstractmethod, abstractproperty
from collections import MutableMapping, Mapping, Sequence, Iterator
from contextlib import contextmanager
from itertools import izip
from sqlalchemy import MetaData
from sqlalchemy.engine.url import URL
from gtd.io import open_or_create, JSONPicklable
from gtd.utils import ensure_unicode, SimpleExecutor, Failure
from gtd.utils import makedirs
from sqlalchemy import Column, Table
from sqlalchemy import tuple_
from sqlalchemy.engine import Engine, create_engine
from sqlalchemy.exc import NoSuchTableError
from sqlalchemy.sql import select
class Closeable(object):
    """Abstract base for objects holding resources that must be closed.

    Usable as a context manager; also warns (and closes) from ``__del__``
    if the object was never closed explicitly.
    """
    # Python 2 style ABC declaration.
    __metaclass__ = ABCMeta

    @abstractmethod
    def close(self):
        """Close this object."""
        pass

    @abstractproperty
    def closed(self):
        """A bool indicating whether this object was closed.

        Returns:
            bool
        """
        pass

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    def __del__(self):
        # Best-effort safety net: warn when a resource leaks past GC.
        if not self.closed:
            logging.warn('{} was not properly closed.'.format(self))
            self.close()
class BatchMapping(Mapping):
    """Like the built-in Mapping class, except subclasses must implement batch versions of get and contains."""
    __metaclass__ = ABCMeta
    @abstractmethod
    def get_batch(self, keys):
        """Get value for each key in keys.
        Args:
            keys (list): a list of keys
        Returns:
            list: a list of values with the same order corresponding to the list of keys.
                If a given key does not have a value, the corresponding returned value will be a Failure object.
        """
        pass
    def __getitem__(self, key):
        """Get value for key."""
        # Single-item access is just a batch of one; a Failure sentinel in the
        # result means the key is absent, which maps to KeyError here.
        val = self.get_batch([key])[0]
        if isinstance(val, Failure):
            raise KeyError(key)
        return val
    @abstractmethod
    def contains_batch(self, keys):
        """Check for the presence of each key in keys.
        Args:
            keys (list): a list of keys
        Returns:
            list[bool]: a list of booleans with the same order corresponding to the list of keys, indicating
                whether each key is present in the BatchMapping.
        """
        pass
    def __contains__(self, key):
        """Check if key is in the mapping."""
        return self.contains_batch([key])[0]
class BatchMutableMapping(MutableMapping, BatchMapping):
    """Like the built-in MutableMapping, except subclasses must implement batch versions of setitem and delitem."""
    __metaclass__ = ABCMeta
    @abstractmethod
    def set_batch(self, key_val_pairs):
        """Store every (key, value) pair in the mapping."""
        pass
    @abstractmethod
    def del_batch(self, keys):
        """Remove every key in keys from the mapping."""
        pass
    def __setitem__(self, key, value):
        # single-item write expressed as a one-element batch
        self.set_batch([(key, value)])
    def __delitem__(self, key):
        # single-item delete expressed as a one-element batch
        self.del_batch([key])
class SimpleBatchMapping(BatchMutableMapping):
    """In-memory BatchMutableMapping backed by a plain dict."""
    def __init__(self, d=None):
        # use a fresh dict by default; never share a mutable default
        self._d = {} if d is None else d
    def get_batch(self, keys):
        # absent keys map to a shared Failure sentinel instead of raising
        missing = Failure.silent("Could not get key.")
        return [self._d.get(key, missing) for key in keys]
    def contains_batch(self, keys):
        return [key in self._d for key in keys]
    def set_batch(self, key_val_pairs):
        # dict.update accepts an iterable of (key, value) pairs directly
        self._d.update(key_val_pairs)
    def del_batch(self, keys):
        for key in keys:
            del self._d[key]
    def __iter__(self):
        return iter(self._d)
    def __len__(self):
        return len(self._d)
class CacheWrapperMixin(object):
    """Mixin that forwards the read-only mapping API to an underlying cache."""
    def _set_cache(self, cache):
        self._cache = cache
    @property
    def cache(self):
        """The wrapped cache mapping."""
        return self._cache
    def __len__(self):
        return len(self.cache)
    def __iter__(self):
        return iter(self.cache)
    def keys(self):
        return self.cache.keys()
    def values(self):
        return self.cache.values()
    def items(self):
        return self.cache.items()
    def iterkeys(self):
        return self.cache.iterkeys()
    def itervalues(self):
        return self.cache.itervalues()
    def iteritems(self):
        return self.cache.iteritems()
class LazyMapping(CacheWrapperMixin, BatchMapping):
    """A BatchMapping that computes missing values on demand and memoizes them in a cache."""
    def __init__(self, cache):
        """Create a LazyMapping.
        Args:
            cache (BatchMutableMapping)
        """
        self._set_cache(cache)
    def contains_batch(self, keys):
        """Determine whether each key in the batch is already present in the cache.
        Args:
            keys (list): a list of keys
        Returns:
            list[bool]: a list of booleans, indicating whether each key is present in the cache
        """
        return self.cache.contains_batch(keys)
    @abstractmethod
    def compute_batch(self, keys):
        """Compute the values for a batch of keys.
        Args:
            keys (list): a list of keys
        Returns:
            list: a list of values with the same order corresponding to the list of keys.
                If a given key does not have a value, the corresponding returned value will be a Failure object.
        """
        pass
    def compute(self, key):
        """Compute the value for a single key.
        Args:
            key
        Returns:
            val
        """
        return self.compute_batch([key])[0]
    def ensure_batch(self, keys, computed_list=False):
        """Ensure that the given keys are present in the cache.
        If a key is not present, its entry will be computed.
        Args:
            keys (list): a list of keys
            computed_list (bool): defaults to False. See Returns description.
        Returns:
            if computed_list:
                list(bool): a list of booleans indicating which keys were freshly computed (may include failed computations)
            else:
                int: the number of keys which were freshly computed
        """
        # only compute the keys the cache does not already hold
        presence = self.cache.contains_batch(keys)
        to_compute = [key for key, present in izip(keys, presence) if not present]
        computed = self.compute_batch(to_compute)
        # failed computations (Failure objects) are NOT written to the cache,
        # so they will be retried on the next access
        updates = []
        for key, val in izip(to_compute, computed):
            if not isinstance(val, Failure):
                updates.append((key, val))
        self.cache.set_batch(updates)
        if computed_list:
            return [not p for p in presence]
        return len([p for p in presence if not p])
    def get_batch(self, keys, compute=True):
        """Get value for each key in keys.
        Args:
            keys (list): a list of keys
            compute (bool): if a key is missing from the cache, compute it. When disabled, just returns Failure
                objects for missing keys.
        Returns:
            list: a list of values with the same order corresponding to the list of keys.
                If a given key's value cannot be computed, the corresponding returned value will be a Failure object.
        """
        if compute:
            self.ensure_batch(keys)
        return self.cache.get_batch(keys)
    @staticmethod
    def compute_batch_parallel(fxn, keys):
        """Execute a function in parallel on the entire batch of keys, using a multi-threaded executor.
        This is a helper function which subclasses of LazyDict can use to implement `compute_batch`.
        Note that speedups will only be obtained if compute is IO bound, due to Python's GIL.
        Args:
            fxn (Callable): function to be called in parallel
            keys (list): a list of keys
        Returns:
            list: result is equivalent to [fxn(key) for key in keys]
        """
        # pre-fill with a sentinel so we can verify every slot was written
        no_result_failure = Failure.silent('No result returned by SimpleExecutor.')
        results = [no_result_failure] * len(keys)
        with SimpleExecutor(fxn) as ex:
            # submit tagged with the index so out-of-order completion is fine
            for i, key in enumerate(keys):
                ex.submit(i, key)
            for i, val in ex.results():
                results[i] = val
        for result in results:
            assert result != no_result_failure
        return results
class EagerMapping(CacheWrapperMixin, BatchMapping):
    """BatchMapping that fills its cache up front when the cache starts out empty."""
    def __init__(self, cache):
        self._set_cache(cache)
        # an empty cache means nothing was computed yet; populate eagerly
        if not len(cache):
            self.populate(cache)
    @abstractmethod
    def populate(self, cache):
        """Fill the (empty) cache with all entries."""
        pass
    def get_batch(self, keys):
        return self.cache.get_batch(keys)
    def contains_batch(self, keys):
        return self.cache.contains_batch(keys)
class EagerSequence(Sequence):
    """Sequence that populates its backing cache on construction when empty."""
    def __init__(self, cache):
        self._cache = cache
        if not len(self.cache):
            self.populate(self.cache)
    @property
    def cache(self):
        """The backing sequence."""
        return self._cache
    @abstractmethod
    def populate(self, cache):
        """Fill the (empty) cache with all items."""
        pass
    def __len__(self):
        return len(self.cache)
    def __iter__(self):
        return iter(self.cache)
    def __getitem__(self, key):
        return self.cache[key]
def sqlalchemy_metadata(host, port, database, username, password):
    """Create a SQLAlchemy MetaData object bound to a PostgreSQL engine.

    Args:
        host (str), port (int), database (str), username (str), password (str)
    Returns:
        MetaData: bound to a freshly created psycopg2 engine.
    Raises:
        OperationalError: if a connection cannot be established within the timeout.
    """
    url = URL(drivername='postgresql+psycopg2', username=username,
              password=password, host=host, port=port, database=database)
    # server-side cursors keep large result sets from being loaded into memory
    engine = create_engine(url, server_side_cursors=True, connect_args={'connect_timeout': 4})
    # ensure that we can connect
    with engine.begin():
        pass  # this will throw OperationalError if it fails
    return MetaData(engine)
class ORM(object):
    """Maps values to and from database rows over a fixed set of ORMColumns."""
    __metaclass__ = ABCMeta
    def __init__(self, columns):
        assert isinstance(columns, list)
        # every entry must be an ORMColumn wrapper, not a raw Column
        assert all(isinstance(col, ORMColumn) for col in columns)
        self._columns = columns
    @property
    def columns(self):
        """Return a list of ORMColumns."""
        return self._columns
    @abstractmethod
    def to_row(self, value):
        """Convert object into database row.
        Args:
            value (object)
        Returns:
            dict[Column, object]
        """
        pass
    @abstractmethod
    def from_row(self, row):
        """Convert row back into object.
        Args:
            dict[Column, object]
        Returns:
            object
        """
        pass
    def bind(self, table):
        # point every wrapped column at the concrete column of `table`
        for col in self.columns:
            col.bind(table)
class SimpleORM(ORM):
    """ORM for a value stored in exactly one column."""
    def __init__(self, column):
        super(SimpleORM, self).__init__([column])
        self._col = column
    def to_row(self, value):
        """The row holds the value directly under the single column."""
        return {self._col.key: value}
    def from_row(self, row):
        """Read the value straight out of the single column."""
        return row[self._col.key]
class CustomORM(ORM):
    """ORM whose row conversions are supplied as callables."""
    def __init__(self, columns, to_row, from_row):
        super(CustomORM, self).__init__(columns)
        self._to_row = to_row
        self._from_row = from_row
    def to_row(self, value):
        return self._to_row(value)
    def from_row(self, row):
        return self._from_row(row)
class ORMColumn(object):
    """Wraps a SQLAlchemy Column object."""
    def __init__(self, *args, **kwargs):
        self._rebuild(args, kwargs)
    def _rebuild(self, args, kwargs):
        # SQLAlchemy Columns are immutable once attached, so we recreate the
        # unbound Column from scratch whenever the spec changes
        if self.bound:
            raise RuntimeError('Cannot rebuild ORMColumn if it is already bound.')
        self._unbound_column = Column(*args, **kwargs)
        self._args = args
        self._kwargs = kwargs
    @property
    def unbound_column(self):
        return self._unbound_column
    @property
    def name(self):
        return self.unbound_column.name
    def extend(self, *args, **kwargs):
        """Recreate the underlying Column with extra constructor arguments."""
        merged_kwargs = dict(self._kwargs)
        merged_kwargs.update(kwargs)
        self._rebuild(self._args + args, merged_kwargs)
    @property
    def bound(self):
        # bound iff bind() has stashed a concrete table column
        return hasattr(self, '_column')
    def bind(self, table):
        """Attach this wrapper to the same-named column of `table`."""
        names = [c.name for c in table.columns]
        if len(names) != len(set(names)):
            raise ValueError('Can only bind to table with unique column names.')
        self._column = table.c[self.name]
    @property
    def column(self):
        """Return SQLAlchemy Column object."""
        if not self.bound:
            raise RuntimeError("Need to bind ORMColumn to a Table.")
        return self._column
    @property
    def key(self):
        """Used to select this column from a SQLAlchemy RowProxy."""
        return self.column
class TableMapping(BatchMutableMapping):
    """A BatchMutableMapping backed by a database table, with keys as primary-key columns."""
    def __init__(self, name, key_orm, val_orm, metadata, engine=None):
        if engine is None:
            engine = metadata.bind
        assert isinstance(engine, Engine)
        # mark columns as primary keys
        for c in key_orm.columns:
            c.extend(primary_key=True)
        # Convert ORMColumns into SQLAlchemy Columns to construct Table
        orm_cols = key_orm.columns + val_orm.columns
        table_cols = [orm_col.unbound_column for orm_col in orm_cols]
        # avoid overlapping column names
        col_names = [col.name for col in table_cols]
        if len(col_names) != len(set(col_names)):
            raise ValueError("Column names must be unique.")
        try:
            # If table is already defined in metadata, return it.
            # It is possible for the table to be defined in metadata, but not exist in database.
            # (e.g. if metadata.drop_all() was called)
            # If not, use reflection to get information about the table from the database, and return it.
            # If table isn't in database, raise NoSuchTableError.
            table = Table(name, metadata, autoload=True)
        except NoSuchTableError:
            # Define the table.
            table = Table(name, metadata, *table_cols)
        # If table does not exist in database, create it.
        metadata.create_all()
        # make sure we only get the columns we expected
        if set([c.name for c in table.columns]) != set(col_names):
            raise ValueError("ORM column names must match table column names exactly.")
        # ORMs must have a reference to the Table's Column objects.
        key_orm.bind(table)
        val_orm.bind(table)
        self._key_orm = key_orm
        self._val_orm = val_orm
        self._table = table
        self._engine = engine
    @property
    def _key_cols(self):
        """Return a list of Columns (not ORMColumns)."""
        return [orm_column.column for orm_column in self._key_orm.columns]
    @property
    def _val_cols(self):
        """Return a list of Columns (not ORMColumns)."""
        return [orm_column.column for orm_column in self._val_orm.columns]
    @contextmanager
    def _transaction(self):
        # engine.begin() commits on clean exit and rolls back on exception
        with self._engine.begin() as conn:
            yield conn
        # connection automatically closed after transaction
        assert conn.closed
    @property
    def table(self):
        return self._table
    def _key_conditions(self, keys):
        # build a `(k1, k2, ...) IN ((...), (...))` clause over the key columns
        vals = []
        for key in keys:
            row = self._key_orm.to_row(key)
            val = tuple(row[c] for c in self._key_cols)
            vals.append(val)
        return tuple_(*self._key_cols).in_(vals)
    def contains_batch(self, keys):
        if len(keys) == 0: return []
        # select all rows matching any of the keys
        condition = self._key_conditions(keys)
        cmd = select(self._key_cols).where(condition)
        # get the set of keys found
        with self._transaction() as conn:
            result = conn.execute(cmd)
            present_keys = set(self._key_orm.from_row(row) for row in result)
        return [key in present_keys for key in keys]
    def get_batch(self, keys):
        if len(keys) == 0: return []
        # remember each key's position so results can be re-ordered to match
        key_to_index = {k: i for i, k in enumerate(keys)}
        condition = self._key_conditions(keys)
        cmd = select([self.table]).where(condition)
        with self._transaction() as conn:
            results = conn.execute(cmd)
            no_result_failure = Failure.silent('No result returned from TableDict.')
            vals = [no_result_failure] * len(keys)
            for row in results:
                key = self._key_orm.from_row(row)
                val = self._val_orm.from_row(row)
                index = key_to_index[key]
                vals[index] = val
        return vals
    def _kv_to_row(self, key, val, string_cols=False):
        """Merge key and value into one row dict; optionally key by column name."""
        row = self._key_orm.to_row(key)
        row.update(self._val_orm.to_row(val))
        if string_cols:
            row = {col.name: v for col, v in row.iteritems()}
        return row
    def del_batch(self, keys):
        if len(keys) == 0: return
        condition = self._key_conditions(keys)
        cmd = self.table.delete().where(condition)
        with self._transaction() as conn:
            result = conn.execute(cmd)
            if result.rowcount == 0:
                raise KeyError(keys)  # rollback
    def __iter__(self):
        with self._transaction() as conn:
            for row in conn.execute(select(self._key_cols)):
                yield self._key_orm.from_row(row)
    def __len__(self):
        cmd = self.table.count()
        with self._transaction() as conn:
            return conn.execute(cmd).scalar()
    def set_batch(self, key_val_pairs):
        # implemented as delete-then-insert (upsert emulation)
        if len(key_val_pairs) == 0: return
        keys, vals = zip(*key_val_pairs)
        # make sure keys are unique
        assert len(keys) == len(set(keys))
        present_keys = []
        for key, present in izip(keys, self.contains_batch(keys)):
            if present:
                present_keys.append(key)
        rows = []
        for k, v in key_val_pairs:
            row = self._kv_to_row(k, v, string_cols=True)
            rows.append(row)
        with self._transaction() as conn:
            self.del_batch(present_keys)  # delete rows that are already present
            conn.execute(self.table.insert(), rows)  # insert new rows
    def iteritems(self):
        with self._transaction() as conn:
            for row in conn.execute(select([self.table])):
                key = self._key_orm.from_row(row)
                val = self._val_orm.from_row(row)
                yield (key, val)
    def iterkeys(self):
        return iter(self)
    def itervalues(self):
        for _, val in self.iteritems():
            yield val
    def keys(self):
        return list(self.iterkeys())
    def items(self):
        return list(self.iteritems())
    def values(self):
        return list(self.itervalues())
class FileMapping(MutableMapping, Closeable):
    """A MutableMapping persisted to a JSON file; every mutation rewrites the file."""
    def __init__(self, path):
        self._path = path
        # open existing file (or create it); the handle is kept for rewrites
        self._f = open_or_create(self._path, 'r+')
        s = self._f.read()
        if len(s) == 0:
            self._d = {}
        else:
            self._d = json.loads(s)
    def close(self):
        self._f.close()
    @property
    def closed(self):
        return self._f.closed
    def _dump(self):
        """Rewrite the entire backing file from the in-memory dict."""
        f = self._f
        f.seek(0)
        f.truncate()
        json.dump(self._d, f)
        f.flush()
    def __setitem__(self, key, value):
        self._d[key] = value
        self._dump()
    def __delitem__(self, key):
        del self._d[key]
        self._dump()
    def __getitem__(self, item):
        return self._d[item]
    def __len__(self):
        return len(self._d)
    def __iter__(self):
        return iter(self._d)
    def __str__(self):
        return str(self._d)
    def __repr__(self):
        # FIX: the class previously defined __repr__ twice; the earlier
        # "FileMapping at {path}" version was dead code, shadowed by this one.
        return repr(self._d)
class FileSerializer(object):
    """Serializes objects to and from single lines of a text file."""
    # FIX: this was `__class__ = ABCMeta`, which does NOT make the class
    # abstract; `__metaclass__` is the Python 2 mechanism for attaching ABCMeta.
    __metaclass__ = ABCMeta
    @abstractmethod
    def to_line(self, obj):
        """Return a string that can be written as a SINGLE line in a file (cannot contain newline character)."""
        pass
    @abstractmethod
    def from_line(self, line):
        """Parse one line (newline already stripped) back into an object."""
        pass
class UnicodeSerializer(FileSerializer):
    """Writes values to the file as UTF-8 encoded lines."""
    def to_line(self, obj):
        # coerce to unicode first, then encode for the byte-oriented file
        return ensure_unicode(obj).encode('utf-8')
    def from_line(self, line):
        return line.decode('utf-8')
class CustomSerializer(FileSerializer):
    """FileSerializer built from a user-supplied pair of conversion functions."""
    def __init__(self, to_line, from_line):
        self._to, self._from = to_line, from_line
    def to_line(self, obj):
        return self._to(obj)
    def from_line(self, line):
        return self._from(line)
class JSONPicklableSerializer(FileSerializer):
    """Serializes JSONPicklable objects via their JSON-string round trip."""
    def to_line(self, obj):
        # obj must implement the JSONPicklable interface
        return obj.to_json_str()
    def from_line(self, line):
        return JSONPicklable.from_json_str(line)
class AppendableSequence(Sequence):
    """A Sequence that supports appending to its end."""
    # FIX: this was `__class__ = ABCMeta`, which does NOT make the class
    # abstract; `__metaclass__` is the Python 2 mechanism for attaching ABCMeta.
    __metaclass__ = ABCMeta
    @abstractmethod
    def append(self, item):
        """Append a single item to the end of the sequence."""
        pass
    def extend(self, items):
        """Append every item in `items`, in order."""
        for item in items:
            self.append(item)
class SimpleAppendableSequence(AppendableSequence, Closeable):
    """In-memory AppendableSequence backed by a plain list; close() is a no-op flag."""
    def __init__(self, l=None):
        # fresh list by default; never share a mutable default between instances
        self._l = [] if l is None else l
        self._closed = False
    def __getitem__(self, item):
        # slices become lazy views rather than copies
        if isinstance(item, slice):
            return SequenceSlice(self, item)
        return self._l[item]
    def __len__(self):
        return len(self._l)
    def append(self, item):
        self._l.append(item)
    def close(self):
        self._closed = True
    @property
    def closed(self):
        return self._closed
class FileSequenceOffsets(Sequence, Closeable):
    """Byte offsets of each line in a FileSequence, kept both in memory and in a sidecar file."""
    def __init__(self, file_seq):
        offsets_path = file_seq.path + '.offsets'
        file_existed = os.path.isfile(offsets_path)  # check if file already existed
        self._f_write = open_or_create(offsets_path, 'a')  # open for appending only
        if file_existed:
            # load offsets from file into memory
            with open(offsets_path, 'r') as f:
                self._offsets = [int(line) for line in f]  # int cast strips newline automatically
        else:
            # build offsets (in-memory and on-file)
            self._offsets = []
            current_offset = 0
            for line in file_seq.iter_raw_lines():
                self.append(current_offset)
                current_offset += len(line)
        self._offsets_path = offsets_path
    def close(self):
        self._f_write.close()
    @property
    def closed(self):
        return self._f_write.closed
    def __repr__(self):
        return 'FileSequenceOffsets at {}'.format(self._offsets_path)
    def __getitem__(self, i):
        return self._offsets[i]
    def __len__(self):
        return len(self._offsets)
    def append(self, i):
        self.extend([i])
    def extend(self, i_list):
        # update the in-memory list first, then persist one offset per line
        self._offsets.extend(i_list)
        f = self._f_write
        for i in i_list:
            f.write(str(i))
            f.write('\n')
        f.flush()
class FileSequenceMetaData(Closeable):
    """Stores FileSequence properties in a JSON file."""
    def __init__(self, file_seq):
        """Store metadata about a FileSequence.
        Args:
            file_seq (FileSequence)
        """
        path = file_seq.path + '.meta'
        already_on_disk = os.path.isfile(path)
        self._d = FileMapping(path)  # JSON-file-backed dict
        if not already_on_disk:
            # first time: record the sequence length
            self.length = len(file_seq)
    def close(self):
        self._d.close()
    @property
    def closed(self):
        return self._d.closed
    @property
    def length(self):
        # surface a missing entry as AttributeError, matching attribute semantics
        try:
            return self._d['length']
        except KeyError:
            raise AttributeError()
    @length.setter
    def length(self, val):
        self._d['length'] = val
    def __str__(self):
        return str(self._d)
    def __repr__(self):
        return repr(self._d)
class FileSequence(AppendableSequence, Closeable):
    """Sequence backed by a file."""
    def __init__(self, path, serializer=None):
        if serializer is None:
            serializer = UnicodeSerializer()  # by default, just write to file as utf-8 encoded strings
        self._path = path
        self._ser = serializer
        # open or create the corresponding file
        self._f_read = open_or_create(path, 'r')  # for reading only
        self._f_write = open_or_create(path, 'a')  # for appending. Stream positioned at end of file.
        # create metadata
        self._offsets = FileSequenceOffsets(self)  # note: this must come before metadata
        self._meta = FileSequenceMetaData(self)
    def close(self):
        # close in reverse order of construction
        self._meta.close()
        self._offsets.close()
        self._f_write.close()
        self._f_read.close()
    @property
    def closed(self):
        return self._meta.closed and self._offsets.closed and self._f_write.closed and self._f_read.closed
    def __repr__(self):
        return 'FileSequence at {}'.format(self._path)
    @property
    def path(self):
        return self._path
    def _strip_newline(self, line):
        # drop the trailing '\n' added by extend()
        return line[:-1]
    def __getitem__(self, i):
        if isinstance(i, slice):
            return SequenceSlice(self, i)
        # random access: seek the read handle to the recorded byte offset
        f = self._f_read
        f.seek(self._offsets[i])
        line = f.readline()
        line = self._strip_newline(line)
        return self._ser.from_line(line)
    def __len__(self):
        return len(self._offsets)
    def append(self, item):
        self.extend([item])
    def extend(self, items):
        # write one serialized line per item, remembering each start offset
        f = self._f_write
        offsets = []
        for item in items:
            offset = f.tell()
            offsets.append(offset)
            line = self._ser.to_line(item)
            f.write(line)
            f.write('\n')
        f.flush()
        self._meta.length += len(offsets)  # keep metadata up-to-date
        self._offsets.extend(offsets)
    def iter_raw_lines(self):
        # sequential scan over the read handle, newlines included
        for line in self._f_read:
            yield line
    def __iter__(self):
        for line in self.iter_raw_lines():
            line = self._strip_newline(line)
            yield self._ser.from_line(line)
class SimpleFileSequence(FileSequence):
    """FileSequence of plain strings, stored UTF-8 encoded."""
    def __init__(self, path):
        super(SimpleFileSequence, self).__init__(path, UnicodeSerializer())
class Shard(FileSequence):
    """A FileSequence serving as a Shard in a ShardedSequence."""
    @classmethod
    def shard_path(cls, directory, index):
        """Path of the index-th shard file inside directory."""
        return os.path.join(directory, '{}.shard'.format(index))
    @classmethod
    def open(cls, directory, index, max_length, serializer):
        """Open an existing shard; raise IOError if its file is absent."""
        target = cls.shard_path(directory, index)
        if not os.path.isfile(target):
            raise IOError('No such shard: {}'.format(target))
        return Shard(directory, index, max_length, serializer)
    def __init__(self, directory, index, max_length, serializer):
        self._index = index
        self._max_length = max_length
        super(Shard, self).__init__(self.shard_path(directory, index), serializer)
        # an existing shard file must not exceed the configured capacity
        assert len(self) <= self._max_length
    @property
    def index(self):
        return self._index
    @property
    def max_length(self):
        return self._max_length
    @property
    def remaining_space(self):
        return self.max_length - len(self)
class ShardedSequence(AppendableSequence, Closeable):
    """AppendableSequence split across fixed-size Shard files in one directory."""
    def __init__(self, directory, shard_size, serializer):
        self._directory = directory
        self._shard_size = shard_size
        self._serializer = serializer
        # create directory if it does not exist
        makedirs(directory)
        # identify shards in the directory
        self._shards = []
        for k in itertools.count():
            try:
                shard = Shard.open(directory, k, self._shard_size, serializer)
                self._shards.append(shard)
            except IOError:
                break
        # create one shard if there are none
        if len(self._shards) == 0:
            self.add_shard()
        # all shards except the last should match the shard size
        for i, shard in enumerate(self._shards):
            l = len(shard)
            if i == len(self._shards) - 1:  # final shard
                assert l <= self._shard_size
            else:
                assert l == self._shard_size
    def __repr__(self):
        return 'ShardedSequence at {}'.format(self._directory)
    def close(self):
        for shard in self._shards:
            shard.close()
    @property
    def closed(self):
        # closed only if every shard is closed
        for shard in self._shards:
            if not shard.closed:
                return False
        return True
    @property
    def shard_size(self):
        return self._shard_size
    @property
    def directory(self):
        return self._directory
    def __len__(self):
        return sum(len(s) for s in self._shards)
    def __getitem__(self, i):
        if isinstance(i, slice):
            return SequenceSlice(self, i)
        # map the global index to (shard, position-within-shard)
        index = i // self.shard_size
        shard_index = i % self.shard_size
        try:
            shard = self._shards[index]
            return shard[shard_index]
        except IndexError:
            raise IndexError('{} exceeds max index of ShardedSequence.'.format(i))
    def add_shard(self):
        """Create and register the next shard file; return it."""
        index = len(self._shards)
        shard = Shard(self.directory, index, self.shard_size, self._serializer)
        self._shards.append(shard)
        return shard
    def appendable_shard(self):
        """Return the shard that we can append to.
        If the last existing shard is full, create a new shard and return that.
        Returns:
            Shard
        """
        last_shard = self._shards[-1]
        if last_shard.remaining_space == 0:
            last_shard = self.add_shard()
        return last_shard
    def append(self, item):
        self.extend([item])
    def extend(self, items):
        iter_items = iter(items)
        def get_batch(k):
            """Get up to k more elements from items."""
            results = []
            for _ in range(k):
                try:
                    results.append(next(iter_items))
                except StopIteration:
                    break
            return results
        # keep filling shards until we can't fill them anymore
        while True:
            shard = self.appendable_shard()
            requested = shard.remaining_space
            batch = get_batch(requested)
            shard.extend(batch)
            if len(batch) < requested:
                break
    def __iter__(self):
        return itertools.chain(*self._shards)
class BatchIterator(Iterator):
    """Iterator that produces its elements by fetching them in batches."""
    __metaclass__ = ABCMeta
    def __init__(self, default_batch_size=20):
        self._default_batch_size = default_batch_size
    @abstractmethod
    def next_batch(self, k):
        """Get next batch of elements from iterator.
        Get k more elements from the iterator. If there are less than k elements remaining,
        return whatever remains.
        Raise StopIteration if and only if there are 0 more elements to yield.
        Args:
            k (int): number of elements to yield
        Returns:
            list: batch of elements
        """
        pass
    def next(self):
        # Py2 iterator protocol. AttributeError covers the very first call,
        # before self._latest_batch exists; StopIteration means the current
        # batch is exhausted. Either way, fetch a fresh batch and resume.
        try:
            return next(self._latest_batch)
        except (AttributeError, StopIteration):
            self._latest_batch = iter(self.next_batch(self._default_batch_size))
            return next(self._latest_batch)
class LazyIterator(BatchIterator):
    """BatchIterator that computes values on demand and memoizes them in an appendable cache."""
    __metaclass__ = ABCMeta
    def __init__(self, cache, default_batch_size=100):
        """Create a CacheIterator.
        Args:
            cache (AppendableSequence): an appendable sequence
        """
        self._iterated = 0
        self._cache = cache
        super(LazyIterator, self).__init__(default_batch_size=default_batch_size)
    @property
    def iterated(self):
        """Number of elements produced by this iterator so far."""
        return self._iterated
    @property
    def cache(self):
        return self._cache
    @abstractmethod
    def compute_batch(self, k):
        """Compute the next k items for the iterator.
        This should be a function of self.iterated, self.cache and k.
        Besides these 3 variables, it should NOT rely on any state accumulated from previous iterations of the iterator.
        Args:
            k (int)
        Returns:
            A list of up to k items. If there aren't k more items to compute, just return whatever there
            is to compute.
        """
        pass
    @property
    def num_computed(self):
        return len(self.cache)
    def _ensure_batch(self, k):
        """Ensure that the cache has the next k items.
        If there aren't k more items to add, just add whatever can be added.
        Returns:
            the number of freshly computed new items
        """
        missing = (self.iterated + k) - len(self.cache)
        if missing <= 0:
            return 0  # cache already has everything we need
        # compute_batch returns items starting at self.iterated; the first
        # (k - missing) of them are already cached, so only append the tail
        batch = self.compute_batch(k)
        new_items = batch[k - missing:]
        self.cache.extend(new_items)
        return len(new_items)
    def next_batch(self, k):
        self._ensure_batch(k)
        cache_excess = len(self.cache) - self.iterated
        num_to_yield = min(cache_excess, k)  # sometimes the cache doesn't have k more
        if num_to_yield == 0:
            raise StopIteration  # no more elements
        i = self._iterated
        batch = list(self.cache[i:i + num_to_yield])
        self._iterated += num_to_yield
        return batch
    def advance_to(self, index):
        """Advance the iterator to the specified index.
        Args:
            index (int): the next item yielded by the iterator will be iterator[index]
        """
        if index > len(self.cache):
            raise IndexError('Cache has not been computed up to index {} yet.'.format(index))
        self._iterated = index
    def ensure_to(self, index, batch_size):
        """Ensure that every value up to (but not including) index has been computed.
        Args:
            index (int)
            batch_size (int): size of the batches used to compute missing values.
        """
        # repeatedly jump to the frontier of the cache and compute one batch
        while True:
            n = self.num_computed
            if n >= index:
                break
            self.advance_to(n)
            self._ensure_batch(batch_size)
class SequenceSlice(Sequence):
    """A lazy, read-only view of `seq` restricted to a non-negative slice.

    Args:
        seq (Sequence): the underlying sequence
        slice (slice): slice with non-negative start/stop/step (None allowed)
    Raises:
        ValueError: if any slice component is negative.
    """
    def __init__(self, seq, slice):
        self._seq = seq
        start, stop, step = slice.start, slice.stop, slice.step
        if start is None:
            start = 0
        if stop is None:
            stop = len(seq)
        if step is None:
            step = 1
        for val in (start, stop, step):
            if val < 0:
                raise ValueError("Slice values must be non-negative.")
        self.start, self.stop, self.step = start, stop, step
    def __getitem__(self, i):
        if i < 0:  # allow for negative indexing
            if i < -len(self):  # only allow negatives in the appropriate range
                raise IndexError()
            i = i % len(self)  # convert to positive index
        idx = self.start + self.step * i
        if idx >= self.stop:
            raise IndexError()
        return self._seq[idx]
    def __len__(self):
        diff = self.stop - self.start
        # FIX: use explicit floor division. Plain `/` only floors for ints on
        # Python 2; under Python 3 (or `from __future__ import division`) it
        # would silently produce a float length.
        num_items = diff // self.step
        remainder = diff % self.step
        if remainder > 0:
            num_items += 1
        return num_items
| |
#!/usr/bin/env python
# Control program for the Red Pitaya Scanning system
# Copyright (C) 2015 Pavel Demin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import struct
import numpy as np
import matplotlib
matplotlib.use('Qt5Agg')
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
import matplotlib.cm as cm
from PyQt5.uic import loadUiType
from PyQt5.QtCore import QRegExp, QTimer, Qt
from PyQt5.QtGui import QRegExpValidator
from PyQt5.QtWidgets import QApplication, QMainWindow, QMenu, QVBoxLayout, QSizePolicy, QMessageBox, QWidget
from PyQt5.QtNetwork import QAbstractSocket, QTcpSocket
# Compile the Qt Designer file into UI/base classes at import time.
# NOTE(review): this rebinds QMainWindow, shadowing the one imported from
# PyQt5.QtWidgets above — presumably intentional, but worth confirming.
Ui_Scanner, QMainWindow = loadUiType('scanner.ui')
class Scanner(QMainWindow, Ui_Scanner):
    def __init__(self):
        """Build the scanner window: plot, toolbar, TCP socket, signal wiring."""
        super(Scanner, self).__init__()
        self.setupUi(self)
        # IP address validator
        rx = QRegExp('^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])|rp-[0-9A-Fa-f]{6}\.local$')
        self.addrValue.setValidator(QRegExpValidator(rx, self.addrValue))
        # state variable
        self.idle = True
        # number of samples to show on the plot
        self.size = 512 * 512
        # sampling clock in MHz; used to convert microseconds to ticks
        self.freq = 125.0
        # buffer and offset for the incoming samples (8 bytes per pixel)
        self.buffer = bytearray(8 * self.size)
        self.offset = 0
        # int32 view over the raw byte buffer
        # NOTE(review): 8 bytes/pixel viewed as int32 gives two ints per
        # pixel — presumably a value pair per sample; confirm device format.
        self.data = np.frombuffer(self.buffer, np.int32)
        # create figure
        figure = Figure()
        figure.set_facecolor('none')
        self.axes = figure.add_subplot(111)
        self.canvas = FigureCanvas(figure)
        self.plotLayout.addWidget(self.canvas)
        self.axes.axis((0.0, 512.0, 0.0, 512.0))
        # placeholder gradient image until real data arrives
        x, y = np.meshgrid(np.linspace(0.0, 512.0, 513), np.linspace(0.0, 512.0, 513))
        z = x / 512.0 + y * 0.0
        self.mesh = self.axes.pcolormesh(x, y, z, cmap = cm.plasma)
        # create navigation toolbar
        self.toolbar = NavigationToolbar(self.canvas, self.plotWidget, False)
        # remove subplots action (its position moved between matplotlib 1.x and 2.x)
        actions = self.toolbar.actions()
        if int(matplotlib.__version__[0]) < 2:
            self.toolbar.removeAction(actions[7])
        else:
            self.toolbar.removeAction(actions[6])
        self.plotLayout.addWidget(self.toolbar)
        # create TCP socket
        self.socket = QTcpSocket(self)
        self.socket.connected.connect(self.connected)
        self.socket.readyRead.connect(self.read_data)
        self.socket.error.connect(self.display_error)
        # connect signals from buttons and boxes
        self.connectButton.clicked.connect(self.start)
        self.scanButton.clicked.connect(self.scan)
        self.periodValue.valueChanged.connect(self.set_period)
        self.trgtimeValue.valueChanged.connect(self.set_trgtime)
        self.trginvCheck.stateChanged.connect(self.set_trginv)
        self.shdelayValue.valueChanged.connect(self.set_shdelay)
        self.shtimeValue.valueChanged.connect(self.set_shtime)
        self.shinvCheck.stateChanged.connect(self.set_shinv)
        self.acqdelayValue.valueChanged.connect(self.set_acqdelay)
        self.samplesValue.valueChanged.connect(self.set_samples)
        self.pulsesValue.valueChanged.connect(self.set_pulses)
        # create timers
        self.startTimer = QTimer(self)
        self.startTimer.timeout.connect(self.timeout)
        self.meshTimer = QTimer(self)
        self.meshTimer.timeout.connect(self.update_mesh)
        # set default values
        self.periodValue.setValue(200.0)
    def start(self):
        """Toggle the connection: connect when idle, otherwise disconnect."""
        if self.idle:
            self.connectButton.setEnabled(False)
            self.socket.connectToHost(self.addrValue.text(), 1001)
            # fire timeout() if the host does not answer within 5 s
            self.startTimer.start(5000)
        else:
            self.stop()
    def stop(self):
        """Drop the connection and reset UI/receive state."""
        self.idle = True
        self.socket.abort()
        self.offset = 0
        self.connectButton.setText('Connect')
        self.connectButton.setEnabled(True)
        self.scanButton.setEnabled(True)
    def timeout(self):
        """Connection attempt timed out; report it via the error path."""
        self.display_error('timeout')
    def connected(self):
        """Socket connected: push all current settings to the device and start it."""
        self.startTimer.stop()
        self.idle = False
        # re-send every control value so device state matches the UI
        self.set_period(self.periodValue.value())
        self.set_trgtime(self.trgtimeValue.value())
        self.set_trginv(self.trginvCheck.checkState())
        self.set_shdelay(self.shdelayValue.value())
        self.set_shtime(self.shtimeValue.value())
        self.set_shinv(self.shinvCheck.checkState())
        self.set_acqdelay(self.acqdelayValue.value())
        self.set_samples(self.samplesValue.value())
        self.set_pulses(self.pulsesValue.value())
        # start pulse generators (opcode 11 in the top 4 bits, no payload)
        self.socket.write(struct.pack('<I', 11<<28))
        self.connectButton.setText('Disconnect')
        self.connectButton.setEnabled(True)
        self.scanButton.setEnabled(True)
    def read_data(self):
        """Accumulate incoming samples; redraw once the frame buffer is full."""
        size = self.socket.bytesAvailable()
        if self.offset + size < 8 * self.size:
            # partial frame: append and wait for more
            self.buffer[self.offset:self.offset + size] = self.socket.read(size)
            self.offset += size
        else:
            # final chunk: read exactly what completes the frame, then redraw
            self.meshTimer.stop()
            self.buffer[self.offset:8 * self.size] = self.socket.read(8 * self.size - self.offset)
            self.offset = 0
            self.update_mesh()
            self.scanButton.setEnabled(True)
def display_error(self, socketError):
self.startTimer.stop()
if socketError == 'timeout':
QMessageBox.information(self, 'Scanner', 'Error: connection timeout.')
else:
QMessageBox.information(self, 'Scanner', 'Error: %s.' % self.socket.errorString())
self.stop()
def set_period(self, value):
# set maximum delays and times to half period
maximum = int(value * 5.0 + 0.5) / 10.0
self.trgtimeValue.setMaximum(maximum)
self.shdelayValue.setMaximum(maximum)
self.shtimeValue.setMaximum(maximum)
self.acqdelayValue.setMaximum(maximum)
# set maximum number of samples per pulse
maximum = int(value * 500.0 + 0.5) / 10.0
if maximum > 256.0: maximum = 256.0
self.samplesValue.setMaximum(maximum)
shdelay = value * 0.25
samples = value * 0.5
if self.idle: return
self.socket.write(struct.pack('<I', 0<<28 | int(value * self.freq)))
def set_trgtime(self, value):
if self.idle: return
self.socket.write(struct.pack('<I', 1<<28 | int(value * self.freq)))
def set_trginv(self, checked):
if self.idle: return
self.socket.write(struct.pack('<I', 2<<28 | int(checked == Qt.Checked)))
def set_shdelay(self, value):
if self.idle: return
self.socket.write(struct.pack('<I', 3<<28 | int(value * self.freq)))
def set_shtime(self, value):
if self.idle: return
self.socket.write(struct.pack('<I', 4<<28 | int(value * self.freq)))
def set_shinv(self, checked):
if self.idle: return
self.socket.write(struct.pack('<I', 5<<28 | int(checked == Qt.Checked)))
def set_acqdelay(self, value):
if self.idle: return
self.socket.write(struct.pack('<I', 6<<28 | int(value * self.freq)))
def set_samples(self, value):
if self.idle: return
self.socket.write(struct.pack('<I', 7<<28 | int(value)))
def set_pulses(self, value):
if self.idle: return
self.socket.write(struct.pack('<I', 8<<28 | int(value)))
def set_coordinates(self):
if self.idle: return
self.socket.write(struct.pack('<I', 9<<28))
for i in range(256):
for j in range(512):
value = (i * 2 + 0 << 18) | (j << 4)
self.socket.write(struct.pack('<I', 10<<28 | int(value)))
for j in range(512):
value = (i * 2 + 1 << 18) | (511 - j << 4)
self.socket.write(struct.pack('<I', 10<<28 | int(value)))
def scan(self):
if self.idle: return
self.scanButton.setEnabled(False)
self.data[:] = np.zeros(2 * 512 * 512, np.int32)
self.update_mesh()
self.set_coordinates()
self.socket.write(struct.pack('<I', 12<<28))
self.meshTimer.start(1000)
def update_mesh(self):
result = self.data[0::2]/(self.samplesValue.value() * self.pulsesValue.value() * 8192.0)
result = result.reshape(512, 512)
result[1::2, :] = result[1::2, ::-1]
self.mesh.set_array(result.reshape(512 * 512))
self.canvas.draw()
# Script entry point: build the main window and hand control to the Qt
# event loop; exit with the loop's return code.
app = QApplication(sys.argv)
window = Scanner()
window.show()
sys.exit(app.exec_())
| |
import re
import os
from Bio.Seq import Seq
from giraffe_features import Giraffe_Feature_Base, Aligned_Feature, Feature_Type_Choices
from hippo import clean_sequence, Blast_Accession
##################################
# BLAST
#
from django.conf import settings
from Bio.Blast.Applications import NcbiblastnCommandline
from Bio.Blast.Applications import NcbiblastpCommandline
from Bio.Blast.Applications import NcbiblastxCommandline
from Bio.Blast.Applications import NcbitblastnCommandline
from Bio.Blast import NCBIXML
import tempfile
import subprocess
def blast(sequence, dbobj, input_type='dna', protein=False,
          identity_threshold=0.85, evalue_threshold=0.0001, feature_threshold=None, circular=True):
    """
    Blast sequence against specified feature database. If input type is 'dna',
    using blastn if protein=False (default), or blastx if protein=True. If input
    type is 'protein', using tblastn if protein=False, or blastp if protein=True.

    identity_threshold: only return results with identity rate greater than this
    threshold. Can be None. Default is 0.85.

    evalue_threshold: only return results with evalue smaller than this
    threshold. Default is 0.0001.

    feature_threshold: only return results that span at least this amount of a
    feature. Can be None (default). E.g. if set to 0.99, only results spanning an
    entire feature are returned.

    Returns a list of Aligned_Feature objects (each with a feature_id
    attribute attached), or [] if the blast invocation fails.
    """

    infile = None
    feature_list = []

    input = clean_sequence(sequence)
    # for circular sequences, blast against the doubled sequence so that
    # features crossing the origin can still be found; hits are mapped back
    # onto the original coordinates further down
    if circular is True:
        input2 = input+input
    else:
        input2 = input

    # write the query to a temp FASTA file; blast output goes next to it
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
        infile = f.name
        f.write(">Query\n%s\n" % (input2,))

    outfile = "%s.out.xml" % (infile,)
    # choose the blast program from (input_type, protein); protein-space
    # searches use word_size 3, nucleotide-space searches word_size 6
    if protein:
        if input_type == 'dna':
            blast_cl = NcbiblastxCommandline(query=infile, db="%s" % (dbobj.protein_db_name(),), soft_masking=True,
                                             evalue=evalue_threshold, word_size=3, outfmt=5, out=outfile,
                                             max_target_seqs=500)
        else:
            blast_cl = NcbiblastpCommandline(query=infile, db="%s" % (dbobj.protein_db_name(),), soft_masking=True,
                                             evalue=evalue_threshold, word_size=3, outfmt=5, out=outfile,
                                             max_target_seqs=500)
    else:
        if input_type == 'dna':
            blast_cl = NcbiblastnCommandline(query=infile, db="%s" % (dbobj.dna_db_name(),), soft_masking=True,
                                             evalue=evalue_threshold, word_size=6, outfmt=5, out=outfile,
                                             max_target_seqs=500)
        else:
            blast_cl = NcbitblastnCommandline(query=infile, db="%s" % (dbobj.dna_db_name(),), soft_masking=True,
                                              evalue=evalue_threshold, word_size=6, outfmt=5, out=outfile,
                                              max_target_seqs=500)

    # run the blast binary from the configured NCBI directory
    cl = str(blast_cl)
    cl = "%s/%s" % (settings.NCBI_BIN_DIR, cl)
    r = subprocess.call(cl.split(" "))
    if r != 0:
        # blast can fail if blastdb is not there, which can happen if there were no
        # sequences available to build a db
        print "Blast failed: %s" % (cl,)
        try:
            os.unlink(outfile)
            os.unlink(infile)
        except:
            pass
        return []

    with open(outfile, "r") as f:
        blast_record = NCBIXML.read(f)
        for alignment in blast_record.alignments:
            accession = Blast_Accession(alignment.accession)
            for hsp in alignment.hsps:
                # since we doubled up the input, ignore hits starting after the input
                if hsp.query_start > len(input):
                    continue

                # check identity threshold (identities over aligned subject
                # length, gaps included)
                if identity_threshold is not None and \
                   1.0*hsp.identities/len(hsp.sbjct) < identity_threshold:
                    continue

                # normalise subject coordinates so hit_start <= hit_end;
                # `clockwise` records the orientation (currently unused below)
                if hsp.sbjct_end > hsp.sbjct_start:
                    clockwise = True
                    hit_start = hsp.sbjct_start
                    hit_end = hsp.sbjct_end
                else:
                    clockwise = False
                    hit_end = hsp.sbjct_start
                    hit_start = hsp.sbjct_end

                # check feature threshold: fraction of the feature covered
                if feature_threshold is not None and \
                   1.0*(1+abs(hit_end-hit_start))/accession.feature_length < feature_threshold:
                    continue

                # print "hit %s evalue %s" % (alignment.hit_def, hsp.expect)
                # print "  query %s-%s, sbjct %s-%s" % (hsp.query_start, hsp.query_end, hsp.sbjct_start, hsp.sbjct_end)

                # wrap coordinates on the doubled sequence back into range
                start = hsp.query_start
                end = hsp.query_end
                if end > len(input):
                    end = end % len(input)

                # annotate partial hits with the covered subrange of the feature
                feature = alignment.hit_def
                if hit_start != 1 or hit_end != accession.feature_length:
                    feature = '%s (%s-%s/%s)' % (feature, hit_start, hit_end, accession.feature_length)

                f = Aligned_Feature(alignment.hit_def, feature,
                                    start, end, hsp.sbjct_start, hsp.sbjct_end,
                                    accession.type,
                                    hsp.query, hsp.match, hsp.sbjct,
                                    hsp.expect, hsp.identities)
                setattr(f, 'feature_id', accession.feature_id)
                feature_list.append(f)

    os.unlink(outfile)
    os.unlink(infile)

    # remove truncated features across circular boundary
    filtered = []
    for f in feature_list:
        trumped = False
        if f.query_start == 1:
            # see if this feature is trumped by another one
            for other_f in feature_list:
                # same ending, direction, feature, but other_f is across circular
                # boundary (start > end)
                if other_f.query_start != f.query_start and \
                   other_f.query_end == f.query_end and \
                   other_f.feature_id == f.feature_id and \
                   other_f.query_start > other_f.query_end:
                    trumped = True
                    break
        if not trumped:
            filtered.append(f)
    return filtered
##################################
# Restriction enzyme analysis
#
from Bio.Restriction import CommOnly, RestrictionBatch
from Bio.Restriction import *
class Restriction_Site(Giraffe_Feature_Base):
    """A single restriction-enzyme recognition site found on a sequence.

    `cut` is the position after which the enzyme cuts; `clockwise` gives
    the strand orientation of the match.
    """

    def __init__(self, enzyme, start, end, clockwise, cut):
        label = str(enzyme)
        super(Restriction_Site, self).__init__(label, label, start, end,
                                               1, len(enzyme.site),
                                               Feature_Type_Choices.ENZYME[1],
                                               'Restriction Enzyme')
        self.enzyme = enzyme
        self.cut = cut
        self.layer = 'Restriction Enzymes'

    def to_dict(self):
        d = super(Restriction_Site, self).to_dict()
        d['elucidate'] = self.enzyme.elucidate()
        d['cut'] = self.cut
        return d
# Curated set of common restriction enzymes; MyEnzymes keeps only those
# whose elucidated pattern includes a '^' cut marker (i.e. a known,
# unambiguous cut position on the sense strand).
_MyEnzymes = [AatII, AflII, AgeI, ApaI, ApaLI, AscI, AseI,
              BamHI, BclI, BglII, BstBI, ClaI, DraI, EagI, EarI,
              EcoRI, EcoRV, FspI, HindIII, HpaI, KpnI, MscI,
              NarI, NcoI, NdeI, NheI, NotI, NruI, PacI,
              PmlI, PstI, PvuII, SacI, SacII, SalI, SmaI,
              SpeI, StuI, XbaI, XhoI, XmaI]

MyEnzymes = RestrictionBatch([e for e in _MyEnzymes if '^' in e.elucidate()])
def find_restriction_sites(sequence, circular=True):
    """Locate MyEnzymes cut sites in `sequence`.

    Returns a list of Restriction_Site objects with 1-based coordinates.
    For circular sequences the search runs over the doubled sequence and
    positions are wrapped back into range.
    """
    input_seq = clean_sequence(sequence)
    if circular is True:
        input2 = Seq(input_seq+input_seq)
    else:
        input2 = Seq(input_seq)

    # r maps each enzyme in the batch to its list of cut positions
    r = MyEnzymes.search(input2)
    cutter_list = []
    for enzyme in r:
        v = r[enzyme]
        for cut in v:
            # convert the reported position into "cut after this base",
            # wrapping around the (doubled) sequence start
            cut_after = cut-1
            if cut_after <= 0:
                cut_after += len(input2)
            # offset of the '^' cut marker within the recognition pattern
            # ('_' marks the complementary-strand cut and is stripped)
            pattern = enzyme.elucidate()
            pattern = re.sub(r'_', '', pattern)
            cut_off = pattern.find('^')
            if cut_off < 0:
                raise Exception('Cannot find cut site for %s (%s)' % (enzyme, pattern))
            # first try fwd: reconstruct the site span from the cut position
            start = cut-cut_off-1
            end = start+enzyme.size-1
            # print 'try %s vs %s' % (input2[start:end+1].lower(), enzyme.site.lower())
            if str(input2[start:end+1]).lower() == enzyme.site.lower():
                # only keep sites that begin within the original sequence
                if start < len(input_seq):
                    end = end % len(input_seq)
                    cut_after = cut_after % len(input_seq)
                    f = Restriction_Site(enzyme, start+1, end+1, True, cut_after)
                    cutter_list.append(f)
                    # print 'found %s' % (f.to_dict(),)
            else:
                # otherwise the site must match on the reverse strand
                end = cut+cut_off+1
                start = end-enzyme.size+1
                # print 'try rc %s vs %s' % (input2[start:end+1].reverse_complement().lower(), enzyme.site.lower())
                if str(input2[start:end+1].reverse_complement()).lower() == enzyme.site.lower():
                    if start < len(input_seq):
                        end = end % len(input_seq)
                        cut_after = cut_after % len(input_seq)
                        f = Restriction_Site(enzyme, start+1, end+1, False, cut_after)
                        cutter_list.append(f)
                        # print 'found %s' % (f.to_dict(),)
                else:
                    raise Exception('Cannot find reported cut site %s %s %s %s' % (enzyme, cut, cut_off, pattern))

    return cutter_list
##################################
# BLAST2
#
def blast2(subject, query):
    """Blast `query` against `subject` (raw sequence strings) with blastn.

    Uses gap-tolerant parameters and returns a list of dicts, one per HSP,
    with query/subject coordinates, evalue and the alignment strings.
    Raises Exception if the blastn invocation fails.
    """
    subject = clean_sequence(subject)
    query = clean_sequence(query)

    # write both sequences to temp FASTA files (removed on the way out)
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
        subject_file = f.name
        f.write(">Subject\n%s\n" % (subject,))

    with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
        query_file = f.name
        f.write(">Query\n%s\n" % (query,))

    outfile = "%s.out.xml" % (query_file,)
    blast_cl = NcbiblastnCommandline(query=query_file, subject=subject_file,
                                     evalue=0.001, word_size=6,
                                     # these params were tested to allow gaps in
                                     # alignments. i.e. large number of bps
                                     # misaligned or gapped.
                                     gapextend=4, gapopen=0, reward=2,
                                     outfmt=5, out=outfile)

    # run the blast binary from the configured NCBI directory
    cl = str(blast_cl)
    cl = "%s/%s" % (settings.NCBI_BIN_DIR, cl)
    r = subprocess.call(cl.split(" "))
    if r != 0:
        # best-effort cleanup; only swallow filesystem errors (the previous
        # bare except also hid KeyboardInterrupt/SystemExit)
        try:
            os.unlink(outfile)
            os.unlink(subject_file)
            os.unlink(query_file)
        except OSError:
            pass
        raise Exception("Blast failed: %s" % (cl,))

    res = []
    with open(outfile, "r") as f:
        blast_record = NCBIXML.read(f)
        for alignment in blast_record.alignments:
            for hsp in alignment.hsps:
                res.append({ "query_start": hsp.query_start,
                             "query_end": hsp.query_end,
                             "subject_start": hsp.sbjct_start,
                             "subject_end": hsp.sbjct_end,
                             "evalue": hsp.expect,
                             "query": hsp.query,
                             "match": hsp.match,
                             "subject": hsp.sbjct, })

    os.unlink(outfile)
    os.unlink(subject_file)
    os.unlink(query_file)
    return res
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import operator
import re
import textwrap
import numpy as np
from six.moves import range # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.contrib.labeled_tensor.python.ops import test_util
class AxisTest(tf.test.TestCase):
  """Tests for core.Axis construction, equality, sizing and concat."""

  def setUp(self):
    d_7 = tf.Dimension(7)
    p_rgb = ['red', 'green', 'blue']

    # axes backed by a Dimension, tick labels, a range, and no value at all
    self.i_7 = core.Axis('7', d_7)
    self.i_7p = core.Axis('7prime', d_7)
    self.i_rgb = core.Axis('rgb', p_rgb)
    self.i_range = core.Axis('range', range(7))
    self.i_unknown = core.Axis('unknown', None)

  def test_equality(self):
    # each axis equals only itself — both name and value participate
    axes = [self.i_7, self.i_7p, self.i_rgb, self.i_range, self.i_unknown]
    for i, axis_0 in enumerate(axes):
      for j, axis_1 in enumerate(axes):
        if i == j:
          self.assertEqual(axis_0, axis_1)
        else:
          self.assertNotEqual(axis_0, axis_1)

  def test_axis_value(self):
    self.assertEqual(self.i_7.value, tf.Dimension(7))
    self.assertTrue(self.i_range.value == tuple(range(7)))

  def test_axis_input(self):
    # an Axis can be rebuilt from its own name/value
    axes = [self.i_7, self.i_7p, self.i_rgb, self.i_range, self.i_unknown]
    for axis in axes:
      self.assertEqual(axis, core.Axis(axis.name, axis.value))

  def test_axis_value_input(self):
    # equivalent value containers (range, list, ndarray) yield equal axes
    axis = self.i_range
    for value in [range(7), list(range(7)), np.arange(7)]:
      self.assertEqual(axis, core.Axis(axis.name, value))

  def test_size(self):
    self.assertEqual(len(self.i_7), 7)
    self.assertEqual(len(self.i_rgb), 3)
    self.assertEqual(len(self.i_range), 7)
    self.assertEqual(self.i_unknown.size, None)

  def test_concat_single(self):
    red = core.Axis('rgb', ['red'])
    self.assertEqual(core.concat_axes([red]), red)

  def test_concat_many(self):
    red = core.Axis('rgb', ['red'])
    green = core.Axis('rgb', ['green'])
    blue = core.Axis('rgb', ['blue'])
    red_green_blue = core.Axis('rgb', ['red', 'green', 'blue'])
    self.assertEqual(core.concat_axes([red, green, blue]), red_green_blue)

  def test_concat_different_names(self):
    # concatenation requires matching axis names
    red = core.Axis('red', ['red'])
    green = core.Axis('green', ['red'])
    with self.assertRaises(ValueError):
      core.concat_axes([red, green])

  def test_concat_unknown(self):
    red = core.Axis('rgb', None)
    green = core.Axis('rgb', None)
    self.assertEqual(core.concat_axes([red, green]), red)

  def test_repr(self):
    self.assertEqual("Axis('7', Dimension(7))", repr(self.i_7))

  def test_invalid_input(self):
    with self.assertRaises(TypeError):
      core.Axis('foo', [{}])
    with self.assertRaises(ValueError):
      # duplicate tick labels are rejected
      core.Axis('foo', [1, 2, 3, 1])
    red = core.Axis('foo', ['red'])
    with self.assertRaises(tc.Error):
      core.concat_axes([red, 1])

  def test_as_axis(self):
    self.assertEqual(self.i_7, core.as_axis(('7', 7)))
    self.assertEqual(self.i_7, core.as_axis(self.i_7))
class AxesTest(tf.test.TestCase):
  """Tests for the core.Axes ordered mapping of Axis objects."""

  def setUp(self):
    d_7 = tf.Dimension(7)
    d_8 = tf.Dimension(8)
    p_rgb = ['red', 'green', 'blue']
    p_range = range(7)

    self.i_8 = core.Axis('8', d_8)

    self.a0 = core.Axes([('d7', d_7)])
    self.a1 = core.Axes([('d7', d_7)])
    self.a2 = core.Axes([('d7', d_7), ('rgb', p_rgb)])
    self.a3 = core.Axes([('8', d_8), ('range', p_range)])

  def test_equality(self):
    self.assertEqual(self.a0, self.a0)
    self.assertEqual(self.a0, self.a1)
    self.assertNotEqual(self.a0, self.a2)

  def test_repr(self):
    self.assertEqual("Axes([('d7', Dimension(7))])", repr(self.a0))

  def test_remove(self):
    # remove() returns a new Axes without the named axis
    a = self.a3.remove('range')
    self.assertEqual(a, core.Axes([self.i_8]))
    with self.assertRaises(KeyError):
      self.a3.remove('foobar')

  def test_typecheck_error_message(self):
    # the typecheck failure should spell out the allowed constructor types
    pattern = ('List(Union(labeled_tensor.Axis, Tuple(..., '
               'Union(Union(numpy.ndarray, %s, list, tuple), '
               'Optional(Union(tensorflow.Dimension, int))))))' %
               range.__name__)
    regexp = re.escape(pattern).replace(re.escape('...'), '.*')
    with self.assertRaisesRegexp(tc.Error, 'allowed type ' + regexp):
      core.Axes(None)
class LabeledTensorTest(test_util.Base):
  """Tests for LabeledTensor construction, indexing and tensor protocol."""

  def setUp(self):
    tensor = tf.ones([7, 3, 8, 1])
    # axes given as (name, ticks), (name, labels), (name, size), (name, Dim)
    a0 = ('x', range(7))
    a1 = ('channel', ['red', 'green', 'blue'])
    a2 = ('y', 8)
    a3 = ('z', tf.Dimension(1))

    self.lt = core.LabeledTensor(tensor, [a0, a1, a2, a3])

  def test_repr(self):
    pattern = textwrap.dedent("""\
    <LabeledTensor '...' shape=(7, 3, 8, 1) dtype=float32
     axes=[('x', ...),
           ('channel', ...),
           ('y', Dimension(8)),
           ('z', Dimension(1))]>""")
    regexp = re.escape(pattern).replace(re.escape('...'), '.*')
    self.assertRegexpMatches(repr(self.lt), regexp)

  def test_reuse_existing_axes(self):
    alt_lt = core.LabeledTensor(self.lt.tensor, self.lt.axes)
    self.assertLabeledTensorsEqual(alt_lt, self.lt)

  def test_reuse_existing_axis_objects(self):
    alt_lt = core.LabeledTensor(self.lt.tensor, self.lt.axes.values())
    self.assertLabeledTensorsEqual(alt_lt, self.lt)

  def test_indexing_scalars(self):
    # scalar indices drop the corresponding axes
    actual = self.lt[:, :, :, 0]
    expected = core.LabeledTensor(self.lt.tensor[:, :, :, 0],
                                  list(self.lt.axes.values())[:-1])
    self.assertLabeledTensorsEqual(actual, expected)

    actual = self.lt[1, :, :, 0]
    expected = core.LabeledTensor(self.lt.tensor[1, :, :, 0],
                                  list(self.lt.axes.values())[1:-1])
    self.assertLabeledTensorsEqual(actual, expected)

    actual = self.lt[1, 2, :, 0]
    expected = core.LabeledTensor(self.lt.tensor[1, 2, :, 0],
                                  list(self.lt.axes.values())[2:-1])
    self.assertLabeledTensorsEqual(actual, expected)

  def test_indexing_1d(self):
    lt_1d = self.lt[1, 2, :, 0]
    actual = lt_1d[3]
    expected = core.LabeledTensor(lt_1d.tensor[3], [])
    self.assertLabeledTensorsEqual(actual, expected)

  def test_indexing_slices(self):
    # slicing keeps the axis but trims its tick labels
    actual = self.lt[:3, :, :, :]
    axes = [('x', range(3))] + list(self.lt.axes.values())[1:]
    expected = core.LabeledTensor(self.lt.tensor[:3, :, :, :], axes)
    self.assertLabeledTensorsEqual(actual, expected)

  def test_invalid_indexing(self):
    # must supply exactly one index per axis
    with self.assertRaises(ValueError):
      self.lt[0]  # pylint: disable=pointless-statement
    with self.assertRaises(ValueError):
      self.lt[:, :, :, :, 0]  # pylint: disable=pointless-statement

  def test_unknown_size(self):
    tensor = tf.placeholder(tf.string, [None])
    actual = core.LabeledTensor(tensor, ['x'])
    self.assertIsNone(actual.axes['x'].size)
    self.assertIs(actual.axes['x'].value, tensor.get_shape()[0])

  def test_eq(self):
    self.assertEqual(self.lt, self.lt)
    self.assertNotEqual(self.lt, self.lt.tensor)
    self.assertNotEqual(self.lt.tensor, self.lt)

  def test_hash(self):
    lt1 = self.lt
    lt2 = core.LabeledTensor(self.lt.tensor, self.lt.axes)
    self.assertEqual(lt1, lt2)
    self.assertEqual(hash(lt1), hash(lt2))

  def test_name(self):
    self.assertEqual(self.lt.name, self.lt.tensor.name)

  def test_dtype(self):
    self.assertEqual(self.lt.dtype, self.lt.tensor.dtype)

  def test_get_shape(self):
    self.assertEqual(self.lt.get_shape(), self.lt.tensor.get_shape())

  def test_convert_to_tensor(self):
    # conversion hands back the wrapped tensor object itself
    expected = self.lt.tensor
    actual = tf.convert_to_tensor(self.lt)
    self.assertIs(expected, actual)
class Base(test_util.Base):
  """Shared fixture: a labeled rank-4 tensor plus two sliced views of it."""

  def setUp(self):
    self.x_size = 7
    self.channel_size = 3
    self.z_size = 4
    self.probs_size = 11

    tensor = tf.range(0, self.x_size * self.channel_size * self.z_size *
                      self.probs_size)
    tensor = tf.reshape(tensor, [self.x_size, self.channel_size, self.z_size,
                                 self.probs_size])
    # axes labeled with ticks, string labels, a bare name, and float ticks
    a0 = ('x', range(self.x_size))
    a1 = ('channel', ['red', 'green', 'blue'])
    a2 = 'z'
    a3 = ('probs', np.linspace(0.0, 1.0, self.probs_size))

    self.tensor = tensor
    self.a0 = a0
    self.a1 = a1
    self.a2 = a2
    self.a3 = a3
    self.original_lt = core.LabeledTensor(tensor, [a0, a1, a2, a3])

    # pre-sliced views used by the align tests
    self.x_probs_lt = core.slice_function(self.original_lt, {'z': 0,
                                                             'channel': 0})
    self.channel_probs_lt = core.slice_function(self.original_lt, {'x': 3,
                                                                   'z': 0})
class IdentityTest(Base):
  """Checks that core.identity tags its output op name."""

  def test_name(self):
    result = core.identity(self.original_lt)
    self.assertIn('lt_identity', result.name)
class SliceFunctionTest(Base):
  """Tests for core.slice_function over labeled and unlabeled axes."""

  def test_name(self):
    select_lt = core.slice_function(self.original_lt, {'channel': 1})
    self.assertIn('lt_slice', select_lt.name)

  def test_scalar(self):
    # a scalar key drops the sliced axis entirely
    select_lt = core.slice_function(self.original_lt, {'channel': 1})
    golden_lt = core.LabeledTensor(self.tensor[:, 1, :, :], [self.a0, self.a2,
                                                             self.a3])
    self.assertLabeledTensorsEqual(select_lt, golden_lt)

  def test_slice(self):
    # a slice key keeps the axis and trims its labels
    select_lt = core.slice_function(self.original_lt, {'channel': slice(0, 2)})

    a1_sliced = ('channel', ['red', 'green'])
    golden_lt = core.LabeledTensor(self.tensor[:, :2, :, :],
                                   [self.a0, a1_sliced, self.a2, self.a3])
    self.assertLabeledTensorsEqual(select_lt, golden_lt)

  def test_slices(self):
    select_lt = core.slice_function(self.original_lt, {'x': slice(1, 5),
                                                       'channel': slice(1,
                                                                        None)})

    a0_sliced = ('x', range(1, 5))
    a1_sliced = ('channel', ['green', 'blue'])
    golden_lt = core.LabeledTensor(self.tensor[1:5, 1:, :, :],
                                   [a0_sliced, a1_sliced, self.a2, self.a3])
    self.assertLabeledTensorsEqual(select_lt, golden_lt)

  def test_slice_unlabeled(self):
    # slicing an axis that has no tick labels keeps it unlabeled
    select_lt = core.slice_function(self.original_lt, {'z': slice(1, 3)})

    a2_sliced = 'z'
    golden_lt = core.LabeledTensor(self.tensor[:, :, 1:3, :],
                                   [self.a0, self.a1, a2_sliced, self.a3])
    self.assertLabeledTensorsEqual(select_lt, golden_lt)

  def test_slice_unknown_shape(self):
    lt = core.LabeledTensor(tf.placeholder(tf.float32, [None, 1]), ['x', 'y'])
    sliced_lt = core.slice_function(lt, {'y': 0})
    self.assertEqual(list(sliced_lt.axes.values()), [lt.axes['x']])
class TransposeTest(Base):
  """Tests for core.transpose."""

  def test_name(self):
    transpose_lt = core.transpose(self.original_lt,
                                  self.original_lt.axes.keys())
    self.assertIn('lt_transpose', transpose_lt.name)

  def test_identity(self):
    # transposing to the existing order is a no-op
    transpose_lt = core.transpose(self.original_lt,
                                  self.original_lt.axes.keys())
    golden_lt = self.original_lt
    self.assertLabeledTensorsEqual(transpose_lt, golden_lt)

  def test(self):
    transpose_lt = core.transpose(self.original_lt, ['z', 'channel', 'x',
                                                     'probs'])
    golden_lt = core.LabeledTensor(
        tf.transpose(self.tensor, [2, 1, 0, 3]),
        [self.a2, self.a1, self.a0, self.a3])
    self.assertLabeledTensorsEqual(transpose_lt, golden_lt)

  def test_default_axis_order(self):
    # with no order given, transpose reverses the axes
    transpose_lt = core.transpose(self.original_lt)
    golden_lt = core.LabeledTensor(
        tf.transpose(self.tensor, [3, 2, 1, 0]),
        list(reversed(list(self.original_lt.axes.values()))))
    self.assertLabeledTensorsEqual(transpose_lt, golden_lt)

  def test_invalid_input(self):
    # orders that omit an axis or name a nonexistent one are rejected
    with self.assertRaises(ValueError):
      core.transpose(self.original_lt, ['channel', 'x', 'probs'])
    with self.assertRaises(ValueError):
      core.transpose(self.original_lt, ['z', 'foo', 'x', 'probs'])
class ExpandDimsTest(Base):
  """Tests for core.expand_dims."""

  def test_name(self):
    expand_lt = core.expand_dims(self.original_lt, self.original_lt.axes.keys())
    self.assertIn('lt_expand', expand_lt.name)

  def test_identity(self):
    expand_lt = core.expand_dims(self.original_lt, self.original_lt.axes.keys())
    golden_lt = self.original_lt
    self.assertLabeledTensorsEqual(expand_lt, golden_lt)

  def test(self):
    # names not present in the input become new size-1 axes
    expand_lt = core.expand_dims(self.original_lt, ['foo', 'x', 'bar',
                                                    'channel', 'z', 'probs',
                                                    'grok'])
    golden_lt = core.LabeledTensor(
        tf.reshape(self.tensor, [1, self.x_size, 1, self.channel_size,
                                 self.z_size, self.probs_size, 1]),
        ['foo', self.a0, 'bar', self.a1, self.a2, self.a3, 'grok'])
    self.assertLabeledTensorsEqual(expand_lt, golden_lt)

  def test_label(self):
    # a (name, label) tuple creates a labeled size-1 axis
    expand_lt = core.expand_dims(self.original_lt, ['x',
                                                    'channel',
                                                    ('foo', 'bar'),
                                                    'z',
                                                    'probs',])
    golden_lt = core.LabeledTensor(
        tf.reshape(self.tensor, [self.x_size, self.channel_size, 1, self.z_size,
                                 self.probs_size]),
        [self.a0, self.a1, ('foo', ['bar']), self.a2, self.a3])
    self.assertLabeledTensorsEqual(expand_lt, golden_lt)

  def test_unknown_dimension(self):
    orig_lt = core.LabeledTensor(tf.placeholder(tf.float32, [None]), ['x'])
    expand_lt = core.expand_dims(orig_lt, ['x', 'y'])
    self.assertEqual(expand_lt.axes, core.Axes([('x', None), ('y', 1)]))

  def test_invalid_input(self):
    # existing axes must keep their relative order in the requested list
    with self.assertRaises(core.AxisOrderError):
      core.expand_dims(self.original_lt, ['foo', 'not_x', 'bar', 'channel', 'z',
                                          'probs', 'grok'])
    with self.assertRaises(core.AxisOrderError):
      core.expand_dims(self.original_lt, ['foo', 'z', 'bar', 'channel', 'x',
                                          'probs', 'grok'])
class AxisOrderScopeTest(Base):
  """Tests nesting and restoration behavior of core.axis_order_scope."""

  def test(self):
    xyz = ['x', 'y', 'z']
    abc = ['a', 'b', 'c']

    self.assertIsNone(core.get_axis_order())

    with core.axis_order_scope(xyz):
      self.assertEqual(core.get_axis_order(), xyz)

      with core.axis_order_scope():
        # a bare scope clears the current order
        self.assertIsNone(core.get_axis_order())

        with core.axis_order_scope(abc):
          self.assertEqual(core.get_axis_order(), abc)

        self.assertIsNone(core.get_axis_order())

      # the outer order is restored on exit
      self.assertEqual(core.get_axis_order(), xyz)

    self.assertIsNone(core.get_axis_order())
class CheckAxisOrderTest(Base):
  """Tests for core.check_axis_order."""

  def test_passes(self):
    axis_order = ['w', 'x', 'y', 'z']

    lt = core.LabeledTensor(tf.ones((1, 1, 1, 1)), axis_order)
    core.check_axis_order(lt, axis_order)

    # a contiguous subsequence of the order is also accepted
    lt = core.LabeledTensor(tf.ones((1, 1, 1)), axis_order[1:])
    core.check_axis_order(lt, axis_order)

    lt = core.LabeledTensor(tf.ones((1, 1, 1)), axis_order[:-1])
    core.check_axis_order(lt, axis_order)

  def test_invalid(self):
    axis_order = ['w', 'x', 'y', 'z']
    lt = core.LabeledTensor(tf.ones((1, 1, 1, 1)), axis_order)
    # no order given and no scope active
    with self.assertRaises(core.AxisOrderError):
      core.check_axis_order(lt)
    # order missing an axis of the tensor
    with self.assertRaises(core.AxisOrderError):
      core.check_axis_order(lt, axis_order[:-1])
    # order reversed relative to the tensor
    with self.assertRaises(core.AxisOrderError):
      core.check_axis_order(lt, axis_order[::-1])

  def test_scope(self):
    # with no explicit order, the enclosing axis_order_scope is used
    axis_order = ['w', 'x', 'y', 'z']
    lt = core.LabeledTensor(tf.ones((1, 1, 1, 1)), axis_order)
    with core.axis_order_scope(axis_order):
      core.check_axis_order(lt)
class ImposeAxisOrderTest(Base):
  """Tests for core.impose_axis_order."""

  def test_identity(self):
    axis_order = ['w', 'x', 'y', 'z']
    lt = core.LabeledTensor(tf.reshape(tf.range(24), (1, 2, 3, 4)), axis_order)
    actual = core.impose_axis_order(lt, axis_order)
    self.assertLabeledTensorsEqual(lt, actual)

    lt = core.LabeledTensor(tf.reshape(tf.range(6), (1, 2, 3)), axis_order[:3])
    actual = core.impose_axis_order(lt, axis_order)
    self.assertLabeledTensorsEqual(lt, actual)

  def test_reverse(self):
    axis_order = ['w', 'x', 'y', 'z']

    lt = core.LabeledTensor(tf.reshape(tf.range(24), (1, 2, 3, 4)), axis_order)
    actual = core.impose_axis_order(lt, axis_order[::-1])
    expected = core.transpose(lt, axis_order[::-1])
    self.assertLabeledTensorsEqual(expected, actual)

    # axes absent from the tensor are skipped when transposing
    lt = core.LabeledTensor(tf.reshape(tf.range(6), (1, 2, 3)), axis_order[:3])
    actual = core.impose_axis_order(lt, axis_order[::-1])
    expected = core.transpose(lt, ['y', 'x', 'w'])
    self.assertLabeledTensorsEqual(expected, actual)

  def test_scope(self):
    # with no explicit order, the enclosing axis_order_scope is used
    axis_order = ['w', 'x', 'y', 'z']

    lt = core.LabeledTensor(tf.reshape(tf.range(24), (1, 2, 3, 4)), axis_order)
    expected = core.transpose(lt, axis_order[::-1])
    with core.axis_order_scope(axis_order[::-1]):
      actual = core.impose_axis_order(lt)
    self.assertLabeledTensorsEqual(expected, actual)

  def test_invalid(self):
    lt = core.LabeledTensor(tf.reshape(tf.range(2), (1, 2)), ['x', 'y'])
    # no order and no active scope
    with self.assertRaises(ValueError):
      core.impose_axis_order(lt)
    # order that does not cover the tensor's axes
    with self.assertRaises(ValueError):
      core.impose_axis_order(lt, ['x'])
class FindConsistentOrderingTest(Base):
  """Table-driven tests for core._find_consistent_ordering."""

  def test(self):
    # (left, right, merged-or-None) — None marks irreconcilable orders
    cases = [
        ([], [], []),
        (['x'], [], ['x']),
        ([], ['x'], ['x']),
        (['x'], ['x'], ['x']),
        (['x'], ['y'], ['x', 'y']),
        (['y'], ['x'], ['y', 'x']),
        (['x', 'y'], ['x', 'y'], ['x', 'y']),
        (['x', 'y'], ['y', 'x'], None),
        (['x', 'y'], ['y', 'z'], ['x', 'y', 'z']),
        (['x', 'z'], ['y', 'z'], ['x', 'y', 'z']),
        (['x', 'y'], ['x', 'z'], ['x', 'y', 'z']),
        (['w', 'x'], ['y', 'z'], ['w', 'x', 'y', 'z']),
        (['x', 'y', 'z'], ['z', 'x'], None),
        (['x', 'y', 'z'], ['x'], ['x', 'y', 'z']),
        ([], ['x', 'y', 'z'], ['x', 'y', 'z']),
    ]
    for lhs, rhs, want in cases:
      got = core._find_consistent_ordering(lhs, rhs)
      msg = ('unexpected ordering between %r and %r:\nexpected: %r\nactual: %r'
             % (lhs, rhs, want, got))
      self.assertEqual(want, got, msg=msg)
class AlignTest(Base):
  """Tests for core.align broadcasting of two labeled tensors."""

  def test_name(self):
    align_lt_0, align_lt_1, _ = core.align(self.original_lt, self.original_lt)
    self.assertIn('lt_align', align_lt_0.name)
    self.assertIn('/0', align_lt_0.name)
    self.assertIn('lt_align', align_lt_1.name)
    self.assertIn('/1', align_lt_1.name)

  def test_identical_shaped_inputs(self):
    # identically-axed inputs pass through unchanged
    offset_tensor = self.original_lt.tensor + 1
    offset_lt = core.LabeledTensor(offset_tensor, self.original_lt.axes)

    align_lt, align_offset_lt, broadcast_axes = core.align(self.original_lt,
                                                           offset_lt)

    self.assertLabeledTensorsEqual(align_lt, self.original_lt)
    self.assertLabeledTensorsEqual(align_offset_lt, offset_lt)
    self.assertEqual(broadcast_axes, self.original_lt.axes)

  def test_different_inputs(self):
    # The correct axis ordering is ['x', 'channel', 'probs'].
    align_x_probs_lt, align_channel_probs_lt, broadcast_axes = core.align(
        self.x_probs_lt, self.channel_probs_lt)

    # each input gains size-1 axes for the dimensions it lacks
    x_probs_golden_lt = core.LabeledTensor(
        tf.reshape(self.x_probs_lt.tensor, [self.x_size, 1, self.probs_size]),
        [self.a0, 'channel', self.a3])

    self.assertLabeledTensorsEqual(align_x_probs_lt, x_probs_golden_lt)

    channel_probs_golden_lt = core.LabeledTensor(
        tf.reshape(self.channel_probs_lt.tensor,
                   [1, self.channel_size, self.probs_size]),
        ['x', self.a1, self.a3])

    self.assertLabeledTensorsEqual(align_channel_probs_lt,
                                   channel_probs_golden_lt)

    self.assertEqual(broadcast_axes, core.Axes([self.a0, self.a1, self.a3]))

  def test_axis_order_scope(self):
    xz_lt = core.LabeledTensor(tf.ones((2, 3)), ['x', 'z'])
    yz_lt = core.LabeledTensor(tf.ones((4, 3)), ['y', 'z'])

    # without a scope, broadcast order follows the first argument
    _, _, broadcast_axes = core.align(xz_lt, yz_lt)
    self.assertEqual(list(broadcast_axes.keys()), ['x', 'y', 'z'])

    _, _, broadcast_axes = core.align(yz_lt, xz_lt)
    self.assertEqual(list(broadcast_axes.keys()), ['y', 'x', 'z'])

    # an axis_order_scope fixes the broadcast order...
    with core.axis_order_scope(['x', 'y', 'z']):
      _, _, broadcast_axes = core.align(yz_lt, xz_lt)
      self.assertEqual(list(broadcast_axes.keys()), ['x', 'y', 'z'])

    # ...and rejects tensors with axes outside the scope
    with core.axis_order_scope(['x', 'y']):
      with self.assertRaises(core.AxisOrderError):
        core.align(xz_lt, yz_lt)
      with self.assertRaises(core.AxisOrderError):
        core.align(yz_lt, xz_lt)

  def test_invalid_input(self):
    # same axis name but different tick labels cannot be aligned
    lt_0 = core.LabeledTensor(tf.zeros([5]), [('a', range(5))])
    lt_1 = core.LabeledTensor(tf.zeros([5]), [('a', range(1, 6))])
    with self.assertRaises(ValueError):
      core.align(lt_0, lt_1)
class ConvertToLabeledTensorTest(Base):
  """Tests core.convert_to_labeled_tensor on tensors, scalars and arrays."""

  # TODO(shoyer): Simplify these tests once we can reuse labeled tensors in
  # assertLabeledTensorsEqual.

  def test_labeled_tensor(self):
    converted = core.convert_to_labeled_tensor(self.original_lt)
    self.assertLabeledTensorsEqual(converted, self.original_lt)

  def test_python_scalar(self):
    converted = core.convert_to_labeled_tensor(42)
    expected = core.LabeledTensor(tf.convert_to_tensor(42), [])
    self.assertLabeledTensorsEqual(converted, expected)

  def test_numpy_array(self):
    converted = core.convert_to_labeled_tensor(np.array(42))
    expected = core.LabeledTensor(tf.convert_to_tensor(42), [])
    self.assertLabeledTensorsEqual(converted, expected)

  def test_tensor(self):
    converted = core.convert_to_labeled_tensor(tf.constant(42))
    expected = core.LabeledTensor(tf.convert_to_tensor(42), [])
    self.assertLabeledTensorsEqual(converted, expected)

  def test_invalid_input(self):
    # non-scalar values without axis labels cannot be converted
    with self.assertRaises(ValueError):
      core.convert_to_labeled_tensor(tf.range(5))
    with self.assertRaises(ValueError):
      core.convert_to_labeled_tensor(np.array([1, 2]))
class DocStringCheckMixin(object):
  """Mixin verifying wrapped-op docstrings; requires self.ops."""

  def test_function_docstring_and_name(self):
    # every wrapped op must mention its tf counterpart and keep its name
    for op_name, _, _, lt_op in self.ops:
      if lt_op is None:
        continue
      self.assertIn('tf.%s' % op_name, lt_op.__doc__)
      self.assertEqual(op_name, lt_op.__name__)
class UnaryOpsTestsMixin(object):
  """Mixin exercising unary ops; requires self.ops and self.test_lt."""

  def test_core_op(self):
    # applying the labeled-tensor op must equal applying the raw tf op to
    # the underlying tensor with axes left unchanged
    for op_name, _, tf_op, lt_op in self.ops:
      if tf_op is not None:
        golden_lt = core.LabeledTensor(tf_op(self.test_lt.tensor),
                                       self.test_lt.axes)
        actual_lt = lt_op(self.test_lt)
        self.assertIn(op_name, actual_lt.name)
        self.assertLabeledTensorsEqual(golden_lt, actual_lt)

  def test_infix(self):
    # the infix/operator form must delegate to the same op
    for op_name, infix_op, _, _ in self.ops:
      if infix_op is not None:
        expected_lt = core.LabeledTensor(infix_op(self.test_lt.tensor),
                                         self.test_lt.axes)
        actual_lt = infix_op(self.test_lt)
        self.assertIn(op_name, actual_lt.name)
        self.assertLabeledTensorsEqual(expected_lt, actual_lt)
class CoreUnaryOpsTest(Base, DocStringCheckMixin, UnaryOpsTestsMixin):
  """Checks every labeled-tensor unary op against its tf counterpart.

  Each ops entry is (op_name, infix_op, tf_op, lt_op); None marks a form
  that does not exist for that op.
  """

  def setUp(self):
    super(CoreUnaryOpsTest, self).setUp()

    # NOTE: the original list repeated ('log', ...) and ('lgamma', ...);
    # the duplicates are removed here so each op is tested exactly once.
    self.ops = [
        ('abs', operator.abs, tf.abs, core.abs_function),
        ('neg', operator.neg, tf.neg, core.neg),
        # TODO(shoyer): add unary + to core TensorFlow
        ('pos', None, None, None),
        ('sign', None, tf.sign, core.sign),
        ('reciprocal', None, tf.reciprocal, core.reciprocal),
        ('square', None, tf.square, core.square),
        ('round', None, tf.round, core.round_function),
        ('sqrt', None, tf.sqrt, core.sqrt),
        ('rsqrt', None, tf.rsqrt, core.rsqrt),
        ('log', None, tf.log, core.log),
        ('exp', None, tf.exp, core.exp),
        ('ceil', None, tf.ceil, core.ceil),
        ('floor', None, tf.floor, core.floor),
        ('cos', None, tf.cos, core.cos),
        ('sin', None, tf.sin, core.sin),
        ('tan', None, tf.tan, core.tan),
        ('acos', None, tf.acos, core.acos),
        ('asin', None, tf.asin, core.asin),
        ('atan', None, tf.atan, core.atan),
        ('lgamma', None, tf.lgamma, core.lgamma),
        ('digamma', None, tf.digamma, core.digamma),
        ('erf', None, tf.erf, core.erf),
        ('erfc', None, tf.erfc, core.erfc),
    ]
    # scale the integer fixture into [0, 1) so domain-restricted ops
    # (sqrt, asin, ...) receive valid input
    total_size = np.prod([v.size for v in self.original_lt.axes.values()])
    self.test_lt = core.LabeledTensor(
        tf.cast(self.original_lt, tf.float32) / total_size,
        self.original_lt.axes)
class LogicalNotTest(Base, DocStringCheckMixin, UnaryOpsTestsMixin):

  def setUp(self):
    """Use a boolean tensor (comparison result) so logical_not is valid."""
    super(LogicalNotTest, self).setUp()
    self.ops = [
        ('logical_not', operator.invert, tf.logical_not, core.logical_not),
    ]
    self.test_lt = self.original_lt < 10
class BinaryOpsTestsMixin(object):
  # requires self.ops, self.test_lt_1, self.test_lt_2, self.test_lt_1_broadcast
  # and self.test_lt_2_broadcast to be defined

  def test_core_op(self):
    """The labeled op should match the tf op on pre-broadcast tensors."""
    for op_name, _, tf_op, lt_op in self.ops:
      expected = core.LabeledTensor(
          tf_op(self.test_lt_1_broadcast, self.test_lt_2_broadcast),
          self.broadcast_axes)
      result = lt_op(self.test_lt_1, self.test_lt_2)
      self.assertIn(op_name, result.name)
      self.assertLabeledTensorsEqual(expected, result)

  def test_infix(self):
    """Infix operators should agree with the named labeled op."""
    for op_name, infix_op, _, lt_op in self.ops:
      if infix_op is None:
        continue
      expected = lt_op(self.test_lt_1, self.test_lt_2)
      result = infix_op(self.test_lt_1, self.test_lt_2)
      self.assertIn(op_name, result.name)
      self.assertLabeledTensorsEqual(expected, result)
class CoreBinaryOpsTest(Base, DocStringCheckMixin, BinaryOpsTestsMixin):
  def setUp(self):
    super(CoreBinaryOpsTest, self).setUp()
    # Manually broadcast both operands to the common
    # (x, channel, probs) shape that BinaryOpsTestsMixin compares against.
    self.x_probs_broadcast_tensor = tf.reshape(
        self.x_probs_lt.tensor, [self.x_size, 1, self.probs_size])
    self.channel_probs_broadcast_tensor = tf.reshape(
        self.channel_probs_lt.tensor, [1, self.channel_size, self.probs_size])
    # == and != are not element-wise for tf.Tensor, so they shouldn't be
    # elementwise for LabeledTensor, either.
    self.ops = [
        ('add', operator.add, tf.add, core.add),
        ('sub', operator.sub, tf.sub, core.sub),
        ('mul', operator.mul, tf.mul, core.mul),
        ('div', operator.truediv, tf.div, core.div),
        ('mod', operator.mod, tf.mod, core.mod),
        ('pow', operator.pow, tf.pow, core.pow_function),
        ('equal', None, tf.equal, core.equal),
        ('less', operator.lt, tf.less, core.less),
        ('less_equal', operator.le, tf.less_equal, core.less_equal),
        ('not_equal', None, tf.not_equal, core.not_equal),
        ('greater', operator.gt, tf.greater, core.greater),
        ('greater_equal', operator.ge, tf.greater_equal, core.greater_equal),
    ]
    self.test_lt_1 = self.x_probs_lt
    self.test_lt_2 = self.channel_probs_lt
    self.test_lt_1_broadcast = self.x_probs_broadcast_tensor
    self.test_lt_2_broadcast = self.channel_probs_broadcast_tensor
    self.broadcast_axes = [self.a0, self.a1, self.a3]
  def test_reflexive(self):
    # Scalar-on-the-left dispatch (e.g. 2 + lt) should hit the reflected
    # dunder methods and produce the same result as the named op.
    labeled_tensor = self.x_probs_lt + 1  # all elements must be >0 for division
    for op_name, infix_op, _, lt_op in self.ops:
      if infix_op is not None:
        expected_lt = lt_op(2, labeled_tensor)
        actual_lt = infix_op(2, labeled_tensor)
        # Python uses greater for the reflexive version of less (and vise-versa)
        if 'less' in op_name:
          op_name = op_name.replace('less', 'greater')
        elif 'greater' in op_name:
          op_name = op_name.replace('greater', 'less')
        self.assertIn(op_name, actual_lt.name)
        self.assertLabeledTensorsEqual(expected_lt, actual_lt)
class LogicalBinaryOpsTest(Base, DocStringCheckMixin, BinaryOpsTestsMixin):
  # Both operands share the same axes, so the "broadcast" tensors are just
  # the raw underlying tensors.
  def setUp(self):
    super(LogicalBinaryOpsTest, self).setUp()
    self.ops = [
        ('logical_and', operator.and_, tf.logical_and, core.logical_and),
        ('logical_or', operator.or_, tf.logical_or, core.logical_or),
        ('logical_xor', operator.xor, tf.logical_xor, core.logical_xor),
    ]
    # Boolean operands built from comparisons on the shared fixture tensor.
    self.test_lt_1 = self.original_lt < 10
    self.test_lt_2 = self.original_lt < 5
    self.test_lt_1_broadcast = self.test_lt_1.tensor
    self.test_lt_2_broadcast = self.test_lt_2.tensor
    self.broadcast_axes = self.test_lt_1.axes
class FloatBinaryOpsTest(Base, DocStringCheckMixin, BinaryOpsTestsMixin):
  # Float-only binary ops without infix equivalents (hence None in the
  # infix slot of every table entry).
  def setUp(self):
    super(FloatBinaryOpsTest, self).setUp()
    self.ops = [
        ('igamma', None, tf.igamma, core.igamma),
        ('igammac', None, tf.igammac, core.igammac),
        ('zeta', None, tf.zeta, core.zeta),
        ('polygamma', None, tf.polygamma, core.polygamma),
        ('maximum', None, tf.maximum, core.maximum),
        ('minimum', None, tf.minimum, core.minimum),
        ('squared_difference', None, tf.squared_difference,
         core.squared_difference),
    ]
    # Scale values into (0, 1] so domain-restricted ops get valid inputs;
    # operand 2 is the complement so both stay in range.
    total_size = np.prod([v.size for v in self.original_lt.axes.values()])
    test_lt = core.LabeledTensor(
        tf.cast(self.original_lt, tf.float32) / total_size,
        self.original_lt.axes)
    self.test_lt_1 = test_lt
    self.test_lt_2 = 1.0 - test_lt
    self.test_lt_1_broadcast = self.test_lt_1.tensor
    self.test_lt_2_broadcast = self.test_lt_2.tensor
    self.broadcast_axes = self.test_lt_1.axes
# Run the test suite when executed as a script.
if __name__ == '__main__':
  tf.test.main()
| |
import sys
import os, numpy as np
import scipy.stats
import anglepy.paramgraphics as paramgraphics
import anglepy.ndict as ndict
import matplotlib.pyplot as plt
import Image
import math
import theano
import theano.tensor as T
from collections import OrderedDict
import preprocessing as pp
def main(n_z, n_hidden, dataset, seed, gfx=True, _size=None):
    '''Learn a variational auto-encoder with generative model p(x,y,z)=p(y)p(z)p(x|y,z).
    x and y are (always) observed.
    I.e. this cannot be used for semi-supervised learning

    Args (Python 2 script, no annotations):
        n_z: int, dimensionality of latent variable z.
        n_hidden: tuple/list of encoder hidden-layer sizes (reversed for the
            decoder, see GPUVAE_YZ_X call below).
        dataset: one of 'mnist', 'norb', 'norb_instances', 'svhn'.
        seed: numpy RNG seed.
        gfx: when True, save diagnostic images every 10 epochs.
        _size: image edge length for the NORB variants.
    '''
    assert (type(n_hidden) == tuple or type(n_hidden) == list)
    assert type(n_z) == int
    assert isinstance(dataset, basestring)
    print 'gpulearn_yz_x', n_z, n_hidden, dataset, seed
    import time
    logdir = 'results/gpulearn_yz_x_'+dataset+'_'+str(n_z)+'-'+str(n_hidden)+'-'+str(int(time.time()))+'/'
    if not os.path.exists(logdir): os.makedirs(logdir)
    print 'logdir:', logdir
    np.random.seed(seed)
    # Init data: each branch sets the dataset-specific globals used below
    # (x, x_valid, n_x, n_y, n_batch, type_px, mosaic layout, ...).
    if dataset == 'mnist':
        '''
        What works well:
        100-2-100 (Generated digits stay bit shady)
        1000-2-1000 (Needs pretty long training)
        '''
        import anglepy.data.mnist as mnist
        # MNIST
        size = 28
        train_x, train_y, valid_x, valid_y, test_x, test_y = mnist.load_numpy(size, binarize_y=True)
        # No preprocessing: identity encode/decode.
        f_enc, f_dec = lambda x:x, lambda x:x
        x = {'x': train_x[:,:].astype(np.float32), 'y': train_y[:,:].astype(np.float32)}
        x_valid = {'x': valid_x.astype(np.float32), 'y': valid_y.astype(np.float32)}
        L_valid = 1
        dim_input = (size,size)
        n_x = size*size
        n_y = 10
        n_batch = 1000
        colorImg = False
        bernoulli_x = True
        byteToFloat = False
        mosaic_w = 5
        mosaic_h = 2
        type_px = 'bernoulli'
    elif dataset == 'norb':
        # resized NORB dataset, reshuffled
        import anglepy.data.norb as norb
        size = _size #48
        train_x, train_y, test_x, test_y = norb.load_resized(size, binarize_y=True)
        _x = {'x': train_x, 'y': train_y}
        ndict.shuffleCols(_x)
        train_x = _x['x']
        train_y = _x['y']
        # Do PCA
        f_enc, f_dec, pca_params = pp.PCA(_x['x'][:,:10000], cutoff=2000, toFloat=False)
        ndict.savez(pca_params, logdir+'pca_params')
        x = {'x': f_enc(train_x).astype(np.float32), 'y':train_y.astype(np.float32)}
        x_valid = {'x': f_enc(test_x).astype(np.float32), 'y':test_y.astype(np.float32)}
        L_valid = 1
        n_x = x['x'].shape[0]
        n_y = 5
        dim_input = (size,size)
        n_batch = 1000 #23400/900 = 27
        colorImg = False
        bernoulli_x = False
        byteToFloat = False
        mosaic_w = 5
        mosaic_h = 1
        type_px = 'gaussian'
    elif dataset == 'norb_instances':
        # resized NORB dataset with the instances as classes
        import anglepy.data.norb2 as norb2
        size = _size #48
        x, y = norb2.load_numpy_subclasses(size, binarize_y=True)
        _x = {'x': x, 'y': y}
        ndict.shuffleCols(_x)
        # Do pre=processing
        if True:
            # Works
            f_enc, f_dec, pca_params = pp.PCA(_x['x'][:,:10000], cutoff=600, global_sd=True, toFloat=True)
            ndict.savez(pca_params, logdir+'pca_params')
        elif False:
            # Doesn't work
            f_enc, f_dec, pp_params = pp.normalize_noise(_x['x'][:,:50000], noise_sd=0.01, global_sd=True, toFloat=True)
        else:
            # Doesn't work
            f_enc, f_dec, params = pp.normalize_random(x=x[:,:10000], global_sd=True, toFloat=True)
            ndict.savez(params, logdir+'normalize_random_params')
        n_valid = 5000
        # NOTE(review): training uses columns [:-n_valid] but validation uses
        # [:n_valid] (the FIRST 5000), so the validation set overlaps the
        # training set -- looks like [-n_valid:] was intended; verify.
        x = {'x': f_enc(_x['x'][:,:-n_valid]).astype(np.float32), 'y':_x['y'][:,:-n_valid].astype(np.float32)}
        x_valid = {'x': f_enc(_x['x'][:,:n_valid]).astype(np.float32), 'y':_x['y'][:,:n_valid].astype(np.float32)}
        L_valid = 1
        n_x = x['x'].shape[0]
        n_y = 50
        dim_input = (size,size)
        n_batch = 5000 #23400/900 = 27
        colorImg = False
        bernoulli_x = False
        byteToFloat = False
        mosaic_w = 5
        mosaic_h = 1
        type_px = 'gaussian'
    elif dataset == 'svhn':
        # SVHN dataset
        import anglepy.data.svhn as svhn
        size = 32
        train_x, train_y, test_x, test_y = svhn.load_numpy(False, binarize_y=True) #norb.load_resized(size, binarize_y=True)
        extra_x, extra_y = svhn.load_numpy_extra(False, binarize_y=True)
        x = {'x': np.hstack((train_x, extra_x)), 'y':np.hstack((train_y, extra_y))}
        ndict.shuffleCols(x)
        #f_enc, f_dec, (x_sd, x_mean) = pp.preprocess_normalize01(train_x, True)
        f_enc, f_dec, pca_params = pp.PCA(x['x'][:,:10000], cutoff=1000, toFloat=True)
        ndict.savez(pca_params, logdir+'pca_params')
        n_y = 10
        x = {'x': f_enc(x['x']).astype(np.float32), 'y': x['y'].astype(np.float32)}
        x_valid = {'x': f_enc(test_x).astype(np.float32), 'y': test_y.astype(np.float32)}
        L_valid = 1
        n_x = x['x'].shape[0]
        dim_input = (size,size)
        n_batch = 5000
        colorImg = True
        bernoulli_x = False
        byteToFloat = False
        mosaic_w = 5
        mosaic_h = 2
        type_px = 'gaussian'
    # Init model
    n_hidden_q = n_hidden
    n_hidden_p = n_hidden
    from anglepy.models import GPUVAE_YZ_X
    updates = get_adam_optimizer(alpha=3e-4, beta1=0.9, beta2=0.999, weight_decay=0)
    # Decoder hidden sizes are the encoder's reversed (n_hidden_p[::-1]).
    model = GPUVAE_YZ_X(updates, n_x, n_y, n_hidden_q, n_z, n_hidden_p[::-1], 'softplus', 'softplus', type_px=type_px, type_qz='gaussianmarg', type_pz='gaussianmarg', prior_sd=1, uniform_y=True)
    # Dead branch kept for manual warm-starting from saved parameters.
    if False:
        dir = '/home/ubuntu/results/gpulearn_yz_x_svhn_300-(500, 500)-1414094291/'
        dir = '/home/ubuntu/results/gpulearn_yz_x_svhn_300-(500, 500)-1414163488/'
        w = ndict.loadz(dir+'w_best.ndict.tar.gz')
        v = ndict.loadz(dir+'v_best.ndict.tar.gz')
        ndict.set_value(model.w, w)
        ndict.set_value(model.v, v)
    # Some statistics for optimization
    # [best validation log-likelihood so far, epochs since improvement]
    ll_valid_stats = [-1e99, 0]
    # Fixed sample for visualisation
    z_sample = {'z': np.repeat(np.random.standard_normal(size=(n_z, 12)), 12, axis=1).astype(np.float32)}
    y_sample = {'y': np.tile(np.random.multinomial(1, [1./n_y]*n_y, size=12).T, (1, 12))}
    # Progress hook: runs every 10 epochs; tracks validation LL, checkpoints
    # parameters, and optionally renders sample images.
    def hook(epoch, t, ll):
        if epoch%10 != 0:
            return
        ll_valid, _ = model.est_loglik(x_valid, n_samples=L_valid, n_batch=n_batch, byteToFloat=byteToFloat)
        # Roll back to the last checkpoint if optimization diverged.
        if math.isnan(ll_valid):
            print "NaN detected. Reverting to saved best parameters"
            ndict.set_value(model.v, ndict.loadz(logdir+'v.ndict.tar.gz'))
            ndict.set_value(model.w, ndict.loadz(logdir+'w.ndict.tar.gz'))
            return
        if ll_valid > ll_valid_stats[0]:
            ll_valid_stats[0] = ll_valid
            ll_valid_stats[1] = 0
            ndict.savez(ndict.get_value(model.v), logdir+'v_best')
            ndict.savez(ndict.get_value(model.w), logdir+'w_best')
        else:
            ll_valid_stats[1] += 1
            # Stop when not improving validation set performance in 100 iterations
            # (early stopping is currently disabled by the `False and`).
            if False and ll_valid_stats[1] > 1000:
                print "Finished"
                with open(logdir+'hook.txt', 'a') as f:
                    print >>f, "Finished"
                exit()
        # Log
        ndict.savez(ndict.get_value(model.v), logdir+'v')
        ndict.savez(ndict.get_value(model.w), logdir+'w')
        print epoch, t, ll, ll_valid
        with open(logdir+'hook.txt', 'a') as f:
            print >>f, t, ll, ll_valid
        if gfx:
            # Graphics
            v = {i: model.v[i].get_value() for i in model.v}
            w = {i: model.w[i].get_value() for i in model.w}
            tail = '-'+str(epoch)+'.png'
            # First-layer encoder weights and decoder output weights as images.
            image = paramgraphics.mat_to_img(f_dec(v['w0x'][:].T), dim_input, True, colorImg=colorImg)
            image.save(logdir+'q_w0x'+tail, 'PNG')
            image = paramgraphics.mat_to_img(f_dec(w['out_w'][:]), dim_input, True, colorImg=colorImg)
            image.save(logdir+'out_w'+tail, 'PNG')
            # Random samples, then samples from the fixed (y, z) pair.
            _x = {'y': np.random.multinomial(1, [1./n_y]*n_y, size=144).T}
            _, _, _z_confab = model.gen_xz(_x, {}, n_batch=144)
            image = paramgraphics.mat_to_img(f_dec(_z_confab['x']), dim_input, colorImg=colorImg)
            image.save(logdir+'samples'+tail, 'PNG')
            _, _, _z_confab = model.gen_xz(y_sample, z_sample, n_batch=144)
            image = paramgraphics.mat_to_img(f_dec(_z_confab['x']), dim_input, colorImg=colorImg)
            image.save(logdir+'samples_fixed'+tail, 'PNG')
            # 2-D latent space: render a grid of decodings over z-quantiles
            # for each class, pasted into one labeled mosaic image.
            if n_z == 2:
                import ImageFont
                import ImageDraw
                n_width = 10
                submosaic_offset = 15
                submosaic_width = (dim_input[1]*n_width)
                submosaic_height = (dim_input[0]*n_width)
                mosaic = Image.new("RGB", (submosaic_width*mosaic_w, submosaic_offset+submosaic_height*mosaic_h))
                for digit in range(0,n_y):
                    if digit >= mosaic_h*mosaic_w: continue
                    _x = {}
                    n_batch_plot = n_width*n_width
                    _x['y'] = np.zeros((n_y,n_batch_plot))
                    _x['y'][digit,:] = 1
                    _z = {'z':np.zeros((2,n_width**2))}
                    for i in range(0,n_width):
                        for j in range(0,n_width):
                            _z['z'][0,n_width*i+j] = scipy.stats.norm.ppf(float(i)/n_width+0.5/n_width)
                            _z['z'][1,n_width*i+j] = scipy.stats.norm.ppf(float(j)/n_width+0.5/n_width)
                    _x, _, _z_confab = model.gen_xz(_x, _z, n_batch=n_batch_plot)
                    x_samples = _z_confab['x']
                    image = paramgraphics.mat_to_img(f_dec(x_samples), dim_input, colorImg=colorImg, tile_spacing=(0,0))
                    #image.save(logdir+'samples_digit_'+str(digit)+'_'+tail, 'PNG')
                    mosaic_x = (digit%mosaic_w)*submosaic_width
                    mosaic_y = submosaic_offset+int(digit/mosaic_w)*submosaic_height
                    mosaic.paste(image, (mosaic_x, mosaic_y))
                draw = ImageDraw.Draw(mosaic)
                draw.text((1,1),"Epoch #"+str(epoch)+" Loss="+str(int(ll)))
                #plt.savefig(logdir+'mosaic'+tail, format='PNG')
                mosaic.save(logdir+'mosaic'+tail, 'PNG')
            #x_samples = _x['x']
            #image = paramgraphics.mat_to_img(f_dec(x_samples), dim_input, colorImg=colorImg)
            #image.save(logdir+'samples2'+tail, 'PNG')
    # Optimize
    dostep = epoch_vae_adam(model, x, n_batch=n_batch, bernoulli_x=bernoulli_x, byteToFloat=byteToFloat)
    loop_va(dostep, hook)
    pass
# Training loop for variational autoencoder
def loop_va(doEpoch, hook, n_epochs=9999999):
    # Repeatedly run one training epoch and report (epoch, elapsed seconds,
    # epoch loss) to the hook. Effectively runs forever unless the hook
    # calls exit(); n_epochs is just a very large cap.
    import time
    t0 = time.time()
    for t in xrange(1, n_epochs):
        L = doEpoch()
        hook(t, time.time() - t0, L)
    print 'Optimization loop finished'
# Learning step for variational auto-encoder
def epoch_vae_adam(model, x, n_batch=100, convertImgs=False, bernoulli_x=False, byteToFloat=False):
    # Returns a closure that performs one full pass over the columns of x
    # in minibatches of n_batch and returns the mean objective per sample.
    print 'Variational Auto-Encoder', n_batch
    def doEpoch():
        from collections import OrderedDict
        # Number of datapoints = number of columns of any entry of x.
        n_tot = x.itervalues().next().shape[1]
        idx_from = 0
        L = 0
        while idx_from < n_tot:
            idx_to = min(n_tot, idx_from+n_batch)
            x_minibatch = ndict.getCols(x, idx_from, idx_to)
            idx_from += n_batch
            # Optional byte->float rescaling and Bernoulli resampling of inputs.
            if byteToFloat: x_minibatch['x'] = x_minibatch['x'].astype(np.float32)/256.
            if bernoulli_x: x_minibatch['x'] = np.random.binomial(n=1, p=x_minibatch['x']).astype(np.float32)
            # Get gradient
            #raise Exception()
            L += model.evalAndUpdate(x_minibatch, {}).sum()
            #model.profmode.print_summary()
        L /= n_tot
        return L
    return doEpoch
def get_adam_optimizer(alpha=3e-4, beta1=0.9, beta2=0.999, weight_decay=0.0):
    # Factory for an Adam update rule (Kingma & Ba) as a Theano updates dict.
    # The returned get_optimizer(w, g) maps shared parameters w and gradients
    # g to an OrderedDict of update expressions (gradient ASCENT: w + lr*grad).
    print 'AdaM', alpha, beta1, beta2, weight_decay
    def shared32(x, name=None, borrow=False):
        # Helper: float32 Theano shared variable.
        return theano.shared(np.asarray(x, dtype='float32'), name=name, borrow=borrow)
    def get_optimizer(w, g):
        updates = OrderedDict()
        it = shared32(0.)  # shared step counter t
        updates[it] = it + 1.
        fix1 = 1.-beta1**(it+1.) # To make estimates unbiased
        fix2 = 1.-beta2**(it+1.) # To make estimates unbiased
        # Bias-corrected step size (folds both corrections into lr).
        lr_t = alpha * T.sqrt(fix2) / fix1
        for i in w:
            gi = g[i]
            if weight_decay > 0:
                gi -= weight_decay * w[i] #T.tanh(w[i])
            # mean_squared_grad := E[g^2]_{t-1}
            mom1 = shared32(w[i].get_value() * 0.)
            mom2 = shared32(w[i].get_value() * 0.)
            # Update moments
            mom1_new = mom1 + (1.-beta1) * (gi - mom1)
            mom2_new = mom2 + (1.-beta2) * (T.sqr(gi) - mom2)
            # Compute the effective gradient
            effgrad = mom1_new / (T.sqrt(mom2_new) + 1e-8)
            # Do update
            w_new = w[i] + lr_t * effgrad
            # Apply update
            updates[w[i]] = w_new
            updates[mom1] = mom1_new
            updates[mom2] = mom2_new
        return updates
    return get_optimizer
| |
# Copyright (c) 2000-2002 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
utilities functions to generate file readable with Georg Sander's vcg
(Visualization of Compiler Graphs).
You can download vcg at http://rw4.cs.uni-sb.de/~sander/html/gshome.html
Note that vcg exists as a debian package.
See the documentation of vcg for explanation about the different value that
maybe used for the functions parameters
"""
__revision__ = "$Id: vcgutils.py,v 1.6 2003-12-10 08:15:09 syt Exp $"
import string
# Legal values for enumerated VCG attributes, shared by the GRAPH_/NODE_/
# EDGE_ATTRS tables below.
ATTRS_VAL = {
    'algos': ('dfs', 'tree', 'minbackward',
              'left_to_right','right_to_left',
              'top_to_bottom','bottom_to_top',
              'maxdepth', 'maxdepthslow', 'mindepth', 'mindepthslow',
              'mindegree', 'minindegree', 'minoutdegree',
              'maxdegree','maxindegree', 'maxoutdegree'),
    'booleans': ('yes', 'no'),
    'colors': ('black', 'white', 'blue', 'red', 'green', 'yellow',
               'magenta', 'lightgrey',
               'cyan', 'darkgrey', 'darkblue', 'darkred', 'darkgreen',
               'darkyellow', 'darkmagenta', 'darkcyan', 'gold',
               'lightblue', 'lightred', 'lightgreen', 'lightyellow',
               'lightmagenta', 'lightcyan', 'lilac', 'turquoise',
               'aquamarine', 'khaki', 'purple', 'yellowgreen', 'pink',
               'orange', 'orchid'),
    'shapes': ('box', 'ellipse', 'rhomb', 'triangle'),
    'textmodes': ('center', 'left_justify', 'right_justify'),
    'arrowstyles': ('solid', 'line', 'none'),
    'linestyles': ('continuous', 'dashed', 'dotted', 'invisible'),
    }
# meaning of possible values:
#  0 -> string
#  1 -> int
#  list -> value in list
GRAPH_ATTRS = {
    'title' : 0,
    'label' : 0,
    'color': ATTRS_VAL['colors'],
    'textcolor': ATTRS_VAL['colors'],
    'bordercolor': ATTRS_VAL['colors'],
    'width': 1,
    'height': 1,
    'borderwidth': 1,
    'textmode': ATTRS_VAL['textmodes'],
    'shape': ATTRS_VAL['shapes'],
    'shrink': 1,
    'stretch': 1,
    'orientation': ATTRS_VAL['algos'],
    'vertical_order': 1,
    'horizontal_order': 1,
    'xspace': 1,
    'yspace': 1,
    'layoutalgorithm' : ATTRS_VAL['algos'],
    'late_edge_labels' : ATTRS_VAL['booleans'],
    'display_edge_labels': ATTRS_VAL['booleans'],
    'dirty_edge_labels' : ATTRS_VAL['booleans'],
    'finetuning': ATTRS_VAL['booleans'],
    'manhattan_edges': ATTRS_VAL['booleans'],
    'smanhattan_edges': ATTRS_VAL['booleans'],
    'port_sharing': ATTRS_VAL['booleans'],
    'edges': ATTRS_VAL['booleans'],
    'nodes': ATTRS_VAL['booleans'],
    'splines': ATTRS_VAL['booleans'],
    }
NODE_ATTRS = {
    'title' : 0,
    'label' : 0,
    'color': ATTRS_VAL['colors'],
    'textcolor': ATTRS_VAL['colors'],
    'bordercolor': ATTRS_VAL['colors'],
    'width': 1,
    'height': 1,
    'borderwidth': 1,
    'textmode': ATTRS_VAL['textmodes'],
    'shape': ATTRS_VAL['shapes'],
    'shrink': 1,
    'stretch': 1,
    'vertical_order': 1,
    'horizontal_order': 1,
    }
EDGE_ATTRS = {
    'sourcename' : 0,
    'targetname' : 0,
    'label' : 0,
    'linestyle' : ATTRS_VAL['linestyles'],
    'class' : 1,
    'thickness' : 0,
    'color': ATTRS_VAL['colors'],
    'textcolor': ATTRS_VAL['colors'],
    'arrowcolor': ATTRS_VAL['colors'],
    'backarrowcolor': ATTRS_VAL['colors'],
    'arrowsize': 1,
    'backarrowsize': 1,
    'arrowstyle': ATTRS_VAL['arrowstyles'],
    'backarrowstyle': ATTRS_VAL['arrowstyles'],
    'textmode': ATTRS_VAL['textmodes'],
    'priority': 1,
    'anchor': 1,
    'horizontal_order': 1,
    }
# Misc utilities ###############################################################
def latin_to_vcg(st):
    """convert latin characters using vcg escape sequence

    Characters with code point >= 192 (latin accented letters) are replaced
    by the VCG escape ``\\fi<num>``; everything else is left untouched.
    """
    # Iterate over the distinct characters of the original string: replace()
    # substitutes every occurrence at once, so re-visiting duplicates (as the
    # original per-character loop did) was redundant. The original also
    # wrapped ord() in a bare try/except, but ord() cannot fail on a
    # single-character string, so no exception handling is needed.
    for char in set(st):
        if char not in string.ascii_letters and ord(char) >= 192:
            st = st.replace(char, r'\fi%d' % ord(char))
    return st
class VCGPrinter:
    """a vcg graph writer

    Emits VCG graph syntax to the given stream; indentation is tracked in
    two-space steps while a graph is open.
    """
    def __init__(self, output_stream):
        # output_stream: any object with a write(str) method
        self._stream = output_stream
        self._indent = ''
    def open_graph(self, **args):
        """open a vcg graph
        """
        self._stream.write('%sgraph:{\n'%self._indent)
        self._inc_indent()
        self._write_attributes(GRAPH_ATTRS, **args)
    def close_graph(self):
        """close a vcg graph
        """
        self._dec_indent()
        self._stream.write('%s}\n'%self._indent)
    def node(self, title, **args):
        """draw a node
        """
        self._stream.write('%snode: {title:"%s"' % (self._indent, title))
        self._write_attributes(NODE_ATTRS, **args)
        self._stream.write('}\n')
    def edge(self, from_node, to_node, edge_type='', **args):
        """draw an edge from a node to another.
        """
        # edge_type is a prefix such as 'back' or 'near' (e.g. 'backedge:').
        self._stream.write(
            '%s%sedge: {sourcename:"%s" targetname:"%s"' % (
            self._indent, edge_type, from_node, to_node))
        self._write_attributes(EDGE_ATTRS, **args)
        self._stream.write('}\n')
    # private ##################################################################
    def _write_attributes(self, attributes_dict, **args):
        """write graph, node or edge attributes

        attributes_dict maps attribute name to 0 (string), 1 (int) or a
        tuple of allowed values; unknown names or bad values raise.
        """
        for key, value in args.items():
            try:
                _type = attributes_dict[key]
            except KeyError:
                raise Exception('''no such attribute %s
possible attributes are %s''' % (key, attributes_dict.keys()))
            if not _type:
                self._stream.write('%s%s:"%s"\n' % (self._indent, key, value))
            elif _type == 1:
                self._stream.write('%s%s:%s\n' % (self._indent, key,
                                                  int(value)))
            elif value in _type:
                self._stream.write('%s%s:%s\n' % (self._indent, key, value))
            else:
                raise Exception('''value %s isn\'t correct for attribute %s
correct values are %s''' % (value, key, _type))
    def _inc_indent(self):
        """increment indentation
        """
        self._indent = '  %s' % self._indent
    def _dec_indent(self):
        """decrement indentation
        """
        self._indent = self._indent[:-2]
| |
# ******************************************************************************
# pysimm.forcefield.gaff2 module
# ******************************************************************************
#
# ******************************************************************************
# License
# ******************************************************************************
# The MIT License (MIT)
#
# Copyright (c) 2016 Michael E. Fortunato, Coray M. Colina
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
from itertools import permutations, combinations
import gasteiger
from pysimm.system import Angle, Dihedral, Improper
from forcefield import Forcefield
class Gaff2(Forcefield):
"""pysimm.forcefield.Gaff2
Forcefield object with typing rules for Gaff2 model.
By default reads data file in forcefields subdirectory.
Attributes:
ff_name: gaff2
pair_style: lj
bond_style: harmonic
angle_style: harmonic
dihedral_style: fourier
improper_style: cvff
ff_class: 1
"""
def __init__(self, db_file=None):
if not db_file and db_file is not False:
db_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
os.pardir, os.pardir, 'dat', 'forcefields', 'gaff2.json')
Forcefield.__init__(self, db_file)
self.name = 'gaff2'
self.pair_style = 'lj'
self.bond_style = 'harmonic'
self.angle_style = 'harmonic'
self.dihedral_style = 'fourier'
self.improper_style = 'cvff'
self.ff_class = '1'
def assign_ptypes(self, s):
"""pysimm.forcefield.Gaff2.assign_ptypes
Gaff2 specific particle typing rules.
Requires :class:`~pysimm.system.System` object :class:`~pysimm.system.Particle` objects have bonds defined.
*** use System.add_particle_bonding() to ensure this ***
*** Not entirely inclusive - some atom types not used ***
Args:
s: :class:`~pysimm.system.System`
Returns:
None
"""
all_types = set()
s.pair_style = self.pair_style
s.add_particle_bonding()
for p in s.particles:
p.bond_orders = [x.order for x in p.bonds]
if None in p.bond_orders:
error_print('error: bond orders are not set')
p.bond_elements = [x.a.elem if p is x.b else x.b.elem for x in
p.bonds]
p.nbonds = len(p.bond_elements)
if p.linker:
p.nbonds += 1
for p in s.particles:
if p.elem == 'H':
if 'O' in p.bond_elements:
water = False
for pb in p.bonded_to:
if pb.elem == 'O' and pb.bond_elements.count('H') == 2:
water = True
if water:
p.type_name = 'hw'
else:
p.type_name = 'ho'
elif 'N' in p.bond_elements:
p.type_name = 'hn'
elif 'P' in p.bond_elements:
p.type_name = 'hp'
elif 'S' in p.bond_elements:
p.type_name = 'hs'
elif 'C' in p.bond_elements:
for pb in p.bonded_to:
if pb.elem == 'C':
elctrwd = (pb.bond_elements.count('N') +
pb.bond_elements.count('O') +
pb.bond_elements.count('F') +
pb.bond_elements.count('P') +
pb.bond_elements.count('S') +
pb.bond_elements.count('Cl'))
if 4 in pb.bond_orders or 'A' in pb.bond_orders:
p.type_name = 'ha'
elif elctrwd == 0:
p.type_name = 'hc'
elif pb.nbonds == 4 and elctrwd == 1:
p.type_name = 'h1'
elif pb.nbonds == 4 and elctrwd == 2:
p.type_name = 'h2'
elif pb.nbonds == 4 and elctrwd == 3:
p.type_name = 'h3'
elif pb.nbonds == 3 and elctrwd == 1:
p.type_name = 'h4'
elif pb.nbonds == 3 and elctrwd == 2:
p.type_name = 'h5'
elif p.elem == 'C':
if p.nbonds == 3 and 'O' in p.bond_elements:
p.type_name = 'c'
elif 4 in p.bond_orders or 'A' in p.bond_orders:
for pb in p.bonded_to:
if pb.elem != 'C' and pb.elem != 'H':
p.type_name = 'cc'
if not p.type_name:
p.type_name = 'ca'
elif p.nbonds == 4:
p.type_name = 'c3'
elif p.nbonds == 3 and not 'O' in p.bond_elements:
p.type_name = 'c2'
elif p.nbonds == 2:
p.type_name = 'c1'
elif p.elem == 'F':
p.type_name = 'f'
elif p.elem == 'Cl':
p.type_name = 'cl'
elif p.elem == 'Br':
p.type_name = 'br'
elif p.elem == 'I':
p.type_name = 'i'
elif p.elem == 'N':
if 2 in p.bond_orders and p.nbonds == 2:
p.type_name = 'n2'
elif 2 in p.bond_orders and p.nbonds == 3:
p.type_name = 'na'
elif 3 in p.bond_orders:
p.type_name = 'n1'
elif p.bond_elements.count('O') == 2:
p.type_name = 'no'
elif p.nbonds <= 3:
amide = False
aromatic_ring = False
for pb in p.bonded_to:
if pb.elem == 'C':
if 4 in pb.bond_orders or 'A' in pb.bond_orders:
aromatic_ring = True
for b in pb.bonds:
bp = b.a if pb is b.b else b.b
if bp.elem == 'O' and b.order == 2:
amide = True
if amide:
p.type_name = 'n'
elif (4 in p.bond_orders or 'A' in p.bond_orders) and 'C' in p.bond_elements:
p.type_name = 'nc'
elif aromatic_ring:
p.type_name = 'nh'
else:
p.type_name = 'n3'
elif p.nbonds == 4:
p.type_name = 'n4'
else:
print(p.elem, p.nbonds, p.bond_elements, p.bond_orders)
elif p.elem == 'O':
if p.nbonds == 1:
p.type_name = 'o'
elif p.bond_elements.count('H') == 2:
p.type_name = 'ow'
elif p.bond_elements.count('H') == 1:
p.type_name = 'oh'
else:
p.type_name = 'os'
elif p.elem == 'P':
if 4 in p.bond_orders or 'A' in p.bond_orders:
p.type_name = 'pc'
elif p.nbonds == 2:
p.type_name = 'p2'
elif p.nbonds == 3 and p.bond_elements.count('H') == 3:
p.type_name = 'p3'
elif p.nbonds == 3:
p.type_name = 'p4'
elif p.nbonds == 4:
p.type_name = 'p5'
elif p.elem == 'S':
if p.nbonds == 1:
p.type_name = 's'
elif p.nbonds == 2 and 2 in p.bond_orders:
p.type_name = 's2'
elif p.nbonds == 3:
p.type_name = 's4'
elif p.nbonds == 4:
p.type_name = 's6'
elif len(set(p.bond_orders)) == 1 and p.bond_orders[0] == 1:
if 'H' in p.bond_elements:
p.type_name = 'sh'
elif p.nbonds == 2:
p.type_name = 'ss'
else:
print 'cant type particle %s' % p.tag
return p
type_ = self.particle_types.get(p.type_name)
if not type_:
print(p.tag, p.elem, p.type_name)
all_types.add(self.particle_types.get(p.type_name)[0])
for pt in all_types:
pt = pt.copy()
s.particle_types.add(pt)
for p in s.particles:
pt = s.particle_types.get(p.type_name)
if pt:
p.type = pt[0]
def assign_btypes(self, s):
"""pysimm.forcefield.Gaff2.assign_btypes
Gaff2 specific bond typing rules.
Requires :class:`~pysimm.system.System` object :class:`~pysimm.system.Particle` objects have bonds, type and type.name defined.
*** use after assign_ptypes ***
Args:
s: :class:`~pysimm.system.System`
Returns:
None
"""
all_types = set()
s.bond_style = self.bond_style
for b in s.bonds:
bt = self.bond_types.get('%s,%s' % (b.a.type.name, b.b.type.name))
if bt:
b.type_name = bt[0].name
else:
print ('couldnt type this bond %s,%s'
% (b.a.type.name, b.b.type.name))
return b
all_types.add(self.bond_types.get(b.type_name)[0])
for bt in all_types:
bt = bt.copy()
s.bond_types.add(bt)
for b in s.bonds:
bt = s.bond_types.get(b.type_name)
if bt:
b.type = bt[0]
    def assign_atypes(self, s):
        """pysimm.forcefield.Gaff2.assign_atypes

        Gaff2 specific angle typing rules.
        Requires :class:`~pysimm.system.System` object :class:`~pysimm.system.Particle` objects have bonds, type and type.name defined.
        *** use after assign_ptypes ***

        Args:
            s: :class:`~pysimm.system.System`

        Returns:
            None
        """
        all_types = set()
        s.angle_style = self.angle_style
        s.add_particle_bonding()
        # Enumerate all p1-p-p2 triplets centered on each particle.
        for p in s.particles:
            for p1 in p.bonded_to:
                for p2 in p.bonded_to:
                    if p1 is not p2:
                        # Each unordered triplet is visited twice (p1/p2
                        # swapped); skip it if already recorded.
                        unique = True
                        for a in s.angles:
                            if ((a.a is p1 and a.b is p and a.c is p2) or
                                    (a.a is p2 and a.b is p and a.c is p1)):
                                unique = False
                        if unique:
                            at = self.angle_types.get('%s,%s,%s'
                                                      % (p1.type.name,
                                                         p.type.name,
                                                         p2.type.name))
                            if at:
                                s.angles.add(Angle(type_name=at[0].name,
                                                   a=p1, b=p, c=p2))
                                all_types.add(at[0])
                            else:
                                print ('I cant type this angle %s,%s,%s'
                                       % (p1.type.name,
                                          p.type.name,
                                          p2.type.name))
        # Copy matched types into the system and link angles to them.
        for at in all_types:
            at = at.copy()
            s.angle_types.add(at)
        for a in s.angles:
            at = s.angle_types.get(a.type_name)
            if at:
                a.type = at[0]
    def assign_dtypes(self, s):
        """pysimm.forcefield.Gaff2.assign_dtypes

        Gaff2 specific dihedral typing rules.
        Requires :class:`~pysimm.system.System` object :class:`~pysimm.system.Particle` objects have bonds, type and type.name defined.
        *** use after assign_ptypes ***

        Args:
            s: :class:`~pysimm.system.System`

        Returns:
            None
        """
        all_types = set()
        s.dihedral_style = self.dihedral_style
        # Enumerate p1 - b.a - b.b - p2 quadruplets around every bond.
        for b in s.bonds:
            for p1 in b.a.bonded_to:
                for p2 in b.b.bonded_to:
                    if p1 is b.b or p2 is b.a:
                        continue
                    # Skip quadruplets already recorded (either direction).
                    unique = True
                    for d in s.dihedrals:
                        if ((d.a == p1 and d.b == b.a and
                                d.c == b.b and d.d == p2) or
                                (d.a == p2 and d.b == b.b and
                                 d.c == b.a and d.d == p1)):
                            unique = False
                    if unique:
                        p1_name = p1.type.name
                        a_name = b.a.type.name
                        b_name = b.b.type.name
                        p2_name = p2.type.name
                        dt = self.dihedral_types.get('%s,%s,%s,%s'
                                                     % (p1_name, a_name,
                                                        b_name, p2_name))
                        if dt:
                            if len(dt) == 1:
                                all_types.add(dt[0])
                                s.dihedrals.add(Dihedral(type_name=dt[0].name,
                                                         a=p1, b=b.a,
                                                         c=b.b, d=p2))
                            else:
                                # Several candidates: prefer the most specific
                                # one, i.e. the name with fewest 'X' wildcards.
                                index = 0
                                x = 5
                                for i in range(len(dt)):
                                    if dt[i].name.count('X') < x:
                                        index = i
                                        x = dt[i].name.count('X')
                                dt = dt[index]
                                all_types.add(dt)
                                s.dihedrals.add(Dihedral(type_name=dt.name,
                                                         a=p1, b=b.a,
                                                         c=b.b, d=p2))
                        else:
                            print ('I cant type this dihedral %s,%s,%s,%s'
                                   % (p1_name, a_name, b_name, p2_name))
        # Copy matched types into the system and link dihedrals to them
        # (exact name lookup only: wildcards disabled).
        for dt in all_types:
            dt = dt.copy()
            s.dihedral_types.add(dt)
        for d in s.dihedrals:
            dt = s.dihedral_types.get(d.type_name, item_wildcard=None)
            if dt:
                d.type = dt[0]
def assign_itypes(self, s):
"""pysimm.forcefield.Gaff2.assign_itypes
Gaff2 specific improper typing rules.
Args:
s: :class:`~pysimm.system.System`
Returns:
None
"""
all_types = set()
s.improper_style = self.improper_style
for p in s.particles:
if len(p.bonded_to) == 3:
for perm in permutations(p.bonded_to, 3):
p1_name = perm[0].type.eq_improper or perm[0].type.name
p2_name = perm[1].type.eq_improper or perm[1].type.name
p3_name = perm[2].type.eq_improper or perm[2].type.name
it = self.improper_types.get(','.join([p.type.name, p1_name,
p2_name, p3_name]), order=True)
if it:
all_types.add(it[0])
bonded_to = p.bonded_to.get('all')
s.impropers.add(Improper(type_name=it[0].name,
a=p, b=bonded_to[0],
c=bonded_to[1],
d=bonded_to[2]))
break
for it in all_types:
it = it.copy()
s.improper_types.add(it)
for i in s.impropers:
it = s.improper_types.get(i.type_name)
if it:
i.type = it[0]
    def assign_charges(self, s, charges='gasteiger'):
        """pysimm.forcefield.Gaff2.assign_charges
        Charge assignment. Gasteiger is default for now.
        Args:
            s: :class:`~pysimm.system.System`
            charges: charge assignment scheme; only 'gasteiger' is
                currently supported (anything else is silently ignored)
        Returns:
            None
        """
        if charges == 'gasteiger':
            print('adding gasteiger charges')
            gasteiger.set_charges(s)
| |
import pytest
import salt.modules.proxy as proxy
from tests.support.mock import MagicMock, call, patch
@pytest.fixture
def configure_loader_modules():
    """Load the proxy module with macOS ("Darwin") grains by default."""
    grains = {"os": "Darwin"}
    return {proxy: {"__grains__": grains}}
def test_get_http_proxy_macos():
    """
    Test to make sure that we correctly get the current proxy info
    on macOS
    """
    networksetup_output = (
        "Enabled: Yes\nServer: 192.168.0.1\nPort: 3128\nAuthenticated Proxy"
        " Enabled: 0"
    )
    cmd_run = MagicMock(return_value=networksetup_output)
    with patch.dict(proxy.__salt__, {"cmd.run": cmd_run}):
        result = proxy.get_http_proxy()
    cmd_run.assert_called_once_with("networksetup -getwebproxy Ethernet")
    assert result == {"enabled": True, "server": "192.168.0.1", "port": "3128"}
def test_get_https_proxy_macos():
    """
    Test to make sure that we correctly get the current proxy info
    on macOS
    """
    networksetup_output = (
        "Enabled: Yes\nServer: 192.168.0.1\nPort: 3128\nAuthenticated Proxy"
        " Enabled: 0"
    )
    cmd_run = MagicMock(return_value=networksetup_output)
    with patch.dict(proxy.__salt__, {"cmd.run": cmd_run}):
        result = proxy.get_https_proxy()
    cmd_run.assert_called_once_with("networksetup -getsecurewebproxy Ethernet")
    assert result == {"enabled": True, "server": "192.168.0.1", "port": "3128"}
def test_get_ftp_proxy_macos():
    """
    Test to make sure that we correctly get the current proxy info
    on macOS
    """
    networksetup_output = (
        "Enabled: Yes\nServer: 192.168.0.1\nPort: 3128\nAuthenticated Proxy"
        " Enabled: 0"
    )
    cmd_run = MagicMock(return_value=networksetup_output)
    with patch.dict(proxy.__salt__, {"cmd.run": cmd_run}):
        result = proxy.get_ftp_proxy()
    cmd_run.assert_called_once_with("networksetup -getftpproxy Ethernet")
    assert result == {"enabled": True, "server": "192.168.0.1", "port": "3128"}
def test_get_http_proxy_macos_none():
    """
    Test to make sure that we correctly return when there's no proxy set
    """
    cmd_run = MagicMock(
        return_value="Enabled: No\nServer:\nPort: 0\nAuthenticated Proxy Enabled: 0"
    )
    with patch.dict(proxy.__salt__, {"cmd.run": cmd_run}):
        result = proxy.get_http_proxy()
    cmd_run.assert_called_once_with("networksetup -getwebproxy Ethernet")
    # A disabled proxy is reported as an empty dict
    assert result == {}
def test_set_http_proxy_macos():
    """
    Test to make sure that we correctly set the proxy info
    on macOS
    """
    cmd_run = MagicMock()
    with patch.dict(proxy.__salt__, {"cmd.run": cmd_run}):
        result = proxy.set_http_proxy(
            "192.168.0.1",
            3128,
            "frank",
            "badpassw0rd",
            bypass_hosts=".moo.com,.salt.com",
        )
    cmd_run.assert_called_once_with(
        "networksetup -setwebproxy Ethernet 192.168.0.1 3128 On frank badpassw0rd"
    )
    assert result
def test_set_https_proxy_macos():
    """
    Test to make sure that we correctly set the proxy info
    on macOS
    """
    cmd_run = MagicMock()
    with patch.dict(proxy.__salt__, {"cmd.run": cmd_run}):
        result = proxy.set_https_proxy(
            "192.168.0.1",
            3128,
            "frank",
            "passw0rd",
            bypass_hosts=".moo.com,.salt.com",
        )
    cmd_run.assert_called_once_with(
        "networksetup -setsecurewebproxy Ethernet 192.168.0.1 3128 On frank passw0rd"
    )
    assert result
def test_set_ftp_proxy_macos():
    """
    Test to make sure that we correctly set the proxy info
    on macOS
    """
    cmd_run = MagicMock()
    with patch.dict(proxy.__salt__, {"cmd.run": cmd_run}):
        result = proxy.set_ftp_proxy(
            "192.168.0.1",
            3128,
            "frank",
            "badpassw0rd",
            bypass_hosts=".moo.com,.salt.com",
        )
    cmd_run.assert_called_once_with(
        "networksetup -setftpproxy Ethernet 192.168.0.1 3128 On frank badpassw0rd"
    )
    assert result
def test_get_proxy_win():
    """
    Test to make sure that we correctly get the current proxy info on
    Windows
    """
    # reg.read_value is consumed in order: ProxyServer data first, then
    # ProxyEnable (vdata 1 means enabled).
    result = [
        {
            "vdata": (
                "http=192.168.0.1:3128;https=192.168.0.2:3128;ftp=192.168.0.3:3128"
            )
        },
        {"vdata": 1},
    ]
    mock_reg_read = MagicMock(side_effect=result)
    expected = {
        "enabled": True,
        "http": {"server": "192.168.0.1", "port": "3128"},
        "https": {"server": "192.168.0.2", "port": "3128"},
        "ftp": {"server": "192.168.0.3", "port": "3128"},
    }
    with patch.dict(proxy.__grains__, {"os": "Windows"}), patch.dict(
        proxy.__utils__, {"reg.read_value": mock_reg_read}
    ):
        out = proxy.get_proxy_win()
        assert out == expected
        # Both registry values must have been queried.
        mock_reg_read.assert_any_call(
            hive="HKEY_CURRENT_USER",
            key="SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Internet Settings",
            vname="ProxyServer",
        )
        mock_reg_read.assert_any_call(
            hive="HKEY_CURRENT_USER",
            key="SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Internet Settings",
            vname="ProxyEnable",
        )
def test_get_http_proxy_windows():
    """
    Test to make sure that we correctly get the current proxy info on
    Windows
    """
    reg_read = MagicMock(
        return_value={
            "vdata": "http=192.168.0.1:3128;https=192.168.0.2:3128;ftp=192.168.0.3:3128"
        }
    )
    patch_grains = patch.dict(proxy.__grains__, {"os": "Windows"})
    patch_utils = patch.dict(proxy.__utils__, {"reg.read_value": reg_read})
    with patch_grains, patch_utils:
        result = proxy.get_http_proxy()
    reg_read.assert_called_once_with(
        hive="HKEY_CURRENT_USER",
        key="SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Internet Settings",
        vname="ProxyServer",
    )
    assert result == {"server": "192.168.0.1", "port": "3128"}
def test_get_https_proxy_windows():
    """
    Test to make sure that we correctly get the current proxy info on
    Windows
    """
    reg_read = MagicMock(
        return_value={
            "vdata": "http=192.168.0.1:3128;https=192.168.0.2:3128;ftp=192.168.0.3:3128"
        }
    )
    patch_grains = patch.dict(proxy.__grains__, {"os": "Windows"})
    patch_utils = patch.dict(proxy.__utils__, {"reg.read_value": reg_read})
    with patch_grains, patch_utils:
        result = proxy.get_https_proxy()
    reg_read.assert_called_once_with(
        hive="HKEY_CURRENT_USER",
        key="SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Internet Settings",
        vname="ProxyServer",
    )
    assert result == {"server": "192.168.0.2", "port": "3128"}
def test_get_ftp_proxy_windows():
    """
    Test to make sure that we correctly get the current proxy info on
    Windows
    """
    reg_read = MagicMock(
        return_value={
            "vdata": "http=192.168.0.1:3128;https=192.168.0.2:3128;ftp=192.168.0.3:3128"
        }
    )
    patch_grains = patch.dict(proxy.__grains__, {"os": "Windows"})
    patch_utils = patch.dict(proxy.__utils__, {"reg.read_value": reg_read})
    with patch_grains, patch_utils:
        result = proxy.get_ftp_proxy()
    reg_read.assert_called_once_with(
        hive="HKEY_CURRENT_USER",
        key="SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Internet Settings",
        vname="ProxyServer",
    )
    assert result == {"server": "192.168.0.3", "port": "3128"}
def test_get_all_proxies_macos_fails():
    """get_proxy_win must return None (and not touch the registry) off Windows."""
    reg_read = MagicMock()
    with patch.dict(proxy.__utils__, {"reg.read_value": reg_read}):
        result = proxy.get_proxy_win()
    reg_read.assert_not_called()
    assert result is None
def test_get_all_proxies_windows():
    """
    Test to make sure that we correctly get the current proxy info on
    Windows
    """
    # reg.read_value is consumed in order: ProxyServer data first, then
    # ProxyEnable (vdata 1 means enabled).
    results = [
        {
            "vdata": (
                "http=192.168.0.1:3128;https=192.168.0.2:3128;ftp=192.168.0.3:3128"
            )
        },
        {"vdata": 1},
    ]
    mock = MagicMock(side_effect=results)
    expected = {
        "enabled": True,
        "http": {"server": "192.168.0.1", "port": "3128"},
        "https": {"server": "192.168.0.2", "port": "3128"},
        "ftp": {"server": "192.168.0.3", "port": "3128"},
    }
    calls = [
        call(
            hive="HKEY_CURRENT_USER",
            key="SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Internet Settings",
            vname="ProxyServer",
        ),
        call(
            hive="HKEY_CURRENT_USER",
            key="SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Internet Settings",
            vname="ProxyEnable",
        ),
    ]
    with patch.dict(proxy.__grains__, {"os": "Windows"}), patch.dict(
        proxy.__utils__, {"reg.read_value": mock}
    ):
        out = proxy.get_proxy_win()
        mock.assert_has_calls(calls)
        assert expected == out
def test_set_http_proxy_windows():
    """
    Test to make sure that we correctly set the proxy info on Windows
    """
    # Expected registry writes, in order: ProxyServer, ProxyEnable,
    # ProxyOverride (the bypass list).
    calls = [
        call(
            hive="HKEY_CURRENT_USER",
            key="SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Internet Settings",
            vname="ProxyServer",
            vdata="http=192.168.0.1:3128;",
        ),
        call(
            hive="HKEY_CURRENT_USER",
            key="SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Internet Settings",
            vname="ProxyEnable",
            vdata=1,
            vtype="REG_DWORD",
        ),
        call(
            hive="HKEY_CURRENT_USER",
            key="SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Internet Settings",
            vname="ProxyOverride",
            vdata="<local>;.moo.com;.salt.com",
        ),
    ]
    mock_reg = MagicMock()
    mock_cmd = MagicMock()
    with patch.dict(proxy.__grains__, {"os": "Windows"}), patch.dict(
        proxy.__utils__, {"reg.set_value": mock_reg}
    ), patch.dict(proxy.__salt__, {"cmd.run": mock_cmd}):
        out = proxy.set_http_proxy(
            server="192.168.0.1",
            port=3128,
            bypass_hosts=[".moo.com", ".salt.com"],
        )
        mock_reg.assert_has_calls(calls)
        # The IE settings are then imported into winhttp.
        mock_cmd.assert_called_once_with("netsh winhttp import proxy source=ie")
        assert out
def test_set_https_proxy_windows():
    """
    Test to make sure that we correctly set the proxy info on Windows
    """
    # Expected registry writes, in order: ProxyServer, ProxyEnable,
    # ProxyOverride (the bypass list).
    calls = [
        call(
            hive="HKEY_CURRENT_USER",
            key="SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Internet Settings",
            vname="ProxyServer",
            vdata="https=192.168.0.1:3128;",
        ),
        call(
            hive="HKEY_CURRENT_USER",
            key="SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Internet Settings",
            vname="ProxyEnable",
            vdata=1,
            vtype="REG_DWORD",
        ),
        call(
            hive="HKEY_CURRENT_USER",
            key="SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Internet Settings",
            vname="ProxyOverride",
            vdata="<local>;.moo.com;.salt.com",
        ),
    ]
    mock_reg = MagicMock()
    mock_cmd = MagicMock()
    with patch.dict(proxy.__grains__, {"os": "Windows"}), patch.dict(
        proxy.__utils__, {"reg.set_value": mock_reg}
    ), patch.dict(proxy.__salt__, {"cmd.run": mock_cmd}):
        out = proxy.set_https_proxy(
            server="192.168.0.1",
            port=3128,
            bypass_hosts=[".moo.com", ".salt.com"],
        )
        mock_reg.assert_has_calls(calls)
        # The IE settings are then imported into winhttp.
        mock_cmd.assert_called_once_with("netsh winhttp import proxy source=ie")
        assert out
def test_set_ftp_proxy_windows():
    """
    Test to make sure that we correctly set the proxy info on Windows
    """
    # Expected registry writes, in order: ProxyServer, ProxyEnable,
    # ProxyOverride (the bypass list).
    calls = [
        call(
            hive="HKEY_CURRENT_USER",
            key="SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Internet Settings",
            vname="ProxyServer",
            vdata="ftp=192.168.0.1:3128;",
        ),
        call(
            hive="HKEY_CURRENT_USER",
            key="SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Internet Settings",
            vname="ProxyEnable",
            vdata=1,
            vtype="REG_DWORD",
        ),
        call(
            hive="HKEY_CURRENT_USER",
            key="SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Internet Settings",
            vname="ProxyOverride",
            vdata="<local>;.moo.com;.salt.com",
        ),
    ]
    mock_reg = MagicMock()
    mock_cmd = MagicMock()
    with patch.dict(proxy.__grains__, {"os": "Windows"}), patch.dict(
        proxy.__utils__, {"reg.set_value": mock_reg}
    ), patch.dict(proxy.__salt__, {"cmd.run": mock_cmd}):
        out = proxy.set_ftp_proxy(
            server="192.168.0.1",
            port=3128,
            bypass_hosts=[".moo.com", ".salt.com"],
        )
        mock_reg.assert_has_calls(calls)
        # The IE settings are then imported into winhttp.
        mock_cmd.assert_called_once_with("netsh winhttp import proxy source=ie")
        assert out
def test_set_proxy_windows():
    """
    Test to make sure that we correctly set the proxy info on Windows
    """
    # With no types argument, all three protocols get the same server:port.
    calls = [
        call(
            hive="HKEY_CURRENT_USER",
            key="SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Internet Settings",
            vname="ProxyServer",
            vdata=(
                "http=192.168.0.1:3128;https=192.168.0.1:3128;ftp=192.168.0.1:3128;"
            ),
        ),
        call(
            hive="HKEY_CURRENT_USER",
            key="SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Internet Settings",
            vname="ProxyEnable",
            vdata=1,
            vtype="REG_DWORD",
        ),
        call(
            hive="HKEY_CURRENT_USER",
            key="SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Internet Settings",
            vname="ProxyOverride",
            vdata="<local>;.moo.com;.salt.com",
        ),
    ]
    mock_reg = MagicMock()
    mock_cmd = MagicMock()
    with patch.dict(proxy.__grains__, {"os": "Windows"}), patch.dict(
        proxy.__utils__, {"reg.set_value": mock_reg}
    ), patch.dict(proxy.__salt__, {"cmd.run": mock_cmd}):
        out = proxy.set_proxy_win(
            server="192.168.0.1",
            port=3128,
            bypass_hosts=[".moo.com", ".salt.com"],
        )
        mock_reg.assert_has_calls(calls)
        # The IE settings are then imported into winhttp.
        mock_cmd.assert_called_once_with("netsh winhttp import proxy source=ie")
        assert out
def test_set_proxy_windows_no_ftp():
    """
    Test to make sure that we correctly set the proxy info on Windows
    """
    # Restricting types to http/https must leave ftp out of ProxyServer.
    calls = [
        call(
            hive="HKEY_CURRENT_USER",
            key="SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Internet Settings",
            vname="ProxyServer",
            vdata="http=192.168.0.1:3128;https=192.168.0.1:3128;",
        ),
        call(
            hive="HKEY_CURRENT_USER",
            key="SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Internet Settings",
            vname="ProxyEnable",
            vdata=1,
            vtype="REG_DWORD",
        ),
        call(
            hive="HKEY_CURRENT_USER",
            key="SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Internet Settings",
            vname="ProxyOverride",
            vdata="<local>;.moo.com;.salt.com",
        ),
    ]
    mock_reg = MagicMock()
    mock_cmd = MagicMock()
    with patch.dict(proxy.__grains__, {"os": "Windows"}), patch.dict(
        proxy.__utils__, {"reg.set_value": mock_reg}
    ), patch.dict(proxy.__salt__, {"cmd.run": mock_cmd}):
        out = proxy.set_proxy_win(
            server="192.168.0.1",
            port=3128,
            types=["http", "https"],
            bypass_hosts=[".moo.com", ".salt.com"],
        )
        mock_reg.assert_has_calls(calls)
        # The IE settings are then imported into winhttp.
        mock_cmd.assert_called_once_with("netsh winhttp import proxy source=ie")
        assert out
| |
from rest_framework import generics, permissions as drf_permissions
from rest_framework.exceptions import NotFound
from django.db.models import Q, Count, Subquery, OuterRef, Case, When, Value, CharField, F
from api.base import permissions as base_permissions
from api.base.exceptions import InvalidFilterOperator
from api.base.filters import ListFilterMixin
from api.base.views import JSONAPIBaseView
from api.base.utils import get_object_or_error
from api.base.versioning import PrivateVersioning
from api.meetings.serializers import MeetingSerializer, MeetingSubmissionSerializer
from api.meetings.permissions import IsPublic
from api.nodes.views import NodeMixin
from framework.auth.oauth_scopes import CoreScopes
from osf.models import AbstractNode, Conference, Contributor, Tag
from website import settings
class MeetingMixin(object):
    """Mixin with convenience method get_meeting
    """
    meeting_lookup_url_kwarg = 'meeting_id'
    def get_meeting(self):
        # Conferences are looked up by their endpoint slug taken from the URL.
        lookup = Q(endpoint=self.kwargs[self.meeting_lookup_url_kwarg])
        return get_object_or_error(
            Conference,
            lookup,
            self.request,
            display_name='meeting',
        )
class BaseMeetingView(JSONAPIBaseView, MeetingMixin):
    """Shared configuration for meeting views."""
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
    )
    # Meetings are read-only through this API: NULL write scope.
    required_read_scopes = [CoreScopes.MEETINGS_READ]
    required_write_scopes = [CoreScopes.NULL]
    model = Conference
    # This view goes under the _/ namespace
    versioning_class = PrivateVersioning
    serializer_class = MeetingSerializer
    view_category = 'meetings'
class MeetingList(BaseMeetingView, generics.ListAPIView, ListFilterMixin):
    """List meetings that have at least CONFERENCE_MIN_COUNT public submissions."""
    view_name = 'meeting-list'
    ordering = ('-modified', )  # default ordering
    ordering_fields = ('name', 'submissions_count', 'location', 'start_date',)
    # overrides ListFilterMixin
    def get_default_queryset(self):
        # Count only public, non-deleted submissions, then hide meetings
        # that fall below the configured minimum.
        qs = Conference.objects.filter(
            is_meeting=True,
            submissions__is_public=True,
            submissions__is_deleted=False,
        )
        qs = qs.annotate(submissions_count=Count(F('submissions')))
        return qs.filter(submissions_count__gte=settings.CONFERENCE_MIN_COUNT)
    # overrides ListAPIView
    def get_queryset(self):
        return self.get_queryset_from_request()
class MeetingDetail(BaseMeetingView, generics.RetrieveAPIView):
    """Retrieve a single meeting by its endpoint slug."""
    view_name = 'meeting-detail'
    def get_object(self):
        # Unlike the list view, no minimum submissions count is enforced
        # when a meeting is accessed directly.
        meeting = self.get_meeting()
        return meeting
class BaseMeetingSubmission(JSONAPIBaseView, MeetingMixin):
    """Shared configuration for meeting-submission views."""
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
        IsPublic,
    )
    required_read_scopes = [CoreScopes.MEETINGS_READ, CoreScopes.NODE_BASE_READ]
    required_write_scopes = [CoreScopes.NULL]
    model = AbstractNode
    # This view goes under the _/ namespace
    versioning_class = PrivateVersioning
    serializer_class = MeetingSubmissionSerializer
    view_category = 'meetings'
    def get_serializer_context(self):
        # The serializer needs the parent meeting available in its context.
        context = super(BaseMeetingSubmission, self).get_serializer_context()
        context.update({'meeting': self.get_meeting()})
        return context
class MeetingSubmissionList(BaseMeetingSubmission, generics.ListAPIView, ListFilterMixin):
    """List a meeting's submissions, filterable and sortable on the annotated
    fields ``meeting_category`` and ``author_name``."""
    view_name = 'meeting-submissions'
    ordering = ('-created', )  # default ordering
    ordering_fields = ('title', 'meeting_category', 'author_name', 'created', )
    # overrides ListFilterMixin
    def get_default_queryset(self):
        # Annotations are needed up front so that both filtering and
        # ordering can reference meeting_category / author_name.
        meeting = self.get_meeting()
        return self.annotate_queryset_for_filtering_and_sorting(meeting, meeting.valid_submissions)
    # overrides ListAPIView
    def get_queryset(self):
        return self.get_queryset_from_request()
    def build_query_from_field(self, field_name, operation):
        # author_name and meeting_category are annotations, not model
        # fields, so only the 'eq' operator is supported for them.
        if field_name == 'author_name':
            if operation['op'] != 'eq':
                raise InvalidFilterOperator(value=operation['op'], valid_operators=['eq'])
            return Q(author_name__icontains=operation['value'])
        if field_name == 'meeting_category':
            if operation['op'] != 'eq':
                raise InvalidFilterOperator(value=operation['op'], valid_operators=['eq'])
            return Q(meeting_category__icontains=operation['value'])
        return super(MeetingSubmissionList, self).build_query_from_field(field_name, operation)
    def annotate_queryset_for_filtering_and_sorting(self, meeting, queryset):
        # Order matters only in that both annotations must be present on
        # the returned queryset.
        queryset = self.annotate_queryset_with_meeting_category(meeting, queryset)
        queryset = self.annotate_queryset_with_author_name(queryset)
        return queryset
    def annotate_queryset_with_meeting_category(self, meeting, queryset):
        """
        Annotates queryset with meeting_category - if submission1 tag exists, use this,
        otherwise assume default submission2 tag
        """
        # Setup meeting category subquery (really existence of certain tags)
        category_1 = meeting.field_names.get('submission1', 'poster')
        category_2 = meeting.field_names.get('submission2', 'talk')
        tag_subquery = Tag.objects.filter(
            abstractnode_tagged=OuterRef('pk'),
            name=category_1,
        ).values_list('name', flat=True)
        # cat_one_count == 1 means the node carries the category_1 tag.
        queryset = queryset.annotate(cat_one_count=Count(Subquery(tag_subquery))).annotate(
            meeting_category=Case(
                When(cat_one_count=1, then=Value(category_1)),
                default=Value(category_2),
                output_field=CharField(),
            ),
        )
        return queryset
    def annotate_queryset_with_author_name(self, queryset):
        """
        Annotates queryset with author_name_category - it is the family_name if it exists, otherwise,
        the fullname is used
        """
        # Setup author name subquery (really first bibliographic contributor)
        contributors = Contributor.objects.filter(
            visible=True,
            node_id=OuterRef('pk'),
        ).order_by('_order')
        queryset = queryset.annotate(
            author_family_name=Subquery(contributors.values(('user__family_name'))[:1]),
            author_full_name=Subquery(contributors.values(('user__fullname'))[:1]),
            author_id=Subquery(contributors.values(('user__guids___id'))[:1]),
        ).annotate(
            # Fall back to the full name when the family name is empty.
            author_name=Case(
                When(author_family_name='', then=F('author_full_name')),
                default=F('author_family_name'),
                output_field=CharField(),
            ),
        )
        return queryset
class MeetingSubmissionDetail(BaseMeetingSubmission, generics.RetrieveAPIView, NodeMixin):
    """Retrieve a single submission (node) belonging to a meeting."""
    view_name = 'meeting-submission-detail'
    serializer_class = MeetingSubmissionSerializer
    node_lookup_url_kwarg = 'submission_id'
    def get_object(self):
        """Return the node if it is a submission to this meeting.

        Raises:
            NotFound: if the node is not among the meeting's submissions.
        """
        meeting = self.get_meeting()
        node = self.get_node()
        # Submission must be associated with the Conference. Use a single
        # EXISTS query rather than materializing every submission id and
        # doing the membership test in Python.
        if not meeting.submissions.filter(id=node.id).exists():
            raise NotFound('This is not a submission to {}.'.format(meeting.name))
        return node
| |
"""
Module units: classes, functions and constants for working with players,
passengers, and their tokens.
Created on January 15, 2013
@author: Windward Studios, Inc. (www.windward.net).
No copyright claimed - do anything you want with this code.
"""
from xml.etree import ElementTree as ET
import debug
# Status codes the server can report for a player update; each value's
# meaning is spelled out in the explanatory string below.
STATUS = ("UPDATE", "NO_PATH", "PASSENGER_ABANDONED", "PASSENGER_DELIVERED",
          "PASSENGER_DELIVERED_AND_PICKED_UP", "PASSENGER_REFUSED",
          "PASSENGER_PICKED_UP", "PASSENGER_NO_ACTION")
"""
Current status of the player:
UPDATE: Called ever N ticks to update the AI with the game status.
NO_PATH: The car has no path.
PASSENGER_ABANDONED: The passenger was abandoned, no passenger was picked up.
PASSENGER_DELIVERED: The passenger was delivered, no passenger was picked up.
PASSENGER_DELIVERED_AND_PICKED_UP: The passenger was delivered or abandoned, a new passenger was picked up.
PASSENGER_REFUSED: The passenger refused to exit at the bus stop because an enemy was there.
PASSENGER_PICKED_UP: A passenger was picked up. There was no passenger to deliver.
PASSENGER_NO_ACTION: At a bus stop, nothing happened (no drop off, no pick up).
"""
class Player(object):
    """Class for representing a player in the game."""
    def __init__(self, element, pickup=None, passes=None, score=0):
        """Create a player instance from the given XML Element.
        Initialize the following instance variables:
        guid -- A unique string identifier for this player. This string will remain
            constant throughout the game (while the Player objects passed will change
            on every call).
        name -- The name of this player.
        pickup -- List of who to pick up at the next bus stop. Can be empty and
            can also only list people not at the next bus stop. This may be
            wrong after a pick-up occurs as all we get is a count. This is
            updated with the most recent list sent to the server.
        passengersDelivered -- The passengers delivered so far (this game).
        limo -- The player's limo.
        score -- The score for this player (this game, not across all games so far).
        """
        if isinstance(element, basestring):
            element = ET.XML(element)
        self.guid = element.get('guid')
        self.name = element.get('name')
        self.limo = Limo( (int(element.get('limo-x')), int(element.get('limo-y'))),
                          int(element.get('limo-angle')))
        # None sentinels instead of mutable [] defaults so every player
        # gets its own fresh lists.
        self.pickup = pickup if pickup else []
        self.passengersDelivered = passes if passes else []
        self.score = score
    def __repr__(self):
        return ("Player('" +
                '<player guid="%s" name=%r limo-x="%r" limo-y="%r" limo-angle="%r">' %
                (self.guid, self.name, self.limo.tilePosition[0], self.limo.tilePosition[1], self.limo.angle) +
                "', %r, %r, %r)" % (self.pickup, self.passengersDelivered, self.score))
    def __str__(self):
        return "%s; numDelivered:%r" % (self.name, len(self.passengersDelivered))
    def __eq__(self, other):
        # Players are identified solely by their guid.
        return isinstance(other, Player) and other.guid == self.guid
    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__; define it so that
        # != stays consistent with ==.
        return not self.__eq__(other)
    def __hash__(self):
        # Hash only the guid so hash() agrees with __eq__ (which ignores
        # the name). Previously the name was mixed in, so two equal
        # players could hash differently, breaking set/dict lookups.
        return hash(self.guid)
class Limo(object):
    """A player's limo - holds a single passenger."""
    def __init__(self, tilePosition, angle, path=None, passenger=None):
        """tilePosition -- The location in tile units of the center of the vehicle.
        angle -- the angle this unit is facing (an int from 0 to 359; 0 is
            North and 90 is East.
        path -- Only set for the AI's own limo - the number of tiles
            remaining in the limo's path. This may be wrong after movement
            as all we get is a count. This is updated with the most recent
            list sent to the server.
        passenger -- The passenger in this limo. None if there is no passenger.
        """
        self.tilePosition = tilePosition
        self.angle = angle
        # None sentinel (not a mutable [] default) so each limo gets its
        # own path list.
        self.path = path if path else []
        self.passenger = passenger
    def __str__(self):
        if self.passenger is not None:
            return ("%s:%s; Passenger:%s; Dest:%s; PathLength:%s" %
                    (self.tilePosition, self.angle, self.passenger.name,
                     self.passenger.destination, len(self.path)))
        else:
            return "%s:%s; Passenger:{none}" % (self.tilePosition, self.angle)
class Passenger(object):
    """A company CEO."""
    def __init__(self, element, companies):
        """Create a passenger from XML and a list of Company objects.
        name -- The name of this passenger.
        pointsDelivered -- The number of points a player get for delivering this passenger.
        car -- The limo the passenger is currently in. None if they are not in a limo.
        lobby -- The bus stop the passenger is currently waiting in. None if they
            are in a limo or if they have arrived at their final destination.
        destination -- The company the passenger wishes to go to next. This is
            valid both at a bus stop and in a car. It is None of they have been
            delivered to their final destination.
        route -- The remaining companies the passenger wishes to go to after
            destination, in order. This does not include their current destination.
        enemies -- List of other Passenger objects. If any of them are at a bus
            stop, this passenger will not exit the limo at that stop. If a
            passenger at the bus stop has this passenger as an enemy, this
            passenger can still exit the car.
        """
        def companyNamed(name):
            return [c for c in companies if c.name == name][0]
        self.name = element.get('name')
        self.pointsDelivered = int(element.get('points-delivered'))
        lobby = element.get('lobby')
        self.lobby = companyNamed(lobby) if lobby is not None else None
        dest = element.get('destination')
        self.destination = companyNamed(dest) if dest is not None else None
        self.route = []
        for routeElement in element.findall('route'):
            debug.trap()
            self.route.append(companyNamed(routeElement.text))
        self.enemies = []
        self.car = None
    def __repr__(self):
        return self.name
def playersFromXml (element):
    """Called on setup to create initial list of players."""
    players = []
    for playerElement in element.findall('player'):
        players.append(Player(playerElement))
    return players
def updatePlayersFromXml (players, passengers, element):
    """Update a list of Player objects with passengers from the given XML."""
    for playerElement in element.findall('player'):
        guid = playerElement.get('guid')
        player = [p for p in players if p.guid == guid][0]
        player.score = float(playerElement.get('score'))
        # car location
        x = int(playerElement.get('limo-x'))
        y = int(playerElement.get('limo-y'))
        player.limo.tilePosition = (x, y)
        player.limo.angle = int(playerElement.get('limo-angle'))
        # see if we now have a passenger
        psgrName = playerElement.get('passenger')
        if psgrName is None:
            player.limo.passenger = None
        else:
            passenger = [p for p in passengers if p.name == psgrName][0]
            player.limo.passenger = passenger
            passenger.car = player.limo
        # add most recent delivery if this is the first time we're told.
        deliveredName = playerElement.get('last-delivered')
        if deliveredName is not None:
            delivered = [p for p in passengers if p.name == deliveredName][0]
            if delivered not in player.passengersDelivered:
                player.passengersDelivered.append(delivered)
def passengersFromXml (element, companies):
    """Create the initial list of Passenger objects from XML.

    element -- XML element whose 'passenger' children describe the passengers.
    companies -- list of Company objects (used to resolve lobbies).

    Returns the Passenger list with enemies and company lobbies wired up.
    """
    elements = element.findall('passenger')
    passengers = [Passenger(psgr, companies) for psgr in elements]
    # need to now assign enemies - needed all Passenger objects created first
    for elemOn in elements:
        psgr = [p for p in passengers if p.name == elemOn.get('name')][0]
        # A list comprehension (not filter(...)[0]) so this also works on
        # Python 3, where filter() returns a non-subscriptable iterator;
        # it also matches the lookup style used throughout this module.
        psgr.enemies = [[p for p in passengers if p.name == e.text][0]
                        for e in elemOn.findall('enemy')]
    # set if they're in a lobby
    for psgr in passengers:
        if psgr.lobby is not None:
            company = [c for c in companies if c == psgr.lobby][0]
            company.passengers.append(psgr)
    return passengers
def updatePassengersFromXml (passengers, companies, element):
    """Update Passenger objects (and their companies) from a status XML element."""
    for psgrElement in element.findall('passenger'):
        passenger = [p for p in passengers if p.name == psgrElement.get('name')][0]
        dest = psgrElement.get('destination')
        if dest is not None:
            destination = [c for c in companies if c.name == dest][0]
            passenger.destination = destination
            # the new destination is no longer pending on the route
            if destination in passenger.route:
                passenger.route.remove(destination)
        # set props based on waiting, travelling, done
        status = psgrElement.get('status')
        if status == "lobby":
            lobbyName = psgrElement.get('lobby')
            passenger.lobby = [c for c in companies if c.name == lobbyName][0]
            passenger.car = None
        elif status == "travelling":
            # passenger.car set in Player update
            passenger.lobby = None
        elif status == "done":
            debug.trap()
            passenger.destination = None
            passenger.lobby = None
            passenger.car = None
        else:
            raise TypeError("Invalid passenger status in XML: %r" % status)
| |
#!/usr/bin/env python
from __future__ import division
from __future__ import with_statement
import numpy as np
from pylab import ion
import matplotlib as mpl
from matplotlib.path import Path
from matplotlib import pyplot as plt
from scipy.optimize import curve_fit
import sys
import time
import cPickle as pickle
from smartFormat import smartFormat
__author__ = "J.L. Lanfranchi"
__email__ = "jll1062@phys.psu.edu"
__copyright__ = "Copyright 2014 J.L. Lanfranchi"
__credits__ = ["J.L. Lanfranchi"]
__license__ = """Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including without
limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom
the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
#-- Turn live-interactive plotting on (makes updated plots appear animated)
ion()
#-- Adjust the font used on the plots (applied globally via matplotlib rc)
font = {'family' : 'serif', 'weight' : 'normal', 'size' : 8}
mpl.rc('font', **font)
def createChain(nSteps):
    """State is taken to be direction of travel; there are 4 directions, so
    four states: right (0), up (1), left (2), and down (3). The only allowed
    transitions are
      state -> state + 1 (modulo 4)
      state -> state - 1 (modulo 4)
      state -> state
    Then, it must be checked that the coordinate isn't already in the chain;
    if it is, then None is returned; otherwise, the algo repeats until a
    chain of nSteps is reached.

    Args:
        nSteps: number of steps; a successful chain has nSteps+1 coordinates.

    Returns:
        List of (x, y) tuples, or None if the walk self-intersected.
    """
    #-- Displacement for each state: right, up, left, down
    steps = {0: (1, 0), 1: (0, 1), 2: (-1, 0), 3: (0, -1)}
    #-- Initialize chain to start at (0,0) and move to (0,1) (i.e., move up)
    coord = (0, 1)
    chainCoords = [(0, 0), coord]
    state = 1
    length = 1
    while True:
        #-- Turn left, go straight, or turn right with equal probability
        randVal = np.random.randint(low=-1, high=2)
        state = (state + randVal) % 4
        # NOTE: use ==, not 'is', for int comparison; the previous
        # 'state is 0' form only worked due to CPython's small-int cache.
        dx, dy = steps[state]
        coord = (coord[0] + dx, coord[1] + dy)
        #-- A self-avoiding walk fails as soon as it revisits a coordinate
        if coord in chainCoords:
            return None
        chainCoords.append(coord)
        length += 1
        if length == nSteps:
            return chainCoords
def measureChain(chain):
    """Measures the Euclidean distance from the startpoint to endpoint of
    a chain"""
    dx = chain[-1][0] - chain[0][0]
    dy = chain[-1][1] - chain[0][1]
    return np.sqrt(dx**2 + dy**2)
# Shared keyword arguments for smartFormat() used by the *Latex helpers below.
formatDic = {'sigFigs': 4, 'demarc': "", 'threeSpacing': False, 'rightSep':""}
def powerLaw(x, power, multFact, offset):
    """Power law: multFact * x**power + offset."""
    scaled = multFact * (x ** power)
    return scaled + offset
def powerLawLatex(power, multFact=1, offset=0, pcov=None):
    """Return a LaTeX string describing a fitted power law in N."""
    offsetStr = smartFormat(offset, alwaysShowSign=True, **formatDic)
    # Guarantee an explicit leading sign so the terms concatenate cleanly
    if offsetStr[0] not in ("+", "-"):
        offsetStr = "+" + offsetStr
    pieces = [
        r"$",
        smartFormat(multFact, **formatDic),
        r" \cdot N^{",
        smartFormat(power, **formatDic),
        r"} ",
        offsetStr,
        r"$",
    ]
    return "".join(pieces)
def exponential(x, expExponent, multFact=1):
    """Exponential: multFact * exp(expExponent * x) (vectorized over x)."""
    xArr = np.array(x)
    return multFact * np.exp(xArr * expExponent)
def exponentialLatex(expExponent, multFact=1, pcov=None):
    """Return a LaTeX string describing a fitted exponential in N."""
    return (r"$" + smartFormat(multFact, **formatDic)
            + r"\cdot e^{" + smartFormat(expExponent, **formatDic)
            + r"\cdot N}$")
def expPower(x, expExponent, powerLawExp, multFact):
    """Exponential times power law: multFact * exp(expExponent*x) * x**powerLawExp."""
    xArr = np.array(x)
    return multFact * np.exp(xArr * expExponent) * xArr ** powerLawExp
def expPowerLatex(expExponent, powerLawExp, multFact, pcov=None):
    """Build a LaTeX math label describing a fitted exponential*power-law."""
    return "".join([
        r"$",
        smartFormat(multFact, **formatDic),
        r"\cdot e^{",
        smartFormat(expExponent, **formatDic),
        r"\cdot N}\cdot N^{",
        smartFormat(powerLawExp, **formatDic),
        r"}$",
    ])
class SimulationData:
    """Pickle-friendly attribute bag holding all simulation state/results."""

    def __init__(self, **kwargs):
        # Expose every keyword argument as an instance attribute.
        for key, value in kwargs.items():
            setattr(self, key, value)
class Simulation:
def __init__(self):
self.sd = SimulationData()
self.sd.simulationCompleted = False
self.sd.postprocCompleted = False
self.stateFilename = "p7x28_state.pk"
def saveState(self, filename=None):
if filename == None:
filename = self.stateFilename
with open(filename, 'wb') as stateFile:
pickle.dump(self.sd, stateFile, -1)
def loadState(self, filename=None):
if filename == None:
filename = self.stateFilename
with open(filename, 'rb') as stateFile:
self.sd = pickle.load(stateFile)
def runSimulation(self, targetSuccesses=10, stepsRange=(4,50),
plotting=False):
#-- Reset state variables for a new simulation run
self.sd.simulationCompleted = False
self.sd.postprocCompleted = False
timeLastSaved = time.time()
self.sd.targetSuccesses = targetSuccesses
self.sd.stepsInChains = range(stepsRange[0],stepsRange[1])
self.sd.allChainFinalCoords = []
self.sd.allMeanChainFinalCoords = []
self.sd.meanChainFinalCoords = []
self.sd.chainSquareLengthAvg = []
self.sd.successRatio = []
self.sd.timingAvg = []
if plotting:
self.fig1 = plt.figure(1)
self.fig1.clf()
self.ax1 = fig1.add_subplot(111)
line, = ax1.plot([], [], 'ko-', lw=2)
self.ax1.set_xlim(-20,20)
self.ax1.set_ylim(-20,20)
ax1.axis('image')
plt.draw()
for stepsThisChain in self.sd.stepsInChains:
startTime = time.time()
successfulChains = []
chainSquareLengths = []
chainFinalCoords = []
meanChainFinalCoord = []
nSuccesses = 0
trialN = 0
while nSuccesses < self.sd.targetSuccesses:
trialN += 1
chain = createChain(stepsThisChain)
if chain == None:
continue
successfulChains.append(chain)
chain = np.array(chain)
chainSquareLengths.append(measureChain(chain)**2)
chainFinalCoords.append(chain[-1,:])
nSuccesses += 1
if plotting:
line.set_data(chain[:,0],chain[:,1])
self.ax1.set_xlim(-20,20)
self.ax1.set_ylim(-20,20)
plt.draw()
time.sleep(0.005)
chainFinalCoords = np.array(chainFinalCoords)
self.sd.allChainFinalCoords.append(chainFinalCoords)
self.sd.allMeanChainFinalCoords.append(meanChainFinalCoord)
self.sd.meanChainFinalCoord = np.mean(chainFinalCoords, 0)
self.sd.chainSquareLengthAvg.append(np.mean(chainSquareLengths))
self.sd.successRatio.append(nSuccesses / trialN)
self.sd.timingAvg.append( (time.time()-startTime)/nSuccesses )
sys.stdout.write("\nstepsThisChain = " + str(stepsThisChain) + "\n")
sys.stdout.write(" nSuccesses/nTrials = " + str(nSuccesses) + "/"
+ str(trialN) + " = "
+ str(self.sd.successRatio[-1]) + "\n")
sys.stdout.write(" time/success = " +
str(self.sd.timingAvg[-1]) + "\n")
sys.stdout.flush()
if (time.time() - timeLastSaved) > 60*5:
self.saveState()
timeLastSaved = time.time()
self.sd.allMeanChainFinalCoords = \
np.array(self.sd.allMeanChainFinalCoords)
#-- TODO: mean of final-position vector (r_N vector)
#np.sqrt(allMeanChainFinalCoords[:,0]**2+
# allMeanChainFinalCoords[:,1]**2)
self.sd.simulationCompleted = True
self.saveState()
def postproc(self):
"""Perform curve fitting to the data"""
#-- Update state
self.sd.postprocCompleted = False
#-- Check that simulation data is present
if not self.sd.simulationCompleted:
raise Exception("No simulation run; cannot perform curve fit!")
#-- Same x data is used for *all* the below curve fits
x = self.sd.stepsInChains
#============================================================
# Fit success fraction with const * exponential * power law
#============================================================
y = self.sd.successRatio
#-- Weight variance by data size to make small data points equally
# important to fit to as large data points
sigma = list(np.array(y))
p0 = (-0.117, 0.1, 2)
popt1, pcov1 = curve_fit(f=expPower, xdata=x, ydata=y, sigma=sigma,
p0=p0)
self.sd.fit1 = expPower(x, *popt1)
self.sd.fit1eqn = expPowerLatex(*popt1)
print popt1, pcov1, "\n"
#============================================================
# TODO: Fit the final position data
#============================================================
#y = (self.sd.chainLengthAvg)
#sigma = list(np.array(y))
#popt2, pcov2 = curve_fit(powerLaw, x, y, sigma=sigma)
#self.sd.fit2 = powerLaw(x, *popt2)
#self.sd.fit2eqn = powerLawLatex(*popt2)
#print popt2, pcov2, "\n"
#============================================================
# Fit R_N^2 with const * power-law + const
#============================================================
y = self.sd.chainSquareLengthAvg
#-- Weight variance by data size to make small data points equally
# important to fit to as large data points
sigma = list(np.array(y))
popt3, pcov3 = curve_fit(f=powerLaw, xdata=x, ydata=y, sigma=sigma)
self.sd.fit3 = powerLaw(x, *popt3)
self.sd.fit3eqn = powerLawLatex(*popt3)
print popt3, pcov3, "\n"
#============================================================
# Exponential fit to wall-clock time (not as good a fit as
# exp*power, so this is commented out)
#============================================================
#y = (self.sd.timingAvg)
##p0 = (0.0985, 0.1, 1.65e-5)
#p0 = (0.0985, 1)
#sigma = list(np.array(y))
#popt4, pcov4 = curve_fit(f=exponential, xdata=x, ydata=y, sigma=sigma,
# p0=p0, )
#self.sd.fit4 = exponential(x, *popt4)
#self.sd.fit4eqn = exponentialLatex(*popt4)
#print popt4, pcov4, "\n"
#============================================================
# Exponential * power-law fit to wall-clock time
#============================================================
y = self.sd.timingAvg
#-- Initial guess
p0 = (0.129, 0, 2.981e-3)
#-- Weight variance by data size to make small data points equally
# important to fit to as large data points
sigma = list(np.array(y))
popt4, pcov4 = curve_fit(f=expPower, xdata=x, ydata=y, sigma=sigma,
p0=p0, )
self.sd.fit4 = expPower(x, *popt4)
self.sd.fit4eqn = expPowerLatex(*popt4)
print popt4, pcov4, "\n"
#-- Update state
self.sd.postprocCompleted = True
def plotResults(self, savePlot=True):
"""Plot the data and the fit curves"""
if not self.sd.simulationCompleted:
raise Exception("No simulation has been run; cannot plot results!")
if not self.sd.postprocCompleted:
self.postproc()
self.fig2 = plt.figure(2, figsize=(7,12), dpi=80)
self.fig2.clf()
self.ax21 = self.fig2.add_subplot(311)
self.ax21.plot(self.sd.stepsInChains, self.sd.successRatio,
'bo', label="data", markersize=4)
self.ax21.plot(self.sd.stepsInChains, self.sd.fit1,
'r-', label=self.sd.fit1eqn, linewidth=2, alpha=0.75)
self.ax21.set_title(
"Non-intersecting 2D random-walk chains;" +
" stop condition: " + str(self.sd.targetSuccesses) +
" successfully-built chains")
self.ax21.set_ylabel(r"Success fraction $f(N)$")
self.ax21.set_yscale('log')
self.ax21.grid(which='major', b=True)
self.ax21.legend(loc="best", fancybox=True, shadow=True)
#-- TODO: average of final position plot
#self.ax22 = fig2.add_subplot(412)
#self.ax22.plot(self.sd.stepsInChains, self.sd.chainLengthAvg,
# 'bo', label="data", markersize=4)
#self.ax22.plot(self.sd.stepsInChains, self.sd.fit2,
# 'r-', label=self.sd.fit2eqn, linewidth=2, alpha=0.75)
#self.ax22.set_ylabel(r"$\langle R_N \rangle$")
##self.ax22.set_yscale('log')
#ax22.grid(which='major', b=True)
#ax22.legend(loc="best", fancybox=True, shadow=True)
self.ax23 = self.fig2.add_subplot(312)
self.ax23.plot(self.sd.stepsInChains, self.sd.chainSquareLengthAvg,
'bo', label="data", markersize=4)
self.ax23.plot(self.sd.stepsInChains, self.sd.fit3,
'r-', label=self.sd.fit3eqn, linewidth=2, alpha=0.75)
self.ax23.set_ylabel(r"$\langle R_N^2\rangle$")
self.ax23.grid(which='major', b=True)
self.ax23.legend(loc="upper left", fancybox=True, shadow=True)
self.ax24 = self.fig2.add_subplot(313)
self.ax24.plot(self.sd.stepsInChains, self.sd.timingAvg,
'bo', label="data", markersize=4)
self.ax24.plot(self.sd.stepsInChains, self.sd.fit4,
'r-', label=self.sd.fit4eqn, linewidth=2, alpha=0.75)
self.ax24.set_xlabel(r"Nmber of steps in walk, $N$")
self.ax24.set_ylabel("Wall-clock time per successful chain (s)")
self.ax24.set_yscale('log')
self.ax24.grid(which='major', b=True)
self.ax24.legend(loc="upper left", fancybox=True, shadow=True)
self.fig2.tight_layout()
if savePlot:
self.fig2.savefig("2014-01-14_problem7x28_plots.pdf")
self.fig2.savefig("2014-01-14_problem7x28_plots.png", dpi=120)
plt.show()
if __name__ == "__main__":
#-- Instantiate the Simulation object
sim = Simulation()
#-- Try to load the sim data from any previous run; if no data saved
# to disk in the default location, run a new simulation
try:
sim.loadState()
except Exception as e:
print "Error({0}: {1}".format(e.errno, e.strerror)
sim.runSimulation(targetSuccesses=500, stepsRange=(4,101))
#-- *Always* perform post-processing and plotting (allows easy modification
# of the postprocessing (curve fitting) and plotting routines
# without needing to re-run the simulation, which can take hours)
sim.postproc()
sim.plotResults()
| |
'''
This class defines the graphical widget which allows for viewing
and editing of models on a scrollable, zoomable canvas, with custom
drawing styles available to each model.
'''
# Module metadata for the ROSMOD model-editor canvas widget.
__author__ = 'William Emfinger'
__copyright__ = 'Copyright 2016, ROSMOD'
__credits__ = ['William Emfinger', 'Pranav Srinivas Kumar']
__license__ = 'GPL'
__version__ = '0.4'
__maintainer__ = 'William Emfinger'
__email__ = 'emfinger@isis.vanderbilt.edu'
__status__ = 'Production'
from PyQt4 import QtCore
from PyQt4 import QtGui
import view_attributes as view_attr
from layout import layout_create, valid_layouts
from graphics_items import RoundRectItem, TextItem
from attribute_editors import\
FileEditor,\
CodeEditor,\
ReferenceEditor
# TODO: Make ItemDelegate work for dictionary editor created in
# attribute editor
# TODO: there is a dependency between the text size and the item size
# of EditorItem because it's all max-based, when the text
# shrinks the item should shrink too but can't because the rect
# is preventing it
class EditorItemWidget(QtGui.QWidget):
    '''Plain QWidget wrapper around an :class:`EditorItem`, allowing the
    graphics item to be configured and used automatically with
    :class:`EditorItemDelegate` (e.g. as a QDataWidgetMapper target).
    '''

    def __init__(self, parent=None, ei=None):
        QtGui.QWidget.__init__(self, parent)
        # The wrapped graphics item; the delegate reaches it through here.
        self.item = ei
class EditorItemDelegate(QtGui.QItemDelegate):
    '''Handles the mapping between :class:`EditorItem`'s data and the
    model's data.  Ensures that whenever the item's data are edited,
    the model's data get updated and vice-versa.  This enables, for
    instance, the graphics object's text label changing whenever its
    associated model changes.
    '''

    def eventFilter(self, editor, event):
        '''Required to allow tabbing in a
        :class:`attribute_editors.CodeEditor`.

        For a CodeEditor, only focus events are passed to the default
        filter, so Tab key presses reach the editor instead of moving
        focus.
        '''
        if type(editor) == CodeEditor and type(event) is not QtGui.QFocusEvent:
            return False
        else:
            return super(EditorItemDelegate, self).eventFilter(editor, event)

    def setEditorData(self, editor, index):
        '''Push the model data at `index` into the given editor widget.'''
        if type(editor) == EditorItemWidget:
            text = index.data().toString()
            # SET THE EDITOR ITEM TEXT HERE
            editor.item._label.setPlainText(text)
            editor.item.updateGraphicsItem()
            return
        elif type(editor) == FileEditor:
            text = str(index.data().toString())
            editor.set_file_name(text)
            # BUGFIX: return like every other handled branch; previously this
            # fell through to the default QItemDelegate implementation after
            # the file name had already been set.
            return
        elif type(editor) == CodeEditor:
            text = str(index.data().toString())
            editor.setPlainText(text)
            return
        elif type(editor) == ReferenceEditor:
            editor.setCurrentReference(index.data().toPyObject())
            return
        # Unknown editor types use the default property-based mapping.
        return super(EditorItemDelegate, self).setEditorData(editor, index)

    def setModelData(self, editor, model, index):
        '''Write the editor widget's current value back into the model.'''
        if (type(editor) == EditorItemWidget or
                type(editor) == QtGui.QWidget):
            # The graphics-item wrapper is read-only from the model's view.
            return
        elif type(editor) == FileEditor:
            text = str(editor.file_name())
            model.setData(index, text)
            return
        elif type(editor) == CodeEditor:
            text = editor.toPlainText()
            model.setData(index, text)
            return
        elif type(editor) == ReferenceEditor:
            data = editor.itemData(
                editor.currentIndex(),
                editor.getRootItemModel().reference_role
            ).toPyObject()
            model.setData(
                index,
                data
            )
            return
        return super(EditorItemDelegate,
                     self).setModelData(editor, model, index)
class EditorItem(QtGui.QGraphicsWidget):
    '''
    Graphical representation of the data-model.

    Wraps a model index in a QGraphicsWidget: a RoundRectItem background
    plus a TextItem label, kept in sync with the model through a
    QDataWidgetMapper and :class:`EditorItemDelegate`.
    '''
    def __init__(self,
                 index,
                 parent=None):
        super(EditorItem, self).__init__(parent)
        # Perhaps need an ItemDelegate/StyledItemDelegate
        # which transforms data from editor to model and back
        # perhaps just point this to the ItemModel()?
        self.modelindex = index
        # Map model columns onto self.itemWidget so edits propagate
        # both ways via EditorItemDelegate.
        self.dataMapper = QtGui.QDataWidgetMapper()
        self.dataMapper.setModel(self.modelindex.model())
        self.dataMapper.setOrientation(QtCore.Qt.Vertical)
        self.dataMapper.setRootIndex(self.modelindex)
        self.dataMapper.setCurrentIndex(1)
        self.delegate = EditorItemDelegate(self)
        self.dataMapper.setItemDelegate(self.delegate)
        self.itemWidget = EditorItemWidget(None, self)
        self.dataMapper.addMapping(self.itemWidget, 0)
        # graphics item which represents
        self._item = None
        # text label of this item
        item = self.modelindex.model().getModel(self.modelindex)
        self._label = TextItem(item['Name'], parent=self)
        self.loadResources()
        # self.setAcceptDrops(True)
        self.setAcceptHoverEvents(True)
        self.initializeFlags()
        self.updateGraphicsItem()
    def viewModel(self):
        # Per-kind drawing-style configuration, shared via the scene.
        return self.scene().viewModel()
    def updateLabel(self, width, height):
        # Only recomputes the label geometry; alignment/positioning from the
        # view model is currently disabled (see string below).
        self._label.updateGeometry()
        '''
        self._label.setAlignment(
            self.viewModel()[item.kind()]['text horizontal alignment'],
            self.viewModel()[item.kind()]['text vertical alignment']
        )
        self._label.setPos(
            self.viewModel()['text location'],
            self.pos(),
            width,
            height
        )
        '''
    def createItem(self, width, height):
        # (Re)build the background shape; style-driven variants (icon,
        # rect, ellipse) are currently disabled (see string below).
        self._item = RoundRectItem(0, 0, width, height)
        self._item.setBrush(QtGui.QColor('light blue'))
        '''
        draw_style = self.viewModel()['draw style']
        if self.viewModel()['icon'] and draw_style == 'icon':
            self._item = QtGui.QGraphicsPixmapItem()
            self._item.setPixmap(
                QtGui.QPixmap(self.viewModel()['icon']).scaled(width,height)
            )
        else:
            if draw_style == 'rect':
                self._item = QtGui.QGraphicsRectItem(0,0,width,height)
            elif draw_style == 'ellipse':
                self._item = QtGui.QGraphicsEllipseItem(0,0,width,height)
            elif draw_style == 'round rect':
                self._item = RoundRectItem(0,0,width,height)
            if self._item:
                self._item.setBrush(QtGui.QColor(self.viewModel()['color']))
        '''
    def loadResources(self):
        # Install the child layout, then size the label and background
        # from the resulting size hint.
        self.setLayout(layout_create('horizontal'))
        '''
        new_layout = layout_create(self.viewModel()['layout style'])
        if type(self.layout()) != type(new_layout):
            new_layout.fromLayout(self.layout())
            self.setLayout(new_layout)
        '''
        sh = self.sizeHint(QtCore.Qt.SizeHint(), QtCore.QSizeF())
        width = sh.width()
        height = sh.height()
        self.updateLabel(width, height)
        self.createItem(width, height)
    def paint(self, painter, option, widget=None):
        # Paint the widget, then the background shape on top of it.
        super(EditorItem, self).paint(painter, option, widget)
        if self._item:
            self._item.paint(painter, option, widget)
    def boundingRect(self):
        # Union of the background shape's and the label's bounding rects.
        minx = 0
        miny = 0
        maxx = 0
        maxy = 0
        if self._item:
            brect = self._item.boundingRect()
            minx = min(brect.x(),minx)
            miny = min(brect.y(),miny)
            maxx = max(maxx, brect.x() + brect.width())
            maxy = max(maxy, brect.y() + brect.height())
        if self._label:
            brect = self._label.boundingRect()
            minx = min(brect.x(), minx)
            miny = min(brect.y(), miny)
            maxx = max(maxx, brect.x() + brect.width())
            maxy = max(maxy, brect.y() + brect.height())
        retRect = QtCore.QRectF(minx, miny, maxx-minx, maxy-miny)
        return retRect
    def sizeHint(self, which, constraint):
        # Max of the layout's hint and the current bounding rect, with a
        # hard-coded 50x50 minimum (view-model-driven sizes disabled).
        shw = 0
        shh = 0
        sh = self.layout().sizeHint(which, constraint)
        shw = sh.width()
        shh = sh.height()
        shw = max(shw, self.boundingRect().width())
        shh = max(shh, self.boundingRect().height())
        return QtCore.QSizeF(
            max(shw, 50),  # self.viewModel()['width']),
            max(shh, 50)  # self.viewModel()['height'])
        )
    def updateGraphicsItem(self):
        # Recompute geometry, rebuild label/background, and repaint.
        # self.layout().activate()
        self.prepareGeometryChange()
        sh = self.sizeHint(QtCore.Qt.SizeHint(), QtCore.QSizeF())
        width = sh.width()
        height = sh.height()
        self.updateLabel(width, height)
        self.createItem(width, height)
        self.updateGeometry()
        self.update()
    def updateChild(self, child):
        self.layout().updateItem(child)
        self.updateGraphicsItem()
    def removeChild(self, child):
        # Should this just point down to the underlying model's
        # removeRows() method and then let the updating take effect?
        self.layout().removeItem(child)
        self.updateGraphicsItem()
    def addChild(self, child):
        # Should this just point down to the underlying model's
        # insertRows() method and then let the updating take effect?
        self.layout().addItem(child)
        self.updateGraphicsItem()
    def isMovable(self):
        return bool(self.flags() & QtGui.QGraphicsItem.ItemIsMovable)
    def mouseDoubleClickEvent(self, event):
        # Double-click opens the shared attribute editor, bound to this
        # item's data mapper.
        QtGui.QGraphicsWidget.mouseDoubleClickEvent(self, event)
        '''
        e = QtGui.QMouseEvent(
            QtCore.QEvent.GraphicsSceneMouseDoubleClick,
            event.screenPos(),
            event.button(),
            event.buttons(),
            event.modifiers()
        )
        self.itemWidget.mouseDoubleClickEvent(e)
        '''
        editor = self.scene().parent().getEditor()
        editor.setDataMapper(self.dataMapper)
        editor.update()
        editor.show()
        editor.raise_()
    def updateAttributes(self, attrs):
        # NOTE(review): `attrs` is currently unused; the item just reloads
        # its resources and redraws.
        self.loadResources()
        self.updateGraphicsItem()
    '''
    BELOW HERE ARE NOT AS RELEVANT RIGHT NOW
    '''
    def initializeFlags(self):
        self.setFlag(QtGui.QGraphicsItem.ItemIsMovable)
        self.setFlag(QtGui.QGraphicsItem.ItemIsSelectable)
        self.setFlag(QtGui.QGraphicsItem.ItemSendsGeometryChanges)
        self.setFlag(QtGui.QGraphicsItem.ItemSendsScenePositionChanges)
    def getAnchors(self):
        # Named anchor points on the bounding rect (corners, edge centers),
        # e.g. for attaching connection lines.
        a = self.boundingRect()
        anchorList = {
            'bottom left': a.bottomLeft(),
            'bottom right': a.bottomRight(),
            'top left': a.topLeft(),
            'top right': a.topRight(),
            'center left': (a.topLeft() + a.bottomLeft()) / 2.0,
            'center right': (a.topRight() + a.bottomRight()) / 2.0,
            'top center': (a.topLeft() + a.topRight()) / 2.0,
            'bottom center': (a.bottomLeft() + a.bottomRight()) / 2.0
        }
        return anchorList
    def contextMenuEvent(self, event):
        # Offer 'Delete' only when the model's cardinality allows zero
        # children of this kind, plus one 'Add new …' action per allowed
        # child type.
        menu = QtGui.QMenu()
        item = self.modelindex.model().getModel(self.modelindex)
        if item.parent and \
           '0' in item.parent.children.get_cardinality_of(type(item)):
            delSelf = QtGui.QAction('Delete', self)
            delSelf.triggered.connect(self.delete)
            menu.addAction(delSelf)
        for a in item.children.allowed():
            addAction = QtGui.QAction('Add new {}'.format(a.__name__), self)
            addAction.triggered.connect(self.addNewItem(a))
            menu.addAction(addAction)
        menu.exec_(event.screenPos())
    def addNewItem(self, _type):
        # Returns a slot (closure) that inserts a new child of `_type`.
        def genericItem(e):
            self.modelindex.model().insertRows(0, 1, self.modelindex, _type)
        return genericItem
    def delete(self, event):
        # What should this method do?
        # Should this just point down to the underlying model's
        # removeRows() method and then let the updating take effect?
        # Always delete index 0 because each delete() shrinks the layout.
        for i in range(self.layout().count()):
            self.layout().itemAt(0).delete(None)
        if self.scene():
            self.scene().removeItem(self)
    def mousePressEvent(self, event):
        QtGui.QGraphicsWidget.mousePressEvent(self, event)
    def mouseMoveEvent(self, event):
        QtGui.QGraphicsWidget.mouseMoveEvent(self, event)
    def mouseReleaseEvent(self, event):
        QtGui.QGraphicsWidget.mouseReleaseEvent(self, event)
    def hoverEnterEvent(self, event):
        QtGui.QGraphicsWidget.hoverEnterEvent(self, event)
    def hoverLeaveEvent(self, event):
        QtGui.QGraphicsWidget.hoverLeaveEvent(self, event)
| |
# -*- coding: utf-8 -*-
"""Tools for handling LaTeX."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from io import BytesIO, open
import os
import tempfile
import shutil
import subprocess
from base64 import encodebytes
import textwrap
from IPython.utils.process import find_cmd, FindCmdError
from traitlets.config import get_config
from traitlets.config.configurable import SingletonConfigurable
from traitlets import List, Bool, Unicode
from IPython.utils.py3compat import cast_unicode
class LaTeXTool(SingletonConfigurable):
    """An object to store configuration of the LaTeX tool.

    Singleton: use ``LaTeXTool.instance()`` to obtain the shared,
    user-configurable instance.
    """
    def _config_default(self):
        # Pick up the process-wide traitlets configuration.
        return get_config()

    # Ordered list of rendering backends; the first usable one wins.
    backends = List(
        Unicode(), ["matplotlib", "dvipng"],
        help="Preferred backend to draw LaTeX math equations. "
        "Backends in the list are checked one by one and the first "
        "usable one is used.  Note that `matplotlib` backend "
        "is usable only for inline style equations.  To draw  "
        "display style equations, `dvipng` backend must be specified. ",
        # It is a List instead of Enum, to make configuration more
        # flexible.  For example, to use matplotlib mainly but dvipng
        # for display style, the default ["matplotlib", "dvipng"] can
        # be used.  To NOT use dvipng so that other repr such as
        # unicode pretty printing is used, you can use ["matplotlib"].
        ).tag(config=True)

    use_breqn = Bool(
        True,
        help="Use breqn.sty to automatically break long equations. "
        "This configuration takes effect only for dvipng backend.",
        ).tag(config=True)

    packages = List(
        ['amsmath', 'amsthm', 'amssymb', 'bm'],
        help="A list of packages to use for dvipng backend. "
        "'breqn' will be automatically appended when use_breqn=True.",
        ).tag(config=True)

    preamble = Unicode(
        help="Additional preamble to use when generating LaTeX source "
        "for dvipng backend.",
        ).tag(config=True)
def latex_to_png(s, encode=False, backend=None, wrap=False, color='Black',
                 scale=1.0):
    """Render a LaTeX string to PNG.

    Parameters
    ----------
    s : str
        The raw string containing valid inline LaTeX.
    encode : bool, optional
        Should the PNG data base64 encoded to make it JSON'able.
    backend : {matplotlib, dvipng}
        Backend for producing PNG data.
    wrap : bool
        If true, Automatically wrap `s` as a LaTeX equation.
    color : string
        Foreground color name among dvipsnames, e.g. 'Maroon' or on hex RGB
        format, e.g. '#AA20FA'.
    scale : float
        Scale factor for the resulting PNG.
    None is returned when the backend cannot be used.
    """
    s = cast_unicode(s)
    usable = LaTeXTool.instance().backends
    if backend is None:
        backend = usable[0]
    if backend not in usable:
        return None
    if backend == 'matplotlib':
        render = latex_to_png_mpl
    elif backend == 'dvipng':
        render = latex_to_png_dvipng
        # dvipng wants dvips color names or an explicit RGB triple, so
        # translate a '#RRGGBB' hex spec into "RGB r g b".
        if color.startswith('#'):
            if len(color) == 7:
                try:
                    channels = [str(int(part, 16))
                                for part in textwrap.wrap(color[1:], 2)]
                    color = "RGB {}".format(" ".join(channels))
                except ValueError:
                    raise ValueError('Invalid color specification {}.'.format(color))
            else:
                raise ValueError('Invalid color specification {}.'.format(color))
    else:
        raise ValueError('No such backend {0}'.format(backend))
    data = render(s, wrap, color, scale)
    if encode and data:
        data = encodebytes(data)
    return data
def latex_to_png_mpl(s, wrap, color='Black', scale=1.0):
    """Render LaTeX to PNG bytes with matplotlib's mathtext engine.

    Returns None when matplotlib is unavailable or the input cannot be
    parsed.
    """
    try:
        from matplotlib import mathtext
        from pyparsing import ParseFatalException
    except ImportError:
        return None

    # mpl mathtext doesn't support display math, force inline
    s = s.replace('$$', '$')
    if wrap:
        s = u'${0}$'.format(s)

    try:
        parser = mathtext.MathTextParser('bitmap')
        buf = BytesIO()
        parser.to_png(buf, s, fontsize=12, dpi=120*scale, color=color)
    except (ValueError, RuntimeError, ParseFatalException):
        return None
    return buf.getvalue()
def latex_to_png_dvipng(s, wrap, color='Black', scale=1.0):
    """Render LaTeX to PNG bytes via the external latex + dvipng tools.

    Returns the PNG bytes, or None when latex/dvipng are not on PATH or
    the LaTeX/dvipng run fails.  The temporary working directory is
    always removed.
    """
    try:
        find_cmd('latex')
        find_cmd('dvipng')
    except FindCmdError:
        return None
    # BUGFIX: create the workdir *before* entering the try block.  When
    # mkdtemp() was the first statement inside the try and it raised, the
    # finally clause referenced an unbound `workdir` and masked the real
    # error with a NameError.
    workdir = tempfile.mkdtemp()
    try:
        tmpfile = os.path.join(workdir, "tmp.tex")
        dvifile = os.path.join(workdir, "tmp.dvi")
        outfile = os.path.join(workdir, "tmp.png")

        with open(tmpfile, "w", encoding='utf8') as f:
            f.writelines(genelatex(s, wrap))

        with open(os.devnull, 'wb') as devnull:
            subprocess.check_call(
                ["latex", "-halt-on-error", "-interaction", "batchmode",
                 tmpfile],
                cwd=workdir, stdout=devnull, stderr=devnull)

            resolution = round(150*scale)
            subprocess.check_call(
                ["dvipng", "-T", "tight", "-D", str(resolution), "-z", "9",
                 "-bg", "transparent", "-o", outfile, dvifile, "-fg", color],
                cwd=workdir, stdout=devnull, stderr=devnull)

        with open(outfile, "rb") as f:
            return f.read()
    except subprocess.CalledProcessError:
        return None
    finally:
        shutil.rmtree(workdir)
def kpsewhich(filename):
    """Invoke kpsewhich command with an argument `filename`.

    Returns the resolved path as text, or None when kpsewhich is not
    available.
    """
    try:
        find_cmd("kpsewhich")
    except FindCmdError:
        return None
    proc = subprocess.Popen(
        ["kpsewhich", filename],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, _ = proc.communicate()
    return out.strip().decode('utf8', 'replace')
def genelatex(body, wrap):
    """Generate LaTeX document for dvipng backend.

    Yields the document line by line; `wrap` controls whether `body` is
    wrapped as display math (using breqn.sty when available).
    """
    tool = LaTeXTool.instance()
    with_breqn = wrap and tool.use_breqn and kpsewhich("breqn.sty")
    pkgs = tool.packages
    if with_breqn:
        pkgs = pkgs + ['breqn']
    yield r'\documentclass{article}'
    for pack in pkgs:
        yield r'\usepackage{{{0}}}'.format(pack)
    yield r'\pagestyle{empty}'
    if tool.preamble:
        yield tool.preamble
    yield r'\begin{document}'
    if with_breqn:
        yield r'\begin{dmath*}'
        yield body
        yield r'\end{dmath*}'
    elif wrap:
        yield u'$${0}$$'.format(body)
    else:
        yield body
    yield u'\\end{document}'
# %-format template embedding base64 PNG bytes into an HTML <img> data URI.
_data_uri_template_png = u"""<img src="data:image/png;base64,%s" alt=%s />"""
def latex_to_html(s, alt='image'):
    """Render LaTeX to HTML with embedded PNG data using data URIs.

    Parameters
    ----------
    s : str
        The raw string containing valid inline LateX.
    alt : str
        The alt text to use for the HTML.

    Returns None when no backend could render the input.
    """
    # BUGFIX: latex_to_png() returns None when rendering fails; the old
    # code called .decode('ascii') on it unconditionally, raising
    # AttributeError instead of returning None.
    bin_data = latex_to_png(s, encode=True)
    if bin_data is None:
        return None
    base64_data = bin_data.decode('ascii')
    if base64_data:
        return _data_uri_template_png % (base64_data, alt)
| |
"""Class to consolidate distributed redmapper run.
"""
import os
import numpy as np
import glob
import fitsio
import re
import healpy as hp
import esutil
from ..configuration import Configuration
from ..volumelimit import VolumeLimitMask, VolumeLimitMaskFixed
from ..catalog import Catalog, Entry
from ..utilities import read_members, astro_to_sphere
from ..plotting import SpecPlot, NzPlot, NLambdaPlot, PositionPlot
from ..galaxy import GalaxyCatalog
class RedmapperConsolidateTask(object):
    """
    Class to consolidate a distributed redmapper run.

    This class looks for files of the specific format in the specified
    directory and consolidates, while applying richness and volume-limited
    cuts.
    """

    def __init__(self, configfile, lambda_cuts=None, vlim_lstars=[], path=None):
        """
        Instantiate a RedmapperConsolidateTask.

        Parameters
        ----------
        configfile: `str`
            Configuration yaml file.
        lambda_cuts: `list`, optional
            Richness cuts to apply.  Default is None.  If None,
            use the values in self.config.consolidate_lambda_cuts
        vlim_lstars: `list`, optional
            Volume-limit luminosity cuts to apply.  Default is [].
            If [] then use the values in self.config.consolidate_vlim_lstars.
            (Read-only; the mutable default is never modified.)
        path: `str`, optional
            Path to look for files.  Default is config file path.
        """
        if path is None:
            outpath = os.path.dirname(os.path.abspath(configfile))
        else:
            outpath = path

        # BUGFIX: pass the computed `outpath`; previously this passed
        # `outpath=path`, which silently dropped the config-file-directory
        # default whenever path was None.
        self.config = Configuration(configfile, outpath=outpath)

        if lambda_cuts is None:
            self.lambda_cuts = self.config.consolidate_lambda_cuts
        else:
            self.lambda_cuts = lambda_cuts

        if len(vlim_lstars) == 0:
            if len(self.config.consolidate_vlim_lstars) == 0:
                self.vlim_lstars = []
            else:
                self.vlim_lstars = self.config.consolidate_vlim_lstars
        else:
            self.vlim_lstars = vlim_lstars

    def run(self, do_plots=True, match_spec=True):
        """
        Run the redmapper consolidation task.

        Parameters
        ----------
        do_plots: `bool`, optional
            Make diagnostic plots.  Default is True.
        match_spec: `bool`, optional
            Match cluster centrals and members to spectra.
            Default is True.
        """
        # find the files
        catfiles = sorted(glob.glob(os.path.join(self.config.outpath, '%s_*_?????_final_catalog.fit' % (self.config.outbase))))
        self.config.logger.info("Found %d catalog files in %s" % (len(catfiles), self.config.outpath))

        # Extract the nside that was run
        m = re.search(r'_(\d+)_(\d\d\d\d\d)_', catfiles[0])
        if m is None:
            raise RuntimeError("Could not understand filename for %s" % (catfiles[0]))
        nside = int(m.groups()[0])

        if match_spec and not self.config.has_truth:
            spec = GalaxyCatalog.from_fits_file(self.config.specfile)
            # Keep only high-confidence redshifts for matching.
            use, = np.where(spec.z_err < 0.001)
            spec = spec[use]

        # FIXME: add check for the number of files that are here!

        # Set the "outbase" name
        # Note that the vlim will not be per lambda!
        # If we have vlim set, we need to make the vlims...
        if len(self.vlim_lstars) > 0:
            vlim_masks = []
            vlim_areas = []
            for vlim_lstar in self.vlim_lstars:
                vlim_masks.append(VolumeLimitMask(self.config, vlim_lstar))
                vlim_areas.append(vlim_masks[-1].get_areas())
        else:
            # No luminosity cuts: use the fixed-zmax placeholder mask.
            vlim_masks = [VolumeLimitMaskFixed(self.config)]
            vlim_areas = [vlim_masks[0].get_areas()]

        # started[i, j] records whether output files for the (lambda cut,
        # vlim mask) combination have been created yet.
        started = np.zeros((len(self.lambda_cuts), len(vlim_masks)), dtype=bool)
        cat_filename_dict = {}

        # Unique counter for temporary ids
        ctr = 0
        all_ids = np.zeros(0, dtype=np.int32)
        all_likelihoods = np.zeros(0, dtype=np.float64)

        for catfile in catfiles:
            # Read in catalog
            self.config.logger.info("Reading %s" % (os.path.basename(catfile)))
            cat = Catalog.from_fits_file(catfile, ext=1)
            # and read in members
            mem = read_members(catfile)

            # Extract pixnum from name
            m = re.search(r'_(\d+)_(\d\d\d\d\d)_', catfile)
            if m is None:
                raise RuntimeError("Could not understand filename for %s" % (catfile))
            hpix = int(m.groups()[1])

            if match_spec and not self.config.has_truth:
                # match spec to cat and mem
                cat.cg_spec_z[:] = -1.0
                i0, i1, dists = spec.match_many(cat.ra, cat.dec, 3./3600., maxmatch=1)
                cat.cg_spec_z[i0] = spec.z[i1]

                mem.zspec[:] = -1.0
                i0, i1, dists = spec.match_many(mem.ra, mem.dec, 3./3600., maxmatch=1)
                mem.zspec[i0] = spec.z[i1]

            if self.config.has_truth:
                # Need to match to the truth catalog
                truthcat = GalaxyCatalog.from_galfile(self.config.galfile, hpix=hpix, nside=nside, border=0.0, truth=True)
                cat.cg_spec_z[:] = -1.0
                i0, i1, dists = truthcat.match_many(cat.ra, cat.dec, 1./3600., maxmatch=1)
                cat.cg_spec_z[i0] = truthcat.ztrue[i1]

                mem.zspec[:] = -1.0
                i0, i1, dists = truthcat.match_many(mem.ra, mem.dec, 1./3600., maxmatch=1)
                mem.zspec[i0] = truthcat.ztrue[i1]

            # Note: when adding mock support, read in true galaxies to get members

            scaleval = cat.scaleval if self.config.select_scaleval else 1.0

            # Figure out which clusters are in the pixel, less than
            # max_maskfrac, and above percolation_minlambda (with or without
            # scale)
            theta, phi = astro_to_sphere(cat.ra, cat.dec)
            ipring = hp.ang2pix(nside, theta, phi)

            use, = np.where((ipring == hpix) &
                            (cat.maskfrac < self.config.max_maskfrac) &
                            (cat.Lambda > self.config.percolation_minlambda))
            if use.size == 0:
                self.config.logger.info('Warning: no good clusters in pixel %d' % (hpix))
                continue

            cat = cat[use]

            # match catalog with members via mem_match_id
            a, b = esutil.numpy_util.match(cat.mem_match_id, mem.mem_match_id)

            # put in new, temporary IDs (globally unique across pixels)
            cat.mem_match_id = np.arange(use.size) + ctr
            mem.mem_match_id[b] = cat.mem_match_id[a]
            ctr += use.size

            # Cut down members
            mem = mem[b]

            # Append ALL IDs and likelihoods for sorting here
            all_ids = np.append(all_ids, cat.mem_match_id)
            all_likelihoods = np.append(all_likelihoods, cat.lnlamlike)

            # loop over minlambda
            for i, minlambda in enumerate(self.lambda_cuts):
                # loop over vlim_masks
                # (note that if we have no mask we have the "fixed" mask placeholder)
                for j, vlim_mask in enumerate(vlim_masks):
                    cat_use, = np.where((vlim_mask.calc_zmax(cat.ra, cat.dec) > cat.z_lambda) &
                                        ((cat.Lambda / scaleval) > minlambda))
                    if cat_use.size == 0:
                        continue

                    _, mem_use = esutil.numpy_util.match(cat.mem_match_id[cat_use], mem.mem_match_id)

                    if not started[i, j]:
                        # Figure out filename
                        if len(self.vlim_lstars) > 0:
                            extraname = 'lgt%02d_vl%02d' % (minlambda, int(self.vlim_lstars[j]*10))
                        else:
                            extraname = 'lgt%02d' % (minlambda)
                        cat_fname = self.config.redmapper_filename(extraname + '_catalog', withversion=True)
                        mem_fname = self.config.redmapper_filename(extraname + '_catalog_members', withversion=True)
                        parts = os.path.basename(cat_fname).split('_catalog')
                        cat_filename_dict[(i, j)] = (parts[0], cat_fname, mem_fname)

                        # Write out new fits files...
                        cat.to_fits_file(cat_fname, clobber=True, indices=cat_use)
                        mem.to_fits_file(mem_fname, clobber=True, indices=mem_use)
                        started[i, j] = True
                    else:
                        # Append to existing fits files
                        with fitsio.FITS(cat_filename_dict[(i, j)][1], mode='rw') as fits:
                            fits[1].append(cat._ndarray[cat_use])
                        with fitsio.FITS(cat_filename_dict[(i, j)][2], mode='rw') as fits:
                            fits[1].append(mem._ndarray[mem_use])

        # Sort and renumber: highest lnlamlike gets id 1.
        st = np.argsort(all_likelihoods)[::-1]
        all_ids_sorted = all_ids[st]
        new_ids = np.arange(all_ids.size) + 1

        # Now we have the index of all_ids_sorted to new_ids
        # Read in specified columns and overwrite in each case
        for i, minlambda in enumerate(self.lambda_cuts):
            for j, vlim_mask in enumerate(vlim_masks):
                # NOTE(review): if no cluster ever passed the (i, j) cut,
                # cat_filename_dict has no entry here and this raises
                # KeyError -- confirm whether that combination can occur.
                catfits = fitsio.FITS(cat_filename_dict[(i, j)][1], mode='rw')
                memfits = fitsio.FITS(cat_filename_dict[(i, j)][2], mode='rw')
                cat_ids = catfits[1].read_column('mem_match_id')
                mem_ids = memfits[1].read_column('mem_match_id')

                # Every time we see an id we replace it
                aa, bb = esutil.numpy_util.match(all_ids_sorted, cat_ids)
                cat_ids[bb] = new_ids[aa]
                aa, bb = esutil.numpy_util.match(all_ids_sorted, mem_ids)
                mem_ids[bb] = new_ids[aa]

                catfits[1].write_column('mem_match_id', cat_ids)
                memfits[1].write_column('mem_match_id', mem_ids)
                catfits.close()
                memfits.close()

                if do_plots:
                    # We want to plot the zspec plot and the n(z) plot
                    cat = Catalog.from_fits_file(cat_filename_dict[(i, j)][1])
                    self.config.d.outbase = cat_filename_dict[(i, j)][0]

                    specplot = SpecPlot(self.config)
                    if self.config.has_truth:
                        mem = Catalog.from_fits_file(cat_filename_dict[(i, j)][2])
                        specplot.plot_cluster_catalog_from_members(cat, mem, title=self.config.d.outbase, withversion=False)
                    else:
                        specplot.plot_cluster_catalog(cat, title=self.config.d.outbase, withversion=False)

                    nzplot = NzPlot(self.config)
                    nzplot.plot_cluster_catalog(cat, vlim_areas[j])

                    nlamplot = NLambdaPlot(self.config)
                    nlamplot.plot_cluster_catalog(cat)

                    posplot = PositionPlot(self.config)
                    posplot.plot_cluster_catalog(cat)
class RuncatConsolidateTask(object):
    """
    Class to consolidate a distributed runcat run.

    This class looks for files of the specific format in the specified
    directory and consolidates them into a single catalog file (and,
    optionally, a single member file).  No richness or volume-limited
    cuts are applied.
    """

    def __init__(self, configfile, path=None):
        """
        Instantiate a RuncatConsolidateTask.

        Parameters
        ----------
        configfile: `str`
            Configuration yaml file.
        path: `str`, optional
            Path to look for files.  Default is config file path.
        """
        if path is None:
            outpath = os.path.dirname(os.path.abspath(configfile))
        else:
            outpath = path
        # Bug fix: previously the raw `path` (possibly None) was passed
        # instead of the resolved `outpath`, so the documented default
        # (use the config file's directory) never took effect.
        self.config = Configuration(configfile, outpath=outpath)

    def run(self, do_plots=True, match_spec=True, consolidate_members=True, cattype='runcat'):
        """
        Run the runcat consolidation task.

        Parameters
        ----------
        do_plots: `bool`, optional
            Make diagnostic plots.  Default is True.
        match_spec: `bool`, optional
            Match cluster centrals and members to spectra.
            Default is True.
        consolidate_members: `bool`, optional
            Consolidate members as well as catalog?  Default is True.
        cattype: `str`, optional
            Catalog-type token used in the input/output filenames.
            Default is 'runcat'.

        Raises
        ------
        RuntimeError
            If no catalog files are found, or a filename cannot be parsed.
        """
        # Find the distributed per-pixel catalog files.
        catfiles = sorted(glob.glob(os.path.join(self.config.outpath, '%s_*_?????_%s_catalog.fit' % (self.config.outbase, cattype))))
        self.config.logger.info("Found %d catalog files in %s" % (len(catfiles), self.config.outpath))

        if not catfiles:
            # Fail with a clear message instead of an IndexError below.
            raise RuntimeError("No %s catalog files found in %s" % (cattype, self.config.outpath))

        # Extract the healpix nside that was run (first number in the name).
        m = re.search(r'_(\d+)_(\d\d\d\d\d)_', catfiles[0])
        if m is None:
            raise RuntimeError("Could not understand filename for %s" % (catfiles[0]))
        nside = int(m.groups()[0])

        if match_spec and not self.config.has_truth:
            # Keep only high-confidence spectroscopic redshifts for matching.
            spec = GalaxyCatalog.from_fits_file(self.config.specfile)
            use, = np.where(spec.z_err < 0.001)
            spec = spec[use]

        started = False
        for catfile in catfiles:
            # Read in catalog
            self.config.logger.info("Reading %s" % (os.path.basename(catfile)))
            cat = Catalog.from_fits_file(catfile, ext=1)

            # and read in members
            if consolidate_members:
                mem = read_members(catfile)

            # Extract pixnum from name (second number in the name).
            m = re.search(r'_(\d+)_(\d\d\d\d\d)_', catfile)
            if m is None:
                raise RuntimeError("Could not understand filename for %s" % (catfile))
            hpix = int(m.groups()[1])

            if match_spec and not self.config.has_truth:
                # Match spec to cat and mem (3 arcsec radius, best match only).
                cat.cg_spec_z[:] = -1.0
                i0, i1, dists = spec.match_many(cat.ra, cat.dec, 3./3600., maxmatch=1)
                cat.cg_spec_z[i0] = spec.z[i1]

                if consolidate_members:
                    mem.zspec[:] = -1.0
                    i0, i1, dists = spec.match_many(mem.ra, mem.dec, 3./3600., maxmatch=1)
                    mem.zspec[i0] = spec.z[i1]

            if match_spec and self.config.has_truth:
                # Need to match to the truth catalog (1 arcsec radius).
                truthcat = GalaxyCatalog.from_galfile(self.config.galfile, hpix=hpix, nside=nside, border=0.0, truth=True)
                cat.cg_spec_z[:] = -1.0
                i0, i1, dists = truthcat.match_many(cat.ra, cat.dec, 1./3600., maxmatch=1)
                cat.cg_spec_z[i0] = truthcat.ztrue[i1]

                if consolidate_members:
                    mem.zspec[:] = -1.0
                    i0, i1, dists = truthcat.match_many(mem.ra, mem.dec, 1./3600., maxmatch=1)
                    mem.zspec[i0] = truthcat.ztrue[i1]

            # Figure out which clusters are in the pixel; border overlap
            # means a cluster can appear in more than one file.
            theta, phi = astro_to_sphere(cat.ra, cat.dec)
            ipring = hp.ang2pix(nside, theta, phi)
            use, = np.where(ipring == hpix)
            if use.size == 0:
                self.config.logger.info('Warning: no good clusters in pixel %d' % (hpix))
                continue

            cat = cat[use]

            if not started:
                # First pixel with clusters: create the output files.
                cat_fname = self.config.redmapper_filename('%s_catalog' % (cattype), withversion=True)
                cat.to_fits_file(cat_fname, clobber=True)
                if consolidate_members:
                    mem_fname = self.config.redmapper_filename('%s_catalog_members' % (cattype), withversion=True)
                    mem.to_fits_file(mem_fname, clobber=True)
                started = True
            else:
                # Append to the existing output files.
                with fitsio.FITS(cat_fname, mode='rw') as fits:
                    fits[1].append(cat._ndarray)
                if consolidate_members:
                    with fitsio.FITS(mem_fname, mode='rw') as fits:
                        fits[1].append(mem._ndarray)

        if do_plots and started:
            # We want to plot the zspec plot.  (`started` guard: nothing to
            # plot if no pixel contained clusters -- cat_fname would be unset.)
            cat = Catalog.from_fits_file(cat_fname)
            # Bug fix: this previously referenced the undefined name
            # `cat_name` (NameError).  Use the output catalog's filename
            # base as the plot outbase.
            self.config.d.outbase = os.path.splitext(os.path.basename(cat_fname))[0]
            specplot = SpecPlot(self.config)

            if self.config.has_truth and consolidate_members:
                # Bug fix: require consolidate_members here; mem_fname is
                # only defined when members were consolidated.
                mem = Catalog.from_fits_file(mem_fname)
                specplot.plot_cluster_catalog_from_members(cat, mem, title=self.config.d.outbase)
            else:
                specplot.plot_cluster_catalog(cat, title=self.config.d.outbase)
| |
from matplotlib import pyplot as plt
import numpy as np
import os
from random import random
from analysis_basin_plotting import plot_overlap_grid
from singlecell_constants import BETA, EXT_FIELD_STRENGTH, RUNS_FOLDER, MEMS_MEHTA, MEMS_SCMCA, FIELD_PROTOCOL, MEMORIESDIR
from singlecell_functions import hamiltonian
from singlecell_simsetup import singlecell_simsetup, unpack_simsetup
from singlecell_visualize import plot_as_bar
# Experimental applied-field gene lists, keyed as
#   EXPT_FIELDS[protocol][dataset_label][level] -> list of gene names
# where `protocol` names the field protocol (or None for no field),
# `dataset_label` identifies the memories npz ('2014mehta' or '2018scMCA'),
# and each successive 'level_N' adds less-certain targets to the prior level.
EXPT_FIELDS = {
    # mir 21 field note:
    # level 1 is main ref
    # level 2 adds wiki mir21
    # level 4 adds targetscan hits
    'miR_21': {
        '2014mehta': {
            'level_1': ['Klf5'],
            'level_2': ['Klf5', 'Trp63', 'Mef2c'],
            'level_3': ['Klf5', 'Trp63', 'Mef2c', 'Smarcd1', 'Crebl2', 'Thrb', 'Nfat5', 'Gata2', 'Nkx6-1', 'Terf2',
                        'Zkscan5', 'Glis2', 'Egr3', 'Foxp2', 'Smad7', 'Tbx2', 'Cbx4', 'Myt1l', 'Satb1', 'Yap1', 'Foxp1',
                        'Foxg1', 'Pcbd1', 'Bahd1', 'Bcl11b', 'Pitx2', 'Sox7', 'Sox5', 'Alx1', 'Npas3', 'Adnp', 'Klf6',
                        'Sox2', 'Klf3', 'Msx1', 'Plag1', 'Osr1', 'Mycl1', 'Nfib', 'Nfia', 'Bnc2']},
        '2018scMCA': {
            'level_1': ['Klf5', 'Pten'],
            'level_2': ['Klf5', 'Pten', 'Anp32a', 'Hnrnpk', 'Mef2c', 'Pdcd4', 'Smarca4', 'Trp63'],
            'level_3': ['Klf5', 'Pten', 'Anp32a', 'Hnrnpk', 'Mef2c', 'Pdcd4', 'Smarca4', 'Trp63', 'Adnp', 'Ago2', 'Alx1',
                        'Asf1a', 'Bcl11b', 'Bnc2', 'Cbx4', 'Chd7', 'Cnot6', 'Crebl2', 'Crebrf', 'Csrnp3', 'Egr3', 'Elf2',
                        'Foxg1', 'Foxp1', 'Foxp2', 'Gata2', 'Gatad2b', 'Glis2', 'Hipk3', 'Hnrnpu', 'Kdm7a', 'Klf3',
                        'Klf6', 'Lcor', 'Msx1', 'Mycl', 'Myt1l', 'Nfat5', 'Nfia', 'Nfib', 'Nipbl', 'Nkx6-1', 'Notch2',
                        'Npas3', 'Osr1', 'Pbrm1', 'Pcbd1', 'Pdcd4', 'Peli1', 'Pik3r1', 'Pitx2', 'Plag1', 'Pspc1', 'Pura',
                        'Purb', 'Purg', 'Rbpj', 'Rnf111', 'Satb1', 'Ski', 'Smad7', 'Smarcd1', 'Sox2', 'Sox2ot', 'Sox5',
                        'Sox7', 'Stat3', 'Suz12', 'Tbx2', 'Terf2', 'Thrb', 'Tnks', 'Trim33', 'Wwp1', 'Yap1', 'Zfp36l2',
                        'Zkscan5', 'Zfp367']}
    },
    # yamanaka field notes: Pou5f1 is alias Oct4, these are OSKM + Nanog
    'yamanaka': {
        '2014mehta': {
            'level_1': ['Sox2', 'Pou5f1', 'Klf4', 'Myc'],
            'level_2': ['Sox2', 'Pou5f1', 'Klf4', 'Myc', 'Nanog']},
        '2018scMCA': {
            'level_1': ['Sox2', 'Pou5f1', 'Klf4', 'Myc'],
            'level_2': ['Sox2', 'Pou5f1', 'Klf4', 'Myc', 'Nanog']},
    },
    # empty field list for None protocol
    None: {}
}
def construct_app_field_from_genes(gene_name_effect, gene_id, num_steps=0):
    """Build an applied-field vector from per-gene effects.

    Args:
        gene_name_effect: dict of gene_name: +-1 (on or off); effects for
            genes appearing more than once are summed
        gene_id: map of gene name to idx for the input memories file
        num_steps: optional numsteps (2d N x num_steps return is still
            TODO; the field is currently static)

    Returns:
        numpy array of length N with the summed effect per gene; genes
        absent from gene_id are skipped with a printed warning.
    """
    # print()/items() forms are valid in both Python 2 and 3 (original
    # used py2-only print statements and iteritems()).
    print("Constructing applied field:")
    N = len(gene_id)
    #app_field = np.zeros((N, num_steps)) $ TODO implement time based
    app_field = np.zeros(N)
    for label, effect in gene_name_effect.items():
        if label in gene_id:  # O(1) dict membership (was O(n) .keys() scan)
            #print label, gene_id[label], 'effect:', effect
            app_field[gene_id[label]] += effect
        else:
            print("Field construction warning: label %s not in gene_id.keys()" % label)
    return app_field
def field_setup(simsetup, protocol=FIELD_PROTOCOL, level=None):
    """
    Construct applied field vector (either fixed or on varying under a field protocol) to bias the dynamics

    Args:
        simsetup: simulation setup dict; reads 'GENE_ID' and 'memories_path'
        protocol: field protocol name -- "yamanaka", "miR_21", or None
        level: key into EXPT_FIELDS[protocol][npz_label] (e.g. 'level_1');
            defaults to 'level_1' with a printed warning when None

    Returns:
        dict with keys 'protocol', 'time_varying', 'app_field',
        'app_field_strength'; 'app_field' stays None when protocol is None

    Notes on named fields
        - Yamanaka factor (OSKM) names in mehta datafile: Sox2, Pou5f1 (oct4), Klf4, Myc, also nanog
    """
    # TODO must optimize: naive implement brings i7-920 row: 16x200 from 56sec (None field) to 140sec (not parallel)
    # TODO support time varying cleanly
    # TODO speedup: initialize at the same time as simsetup
    # TODO speedup: pre-multiply the fields so it need not to be scaled each glauber step (see singlecell_functions.py)
    # TODO there are two non J_ij fields an isolated single cell experiences: TF explicit mod and type biasing via proj
    # TODO need to include the type biasing one too
    assert protocol in ["yamanaka", "miR_21", None]
    field_dict = {'protocol': protocol,
                  'time_varying': False,
                  'app_field': None,
                  'app_field_strength': 1e5}  # TODO calibrate this to be very large compared to J*s scale
    gene_id = simsetup['GENE_ID']
    # preamble: identify which memories dataset is loaded so the matching
    # EXPT_FIELDS sub-dict can be selected
    if simsetup['memories_path'] == MEMS_MEHTA:
        npz_label = '2014mehta'
    elif simsetup['memories_path'] == MEMS_SCMCA:
        npz_label = '2018scMCA'
    else:
        # unknown dataset: EXPT_FIELDS lookups below will KeyError if a
        # non-None protocol is requested
        print "Note npz mems not supported:", simsetup['memories_path']
        npz_label = None
    if level is None:
        print "Warning: Arg 'level' is None -- setting field level to 'level_1'"
        level = 'level_1'
    if protocol == "yamanaka":
        print "Note: field_setup using", protocol, npz_label, level
        field_genes = EXPT_FIELDS[protocol][npz_label][level]
        field_genes_effects = {label: 1.0 for label in field_genes}  # this ensure all should be ON
        app_field_start = construct_app_field_from_genes(field_genes_effects, gene_id, num_steps=0)
        field_dict['app_field'] = app_field_start
    elif protocol == 'miR_21':
        """
        - 2018 Nature comm macrophage -> fibroblast paper lists KLF-5 and PTEN as primary targets of miR-21
        - 2014 mehta dataset does not contain PTEN, but 2018 scMCA does
        """
        print "Note: field_setup using", protocol, npz_label, level
        field_genes = EXPT_FIELDS[protocol][npz_label][level]
        field_genes_effects = {label: -1.0 for label in field_genes}  # this ensure all should be OFF
        app_field_start = construct_app_field_from_genes(field_genes_effects, gene_id, num_steps=0)
        field_dict['app_field'] = app_field_start
    else:
        assert protocol is None
    return field_dict
if __name__ == '__main__':
    # local defs: the two memories datasets and the plot output folder
    npz_mehta = MEMORIESDIR + os.sep + '2014_mehta_mems_genes_types_boolean_compressed_pruned_A.npz'
    npz_scmca = MEMORIESDIR + os.sep + '2018_scmca_mems_genes_types_boolean_compressed_pruned_A_TFonly.npz'
    FIELD_EFFECT_FOLDER = RUNS_FOLDER + os.sep + 'field_effect'
    # settings: which of the two analysis modes below is executed
    plot_field_impact_all = False
    plot_specific_field = True

    def make_field_plots(field_type, field_level, npz_type, simsetup, outdir=FIELD_EFFECT_FOLDER):
        # Generate the full diagnostic plot set for one (protocol, level,
        # dataset) combination: field term, minima depths, self-overlaps,
        # projections, two hetero-overlaps, and the overlap-difference grid.
        # All figures are saved under `outdir`.
        plot_subtitle = "Field effect of %s, %s on %s" % (field_type, field_level, npz_type)
        print plot_subtitle
        field_dict = field_setup(simsetup, protocol=field_type, level=field_level)
        app_field_vector = field_dict['app_field']
        xi_orig = simsetup['XI']
        xi_under_field = np.zeros(xi_orig.shape)
        if app_field_vector is None:
            # no field (protocol None): use a zero vector so plots still work
            app_field_vector = np.zeros(xi_orig.shape[0])
        print app_field_vector.shape
        for idx in xrange(app_field_vector.shape[0]):
            if app_field_vector[idx] == 0:
                xi_under_field[idx, :] = xi_orig[idx, :]
            else:
                # clamp the gene's state to the field's sign in every celltype
                xi_under_field[idx, :] = app_field_vector[idx]
        # compute field term
        field_term = np.dot(xi_orig.T, app_field_vector)
        plot_as_bar(field_term, simsetup['CELLTYPE_LABELS'])
        plt.axhline(y=0.0, linewidth=1, color='k', linestyle='--')
        plt.title('%s field term xi^T dot h (unperturbed=%.2f)' % (plot_subtitle, 0.0))
        filepath = outdir + os.sep + 'mems_%s_field_term_%s_%s' % (npz_type, field_type, field_level)
        plt.savefig(filepath, bbox_inches='tight')
        plt.close()
        # compute energies of shifted celltypes
        E0 = -0.5 * xi_orig.shape[0] + 0.5 * xi_orig.shape[1]  # i.e. -N/2 + p/2
        energies = np.zeros(xi_orig.shape[1])
        for col in xrange(xi_orig.shape[1]):
            energies[col] = hamiltonian(xi_under_field[:, col], simsetup['J']) - field_term[col]
        plot_as_bar(energies, simsetup['CELLTYPE_LABELS'])
        plt.axhline(y=E0, linewidth=1, color='k', linestyle='--')
        plt.title('%s minima depth (unperturbed=%.2f)' % (plot_subtitle, E0))
        plt.ylim(E0 * 1.05, 0.8 * np.max(energies))
        filepath = outdir + os.sep + 'mems_%s_energy_under_field_%s_%s' % (npz_type, field_type, field_level)
        plt.savefig(filepath, bbox_inches='tight')
        plt.close()
        # compute overlaps of shifted celltypes
        self_overlaps = np.zeros(xi_orig.shape[1])
        for idx in xrange(xi_orig.shape[1]):
            self_overlaps[idx] = np.dot(xi_orig[:, idx], xi_under_field[:, idx]) / xi_orig.shape[0]
        plot_as_bar(self_overlaps, simsetup['CELLTYPE_LABELS'])
        plt.axhline(y=1.0, linewidth=1, color='k', linestyle='--')
        plt.title('%s overlaps (unperturbed=%.2f)' % (plot_subtitle, 1.0))
        plt.ylim(0.8 * np.min(self_overlaps), 1.01)
        filepath = outdir + os.sep + 'mems_%s_overlap_under_field_%s_%s' % (npz_type, field_type, field_level)
        plt.savefig(filepath, bbox_inches='tight')
        plt.close()
        # compute projections of shifted celltypes
        self_proj = np.zeros(xi_orig.shape[1])
        for idx in xrange(xi_orig.shape[1]):
            proj_vector_of_shifted_mem = np.dot(simsetup['A_INV'], np.dot(xi_orig.T, xi_under_field[:, idx])) / \
                                         xi_orig.shape[0]
            self_proj[idx] = proj_vector_of_shifted_mem[idx]
        plot_as_bar(self_proj, simsetup['CELLTYPE_LABELS'])
        plt.axhline(y=1.0, linewidth=1, color='k', linestyle='--')
        plt.title('%s projections (unperturbed=%.2f)' % (plot_subtitle, 1.0))
        plt.ylim(0.8 * np.min(self_proj), 1.01)
        filepath = outdir + os.sep + 'mems_%s_proj_under_field_%s_%s' % (npz_type, field_type, field_level)
        plt.savefig(filepath, bbox_inches='tight')
        plt.close()
        # compute celltype specific overlaps of shifted celltypes
        # NOTE(review): indices 7 and 86 are hard-coded celltype picks,
        # presumably valid for the 2018scMCA labels -- confirm before reuse.
        cell_idx_A = 7
        cell_idx_B = 86
        print simsetup['CELLTYPE_LABELS'][cell_idx_A], simsetup['CELLTYPE_LABELS'][cell_idx_B]
        hetero_overlaps_A = np.zeros(xi_orig.shape[1])
        hetero_overlaps_B = np.zeros(xi_orig.shape[1])
        for idx in xrange(xi_orig.shape[1]):
            hetero_overlaps_A[idx] = np.dot(xi_orig[:, cell_idx_A], xi_under_field[:, idx]) / xi_orig.shape[0]
            hetero_overlaps_B[idx] = np.dot(xi_orig[:, cell_idx_B], xi_under_field[:, idx]) / xi_orig.shape[0]
        plot_as_bar(hetero_overlaps_A, simsetup['CELLTYPE_LABELS'], alpha=0.8)
        plot_as_bar(hetero_overlaps_B, simsetup['CELLTYPE_LABELS'], alpha=0.8)
        plt.axhline(y=1.0, linewidth=1, color='k', linestyle='--')
        plt.title('%s hetero_overlaps (unperturbed=%.2f)' % (plot_subtitle, 1.0))
        #plt.ylim(0.8 * np.min(self_overlaps), 1.01)
        filepath = outdir + os.sep + 'mems_%s_hetero_overlaps_under_field_%s_%s' % (npz_type, field_type, field_level)
        plt.savefig(filepath, bbox_inches='tight')
        plt.close()
        # compute grid under the field
        grid_data = np.dot(xi_under_field.T, xi_under_field) - np.dot(xi_orig.T, xi_orig)
        plot_overlap_grid(grid_data, simsetup['CELLTYPE_LABELS'], outdir, ax=None, N=None, normalize=True, fs=9,
                          relmax=True, extragrid=False, ext='.pdf', vforce=None,
                          plotname='overlap_diff_under_field_%s_%s_%s' % (npz_type, field_type, field_level))
        return

    if plot_field_impact_all:
        # sweep every protocol and every level defined for the chosen dataset
        npz = npz_scmca
        if npz == npz_scmca:
            npz_type = '2018scMCA'
        else:
            npz_type = '2014mehta'
        simsetup = singlecell_simsetup(npzpath=npz)
        for field_type in EXPT_FIELDS.keys():
            if field_type is None:
                continue
            field_levels_dict = EXPT_FIELDS[field_type][npz_type]
            for field_level in field_levels_dict.keys():
                make_field_plots(field_type, field_level, npz_type, simsetup)

    if plot_specific_field:
        # npz load
        npz = npz_scmca
        if npz == npz_scmca:
            npz_type = '2018scMCA'
        else:
            npz_type = '2014mehta'
        simsetup = singlecell_simsetup(npzpath=npz)
        xi_orig = simsetup['XI']
        # field choose
        field_type = 'miR_21'
        field_level = 'level_3'
        if field_type is not None:
            assert field_type in EXPT_FIELDS.keys() and field_level in EXPT_FIELDS[field_type][npz_type].keys()
            make_field_plots(field_type, field_level, npz_type, simsetup)
| |
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/PathList.py 2446 2007/09/18 11:41:57 knight"
__doc__ = """SCons.PathList
A module for handling lists of directory paths (the sort of things
that get set as CPPPATH, LIBPATH, etc.) with as much caching of data and
efficiency as we can while still keeping the evaluation delayed so that we
Do the Right Thing (almost) regardless of how the variable is specified.
"""
import os
import string
import SCons.Memoize
import SCons.Node
import SCons.Util
#
# Variables to specify the different types of entries in a PathList object:
#
TYPE_STRING_NO_SUBST = 0        # string with no '$' (no substitution ever needed)
TYPE_STRING_SUBST = 1           # string containing '$' (needs delayed substitution)
TYPE_OBJECT = 2                 # other object (e.g. a Node or EntryProxy)
def node_conv(obj):
    """
    The "string conversion" routine handed to substitutions so they
    return Nodes rather than strings.

    If *obj* exposes a get() method (the EntryProxy case) we call it and
    return the wrapped Node -- an architectural dependence on EntryProxy
    that may need revisiting.  Otherwise Nodes and sequences pass through
    untouched, and anything else is stringified.
    """
    try:
        get = obj.get
    except AttributeError:
        pass
    else:
        return get()
    if isinstance(obj, SCons.Node.Node) or SCons.Util.is_Sequence(obj):
        return obj
    return str(obj)
class _PathList:
    """
    An actual PathList object.
    """
    def __init__(self, pathlist):
        """
        Initializes a PathList object, canonicalizing the input and
        pre-processing it for quicker substitution later.

        The stored representation of the PathList is a list of tuples
        containing (type, value), where the "type" is one of the TYPE_*
        variables defined above.  We distinguish between:

            strings that contain no '$' and therefore need no
            delayed-evaluation string substitution (we expect that there
            will be many of these and that we therefore get a pretty
            big win from avoiding string substitution)

            strings that contain '$' and therefore need substitution
            (the hard case is things like '${TARGET.dir}/include',
            which require re-evaluation for every target + source)

            other objects (which may be something like an EntryProxy
            that needs a method called to return a Node)

        Pre-identifying the type of each element in the PathList up-front
        and storing the type in the list of tuples is intended to reduce
        the amount of calculation when we actually do the substitution
        over and over for each target.
        """
        if SCons.Util.is_String(pathlist):
            # str method instead of the deprecated (py2-only)
            # string.split() module function; same behavior.
            pathlist = pathlist.split(os.pathsep)
        elif not SCons.Util.is_Sequence(pathlist):
            pathlist = [pathlist]

        pl = []
        for p in pathlist:
            try:
                found = p.find('$')
            except (AttributeError, TypeError):
                # Non-strings (Nodes, proxies, ...) have no find() method.
                entry_type = TYPE_OBJECT
            else:
                if found == -1:
                    entry_type = TYPE_STRING_NO_SUBST
                else:
                    entry_type = TYPE_STRING_SUBST
            # local renamed from `type` to avoid shadowing the builtin
            pl.append((entry_type, p))

        self.pathlist = tuple(pl)

    def __len__(self): return len(self.pathlist)

    def __getitem__(self, i): return self.pathlist[i]

    def subst_path(self, env, target, source):
        """
        Performs construction variable substitution on a pre-digested
        PathList for a specific target and source.
        """
        result = []
        for entry_type, value in self.pathlist:
            if entry_type == TYPE_STRING_SUBST:
                value = env.subst(value, target=target, source=source,
                                  conv=node_conv)
                if SCons.Util.is_Sequence(value):
                    # Substitution expanded to a list: splice it in flat.
                    result += value
                    continue
            elif entry_type == TYPE_OBJECT:
                value = node_conv(value)
            if value:
                # Falsy values (empty strings, None) are dropped.
                result.append(value)
        return tuple(result)
class PathListCache:
    """
    A class to handle caching of PathList lookups.

    This class gets instantiated once and then deleted from the namespace,
    so it's used as a Singleton (although we don't enforce that in the
    usual Pythonic ways).  We could have just made the cache a dictionary
    in the module namespace, but putting it in this class allows us to
    use the same Memoizer pattern that we use elsewhere to count cache
    hits and misses, which is very valuable.

    Lookup keys in the cache are computed by the _PathList_key() method.
    Cache lookup should be quick, so we don't spend cycles canonicalizing
    all forms of the same lookup key.  For example, 'x:y' and ['x',
    'y'] logically represent the same list, but we don't bother to
    split string representations and treat those two equivalently.
    (Note, however, that we do, treat lists and tuples the same.)

    The main type of duplication we're trying to catch will come from
    looking up the same path list from two different clones of the
    same construction environment.  That is, given

        env2 = env1.Clone()

    both env1 and env2 will have the same CPPPATH value, and we can
    cheaply avoid re-parsing both values of CPPPATH by using the
    common value from this cache.
    """
    if SCons.Memoize.use_memoizer:
        # Opt in to the global Memoizer so cache hits/misses are counted.
        __metaclass__ = SCons.Memoize.Memoized_Metaclass

    memoizer_counters = []

    def __init__(self):
        # _memo maps memoized-method name -> {key: result}.
        self._memo = {}

    def _PathList_key(self, pathlist):
        """
        Returns the key for memoization of PathLists.

        Note that we want this to be pretty quick, so we don't completely
        canonicalize all forms of the same list.  For example,
        'dir1:$ROOT/dir2' and ['$ROOT/dir1', 'dir'] may logically
        represent the same list if you're executing from $ROOT, but
        we're not going to bother splitting strings into path elements,
        or massaging strings into Nodes, to identify that equivalence.
        We just want to eliminate obvious redundancy from the normal
        case of re-using exactly the same cloned value for a path.
        """
        if SCons.Util.is_Sequence(pathlist):
            # Flatten and convert to a tuple so lists and tuples hash alike.
            pathlist = tuple(SCons.Util.flatten(pathlist))
        return pathlist

    memoizer_counters.append(SCons.Memoize.CountDict('PathList', _PathList_key))

    def PathList(self, pathlist):
        """
        Returns the cached _PathList object for the specified pathlist,
        creating and caching a new object as necessary.
        """
        pathlist = self._PathList_key(pathlist)
        try:
            memo_dict = self._memo['PathList']
        except KeyError:
            # First lookup ever: create the per-method memo dict.
            memo_dict = {}
            self._memo['PathList'] = memo_dict
        else:
            try:
                return memo_dict[pathlist]
            except KeyError:
                # Known memo dict, unseen key: fall through and build it.
                pass

        result = _PathList(pathlist)

        memo_dict[pathlist] = result

        return result
# Expose the singleton's bound method as the module-level PathList()
# function, then delete the class so only this one instance's cache exists.
PathList = PathListCache().PathList
del PathListCache
| |
'''
antlr_grammar.py
Created on 4 sept. 2010
@author: luca
(Minor updates by Paul McGuire, June, 2012)
'''
from pyparsingOD import Word, ZeroOrMore, printables, Suppress, OneOrMore, Group, \
LineEnd, Optional, White, originalTextFor, hexnums, nums, Combine, Literal, Keyword, \
cStyleComment, Regex, Forward, MatchFirst, And, srange, oneOf, alphas, alphanums, \
delimitedList
# http://www.antlr.org/grammar/ANTLR/ANTLRv3.g

# Tokens
EOL = Suppress(LineEnd()) # $
singleTextString = originalTextFor(ZeroOrMore(~EOL + (White(" \t") | Word(printables)))).leaveWhitespace()
XDIGIT = hexnums
INT = Word(nums)
# escape: backslash + simple escape char, 4-digit unicode escape, or any char
ESC = Literal('\\') + (oneOf(list(r'nrtbf\">'+"'")) | ('u' + Word(hexnums, exact=4)) | Word(printables, exact=1))
LITERAL_CHAR = ESC | ~(Literal("'") | Literal('\\')) + Word(printables, exact=1)
CHAR_LITERAL = Suppress("'") + LITERAL_CHAR + Suppress("'")
STRING_LITERAL = Suppress("'") + Combine(OneOrMore(LITERAL_CHAR)) + Suppress("'")
DOUBLE_QUOTE_STRING_LITERAL = '"' + ZeroOrMore(LITERAL_CHAR) + '"'
DOUBLE_ANGLE_STRING_LITERAL = '<<' + ZeroOrMore(Word(printables, exact=1)) + '>>'
# token names start uppercase, rule names start lowercase (ANTLR convention)
TOKEN_REF = Word(alphas.upper(), alphanums+'_')
RULE_REF = Word(alphas.lower(), alphanums+'_')
ACTION_ESC = (Suppress("\\") + Suppress("'")) | Suppress('\\"') | Suppress('\\') + (~(Literal("'") | Literal('"')) + Word(printables, exact=1))
ACTION_CHAR_LITERAL = Suppress("'") + (ACTION_ESC | ~(Literal('\\') | Literal("'")) + Word(printables, exact=1)) + Suppress("'")
ACTION_STRING_LITERAL = Suppress('"') + ZeroOrMore(ACTION_ESC | ~(Literal('\\') | Literal('"')) + Word(printables, exact=1)) + Suppress('"')
SRC = Suppress('src') + ACTION_STRING_LITERAL("file") + INT("line")
# NOTE: `id` shadows the id() builtin for the rest of this module
id = TOKEN_REF | RULE_REF
SL_COMMENT = Suppress('//') + Suppress('$ANTLR') + SRC | ZeroOrMore(~EOL + Word(printables)) + EOL
ML_COMMENT = cStyleComment
WS = OneOrMore(Suppress(' ') | Suppress('\t') | (Optional(Suppress('\r')) + Literal('\n')))
WS_LOOP = ZeroOrMore(SL_COMMENT | ML_COMMENT)
# bracketed/braced actions nest, hence the Forward definitions
NESTED_ARG_ACTION = Forward()
NESTED_ARG_ACTION << Suppress('[') + ZeroOrMore(NESTED_ARG_ACTION | ACTION_STRING_LITERAL | ACTION_CHAR_LITERAL) + Suppress(']')
ARG_ACTION = NESTED_ARG_ACTION
NESTED_ACTION = Forward()
NESTED_ACTION << Suppress('{') + ZeroOrMore(NESTED_ACTION | SL_COMMENT | ML_COMMENT | ACTION_STRING_LITERAL | ACTION_CHAR_LITERAL) + Suppress('}')
ACTION = NESTED_ACTION + Optional('?')
SCOPE = Suppress('scope')
OPTIONS = Suppress('options') + Suppress('{') # + WS_LOOP + Suppress('{')
TOKENS = Suppress('tokens') + Suppress('{') # + WS_LOOP + Suppress('{')
FRAGMENT = 'fragment';
TREE_BEGIN = Suppress('^(')
ROOT = Suppress('^')
BANG = Suppress('!')
RANGE = Suppress('..')
REWRITE = Suppress('->')

# General Parser Definitions

# Grammar heading
optionValue = id | STRING_LITERAL | CHAR_LITERAL | INT | Literal('*').setName("s")

option = Group(id("id") + Suppress('=') + optionValue("value"))("option")
optionsSpec = OPTIONS + Group(OneOrMore(option + Suppress(';')))("options") + Suppress('}')

tokenSpec = Group(TOKEN_REF("token_ref") + (Suppress('=') + (STRING_LITERAL | CHAR_LITERAL)("lit")))("token") + Suppress(';')
tokensSpec = TOKENS + Group(OneOrMore(tokenSpec))("tokens") + Suppress('}')

attrScope = Suppress('scope') + id + ACTION
grammarType = Keyword('lexer') + Keyword('parser') + Keyword('tree')
actionScopeName = id | Keyword('lexer')("l") | Keyword('parser')("p")
action = Suppress('@') + Optional(actionScopeName + Suppress('::')) + id + ACTION

grammarHeading = Optional(ML_COMMENT("ML_COMMENT")) + Optional(grammarType) + Suppress('grammar') + id("grammarName") + Suppress(';') + Optional(optionsSpec) + Optional(tokensSpec) + ZeroOrMore(attrScope) + ZeroOrMore(action)

modifier = Keyword('protected') | Keyword('public') | Keyword('private') | Keyword('fragment')
ruleAction = Suppress('@') + id + ACTION
throwsSpec = Suppress('throws') + delimitedList(id)
ruleScopeSpec = (Suppress('scope') + ACTION) | (Suppress('scope') + delimitedList(id) + Suppress(';')) | (Suppress('scope') + ACTION + Suppress('scope') + delimitedList(id) + Suppress(';'))
unary_op = oneOf("^ !")
notTerminal = CHAR_LITERAL | TOKEN_REF | STRING_LITERAL
terminal = (CHAR_LITERAL | TOKEN_REF + Optional(ARG_ACTION) | STRING_LITERAL | '.') + Optional(unary_op)
block = Forward()
notSet = Suppress('~') + (notTerminal | block)
rangeNotPython = CHAR_LITERAL("c1") + RANGE + CHAR_LITERAL("c2")
atom = Group(rangeNotPython + Optional(unary_op)("op")) | terminal | (notSet + Optional(unary_op)("op")) | (RULE_REF + Optional(ARG_ACTION("arg")) + Optional(unary_op)("op"))
element = Forward()
treeSpec = Suppress('^(') + element*(2,) + Suppress(')')
ebnfSuffix = oneOf("? * +")
ebnf = block + Optional(ebnfSuffix("op") | '=>')
elementNoOptionSpec = (id("result_name") + oneOf('= +=')("labelOp") + atom("atom") + Optional(ebnfSuffix)) | (id("result_name") + oneOf('= +=')("labelOp") + block + Optional(ebnfSuffix)) | atom("atom") + Optional(ebnfSuffix) | ebnf | ACTION | (treeSpec + Optional(ebnfSuffix)) # | SEMPRED ( '=>' -> GATED_SEMPRED | -> SEMPRED )
element << Group(elementNoOptionSpec)("element")
alternative = Group(Group(OneOrMore(element))("elements")) # Do not ask me why group is needed twice... seems like the xml that you see is not always the real structure?
rewrite = Optional(Literal('TODO REWRITE RULES TODO'))
block << Suppress('(') + Optional(Optional(optionsSpec("opts")) + Suppress(':')) + Group(alternative('a1') + rewrite + Group(ZeroOrMore(Suppress('|') + alternative('a2') + rewrite))("alternatives"))("block") + Suppress(')')
altList = alternative('a1') + rewrite + Group(ZeroOrMore(Suppress('|') + alternative('a2') + rewrite))("alternatives")
exceptionHandler = Suppress('catch') + ARG_ACTION + ACTION
finallyClause = Suppress('finally') + ACTION
exceptionGroup = (OneOrMore(exceptionHandler) + Optional(finallyClause)) | finallyClause

ruleHeading = Optional(ML_COMMENT)("ruleComment") + Optional(modifier)("modifier") + id("ruleName") + Optional("!") + Optional(ARG_ACTION("arg")) + Optional(Suppress('returns') + ARG_ACTION("rt")) + Optional(throwsSpec) + Optional(optionsSpec) + Optional(ruleScopeSpec) + ZeroOrMore(ruleAction)
rule = Group(ruleHeading + Suppress(':') + altList + Suppress(';') + Optional(exceptionGroup))("rule")

grammarDef = grammarHeading + Group(OneOrMore(rule))("rules")

def grammar():
    """Return the top-level pyparsing expression for an ANTLRv3 grammar."""
    return grammarDef
def __antlrAlternativesConverter(pyparsingRules, antlrBlock):
    """
    Convert an ANTLR block parse result into a single pyparsing expression.

    A block with several alternatives (`a1` plus a non-empty `alternatives`
    list) becomes a MatchFirst named "anonymous_or"; a block with only `a1`
    converts just that alternative.

    Raises:
        Exception: if the block has neither `a1` nor `alternatives`.
    """
    rule = None
    if hasattr(antlrBlock, 'alternatives') and antlrBlock.alternatives != '' and len(antlrBlock.alternatives) > 0:
        # The first alternative is stored separately as `a1`; the rest
        # live in the `alternatives` list.
        alternatives = [__antlrAlternativeConverter(pyparsingRules, antlrBlock.a1)]
        for alternative in antlrBlock.alternatives:
            alternatives.append(__antlrAlternativeConverter(pyparsingRules, alternative))
        rule = MatchFirst(alternatives)("anonymous_or")
    elif hasattr(antlrBlock, 'a1') and antlrBlock.a1 != '':
        rule = __antlrAlternativeConverter(pyparsingRules, antlrBlock.a1)
    else:
        raise Exception('Not yet implemented')
    assert rule is not None  # identity check (was the non-idiomatic `!= None`)
    return rule
def __antlrAlternativeConverter(pyparsingRules, antlrAlternative):
    """
    Convert one ANTLR alternative (a sequence of elements) into a pyparsing
    expression, applying any ?/*/+ suffix operator on each element.
    """
    elementList = []
    for element in antlrAlternative.elements:
        rule = None
        if hasattr(element.atom, 'c1') and element.atom.c1 != '':
            # Character range 'a'..'z' -> regex class [a-z].
            # Bug fix: the closing ']' was previously concatenated onto
            # c2[0] *inside* str() -- same output for strings but wrong
            # parenthesization; build the class explicitly.
            regex = r'[' + str(element.atom.c1[0]) + '-' + str(element.atom.c2[0]) + ']'
            rule = Regex(regex)("anonymous_regex")
        elif hasattr(element, 'block') and element.block != '':
            rule = __antlrAlternativesConverter(pyparsingRules, element.block)
        else:
            ruleRef = element.atom
            assert ruleRef in pyparsingRules
            rule = pyparsingRules[element.atom](element.atom)
        if hasattr(element, 'op') and element.op != '':
            if element.op == '+':
                rule = Group(OneOrMore(rule))("anonymous_one_or_more")
            elif element.op == '*':
                rule = Group(ZeroOrMore(rule))("anonymous_zero_or_more")
            elif element.op == '?':
                rule = Optional(rule)
            else:
                raise Exception('rule operator not yet implemented : ' + element.op)
            # (dead `rule = rule` statement removed)
        elementList.append(rule)
    if len(elementList) > 1:
        rule = Group(And(elementList))("anonymous_and")
    else:
        rule = elementList[0]
    assert rule is not None
    return rule
def __antlrRuleConverter(pyparsingRules, antlrRule):
    """
    Convert a whole ANTLR rule into a pyparsing expression and attach the
    rule name as its results name.
    """
    rule = __antlrAlternativesConverter(pyparsingRules, antlrRule)
    assert rule is not None
    # Calling a pyparsing expression with a string sets its results name.
    rule(antlrRule.ruleName)
    return rule
def antlrConverter(antlrGrammarTree):
    """
    Convert a parsed ANTLR grammar tree into a dict of pyparsing rules.

    Tokens become Literals.  Rules are first created as Forward
    placeholders (ANTLR is a top-down grammar, so a rule may reference
    rules defined later) and then filled in with `<<`.

    Returns:
        dict mapping token/rule name -> pyparsing expression.
    """
    pyparsingRules = {}

    antlrTokens = {}
    for antlrToken in antlrGrammarTree.tokens:
        antlrTokens[antlrToken.token_ref] = antlrToken.lit
    for antlrTokenName, antlrToken in list(antlrTokens.items()):
        pyparsingRules[antlrTokenName] = Literal(antlrToken)

    antlrRules = {}
    for antlrRule in antlrGrammarTree.rules:
        antlrRules[antlrRule.ruleName] = antlrRule
        pyparsingRules[antlrRule.ruleName] = Forward()  # antlr is a top down grammar
    for antlrRuleName, antlrRule in list(antlrRules.items()):
        pyparsingRule = __antlrRuleConverter(pyparsingRules, antlrRule)
        assert pyparsingRule is not None
        pyparsingRules[antlrRuleName] << pyparsingRule

    return pyparsingRules
if __name__ == "__main__":
    # Smoke test: parse a small ANTLR calculator grammar, convert it to
    # pyparsing rules, then use the converted "expr" rule on an expression.
    text = """grammar SimpleCalc;
options {
language = Python;
}
tokens {
PLUS = '+' ;
MINUS = '-' ;
MULT = '*' ;
DIV = '/' ;
}
/*------------------------------------------------------------------
* PARSER RULES
*------------------------------------------------------------------*/
expr : term ( ( PLUS | MINUS ) term )* ;
term : factor ( ( MULT | DIV ) factor )* ;
factor : NUMBER ;
/*------------------------------------------------------------------
* LEXER RULES
*------------------------------------------------------------------*/
NUMBER : (DIGIT)+ ;
/* WHITESPACE : ( '\t' | ' ' | '\r' | '\n'| '\u000C' )+ { $channel = HIDDEN; } ; */
fragment DIGIT : '0'..'9' ;
"""
    # validate() checks the grammar definition itself for infinite recursion
    grammar().validate()
    antlrGrammarTree = grammar().parseString(text)
    print(antlrGrammarTree.asXML("antlrGrammarTree"))
    pyparsingRules = antlrConverter(antlrGrammarTree)
    pyparsingRule = pyparsingRules["expr"]
    pyparsingTree = pyparsingRule.parseString("2 - 5 * 42 + 7 / 25")
    print(pyparsingTree.asXML("pyparsingTree"))
| |
import argparse
import os
import shutil
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
# Every lowercase, callable, non-dunder name in torchvision.models is a
# model constructor; these become the valid choices for --arch.
model_names = sorted(name for name in models.__dict__
    if name.islower() and not name.startswith("__")
    and callable(models.__dict__[name]))

parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
                    help='path to dataset')
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet18',
                    choices=model_names,
                    help='model architecture: ' +
                         ' | '.join(model_names) +
                         ' (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
                    metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
                    metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                    help='use pre-trained model')
parser.add_argument('--world-size', default=1, type=int,
                    help='number of distributed processes')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
                    help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='gloo', type=str,
                    help='distributed backend')

# Best top-1 validation accuracy seen so far; updated inside main().
best_prec1 = 0
def main():
    """Entry point: parse args, build model/data pipelines, train and checkpoint.

    Side effects: sets the module globals ``args`` and ``best_prec1``,
    optionally joins a distributed process group, and writes checkpoint
    files via save_checkpoint().
    """
    global args, best_prec1
    args = parser.parse_args()

    # More than one process implies distributed training.
    args.distributed = args.world_size > 1

    if args.distributed:
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                world_size=args.world_size)

    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()

    if not args.distributed:
        # alexnet/vgg keep their classifier on one GPU and only parallelize
        # the convolutional features; other architectures are wrapped whole.
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()
    else:
        model.cuda()
        model = torch.nn.parallel.DistributedDataParallel(model)

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    # Let cuDNN pick the fastest convolution algorithms for fixed input sizes.
    cudnn.benchmark = True

    # Data loading code -- assumes ImageFolder layout: data/{train,val}/<class>/*
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    # Standard ImageNet channel statistics.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomSizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None

    # A sampler and shuffle=True are mutually exclusive in DataLoader.
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler)

    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, transforms.Compose([
            transforms.Scale(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # Reshuffle shards deterministically per epoch.
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
            'optimizer' : optimizer.state_dict(),
        }, is_best)
def train(train_loader, model, criterion, optimizer, epoch):
    """Run one training epoch over *train_loader*.

    Updates *model* in place via SGD steps from *optimizer*, tracks
    timing/loss/accuracy meters, and prints progress every
    ``args.print_freq`` batches. Requires CUDA (targets are moved to GPU).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        # FIX: `async=True` is a SyntaxError on Python 3.7+ where `async`
        # became a reserved keyword; `non_blocking` is the supported
        # spelling (same semantics) since PyTorch 0.4.
        target = target.cuda(non_blocking=True)
        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)

        # compute output
        output = model(input_var)
        loss = criterion(output, target_var)

        # measure accuracy and record loss
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        # NOTE(review): `loss.data[0]` is the pre-0.4 idiom; on modern
        # PyTorch this would be `loss.item()`. Left as-is to match the
        # torch version this file targets (Variable/volatile usage).
        losses.update(loss.data[0], input.size(0))
        top1.update(prec1[0], input.size(0))
        top5.update(prec5[0], input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                      epoch, i, len(train_loader), batch_time=batch_time,
                      data_time=data_time, loss=losses, top1=top1, top5=top5))
def validate(val_loader, model, criterion):
    """Evaluate *model* on *val_loader*.

    Returns the average top-1 precision (percentage) over the validation
    set. Prints progress every ``args.print_freq`` batches. Requires CUDA.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    end = time.time()
    for i, (input, target) in enumerate(val_loader):
        # FIX: `async=True` is a SyntaxError on Python 3.7+ where `async`
        # became a reserved keyword; `non_blocking` is the supported
        # spelling (same semantics) since PyTorch 0.4.
        target = target.cuda(non_blocking=True)
        # volatile=True disables autograd history for inference (pre-0.4 API).
        input_var = torch.autograd.Variable(input, volatile=True)
        target_var = torch.autograd.Variable(target, volatile=True)

        # compute output
        output = model(input_var)
        loss = criterion(output, target_var)

        # measure accuracy and record loss
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.data[0], input.size(0))
        top1.update(prec1[0], input.size(0))
        top5.update(prec5[0], input.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            print('Test: [{0}/{1}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                      i, len(val_loader), batch_time=batch_time, loss=losses,
                      top1=top1, top5=top5))

    print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
          .format(top1=top1, top5=top5))

    return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Serialize *state* to *filename*; mirror it to ``model_best.pth.tar``
    (in the current working directory) when *is_best* is true."""
    torch.save(state, filename)
    if not is_best:
        return
    shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
    """Tracks the most recent value plus a running sum, count and mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out every statistic."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running mean."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch):
    """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
    decayed = args.lr * (0.1 ** (epoch // 30))
    for group in optimizer.param_groups:
        group['lr'] = decayed
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) tensor of class scores.
        target: (batch,) tensor of ground-truth class indices.
        topk: iterable of k values to report.

    Returns:
        List with one 1-element tensor per k: the percentage of samples
        whose target appears among the top-k predictions.
    """
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # FIX: use reshape(-1) instead of view(-1). `correct` comes from a
        # transpose, so `correct[:k]` is non-contiguous for k > 1 and
        # view() raises a RuntimeError on PyTorch >= 0.4.
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
if __name__ == '__main__':
    # Standard script entry point.
    main()
| |
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import socket
import time
import unittest
from django.contrib.auth.middleware import AuthenticationMiddleware
from django.contrib.auth.models import Permission
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.messages.storage import default_storage
from django.contrib.sessions.backends.base import SessionBase
from django.core.handlers import wsgi
from django import http
from django import test as django_test
from django.test.client import RequestFactory
from django.utils.encoding import force_text
import six
from django.contrib.staticfiles.testing \
import StaticLiveServerTestCase as LiveServerTestCase
LOG = logging.getLogger(__name__)
# NOTE: Several distributions can't ship Selenium, or the Firefox
# component of it, due to its non-free license. So they have to patch
# it out of test-requirements.txt Avoid import failure and force not
# running selenium tests if we attempt to run selenium tests using the
# Firefox driver and it is not available.
try:
    from selenium.webdriver.support import ui as selenium_ui
    import xvfbwrapper  # Only needed when running the Selenium tests headless
    from horizon.test.webdriver import WebDriver
except ImportError as e:
    # Selenium (or its Firefox driver) is optional: disable the selenium
    # tests instead of failing at import time.
    LOG.warning("%s, force WITH_SELENIUM=False", e)
    os.environ['WITH_SELENIUM'] = ''

from mox3 import mox

from horizon import middleware

# Makes output of failing mox tests much easier to read.
wsgi.WSGIRequest.__repr__ = lambda self: "<class 'django.http.HttpRequest'>"
class SessionStore(SessionBase):
    """Dict like object for simulating sessions in unittests."""

    def load(self):
        """Start every session fresh and empty."""
        self.create()
        return {}

    def create(self):
        """Nothing to allocate; just flag the session as touched."""
        self.modified = True

    def save(self, must_create=False):
        """Pretend to persist: pick a key and flag modification."""
        self._session_key = self._get_session_key()
        self.modified = True

    def exists(self, session_key=None):
        """No session is ever persisted, so none ever exists."""
        return False

    def delete(self, session_key=None):
        """Throw away all in-memory session state."""
        self._session_cache = {}
        self._session_key = ''
        self.modified = True

    def cycle_key(self):
        """Rotating the key is just a save for this fake store."""
        self.save()

    @classmethod
    def clear_expired(cls):
        """Nothing persists, so nothing can expire."""
        pass
class RequestFactoryWithMessages(RequestFactory):
    """RequestFactory whose requests support ``django.contrib.messages``."""

    def _with_messages(self, req):
        # Attach the user/session/messages plumbing that the messages
        # framework (and Horizon middleware) expect on a request.
        # Extracted to remove the duplication between get() and post().
        req.user = User()
        req.session = SessionStore()
        req._messages = default_storage(req)
        return req

    def get(self, *args, **kwargs):
        return self._with_messages(
            super(RequestFactoryWithMessages, self).get(*args, **kwargs))

    def post(self, *args, **kwargs):
        return self._with_messages(
            super(RequestFactoryWithMessages, self).post(*args, **kwargs))
@unittest.skipIf(os.environ.get('SKIP_UNITTESTS', False),
                 "The SKIP_UNITTESTS env variable is set.")
class TestCase(django_test.TestCase):
    """Base test case class for Horizon with numerous additional features.

    * The ``mox`` mocking framework via ``self.mox``.
    * A ``RequestFactory`` class which supports Django's ``contrib.messages``
      framework via ``self.factory``.
    * A ready-to-go request object via ``self.request``.
    """

    def setUp(self):
        super(TestCase, self).setUp()
        self.mox = mox.Mox()
        self._setup_test_data()
        self._setup_factory()
        self._setup_user()
        self._setup_request()
        # Run the request through Horizon's and Django's middleware so that
        # self.request resembles one produced by the full request stack.
        middleware.HorizonMiddleware().process_request(self.request)
        AuthenticationMiddleware().process_request(self.request)
        os.environ["HORIZON_TEST_RUN"] = "True"

    def _setup_test_data(self):
        # Hook for subclasses to load fixtures; intentionally a no-op here.
        pass

    def _setup_factory(self):
        # Request factory that wires up the messages framework (see above).
        self.factory = RequestFactoryWithMessages()

    def _setup_user(self):
        self.user = User.objects.create_user(username='test', password='test')
        self.assertTrue(self.client.login(username="test", password="test"))

    def _setup_request(self):
        self.request = http.HttpRequest()
        self.request.session = self.client.session

    def tearDown(self):
        super(TestCase, self).tearDown()
        # VerifyAll() raises if any expected mock call never happened.
        self.mox.UnsetStubs()
        self.mox.VerifyAll()
        del os.environ["HORIZON_TEST_RUN"]

    def set_permissions(self, permissions=None):
        """Replace the test user's permissions with the named *permissions*."""
        perm_ids = Permission.objects.values_list('id', flat=True)
        self.user.user_permissions.remove(*perm_ids)
        for name in permissions:
            ct, create = ContentType.objects.get_or_create(model=name,
                                                           app_label='horizon')
            perm, create = Permission.objects.get_or_create(codename=name,
                                                            content_type=ct,
                                                            name=name)
            self.user.user_permissions.add(perm)
        # Django caches permissions on the user object; drop the cache so
        # the new permission set takes effect immediately.
        if hasattr(self.user, "_perm_cache"):
            del self.user._perm_cache

    if six.PY3:
        # Python 2 assert methods renamed in Python 3
        def assertItemsEqual(self, expected_seq, actual_seq, msg=None):
            self.assertCountEqual(expected_seq, actual_seq, msg)

        def assertNotRegexpMatches(self, text, unexpected_regexp, msg=None):
            self.assertNotRegex(text, unexpected_regexp, msg)

    def assertNoMessages(self, response=None):
        """Asserts no messages have been attached by the messages framework.

        The expected messages framework is ``django.contrib.messages``.
        """
        self.assertMessageCount(response, success=0, warn=0, info=0, error=0)

    def assertMessageCount(self, response=None, **kwargs):
        """Asserts that the expected number of messages have been attached.

        The expected number of messages can be specified per message type.
        Usage would look like ``self.assertMessageCount(success=1)``.
        """
        # A throwaway request so default_storage() has something to bind to.
        temp_req = self.client.request(**{'wsgi.input': None})
        temp_req.COOKIES = self.client.cookies
        storage = default_storage(temp_req)
        messages = []

        if response is None:
            # To gain early access to the messages we have to decode the
            # cookie on the test client.
            if 'messages' in self.client.cookies:
                message_cookie = self.client.cookies['messages'].value
                messages = storage._decode(message_cookie)
        # Check for messages in the context
        elif hasattr(response, "context") and "messages" in response.context:
            messages = response.context["messages"]
        # Check for messages attached to the request on a TemplateResponse
        elif hasattr(response, "_request") and hasattr(response._request,
                                                       "_messages"):
            messages = response._request._messages._queued_messages

        # If we don't have messages and we don't expect messages, we're done.
        if not any(kwargs.values()) and not messages:
            return
        # If we expected messages and have none, that's a problem.
        if any(kwargs.values()) and not messages:
            error_msg = "Messages were expected, but none were set."
            assert 0 == sum(kwargs.values()), error_msg
        # Otherwise, make sure we got the expected messages.
        for msg_type, count in kwargs.items():
            msgs = [force_text(m.message)
                    for m in messages if msg_type in m.tags]
            assert len(msgs) == count, \
                "%s messages not as expected: %s" % (msg_type.title(),
                                                     ", ".join(msgs))
@unittest.skipUnless(os.environ.get('WITH_SELENIUM', False),
                     "The WITH_SELENIUM env variable is not set.")
class SeleniumTestCase(LiveServerTestCase):
    """Live-server test case that drives a real browser via Selenium."""

    @classmethod
    def setUpClass(cls):
        socket.setdefaulttimeout(60)
        if os.environ.get('WITH_SELENIUM', False):
            time.sleep(1)
            # Start a virtual display server for running the tests headless.
            if os.environ.get('SELENIUM_HEADLESS', False):
                cls.vdisplay = xvfbwrapper.Xvfb(width=1280, height=720)
                cls.vdisplay.start()
            cls.selenium = WebDriver()
        super(SeleniumTestCase, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        if os.environ.get('WITH_SELENIUM', False):
            cls.selenium.quit()
            time.sleep(1)
        # cls.vdisplay only exists when SELENIUM_HEADLESS was set in setUpClass.
        if hasattr(cls, 'vdisplay'):
            cls.vdisplay.stop()
        super(SeleniumTestCase, cls).tearDownClass()

    def setUp(self):
        socket.setdefaulttimeout(60)
        self.selenium.implicitly_wait(30)
        self.ui = selenium_ui
        super(SeleniumTestCase, self).setUp()
class JasmineTests(SeleniumTestCase):
    """Run a Jasmine JS spec suite through Selenium.

    Subclass this in :file:`horizon/test/jasmine/jasmine_tests.py` and set:

    .. attribute:: sources

        JS source files under test ({{ STATIC_URL }} is prepended
        automatically).

    .. attribute:: specs

        Jasmine JS spec files ({{ STATIC_URL }} is prepended automatically).

    .. attribute:: template_name

        Optional template providing the HTML the specs need. It must extend
        :file:`horizon/jasmine/jasmine.html` and fill the ``content`` block;
        the default template is used when omitted.
    """
    sources = []
    specs = []
    template_name = None

    def run_jasmine(self):
        url = "%s%s%s" % (self.live_server_url,
                          "/jasmine/",
                          self.__class__.__name__)
        self.selenium.get(url)
        wait = self.ui.WebDriverWait(self.selenium, 120)

        def jasmine_done(driver):
            # Jasmine writes "finished" into its duration banner when done.
            return "finished" in driver.find_element_by_class_name(
                "duration").text

        wait.until(jasmine_done)

        failed_specs = self.selenium.find_elements_by_css_selector(
            ".spec-detail.failed")
        results = []
        for spec in failed_specs:
            for css_class in ("description", "stack-trace"):
                results.append(
                    spec.find_element_by_class_name(css_class).text)
        self.assertEqual(results, [], '\n\n' + '\n\n'.join(results) + '\n\n')

    def test(self):
        # The base class itself declares no specs; only subclasses run.
        if self.__class__ == JasmineTests:
            return
        self.run_jasmine()
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for variable store."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
class VariableScopeTest(test.TestCase):
    def testGetVar(self):
        """Requesting an existing name returns the same variable object."""
        vs = variable_scope._get_default_variable_store()
        v = vs.get_variable("v", [1])
        v1 = vs.get_variable("v", [1])
        self.assertEqual(v, v1)
    def testResource(self):
        """use_resource=True must yield a ResourceVariable instance."""
        vs = variable_scope._get_default_variable_store()
        v1 = vs.get_variable("v", [1], use_resource=True)
        self.assertTrue(isinstance(v1, resource_variable_ops.ResourceVariable))
    def testNameExists(self):
        """reuse=False forbids existing names; reuse=True forbids new ones."""
        vs = variable_scope._get_default_variable_store()
        # No check by default, so we can both create and get existing names.
        v = vs.get_variable("v", [1])
        v1 = vs.get_variable("v", [1])
        self.assertEqual(v, v1)

        # When reuse is False, we fail when variables are already there.
        vs.get_variable("w", [1], reuse=False)  # That's ok.
        with self.assertRaises(ValueError):
            vs.get_variable("v", [1], reuse=False)  # That fails.
        # When reuse is True, we fail when variables are new.
        vs.get_variable("v", [1], reuse=True)  # That's ok.
        with self.assertRaises(ValueError):
            vs.get_variable("u", [1], reuse=True)  # That fails.
    def testNamelessStore(self):
        """Variables created outside any scope keep their bare names."""
        vs = variable_scope._get_default_variable_store()
        vs.get_variable("v1", [2])
        vs.get_variable("v2", [2])
        expected_names = ["%s:0" % name for name in ["v1", "v2"]]
        self.assertEqual(
            set(expected_names), set([v.name for v in vs._vars.values()]))
    def testVarScopeInitializer(self):
        """Scope-level initializers apply to variables created inside."""
        with self.test_session() as sess:
            init = init_ops.constant_initializer(0.3)
            with variable_scope.variable_scope("tower") as tower:
                with variable_scope.variable_scope("foo", initializer=init):
                    v = variable_scope.get_variable("v", [])
                    sess.run(variables_lib.initialize_variables([v]))
                    self.assertAllClose(v.eval(), 0.3)
                # Re-entering the captured scope with a new initializer.
                with variable_scope.variable_scope(tower, initializer=init):
                    w = variable_scope.get_variable("w", [])
                    sess.run(variables_lib.initialize_variables([w]))
                    self.assertAllClose(w.eval(), 0.3)
    def testVarScopeConstraint(self):
        """Scope-level constraints propagate to variables created inside."""
        constraint = lambda x: 0. * x
        with variable_scope.variable_scope("tower") as tower:
            with variable_scope.variable_scope("foo", constraint=constraint):
                v = variable_scope.get_variable("v", [])
                self.assertEqual(v.constraint, constraint)
            with variable_scope.variable_scope(tower, constraint=constraint):
                w = variable_scope.get_variable("w", [])
                self.assertEqual(w.constraint, constraint)
    def testVarScopeDType(self):
        """A scope-level dtype becomes the default dtype of new variables."""
        with self.test_session():
            with variable_scope.variable_scope("tower") as tower:
                with variable_scope.variable_scope("foo", dtype=dtypes.float16):
                    v = variable_scope.get_variable("v", [])
                    self.assertEqual(v.dtype.base_dtype, dtypes.float16)
                with variable_scope.variable_scope(tower, dtype=dtypes.float16):
                    w = variable_scope.get_variable("w", [])
                    self.assertEqual(w.dtype.base_dtype, dtypes.float16)
    def testInitFromNonTensorValue(self):
        """Python scalars and numpy arrays are valid initializer values."""
        with self.test_session() as sess:
            v = variable_scope.get_variable("v", initializer=4, dtype=dtypes.int32)
            sess.run(variables_lib.initialize_variables([v]))
            self.assertAllClose(v.eval(), 4)

            w = variable_scope.get_variable(
                "w", initializer=numpy.array([1, 2, 3]), dtype=dtypes.int64)
            sess.run(variables_lib.initialize_variables([w]))
            self.assertAllClose(w.eval(), [1, 2, 3])

            # A dict is not a convertible initializer value.
            with self.assertRaises(TypeError):
                variable_scope.get_variable("x", initializer={})
    def testInitFromNonInitializer(self):
        """Default initialization matches zeros_initializer for integer dtypes."""
        with self.test_session():
            # Test various dtypes with zeros initializer as following:
            types = [
                dtypes.int8, dtypes.uint8, dtypes.int16, dtypes.uint16, dtypes.int32,
                dtypes.int64, dtypes.bool
            ]

            # Use different variable_name to distinguish various dtypes
            for (i, dtype) in enumerate(types):
                x = variable_scope.get_variable(
                    name="x%d" % i, shape=(3, 4), dtype=dtype)
                y = variable_scope.get_variable(
                    name="y%d" % i,
                    shape=(3, 4),
                    dtype=dtype,
                    initializer=init_ops.zeros_initializer(dtype=dtype))
                variables_lib.global_variables_initializer().run()
                self.assertAllEqual(x.eval(), y.eval())
    def testVarScopeCachingDevice(self):
        """caching_device is inherited, overridable, and settable post hoc."""
        with self.test_session():
            caching_device = "/job:moo"
            with variable_scope.variable_scope("tower"):
                with variable_scope.variable_scope(
                        "caching", caching_device=caching_device):
                    v = variable_scope.get_variable("v", [])
                    self.assertTrue(v.value().device.startswith(caching_device))

                    # Nested scopes inherit the caching device.
                    with variable_scope.variable_scope("child"):
                        v2 = variable_scope.get_variable("v", [])
                        self.assertTrue(v2.value().device.startswith(caching_device))

                    # An empty caching_device disables caching.
                    with variable_scope.variable_scope("not_cached", caching_device=""):
                        v2_not_cached = variable_scope.get_variable("v", [])
                        self.assertFalse(v2_not_cached.value().device.startswith(
                            caching_device))

                    # A callable that returns the op's own device also
                    # effectively disables caching.
                    with variable_scope.variable_scope(
                            "not_cached_identity_device",
                            caching_device=lambda op: op.device):
                        v2_identity_device = variable_scope.get_variable("v", [])
                        self.assertFalse(v2_identity_device.value().device.startswith(
                            caching_device))

                    # set_caching_device() works on a live scope object.
                    with variable_scope.variable_scope("we_will_do_it_live") as vs_live:
                        vs_live.set_caching_device("/job:live")
                        v_live = variable_scope.get_variable("v", [])
                        self.assertTrue(v_live.value().device.startswith("/job:live"))

                # Outside the caching scope nothing is cached.
                v_tower = variable_scope.get_variable("v", [])
                self.assertFalse(v_tower.value().device.startswith(caching_device))
    def testVarScopeRegularizer(self):
        """Scope regularizers add losses; no_regularizer disables them."""
        with self.test_session() as sess:
            init = init_ops.constant_initializer(0.3)

            def regularizer1(v):
                return math_ops.reduce_mean(v) + 0.1

            def regularizer2(v):
                return math_ops.reduce_mean(v) + 0.2

            with variable_scope.variable_scope(
                    "tower", regularizer=regularizer1) as tower:
                with variable_scope.variable_scope("foo", initializer=init):
                    v = variable_scope.get_variable("v", [])
                    sess.run(variables_lib.initialize_variables([v]))
                    losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
                    self.assertEqual(1, len(losses))
                    self.assertAllClose(losses[0].eval(), 0.4)
                with variable_scope.variable_scope(tower, initializer=init) as vs:
                    u = variable_scope.get_variable("u", [])
                    vs.set_regularizer(regularizer2)
                    w = variable_scope.get_variable("w", [])
                    # Next 3 variable not regularized to test disabling regularization.
                    x = variable_scope.get_variable(
                        "x", [], regularizer=variable_scope.no_regularizer)
                    with variable_scope.variable_scope(
                            "baz", regularizer=variable_scope.no_regularizer):
                        y = variable_scope.get_variable("y", [])
                    vs.set_regularizer(variable_scope.no_regularizer)
                    z = variable_scope.get_variable("z", [])
                    # Check results.
                    losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
                    self.assertEqual(3, len(losses))
                    sess.run(variables_lib.initialize_variables([u, w, x, y, z]))
                    self.assertAllClose(losses[0].eval(), 0.4)
                    self.assertAllClose(losses[1].eval(), 0.4)
                    self.assertAllClose(losses[2].eval(), 0.5)
                with variable_scope.variable_scope("foo", reuse=True):
                    v = variable_scope.get_variable("v",
                                                    [])  # "v" is already there, reused
                    losses = ops.get_collection(ops.GraphKeys.REGULARIZATION_LOSSES)
                    self.assertEqual(3, len(losses))  # No new loss added.
    def testInitializeFromValue(self):
        """Tensor initializers fix value, shape and dtype of new variables."""
        with self.test_session() as sess:
            init = constant_op.constant(0.1)
            w = variable_scope.get_variable("v", initializer=init)
            sess.run(variables_lib.initialize_variables([w]))
            self.assertAllClose(w.eval(), 0.1)

            with self.assertRaisesRegexp(ValueError, "shape"):
                # We disallow explicit shape specification when initializer is
                # constant.
                variable_scope.get_variable("u", [1], initializer=init)

            with variable_scope.variable_scope("foo", initializer=init):
                # Constant initializer can be passed through scopes if needed.
                v = variable_scope.get_variable("v")
                sess.run(variables_lib.initialize_variables([v]))
                self.assertAllClose(v.eval(), 0.1)

            # Check that non-float32 initializer creates a non-float32 variable.
            init = constant_op.constant(1, dtype=dtypes.int32)
            t = variable_scope.get_variable("t", initializer=init)
            self.assertEqual(t.dtype.base_dtype, dtypes.int32)

            # Raise error if `initializer` dtype and `dtype` are not identical.
            with self.assertRaisesRegexp(ValueError, "don't match"):
                variable_scope.get_variable("s", initializer=init, dtype=dtypes.float64)
    def testControlDeps(self):
        """A control dep on another variable must not gate initialization."""
        with self.test_session() as sess:
            v0 = variable_scope.get_variable(
                "v0", [1], initializer=init_ops.constant_initializer(0))
            with ops.control_dependencies([v0.value()]):
                v1 = variable_scope.get_variable(
                    "v1", [1], initializer=init_ops.constant_initializer(1))
                add = v1 + v0
            # v0 should be uninitialized.
            with self.assertRaisesRegexp(errors_impl.OpError, "uninitialized"):
                sess.run(v0)
            # We should be able to initialize and run v1 without initializing
            # v0, even if the variable was created with a control dep on v0.
            sess.run(v1.initializer)
            self.assertEqual(1, sess.run(v1))
            # v0 should still be uninitialized.
            with self.assertRaisesRegexp(errors_impl.OpError, "uninitialized"):
                sess.run(v0)
            with self.assertRaisesRegexp(errors_impl.OpError, "uninitialized"):
                sess.run(add)
            # If we initialize v0 we should be able to run 'add'.
            sess.run(v0.initializer)
            sess.run(add)
    def testControlFlow(self):
        """get_variable inside cond() branches creates independent variables."""
        with self.test_session() as sess:
            v0 = variable_scope.get_variable(
                "v0", [], initializer=init_ops.constant_initializer(0))
            var_dict = {}

            # Call get_variable in each of the cond clauses.
            def var_in_then_clause():
                v1 = variable_scope.get_variable(
                    "v1", [1], initializer=init_ops.constant_initializer(1))
                var_dict["v1"] = v1
                return v1 + v0

            def var_in_else_clause():
                v2 = variable_scope.get_variable(
                    "v2", [1], initializer=init_ops.constant_initializer(2))
                var_dict["v2"] = v2
                return v2 + v0

            add = control_flow_ops.cond(
                math_ops.less(v0, 10), var_in_then_clause, var_in_else_clause)
            v1 = var_dict["v1"]
            v2 = var_dict["v2"]
            # We should be able to initialize and run v1 and v2 without initializing
            # v0, even if the variable was created with a control dep on v0.
            sess.run(v1.initializer)
            self.assertEqual([1], sess.run(v1))
            sess.run(v2.initializer)
            self.assertEqual([2], sess.run(v2))
            # v0 should still be uninitialized.
            with self.assertRaisesRegexp(errors_impl.OpError, "uninitialized"):
                sess.run(v0)
            # We should not be able to run 'add' yet.
            with self.assertRaisesRegexp(errors_impl.OpError, "uninitialized"):
                sess.run(add)
            # If we initialize v0 we should be able to run 'add'.
            sess.run(v0.initializer)
            sess.run(add)
    def testGetVariableScope(self):
        # Test the get_variable_scope() function and setting properties of result.
        with self.test_session() as sess:
            init = init_ops.constant_initializer(0.3)
            with variable_scope.variable_scope("foo"):
                new_init1 = variable_scope.get_variable_scope().initializer
                self.assertEqual(new_init1, None)
                # Check that we can set initializer like this.
                variable_scope.get_variable_scope().set_initializer(init)
                v = variable_scope.get_variable("v", [])
                sess.run(variables_lib.initialize_variables([v]))
                self.assertAllClose(v.eval(), 0.3)
                # Check that we can set reuse.
                variable_scope.get_variable_scope().reuse_variables()
                with self.assertRaises(ValueError):  # Fail, w does not exist yet.
                    variable_scope.get_variable("w", [1])
            # Check that the set initializer goes away.
            new_init = variable_scope.get_variable_scope().initializer
            self.assertEqual(new_init, None)
    def testVarScope(self):
        """Scope names nest for variables but name scopes uniquify per entry."""
        with self.test_session():
            with variable_scope.variable_scope("tower") as tower:
                self.assertEqual(tower.name, "tower")
                with ops.name_scope("scope") as sc:
                    self.assertEqual(sc, "tower/scope/")

            with variable_scope.variable_scope("foo"):
                with variable_scope.variable_scope("bar") as bar:
                    self.assertEqual(bar.name, "foo/bar")
                    with ops.name_scope("scope") as sc:
                        self.assertEqual(sc, "foo/bar/scope/")

            with variable_scope.variable_scope("foo"):
                # Re-entering the captured scope keeps its variable name but
                # the surrounding name scope is uniquified ("foo_1").
                with variable_scope.variable_scope(tower, reuse=True) as tower_shared:
                    self.assertEqual(tower_shared.name, "tower")
                    with ops.name_scope("scope") as sc:
                        self.assertEqual(sc, "foo_1/tower/scope/")
    def testVarScopeNameScope(self):
        """Re-entering a variable scope uniquifies its name scope each time."""
        with self.test_session():
            with ops.name_scope("scope1"):
                with variable_scope.variable_scope("tower") as tower:
                    with ops.name_scope("scope2") as sc2:
                        self.assertEqual(sc2, "scope1/tower/scope2/")
                with variable_scope.variable_scope(
                        tower):  # Re-entering acts like another "tower".
                    with ops.name_scope("scope2") as sc2:
                        self.assertEqual(sc2, "scope1/tower_1/scope2/")
                with variable_scope.variable_scope(
                        "tower"):  # Re-entering by string acts the same.
                    with ops.name_scope("scope2") as sc2:
                        self.assertEqual(sc2, "scope1/tower_2/scope2/")

            with ops.name_scope("scope3"):
                with variable_scope.variable_scope("tower"):
                    with ops.name_scope("scope2") as sc2:
                        self.assertEqual(sc2, "scope3/tower/scope2/")
                with variable_scope.variable_scope(tower):
                    with ops.name_scope("scope2") as sc2:
                        self.assertEqual(sc2, "scope3/tower_1/scope2/")

            root_var_scope = variable_scope.get_variable_scope()
            with ops.name_scope("scope4"):
                # Entering the root scope adds no path component.
                with variable_scope.variable_scope(root_var_scope):
                    with ops.name_scope("scope2") as sc2:
                        self.assertEqual(sc2, "scope4/scope2/")
  def testVarScopeOriginalNameScope(self):
    """original_name_scope is preserved when a VariableScope is re-entered."""
    with self.test_session():
      with ops.name_scope("scope1"):
        with variable_scope.variable_scope("tower") as tower:
          self.assertEqual(tower.original_name_scope, "scope1/tower/")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "scope1/tower/scope2/")
      with ops.name_scope("scope2"):
        with variable_scope.variable_scope(tower) as tower1:
          # Re-entering preserves original name scope.
          self.assertEqual(tower1.original_name_scope, "scope1/tower/")
          with ops.name_scope("foo") as sc2:
            self.assertEqual(sc2, "scope2/tower/foo/")
        # Test re-entering original name scope.
        with ops.name_scope(tower.original_name_scope):
          with ops.name_scope("bar") as sc3:
            self.assertEqual(sc3, "scope1/tower/bar/")
      with ops.name_scope("scope2"):
        with variable_scope.variable_scope(tower):
          with ops.name_scope(tower.original_name_scope):
            with ops.name_scope("bar") as sc3:
              self.assertEqual(sc3, "scope1/tower/bar_1/")
  def testVarScopeObjectReuse(self):
    """reuse inherited from a captured scope can be kept but never cleared."""
    with self.test_session():
      vs = None
      with variable_scope.variable_scope("jump", reuse=True) as scope:
        vs = scope

      with variable_scope.variable_scope(vs) as jump:
        self.assertTrue(jump.reuse)

      with variable_scope.variable_scope(vs, reuse=True) as jump_reuse:
        self.assertTrue(jump_reuse.reuse)

      with variable_scope.variable_scope(vs, reuse=False) as jump_no_reuse:
        self.assertTrue(jump_no_reuse.reuse)  # Inherited, cannot be undone.

      with variable_scope.variable_scope("jump", reuse=False) as scope:
        vs = scope

      with variable_scope.variable_scope(vs) as jump:
        self.assertFalse(jump.reuse)

      with variable_scope.variable_scope(vs, reuse=True) as jump_reuse:
        self.assertTrue(jump_reuse.reuse)

      with variable_scope.variable_scope(vs, reuse=False) as jump_no_reuse:
        self.assertFalse(jump_no_reuse.reuse)
  def testVarScopeGetOrCreateReuse(self):
    """AUTO_REUSE creates the variable on first use and reuses it afterwards."""
    x = array_ops.placeholder(dtypes.float32)
    with variable_scope.variable_scope("bar",
                                       reuse=variable_scope.AUTO_REUSE):
      v_assign = state_ops.assign(variable_scope.get_variable("var", []), x)
    with variable_scope.variable_scope("bar",
                                       reuse=variable_scope.AUTO_REUSE):
      v = variable_scope.get_variable("var", [])
    with self.test_session() as sess:

      def test_value(value):
        # Assign through the first handle, read back through the second; both
        # must refer to the same underlying variable.
        sess.run(v_assign, feed_dict={x: value})
        self.assertEqual(value, v.eval())

      test_value(42)  # Variable is created.
      test_value(13)  # Variable is reused hereafter.
      test_value(17)
  def testVarOpScope(self):
    """variable_scope(name, default_name) falls back to default when name=None."""
    with self.test_session():
      with ops.name_scope("scope1"):
        with variable_scope.variable_scope("tower", "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "tower/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "scope1/tower/scope2/")
        with variable_scope.variable_scope("tower", "default", []):
          # Same explicit name again: "w" already exists, so creation fails.
          with self.assertRaises(ValueError):
            variable_scope.get_variable("w", [])
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "scope1/tower_1/scope2/")

      with ops.name_scope("scope2"):
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "scope2/default/scope2/")
        with variable_scope.variable_scope(None, "default", []):
          # Default names are uniquified per use: "default_1".
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "default_1/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "scope2/default_1/scope2/")
  def testVarOpScopeUniqueNamesInterleavedSubstringScopes(self):
    """Default-name uniquification counts per exact name, even interleaved."""
    with self.test_session():
      with variable_scope.variable_scope(None, "defaultScope1"):
        with variable_scope.variable_scope(None, "layer"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name,
              "defaultScope1/layer/w:0")
      with variable_scope.variable_scope(None, "defaultScope1"):
        with variable_scope.variable_scope(None, "layer"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name,
              "defaultScope1_1/layer/w:0")
      # "defaultScope" is a substring of "defaultScope1" but gets its own
      # counter.
      with variable_scope.variable_scope(None, "defaultScope"):
        with variable_scope.variable_scope(None, "layer"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name,
              "defaultScope/layer/w:0")
      with variable_scope.variable_scope(None, "defaultScope1"):
        with variable_scope.variable_scope(None, "layer"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name,
              "defaultScope1_2/layer/w:0")
  def testVarOpScopeUniqueNamesWithJump(self):
    """Uniquification of default names continues after re-entering a scope."""
    with self.test_session():
      with variable_scope.variable_scope("default") as default:
        with variable_scope.variable_scope(None, "layer"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name,
              "default/layer/w:0")
        with variable_scope.variable_scope(None, "layer"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name,
              "default/layer_1/w:0")
        with variable_scope.variable_scope(default):
          pass
        # No matter the jump in the middle, unique numbering continues.
        with variable_scope.variable_scope(None, "layer"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name,
              "default/layer_2/w:0")
  def testVarOpScopeReuse(self):
    """Re-entering an outer scope with reuse=True reuses nested variables."""
    with self.test_session():
      with variable_scope.variable_scope("outer") as outer:
        with variable_scope.variable_scope("tower", "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/tower/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/tower/scope2/")
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/default/scope2/")

      with variable_scope.variable_scope(outer, reuse=True) as outer:
        # Variable names are reused, but the name scope is uniquified
        # ("outer_1/...").
        with variable_scope.variable_scope("tower", "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/tower/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/tower/scope2/")
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/default/scope2/")
def testVarScopeGetVar(self):
with self.test_session():
with variable_scope.variable_scope("root"):
with variable_scope.variable_scope("towerA") as tower_a:
va = variable_scope.get_variable("v", [1])
self.assertEqual(va.name, "root/towerA/v:0")
with variable_scope.variable_scope(tower_a, reuse=True):
va2 = variable_scope.get_variable("v", [1])
self.assertEqual(va2, va)
with variable_scope.variable_scope("towerB"):
vb = variable_scope.get_variable("v", [1])
self.assertEqual(vb.name, "root/towerB/v:0")
with self.assertRaises(ValueError):
with variable_scope.variable_scope("towerA"):
va2 = variable_scope.get_variable("v", [1])
with variable_scope.variable_scope("towerA", reuse=True):
va2 = variable_scope.get_variable("v", [1])
self.assertEqual(va2, va)
with variable_scope.variable_scope("foo"):
with variable_scope.variable_scope("bar"):
v = variable_scope.get_variable("v", [1])
self.assertEqual(v.name, "root/foo/bar/v:0")
with variable_scope.variable_scope(tower_a, reuse=True):
va3 = variable_scope.get_variable("v", [1])
self.assertEqual(va, va3)
with self.assertRaises(ValueError):
with variable_scope.variable_scope(tower_a, reuse=True):
with variable_scope.variable_scope("baz"):
variable_scope.get_variable("v", [1])
with self.assertRaises(ValueError) as exc:
with variable_scope.variable_scope(tower_a, reuse=True):
variable_scope.get_variable("v", [2]) # Different shape.
self.assertEqual("shape" in str(exc.exception), True)
with self.assertRaises(ValueError) as exc:
with variable_scope.variable_scope(tower_a, reuse=True):
variable_scope.get_variable("v", [1], dtype=dtypes.int32)
self.assertEqual("dtype" in str(exc.exception), True)
  def testVarScopeOuterScope(self):
    """Re-entering a previously created scope outside its original context."""
    with self.test_session():
      with variable_scope.variable_scope("outer") as outer:
        pass
      with variable_scope.variable_scope(outer):
        self.assertEqual(variable_scope.get_variable("w", []).name, "outer/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_1/scope2/")
        with variable_scope.variable_scope("default"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/default/scope2/")

      with variable_scope.variable_scope(outer, reuse=True):
        self.assertEqual(variable_scope.get_variable("w", []).name, "outer/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_2/scope2/")
        with variable_scope.variable_scope("default", reuse=True):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_2/default/scope2/")
  def testVarScopeNestedOuterScope(self):
    """Re-entering a scope while still nested inside it."""
    with self.test_session():
      with variable_scope.variable_scope("outer") as outer:
        with variable_scope.variable_scope(outer):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/outer/scope2/")
        with variable_scope.variable_scope("default"):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/default/scope2/")

        with variable_scope.variable_scope(outer, reuse=True):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/outer_1/scope2/")
        with variable_scope.variable_scope("default", reuse=True):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/default_1/scope2/")
  def testVarOpScopeReuseParam(self):
    """reuse can come from the scope argument or from reuse_variables()."""
    with self.test_session():
      with variable_scope.variable_scope("outer") as outer:
        with variable_scope.variable_scope("tower", "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/tower/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/tower/scope2/")
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/default/scope2/")

      with variable_scope.variable_scope(outer) as outer:
        with variable_scope.variable_scope("tower", "default", reuse=True):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/tower/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/tower/scope2/")
        # Switching the whole scope to reuse mode also enables reuse below.
        outer.reuse_variables()
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/default/scope2/")
  def testVarOpScopeReuseError(self):
    """reuse=True when the variable was never created raises ValueError."""
    with self.test_session():
      with self.assertRaises(ValueError):
        with variable_scope.variable_scope(None, "default", reuse=True):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/tower/w:0")
  def testVarOpScopeOuterScope(self):
    """Re-entering an outer scope with a default_name argument."""
    with self.test_session():
      with variable_scope.variable_scope("outer") as outer:
        pass
      with variable_scope.variable_scope(outer, "default", []):
        self.assertEqual(variable_scope.get_variable("w", []).name, "outer/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_1/scope2/")
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/default/scope2/")

      with variable_scope.variable_scope(outer, "default", reuse=True):
        self.assertEqual(variable_scope.get_variable("w", []).name, "outer/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_2/scope2/")
        outer.reuse_variables()
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_2/default/scope2/")
  def testVarOpScopeNestedOuterScope(self):
    """Re-entering a scope (with default_name) while nested inside it."""
    with self.test_session():
      with variable_scope.variable_scope("outer") as outer:
        with variable_scope.variable_scope(outer, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/outer/scope2/")
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer/default/scope2/")

      with variable_scope.variable_scope(outer, "default", reuse=True):
        self.assertEqual(variable_scope.get_variable("w", []).name, "outer/w:0")
        with ops.name_scope("scope2") as sc2:
          self.assertEqual(sc2, "outer_1/scope2/")
        with variable_scope.variable_scope(None, "default", []):
          self.assertEqual(
              variable_scope.get_variable("w", []).name, "outer/default/w:0")
          with ops.name_scope("scope2") as sc2:
            self.assertEqual(sc2, "outer_1/default/scope2/")
  def testGetLocalVar(self):
    """get_local_variable respects naming, collections and reuse."""
    with self.test_session():
      # Check that local variable respects naming.
      with variable_scope.variable_scope("outer") as outer:
        with variable_scope.variable_scope(outer, "default", []):
          local_var = variable_scope.get_local_variable(
              "w", [], collections=["foo"])
          self.assertEqual(local_var.name, "outer/w:0")

      # Since variable is local, it should be in the local variable collection
      # but not the trainable collection.
      self.assertIn(local_var,
                    ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES))
      self.assertIn(local_var, ops.get_collection("foo"))
      self.assertNotIn(local_var,
                       ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES))

      # Check that local variable respects `reuse`.
      with variable_scope.variable_scope(outer, "default", reuse=True):
        self.assertEqual(
            variable_scope.get_local_variable("w", []).name, "outer/w:0")
  def testGetVarWithDevice(self):
    """Device functions see variable ops (with their dtypes) at creation time."""
    g = ops.Graph()
    varname_type = []

    def device_func(op):
      # Record every variable-creating op the placer asks about.
      if op.type in ["Variable", "VariableV2", "VarHandleOp"]:
        varname_type.append((op.name, op.get_attr("dtype")))
      return "/device:GPU:0"

    with g.as_default():
      with ops.device(device_func):
        _ = variable_scope.get_variable("x", (100, 200))
        _ = variable_scope.get_variable(
            "y", dtype=dtypes.int64, initializer=numpy.arange(73))
    self.assertEqual(varname_type[0], ("x", dtypes.float32))
    self.assertEqual(varname_type[1], ("y", dtypes.int64))
  def testGetCollection(self):
    """scope.get_collection filters graph collections by scope prefix."""
    with self.test_session():
      _ = variable_scope.get_variable("a", [])
      _ = variable_scope.get_variable("b", [], trainable=False)
      with variable_scope.variable_scope("foo_") as scope1:
        _ = variable_scope.get_variable("a", [])
        _ = variable_scope.get_variable("b", [], trainable=False)
        self.assertEqual([
            v.name
            for v in scope1.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
        ], ["foo_/a:0"])
        self.assertEqual([
            v.name
            for v in scope1.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
        ], ["foo_/a:0", "foo_/b:0"])
      # "foo" is a prefix of "foo_": make sure filtering is exact per scope.
      with variable_scope.variable_scope("foo") as scope2:
        _ = variable_scope.get_variable("a", [])
        _ = variable_scope.get_variable("b", [], trainable=False)
        self.assertEqual([
            v.name
            for v in scope2.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
        ], ["foo/a:0"])
        self.assertEqual([
            v.name
            for v in scope2.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
        ], ["foo/a:0", "foo/b:0"])
      scope = variable_scope.get_variable_scope()
      self.assertEqual([
          v.name for v in scope.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
      ], ["a:0", "b:0", "foo_/a:0", "foo_/b:0", "foo/a:0", "foo/b:0"])
      self.assertEqual([
          v.name
          for v in scope.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
      ], ["a:0", "foo_/a:0", "foo/a:0"])
  def testGetTrainableVariables(self):
    """scope.trainable_variables() excludes trainable=False variables."""
    with self.test_session():
      _ = variable_scope.get_variable("a", [])
      with variable_scope.variable_scope("foo") as scope:
        _ = variable_scope.get_variable("b", [])
        _ = variable_scope.get_variable("c", [], trainable=False)
        self.assertEqual([v.name
                          for v in scope.trainable_variables()], ["foo/b:0"])
  def testGetGlobalVariables(self):
    """scope.global_variables() lists only variables created in the scope."""
    with self.test_session():
      _ = variable_scope.get_variable("a", [])
      with variable_scope.variable_scope("foo") as scope:
        _ = variable_scope.get_variable("b", [])
        self.assertEqual([v.name
                          for v in scope.global_variables()], ["foo/b:0"])
  def testGetLocalVariables(self):
    """scope.local_variables() lists only LOCAL_VARIABLES-collection vars."""
    with self.test_session():
      _ = variable_scope.get_variable(
          "a", [], collections=[ops.GraphKeys.LOCAL_VARIABLES])
      with variable_scope.variable_scope("foo") as scope:
        _ = variable_scope.get_variable(
            "b", [], collections=[ops.GraphKeys.LOCAL_VARIABLES])
        _ = variable_scope.get_variable(
            "c", [])
        self.assertEqual([v.name
                          for v in scope.local_variables()], ["foo/b:0"])
  def testGetVariableWithRefDtype(self):
    """get_variable accepts a _ref dtype (e.g. another variable's .dtype)."""
    v = variable_scope.get_variable("v", shape=[3, 4], dtype=dtypes.float32)
    # Ensure it is possible to do get_variable with a _ref dtype passed in.
    _ = variable_scope.get_variable("w", shape=[5, 6], dtype=v.dtype)
def axis0_into1_partitioner(shape=None, **unused_kwargs):
  """Partitioner leaving every axis of `shape` in a single partition."""
  return [1 for _ in shape]
def axis0_into2_partitioner(shape=None, **unused_kwargs):
  """Partitioner splitting axis 0 of `shape` into two partitions."""
  partitions = [1 for _ in shape]
  partitions[0] = 2
  return partitions
def axis0_into3_partitioner(shape=None, **unused_kwargs):
  """Partitioner splitting axis 0 of `shape` into three partitions."""
  partitions = [1 for _ in shape]
  partitions[0] = 3
  return partitions
class VariableScopeWithPartitioningTest(test.TestCase):
  """Tests for get_variable/variable_scope when a partitioner is set."""

  def testInitFromNonInitializer(self):
    """Partitioned variables of integer/bool dtypes default to zeros init."""
    with self.test_session() as sess:
      # Test various dtypes with zeros initializer as following:
      types = [
          dtypes.int8, dtypes.uint8, dtypes.int16, dtypes.uint16, dtypes.int32,
          dtypes.int64, dtypes.bool
      ]

      # Use different variable_name to distinguish various dtypes
      for (i, dtype) in enumerate(types):
        x = variable_scope.get_variable(
            name="x%d" % i,
            shape=(3, 4),
            dtype=dtype,
            partitioner=axis0_into2_partitioner)
        y = variable_scope.get_variable(
            name="y%d" % i,
            shape=(6, 4),
            dtype=dtype,
            partitioner=axis0_into2_partitioner,
            initializer=init_ops.zeros_initializer(dtype=dtype))

        variables_lib.global_variables_initializer().run()
        # x and y would become var list after partition
        val_x = sess.run(list(x))
        val_y = sess.run(list(y))

        self.assertAllEqual(val_x, val_y)

  def testResultNameMatchesRequested(self):
    """A partitioned variable keeps the requested name; parts get suffixes."""
    with variable_scope.variable_scope(
        "scope0", partitioner=axis0_into2_partitioner):
      v = variable_scope.get_variable("name0", shape=(3, 1, 1))
      self.assertEqual(v.name, "scope0/name0")
      v_concat = v.as_tensor()
      self.assertEqual(v_concat.name, "scope0/name0:0")
      variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
      self.assertIn("scope0/name0/part_0:0", [x.name for x in variables])
      self.assertIn("scope0/name0/part_1:0", [x.name for x in variables])
      self.assertNotIn("scope0/name0/part_2:0", [x.name for x in variables])

  def testBreaksIfPartitioningChanges(self):
    """Reuse with a different partitioning than at creation fails."""
    with variable_scope.variable_scope(
        "scope0", partitioner=axis0_into2_partitioner):
      variable_scope.get_variable("name0", shape=(3, 1, 1))

    with variable_scope.variable_scope(
        "scope0", partitioner=axis0_into3_partitioner, reuse=True):
      with self.assertRaisesRegexp(
          ValueError,
          "Trying to reuse partitioned variable .* but specified partitions .* "
          "and found partitions .*"):
        variable_scope.get_variable("name0", shape=(3, 1, 1))
    with variable_scope.variable_scope(
        "scope0", partitioner=axis0_into1_partitioner, reuse=True):
      with self.assertRaisesRegexp(
          ValueError,
          "Trying to reuse partitioned variable .* but specified partitions .* "
          "and found partitions .*"):
        variable_scope.get_variable("name0", shape=(3, 1, 1))

  def testReturnsExistingConcatenatedValueIfReuse(self):
    """Reusing a partitioned variable returns the same object."""
    with variable_scope.variable_scope(
        "scope0", partitioner=axis0_into2_partitioner):
      v_concat = variable_scope.get_variable("name0", shape=(3, 1, 1))
      variable_scope.get_variable_scope().reuse_variables()
      v_concat_2 = variable_scope.get_variable("name0", shape=(3, 1, 1))
      self.assertEqual(v_concat, v_concat_2)

  def testAllowsReuseWithoutPartitioner(self):
    """A partitioned variable can be reused without re-stating the partitioner."""
    with variable_scope.variable_scope(
        "scope0", partitioner=axis0_into2_partitioner):
      v = variable_scope.get_variable("name0", shape=(3, 1, 1))
    with variable_scope.variable_scope("scope0", reuse=True):
      v_reused = variable_scope.get_variable("name0")
    self.assertEqual(v, v_reused)

  def testPropagatePartitionerOnReopening(self):
    """Re-entering a scope object keeps its partitioner."""
    with variable_scope.variable_scope(
        "scope0", partitioner=axis0_into2_partitioner) as vs:
      self.assertEqual(axis0_into2_partitioner, vs.partitioner)
      with variable_scope.variable_scope(vs) as vs1:
        self.assertEqual(axis0_into2_partitioner, vs1.partitioner)

  def testScalarIgnoresPartitioner(self):
    """Scalar variables are never partitioned, even with a partitioner set."""
    with variable_scope.variable_scope(
        "scope0", partitioner=axis0_into2_partitioner):
      v = variable_scope.get_variable("name0", shape=())
      self.assertEqual(v.name, "scope0/name0:0")
      variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
      self.assertIn("scope0/name0:0", [x.name for x in variables])

  def _testPartitionConcatenatesAlongCorrectAxis(self, use_resource):
    """Helper: parts are sliced along the axis chosen by the partitioner."""

    def _part_axis_0(**unused_kwargs):
      return (2, 1, 1)

    def _part_axis_1(**unused_kwargs):
      return (1, 2, 1)

    with variable_scope.variable_scope("root", use_resource=use_resource):
      v0 = variable_scope.get_variable(
          "n0", shape=(2, 2, 2), partitioner=_part_axis_0)
      v1 = variable_scope.get_variable(
          "n1", shape=(2, 2, 2), partitioner=_part_axis_1)

    self.assertEqual(v0.get_shape(), (2, 2, 2))
    self.assertEqual(v1.get_shape(), (2, 2, 2))

    n0_0 = list(v0)[0]
    n0_1 = list(v0)[1]
    self.assertEqual(n0_0.get_shape(), (1, 2, 2))
    self.assertEqual(n0_1.get_shape(), (1, 2, 2))

    n1_0 = list(v1)[0]
    n1_1 = list(v1)[1]
    self.assertEqual(n1_0.get_shape(), (2, 1, 2))
    self.assertEqual(n1_1.get_shape(), (2, 1, 2))

  def testPartitionConcatenatesAlongCorrectAxis(self):
    self._testPartitionConcatenatesAlongCorrectAxis(use_resource=False)

  def testPartitionConcatenatesAlongCorrectAxisResource(self):
    self._testPartitionConcatenatesAlongCorrectAxis(use_resource=True)
class VariableScopeWithCustomGetterTest(test.TestCase):
  """Tests for the custom_getter hook of variable scopes."""

  def testNonCallableGetterFails(self):
    """A non-callable custom_getter raises ValueError."""
    with self.assertRaisesRegexp(ValueError, r"custom_getter .* not callable:"):
      with variable_scope.variable_scope("scope0", custom_getter=3):
        variable_scope.get_variable("name0")
    with self.assertRaisesRegexp(ValueError, r"custom_getter .* not callable:"):
      variable_scope.get_variable("name0", custom_getter=3)

  def testNoSideEffectsWithIdentityCustomGetter(self):
    """An identity custom_getter does not change creation/reuse semantics."""
    called = [0]

    def custom_getter(getter, *args, **kwargs):
      called[0] += 1
      return getter(*args, **kwargs)

    with variable_scope.variable_scope(
        "scope", custom_getter=custom_getter) as scope:
      v = variable_scope.get_variable("v", [1])
    with variable_scope.variable_scope(scope, reuse=True):
      v2 = variable_scope.get_variable("v", [1])
    with variable_scope.variable_scope("new_scope") as new_scope:
      v3 = variable_scope.get_variable("v3", [1])
    with variable_scope.variable_scope(
        new_scope, reuse=True, custom_getter=custom_getter):
      v4 = variable_scope.get_variable("v3", [1])

    self.assertEqual(v, v2)
    self.assertEqual(v3, v4)
    self.assertEqual(3, called[0])  # skipped one in the first new_scope

  def testCustomGetterWithReuse(self):
    """A custom getter can distinguish created vs. reused variables."""
    # Custom getter can choose to behave differently on reused variables.
    def custom_getter(getter, *args, **kwargs):
      var = getter(*args, **kwargs)
      if kwargs["reuse"]:
        # This can be used, e.g., for changing the caching device if needed.
        return array_ops.identity(var, name="reused")
      else:
        return array_ops.identity(var, name="not_reused")

    with variable_scope.variable_scope(
        "scope", custom_getter=custom_getter) as scope:
      v = variable_scope.get_variable("v", [1])
    with variable_scope.variable_scope(scope, reuse=True):
      v2 = variable_scope.get_variable("v", [1])

    self.assertEqual(v.name, "not_reused:0")
    self.assertEqual(v2.name, "reused:0")

  def testGetterThatCreatesTwoVariablesAndSumsThem(self):
    """A custom getter may create several variables and combine them."""

    def custom_getter(getter, name, *args, **kwargs):
      g_0 = getter("%s/0" % name, *args, **kwargs)
      g_1 = getter("%s/1" % name, *args, **kwargs)
      with ops.name_scope("custom_getter"):
        return g_0 + g_1

    with variable_scope.variable_scope("scope", custom_getter=custom_getter):
      v = variable_scope.get_variable("v", [1, 2, 3])
    self.assertEqual([1, 2, 3], v.get_shape())
    true_vars = variables_lib.trainable_variables()
    self.assertEqual(2, len(true_vars))
    self.assertEqual("scope/v/0:0", true_vars[0].name)
    self.assertEqual("scope/v/1:0", true_vars[1].name)
    self.assertEqual("custom_getter/add:0", v.name)
    with self.test_session() as sess:
      variables_lib.global_variables_initializer().run()
      np_vars, np_v = sess.run([true_vars, v])
      self.assertAllClose(np_v, sum(np_vars))

  def testNestedCustomGetters(self):
    """Nested scopes compose their custom getters."""

    def sum_getter(getter, name, *args, **kwargs):
      g_0 = getter("%s/sum_0" % name, *args, **kwargs)
      g_1 = getter("%s/sum_1" % name, *args, **kwargs)
      with ops.name_scope("sum_getter"):
        return g_0 + g_1

    def prod_getter(getter, name, *args, **kwargs):
      g_0 = getter("%s/prod_0" % name, *args, **kwargs)
      g_1 = getter("%s/prod_1" % name, *args, **kwargs)
      with ops.name_scope("prod_getter"):
        return g_0 * g_1

    with variable_scope.variable_scope(
        "prod_scope", custom_getter=prod_getter):
      with variable_scope.variable_scope(
          "sum_scope", custom_getter=sum_getter):
        with variable_scope.variable_scope(
            "inner_sum_scope", custom_getter=sum_getter):
          # take sums of sums of products
          v = variable_scope.get_variable("v", [1, 2, 3])

    self.assertEqual([1, 2, 3], v.get_shape())
    true_vars = variables_lib.trainable_variables()
    self.assertEqual(8, len(true_vars))
    template = (
        "prod_scope/sum_scope/inner_sum_scope/v/sum_%d/sum_%d/prod_%d:0")
    self.assertEqual(template % (0, 0, 0), true_vars[0].name)
    self.assertEqual(template % (0, 0, 1), true_vars[1].name)
    self.assertEqual(template % (0, 1, 0), true_vars[2].name)
    self.assertEqual(template % (0, 1, 1), true_vars[3].name)
    self.assertEqual(template % (1, 0, 0), true_vars[4].name)
    self.assertEqual(template % (1, 0, 1), true_vars[5].name)
    self.assertEqual(template % (1, 1, 0), true_vars[6].name)
    self.assertEqual(template % (1, 1, 1), true_vars[7].name)

    with self.test_session() as sess:
      variables_lib.global_variables_initializer().run()
      np_vars, np_v = sess.run([true_vars, v])
      # take products of sums of products
      self.assertAllClose(
          np_v,
          (((np_vars[0] * np_vars[1]) + (np_vars[2] * np_vars[3]))
           + ((np_vars[4] * np_vars[5]) + (np_vars[6] * np_vars[7]))))
class PartitionInfoTest(test.TestCase):
  """Tests for the internal variable_scope._PartitionInfo helper."""

  def testConstructorChecks(self):
    """Constructor validates argument types and lengths."""
    # Invalid arg types.
    with self.assertRaises(TypeError):
      variable_scope._PartitionInfo(full_shape=None, var_offset=[0, 1])
    with self.assertRaises(TypeError):
      variable_scope._PartitionInfo(full_shape=[0, 1], var_offset=None)
    with self.assertRaises(TypeError):
      variable_scope._PartitionInfo(full_shape="foo", var_offset=[0, 1])
    with self.assertRaises(TypeError):
      variable_scope._PartitionInfo(full_shape=[0, 1], var_offset="foo")

    # full_shape and var_offset must have same length.
    with self.assertRaises(ValueError):
      variable_scope._PartitionInfo(full_shape=[0, 1], var_offset=[0])
    # Offset must always be less than shape.
    with self.assertRaises(ValueError):
      variable_scope._PartitionInfo(full_shape=[1, 1], var_offset=[0, 1])

  def testSingleOffset(self):
    """single_offset returns the offset along the partitioned axis."""
    partition_info = variable_scope._PartitionInfo(
        full_shape=[9, 3], var_offset=[4, 0])
    self.assertEqual(4, partition_info.single_offset([1, 3]))

    # Tests when the variable isn't partitioned at all.
    partition_info = variable_scope._PartitionInfo(
        full_shape=[9, 3], var_offset=[0, 0])
    self.assertEqual(0, partition_info.single_offset([9, 3]))

  def testSingleSliceDim(self):
    """single_slice_dim validates its shape argument and finds the sliced axis."""
    partition_info = variable_scope._PartitionInfo(
        full_shape=[9, 3], var_offset=[4, 0])
    # Invalid shape.
    with self.assertRaises(TypeError):
      partition_info.single_slice_dim(None)

    # Rank of shape differs from full_shape.
    with self.assertRaises(ValueError):
      partition_info.single_slice_dim([1, 2, 3])

    # Shape is too large given var_offset (4+6 > 9).
    with self.assertRaises(ValueError):
      partition_info.single_slice_dim([6, 3])

    # Multiple possible slice dim from shape.
    with self.assertRaises(ValueError):
      partition_info.single_slice_dim([1, 1])

    partition_info = variable_scope._PartitionInfo(
        full_shape=[9, 3], var_offset=[0, 0])
    self.assertEqual(1, partition_info.single_slice_dim([9, 2]))
    partition_info = variable_scope._PartitionInfo(
        full_shape=[9, 3], var_offset=[4, 0])
    self.assertEqual(0, partition_info.single_slice_dim([2, 3]))
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| |
# Copyright (c) 2007-2009 Thomas Herve <therve@free.fr>.
# See LICENSE for details.
"""
Example of a web server interacting with RabbitMQ.
"""
import sys
from urllib import quote
from Cheetah.Template import Template
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.web.resource import Resource
from twisted.web.server import Site, NOT_DONE_YET
from twisted.web.static import Data
from twisted.python import log
from twotp import Process, readCookie, buildNodeName
from twotp.term import Binary, Atom
css = """
body {
font-family: sans-serif;
font-size: 75%;
margin: 0;
padding: 0;
background-color: #EEEEEE;
}
table, tr, td
{
border-collapse: collapse;
margin: 0 1em 0 1em;
padding: 0;
}
thead {
font-weight: bold;
text-align: center;
}
td {
border: 1px solid #666666;
padding: 0.1em 0.5em 0.1em 0.5em;
}
#content {
width: 80%;
height: 100%;
margin: auto;
border-left: 1px solid black;
border-right: 1px solid black;
background-color: #FFFFFF;
}
h1 {
margin: 0;
padding: 1em 0 0 1em;
}
h3 {
margin: 0;
padding: 1em;
}
form.out_form {
padding: 1em 0 0 1em;
}
form.in_form {
padding: 0.1em 0 0.1em 0;
margin: 0;
}
form.in_form input {
margin: 0;
}
"""
# Cheetah template for the front page: lists vhosts and users with add/delete
# and user->vhost mapping forms. Fixed: the section headings were written as
# "<h3>...<h3>" — the closing tag was missing its slash.
summary = """<html>
  <head>
    <title>RabbitMQ TwOTP Web UI</title>
    <link type="text/css" href="/style.css" rel="stylesheet">
  </head>
  <body>
  <div id="content">
  <h1>RabbitMQ management</h1>
  <h3>Virtual Hosts</h3>
  <table>
  <thead>
    <tr>
      <td>Name</td>
      <td>Delete</td>
    </tr>
  </thead>
  <tbody>
#for $vhost in $vhosts
    <tr>
      <td><a href="vhost/$quote($vhost.value, safe="")">$vhost.value</a></td>
      <td><a href="vhost/$quote($vhost.value, safe="")/delete">Delete</a></td>
    </tr>
#end for
  </tbody>
  </table>
  <form class="out_form" name="add_vhost" method="POST" action="add_vhost">
    <input type="text" name="vhost" />
    <input type="submit" value="Add" />
  </form>
  <h3>Users</h3>
  <table>
  <thead>
    <tr>
      <td>Name</td>
      <td>Virtual hosts</td>
      <td>Map to vhost</td>
      <td>Delete</td>
    </tr>
  </thead>
  <tbody>
#for $user in $users
    <tr>
      <td>$user.value</td>
      <td>$user_vhosts[$user]</td>
      <td>
        <form class="in_form" name="map_user_vhost" method="POST"
              action="user/$quote($user.value, safe="")/map_vhost">
          <input type="text" name="vhost" />
          <input type="submit" value="Map" />
        </form>
      </td>
      <td><a href="user/$quote($user.value, safe="")/delete">Delete</a></td>
    </tr>
#end for
  </tbody>
  </table>
  <form class="out_form" name="add_user" method="POST" action="add_user">
    <input type="text" name="user" />
    <input type="password" name="password" />
    <input type="submit" value="Add" />
  </form>
  </div>
  </body>
</html>
"""
vhost = """<html>
<head>
<title>RabbitMQ TwOTP Web UI</title>
<link type="text/css" href="/style.css" rel="stylesheet">
</head>
<body>
<div id="content">
<h1>Virtual host '$vhost'</h1>
<h3>List of exchanges</h3>
<table>
<thead>
<tr>
<td>Name</td>
<td>Type</td>
<td>Durable</td>
<td>Auto-delete</td>
</tr>
</thead>
<tbody>
#for $exchange in $exchanges
<tr>
<td>$exchange[0]</td>
<td>$exchange[1]</td>
<td>$exchange[2]</td>
<td>$exchange[3]</td>
</tr>
#end for
</tbody>
</table>
<h3>List of queues</h3>
<table>
<thead>
<tr>
<td>Name</td>
<td>Durable</td>
<td>Auto-delete</td>
<td>Total messages</td>
<td>Memory</td>
<td>Consumers</td>
</tr>
</thead>
<tbody>
#for $queue in $queues
<tr>
<td>$queue[0]</td>
<td>$queue[1]</td>
<td>$queue[2]</td>
<td>$queue[8]</td>
<td>$queue[12]</td>
<td>$queue[10]</td>
</tr>
#end for
</tbody>
</table>
</div>
</body>
</html>
"""
class VhostDeleteResource(Resource):
    """Resource deleting one RabbitMQ virtual host, then redirecting to /."""
    isLeaf = True

    def __init__(self, process, vhost):
        Resource.__init__(self)
        self.process = process
        self.vhost = vhost

    def render_GET(self, request):
        """Delete the vhost via RPC to the rabbit node; finish asynchronously."""

        def deleted(result):
            request.redirect("/")
            request.finish()

        # Fixed: use the process handed to the constructor instead of relying
        # on a module-level `process` global.
        self.process.callRemote(
            "rabbit", "rabbit_access_control", "delete_vhost",
            Binary(self.vhost)).addCallback(deleted)
        return NOT_DONE_YET
class VhostAddResource(Resource):
    """Resource creating a RabbitMQ virtual host from a POSTed form."""
    isLeaf = True

    def __init__(self, process):
        Resource.__init__(self)
        self.process = process

    def render_POST(self, request):
        """Create the vhost named in the form; redirect to / when done."""
        vhost = request.args["vhost"][0]

        def added(result):
            request.redirect("/")
            request.finish()

        # Fixed: use the process handed to the constructor instead of relying
        # on a module-level `process` global.
        self.process.callRemote(
            "rabbit", "rabbit_access_control", "add_vhost",
            Binary(vhost)).addCallback(added)
        return NOT_DONE_YET
class VhostResource(Resource):
    """Page for a single virtual host: lists its exchanges and queues."""

    def __init__(self, process, vhost):
        Resource.__init__(self)
        self.process = process
        self.vhost = vhost

    def getChild(self, path, request):
        if path == "delete":
            return VhostDeleteResource(self.process, self.vhost)
        return self

    @inlineCallbacks
    def get_infos(self):
        """Fetch queue and exchange information for this vhost via RPC.

        Returns (through returnValue) a (queues, exchanges) pair of decoded
        tuples ready to be fed into the `vhost` template.
        """
        items = [
            "name", "durable", "auto_delete", "arguments", "pid",
            "messages_ready", "messages_unacknowledged",
            "messages_uncommitted", "messages", "acks_uncommitted",
            "consumers", "transactions", "memory"]
        items = [Atom(item) for item in items]
        # Bug fix: the original referenced the module-level `process`
        # global instead of the process stored on the instance.
        queues = yield self.process.callRemote(
            "rabbit", "rabbit_amqqueue", "info_all", Binary(self.vhost), items)
        queues = [
            (queue[0][1][3].value,
             queue[1][1].text == "true",
             queue[2][1].text == "true",
             queue[3][1],
             queue[4][1].nodeName.text,
             queue[5][1],
             queue[6][1],
             queue[7][1],
             queue[8][1],
             queue[9][1],
             queue[10][1],
             queue[11][1],
             queue[12][1])
            for queue in queues]
        items = ["name", "type", "durable", "auto_delete", "arguments"]
        items = [Atom(item) for item in items]
        exchanges = yield self.process.callRemote(
            "rabbit", "rabbit_exchange", "info_all", Binary(self.vhost), items)
        exchanges = [
            (exchange[0][1][3].value,
             exchange[1][1].text,
             exchange[2][1].text == "true",
             exchange[3][1].text == "true",
             exchange[4][1])
            for exchange in exchanges]
        returnValue((queues, exchanges))

    def render_GET(self, request):
        def got_result(results):
            queues = results[0]
            exchanges = results[1]
            template = Template(vhost)
            template.queues = queues
            template.exchanges = exchanges
            template.vhost = self.vhost
            request.write(str(template))
            request.finish()
        self.get_infos().addCallback(got_result)
        return NOT_DONE_YET
class ListVhostsResource(Resource):
    """Parent resource mapping /vhost/<name> to a VhostResource."""

    def __init__(self, process):
        Resource.__init__(self)
        self.process = process

    def getChild(self, path, request):
        # Every child path segment names a virtual host.
        return VhostResource(self.process, path)
class UserDeleteResource(Resource):
    """Deletes a user, then redirects back to the index page."""
    isLeaf = True

    def __init__(self, process, user):
        Resource.__init__(self)
        self.process = process
        self.user = user

    def render_GET(self, request):
        def deleted(result):
            request.redirect("/")
            request.finish()
        # Bug fix: use the injected process, not the module-level global.
        self.process.callRemote(
            "rabbit", "rabbit_access_control", "delete_user",
            Binary(self.user)).addCallback(deleted)
        return NOT_DONE_YET
class UserMapVhostResource(Resource):
    """POST endpoint mapping a user to a virtual host."""
    isLeaf = True

    def __init__(self, process, user):
        Resource.__init__(self)
        self.process = process
        self.user = user

    def render_POST(self, request):
        vhost = request.args["vhost"][0]

        def mapped(result):
            request.redirect("/")
            request.finish()
        # Bug fix: use the injected process, not the module-level global.
        self.process.callRemote(
            "rabbit", "rabbit_access_control", "map_user_vhost",
            Binary(self.user), Binary(vhost)).addCallback(mapped)
        return NOT_DONE_YET
class UserAddResource(Resource):
    """POST endpoint that creates a new user, then redirects home."""
    isLeaf = True

    def __init__(self, process):
        Resource.__init__(self)
        self.process = process

    def render_POST(self, request):
        user = request.args["user"][0]
        password = request.args["password"][0]

        def added(result):
            request.redirect("/")
            request.finish()
        # Bug fix: use the injected process, not the module-level global.
        self.process.callRemote(
            "rabbit", "rabbit_access_control", "add_user",
            Binary(user), Binary(password)).addCallback(added)
        return NOT_DONE_YET
class UserResource(Resource):
    """Per-user resource routing to the delete and map_vhost actions."""

    # Child path -> resource class; each takes (process, user).
    _ACTIONS = {
        "delete": UserDeleteResource,
        "map_vhost": UserMapVhostResource,
    }

    def __init__(self, process, user):
        Resource.__init__(self)
        self.process = process
        self.user = user

    def getChild(self, path, request):
        """Dispatch known action paths; fall back to this resource."""
        action = self._ACTIONS.get(path)
        if action is not None:
            return action(self.process, self.user)
        return self
class ListUsersResource(Resource):
    """Parent resource mapping /user/<name> to a UserResource."""

    def __init__(self, process):
        Resource.__init__(self)
        self.process = process

    def getChild(self, path, request):
        # Every child path segment names a user.
        return UserResource(self.process, path)
class WebUi(Resource):
    """Root resource: renders the summary page and routes child paths."""

    css = Data(css, "text/css")

    def __init__(self, process):
        Resource.__init__(self)
        self.process = process

    @inlineCallbacks
    def get_infos(self):
        """Fetch vhosts, users and the user->vhosts mapping via RPC.

        Returns (through returnValue) a (vhosts, users, mapping) triple.
        """
        # Bug fix: the original used the module-level `process` global in
        # all three calls instead of the process stored on the instance.
        vhosts = yield self.process.callRemote(
            "rabbit", "rabbit_access_control", "list_vhosts")
        users = yield self.process.callRemote(
            "rabbit", "rabbit_access_control", "list_users")
        mapping = {}
        for user in users:
            user_vhosts = yield self.process.callRemote(
                "rabbit", "rabbit_access_control", "list_user_vhosts", user)
            mapping[user] = user_vhosts
        returnValue((vhosts, users, mapping))

    def getChild(self, path, request):
        # Consistency: use a single if/elif chain (the original mixed a
        # stray `if` into the middle of the chain; behavior is unchanged).
        if path == "vhost":
            return ListVhostsResource(self.process)
        elif path == "add_vhost":
            return VhostAddResource(self.process)
        elif path == "user":
            return ListUsersResource(self.process)
        elif path == "add_user":
            return UserAddResource(self.process)
        elif path == "style.css":
            return self.css
        return self

    def render_GET(self, request):
        def got_result(results):
            vhosts = results[0]
            users = results[1]
            accesses = results[2]
            for user, user_vhosts in accesses.iteritems():
                accesses[user] = ", ".join(v.value for v in user_vhosts)
            template = Template(summary)
            template.vhosts = vhosts
            template.users = users
            template.user_vhosts = accesses
            template.quote = quote
            request.write(str(template))
            request.finish()
        self.get_infos().addCallback(got_result)
        return NOT_DONE_YET
if __name__ == "__main__":
    # Stand-alone entry point: connect to the RabbitMQ Erlang node via
    # TwOTP and serve the management UI on port 8072.
    log.startLogging(sys.stdout)
    cookie = readCookie()
    nodeName = buildNodeName("twotp-rabbit")
    process = Process(nodeName, cookie)
    reactor.listenTCP(8072, Site(WebUi(process)))
    reactor.run()
| |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rules engine for Bucket acls"""
from collections import namedtuple
import itertools
import re
# pylint: disable=line-too-long
from google.cloud.security.common.gcp_type import bucket_access_controls as bkt_acls
# pylint: enable=line-too-long
from google.cloud.security.common.util import log_util
from google.cloud.security.common.util.regex_util import escape_and_globify
from google.cloud.security.scanner.audit import base_rules_engine as bre
from google.cloud.security.scanner.audit import errors as audit_errors
# Module-level logger for this rules engine.
LOGGER = log_util.get_logger(__name__)
class BucketsRulesEngine(bre.BaseRulesEngine):
    """Rules engine for bucket acls."""

    def __init__(self, rules_file_path, snapshot_timestamp=None):
        """Initialize.

        Args:
            rules_file_path (str): file location of rules
            snapshot_timestamp (str): snapshot timestamp. Defaults to None.
                If set, this will be the snapshot timestamp
                used in the engine.
        """
        super(BucketsRulesEngine, self).__init__(
            rules_file_path=rules_file_path)
        self.rule_book = None

    def build_rule_book(self, global_configs=None):
        """Build BucketsRuleBook from the rules definition file.

        Args:
            global_configs (dict): Global configurations.
        """
        self.rule_book = BucketsRuleBook(self._load_rule_definitions())

    # TODO: The naming is confusing and needs to be fixed in all scanners.
    # pylint: disable=arguments-differ
    def find_policy_violations(self, buckets_acls, force_rebuild=False):
        """Determine whether bucket acls violate rules.

        Args:
            buckets_acls (BucketAccessControls): Object containing ACL data
            force_rebuild (bool): If True, rebuilds the rule book. This will
                reload the rules definition file and add the rules to the
                book.

        Returns:
            generator: A generator of rule violations.
        """
        if self.rule_book is None or force_rebuild:
            self.build_rule_book()
        # Flatten the per-rule violation generators into a single stream.
        return itertools.chain.from_iterable(
            rule.find_policy_violations(buckets_acls)
            for rule in self.rule_book.get_resource_rules())

    def add_rules(self, rules):
        """Add rules to the rule book.

        Args:
            rules (dict): rule definitions dictionary
        """
        if self.rule_book is not None:
            self.rule_book.add_rules(rules)
class BucketsRuleBook(bre.BaseRuleBook):
    """The RuleBook for bucket acls resources."""

    def __init__(self, rule_defs=None):
        """Initialization.

        Args:
            rule_defs (dict): rule definitions
        """
        super(BucketsRuleBook, self).__init__()
        self.resource_rules_map = {}
        self.rule_defs = rule_defs if rule_defs else {}
        if rule_defs:
            self.add_rules(rule_defs)

    def add_rules(self, rule_defs):
        """Add rules to the rule book.

        Args:
            rule_defs (dict): rule definitions dictionary
        """
        for index, rule_def in enumerate(rule_defs.get('rules', [])):
            self.add_rule(rule_def, index)

    def add_rule(self, rule_def, rule_index):
        """Add a rule to the rule book.

        Args:
            rule_def (dict): A dictionary containing rule definition
                properties.
            rule_index (int): The index of the rule from the rule definitions.
                Assigned automatically when the rule book is built.
        """
        for resource in rule_def.get('resource'):
            if not resource.get('resource_ids'):
                raise audit_errors.InvalidRulesSchemaError(
                    'Missing resource ids in rule {}'.format(rule_index))

            fields = [rule_def.get(name) for name in
                      ('bucket', 'entity', 'email', 'domain', 'role')]
            if any(field is None for field in fields):
                raise audit_errors.InvalidRulesSchemaError(
                    'Faulty rule {}'.format(rule_def.get('name')))
            bucket, entity, email, domain, role = fields

            rule_def_resource = bkt_acls.BucketAccessControls(
                escape_and_globify(bucket),
                escape_and_globify(entity),
                escape_and_globify(email),
                escape_and_globify(domain),
                escape_and_globify(role.upper()))
            rule = Rule(rule_name=rule_def.get('name'),
                        rule_index=rule_index,
                        rules=rule_def_resource)
            # Keep the first rule registered for this index (matches the
            # original "only set if absent" behavior).
            self.resource_rules_map.setdefault(rule_index, rule)

    def get_resource_rules(self):
        """Get all the resource rules for (resource, RuleAppliesTo.*).

        Returns:
            list: A list of ResourceRules.
        """
        return list(self.resource_rules_map.values())
class Rule(object):
    """Rule properties from the rule definition file.

    Also finds violations.
    """

    def __init__(self, rule_name, rule_index, rules):
        """Initialize.

        Args:
            rule_name (str): Name of the loaded rule
            rule_index (int): The index of the rule from the rule definitions
            rules (dict): The rules from the file
        """
        self.rule_name = rule_name
        self.rule_index = rule_index
        self.rules = rules

    # TODO: The naming is confusing and needs to be fixed in all scanners.
    def find_policy_violations(self, bucket_acl):
        """Find bucket policy acl violations in the rule book.

        Args:
            bucket_acl (BucketAccessControls): Bucket ACL resource

        Yields:
            namedtuple: Returns RuleViolation named tuple
        """
        # re.match returns a match object (truthy) or None, so all() is
        # equivalent to the original chain of `is not None and <match>`
        # checks. The dead pre-initializations of the is_*_violated flags
        # (immediately overwritten) were removed.
        matches = (
            re.match(self.rules.bucket, bucket_acl.bucket),
            re.match(self.rules.entity, bucket_acl.entity),
            re.match(self.rules.email, bucket_acl.email),
            re.match(self.rules.domain, bucket_acl.domain),
            re.match(self.rules.role, bucket_acl.role),
        )
        if all(matches):
            yield self.RuleViolation(
                resource_type='project',
                resource_id=bucket_acl.project_number,
                rule_name=self.rule_name,
                rule_index=self.rule_index,
                violation_type='BUCKET_VIOLATION',
                role=bucket_acl.role,
                entity=bucket_acl.entity,
                email=bucket_acl.email,
                domain=bucket_acl.domain,
                bucket=bucket_acl.bucket)

    # Rule violation.
    # resource_type: string
    # resource_id: string
    # rule_name: string
    # rule_index: int
    # violation_type: BUCKET_VIOLATION
    # role: string
    # entity: string
    # email: string
    # domain: string
    # bucket: string
    RuleViolation = namedtuple('RuleViolation',
                               ['resource_type', 'resource_id', 'rule_name',
                                'rule_index', 'violation_type', 'role',
                                'entity', 'email', 'domain', 'bucket'])
| |
'''
Tests for the Mara Lexer
'''
import pytest
from .. import lexer as lexer_module
# pylint: disable=W0621
@pytest.fixture
def lex_simple():
    """Provide a callable lexing a source string into (token, value) pairs."""
    lexer = lexer_module.build_lexer()
    return lambda source: lexer_module.lex_simple(lexer, source)
def test_lex_wrappers(lex_simple):
    """Brackets, parens, braces and module/end lex to dedicated tokens."""
    expected = [
        ('MODULE', 'module'),
        ('LPAR', '('),
        ('RPAR', ')'),
        ('LBKT', '['),
        ('RBKT', ']'),
        ('LBRC', '{'),
        ('RBRC', '}'),
        ('END', 'end'),
    ]
    assert list(lex_simple('module()[]{}end')) == expected
def test_lex_distinct_symbols(lex_simple):
    """Each standalone operator/symbol lexes to its own token type."""
    given = r'module @ | & $ \ , . = + - ^ / * : :: end'
    output = list(lex_simple(given))
    assert output == [
        ('MODULE', 'module'),
        ('AT', '@'),
        ('PIPE', '|'),
        ('AMP', '&'),
        ('DOLLAR', '$'),
        ('SLASH', '\\'),
        ('COMMA', ','),
        ('DOT', '.'),
        ('EQ', '='),
        ('PLUS', '+'),
        ('MINUS', '-'),
        ('POWER', '^'),
        ('DIVIDE', '/'),
        ('TIMES', '*'),
        ('COLON', ':'),
        ('BIND', '::'),
        ('END', 'end'),
    ]
def test_lex_keywords(lex_simple):
    """Keywords lex to keyword tokens; line breaks become TERM tokens."""
    given = '''module
match as
if else
for in
while
def val var let
object trait proto
end'''
    output = list(lex_simple(given))
    assert output == [
        ('MODULE', 'module'),
        ('TERM', '\n'),
        ('MATCH', 'match'),
        ('AS', 'as'),
        ('TERM', '\n'),
        ('IF', 'if'),
        ('ELSE', 'else'),
        ('TERM', '\n'),
        ('FOR', 'for'),
        ('IN', 'in'),
        ('TERM', '\n'),
        ('WHILE', 'while'),
        ('TERM', '\n'),
        ('DEF', 'def'),
        ('VAL', 'val'),
        ('VAR', 'var'),
        ('LET', 'let'),
        ('TERM', '\n'),
        ('OBJECT', 'object'),
        ('TRAIT', 'trait'),
        ('PROTO', 'proto'),
        ('TERM', '\n'),
        ('END', 'end'),
    ]
def test_lex_identifiers(lex_simple):
    """Value identifiers lex to VID, symbolic identifiers to SID."""
    given = '''module
forsight
hello _goodbye
pizza_sauce num0
____pZ0x9 _0
< > == =>
~! ~~ ~>>
??? !
end'''.replace('\n', ' ')
    output = list(lex_simple(given))
    assert output == [
        ('MODULE', 'module'),
        ('VID', 'forsight'),
        ('VID', 'hello'),
        ('VID', '_goodbye'),
        ('VID', 'pizza_sauce'),
        ('VID', 'num0'),
        ('VID', '____pZ0x9'),
        ('VID', '_0'),
        ('SID', '<'),
        ('SID', '>'),
        ('SID', '=='),
        ('SID', '=>'),
        ('SID', '~!'),
        ('SID', '~~'),
        ('SID', '~>>'),
        ('SID', '???'),
        ('SID', '!'),
        ('END', 'end'),
    ]
def test_lex_literal_nums_and_bools(lex_simple):
    """Numeric literal variants and booleans lex to their token types.

    Note: `1.` lexes as INTD + DOT, not as a REAL.
    """
    given = ('module 1_000_000 1. 0.9 1231.0 -1 -19.0 +0 -10' +
             ' +3.14e-10 1.2e10 7.8e+10 1e10 0xAEF -0x12Aef true false end')
    output = list(lex_simple(given))
    assert output == [
        ('MODULE', 'module'),
        ('INTP', '1_000_000'),
        ('INTD', '1'),
        ('DOT', '.'),
        ('REAL', '0.9'),
        ('REAL', '1231.0'),
        ('INTD', '-1'),
        ('REAL', '-19.0'),
        ('INTD', '+0'),
        ('INTD', '-10'),
        ('SCI', '+3.14e-10'),
        ('SCI', '1.2e10'),
        ('SCI', '7.8e+10'),
        ('SCI', '1e10'),
        ('INTX', '0xAEF'),
        ('INTX', '-0x12Aef'),
        ('TRUE', 'true'),
        ('FALSE', 'false'),
        ('END', 'end'),
    ]
def test_lex_expr_end(lex_simple):
    """Consecutive line breaks are collapsed into a single TERM token."""
    given = '''module test
x = 10
y +
5
end'''
    output = list(lex_simple(given))
    assert output == [
        ('MODULE', 'module'),
        ('VID', 'test'),
        ('TERM', '\n'),
        ('VID', 'x'),
        ('EQ', '='),
        ('INTD', '10'),
        ('TERM', '\n\n\n\n'),
        ('VID', 'y'),
        ('PLUS', '+'),
        ('INTD', '5'),
        ('TERM', '\n'),
        ('END', 'end'),
    ]
def test_open_state(lex_simple):
    """Inside [ ] and ( ) newlines emit no TERMs; inside { } they do."""
    given = '''module test
[
x +
y
z
]
(
x +
y
z
)
{
x +
y
z
}
'''
    expected = [
        ('MODULE', 'module'),
        ('VID', 'test'),
        ('TERM', '\n'),
        ('LBKT', '['),
        ('VID', 'x'),
        ('PLUS', '+'),
        ('VID', 'y'),
        ('VID', 'z'),
        ('RBKT', ']'),
        ('TERM', '\n'),
        ('LPAR', '('),
        ('VID', 'x'),
        ('PLUS', '+'),
        ('VID', 'y'),
        ('VID', 'z'),
        ('RPAR', ')'),
        ('TERM', '\n'),
        ('LBRC', '{'),
        ('VID', 'x'),
        ('PLUS', '+'),
        ('VID', 'y'),
        ('TERM', '\n'),
        ('VID', 'z'),
        ('TERM', '\n'),
        ('RBRC', '}'),
        ('TERM', '\n'),
    ]
    result = list(lex_simple(given))
    assert result == expected
def test_lex_comments(lex_simple):
    """#, ## and ###...### lex to TCOMMENT, DCOMMENT and BCOMMENT."""
    given = '''module test
x
# asdf
## asdf
###
asdf
qwerty
###
'''
    expected = [
        ('MODULE', 'module'),
        ('VID', 'test'),
        ('TERM', '\n'),
        ('VID', 'x'),
        ('TERM', '\n'),
        ('TCOMMENT', ' asdf'),
        ('TERM', '\n'),
        ('DCOMMENT', ' asdf'),
        ('TERM', '\n'),
        ('BCOMMENT', '\n asdf\n qwerty\n '),
        ('TERM', '\n'),
    ]
    result = list(lex_simple(given))
    assert result == expected
def test_control_with_empty(lex_simple):
    """An `if` with a braced body merges trailing line breaks into one TERM."""
    given = '''module test
if x {
10
}
'''
    assert list(lex_simple(given)) == [
        ('MODULE', 'module'),
        ('VID', 'test'),
        ('TERM', '\n'),
        ('IF', 'if'),
        ('VID', 'x'),
        ('LBRC', '{'),
        ('INTD', '10'),
        ('TERM', '\n\n\n'),
        ('RBRC', '}'),
        ('TERM', '\n'),
    ]
| |
import re
import uuid
from time import gmtime, strftime
from celery.task import task
from pandas import DataFrame, rolling_window
from bamboo.core.calculator import calculate_updates, dframe_from_update,\
propagate
from bamboo.core.frame import BAMBOO_RESERVED_KEY_PREFIX,\
DATASET_ID, INDEX, join_dataset, PARENT_DATASET_ID, remove_reserved_keys
from bamboo.core.summary import summarize
from bamboo.lib.async import call_async
from bamboo.lib.exceptions import ArgumentError
from bamboo.lib.mongo import df_mongo_decode
from bamboo.lib.readers import ImportableDataset
from bamboo.lib.query_args import QueryArgs
from bamboo.lib.schema_builder import Schema
from bamboo.lib.utils import combine_dicts, to_list
from bamboo.models.abstract_model import AbstractModel
from bamboo.models.calculation import Calculation
from bamboo.models.observation import Observation
# The format pandas encodes multicolumns in.
# Bug fix: use a raw string — the original non-raw "\(" is an invalid
# escape sequence (a DeprecationWarning/SyntaxWarning in newer Pythons),
# even though it happened to produce the same pattern text.
strip_pattern = re.compile(r"\(u'|', u'|'\)")
@task(ignore_result=True)
def delete_task(dataset, query=None):
    """Background task to delete dataset and its associated observations."""
    Observation.delete_all(dataset, query=query)

    # Only a full (unfiltered) delete removes the dataset record itself.
    if query is not None:
        return

    super(dataset.__class__, dataset).delete({DATASET_ID: dataset.dataset_id})
    Observation.delete_encoding(dataset)
class Dataset(AbstractModel, ImportableDataset):
    """Model wrapping a stored dataset record and its observations."""

    __collectionname__ = 'datasets'

    # caching keys
    STATS = '_stats'
    ALL = '_all'

    # metadata: keys into the stored dataset record
    AGGREGATED_DATASETS = BAMBOO_RESERVED_KEY_PREFIX + 'linked_datasets'
    ATTRIBUTION = 'attribution'
    CREATED_AT = 'created_at'
    DESCRIPTION = 'description'
    ID = 'id'
    JOINED_DATASETS = 'joined_datasets'
    LABEL = 'label'
    LICENSE = 'license'
    NUM_COLUMNS = 'num_columns'
    NUM_ROWS = 'num_rows'
    MERGED_DATASETS = 'merged_datasets'
    PARENT_IDS = 'parent_ids'
    PENDING_UPDATES = 'pending_updates'
    SCHEMA = 'schema'
    UPDATED_AT = 'updated_at'
    def __init__(self, record=None):
        """Wrap an optional dataset `record`; the dframe cache starts empty."""
        super(Dataset, self).__init__(record)
        self.__dframe = None
    @property
    def aggregated_datasets(self):
        """List of (groups, dataset) pairs for aggregated datasets."""
        return [(self.split_groups(group), self.find_one(_id)) for (
            group, _id) in self.aggregated_datasets_dict.items()]
    @property
    def aggregated_datasets_dict(self):
        """Stored mapping of group string to aggregated dataset ID."""
        return self.record.get(self.AGGREGATED_DATASETS, {})
    @property
    def attribution(self):
        """Attribution metadata for this dataset, or None."""
        return self.record.get(self.ATTRIBUTION)
    @property
    def columns(self):
        """Schema column names; empty list if the dataset has no rows."""
        return self.schema.keys() if self.num_rows else []
    @property
    def dataset_id(self):
        """Unique ID of this dataset (KeyError if the record lacks it)."""
        return self.record[DATASET_ID]
    @property
    def description(self):
        """Description metadata for this dataset, or None."""
        return self.record.get(self.DESCRIPTION)
    @property
    def joined_datasets(self):
        """Expanded join info as (direction, other, on, joined) tuples."""
        # TODO: fetch all datasets in single DB call
        # (let Dataset.find take a list of IDs)
        return [
            (direction, self.find_one(other_dataset_id), on,
             self.find_one(joined_dataset_id))
            for direction, other_dataset_id, on, joined_dataset_id in
            self.joined_dataset_ids]
    @property
    def joined_dataset_ids(self):
        """Stored join info as a list of 4-tuples of IDs/columns."""
        return [
            tuple(_list) for _list in self.record.get(self.JOINED_DATASETS, [])
        ]
    @property
    def label(self):
        """Label metadata for this dataset, or None."""
        return self.record.get(self.LABEL)
    @property
    def labels(self):
        """Labels of all columns in the schema."""
        return [column[self.LABEL] for column in self.schema.values()]
    @property
    def license(self):
        """License metadata for this dataset, or None."""
        return self.record.get(self.LICENSE)
    @property
    def merged_datasets(self):
        """Dataset instances resolved from the merged dataset IDs."""
        return self.__linked_datasets(self.merged_dataset_ids)
@property
def merged_datasets_with_map(self):
results = self.merged_dataset_info
if len(results):
mappings, ids = zip(*results)
results = zip(mappings, self.__linked_datasets(ids))
return results
    @property
    def merged_dataset_ids(self):
        """IDs of merged datasets (last element of each info entry)."""
        results = self.merged_dataset_info
        return zip(*results)[-1] if results else results
    @property
    def merged_dataset_info(self):
        """Raw stored merge info: list of [mapping, dataset_id] pairs."""
        return self.record.get(self.MERGED_DATASETS, [])
    @property
    def num_columns(self):
        """Stored column count, defaulting to 0."""
        return self.record.get(self.NUM_COLUMNS, 0)
    @property
    def num_rows(self):
        """Stored row count, defaulting to 0."""
        return self.record.get(self.NUM_ROWS, 0)
    @property
    def on_columns_for_rhs_of_joins(self):
        """`on` columns of joins recorded with direction 'left'."""
        return [on for direction, _, on, __ in
                self.joined_datasets if direction == 'left']
    @property
    def parent_ids(self):
        """Distinct parent dataset IDs among this dataset's observations."""
        query_args = QueryArgs(select={PARENT_DATASET_ID: 1},
                               distinct=PARENT_DATASET_ID)
        return self.observations(query_args)
    @property
    def pending_updates(self):
        """List of pending update IDs (raises KeyError if field is absent)."""
        return self.record[self.PENDING_UPDATES]
@property
def schema(self):
schema_dict = {}
if self.record:
schema_dict = self.record.get(self.SCHEMA)
return Schema.safe_init(schema_dict)
    @property
    def stats(self):
        """Stored summary statistics, defaulting to an empty dict."""
        return self.record.get(self.STATS, {})
    @property
    def updatable_keys(self):
        """Metadata keys callers may modify via `info(update=...)`."""
        return [self.LABEL, self.DESCRIPTION, self.LICENSE, self.ATTRIBUTION]
    @property
    def __is_cached(self):
        """True when a dframe is cached on this instance."""
        return self.__dframe is not None
    @classmethod
    def create(cls, dataset_id=None):
        """Create a new dataset via the parent model's `create`."""
        return super(cls, cls).create(dataset_id)
    @classmethod
    def find(cls, dataset_id):
        """Return datasets for `dataset_id`."""
        # Wrap the ID in QueryArgs for the generic model finder.
        query_args = QueryArgs(query={DATASET_ID: dataset_id})
        return super(cls, cls).find(query_args)
    @classmethod
    def find_one(cls, dataset_id):
        """Return the dataset for `dataset_id`."""
        return super(cls, cls).find_one({DATASET_ID: dataset_id})
    def __linked_datasets(self, ids):
        """Resolve a list of dataset IDs to Dataset instances."""
        return [self.find_one(_id) for _id in ids]
    def add_joined_dataset(self, new_data):
        """Append join info `new_data` to the stored joined datasets."""
        self.__add_linked_data(self.JOINED_DATASETS, self.joined_dataset_ids,
                               new_data)
    def add_merged_dataset(self, mapping, new_dataset):
        """Add the ID of `new_dataset` (with `mapping`) to merged datasets."""
        self.__add_linked_data(self.MERGED_DATASETS, self.merged_dataset_info,
                               [mapping, new_dataset.dataset_id])
    def add_observations(self, new_data):
        """Update `dataset` with `new_data`.

        Registers a pending update ID, then hands the calculation work off
        to an asynchronous task.
        """
        update_id = uuid.uuid4().hex
        self.add_pending_update(update_id)

        new_data = to_list(new_data)

        # fetch data before other updates
        new_dframe_raw = dframe_from_update(self, new_data)

        call_async(calculate_updates, self, new_data,
                   new_dframe_raw=new_dframe_raw, update_id=update_id)
    def add_pending_update(self, update_id):
        """Push `update_id` onto this dataset's pending-updates queue."""
        self.collection.update(
            {'_id': self.record['_id']},
            {'$push': {self.PENDING_UPDATES: update_id}})
    def aggregated_dataset(self, groups):
        """Return the aggregated dataset for `groups`, or None if absent."""
        groups = to_list(groups)
        _id = self.aggregated_datasets_dict.get(self.join_groups(groups))
        return self.find_one(_id) if _id else None
    def append_observations(self, dframe):
        """Append rows of `dframe`, then refresh row count, schema, stats."""
        Observation.append(dframe, self)
        self.update({self.NUM_ROWS: self.num_rows + len(dframe)})

        # to update cardinalities here we need to refetch the full DataFrame.
        dframe = self.dframe(keep_parent_ids=True)
        self.build_schema(dframe)
        self.update_stats(dframe)
    def build_schema(self, dframe, overwrite=False, set_num_columns=True):
        """Build schema for a dataset.

        If no schema exists, build a schema from the passed `dframe` and store
        that schema for this dataset. Otherwise, if a schema does exist, build
        a schema for the passed `dframe` and merge this schema with the current
        schema. Keys in the new schema replace keys in the current schema but
        keys in the current schema not in the new schema are retained.

        If `set_num_columns` is True the number of columns will be set to the
        number of keys (columns) in the new schema.

        :param dframe: The DataFrame whose schema to merge with the current
            schema.
        :param overwrite: If true replace schema, otherwise update.
        :param set_num_columns: If True also set the number of columns.
        """
        new_schema = self.schema.rebuild(dframe, overwrite)
        # An overwrite forces the column count to be reset as well.
        self.set_schema(new_schema,
                        set_num_columns=(set_num_columns or overwrite))
    def calculations(self, include_aggs=True, only_aggs=False):
        """Return the calculations for this dataset.

        :param include_aggs: Include aggregations, default True.
        :param only_aggs: Exclude non-aggregations, default False.
        :returns: The result of `Calculation.find` for this dataset.
        """
        return Calculation.find(self, include_aggs, only_aggs)
    def cardinality(self, col):
        """Return the schema cardinality for column `col`."""
        return self.schema.cardinality(col)
    def clear_cache(self):
        """Drop the cached dframe; return self to allow chaining."""
        self.__dframe = None
        return self
    def clear_pending_updates(self):
        """Reset this dataset's pending-updates queue to empty."""
        self.collection.update(
            {'_id': self.record['_id']},
            {'$set': {self.PENDING_UPDATES: []}})
    def clear_summary_stats(self, group=None, column=None):
        """Remove summary stats for `group` and optional `column`.

        By default will remove all stats.

        :param group: The group to remove stats for, default None.
        :param column: The column to remove stats for, default None.
        """
        stats = self.stats

        if stats:
            if column:
                # Only drop the column within the group (or the ALL bucket).
                stats_for_field = stats.get(group or self.ALL)

                if stats_for_field:
                    stats_for_field.pop(column, None)
            elif group:
                stats.pop(group, None)
            else:
                stats = {}

            self.update({self.STATS: stats})
    def count(self, query_args=None):
        """Return the count of rows matching query in dataset.

        :param query_args: An optional QueryArgs to hold the query arguments.
        """
        query_args = query_args or QueryArgs()
        obs = self.observations(query_args, as_cursor=True)

        # Distinct queries return a list, otherwise a cursor with a count.
        count = len(obs) if query_args.distinct else obs.count()

        limit = query_args.limit
        if limit > 0 and count > limit:
            count = limit

        return count
    def delete(self, query=None, countdown=0):
        """Delete this dataset asynchronously.

        :param query: Optional query restricting which observations are
            deleted; when None the dataset record itself is removed too.
        :param countdown: Delete dataset after this number of seconds.
        """
        call_async(delete_task, self.clear_cache(), query=query,
                   countdown=countdown)
def delete_columns(self, columns):
"""Delete column `column` from this dataset.
:param column: The column to delete.
"""
columns = set(self.schema.keys()).intersection(set(to_list(columns)))
if not len(columns):
raise ArgumentError("Columns: %s not in dataset.")
Observation.delete_columns(self, columns)
new_schema = self.schema
[new_schema.pop(c) for c in columns]
self.set_schema(new_schema, set_num_columns=True)
return columns
    def delete_observation(self, index):
        """Delete observation at index.

        :param index: The index of an observation to delete.
        """
        Observation.delete(self, index)

        dframe = self.dframe()
        self.update({self.NUM_ROWS: len(dframe)})
        self.build_schema(dframe, overwrite=True)
        # Propagate the deletion asynchronously (see `propagate`).
        call_async(propagate, self, update={'delete': index})
    def dframe(self, query_args=None, keep_parent_ids=False, padded=False,
               index=False, reload_=False, keep_mongo_keys=False):
        """Fetch the dframe for this dataset.

        :param query_args: An optional QueryArgs to hold the query arguments.
        :param keep_parent_ids: Do not remove parent IDs from the dframe,
            default False.
        :param padded: Used for joining, default False.
        :param index: Return the index with dframe, default False.
        :param reload_: Force refresh of data, default False.
        :param keep_mongo_keys: Used for updating documents, default False.

        :returns: Return DataFrame with contents based on query parameters
            passed to MongoDB. DataFrame will not have parent ids if
            `keep_parent_ids` is False.
        """
        # bypass cache if we need specific version
        cacheable = not (query_args or keep_parent_ids or padded)

        # use cached copy if we have already fetched it
        if cacheable and not reload_ and self.__is_cached:
            return self.__dframe

        query_args = query_args or QueryArgs()
        observations = self.observations(query_args, as_cursor=True)

        if query_args.distinct:
            return DataFrame(observations)

        dframe = Observation.batch_read_dframe_from_cursor(
            self, observations, query_args.distinct, query_args.limit)

        dframe = df_mongo_decode(dframe, keep_mongo_keys=keep_mongo_keys)

        # Reserved keys are stripped unless explicitly kept via the flags.
        excluded = [keep_parent_ids and PARENT_DATASET_ID, index and INDEX]
        dframe = remove_reserved_keys(dframe, filter(bool, excluded))

        if index:
            dframe.rename(columns={INDEX: 'index'}, inplace=True)

        dframe = self.__maybe_pad(dframe, padded)

        if cacheable:
            self.__dframe = dframe

        return dframe
    def has_pending_updates(self, update_id):
        """Check if this dataset has pending updates.

        Call the update identfied by `update_id` the current update. A dataset
        has pending updates if, not including the current update, there are any
        pending updates and the update at the top of the queue is not the
        current update.

        :param update_id: An update to exclude when checking for pending
            updates.
        :returns: True if there are pending updates, False otherwise.
        """
        self.reload()
        pending_updates = self.pending_updates

        # NOTE(review): assumes pending_updates is non-empty — an empty list
        # would raise IndexError here. Confirm callers always register the
        # current update before checking.
        return pending_updates[0] != update_id and len(
            set(pending_updates) - set([update_id]))
    def info(self, update=None):
        """Return or update meta-data for this dataset.

        :param update: Dictionary to update info with, default None. Only
            keys in `updatable_keys` are applied.
        :returns: Dictionary of info for this dataset.
        """
        if update:
            update_dict = {key: value for key, value in update.items()
                           if key in self.updatable_keys}
            self.update(update_dict)

        return {
            self.ID: self.dataset_id,
            self.LABEL: self.label,
            self.DESCRIPTION: self.description,
            self.SCHEMA: self.schema,
            self.LICENSE: self.license,
            self.ATTRIBUTION: self.attribution,
            self.CREATED_AT: self.record.get(self.CREATED_AT),
            self.UPDATED_AT: self.record.get(self.UPDATED_AT),
            self.NUM_COLUMNS: self.num_columns,
            self.NUM_ROWS: self.num_rows,
            self.STATE: self.state,
            self.PARENT_IDS: self.parent_ids,
            self.PENDING_UPDATES: self.pending_updates,
        }
    def is_dimension(self, col):
        """True if the schema marks column `col` as a dimension."""
        return self.schema.is_dimension(col)
    def is_factor(self, col):
        """True if `col` is a dimension or has a date simpletype."""
        return self.is_dimension(col) or self.schema.is_date_simpletype(col)
    def join(self, other, on):
        """Join with dataset `other` on the passed columns.

        :param other: The other dataset to join.
        :param on: The column in this and the `other` dataset to join on.
        :returns: The newly created merged dataset.
        """
        merged_dframe = self.dframe()

        if not len(merged_dframe.columns):
            # Empty dataset, simulate columns
            merged_dframe = self.place_holder_dframe()

        merged_dframe = join_dataset(merged_dframe, other, on)
        merged_dataset = self.create()

        if self.num_rows and other.num_rows:
            merged_dataset.save_observations(merged_dframe)
        else:
            # No rows to store: just record the schema and mark ready.
            merged_dataset.build_schema(merged_dframe, set_num_columns=True)
            merged_dataset.ready()

        # Record the link on both sides of the join.
        self.add_joined_dataset(
            ('right', other.dataset_id, on, merged_dataset.dataset_id))
        other.add_joined_dataset(
            ('left', self.dataset_id, on, merged_dataset.dataset_id))

        return merged_dataset
    def observations(self, query_args=None, as_cursor=False):
        """Return observations for this dataset.

        :param query_args: An optional QueryArgs to hold the query arguments.
        :param as_cursor: Return the observations as a cursor.
        """
        return Observation.find(self, query_args or QueryArgs(),
                                as_cursor=as_cursor)
    def place_holder_dframe(self, dframe=None):
        """Build a one-row DataFrame of empty strings over schema columns.

        If `dframe` is given, schema columns already present in it (other
        than its first column) are omitted.
        """
        columns = self.schema.keys()

        if dframe is not None:
            columns = [c for c in columns if c not in dframe.columns[1:]]

        return DataFrame([[''] * len(columns)], columns=columns)
    def reload(self):
        """Reload the dataset from DB and clear any cache."""
        dataset = Dataset.find_one(self.dataset_id)
        self.record = dataset.record
        self.clear_cache()

        return self
    def remove_parent_observations(self, parent_id):
        """Remove observations for this dataset with the passed `parent_id`.

        :param parent_id: Remove observations with this ID as their parent
            dataset ID.
        """
        Observation.delete_all(self, {PARENT_DATASET_ID: parent_id})
        # clear the cached dframe
        self.__dframe = None
    def remove_pending_update(self, update_id):
        """Pull `update_id` out of this dataset's pending-updates queue."""
        self.collection.update(
            {'_id': self.record['_id']},
            {'$pull': {self.PENDING_UPDATES: update_id}})
    def replace_observations(self, dframe, overwrite=False,
                             set_num_columns=True):
        """Remove all rows for this dataset and save the rows in `dframe`.

        :param dframe: Replace rows in this dataset with this DataFrame's rows.
        :param overwrite: If true replace the schema, otherwise update it.
            Default False.
        :param set_num_columns: If true update the dataset stored number of
            columns. Default True.

        :returns: DataFrame equivalent to the passed in `dframe`.
        """
        self.build_schema(dframe, overwrite=overwrite,
                          set_num_columns=set_num_columns)
        Observation.delete_all(self)

        return self.save_observations(dframe)
    def resample(self, date_column, interval, how, query=None):
        """Resample a dataset given a new time frame.

        :param date_column: The date column use as the index for resampling.
        :param interval: The interval code for resampling.
        :param how: How to aggregate in the resample.
        :param query: An optional query restricting the rows to resample.

        :returns: A DataFrame of the resampled DataFrame for this dataset.
        """
        query_args = QueryArgs(query=query)
        dframe = self.dframe(query_args).set_index(date_column)
        resampled = dframe.resample(interval, how=how)
        return resampled.reset_index()
def rolling(self, win_type, window):
"""Calculate a rolling window over all numeric columns.
:param win_type: The type of window, see pandas pandas.rolling_window.
:param window: The number of observations used for calculating the
window.
:returns: A DataFrame of the rolling window calculated for this
dataset.
"""
dframe = self.dframe(QueryArgs(select=self.schema.numerics_select))
return rolling_window(dframe, window, win_type)
def save(self, dataset_id=None):
"""Store dataset with `dataset_id` as the unique internal ID.
Store a new dataset with an ID given by `dataset_id` is exists,
otherwise reate a random UUID for this dataset. Additionally, set the
created at time to the current time and the state to pending.
:param dataset_id: The ID to store for this dataset, default is None.
:returns: A dict representing this dataset.
"""
if dataset_id is None:
dataset_id = uuid.uuid4().hex
record = {
DATASET_ID: dataset_id,
self.AGGREGATED_DATASETS: {},
self.CREATED_AT: strftime("%Y-%m-%d %H:%M:%S", gmtime()),
self.STATE: self.STATE_PENDING,
self.PENDING_UPDATES: [],
}
return super(self.__class__, self).save(record)
    def save_observations(self, dframe):
        """Save rows in `dframe` for this dataset.
        :param dframe: DataFrame to save rows from.
        :returns: The result of the Observation save.
        """
        return Observation.save(dframe, self)
    def set_olap_type(self, column, olap_type):
        """Set the OLAP Type for this `column` of dataset.
        Only columns with an original OLAP Type of 'measure' can be modified.
        This includes columns with Simple Type integer, float, and datetime.
        :param column: The column to set the OLAP Type for.
        :param olap_type: The OLAP Type to set. Must be 'dimension' or
            'measure'.
        """
        schema = self.schema
        schema.set_olap_type(column, olap_type)
        # Persist the modified schema without recounting columns.
        self.set_schema(schema, False)
        # Build summary for new type.
        self.summarize(self.dframe(), update=True)
def set_schema(self, schema, set_num_columns=True):
"""Set the schema from an existing one."""
update_dict = {self.SCHEMA: schema}
if set_num_columns:
update_dict.update({self.NUM_COLUMNS: len(schema.keys())})
self.update(update_dict)
def summarize(self, dframe, groups=[], no_cache=False, update=False,
flat=False):
"""Build and return a summary of the data in this dataset.
Return a summary of dframe grouped by `groups`, or the overall
summary if no groups are specified.
:param dframe: dframe to summarize
:param groups: A list of columns to group on.
:param no_cache: Do not fetch a cached summary.
:param flat: Return a flattened list of groups.
:returns: A summary of the dataset as a dict. Numeric columns will be
summarized by the arithmetic mean, standard deviation, and
percentiles. Dimensional columns will be summarized by counts.
"""
self.reload()
summary = summarize(self, dframe, groups, no_cache, update=update)
if flat:
flat_summary = []
for cols, v in summary.iteritems():
cols = self.split_groups(cols)
for k, data in v.iteritems():
col_values = self.split_groups(k)
col_values = [strip_pattern.sub(',', i)[1:-1]
for i in col_values]
flat_summary.append(
combine_dicts(dict(zip(cols, col_values)), data))
summary = flat_summary
return summary
def update(self, record):
"""Update dataset `dataset` with `record`."""
record[self.UPDATED_AT] = strftime("%Y-%m-%d %H:%M:%S", gmtime())
super(self.__class__, self).update(record)
    def update_observation(self, index, data):
        """Update the observation at `index` with `data`.
        :param index: The index of the observation to update.
        :param data: A dict of new values for the observation.
        """
        # check that update is valid (raises before any write happens)
        dframe_from_update(self, [data])
        Observation.update(self, index, data)
        # NOTE(review): propagate presumably pushes this edit to dependent
        # (linked/merged) datasets asynchronously — confirm against callers.
        call_async(propagate, self, update={'edit': [index, data]})
    def update_observations(self, dframe):
        """Update existing observations from the rows of `dframe`.
        :param dframe: DataFrame whose rows update this dataset's rows.
        """
        return Observation.update_from_dframe(dframe, self)
def update_complete(self, update_id):
"""Remove `update_id` from this datasets list of pending updates.
:param update_id: The ID of the completed update.
"""
self.collection.update(
{'_id': self.record['_id']},
{'$pull': {self.PENDING_UPDATES: update_id}})
    def update_stats(self, dframe, update=False):
        """Update stored statistics for this dataset.
        :param dframe: Use this DataFrame for summary statistics.
        :param update: Update or replace summary statistics, default False.
        """
        # Record the new row count and mark the dataset ready.
        self.update({
            self.NUM_ROWS: len(dframe),
            self.STATE: self.STATE_READY,
        })
        self.summarize(dframe, update=update)
    def __add_linked_data(self, link_key, existing_data, new_data):
        # Append `new_data` to the list stored under `link_key`
        # (non-destructive: builds a new list rather than mutating).
        self.update({link_key: existing_data + [new_data]})
def __maybe_pad(self, dframe, pad):
if pad:
if len(dframe.columns):
on = dframe.columns[0]
place_holder = self.place_holder_dframe(dframe).set_index(on)
dframe = dframe.join(place_holder, on=on)
else:
dframe = self.place_holder_dframe()
return dframe
| |
from common_fixtures import * # NOQA
# Directory holding the docker-compose/rancher-compose yml fixtures.
RCCOMMANDS_SUBDIR = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                 'resources/rccmds')
logger = logging.getLogger(__name__)
# Output expected from `rancher-compose up` while a stack is starting.
start_project_str = "Starting"
reason_skipped_str = 'Rancher compose files directory location not ' \
                     'set/does not Exist or account api keys provided'
# Skip the whole module when the fixture directory is missing or account
# api keys are provided (per the reason string above).
if_compose_data_files = pytest.mark.skipif(
    not os.path.isdir(RCCOMMANDS_SUBDIR) or
    ACCESS_KEY is not None or SECRET_KEY is not None,
    reason=reason_skipped_str)
@if_compose_data_files
def test_rancher_compose_create_service(client,
                                        rancher_compose_container):
    """Exercise `rancher-compose create` followed by `up -d`.

    Verifies the service becomes active with the expected scale and name
    and that all of its containers are running.
    """
    # This method tests the rancher compose create and up commands
    env_name = random_str().replace("-", "")
    # Create an environment using up
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
        "create", "Creating stack", "rc1.yml")
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
        "up -d", start_project_str, "rc1.yml")
    env, service = get_env_service_by_name(client, env_name, "test1")
    # Confirm service is active and the containers are running
    assert service.state == "active"
    assert service.scale == 3
    assert service.name == "test1"
    check_config_for_service(client, service, {"test1": "value1"}, 1)
    container_list = get_service_container_list(client, service)
    assert len(container_list) == 3
    for container in container_list:
        assert container.state == "running"
    delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_start_stop(client,
                                    rancher_compose_container):
    """Exercise `rancher-compose stop` and `start` on a running stack."""
    # This method tests the rancher compose start and stop commands
    # Bug #4887 has been filed
    # Bug #4933 has been filed [Start command has no response,
    # Now "Started" response is being checked. Should be changed if required.
    env_name = random_str().replace("-", "")
    # Create an environment using up
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
        "up -d", start_project_str, "rc1.yml")
    env, service = get_env_service_by_name(client, env_name, "test1")
    # Confirm service is active and the containers are running
    assert service.state == "active"
    check_config_for_service(client, service, {"test1": "value1"}, 1)
    container_list = get_service_container_list(client, service)
    assert len(container_list) == 3
    for container in container_list:
        assert container.state == "running"
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
        "stop", "Stopped", rancher_compose="rc1.yml")
    # Note: We add a sleep as the stop command does not wait until complete
    time.sleep(10)
    service = client.wait_success(service)
    # Confirm service is inactive and the containers are stopped
    assert service.state == "inactive"
    container_list = get_service_container_list(client, service)
    assert len(container_list) == 3
    # Check for containers being stopped
    for container in container_list:
        assert container.state == "stopped"
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
        "start -d", "Started", "rc1.yml")
    # Confirm service is active and the containers are running
    service = client.wait_success(service, 300)
    assert service.state == "active"
    check_config_for_service(client, service, {"test1": "value1"}, 1)
    container_list = get_service_container_list(client, service)
    assert len(container_list) == 3
    for container in container_list:
        assert container.state == "running"
    delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_start_down(client,
                                    rancher_compose_container):
    """Exercise `rancher-compose down` and `start` on a running stack."""
    # This method tests the rancher compose start and down commands
    env_name = random_str().replace("-", "")
    # Bug #4933 has been filed [Start command has no response,
    # Now "Started" response is being checked. Should be changed if required.
    # Create an environment using up
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
        "up -d", start_project_str, "rc1.yml")
    env, service = get_env_service_by_name(client, env_name, "test1")
    # Confirm service is active and the containers are running
    assert service.state == "active"
    check_config_for_service(client, service, {"test1": "value1"}, 1)
    container_list = get_service_container_list(client, service)
    assert len(container_list) == 3
    for container in container_list:
        assert container.state == "running"
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
        "down", "Stopped", "rc1.yml")
    # Note: We add a sleep as the down command does not wait until it completes
    time.sleep(10)
    service = client.wait_success(service)
    # Confirm service is inactive and the containers are stopped
    assert service.state == "inactive"
    container_list = get_service_container_list(client, service)
    assert len(container_list) == 3
    # Check for containers being stopped
    for container in container_list:
        assert container.state == "stopped"
    launch_rancher_compose_from_file(client, RCCOMMANDS_SUBDIR,
                                     "dc1.yml", env_name,
                                     "start -d", "Started", "rc1.yml")
    # Confirm service is active and the containers are running
    service = client.wait_success(service, 300)
    assert service.state == "active"
    check_config_for_service(client, service, {"test1": "value1"}, 1)
    container_list = get_service_container_list(client, service)
    assert len(container_list) == 3
    for container in container_list:
        assert container.state == "running"
    delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_service_restart(client,
                                         rancher_compose_container):
    """Test the `rancher-compose restart` command.

    Brings up two services, confirms every container runs with
    startCount == 1, restarts the stack, then confirms every container
    runs with startCount == 2.
    """
    env_name = random_str().replace("-", "")
    # Create an environment using up
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc2.yml", env_name,
        "up -d", "Creating stack", "rc2.yml")
    env, service1 = get_env_service_by_name(client, env_name, "test1")
    env, service2 = get_env_service_by_name(client, env_name, "test2")
    # Confirm services are active and the containers are running
    service1 = client.wait_success(service1, 300)
    service2 = client.wait_success(service2, 300)
    assert service1.state == "active"
    assert service2.state == "active"
    check_config_for_service(client, service1, {"test1": "value1"}, 1)
    check_config_for_service(client, service2, {"test2": "value2"}, 1)
    container_list1 = get_service_container_list(client, service1)
    assert len(container_list1) == 4
    for container in container_list1:
        assert container.state == "running"
        assert container.startCount == 1
    container_list2 = get_service_container_list(client, service2)
    assert len(container_list2) == 4
    # Fix: this loop previously iterated as `con` but asserted on the stale
    # `container` left over from the loop above, so service2's startCount
    # was never actually checked.
    for container in container_list2:
        assert container.state == "running"
        assert container.startCount == 1
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc2.yml", env_name,
        "restart", "Restarting", "rc2.yml")
    env, service1 = get_env_service_by_name(client, env_name, "test1")
    env, service2 = get_env_service_by_name(client, env_name, "test2")
    # Confirm services are active and the containers are running
    service1 = client.wait_success(service1, 300)
    service2 = client.wait_success(service2, 300)
    assert service1.state == "active"
    assert service2.state == "active"
    check_config_for_service(client, service1, {"test1": "value1"}, 1)
    check_config_for_service(client, service2, {"test2": "value2"}, 1)
    container_list1 = get_service_container_list(client, service1)
    assert len(container_list1) == 4
    for container in container_list1:
        assert container.state == "running"
        assert container.startCount == 2
    container_list2 = get_service_container_list(client, service2)
    assert len(container_list2) == 4
    for container in container_list2:
        assert container.state == "running"
        assert container.startCount == 2
    delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_service_restart_bat_inter(client,
                                                   rancher_compose_container):
    """Test `rancher-compose restart` with --batch-size/--interval options.

    Same flow as the plain restart test, but restarts two containers at a
    time with a 100ms interval.
    """
    env_name = random_str().replace("-", "")
    # Create an environment using up
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc2.yml", env_name,
        "up -d", "Creating stack", "rc2.yml")
    env, service1 = get_env_service_by_name(client, env_name, "test1")
    env, service2 = get_env_service_by_name(client, env_name, "test2")
    # Confirm services are active and the containers are running
    service1 = client.wait_success(service1, 300)
    service2 = client.wait_success(service2, 300)
    assert service1.state == "active"
    assert service2.state == "active"
    check_config_for_service(client, service1, {"test1": "value1"}, 1)
    check_config_for_service(client, service2, {"test2": "value2"}, 1)
    container_list1 = get_service_container_list(client, service1)
    assert len(container_list1) == 4
    for container in container_list1:
        assert container.state == "running"
        assert container.startCount == 1
    container_list2 = get_service_container_list(client, service2)
    assert len(container_list2) == 4
    # Fix: this loop previously iterated as `con` but asserted on the stale
    # `container` left over from the loop above, so service2's startCount
    # was never actually checked.
    for container in container_list2:
        assert container.state == "running"
        assert container.startCount == 1
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc2.yml", env_name,
        "restart --batch-size 2 --interval 100", "Restarting", "rc2.yml")
    env, service1 = get_env_service_by_name(client, env_name, "test1")
    env, service2 = get_env_service_by_name(client, env_name, "test2")
    # Confirm services are active and the containers are running
    service1 = client.wait_success(service1, 300)
    service2 = client.wait_success(service2, 300)
    assert service1.state == "active"
    assert service2.state == "active"
    check_config_for_service(client, service1, {"test1": "value1"}, 1)
    check_config_for_service(client, service2, {"test2": "value2"}, 1)
    container_list1 = get_service_container_list(client, service1)
    assert len(container_list1) == 4
    for container in container_list1:
        assert container.state == "running"
        assert container.startCount == 2
    container_list2 = get_service_container_list(client, service2)
    assert len(container_list2) == 4
    for container in container_list2:
        assert container.state == "running"
        assert container.startCount == 2
    delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_services_delete(client,
                                         rancher_compose_container):
    """Exercise `rancher-compose rm -f` and verify the service is removed."""
    # This method tests the delete command
    env_name = random_str().replace("-", "")
    # Create an environment using up
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
        "up -d", start_project_str, "rc1.yml")
    env, service = get_env_service_by_name(client, env_name, "test1")
    # Confirm service is active and the containers are running
    assert service.state == "active"
    check_config_for_service(client, service, {"test1": "value1"}, 1)
    container_list = get_service_container_list(client, service)
    assert len(container_list) == 3
    for container in container_list:
        assert container.state == "running"
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
        "rm -f", "Deleting", "rc1.yml")
    # Confirm service is removed
    service = client.wait_success(service, 300)
    assert service.state == "removed"
    delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_services_scale(client,
                                        rancher_compose_container):
    """Exercise `rancher-compose scale` up (3 -> 4) and back down (4 -> 3)."""
    # This method tests the scale command
    env_name = random_str().replace("-", "")
    # Create an environment using up
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
        "up -d", start_project_str, "rc1.yml")
    env, service = get_env_service_by_name(client, env_name, "test1")
    # Confirm service is active and the containers are running
    assert service.state == "active"
    check_config_for_service(client, service, {"test1": "value1"}, 1)
    container_list = get_service_container_list(client, service)
    assert len(container_list) == 3
    for container in container_list:
        assert container.state == "running"
    # Issue a command to scale up the services
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
        "scale test1=4", "Setting scale", "rc1.yml")
    # Confirm service is active and the containers are running
    service = client.wait_success(service, 300)
    assert service.state == "active"
    container_list = get_service_container_list(client, service)
    # Check if the number of containers are incremented correctly
    assert len(container_list) == 4
    for container in container_list:
        assert container.state == "running"
    # Issue a command to scale down the services
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc1.yml", env_name,
        "scale test1=3", "Setting scale", "rc1.yml")
    # Confirm service is active and the containers are running
    service = client.wait_success(service, 300)
    assert service.state == "active"
    container_list = get_service_container_list(client, service)
    # Check if the number of containers are decremented correctly
    assert len(container_list) == 3
    for container in container_list:
        assert container.state == "running"
    delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_services_security(client,
                                           rancher_compose_container,
                                           socat_containers):
    """Verify security-related compose options reach the Docker host.

    Inspects each container directly via the host's Docker API and checks
    privileged mode, memory/cpu limits, capabilities, hostname and pid mode.
    """
    # This method tests the options in security tab in the UI
    env_name = random_str().replace("-", "")
    # Create an environment using up
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc3.yml", env_name,
        "up -d", start_project_str, "rc3.yml")
    env, service = get_env_service_by_name(client, env_name, "test3")
    # Confirm service is active and the containers are running
    assert service.state == "active"
    container_list = get_service_container_list(client, service)
    assert len(container_list) == 3
    for con in container_list:
        assert con.state == "running"
        containers = client.list_container(
            externalId=con.externalId,
            include="hosts",
            removed_null=True)
        # Inspect the container on its host through the Docker API.
        docker_client = get_docker_client(containers[0].hosts[0])
        inspect = docker_client.inspect_container(con.externalId)
        logger.info("Checked for containers running " + con.name)
        assert inspect["State"]["Running"]
        assert inspect["HostConfig"]["Privileged"]
        assert inspect["HostConfig"]["Memory"] == 104857600
        assert inspect["HostConfig"]["CpuShares"] == 256
        assert inspect["HostConfig"]["CapAdd"] == ["AUDIT_CONTROL",
                                                   "AUDIT_WRITE"]
        assert inspect["HostConfig"]["CapDrop"] == ["BLOCK_SUSPEND",
                                                    "CHOWN"]
        assert inspect["Config"]["Hostname"] == "rancherhost"
        assert inspect["HostConfig"]["PidMode"] == "host"
    delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_services_log_driver(client,
                                             rancher_compose_container,
                                             socat_containers):
    """Verify the compose log-driver option (syslog) reaches the Docker host."""
    # This test case fails bcos of bug #4773
    env_name = random_str().replace("-", "")
    # Create an environment using up
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc3.yml", env_name,
        "up -d", start_project_str, "rc3.yml")
    env, service = get_env_service_by_name(client, env_name, "test3")
    # Confirm service is active and the containers are running
    assert service.state == "active"
    container_list = get_service_container_list(client, service)
    assert len(container_list) == 3
    for con in container_list:
        assert con.state == "running"
        containers = client.list_container(
            externalId=con.externalId,
            include="hosts",
            removed_null=True)
        # Inspect the container on its host through the Docker API.
        docker_client = get_docker_client(containers[0].hosts[0])
        inspect = docker_client.inspect_container(con.externalId)
        logger.info("Checked for containers running" + con.name)
        assert inspect["State"]["Running"]
        assert inspect["HostConfig"]["LogConfig"]["Type"] == "syslog"
    delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_services_network(client,
                                          rancher_compose_container,
                                          socat_containers):
    """Verify network-related compose options reach the Docker host.

    Checks domain name, hostname override, requested IP label, DNS and
    DNS search entries on each container.
    """
    # This method tests the options in Network tab in the UI
    hostname_override = "io.rancher.container.hostname_override"
    requested_ip = "io.rancher.container.requested_ip"
    env_name = random_str().replace("-", "")
    # Create an environment using up
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc4.yml", env_name,
        "up -d", start_project_str, "rc4.yml")
    env, service = get_env_service_by_name(client, env_name, "test4")
    # Confirm service is active and the containers are running
    assert service.state == "active"
    check_config_for_service(client, service,
                             {"testrc": "RANCHER_COMPOSE"}, 1)
    check_config_for_service(client, service,
                             {"io.rancher.container.requested_ip":
                              "209.243.140.21"}, 1)
    check_config_for_service(client, service,
                             {"io.rancher.container.hostname_override":
                              "container_name"}, 1)
    container_list = get_service_container_list(client, service)
    assert len(container_list) == 2
    for con in container_list:
        assert con.state == "running"
        containers = client.list_container(
            externalId=con.externalId,
            include="hosts",
            removed_null=True)
        # Inspect the container on its host through the Docker API.
        docker_client = get_docker_client(containers[0].hosts[0])
        inspect = docker_client.inspect_container(con.externalId)
        logger.info("Checked for containers running " + con.name)
        assert inspect["State"]["Running"]
        assert inspect["Config"]["Domainname"] == "xyz.com"
        assert \
            inspect["Config"]["Labels"][hostname_override] \
            == "container_name"
        assert inspect["Config"]["Labels"][requested_ip] == "209.243.140.21"
        dns_list = inspect["HostConfig"]["Dns"]
        dnssearch_list = inspect["HostConfig"]["DnsSearch"]
        assert "209.243.150.21" in dns_list
        assert "www.google.com" in dnssearch_list
    delete_all(client, [env])
@if_compose_data_files
def test_rancher_compose_services_volume(client,
                                         rancher_compose_container,
                                         socat_containers):
    """Verify the compose volume binding (testvol:/home:rw) reaches the host."""
    env_name = random_str().replace("-", "")
    # Create an environment using up
    launch_rancher_compose_from_file(
        client, RCCOMMANDS_SUBDIR, "dc5.yml", env_name,
        "up -d", start_project_str, "rc5.yml")
    env, service = get_env_service_by_name(client, env_name, "test5")
    # Confirm service is active and the containers are running
    assert service.state == "active"
    container_list = get_service_container_list(client, service)
    assert len(container_list) == 2
    for con in container_list:
        assert con.state == "running"
        containers = client.list_container(
            externalId=con.externalId,
            include="hosts",
            removed_null=True)
        # Inspect the container on its host through the Docker API.
        docker_client = get_docker_client(containers[0].hosts[0])
        inspect = docker_client.inspect_container(con.externalId)
        logger.info("Checked for containers running " + con.name)
        assert inspect["State"]["Running"]
        assert "testvol:/home:rw" in inspect["HostConfig"]["Binds"]
    delete_all(client, [env])
| |
# Copyright (c) 2007-2013 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2014-2016 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Google, Inc.
# Copyright (c) 2015-2016 Cara Vinson <ceridwenv@gmail.com>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
"""tests for the astroid variable lookup capabilities
"""
import functools
import sys
import unittest
from astroid import builder
from astroid import exceptions
from astroid import nodes
from astroid import scoped_nodes
from astroid import util
from astroid.tests import resources
class LookupTest(resources.SysPathSetup, unittest.TestCase):
    """Tests for name lookup (``lookup``/``ilookup``) on astroid nodes."""
    def setUp(self):
        super(LookupTest, self).setUp()
        # Pre-built astroid modules from the test resources package.
        self.module = resources.build_file('data/module.py', 'data.module')
        self.module2 = resources.build_file('data/module2.py', 'data.module2')
        self.nonregr = resources.build_file('data/nonregr.py', 'data.nonregr')
    def test_limit(self):
        code = '''
            l = [a
                 for a,b in list]
            a = 1
            b = a
            a = None
            def func():
                c = 1
        '''
        astroid = builder.parse(code, __name__)
        # a & b
        a = next(astroid.nodes_of_class(nodes.Name))
        self.assertEqual(a.lineno, 2)
        if sys.version_info < (3, 0):
            self.assertEqual(len(astroid.lookup('b')[1]), 1)
            self.assertEqual(len(astroid.lookup('a')[1]), 1)
            # On Python 2 comprehension targets leak into the enclosing
            # scope, shifting the index of the module-level binding.
            b = astroid.locals['b'][1]
        else:
            self.assertEqual(len(astroid.lookup('b')[1]), 1)
            self.assertEqual(len(astroid.lookup('a')[1]), 1)
            b = astroid.locals['b'][0]
        stmts = a.lookup('a')[1]
        self.assertEqual(len(stmts), 1)
        self.assertEqual(b.lineno, 6)
        b_infer = b.infer()
        b_value = next(b_infer)
        self.assertEqual(b_value.value, 1)
        # c
        self.assertRaises(StopIteration, functools.partial(next, b_infer))
        func = astroid.locals['func'][0]
        self.assertEqual(len(func.lookup('c')[1]), 1)
    def test_module(self):
        astroid = builder.parse('pass', __name__)
        # built-in objects
        none = next(astroid.ilookup('None'))
        self.assertIsNone(none.value)
        obj = next(astroid.ilookup('object'))
        self.assertIsInstance(obj, nodes.ClassDef)
        self.assertEqual(obj.name, 'object')
        self.assertRaises(exceptions.InferenceError,
                          functools.partial(next, astroid.ilookup('YOAA')))
        # XXX
        self.assertEqual(len(list(self.nonregr.ilookup('enumerate'))), 2)
    def test_class_ancestor_name(self):
        code = '''
            class A:
                pass
            class A(A):
                pass
        '''
        astroid = builder.parse(code, __name__)
        cls1 = astroid.locals['A'][0]
        cls2 = astroid.locals['A'][1]
        # The base-class Name in the second definition must resolve to the
        # first (earlier) class, not to the class being defined.
        name = next(cls2.nodes_of_class(nodes.Name))
        self.assertEqual(next(name.infer()), cls1)
    ### backport those test to inline code
    def test_method(self):
        method = self.module['YOUPI']['method']
        my_dict = next(method.ilookup('MY_DICT'))
        self.assertTrue(isinstance(my_dict, nodes.Dict), my_dict)
        none = next(method.ilookup('None'))
        self.assertIsNone(none.value)
        self.assertRaises(exceptions.InferenceError,
                          functools.partial(next, method.ilookup('YOAA')))
    def test_function_argument_with_default(self):
        make_class = self.module2['make_class']
        base = next(make_class.ilookup('base'))
        self.assertTrue(isinstance(base, nodes.ClassDef), base.__class__)
        self.assertEqual(base.name, 'YO')
        self.assertEqual(base.root().name, 'data.module')
    def test_class(self):
        klass = self.module['YOUPI']
        my_dict = next(klass.ilookup('MY_DICT'))
        self.assertIsInstance(my_dict, nodes.Dict)
        none = next(klass.ilookup('None'))
        self.assertIsNone(none.value)
        obj = next(klass.ilookup('object'))
        self.assertIsInstance(obj, nodes.ClassDef)
        self.assertEqual(obj.name, 'object')
        self.assertRaises(exceptions.InferenceError,
                          functools.partial(next, klass.ilookup('YOAA')))
    def test_inner_classes(self):
        ddd = list(self.nonregr['Ccc'].ilookup('Ddd'))
        self.assertEqual(ddd[0].name, 'Ddd')
    def test_loopvar_hiding(self):
        astroid = builder.parse("""
            x = 10
            for x in range(5):
                print (x)
            if x > 0:
                print ('#' * x)
        """, __name__)
        xnames = [n for n in astroid.nodes_of_class(nodes.Name) if n.name == 'x']
        # inside the loop, only one possible assignment
        self.assertEqual(len(xnames[0].lookup('x')[1]), 1)
        # outside the loop, two possible assignments
        self.assertEqual(len(xnames[1].lookup('x')[1]), 2)
        self.assertEqual(len(xnames[2].lookup('x')[1]), 2)
    def test_list_comps(self):
        astroid = builder.parse("""
            print ([ i for i in range(10) ])
            print ([ i for i in range(10) ])
            print ( list( i for i in range(10) ) )
        """, __name__)
        # Each comprehension's target must resolve only to its own binding.
        xnames = [n for n in astroid.nodes_of_class(nodes.Name) if n.name == 'i']
        self.assertEqual(len(xnames[0].lookup('i')[1]), 1)
        self.assertEqual(xnames[0].lookup('i')[1][0].lineno, 2)
        self.assertEqual(len(xnames[1].lookup('i')[1]), 1)
        self.assertEqual(xnames[1].lookup('i')[1][0].lineno, 3)
        self.assertEqual(len(xnames[2].lookup('i')[1]), 1)
        self.assertEqual(xnames[2].lookup('i')[1][0].lineno, 4)
    def test_list_comp_target(self):
        """test the list comprehension target"""
        astroid = builder.parse("""
            ten = [ var for var in range(10) ]
            var
        """)
        var = astroid.body[1].value
        if sys.version_info < (3, 0):
            # Python 2: the comprehension target leaks, so `var` is visible.
            self.assertEqual(var.inferred(), [util.Uninferable])
        else:
            # Python 3: comprehensions have their own scope.
            self.assertRaises(exceptions.NameInferenceError, var.inferred)
    def test_dict_comps(self):
        astroid = builder.parse("""
            print ({ i: j for i in range(10) for j in range(10) })
            print ({ i: j for i in range(10) for j in range(10) })
        """, __name__)
        xnames = [n for n in astroid.nodes_of_class(nodes.Name) if n.name == 'i']
        self.assertEqual(len(xnames[0].lookup('i')[1]), 1)
        self.assertEqual(xnames[0].lookup('i')[1][0].lineno, 2)
        self.assertEqual(len(xnames[1].lookup('i')[1]), 1)
        self.assertEqual(xnames[1].lookup('i')[1][0].lineno, 3)
        xnames = [n for n in astroid.nodes_of_class(nodes.Name) if n.name == 'j']
        # NOTE(review): these lookups use 'i' although the names collected
        # here are 'j' — possibly a copy/paste slip; confirm intent.
        self.assertEqual(len(xnames[0].lookup('i')[1]), 1)
        self.assertEqual(xnames[0].lookup('i')[1][0].lineno, 2)
        self.assertEqual(len(xnames[1].lookup('i')[1]), 1)
        self.assertEqual(xnames[1].lookup('i')[1][0].lineno, 3)
    def test_set_comps(self):
        astroid = builder.parse("""
            print ({ i for i in range(10) })
            print ({ i for i in range(10) })
        """, __name__)
        xnames = [n for n in astroid.nodes_of_class(nodes.Name) if n.name == 'i']
        self.assertEqual(len(xnames[0].lookup('i')[1]), 1)
        self.assertEqual(xnames[0].lookup('i')[1][0].lineno, 2)
        self.assertEqual(len(xnames[1].lookup('i')[1]), 1)
        self.assertEqual(xnames[1].lookup('i')[1][0].lineno, 3)
    def test_set_comp_closure(self):
        astroid = builder.parse("""
            ten = { var for var in range(10) }
            var
        """)
        var = astroid.body[1].value
        # Set comprehensions never leak their target, on any Python version.
        self.assertRaises(exceptions.NameInferenceError, var.inferred)
    def test_generator_attributes(self):
        tree = builder.parse("""
            def count():
                "test"
                yield 0
            iterer = count()
            num = iterer.next()
        """)
        next_node = tree.body[2].value.func
        gener = next_node.expr.inferred()[0]
        if sys.version_info < (3, 0):
            self.assertIsInstance(gener.getattr('next')[0], nodes.FunctionDef)
        else:
            self.assertIsInstance(gener.getattr('__next__')[0], nodes.FunctionDef)
        self.assertIsInstance(gener.getattr('send')[0], nodes.FunctionDef)
        self.assertIsInstance(gener.getattr('throw')[0], nodes.FunctionDef)
        self.assertIsInstance(gener.getattr('close')[0], nodes.FunctionDef)
    def test_explicit___name__(self):
        code = '''
            class Pouet:
                __name__ = "pouet"
            p1 = Pouet()
            class PouetPouet(Pouet): pass
            p2 = Pouet()
            class NoName: pass
            p3 = NoName()
        '''
        astroid = builder.parse(code, __name__)
        p1 = next(astroid['p1'].infer())
        self.assertTrue(p1.getattr('__name__'))
        p2 = next(astroid['p2'].infer())
        self.assertTrue(p2.getattr('__name__'))
        self.assertTrue(astroid['NoName'].getattr('__name__'))
        p3 = next(astroid['p3'].infer())
        # Instances do not get a __name__ unless the class sets one.
        self.assertRaises(exceptions.AttributeInferenceError, p3.getattr, '__name__')
    def test_function_module_special(self):
        astroid = builder.parse('''
        def initialize(linter):
            """initialize linter with checkers in this package """
            package_load(linter, __path__[0])
        ''', 'data.__init__')
        # __path__ only exists in packages; it must still be resolvable.
        path = [n for n in astroid.nodes_of_class(nodes.Name) if n.name == '__path__'][0]
        self.assertEqual(len(path.lookup('__path__')[1]), 1)
    def test_builtin_lookup(self):
        self.assertEqual(scoped_nodes.builtin_lookup('__dict__')[1], ())
        intstmts = scoped_nodes.builtin_lookup('int')[1]
        self.assertEqual(len(intstmts), 1)
        self.assertIsInstance(intstmts[0], nodes.ClassDef)
        self.assertEqual(intstmts[0].name, 'int')
        # pylint: disable=no-member; union type in const_factory, this shouldn't happen
        self.assertIs(intstmts[0], nodes.const_factory(1)._proxied)
    def test_decorator_arguments_lookup(self):
        code = '''
            def decorator(value):
                def wrapper(function):
                    return function
                return wrapper
            class foo:
                member = 10 #@
                @decorator(member) #This will cause pylint to complain
                def test(self):
                    pass
        '''
        member = builder.extract_node(code, __name__).targets[0]
        it = member.infer()
        obj = next(it)
        self.assertIsInstance(obj, nodes.Const)
        self.assertEqual(obj.value, 10)
        self.assertRaises(StopIteration, functools.partial(next, it))
    def test_inner_decorator_member_lookup(self):
        code = '''
            class FileA:
                def decorator(bla):
                    return bla
                @__(decorator)
                def funcA():
                    return 4
        '''
        decname = builder.extract_node(code, __name__)
        it = decname.infer()
        obj = next(it)
        self.assertIsInstance(obj, nodes.FunctionDef)
        self.assertRaises(StopIteration, functools.partial(next, it))
    def test_static_method_lookup(self):
        code = '''
            class FileA:
                @staticmethod
                def funcA():
                    return 4
            class Test:
                FileA = [1,2,3]
                def __init__(self):
                    print (FileA.funcA())
        '''
        astroid = builder.parse(code, __name__)
        # Inside Test.__init__, the bare name FileA must resolve to the
        # module-level class, not to the Test.FileA class attribute.
        it = astroid['Test']['__init__'].ilookup('FileA')
        obj = next(it)
        self.assertIsInstance(obj, nodes.ClassDef)
        self.assertRaises(StopIteration, functools.partial(next, it))
    def test_global_delete(self):
        code = '''
            def run2():
                f = Frobble()
            class Frobble:
                pass
            Frobble.mumble = True
            del Frobble
            def run1():
                f = Frobble()
        '''
        astroid = builder.parse(code, __name__)
        # NOTE(review): 'Frobbel' (sic) never exists in the code above, so
        # these lookups are trivially empty; 'Frobble' may have been the
        # intended name — confirm before relying on this test.
        stmts = astroid['run2'].lookup('Frobbel')[1]
        self.assertEqual(len(stmts), 0)
        stmts = astroid['run1'].lookup('Frobbel')[1]
        self.assertEqual(len(stmts), 0)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
class CommandFilter(object):
    """Command filter only checking that the 1st argument matches exec_path.

    Fix over the original: the ``exec_dirs=[]`` mutable default arguments
    are replaced with a ``None`` sentinel (the list was never mutated, but
    the anti-pattern is removed); passing a list explicitly behaves exactly
    as before.
    """

    def __init__(self, exec_path, run_as, *args):
        # Filter name is filled in later by the rootwrap loader.
        self.name = ''
        self.exec_path = exec_path
        self.run_as = run_as
        self.args = args
        # Cached result of get_exec(); None means "not resolved yet".
        self.real_exec = None

    def get_exec(self, exec_dirs=None):
        """Returns existing executable, or empty string if none found.

        :param exec_dirs: directories searched when exec_path is relative.
        """
        if self.real_exec is not None:
            return self.real_exec
        self.real_exec = ""
        if self.exec_path.startswith('/'):
            # Absolute path: accept only if it is executable.
            if os.access(self.exec_path, os.X_OK):
                self.real_exec = self.exec_path
        else:
            # Relative path: search the configured executable directories.
            for binary_path in (exec_dirs or []):
                expanded_path = os.path.join(binary_path, self.exec_path)
                if os.access(expanded_path, os.X_OK):
                    self.real_exec = expanded_path
                    break
        return self.real_exec

    def match(self, userargs):
        """Only check that the first argument (command) matches exec_path."""
        return os.path.basename(self.exec_path) == userargs[0]

    def get_command(self, userargs, exec_dirs=None):
        """Returns command to execute (with sudo -u if run_as != root)."""
        to_exec = self.get_exec(exec_dirs=exec_dirs) or self.exec_path
        if self.run_as != 'root':
            # Used to run commands at lesser privileges
            return ['sudo', '-u', self.run_as, to_exec] + userargs[1:]
        return [to_exec] + userargs[1:]

    def get_environment(self, userargs):
        """Returns specific environment to set, None if none."""
        return None
class ExecCommandFilter(CommandFilter):
    """Base class for filters that compute their own exec argument list."""

    def exec_args(self, userargs):
        # No additional exec arguments by default.
        return []
class RegExpFilter(CommandFilter):
    """Command filter doing regexp matching for every argument."""

    def match(self, userargs):
        # Early skip if command or number of args don't match
        if len(self.args) != len(userargs):
            # DENY: argument numbers don't match
            return False
        # Compare each arg (anchoring pattern explicitly at end of string)
        for pattern, arg in zip(self.args, userargs):
            try:
                if not re.match(pattern + '$', arg):
                    # DENY: this argument does not match its pattern.
                    return False
            except re.error:
                # DENY: Badly-formed filter
                return False
        # ALLOW: All arguments matched
        return True
class PathFilter(CommandFilter):
    """Command filter checking that path arguments are within given dirs

    One can specify the following constraints for command arguments:
        1) pass     - pass an argument as is to the resulting command
        2) some_str - check if an argument is equal to the given string
        3) abs path - check if a path argument is within the given base dir

    A typical rootwrapper filter entry looks like this:
        # cmdname: filter name, raw command, user, arg_i_constraint [, ...]
        chown: PathFilter, /bin/chown, root, nova, /var/lib/images
    """

    def match(self, userargs):
        command, arguments = userargs[0], userargs[1:]

        # All three checks below must hold for the command to be allowed.
        equal_args_num = len(self.args) == len(arguments)
        exec_is_valid = super(PathFilter, self).match(userargs)
        args_equal_or_pass = all(
            arg == 'pass' or arg == value
            for arg, value in zip(self.args, arguments)
            if not os.path.isabs(arg)  # arguments not specifying abs paths
        )
        # NOTE(review): os.path.commonprefix is character-based, not
        # path-segment-based, so '/var/lib' is also a "prefix" of
        # '/var/lib2/x' — confirm whether that is acceptable here.
        paths_are_within_base_dirs = all(
            os.path.commonprefix([arg, os.path.realpath(value)]) == arg
            for arg, value in zip(self.args, arguments)
            if os.path.isabs(arg)  # arguments specifying abs paths
        )

        return (equal_args_num and
                exec_is_valid and
                args_equal_or_pass and
                paths_are_within_base_dirs)

    def get_command(self, userargs, exec_dirs=[]):
        command, arguments = userargs[0], userargs[1:]

        # convert path values to canonical ones; copy other args as is
        args = [os.path.realpath(value) if os.path.isabs(arg) else value
                for arg, value in zip(self.args, arguments)]

        return super(PathFilter, self).get_command([command] + args,
                                                   exec_dirs)
class DnsmasqFilter(CommandFilter):
    """Specific filter for the dnsmasq call (which includes env)."""

    def is_dnsmasq_cmd(self, argv):
        # True when the command token (after any env prefix) is dnsmasq.
        return argv[0] == "dnsmasq"

    def is_dnsmasq_env_vars(self, argv):
        # True when the first two tokens are the expected env assignments.
        return (argv[0].startswith("NEUTRON_RELAY_SOCKET_PATH=") and
                argv[1].startswith("NEUTRON_NETWORK_ID="))

    def match(self, userargs):
        """This matches the combination of the leading env
        vars plus "dnsmasq"
        """
        return (self.is_dnsmasq_env_vars(userargs) and
                self.is_dnsmasq_cmd(userargs[2:]))

    def get_command(self, userargs, exec_dirs=[]):
        # Strip the two env assignments and replace the command token with
        # the resolved executable path.
        to_exec = self.get_exec(exec_dirs=exec_dirs) or self.exec_path
        return [to_exec] + userargs[3:]

    def get_environment(self, userargs):
        # The env assignments are honored via the process environment
        # rather than being passed on the command line.
        env = os.environ.copy()
        env['NEUTRON_RELAY_SOCKET_PATH'] = userargs[0].split('=')[-1]
        env['NEUTRON_NETWORK_ID'] = userargs[1].split('=')[-1]
        return env
class DnsmasqNetnsFilter(DnsmasqFilter):
    """Specific filter for the dnsmasq call (which includes env)."""

    def is_ip_netns_cmd(self, argv):
        # True when the tokens spell out "ip netns exec".
        return (argv[0] == "ip") and (argv[1] == "netns") and (argv[2] == "exec")

    def match(self, userargs):
        """This matches the combination of the leading env
        vars plus "ip" "netns" "exec" <foo> "dnsmasq"
        """
        return (self.is_dnsmasq_env_vars(userargs) and
                self.is_ip_netns_cmd(userargs[2:]) and
                self.is_dnsmasq_cmd(userargs[6:]))
class KillFilter(CommandFilter):
    """Specific filter for the kill calls.

    1st argument is the user to run /bin/kill under
    2nd argument is the location of the affected executable
    Subsequent arguments list the accepted signals (if any)

    This filter relies on /proc to accurately determine affected
    executable, so it will only work on procfs-capable systems (not OSX).
    """

    def __init__(self, *args):
        super(KillFilter, self).__init__("/bin/kill", *args)

    def match(self, userargs):
        # Expected shapes: ['kill', pid] or ['kill', signal, pid].
        if userargs[0] != "kill":
            return False
        args = list(userargs)
        if len(args) == 3:
            # A specific signal is requested
            signal = args.pop(1)
            if signal not in self.args[1:]:
                # Requested signal not in accepted list
                return False
        else:
            if len(args) != 2:
                # Incorrect number of arguments
                return False
            if len(self.args) > 1:
                # No signal requested, but filter requires specific signal
                return False
        try:
            # Resolve the target process' executable through procfs.
            command = os.readlink("/proc/%d/exe" % int(args[1]))
            # NOTE(dprince): /proc/PID/exe may have ' (deleted)' on
            # the end if an executable is updated or deleted
            if command.endswith(" (deleted)"):
                command = command[:command.rindex(" ")]
            if command != self.args[0]:
                # Affected executable does not match
                return False
        except (ValueError, OSError):
            # Incorrect PID
            return False
        return True
class ReadFileFilter(CommandFilter):
    """Specific filter for the utils.read_file_as_root call."""

    def __init__(self, file_path, *args):
        # Only this exact file may be read (via /bin/cat as root).
        self.file_path = file_path
        super(ReadFileFilter, self).__init__("/bin/cat", "root", *args)

    def match(self, userargs):
        """Allow only exactly ``cat <file_path>``.

        The argument-count check runs first so that a short request such
        as ``['cat']`` is denied instead of raising IndexError (the
        original indexed userargs[1] before validating the length).
        """
        if len(userargs) != 2:
            return False
        if userargs[0] != 'cat':
            return False
        if userargs[1] != self.file_path:
            return False
        return True
class IpFilter(CommandFilter):
    """Specific filter for the ip utility to that does not match exec."""

    def match(self, userargs):
        """Allow ip commands, restricting ``ip netns`` to list/add/delete.

        Length guards are added so short commands such as ``['ip']`` or
        ``['ip', 'netns']`` are denied instead of raising IndexError.
        ``ip netns exec`` is intentionally not matched here; it is handled
        by IpNetnsExecFilter.
        """
        if not userargs or userargs[0] != 'ip':
            return False
        if len(userargs) > 1 and userargs[1] == 'netns':
            # Only harmless namespace-management subcommands pass.
            return len(userargs) > 2 and userargs[2] in ('list', 'add', 'delete')
        return True
class IpNetnsExecFilter(ExecCommandFilter):
    """Specific filter for the ip utility to that does match exec."""

    def match(self, userargs):
        # Only the exact "ip netns exec" prefix is accepted.
        return userargs[:3] == ['ip', 'netns', 'exec']

    def exec_args(self, userargs):
        # Everything after "ip netns exec <namespace>" is the inner command;
        # its binary is reduced to a basename for further filter matching.
        args = userargs[4:]
        if args:
            args[0] = os.path.basename(args[0])
        return args
| |
"""Module with shell specific actions, each shell class should
implement `from_shell`, `to_shell`, `app_alias`, `put_to_history` and `get_aliases`
methods.
"""
from collections import defaultdict
from psutil import Process
from subprocess import Popen, PIPE
from time import time
import io
import os
from .utils import DEVNULL, memoize
class Generic(object):
    """Base shell: no aliases, no history file, plain command passthrough."""

    def get_aliases(self):
        # Generic shells expose no aliases.
        return {}

    def _expand_aliases(self, command_script):
        # Replace only the leading binary name when it is a known alias.
        aliases = self.get_aliases()
        binary = command_script.split(' ')[0]
        if binary not in aliases:
            return command_script
        return command_script.replace(binary, aliases[binary], 1)

    def from_shell(self, command_script):
        """Prepares command before running in app."""
        return self._expand_aliases(command_script)

    def to_shell(self, command_script):
        """Prepares command for running in shell."""
        return command_script

    def app_alias(self, fuck):
        return "alias {0}='TF_ALIAS={0} eval $(thefuck $(fc -ln -1))'".format(fuck)

    def _get_history_file_name(self):
        # Subclasses point this at the shell's history file.
        return ''

    def _get_history_line(self, command_script):
        # Subclasses format a history entry for command_script.
        return ''

    def put_to_history(self, command_script):
        """Puts command script to shell history."""
        history_file_name = self._get_history_file_name()
        if os.path.isfile(history_file_name):
            with open(history_file_name, 'a') as history:
                history.write(self._get_history_line(command_script))

    def _script_from_history(self, line):
        """Returns prepared history line.

        Should return a blank line if history line is corrupted or empty.
        """
        return ''

    def get_history(self):
        """Returns list of history entries."""
        history_file_name = self._get_history_file_name()
        if os.path.isfile(history_file_name):
            with io.open(history_file_name, 'r',
                         encoding='utf-8', errors='ignore') as history:
                for line in history:
                    prepared = self._script_from_history(line).strip()
                    if prepared:
                        yield prepared

    def and_(self, *commands):
        return u' && '.join(commands)
class Bash(Generic):
    """Bash-specific alias discovery and history handling."""

    def app_alias(self, fuck):
        return "TF_ALIAS={0} alias {0}='eval $(thefuck $(fc -ln -1));" \
               " history -r'".format(fuck)

    def _parse_alias(self, alias):
        # "alias name='value'" -> (name, value) with matching outer quotes removed.
        name, value = alias.replace('alias ', '', 1).split('=', 1)
        if value[0] == value[-1] == '"' or value[0] == value[-1] == "'":
            value = value[1:-1]
        return name, value

    def get_aliases(self):
        proc = Popen('bash -ic alias', stdout=PIPE, stderr=DEVNULL,
                     shell=True)
        raw = proc.stdout.read().decode('utf-8')
        return dict(self._parse_alias(line)
                    for line in raw.split('\n')
                    if line and '=' in line)

    def _get_history_file_name(self):
        return os.environ.get("HISTFILE",
                              os.path.expanduser('~/.bash_history'))

    def _get_history_line(self, command_script):
        return u'{}\n'.format(command_script)

    def _script_from_history(self, line):
        # Bash history lines are plain commands already.
        return line
class Fish(Generic):
    """Fish shell support: function-based aliases and fish-format history."""

    def _get_overridden_aliases(self):
        # Comma-separated TF_OVERRIDDEN_ALIASES wins; otherwise a default
        # set of names that must not be treated as fish functions.
        overridden_aliases = os.environ.get('TF_OVERRIDDEN_ALIASES', '').strip()
        if overridden_aliases:
            return [alias.strip() for alias in overridden_aliases.split(',')]
        else:
            return ['cd', 'grep', 'ls', 'man', 'open']

    def app_alias(self, fuck):
        # Fish function wrapping thefuck; restores $status handling and
        # removes the fixed-up command from history on success.
        return ("set TF_ALIAS {0}\n"
                "function {0} -d 'Correct your previous console command'\n"
                " set -l exit_code $status\n"
                " set -l eval_script"
                " (mktemp 2>/dev/null ; or mktemp -t 'thefuck')\n"
                " set -l fucked_up_commandd $history[1]\n"
                " thefuck $fucked_up_commandd > $eval_script\n"
                " . $eval_script\n"
                " rm $eval_script\n"
                " if test $exit_code -ne 0\n"
                " history --delete $fucked_up_commandd\n"
                " end\n"
                "end").format(fuck)

    def get_aliases(self):
        # Every fish function (except overridden names) acts as an "alias"
        # mapping to itself.
        overridden = self._get_overridden_aliases()
        proc = Popen('fish -ic functions', stdout=PIPE, stderr=DEVNULL,
                     shell=True)
        functions = proc.stdout.read().decode('utf-8').strip().split('\n')
        return {func: func for func in functions if func not in overridden}

    def _expand_aliases(self, command_script):
        # Known functions must be re-run inside fish so they resolve.
        aliases = self.get_aliases()
        binary = command_script.split(' ')[0]
        if binary in aliases:
            return u'fish -ic "{}"'.format(command_script.replace('"', r'\"'))
        else:
            return command_script

    def from_shell(self, command_script):
        """Prepares command before running in app."""
        return self._expand_aliases(command_script)

    def _get_history_file_name(self):
        return os.path.expanduser('~/.config/fish/fish_history')

    def _get_history_line(self, command_script):
        # fish history entries are "- cmd: <cmd>\n   when: <ts>" records.
        return u'- cmd: {}\n when: {}\n'.format(command_script, int(time()))

    def and_(self, *commands):
        return u'; and '.join(commands)
class Zsh(Generic):
    """Zsh-specific alias discovery and extended-history handling."""

    def app_alias(self, fuck):
        return "TF_ALIAS={0}" \
               " alias {0}='eval $(thefuck $(fc -ln -1 | tail -n 1));" \
               " fc -R'".format(fuck)

    def _parse_alias(self, alias):
        # "name=value" -> (name, value), stripping matching outer quotes.
        name, value = alias.split('=', 1)
        if value[0] == value[-1] == '"' or value[0] == value[-1] == "'":
            value = value[1:-1]
        return name, value

    def get_aliases(self):
        proc = Popen('zsh -ic alias', stdout=PIPE, stderr=DEVNULL,
                     shell=True)
        output = proc.stdout.read().decode('utf-8')
        return dict(self._parse_alias(line)
                    for line in output.split('\n')
                    if line and '=' in line)

    def _get_history_file_name(self):
        return os.environ.get("HISTFILE",
                              os.path.expanduser('~/.zsh_history'))

    def _get_history_line(self, command_script):
        # Extended history format: ": <timestamp>:0;<command>".
        return u': {}:0;{}\n'.format(int(time()), command_script)

    def _script_from_history(self, line):
        # Strip the ": ts:0;" prefix; corrupted lines yield ''.
        if ';' in line:
            return line.split(';', 1)[1]
        return ''
class Tcsh(Generic):
    """Tcsh-specific alias discovery and history handling."""

    def app_alias(self, fuck):
        return ("alias {0} 'setenv TF_ALIAS {0} && "
                "set fucked_cmd=`history -h 2 | head -n 1` && "
                "eval `thefuck ${{fucked_cmd}}`'").format(fuck)

    def _parse_alias(self, alias):
        # tcsh prints "name<TAB>value".
        name, value = alias.split("\t", 1)
        return name, value

    def get_aliases(self):
        proc = Popen('tcsh -ic alias', stdout=PIPE, stderr=DEVNULL,
                     shell=True)
        output = proc.stdout.read().decode('utf-8')
        return dict(self._parse_alias(line)
                    for line in output.split('\n')
                    if line and '\t' in line)

    def _get_history_file_name(self):
        return os.environ.get("HISTFILE",
                              os.path.expanduser('~/.history'))

    def _get_history_line(self, command_script):
        return u'#+{}\n{}\n'.format(int(time()), command_script)
# Map of shell process names to handler instances; unknown shells fall
# back to a fresh Generic() via the defaultdict factory.
shells = defaultdict(lambda: Generic(), {
    'bash': Bash(),
    'fish': Fish(),
    'zsh': Zsh(),
    'csh': Tcsh(),
    'tcsh': Tcsh()})
@memoize
def _get_shell():
    """Detect the parent process' shell and return its handler (cached)."""
    try:
        # psutil >= 2.0: parent() and name() are methods.
        shell = Process(os.getpid()).parent().name()
    except TypeError:
        # psutil < 2.0: parent and name are plain attributes.
        shell = Process(os.getpid()).parent.name
    return shells[shell]
def from_shell(command):
    """Prepare a command before running it in the app (delegates)."""
    return from_shell := None if False else _get_shell().from_shell(command)


def to_shell(command):
    """Prepare a command for running in the detected shell."""
    return _get_shell().to_shell(command)


def app_alias(alias):
    """Return the alias-definition snippet for the detected shell."""
    return _get_shell().app_alias(alias)


def thefuck_alias():
    """Name of the thefuck alias (TF_ALIAS), defaulting to 'fuck'."""
    return os.environ.get('TF_ALIAS', 'fuck')


def put_to_history(command):
    """Append command to the detected shell's history file."""
    return _get_shell().put_to_history(command)


def and_(*commands):
    """Join commands with the detected shell's 'and' operator."""
    return _get_shell().and_(*commands)


@memoize
def get_aliases():
    """Cached list of alias names known to the detected shell."""
    return list(_get_shell().get_aliases().keys())


@memoize
def get_history():
    """Cached list of prepared history entries."""
    return list(_get_shell().get_history())
| |
"""Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# Giorgio Patrini
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import struct
import scipy as sp
import scipy.io
from functools import wraps
from operator import itemgetter
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import tempfile
import shutil
import os.path as op
import atexit
# WindowsError only exist on Windows
try:
WindowsError
except NameError:
WindowsError = None
import sklearn
from sklearn.base import BaseEstimator
from sklearn.externals import joblib
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
from numpy.testing import assert_approx_equal
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
from sklearn.cluster import DBSCAN
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal",
"assert_approx_equal"]
try:
    from nose.tools import assert_in, assert_not_in
except ImportError:
    # Nose < 1.0.0: provide minimal fallbacks with the same signatures.
    def assert_in(x, container):
        assert_true(x in container, msg="%r in %r" % (x, container))

    def assert_not_in(x, container):
        assert_false(x in container, msg="%r in %r" % (x, container))
try:
    from nose.tools import assert_raises_regex
except ImportError:
    # for Python 2
    def assert_raises_regex(expected_exception, expected_regexp,
                            callable_obj=None, *args, **kwargs):
        """Helper function to check for message patterns in exceptions"""
        not_raised = False
        try:
            callable_obj(*args, **kwargs)
            not_raised = True
        except expected_exception as e:
            error_message = str(e)
            # re.search semantics: the pattern may match anywhere in the text.
            if not re.compile(expected_regexp).search(error_message):
                raise AssertionError("Error message should match pattern "
                                     "%r. %r does not." %
                                     (expected_regexp, error_message))
        if not_raised:
            raise AssertionError("%s not raised by %s" %
                                 (expected_exception.__name__,
                                  callable_obj.__name__))

# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex but lets keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
    """Assert ``a <= b``, appending *msg* to the failure message if given."""
    message = "%r is not lower than or equal to %r" % (a, b)
    if msg is not None:
        message = message + ": " + msg
    assert a <= b, message
def assert_greater_equal(a, b, msg=None):
    """Assert ``a >= b``, appending *msg* to the failure message if given."""
    message = "%r is not greater than or equal to %r" % (a, b)
    if msg is not None:
        message = message + ": " + msg
    assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
    """Test that a certain warning occurs.

    Parameters
    ----------
    warning_class : the warning class
        The class to test for, e.g. UserWarning.

    func : callable
        Callable object to trigger warnings.

    *args : the positional arguments to `func`.

    **kw : the keyword arguments to `func`

    Returns
    -------
    result : the return value of `func`
    """
    # very important to avoid uncontrolled state propagation
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as w:
        # Cause all warnings to always be triggered.
        warnings.simplefilter("always")
        # Trigger a warning.
        result = func(*args, **kw)
        if hasattr(np, 'VisibleDeprecationWarning'):
            # Filter out numpy-specific warnings in numpy >= 1.9
            w = [e for e in w
                 if e.category is not np.VisibleDeprecationWarning]

        # Verify some things
        if not len(w) > 0:
            raise AssertionError("No warning raised when calling %s"
                                 % func.__name__)

        # NOTE: identity (is), not issubclass — subclasses of warning_class
        # deliberately do not count as a match here.
        found = any(warning.category is warning_class for warning in w)
        if not found:
            raise AssertionError("%s did not give warning: %s( is %s)"
                                 % (func.__name__, warning_class, w))
    return result
def assert_warns_message(warning_class, message, func, *args, **kw):
    """Test that a certain warning occurs and with a certain message.

    Parameters
    ----------
    warning_class : the warning class
        The class to test for, e.g. UserWarning.

    message : str | callable
        The entire message or a substring to test for. If callable,
        it takes a string as argument and will trigger an assertion error
        if it returns `False`.

    func : callable
        Callable object to trigger warnings.

    *args : the positional arguments to `func`.

    **kw : the keyword arguments to `func`.

    Returns
    -------
    result : the return value of `func`
    """
    # very important to avoid uncontrolled state propagation
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as w:
        # Cause all warnings to always be triggered.
        warnings.simplefilter("always")
        if hasattr(np, 'VisibleDeprecationWarning'):
            # Let's not catch the numpy internal DeprecationWarnings
            warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
        # Trigger a warning.
        result = func(*args, **kw)
        # Verify some things
        if not len(w) > 0:
            raise AssertionError("No warning raised when calling %s"
                                 % func.__name__)

        # Unlike assert_warns, subclasses of warning_class DO match here.
        found = [issubclass(warning.category, warning_class) for warning in w]
        if not any(found):
            raise AssertionError("No warning raised for %s with class "
                                 "%s"
                                 % (func.__name__, warning_class))

        message_found = False
        # Checks the message of all warnings belong to warning_class
        for index in [i for i, x in enumerate(found) if x]:
            # substring will match, the entire message with typo won't
            msg = w[index].message  # For Python 3 compatibility
            msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
            if callable(message):  # add support for certain tests
                check_in_message = message
            else:
                check_in_message = lambda msg: message in msg

            if check_in_message(msg):
                message_found = True
                break

        if not message_found:
            raise AssertionError("Did not receive the message you expected "
                                 "('%s') for <%s>, got: '%s'"
                                 % (message, func.__name__, msg))

    return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
    """Call ``func`` and fail if it emits any (non-numpy-internal) warning."""
    # XXX: once we may depend on python >= 2.6, this can be replaced by the
    # warnings module context manager.
    # very important to avoid uncontrolled state propagation
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')

        result = func(*args, **kw)
        if hasattr(np, 'VisibleDeprecationWarning'):
            # Filter out numpy-specific warnings in numpy >= 1.9
            w = [e for e in w
                 if e.category is not np.VisibleDeprecationWarning]

        if len(w) > 0:
            raise AssertionError("Got warnings when calling %s: %s"
                                 % (func.__name__, w))
    return result
def ignore_warnings(obj=None):
    """ Context manager and decorator to ignore warnings

    Note. Using this (in both variants) will clear all warnings
    from all python modules loaded. In case you need to test
    cross-module-warning-logging this is not your tool of choice.

    Examples
    --------
    >>> with ignore_warnings():
    ...     warnings.warn('buhuhuhu')

    >>> def nasty_warn():
    ...    warnings.warn('buhuhuhu')
    ...    print(42)

    >>> ignore_warnings(nasty_warn)()
    42
    """
    # Called with a callable -> act as a decorator; otherwise hand back a
    # context-manager instance.
    return _ignore_warnings(obj) if callable(obj) else _IgnoreWarnings()
def _ignore_warnings(fn):
    """Decorator to catch and hide warnings without visual nesting.

    Warnings raised inside ``fn`` are recorded (and thereby suppressed);
    ``catch_warnings`` restores the previous warning state on exit.

    Fix over the original: it cleared the captured-warnings list on a line
    placed *after* ``return fn(...)``, which was unreachable dead code; the
    line has been removed (the record list is discarded anyway).
    """
    @wraps(fn)
    def wrapper(*args, **kwargs):
        # very important to avoid uncontrolled state propagation
        clean_warning_registry()
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('always')
            return fn(*args, **kwargs)

    return wrapper
class _IgnoreWarnings(object):
    """Improved and simplified Python warnings context manager

    Copied from Python 2.7.5 and modified as required.
    """

    def __init__(self):
        """
        Parameters
        ==========
        category : warning class
            The category to filter. Defaults to Warning. If None,
            all categories will be muted.
        """
        # Always records; the log is discarded again in __exit__.
        self._record = True
        self._module = sys.modules['warnings']
        self._entered = False
        self.log = []

    def __repr__(self):
        args = []
        if self._record:
            args.append("record=True")
        if self._module is not sys.modules['warnings']:
            args.append("module=%r" % self._module)
        name = type(self).__name__
        return "%s(%s)" % (name, ", ".join(args))

    def __enter__(self):
        clean_warning_registry()  # be safe and not propagate state + chaos
        warnings.simplefilter('always')
        if self._entered:
            raise RuntimeError("Cannot enter %r twice" % self)
        self._entered = True
        # Save the module's filters and showwarning hook so __exit__ can
        # restore them exactly.
        self._filters = self._module.filters
        self._module.filters = self._filters[:]
        self._showwarning = self._module.showwarning
        if self._record:
            self.log = []

            def showwarning(*args, **kwargs):
                # Capture instead of printing; entries land in self.log.
                self.log.append(warnings.WarningMessage(*args, **kwargs))
            self._module.showwarning = showwarning
            return self.log
        else:
            return None

    def __exit__(self, *exc_info):
        if not self._entered:
            raise RuntimeError("Cannot exit %r without entering first" % self)
        self._module.filters = self._filters
        self._module.showwarning = self._showwarning
        self.log[:] = []
        clean_warning_registry()  # be safe and not propagate state + chaos
# Prefer nose's implementations when available; fall back to the local ones.
try:
    from nose.tools import assert_less
except ImportError:
    assert_less = _assert_less

try:
    from nose.tools import assert_greater
except ImportError:
    assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
# numpy >= 1.5 ships assert_allclose; otherwise use the local fallback.
if hasattr(np.testing, 'assert_allclose'):
    assert_allclose = np.testing.assert_allclose
else:
    assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
    """Helper function to test error messages in exceptions

    Parameters
    ----------
    exceptions : exception or tuple of exception
        Name of the estimator

    func : callable
        Calable object to raise error

    *args : the positional arguments to `func`.

    **kw : the keyword arguments to `func`
    """
    try:
        function(*args, **kwargs)
    except exceptions as e:
        error_message = str(e)
        if message not in error_message:
            raise AssertionError("Error message does not include the expected"
                                 " string: %r. Observed error message: %r" %
                                 (message, error_message))
    else:
        # concatenate exception names
        names = (" or ".join(e.__name__ for e in exceptions)
                 if isinstance(exceptions, tuple)
                 else exceptions.__name__)
        raise AssertionError("%s not raised by %s" %
                             (names, function.__name__))
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
    """Create a fake mldata data set.

    Parameters
    ----------
    columns_dict : dict, keys=str, values=ndarray
        Contains data as columns_dict[column_name] = array of data.

    dataname : string
        Name of data set.

    matfile : string or file object
        The file name string or the file-like object of the output file.

    ordering : list, default None
        List of column_names, determines the ordering in the data set.

    Notes
    -----
    This function transposes all arrays, while fetch_mldata only transposes
    'data', keep that into account in the tests.
    """
    datasets = dict(columns_dict)

    # transpose all variables
    for name in datasets:
        datasets[name] = datasets[name].T

    if ordering is None:
        ordering = sorted(list(datasets.keys()))
    # NOTE: setting up this array is tricky, because of the way Matlab
    # re-packages 1D arrays
    # Use np.empty: the original called sp.empty, relying on scipy's
    # deprecated (and since removed) top-level re-export of numpy functions.
    datasets['mldata_descr_ordering'] = np.empty((1, len(ordering)),
                                                 dtype='object')
    for i, name in enumerate(ordering):
        datasets['mldata_descr_ordering'][0, i] = name

    scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
    """Callable standing in for ``urlopen`` when testing fetch_mldata."""

    def __init__(self, mock_datasets):
        """Object that mocks the urlopen function to fake requests to mldata.

        `mock_datasets` is a dictionary of {dataset_name: data_dict}, or
        {dataset_name: (data_dict, ordering).
        `data_dict` itself is a dictionary of {column_name: data_array},
        and `ordering` is a list of column_names to determine the ordering
        in the data set (see `fake_mldata` for details).

        When requesting a dataset with a name that is in mock_datasets,
        this object creates a fake dataset in a StringIO object and
        returns it. Otherwise, it raises an HTTPError.
        """
        self.mock_datasets = mock_datasets

    def __call__(self, urlname):
        # The dataset name is the last component of the mldata URL.
        dataset_name = urlname.split('/')[-1]
        if dataset_name in self.mock_datasets:
            resource_name = '_' + dataset_name
            from io import BytesIO
            matfile = BytesIO()

            dataset = self.mock_datasets[dataset_name]
            ordering = None
            if isinstance(dataset, tuple):
                dataset, ordering = dataset
            fake_mldata(dataset, resource_name, matfile, ordering)

            # Rewind so callers can read the fake .mat payload from the top.
            matfile.seek(0)
            return matfile
        else:
            raise HTTPError(urlname, 404, dataset_name + " is not available",
                            [], None)
def install_mldata_mock(mock_datasets):
    """Patch sklearn.datasets.mldata.urlopen with the fake-dataset mock."""
    # Lazy import to avoid mutually recursive imports
    from sklearn import datasets
    datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
    """Restore the real urlopen on sklearn.datasets.mldata."""
    # Lazy import to avoid mutually recursive imports
    from sklearn import datasets
    datasets.mldata.urlopen = urlopen
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier", "MultiOutputRegressor",
                   "OutputCodeClassifier", "OneVsRestClassifier", "RFE",
                   "RFECV", "BaseEnsemble"]

# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV", "RandomizedSearchCV",
         "SelectFromModel"]

# some strange ones, excluded from the common estimator checks
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
             'LabelBinarizer', 'LabelEncoder',
             'MultiLabelBinarizer', 'TfidfTransformer',
             'TfidfVectorizer', 'IsotonicRegression',
             'OneHotEncoder', 'RandomTreesEmbedding',
             'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
             'TruncatedSVD', 'PolynomialFeatures',
             'GaussianRandomProjectionHash', 'HashingVectorizer',
             'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
             # GradientBoosting base estimators, maybe should
             # exclude them in another way
             'ZeroEstimator', 'ScaledLogOddsEstimator',
             'QuantileEstimator', 'MeanEstimator',
             'LogOddsEstimator', 'PriorProbabilityEstimator',
             '_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
                   include_other=False, type_filter=None,
                   include_dont_test=False):
    """Get a list of all estimators from sklearn.

    This function crawls the module and gets all classes that inherit
    from BaseEstimator. Classes that are defined in test-modules are not
    included.
    By default meta_estimators such as GridSearchCV are also not included.

    Parameters
    ----------
    include_meta_estimators : boolean, default=False
        Whether to include meta-estimators that can be constructed using
        an estimator as their first argument. These are currently
        BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
        OneVsRestClassifier, RFE, RFECV.

    include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and can
        not be default-constructed sensibly. These are currently
        Pipeline, FeatureUnion and GridSearchCV

    include_dont_test : boolean, default=False
        Whether to include "special" label estimator or test processors.

    type_filter : string, list of string, or None, default=None
        Which kind of estimators should be returned. If None, no filter is
        applied and all estimators are returned.  Possible values are
        'classifier', 'regressor', 'cluster' and 'transformer' to get
        estimators only of these specific types, or a list of these to
        get the estimators that fit at least one of the types.

    Returns
    -------
    estimators : list of tuples
        List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
    """
    def is_abstract(c):
        # A class is abstract iff it declares unimplemented abstractmethods.
        if not(hasattr(c, '__abstractmethods__')):
            return False
        if not len(c.__abstractmethods__):
            return False
        return True

    all_classes = []
    # get parent folder
    path = sklearn.__path__
    for importer, modname, ispkg in pkgutil.walk_packages(
            path=path, prefix='sklearn.', onerror=lambda x: None):
        # Classes defined in test modules are not part of the public API.
        if (".tests." in modname):
            continue
        module = __import__(modname, fromlist="dummy")
        classes = inspect.getmembers(module, inspect.isclass)
        all_classes.extend(classes)

    # De-duplicate classes re-exported by several modules.
    all_classes = set(all_classes)

    estimators = [c for c in all_classes
                  if (issubclass(c[1], BaseEstimator)
                      and c[0] != 'BaseEstimator')]
    # get rid of abstract base classes
    estimators = [c for c in estimators if not is_abstract(c[1])]

    if not include_dont_test:
        estimators = [c for c in estimators if not c[0] in DONT_TEST]

    if not include_other:
        estimators = [c for c in estimators if not c[0] in OTHER]
    # possibly get rid of meta estimators
    if not include_meta_estimators:
        estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
    if type_filter is not None:
        if not isinstance(type_filter, list):
            type_filter = [type_filter]
        else:
            type_filter = list(type_filter)  # copy
        filtered_estimators = []
        filters = {'classifier': ClassifierMixin,
                   'regressor': RegressorMixin,
                   'transformer': TransformerMixin,
                   'cluster': ClusterMixin}
        for name, mixin in filters.items():
            if name in type_filter:
                type_filter.remove(name)
                filtered_estimators.extend([est for est in estimators
                                            if issubclass(est[1], mixin)])
        estimators = filtered_estimators
        # Anything left in type_filter was not a recognized category.
        if type_filter:
            raise ValueError("Parameter type_filter must be 'classifier', "
                             "'regressor', 'transformer', 'cluster' or None, got"
                             " %s." % repr(type_filter))

    # drop duplicates, sort for reproducibility
    # itemgetter is used to ensure the sort does not extend to the 2nd item of
    # the tuple
    return sorted(set(estimators), key=itemgetter(0))
def set_random_state(estimator, random_state=0):
    """Set the ``random_state`` parameter of *estimator* when it has one.

    Estimators for which ``random_state`` is deprecated are ignored.
    Currently DBSCAN is one such class.
    """
    if isinstance(estimator, DBSCAN):
        # random_state is deprecated on DBSCAN; leave it untouched.
        return
    params = estimator.get_params()
    if "random_state" in params:
        estimator.set_params(random_state=random_state)
def if_matplotlib(func):
    """Test decorator that skips the wrapped test when matplotlib is unusable."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            import matplotlib
            matplotlib.use('Agg', warn=False)
            # Creating a figure fails when no $DISPLAY is available.
            import matplotlib.pyplot as plt
            plt.figure()
        except ImportError:
            raise SkipTest('Matplotlib not available.')
        else:
            # Run the test only on the no-exception path so an ImportError
            # raised by the test itself is not swallowed as a skip.
            return func(*args, **kwargs)
    return wrapper
def skip_if_32bit(func):
    """Test decorator that skips tests on 32bit platforms."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        # The size of a pointer ("P") distinguishes 32-bit from 64-bit builds.
        if 8 * struct.calcsize("P") == 32:
            raise SkipTest('Test skipped on 32bit platforms.')
        return func(*args, **kwargs)
    return wrapper
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
                  message='Multi-process bug in Mac OS X >= 10.7 '
                          '(see issue #636)'):
    """Deprecated decorator factory: skip the test when running on one of
    the given Mac OS X major.minor ``versions``.
    """
    warnings.warn("if_not_mac_os is deprecated in 0.17 and will be removed"
                  " in 0.19: use the safer and more generic"
                  " if_safe_multiprocessing_with_blas instead",
                  DeprecationWarning)
    # Compare only the "major.minor" portion of the reported version string.
    mac_version = platform.mac_ver()[0]
    skip = '.'.join(mac_version.split('.')[:2]) in versions
    def decorator(func):
        if skip:
            # Deliberately shadow the wrapped function with a skipper.
            @wraps(func)
            def func(*args, **kwargs):
                raise SkipTest(message)
        return func
    return decorator
def if_safe_multiprocessing_with_blas(func):
    """Decorator for tests involving both BLAS calls and multiprocessing

    Under POSIX (e.g. Linux or OSX), using multiprocessing in conjunction
    with some implementation of BLAS (or other libraries that manage an
    internal posix thread pool) can cause a crash or a freeze of the Python
    process. In practice this seems to only impact OSX users, so the test
    is skipped there.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Only OSX is known to be affected; run normally elsewhere.
        if sys.platform == 'darwin':
            raise SkipTest(
                "Possible multi-process bug with some BLAS")
        return func(*args, **kwargs)
    return wrapper
def clean_warning_registry():
    """Reset warning filters and clear every module's warning registry."""
    warnings.resetwarnings()
    registry_attr = "__warningregistry__"
    # Snapshot sys.modules since clearing registries may import lazily.
    for module_name, module in list(sys.modules.items()):
        # Touching six.moves lazy modules can trigger imports; skip them.
        if 'six.moves' in module_name:
            continue
        if hasattr(module, registry_attr):
            getattr(module, registry_attr).clear()
def check_skip_network():
    """Raise SkipTest when SKLEARN_SKIP_NETWORK_TESTS is set to a truthy int."""
    flag = os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)
    if int(flag):
        raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
    """Raise SkipTest when the TRAVIS environment variable equals "true"."""
    on_travis = os.environ.get('TRAVIS') == "true"
    if on_travis:
        raise SkipTest("This test needs to be skipped on Travis")
def _delete_folder(folder_path, warn=False):
    """Utility function to cleanup a temporary folder if still existing.
    Copy from joblib.pool (for independence)"""
    try:
        if os.path.exists(folder_path):
            # This can fail under windows,
            # but will succeed when called by atexit
            shutil.rmtree(folder_path)
    except WindowsError:
        # NOTE(review): WindowsError only exists on Windows interpreters;
        # on other platforms a failing rmtree would surface as NameError
        # here — presumably acceptable since this failure mode is
        # Windows-specific, but confirm.
        if warn:
            warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap(object):
    """Context manager that serves ``data`` back as a memory-mapped copy.

    The data is dumped with joblib into a private temporary folder which
    is removed on ``__exit__`` (and again at interpreter shutdown as a
    fallback, since the first removal can fail on Windows while the
    memmap is still open).
    """
    def __init__(self, data, mmap_mode='r'):
        # mmap_mode is forwarded to joblib.load (default: read-only map).
        self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
        self.mmap_mode = mmap_mode
        self.data = data
    def __enter__(self):
        fpath = op.join(self.temp_folder, 'data.pkl')
        joblib.dump(self.data, fpath)
        data_read_only = joblib.load(fpath, mmap_mode=self.mmap_mode)
        # Fallback cleanup at interpreter exit in case __exit__'s removal
        # fails (e.g. files still open under Windows).
        atexit.register(lambda: _delete_folder(self.temp_folder, warn=True))
        return data_read_only
    def __exit__(self, exc_type, exc_val, exc_tb):
        _delete_folder(self.temp_folder)
# nose-style setup decorators: tests wrapped with these run the matching
# check_skip_* guard before the test body executes.
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
| |
#!/usr/bin/python
# Copyright 2017 Google Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: gce_eip
version_added: "2.3"
short_description: Create or Destroy Global or Regional External IP addresses.
description:
- Create (reserve) or Destroy (release) Regional or Global IP Addresses. See
U(https://cloud.google.com/compute/docs/configure-instance-ip-addresses#reserve_new_static) for more on reserving static addresses.
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.19.0"
notes:
- Global addresses can only be used with Global Forwarding Rules.
author:
- "Tom Melendez (@supertom) <tom@supertom.com>"
options:
name:
description:
- Name of Address.
required: true
region:
description:
- Region to create the address in. Set to 'global' to create a global address.
required: true
state:
description: The state the address should be in. C(present) or C(absent) are the only valid options.
default: present
required: false
choices: [present, absent]
'''
EXAMPLES = '''
# Create a Global external IP address
gce_eip:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
name: my-global-ip
region: global
state: present
# Create a Regional external IP address
gce_eip:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
name: my-global-ip
region: us-east1
state: present
'''
RETURN = '''
address:
description: IP address being operated on
returned: always
type: string
sample: "35.186.222.233"
name:
description: name of the address being operated on
returned: always
type: string
sample: "my-address"
region:
description: Which region an address belongs.
returned: always
type: string
sample: "global"
'''
# Identification sent to the GCE API by the libcloud driver.
USER_AGENT_VERSION = 'v1'
USER_AGENT_PRODUCT = 'Ansible-gce_eip'
# Feature-detect python >= 2.6 via the ast module; reported via fail_json
# in main() instead of crashing at import time.
try:
    from ast import literal_eval
    HAS_PYTHON26 = True
except ImportError:
    HAS_PYTHON26 = False
# Same pattern for the optional libcloud dependency.
try:
    import libcloud
    from libcloud.compute.types import Provider
    from libcloud.compute.providers import get_driver
    from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
        ResourceExistsError, ResourceInUseError, ResourceNotFoundError
    from libcloud.compute.drivers.gce import GCEAddress
    _ = Provider.GCE
    HAS_LIBCLOUD = True
except ImportError:
    HAS_LIBCLOUD = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gcp import gcp_connect
def get_address(gce, name, region):
    """
    Get an Address from GCE.
    :param gce: An initialized GCE driver object.
    :type gce: :class: `GCENodeDriver`
    :param name: Name of the Address.
    :type name: ``str``
    :param region: Name of the region the address lives in, or 'global'.
    :type region: ``str``
    :return: A GCEAddress object or None.
    :rtype: :class: `GCEAddress` or None
    """
    try:
        return gce.ex_get_address(name=name, region=region)
    except ResourceNotFoundError:
        # A missing address is an expected outcome, not an error.
        return None
def create_address(gce, params):
    """
    Create (reserve) a new static IP Address.
    :param gce: An initialized GCE driver object.
    :type gce: :class: `GCENodeDriver`
    :param params: Dictionary of parameters needed by the module.
    :type params: ``dict``
    :return: Tuple with changed status and address.
    :rtype: tuple in the format of (bool, str)
    """
    address = gce.ex_create_address(
        name=params['name'], region=params['region'])
    if not address:
        # Driver returned nothing: report no change.
        return (False, [])
    return (True, address.address)
def delete_address(address):
    """
    Delete (release) an Address.
    :param address: The Address object to destroy.
    :type address: :class: `GCEAddress`
    :return: Tuple with changed status and the released IP address.
    :rtype: tuple in the format of (bool, str)
    """
    # Original docstring documented gce/params arguments the function does
    # not take; it operates directly on the Address object.
    if address.destroy():
        return (True, address.address)
    return (False, [])
def main():
    """Ansible entry point: reserve or release a GCE static IP address."""
    module = AnsibleModule(argument_spec=dict(
        name=dict(required=True),
        state=dict(choices=['absent', 'present'], default='present'),
        region=dict(required=True),
        service_account_email=dict(),
        service_account_permissions=dict(type='list'),
        pem_file=dict(type='path'),
        credentials_file=dict(type='path'),
        project_id=dict(), ), )
    # Fail fast with actionable messages when dependencies are missing.
    if not HAS_PYTHON26:
        module.fail_json(
            msg="GCE module requires python's 'ast' module, python v2.6+")
    if not HAS_LIBCLOUD:
        module.fail_json(
            msg='libcloud with GCE support (+0.19) required for this module.')
    gce = gcp_connect(module, Provider.GCE, get_driver,
                      USER_AGENT_PRODUCT, USER_AGENT_VERSION)
    params = {}
    params['state'] = module.params.get('state')
    params['name'] = module.params.get('name')
    params['region'] = module.params.get('region')
    changed = False
    json_output = {'state': params['state']}
    # Look up the address first so we can decide between delete / create /
    # idempotent no-op below.
    address = get_address(gce, params['name'], region=params['region'])
    if params['state'] == 'absent':
        if not address:
            # Doesn't exist in GCE, and state==absent.
            changed = False
            module.fail_json(
                msg="Cannot delete unknown address: %s" %
                (params['name']))
        else:
            # Delete
            (changed, json_output['address']) = delete_address(address)
    else:
        if not address:
            # Create
            (changed, json_output['address']) = create_address(gce,
                                                               params)
        else:
            # Already reserved: report the existing IP without changes.
            changed = False
            json_output['address'] = address.address
    json_output['changed'] = changed
    json_output.update(params)
    module.exit_json(**json_output)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| |
"""Preference management for cloud."""
from ipaddress import ip_address
from typing import Optional
from homeassistant.auth.const import GROUP_ID_ADMIN
from homeassistant.auth.models import User
from homeassistant.core import callback
from homeassistant.util.logging import async_create_catching_coro
from .const import (
DEFAULT_ALEXA_REPORT_STATE,
DEFAULT_GOOGLE_REPORT_STATE,
DOMAIN,
PREF_ALEXA_ENTITY_CONFIGS,
PREF_ALEXA_REPORT_STATE,
PREF_ALIASES,
PREF_CLOUD_USER,
PREF_CLOUDHOOKS,
PREF_DISABLE_2FA,
PREF_ENABLE_ALEXA,
PREF_ENABLE_GOOGLE,
PREF_ENABLE_REMOTE,
PREF_GOOGLE_ENTITY_CONFIGS,
PREF_GOOGLE_LOCAL_WEBHOOK_ID,
PREF_GOOGLE_REPORT_STATE,
PREF_GOOGLE_SECURE_DEVICES_PIN,
PREF_OVERRIDE_NAME,
PREF_SHOULD_EXPOSE,
PREF_USERNAME,
InvalidTrustedNetworks,
InvalidTrustedProxies,
)
# Storage slot for the persisted preferences; versioned for migrations.
STORAGE_KEY = DOMAIN
STORAGE_VERSION = 1
# Sentinel distinguishing "argument not passed" from an explicit None.
_UNDEF = object()
class CloudPreferences:
    """Handle cloud preferences."""
    def __init__(self, hass):
        """Initialize cloud prefs."""
        self._hass = hass
        self._store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
        # Loaded lazily in async_initialize.
        self._prefs = None
        self._listeners = []
    async def async_initialize(self):
        """Finish initializing the preferences."""
        prefs = await self._store.async_load()
        if prefs is None:
            prefs = self._empty_config("")
        self._prefs = prefs
        # Migration: stored prefs from before the Google local webhook
        # existed get one generated and persisted on first load.
        if PREF_GOOGLE_LOCAL_WEBHOOK_ID not in self._prefs:
            await self._save_prefs(
                {
                    **self._prefs,
                    PREF_GOOGLE_LOCAL_WEBHOOK_ID: self._hass.components.webhook.async_generate_id(),
                }
            )
    @callback
    def async_listen_updates(self, listener):
        """Listen for updates to the preferences."""
        self._listeners.append(listener)
    async def async_update(
        self,
        *,
        google_enabled=_UNDEF,
        alexa_enabled=_UNDEF,
        remote_enabled=_UNDEF,
        google_secure_devices_pin=_UNDEF,
        cloudhooks=_UNDEF,
        cloud_user=_UNDEF,
        google_entity_configs=_UNDEF,
        alexa_entity_configs=_UNDEF,
        alexa_report_state=_UNDEF,
        google_report_state=_UNDEF,
    ):
        """Update user preferences.

        Raises InvalidTrustedNetworks / InvalidTrustedProxies when remote
        access is being enabled while localhost could bypass auth.
        """
        prefs = {**self._prefs}
        # Only keys the caller explicitly passed (not _UNDEF) are updated.
        for key, value in (
            (PREF_ENABLE_GOOGLE, google_enabled),
            (PREF_ENABLE_ALEXA, alexa_enabled),
            (PREF_ENABLE_REMOTE, remote_enabled),
            (PREF_GOOGLE_SECURE_DEVICES_PIN, google_secure_devices_pin),
            (PREF_CLOUDHOOKS, cloudhooks),
            (PREF_CLOUD_USER, cloud_user),
            (PREF_GOOGLE_ENTITY_CONFIGS, google_entity_configs),
            (PREF_ALEXA_ENTITY_CONFIGS, alexa_entity_configs),
            (PREF_ALEXA_REPORT_STATE, alexa_report_state),
            (PREF_GOOGLE_REPORT_STATE, google_report_state),
        ):
            if value is not _UNDEF:
                prefs[key] = value
        # The raise happens before _save_prefs, so the mutated local copy
        # is discarded and nothing is persisted.
        if remote_enabled is True and self._has_local_trusted_network:
            prefs[PREF_ENABLE_REMOTE] = False
            raise InvalidTrustedNetworks
        if remote_enabled is True and self._has_local_trusted_proxies:
            prefs[PREF_ENABLE_REMOTE] = False
            raise InvalidTrustedProxies
        await self._save_prefs(prefs)
    async def async_update_google_entity_config(
        self,
        *,
        entity_id,
        override_name=_UNDEF,
        disable_2fa=_UNDEF,
        aliases=_UNDEF,
        should_expose=_UNDEF,
    ):
        """Update config for a Google entity."""
        entities = self.google_entity_configs
        entity = entities.get(entity_id, {})
        changes = {}
        for key, value in (
            (PREF_OVERRIDE_NAME, override_name),
            (PREF_DISABLE_2FA, disable_2fa),
            (PREF_ALIASES, aliases),
            (PREF_SHOULD_EXPOSE, should_expose),
        ):
            if value is not _UNDEF:
                changes[key] = value
        if not changes:
            return
        # Rebuild (rather than mutate) so listeners observe a new object.
        updated_entity = {**entity, **changes}
        updated_entities = {**entities, entity_id: updated_entity}
        await self.async_update(google_entity_configs=updated_entities)
    async def async_update_alexa_entity_config(
        self, *, entity_id, should_expose=_UNDEF
    ):
        """Update config for an Alexa entity."""
        entities = self.alexa_entity_configs
        entity = entities.get(entity_id, {})
        changes = {}
        for key, value in ((PREF_SHOULD_EXPOSE, should_expose),):
            if value is not _UNDEF:
                changes[key] = value
        if not changes:
            return
        updated_entity = {**entity, **changes}
        updated_entities = {**entities, entity_id: updated_entity}
        await self.async_update(alexa_entity_configs=updated_entities)
    async def async_set_username(self, username):
        """Set the username that is logged in."""
        # Logging out.
        if username is None:
            user = await self._load_cloud_user()
            if user is not None:
                await self._hass.auth.async_remove_user(user)
                await self._save_prefs({**self._prefs, PREF_CLOUD_USER: None})
            return
        cur_username = self._prefs.get(PREF_USERNAME)
        if cur_username == username:
            return
        if cur_username is None:
            # First login: just record the username.
            await self._save_prefs({**self._prefs, PREF_USERNAME: username})
        else:
            # Different account logged in: reset all preferences.
            await self._save_prefs(self._empty_config(username))
    def as_dict(self):
        """Return dictionary version."""
        return {
            PREF_ENABLE_ALEXA: self.alexa_enabled,
            PREF_ENABLE_GOOGLE: self.google_enabled,
            PREF_ENABLE_REMOTE: self.remote_enabled,
            PREF_GOOGLE_SECURE_DEVICES_PIN: self.google_secure_devices_pin,
            PREF_GOOGLE_ENTITY_CONFIGS: self.google_entity_configs,
            PREF_ALEXA_ENTITY_CONFIGS: self.alexa_entity_configs,
            PREF_ALEXA_REPORT_STATE: self.alexa_report_state,
            PREF_GOOGLE_REPORT_STATE: self.google_report_state,
            PREF_CLOUDHOOKS: self.cloudhooks,
        }
    @property
    def remote_enabled(self):
        """Return if remote is enabled on start."""
        enabled = self._prefs.get(PREF_ENABLE_REMOTE, False)
        if not enabled:
            return False
        # Remote access is force-disabled while localhost can bypass auth.
        if self._has_local_trusted_network or self._has_local_trusted_proxies:
            return False
        return True
    @property
    def alexa_enabled(self):
        """Return if Alexa is enabled."""
        return self._prefs[PREF_ENABLE_ALEXA]
    @property
    def alexa_report_state(self):
        """Return if Alexa report state is enabled."""
        return self._prefs.get(PREF_ALEXA_REPORT_STATE, DEFAULT_ALEXA_REPORT_STATE)
    @property
    def google_enabled(self):
        """Return if Google is enabled."""
        return self._prefs[PREF_ENABLE_GOOGLE]
    @property
    def google_report_state(self):
        """Return if Google report state is enabled."""
        return self._prefs.get(PREF_GOOGLE_REPORT_STATE, DEFAULT_GOOGLE_REPORT_STATE)
    @property
    def google_secure_devices_pin(self):
        """Return if Google is allowed to unlock locks."""
        return self._prefs.get(PREF_GOOGLE_SECURE_DEVICES_PIN)
    @property
    def google_entity_configs(self):
        """Return Google Entity configurations."""
        return self._prefs.get(PREF_GOOGLE_ENTITY_CONFIGS, {})
    @property
    def google_local_webhook_id(self):
        """Return Google webhook ID to receive local messages."""
        return self._prefs[PREF_GOOGLE_LOCAL_WEBHOOK_ID]
    @property
    def alexa_entity_configs(self):
        """Return Alexa Entity configurations."""
        return self._prefs.get(PREF_ALEXA_ENTITY_CONFIGS, {})
    @property
    def cloudhooks(self):
        """Return the published cloud webhooks."""
        return self._prefs.get(PREF_CLOUDHOOKS, {})
    async def get_cloud_user(self) -> str:
        """Return ID from Home Assistant Cloud system user."""
        user = await self._load_cloud_user()
        if user:
            return user.id
        # No stored user: create a system user and persist its id.
        user = await self._hass.auth.async_create_system_user(
            "Home Assistant Cloud", [GROUP_ID_ADMIN]
        )
        await self.async_update(cloud_user=user.id)
        return user.id
    async def _load_cloud_user(self) -> Optional[User]:
        """Load cloud user if available."""
        user_id = self._prefs.get(PREF_CLOUD_USER)
        if user_id is None:
            return None
        # Fetch the user. It can happen that the user no longer exists if
        # an image was restored without restoring the cloud prefs.
        return await self._hass.auth.async_get_user(user_id)
    @property
    def _has_local_trusted_network(self) -> bool:
        """Return if we allow localhost to bypass auth."""
        local4 = ip_address("127.0.0.1")
        local6 = ip_address("::1")
        for prv in self._hass.auth.auth_providers:
            if prv.type != "trusted_networks":
                continue
            for network in prv.trusted_networks:
                if local4 in network or local6 in network:
                    return True
        return False
    @property
    def _has_local_trusted_proxies(self) -> bool:
        """Return if we allow localhost to be a proxy and use its data."""
        if not hasattr(self._hass, "http"):
            return False
        local4 = ip_address("127.0.0.1")
        local6 = ip_address("::1")
        if any(
            local4 in nwk or local6 in nwk for nwk in self._hass.http.trusted_proxies
        ):
            return True
        return False
    async def _save_prefs(self, prefs):
        """Save preferences to disk."""
        self._prefs = prefs
        await self._store.async_save(self._prefs)
        # Notify listeners asynchronously; exceptions are caught per-task.
        for listener in self._listeners:
            self._hass.async_create_task(async_create_catching_coro(listener(self)))
    @callback
    def _empty_config(self, username):
        """Return an empty config."""
        return {
            PREF_ENABLE_ALEXA: True,
            PREF_ENABLE_GOOGLE: True,
            PREF_ENABLE_REMOTE: False,
            PREF_GOOGLE_SECURE_DEVICES_PIN: None,
            PREF_GOOGLE_ENTITY_CONFIGS: {},
            PREF_ALEXA_ENTITY_CONFIGS: {},
            PREF_CLOUDHOOKS: {},
            PREF_CLOUD_USER: None,
            PREF_USERNAME: username,
            PREF_GOOGLE_LOCAL_WEBHOOK_ID: self._hass.components.webhook.async_generate_id(),
        }
| |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from st2client.client import Client
from st2client.models import KeyValuePair
from st2common.services.access import create_token
from st2common.util.api import get_full_public_api_url
class DatastoreService(object):
    """
    Class provides public methods for accessing datastore items.
    """
    # Separates the "<pack>.<class>" namespace prefix from the key name.
    DATASTORE_NAME_SEPARATOR = ':'
    def __init__(self, logger, pack_name, class_name, api_username):
        # pack_name and class_name together form the local key namespace.
        self._api_username = api_username
        self._pack_name = pack_name
        self._class_name = class_name
        self._logger = logger
        # API client created lazily on first use (see _get_api_client).
        self._client = None
    ##################################
    # Methods for datastore management
    ##################################
    def list_values(self, local=True, prefix=None):
        """
        Retrieve all the datastores items.
        :param local: List values from a namespace local to this pack/class. Defaults to True.
        :type: local: ``bool``
        :param prefix: Optional key name prefix / startswith filter.
        :type prefix: ``str``
        :rtype: ``list`` of :class:`KeyValuePair`
        """
        client = self._get_api_client()
        self._logger.audit('Retrieving all the value from the datastore')
        key_prefix = self._get_full_key_prefix(local=local, prefix=prefix)
        kvps = client.keys.get_all(prefix=key_prefix)
        return kvps
    def get_value(self, name, local=True):
        """
        Retrieve a value from the datastore for the provided key.
        By default, value is retrieved from the namespace local to the pack/class. If you want to
        retrieve a global value from a datastore, pass local=False to this method.
        :param name: Key name.
        :type name: ``str``
        :param local: Retrieve value from a namespace local to the pack/class. Defaults to True.
        :type: local: ``bool``
        :rtype: ``str`` or ``None``
        """
        name = self._get_full_key_name(name=name, local=local)
        client = self._get_api_client()
        self._logger.audit('Retrieving value from the datastore (name=%s)', name)
        try:
            kvp = client.keys.get_by_id(id=name)
        except Exception:
            # NOTE(review): any API failure (not only "key missing") is
            # collapsed into a None result here.
            return None
        if kvp:
            return kvp.value
        return None
    def set_value(self, name, value, ttl=None, local=True):
        """
        Set a value for the provided key.
        By default, value is set in a namespace local to the pack/class. If you want to
        set a global value, pass local=False to this method.
        :param name: Key name.
        :type name: ``str``
        :param value: Key value.
        :type value: ``str``
        :param ttl: Optional TTL (in seconds).
        :type ttl: ``int``
        :param local: Set value in a namespace local to the pack/class. Defaults to True.
        :type: local: ``bool``
        :return: ``True`` on success, ``False`` otherwise.
        :rtype: ``bool``
        """
        name = self._get_full_key_name(name=name, local=local)
        # Values are always persisted as strings.
        value = str(value)
        client = self._get_api_client()
        self._logger.audit('Setting value in the datastore (name=%s)', name)
        instance = KeyValuePair()
        instance.id = name
        instance.name = name
        instance.value = value
        if ttl:
            instance.ttl = ttl
        client.keys.update(instance=instance)
        return True
    def delete_value(self, name, local=True):
        """
        Delete the provided key.
        By default, value is deleted from a namespace local to the pack/class. If you want to
        delete a global value, pass local=False to this method.
        :param name: Name of the key to delete.
        :type name: ``str``
        :param local: Delete a value in a namespace local to the pack/class. Defaults to True.
        :type: local: ``bool``
        :return: ``True`` on success, ``False`` otherwise.
        :rtype: ``bool``
        """
        name = self._get_full_key_name(name=name, local=local)
        client = self._get_api_client()
        instance = KeyValuePair()
        instance.id = name
        instance.name = name
        self._logger.audit('Deleting value from the datastore (name=%s)', name)
        try:
            client.keys.delete(instance=instance)
        except Exception:
            # NOTE(review): all failures (including "key not found") are
            # flattened to a False return value.
            return False
        return True
    def _get_api_client(self):
        """
        Retrieve API client instance.
        """
        if not self._client:
            # Authenticate with a temporary token valid for 24 hours.
            ttl = (24 * 60 * 60)
            temporary_token = create_token(username=self._api_username, ttl=ttl)
            api_url = get_full_public_api_url()
            self._client = Client(api_url=api_url, token=temporary_token.token)
        return self._client
    def _get_full_key_name(self, name, local):
        """
        Retrieve a full key name.
        :rtype: ``str``
        """
        if local:
            name = self._get_key_name_with_prefix(name=name)
        return name
    def _get_full_key_prefix(self, local, prefix=None):
        # Local prefix is "<pack>.<class>:" optionally followed by the
        # caller-provided startswith filter; global uses the filter as-is.
        if local:
            key_prefix = self._get_local_key_name_prefix()
            if prefix:
                key_prefix += prefix
        else:
            key_prefix = prefix
        return key_prefix
    def _get_local_key_name_prefix(self):
        """
        Retrieve key prefix which is local to this pack/class.
        """
        key_prefix = self._get_datastore_key_prefix() + self.DATASTORE_NAME_SEPARATOR
        return key_prefix
    def _get_key_name_with_prefix(self, name):
        """
        Retrieve a full key name which is local to the current pack/class.
        :param name: Base datastore key name.
        :type name: ``str``
        :rtype: ``str``
        """
        prefix = self._get_datastore_key_prefix()
        full_name = prefix + self.DATASTORE_NAME_SEPARATOR + name
        return full_name
    def _get_datastore_key_prefix(self):
        # "<pack_name>.<class_name>" uniquely namespaces this consumer.
        prefix = '%s.%s' % (self._pack_name, self._class_name)
        return prefix
| |
#These tests are used for the adapter_sqlite
#Require sqlite3 installed.
#Test 1: test loading records from the sqlite table to create an index.
#Test 2: When the server is running, update the record in sqlite,
# then the listener should fetch the results.
#Test 3: Shut down the engine, and delete the records in sqlite.
# Then start the engine to test if the engine can fetch the changes.
import sys, urllib2, json, time, subprocess, os, commands, signal,shutil
try:
import sqlite3
except ImportError:
os._exit(-1)
sys.path.insert(0,'srch2lib')
import test_lib
port = '8087'  # HTTP port the engine under test listens on
serverHandle = None  # process handle returned by test_lib.startServer
totalFailCount = 0  # aggregated number of failed query comparisons
binary_path = None  # path to the srch2 engine binary (set from argv)
#Start the SRCH2 engine with sqlite config file.
#Launches the binary against adapter_sqlite/conf.xml and blocks until the
#HTTP port answers a ping. Returns -1 when the port is already taken.
def startSrch2Engine():
    global serverHandle
    #Start the engine server
    args = [binary_path , '--config-file=adapter_sqlite/conf.xml']
    if test_lib.confirmPortAvailable(port) == False:
        print 'Port' + str(port) + ' already in use -aborting '
        return -1
    print 'starting engine: ' + args[0] + ' ' + args[1]
    serverHandle = test_lib.startServer(args)
    test_lib.pingServer(port)
#Shut down the srch2 engine
#Kills the previously started server process via test_lib.
def shutdownSrch2Engine():
    global serverHandle
    #Shutdown the engine server
    test_lib.killServer(serverHandle)
#Compare the results with the expected outputs.
#Each line of the input file is "query keywords||expected ids"; every line
#is queried against the running engine and mismatches are counted into the
#global totalFailCount.
#NOTE(review): the input file handle is never closed explicitly.
def compareResults(testQueriesPath):
    f_test = open(testQueriesPath,'r')
    failCount = 0
    global totalFailCount
    for line in f_test:
        #Get the query keyword and result from the input file
        value = line.split('||')
        queryValue = value[0].split()
        resultValue = value[1].split()
        #Construct the query
        query = prepareQuery(queryValue)
        #Execute the query
        response = urllib2.urlopen(query).read()
        response_json = json.loads(response)
        #Check the result
        failCount += checkResult(query, response_json['results'],resultValue)
    totalFailCount += failCount
#prepare the query based on the valid syntax
#Builds the /search URL for the engine; keywords are joined with the
#URL-encoded " AND " separator. Equivalent to the previous index loop.
def prepareQuery(queryKeywords):
    query = 'http://localhost:' + port + '/search?'
    # keywords section: k1%20AND%20k2%20AND%20... (empty string for no keywords)
    query = query + 'q=' + '%20AND%20'.join(queryKeywords)
    return query
#Function of checking the results
#Compare the record 'director' part with the result value
#Returns 0 on success and 1 on mismatch, printing a side-by-side diff of
#returned record IDs versus the expected ones on failure.
def checkResult(query, responseJson,resultValue):
#    for key, value in responseJson:
#        print key, value
    isPass=1
    if  len(responseJson) == len(resultValue):
        for i in range(0, len(resultValue)):
            #print response_json['results'][i]['record']['id']
            if (responseJson[i]['record']['ID'] !=  resultValue[i]):
                isPass=0
                print query+' test failed'
                print 'query results||given results'
                print 'number of results:'+str(len(responseJson))+'||'+str(len(resultValue))
                for i in range(0, len(responseJson)):
                    print responseJson[i]['record']['ID']+'||'+resultValue[i]
                break
    else:
        isPass=0
        print query+' test failed'
        print 'query results||given results'
        print 'number of results:'+str(len(responseJson))+'||'+str(len(resultValue))
        maxLen = max(len(responseJson),len(resultValue))
        for i in range(0, maxLen):
            if i >= len(resultValue):
                print responseJson[i]['record']['ID']+'||'
            elif i >= len(responseJson):
                print '  '+'||'+resultValue[i]
            else:
                print responseJson[i]['record']['ID']+'||'+resultValue[i]
    if isPass == 1:
        print  query+' test pass'
        return 0
    return 1
#Test 1: test loading index from the sqlite table to create the index.
#Executes the SQL fixture line by line, starts the engine so it builds its
#index from the sqlite table, then compares query results.
def testCreateIndexes(conn,sqlQueriesPath,testQueriesPath):
    #Create the test table and Insert record into it
    f_sql = open(sqlQueriesPath,'r')
    for line in f_sql:
        conn.cursor().execute(line)
        print line
    conn.commit()
    #Start the engine and wait it fetch the data,
    #the engine will create an index from the Sqlite table
    startSrch2Engine()
    time.sleep(5)
    #Compare the results with the expecting results
    compareResults(testQueriesPath)
    print '=============================='
#Test 2: When the server is running, update the record in sqlite,
#then the listener should fetch the results.
#Applies the SQL fixture plus 90 generated INSERTs while the engine is
#live, waits for the change listener to catch up, then compares results.
def testRunListener(conn,sqlQueriesPath,testQueriesPath):
    #Modify the table while the srch2 engine is running.
    f_sql = open(sqlQueriesPath,'r')
    for line in f_sql:
        conn.cursor().execute(line)
        print line
    for x in xrange(10,100):
        data = 'INSERT INTO COMPANY (ID,NAME,AGE,ADDRESS,SALARY)VALUES (' +\
        str(x) + ', "Paul", 32, "California", 20000.00 );'
        conn.cursor().execute(data)
        conn.commit()
        time.sleep(0.01)
    #Wait for the engine to fetch the changes
    time.sleep(10)
    #Compare the results with the expecting results
    compareResults(testQueriesPath)
    print '=============================='
#Test 3: Shut down the engine, and delete the records in sqlite.
#Then start the engine to test if the engine can fetch the changes
#Verifies that modifications made while the engine is offline are picked
#up from the sqlite change log on the next startup.
def testOfflineLog(conn,sqlQueriesPath,testQueriesPath):
    #Shutdown the engine
    shutdownSrch2Engine()
    #Modify the table while the srch2 engine is not running
    f_sql = open(sqlQueriesPath,'r')
    for line in f_sql:
        conn.cursor().execute(line)
        print line
    conn.commit()
    #Start the engine and wait it fetch the changes,
    #the engine will get the offline changes.
    startSrch2Engine()
    time.sleep(5)
    #Compare the results with the expecting results
    compareResults(testQueriesPath)
    #Shutdown the engine. Finish the test.
    shutdownSrch2Engine()
    print '=============================='
# Entry point: argv = [binary_path, sql1, queries1, sql2, queries2, ...].
# Cleans any leftover index/db files, runs the test cases, and exits with
# the accumulated failure count as the process status.
if __name__ == '__main__':
    if(os.path.exists("data")):
        shutil.rmtree("data")
    if(os.path.exists("./adapter_sqlite/srch2Test.db")):
        os.remove("./adapter_sqlite/srch2Test.db")
    conn = sqlite3.connect('./adapter_sqlite/srch2Test.db')
    #Start the test cases
    binary_path = sys.argv[1]
    testCreateIndexes(conn,sys.argv[2],sys.argv[3])
    testRunListener(conn,sys.argv[4],sys.argv[5])
    #testOfflineLog(conn,sys.argv[6],sys.argv[7])
    #Do not need to drop the table, remove the db file after the exit.
    print '=============================='
    shutdownSrch2Engine()
    conn.close()
    if(os.path.exists("data")):
        shutil.rmtree("data")
    if(os.path.exists("./adapter_sqlite/srch2Test.db")):
        os.remove("./adapter_sqlite/srch2Test.db")
    os._exit(totalFailCount)
| |
#!/usr/bin/env python
import json
import requests
import logging
import ssl
from datetime import datetime
from os.path import exists, expanduser, dirname
from os import makedirs, remove
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.poolmanager import PoolManager
LOG = logging.getLogger(__name__)
# Default location for cached session/status data.
default_cache_dir = expanduser('~/.nest')
login_url = 'https://home.nest.com/user/login'
# User agent string mimicking the official Nest mobile client.
user_agent = 'Nest/2.1.3 CFNetwork/548.0.4'
class TlsAdapter(HTTPAdapter):
    """Transport adapter pinning requests' connection pool to TLSv1."""
    def init_poolmanager(self, connections, maxsize, block=False):
        # NOTE(review): ssl.PROTOCOL_TLSv1 is deprecated in modern ssl
        # modules — confirm the Nest endpoint still requires it pinned.
        self.poolmanager = PoolManager(num_pools=connections,
                                       maxsize=maxsize,
                                       block=block,
                                       ssl_version=ssl.PROTOCOL_TLSv1)
class FailedRequest(Exception):
    """An HTTP call that did not succeed; keeps the response for inspection."""
    def __init__(self, message, response):
        super(FailedRequest, self).__init__(message)
        # Retain the raw response so callers can inspect status/body.
        self.response = response
class NotAuthenticated(Exception):
    """Raised when an operation requires a login that has not happened."""
    def __init__(self, message):
        super(NotAuthenticated, self).__init__(message)
class Nest(object):
    """A single Nest thermostat, addressed by device id within a structure.

    All reads go through the owning account's cached ``status`` dict; the
    setters POST the change to the service and then mirror it into the
    cache so subsequent reads stay consistent.
    """

    def __init__(self, id, structure):
        '''Initialize this Nest.'''
        self._id = str(id)
        self._structure = structure
        self._account = structure.account

    @property
    def account(self):
        # Owning Account (shared session and status cache).
        return self._account

    @property
    def structure(self):
        # Structure (building) this device belongs to.
        return self._structure

    @property
    def name(self):
        # Human-readable device name from the 'shared' status bucket.
        return self.account.status['shared'][self.id]['name']

    @property
    def id(self):
        return self._id

    @property
    def scale(self):
        # Temperature scale configured on the device ('C' or 'F').
        return self.account.status['device'][self.id]['temperature_scale']

    @property
    def ip(self):
        return self.account.status['metadata'][self.id]['last_ip']

    @property
    def humidity(self):
        return self.account.status['device'][self.id]['current_humidity']

    @property
    def temperature(self):
        # Status presumably stores Celsius; converted when the device scale
        # is Fahrenheit (matches the conversions in the setters below).
        temp = self.account.status['shared'][self.id]['current_temperature']
        if self.scale == 'F':
            temp = (temp * 1.8) + 32
        return temp

    @property
    def leaf(self):
        # Energy-saving "leaf" indicator flag.
        return self.account.status['device'][self.id]['leaf']

    @property
    def mode(self):
        # Current schedule mode, lower-cased (e.g. 'heat', 'cool', 'range').
        mode = self.account.status['device'][self.id][
            'current_schedule_mode']
        return mode.lower()

    @mode.setter
    def mode(self, mode):
        # The service expects upper-case mode names.
        mode = mode.upper()
        data = {'device': {self.id: {'current_schedule_mode': mode}}}
        self.account.request('POST', 'put', data=data)
        # Mirror the change into the cached status.
        self.account.status['device'][self.id]['current_schedule_mode'] = mode

    @property
    def fan(self):
        return self.account.status['device'][self.id]['fan_mode']

    @fan.setter
    def fan(self, mode):
        if mode not in ('auto', 'on'):
            raise Exception('Invalid fan mode "{}". Must be "auto" or '
                            '"on"'.format(mode))
        data = {'device': {self.id: {'fan_mode': mode}}}
        self.account.request('POST', 'put', data=data)
        self.account.status['device'][self.id]['fan_mode'] = mode

    @property
    def target_temperature(self):
        """Target temperature: a [low, high] pair in 'range' mode, else a scalar.

        Values are converted to Fahrenheit when the device scale is 'F'.
        """
        shared = self.account.status['shared'][self.id]
        if self.mode == 'range':
            temp = [shared['target_temperature_low'],
                    shared['target_temperature_high']]
            if self.scale == 'F':
                temp = [(t * 1.8) + 32 for t in temp]
        else:
            temp = shared['target_temperature']
            if self.scale == 'F':
                temp = (temp * 1.8) + 32
        return temp

    @target_temperature.setter
    def target_temperature(self, temp):
        if isinstance(temp, (list, tuple)):
            # temp is (low, high)
            lo_and_hi = [float(t) for t in temp]
            # Reject bands narrower than 3 degrees (checked in the caller's
            # scale, before any F->C conversion).
            if lo_and_hi[1] - lo_and_hi[0] < 3.0:
                raise Exception('High and low temperatures are too close')
            if self.scale == 'F':
                # Convert to Celsius before sending to the service.
                lo_and_hi = [(t - 32) / 1.8 for t in lo_and_hi]
            data = {
                'target_temperature_low': lo_and_hi[0],
                'target_temperature_high': lo_and_hi[1],
            }
        else:
            temp = float(temp)
            if self.scale == 'F':
                temp = (temp - 32) / 1.8
            data = {
                'target_change_pending': True,
                'target_temperature': temp
            }
        self.account.request('POST', 'put/shared.{}'.format(self.id),
                             data=data)
        # Mirror the sent (Celsius) values into the cached status.
        shared = self.account.status['shared'][self.id]
        if isinstance(temp, (list, tuple)):
            shared['target_temperature_low'] = lo_and_hi[0]
            shared['target_temperature_high'] = lo_and_hi[1]
        else:
            shared['target_temperature'] = temp
class Structure(object):
    """A Nest 'structure' (a home/building) containing one or more devices."""

    def __init__(self, structure_id, account):
        '''Initialize this structure.'''
        self._account = account
        self._id = structure_id
        self._nests = None

    @property
    def account(self):
        # Owning Account (shared session and status cache).
        return self._account

    @property
    def id(self):
        return self._id

    @property
    def name(self):
        return self.account.status['structure'][self.id]['name']

    @property
    def nests(self):
        # Lazily build and cache {device_id: Nest} from the device list.
        if self._nests is None:
            nests = {}
            for dev in self.account.status['structure'][self.id]['devices']:
                # Entries look like 'device.<id>'; keep only the id part.
                id = dev.split('.')[1]
                nests[id] = Nest(id, self)
            self._nests = nests
        return self._nests

    @property
    def location(self):
        # Postal code; also used as the key for weather lookups below.
        return self.account.status['structure'][self.id]['postal_code']

    @property
    def weather(self):
        # Fetch current weather for this structure's postal code.
        url = '{}{}'.format(self.account.session['urls']['weather_url'],
                            self.location)
        return requests.get(url).json()[self.location]

    # away ###############################
    @property
    def away(self):
        return self.account.status['structure'][self.id]['away']

    @away.setter
    def away(self, value):
        from time import time
        value = bool(value)
        data = {
            'away_timestamp': int(time()),
            'away': value,
            'away_setter': 0
        }
        self.account.request('POST', 'put/structure.{}'.format(self.id),
                             data=data)
        # Mirror the change into the cached status.
        self.account.status['structure'][self.id]['away'] = value
class Account(object):
    """Authenticated connection to a user's Nest account.

    Caches the login session JSON on disk and lazily fetches the account
    status, structures, and devices on first access.
    """

    def __init__(self, cache_dir=None):
        '''Initialize this nest interface.'''
        if cache_dir is None:
            cache_dir = default_cache_dir
        # Session JSON (access token, transport urls, expiry) cached on disk.
        self._session_file = '{}/session.json'.format(cache_dir)
        self._status = None
        self._structures = None
        self._nests = None
        self._session = None

    @property
    def status(self):
        # Lazily fetch and cache the full account status blob.
        if self._status is None:
            r = self.request('GET', 'mobile/user.{}'.format(self.user_id))
            self._status = r.json()
        return self._status

    @property
    def structures(self):
        # Lazily build {structure_id: Structure} from the user's status.
        if self._structures is None:
            structures = {}
            user_structs = self.status['user'][self.user_id]['structures']
            LOG.debug('structs: %s', user_structs)
            for struct in user_structs:
                # Entries look like 'structure.<id>'; keep only the id part.
                id = struct.split('.')[1]
                structures[id] = Structure(id, self)
            self._structures = structures
        return self._structures

    @property
    def nests(self):
        # Flattened {device_id: Nest} across all structures.
        if self._nests is None:
            nests = {}
            for struct in self.structures.values():
                for id, nest in struct.nests.items():
                    nests[id] = nest
            self._nests = nests
        return self._nests

    @property
    def user_id(self):
        return self.session['userid']

    @property
    def session(self):
        return self._session

    @property
    def has_session(self):
        """Return True when a cached, unexpired session file exists.

        Loading the session file also populates ``self._session`` as a
        side effect, so a True result means the session is usable.
        """
        try:
            with open(self._session_file, 'rt') as sfile:
                self._session = json.load(sfile)
            expiry = datetime.strptime(self.session['expires_in'],
                                       '%a, %d-%b-%Y %H:%M:%S GMT')
            if datetime.utcnow() <= expiry:
                return True
        except Exception:
            # Treat any parse/IO error as "no session" but keep the trace.
            LOG.exception('missing or corrupt session file')
        return False

    def clear_session(self):
        '''Delete the session file'''
        remove(self._session_file)

    def login(self, email, password):
        '''Login to the user's Nest account.

        Returns True on success, False on a non-200 login response.
        '''
        # make the cache dir if it doesn't exist
        cache_dir = dirname(self._session_file)
        if not exists(cache_dir):
            makedirs(cache_dir)
        # authenticate with Nest and save the returned session data
        res = requests.post(login_url, {'username': email,
                                        'password': password})
        if res.status_code != 200:
            return False
        session = res.json()
        with open(self._session_file, 'wt') as sfile:
            json.dump(session, sfile, indent=2)
        self._session = session
        return True

    def request(self, method='GET', path='', data=None):
        '''GET from or POST to a user's Nest account.

        This function requires a valid session to exist.  Raises
        NotAuthenticated without one, FailedRequest on a non-200 response.
        '''
        # check that we have a valid session
        if not self.has_session:
            raise NotAuthenticated('No session -- login first')
        #from requests.utils import cookiejar_from_dict
        # Build a fresh session with the TLSv1 adapter and auth headers.
        self._requestor = requests.Session()
        self._requestor.mount('https://', TlsAdapter())
        self._requestor.headers.update({
            'User-Agent': user_agent,
            'Authorization': 'Basic ' + self.session['access_token'],
            'X-nl-user-id': self.session['userid'],
            'X-nl-protocol-version': '1',
            'Accept-Language': 'en-us',
            'Connection': 'keep-alive',
            'Accept': '*/*'
        })
        base_url = '{}/v2'.format(self.session['urls']['transport_url'])
        url = '{}/{}'.format(base_url, path)
        if method == 'GET':
            LOG.info('GETting %s', url)
            # plain '.json' status URLs are fetched without the auth headers
            if not url.endswith('.json'):
                r = self._requestor.get(url)
            else:
                r = requests.get(url)
        elif method == 'POST':
            # NOTE(review): `unicode` is Python 2 only — consistent with the
            # print statement in the __main__ block of this file.
            if not isinstance(data, (str, unicode)):
                # convert data dicts to JSON strings
                data = json.dumps(data)
            r = self._requestor.post(url, data=data)
        else:
            raise Exception('Invalid method "{}"'.format(method))
        if r.status_code != 200:
            raise FailedRequest('Request failed', r)
        return r
if __name__ == '__main__':
    # Minimal CLI: read one property name (and optional value) and get/set it.
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument('property', help='Property to get or set',
                        choices=('ip', 'status', 'temperature', 'humidity',
                                 'target_temperature', 'away', 'leaf',
                                 'weather'))
    parser.add_argument('value', nargs='?', help='Value to set')
    args = parser.parse_args()
    # NOTE(review): Nest.__init__ requires (id, structure), so this call
    # raises TypeError.  Presumably a device should be obtained via
    # Account().nests after login — confirm the intended CLI flow.
    nest = Nest()
    from pprint import pprint
    if hasattr(nest, args.property):
        pprint(getattr(nest, args.property))
    elif args.property in globals():
        globals()[args.property]()
    if args.value:
        # Python 2 print statement; the rest of this file also targets py2.
        print 'Setting {} to {}'.format(args.property, args.value)
        setattr(nest, args.property, args.value)
| |
from __future__ import print_function
import logging
from datetime import datetime
import barotropic
import interpolation as interp
import numpy as np
from netCDF4 import Dataset, date2num
import IOinitial
import IOsubset
import IOwrite
import datetimeFunctions
import forcingFilenames as fc
import interp2D
try:
import ESMF
except ImportError:
print("Could not find module ESMF")
pass
__author__ = 'Trond Kristiansen'
__email__ = 'trond.kristiansen@niva.no'
__created__ = datetime(2008, 8, 15)
__modified__ = datetime(2021, 3, 23)
__version__ = "1.8"
__status__ = "Development, modified on 15.08.2008,01.10.2009,07.01.2010, " \
"15.07.2014, 01.12.2014, 07.08.2015, " \
"08.02.2018, 04.03.2019, 13.03.2019, 23.03.2021"
def vertical_interpolation(myvar, array1, array2, grdROMS, grdMODEL):
    """Vertically interpolate fields from source-model levels to ROMS s-levels.

    Parameters
    ----------
    myvar : str
        Name of the variable being processed.
    array1, array2 : ndarray
        Horizontally interpolated 3D fields; ``array2`` is only used as the
        v-component when ``myvar == 'vvel'``.
    grdROMS, grdMODEL
        Grid objects holding dimensions and depth (z_r/z_w) arrays.

    Returns
    -------
    For tracer variables: one masked 3D array on ROMS levels.
    For ``myvar == 'vvel'``: the tuple (U, V, UBAR, VBAR), where UBAR/VBAR
    are the depth-averaged (barotropic) components.  None otherwise.

    Notes
    -----
    ``np.float`` was removed in NumPy 1.24; the builtin ``float`` is used
    instead (same dtype: float64).
    """
    outINDEX_ST = (grdROMS.nlevels, grdROMS.eta_rho, grdROMS.xi_rho)
    outINDEX_U = (grdROMS.nlevels, grdROMS.eta_u, grdROMS.xi_u)
    outINDEX_UBAR = (grdROMS.eta_u, grdROMS.xi_u)
    outINDEX_V = (grdROMS.nlevels, grdROMS.eta_v, grdROMS.xi_v)
    outINDEX_VBAR = (grdROMS.eta_v, grdROMS.xi_v)

    if myvar in ['salinity', 'temperature', 'O3_c', 'O3_TA', 'N1_p', 'N3_n', 'N5_s', 'O2_o']:
        logging.info(
            'Start vertical interpolation for {} (dimensions={} x {})'.format(myvar, grdROMS.xi_rho, grdROMS.eta_rho))
        outdata = np.empty((outINDEX_ST), dtype=float, order='F')

        outdata = interp.interpolation.dovertinter(np.asarray(outdata, order='F'),
                                                   np.asarray(array1, order='F'),
                                                   np.asarray(grdROMS.h, order='F'),
                                                   np.asarray(grdROMS.z_r, order='F'),
                                                   np.asarray(grdMODEL.z_r, order='F'),
                                                   int(grdROMS.nlevels),
                                                   int(grdMODEL.nlevels),
                                                   int(grdROMS.xi_rho),
                                                   int(grdROMS.eta_rho),
                                                   int(grdROMS.xi_rho),
                                                   int(grdROMS.eta_rho))

        # Hide values that blew up in the interpolation.
        outdata = np.ma.masked_where(abs(outdata) > 1000, outdata)

        # The BGC variables have to be capped at 0 (no negative concentrations).
        # BUGFIX: the original masked `abs(outdata) < 0`, which is never true,
        # so negative concentrations slipped through unmasked.
        if myvar in ['O3_c', 'O3_TA', 'N1_p', 'N3_p', 'N3_n', 'N5_s', 'O2_o']:
            outdata = np.ma.masked_where(outdata < 0, outdata)

        # import plotData
        # for k in range(grdROMS.nlevels):
        #    plotData.contourMap(grdROMS, grdROMS.lon_rho, grdROMS.lat_rho, np.squeeze(outdata[k,:,:]),k, varname)

        return outdata

    if myvar == 'vvel':
        logging.info('Start vertical interpolation for uvel (dimensions={} x {})'.format(grdROMS.xi_u, grdROMS.eta_u))
        outdataU = np.zeros((outINDEX_U), dtype=float)
        outdataUBAR = np.zeros((outINDEX_UBAR), dtype=float)

        outdataU = interp.interpolation.dovertinter(np.asarray(outdataU, order='F'),
                                                    np.asarray(array1, order='F'),
                                                    np.asarray(grdROMS.h, order='F'),
                                                    np.asarray(grdROMS.z_r, order='F'),
                                                    np.asarray(grdMODEL.z_r, order='F'),
                                                    int(grdROMS.nlevels),
                                                    int(grdMODEL.nlevels),
                                                    int(grdROMS.xi_u),
                                                    int(grdROMS.eta_u),
                                                    int(grdROMS.xi_rho),
                                                    int(grdROMS.eta_rho))
        outdataU = np.ma.masked_where(abs(outdataU) > 1000, outdataU)

        logging.info('Start vertical interpolation for vvel (dimensions={} x {})'.format(grdROMS.xi_v, grdROMS.eta_v))
        outdataV = np.zeros((outINDEX_V), dtype=float)
        outdataVBAR = np.zeros((outINDEX_VBAR), dtype=float)

        outdataV = interp.interpolation.dovertinter(np.asarray(outdataV, order='F'),
                                                    np.asarray(array2, order='F'),
                                                    np.asarray(grdROMS.h, order='F'),
                                                    np.asarray(grdROMS.z_r, order='F'),
                                                    np.asarray(grdMODEL.z_r, order='F'),
                                                    int(grdROMS.nlevels),
                                                    int(grdMODEL.nlevels),
                                                    int(grdROMS.xi_v),
                                                    int(grdROMS.eta_v),
                                                    int(grdROMS.xi_rho),
                                                    int(grdROMS.eta_rho))
        outdataV = np.ma.masked_where(abs(outdataV) > 1000, outdataV)

        # Depth-average to get the barotropic components on the u/v grids.
        z_wu = np.zeros((grdROMS.nlevels + 1, grdROMS.eta_u, grdROMS.xi_u), dtype=float)
        z_wv = np.zeros((grdROMS.nlevels + 1, grdROMS.eta_v, grdROMS.xi_v), dtype=float)

        outdataUBAR = barotropic.velocity.ubar(np.asarray(outdataU, order='F'),
                                               np.asarray(outdataUBAR, order='F'),
                                               np.asarray(grdROMS.z_w, order='F'),
                                               np.asarray(z_wu, order='F'),
                                               grdROMS.nlevels,
                                               grdROMS.xi_u,
                                               grdROMS.eta_u,
                                               grdROMS.xi_rho,
                                               grdROMS.eta_rho)
        outdataUBAR = np.ma.masked_where(abs(outdataUBAR) > 1000, outdataUBAR)
        # plotData.contourMap(grdROMS, grdROMS.lon_rho, grdROMS.lat_rho, outdataUBAR,1, "ubar")

        outdataVBAR = barotropic.velocity.vbar(np.asarray(outdataV, order='F'),
                                               np.asarray(outdataVBAR, order='F'),
                                               np.asarray(grdROMS.z_w, order='F'),
                                               np.asarray(z_wv, order='F'),
                                               grdROMS.nlevels,
                                               grdROMS.xi_v,
                                               grdROMS.eta_v,
                                               grdROMS.xi_rho,
                                               grdROMS.eta_rho)
        # plotData.contourMap(grdROMS, grdROMS.lon_rho, grdROMS.lat_rho, outdataVBAR,1, "vbar")
        outdataVBAR = np.ma.masked_where(abs(outdataVBAR) > 1000, outdataVBAR)

        return outdataU, outdataV, outdataUBAR, outdataVBAR
def rotate(grdROMS, grdMODEL, data, u, v):
    """
    First rotate the values of U, V at rho points with the grid angle, and then
    interpolate the rho point values to U and V points and save the result.

    Returns the rotated (urot, vrot) fields on the rho grid.

    Note: ``np.float`` was removed in NumPy 1.24; the builtin ``float``
    (an identical float64 dtype) is used instead.
    """
    # Allocate output fields on the rho grid, one layer per source level.
    shape = (int(grdMODEL.nlevels), int(grdROMS.eta_rho), int(grdROMS.xi_rho))
    urot = np.zeros(shape, float)
    vrot = np.zeros(shape, float)

    urot, vrot = interp.interpolation.rotate(np.asarray(urot, order='F'),
                                             np.asarray(vrot, order='F'),
                                             np.asarray(u, order='F'),
                                             np.asarray(v, order='F'),
                                             np.asarray(grdROMS.angle, order='F'),
                                             int(grdROMS.xi_rho),
                                             int(grdROMS.eta_rho),
                                             int(grdMODEL.nlevels))
    return urot, vrot
def interpolate2uv(grdROMS, grdMODEL, urot, vrot):
    """Interpolate rotated rho-point velocities onto the ROMS U and V grids.

    Returns (Zu, Zv) on the u- and v-grids respectively.

    Note: ``np.float`` was removed in NumPy 1.24; the builtin ``float``
    (an identical float64 dtype) is used instead.
    """
    Zu = np.zeros((int(grdMODEL.nlevels), int(grdROMS.eta_u), int(grdROMS.xi_u)), float)
    Zv = np.zeros((int(grdMODEL.nlevels), int(grdROMS.eta_v), int(grdROMS.xi_v)), float)

    # Interpolate from RHO points to U and V points for velocities
    Zu = interp.interpolation.rho2u(np.asarray(Zu, order='F'),
                                    np.asarray(urot, order='F'),
                                    int(grdROMS.xi_rho),
                                    int(grdROMS.eta_rho),
                                    int(grdMODEL.nlevels))

    # plotData.contourMap(grdROMS,grdMODEL,Zu[0,:,:],"1",'urot')

    Zv = interp.interpolation.rho2v(np.asarray(Zv, order='F'),
                                    np.asarray(vrot, order='F'),
                                    int(grdROMS.xi_rho),
                                    int(grdROMS.eta_rho),
                                    int(grdMODEL.nlevels))

    # plotData.contourMap(grdROMS,grdMODEL,Zv[0,:,:],"1",'vrot')

    return Zu, Zv
def get_time(confM2R, year, month, day, ntime):
    """
    Create a date object to keep track of Julian dates etc.
    Also create a reference date starting at 1948/01/01.
    Go here to check results: http://lena.gsfc.nasa.gov/lenaDEV/html/doy_conv.html

    Stores time, reftime, and timeunits on confM2R.grdROMS as a side effect.
    (``ntime`` is accepted for signature symmetry with the data readers but
    is not used here.)
    """
    # Pick the input file matching the configured ocean data source.
    # NOTE(review): if ocean_indata_type matches none of these, `filename`
    # is unbound and Dataset() below raises NameError — confirm upstream
    # config validation guarantees one of these types.
    if confM2R.ocean_indata_type == 'SODA3':
        filename = fc.getSODA3filename(confM2R, year, month, day, None)
    if confM2R.ocean_indata_type == 'SODA3_5DAY':
        filename = fc.getSODA3_5DAYfilename(confM2R, year, month, day, None)
    if confM2R.ocean_indata_type == 'SODAMONTHLY':
        filename = fc.getSODAMONTHLYfilename(confM2R, year, month, None)
    if confM2R.ocean_indata_type == 'GLORYS':
        filename = fc.get_GLORYS_filename(confM2R, year, month, "So")
    if confM2R.ocean_indata_type == 'NORESM':
        filename = fc.getNORESMfilename(confM2R, year, month, "salnlvl")
    # Now open the input file and get the time
    cdf = Dataset(filename)
    # Reference Julian date: days since 1948-01-01.
    jdref = date2num(datetime(1948, 1, 1),
                     units="days since 1948-01-01 00:00:00",
                     calendar="standard")
    if confM2R.ocean_indata_type == 'SODA3_5DAY':
        # 5-day files carry their own time units on the config's time object.
        currentdate = datetime(year, month, day)
        units = confM2R.time_object.units
        jd = date2num(currentdate, units=confM2R.time_object.units, calendar=confM2R.time_object.calendar)
    else:
        # Find the day and month that the GLORYS file represents based on the year and ID number.
        # Each file represents a 1 month average.
        # calendar = cdf.variables["time"].calendar
        units = cdf.variables["time"].units
        currentdate = datetime(year, month, day)
        jd = date2num(currentdate, units="days since 1948-01-01 00:00:00", calendar="standard")
    # Record the current time relative to the reference date on the grid.
    confM2R.grdROMS.time = (jd - jdref)
    confM2R.grdROMS.reftime = jdref
    confM2R.grdROMS.timeunits = "days since 1948-01-01 00:00:00"
    cdf.close()
    logging.info("-------------------------------")
    logging.info('Current time of {} file : {}'.format(confM2R.ocean_indata_type,
                                                       currentdate))
    logging.info("-------------------------------")
def get_3d_data(confM2R, varname, year, month, day, timecounter):
    """Read one 3D (depth-resolved) field for the given date.

    Converts Kelvin temperatures to Celsius for GLORYS/NORESM input and
    replaces masked points with the configured fill value.  Returns None
    when the input file cannot be opened.
    """
    varN = confM2R.global_varnames.index(varname)

    # The variable splitExtract is defined in IOsubset.py and depends on the orientation
    # and ocean_indata_type of grid (-180-180 or 0-360). Assumes regular grid.
    filename = fc.get_filename(confM2R, year, month, day, confM2R.input_varnames[varN])
    try:
        cdf = Dataset(filename)
    except Exception:
        # BUGFIX: was a bare `except:`, which also traps SystemExit and
        # KeyboardInterrupt.  Catch Exception and log the full traceback.
        logging.exception("[M2R_model2roms] Unable to open input file {}".format(filename))
        return

    if confM2R.ocean_indata_type == "SODA3":
        # SODA3 stores one year per file; month-1 indexes the record.
        data = cdf.variables[confM2R.input_varnames[varN]][month - 1, :, :, :]
        data = np.where(data.mask, confM2R.fillvaluein, data)
    if confM2R.ocean_indata_type == "NORESM":
        # For NorESM data - all data is in one big file so we need the timecounter to access correct data
        myunits = cdf.variables[str(confM2R.input_varnames[varN])].units
        data = np.squeeze(cdf.variables[str(confM2R.input_varnames[varN])][timecounter, :, :, :])
        data = np.where(data.mask, confM2R.fillvaluein, data)
    if confM2R.ocean_indata_type == "GLORYS":
        myunits = cdf.variables[str(confM2R.input_varnames[varN])].units
        data = np.squeeze(cdf.variables[str(confM2R.input_varnames[varN])][0, :, :, :])
        data = np.where(data.mask, confM2R.fillvaluein, data)
    cdf.close()

    if varname == 'temperature' and confM2R.ocean_indata_type in ["GLORYS", "NORESM"]:
        # Convert from Kelvin to Celsius, keeping GLORYS fill points intact.
        if myunits == "degree_Kelvin" or myunits == "K":
            if confM2R.ocean_indata_type in ["GLORYS"]:
                data = np.where(data <= -32767, confM2R.grdROMS.fillval, data)
            data = data - 273.15

    if confM2R.ocean_indata_type == "GLORYS":
        data = np.where(data <= -32767, confM2R.grdROMS.fillval, data)
        data = np.ma.masked_where(data <= confM2R.grdROMS.fillval, data)

    logging.debug('Data range of {} just after extracting from netcdf file: {:3.3f}-{:3.3f}'.format(
        str(confM2R.input_varnames[varN]),
        float(data.min()), float(data.max())))

    return data
def get_2d_data(confM2R, myvar, year, month, day, timecounter):
    """Read one 2D field (ssh / ice variables) for the given date.

    When ``set_2d_vars_to_zero`` is enabled, the ice variables are returned
    as zero arrays shaped like the input grid without touching any file.
    Returns None when the input file cannot be opened.
    """
    varN = confM2R.global_varnames.index(myvar)

    if confM2R.set_2d_vars_to_zero and confM2R.input_varnames[varN] in ['ageice', 'uice',
                                                                        'vice',
                                                                        'aice',
                                                                        'hice',
                                                                        'hs']:
        return np.zeros((np.shape(confM2R.grdMODEL.lon)))
    else:
        filename = fc.get_filename(confM2R, year, month, day, confM2R.input_varnames[varN])
        try:
            cdf = Dataset(filename)
        except Exception:
            # BUGFIX: was a bare `except:`, which also traps SystemExit and
            # KeyboardInterrupt.  Catch Exception and log the full traceback.
            logging.exception("[M2R_model2roms] Unable to open input file {}".format(filename))
            return

        # NOTE(review): if ocean_indata_type matches none of the branches
        # below, `data` stays unbound and the final return raises — confirm
        # upstream config validation guarantees a supported type.
        if confM2R.ocean_indata_type in ["SODA", "SODA3_5DAY"]:
            data = cdf.variables[confM2R.input_varnames[varN]][0, :, :]
        if confM2R.ocean_indata_type == "SODA3":
            if myvar == 'aice':
                # We only extract the first thickness concentration. Need to fix this so all 5 classes can be extracted.
                # http://www.atmos.umd.edu/~ocean/index_files/soda3_readme.htm
                # hi: sea ice thickness [m ice]
                # mi: sea ice mass [kg/m^2]
                # hs: snow thickness [m snow]
                # {cn1,cn2,cn3,cn4,cn5}: sea ice concentration [0:1] in five ice thickness classes
                data = cdf.variables[confM2R.input_varnames[varN]][int(month - 1), 0, :, :]
            else:
                data = cdf.variables[confM2R.input_varnames[varN]][int(month - 1), :, :]
        if confM2R.ocean_indata_type == "NORESM" and confM2R.set_2d_vars_to_zero is False:
            # myunits = cdf.variables[str(grdROMS.varNames[varN])].units
            # For NORESM data are 12 months of data stored in ice files. Use ID as month indicator to get data.
            data = np.squeeze(cdf.variables[str(confM2R.input_varnames[varN])][timecounter, :, :])
            data = np.where(data.mask, confM2R.grdROMS.fillval, data)
        if confM2R.ocean_indata_type == "GLORYS":
            data = np.squeeze(cdf.variables[str(confM2R.input_varnames[varN])][0, :, :])
            data = np.where(data.mask, confM2R.grdROMS.fillval, data)

        if not confM2R.set_2d_vars_to_zero:
            cdf.close()
        if __debug__ and not confM2R.set_2d_vars_to_zero:
            logging.info("[M2R_model2roms] Data range of {} just after extracting from netcdf "
                         "file: {:3.3f}-{:3.3f}".format(str(confM2R.input_varnames[varN]),
                                                        float(data.min()), float(data.max())))
        return data
def convert_MODEL2ROMS(confM2R):
    """Main driver: convert input-model fields to ROMS clim/init files.

    For every (year, month, day) requested by the configuration, each global
    variable is read, horizontally interpolated to the ROMS grid, vertically
    interpolated to s-levels (3D fields), masked over land, and written to
    the climatology file (and the initial file at ``inittime``).
    """
    # First opening of input file is just for initialization of grid
    filenamein = fc.get_filename(confM2R, confM2R.start_year, confM2R.start_month, confM2R.start_day, None)

    # Finalize creating the model grd object now that we know the filename for input data
    confM2R.grdMODEL.create_object(confM2R, filenamein)
    confM2R.grdMODEL.getdims()

    # Create the ESMF weights used to do all of the horizontal interpolation
    interp2D.setup_ESMF_interpolation_weights(confM2R)

    # Now we want to subset the data to avoid storing more information than we need.
    # We do this by finding the indices of maximum and minimum latitude and longitude in the matrixes
    if confM2R.subset_indata:
        IOsubset.find_subset_indices(confM2R.grdMODEL, min_lat=confM2R.subset[0], max_lat=confM2R.subset[1],
                                     min_lon=confM2R.subset[2], max_lon=confM2R.subset[3])

    logging.info("[M2R_model2roms] ==> Initializing done")
    logging.info("[M2R_model2roms] --------------------------")
    logging.info("[M2R_model2roms] ==> Starting loop over time")

    time_counter = 0
    first_run = True
    for year in confM2R.years:
        months = datetimeFunctions.create_list_of_months(confM2R, year)
        for month in months:
            days = datetimeFunctions.create_list_of_days(confM2R, year, month, first_run)
            for day in days:
                # Get the current date for given time-step
                get_time(confM2R, year, month, day, time_counter)
                # Each MODEL file consist only of one time step. Get the subset data selected, and
                # store that time step in a new array:
                if first_run:
                    logging.info("[M2R_model2roms] => NOTE! Make sure that these two arrays are in sequential order:")
                    logging.info("[M2R_model2roms] ==> myvars: {}".format(confM2R.input_varnames))
                    logging.info("[M2R_model2roms] ==> varNames {}".format(confM2R.global_varnames))
                    first_run = False

                    if confM2R.subset_indata:
                        # The first iteration we want to organize the subset indices we want to extract
                        # from the input data to get the interpolation correct and to function fast
                        IOsubset.organize_split(confM2R.grdMODEL, confM2R.grdROMS)

                for myvar in confM2R.global_varnames:
                    # 3D (depth-resolved) fields are read with the full column.
                    if myvar in ['temperature', 'salinity', 'uvel', 'vvel', 'O3_c', 'O3_TA', 'N1_p', 'N3_n', 'N5_s',
                                 'O2_o']:
                        data = get_3d_data(confM2R, myvar, year, month, day, time_counter)
                    # 2D surface/ice fields.
                    if myvar in ['ssh', 'ageice', 'uice', 'vice', 'aice', 'hice', 'snow_thick']:
                        data = get_2d_data(confM2R, myvar, year, month, day, time_counter)

                    # Take the input data and horizontally interpolate to your grid
                    array1 = interp2D.do_hor_interpolation_regular_grid(confM2R, data, myvar)

                    if myvar in ['temperature', 'salinity', 'O3_c', 'O3_TA', 'N1_p', 'N3_n', 'N5_s', 'O2_o']:
                        STdata = vertical_interpolation(myvar, array1, array1, confM2R.grdROMS, confM2R.grdMODEL)

                        # Apply the land mask level by level, then cap runaway values.
                        for dd in range(len(STdata[:, 0, 0])):
                            STdata[dd, :, :] = np.where(confM2R.grdROMS.mask_rho == 0, confM2R.grdROMS.fillval,
                                                        STdata[dd, :, :])

                        STdata = np.where(abs(STdata) > 1000, confM2R.grdROMS.fillval, STdata)

                        IOwrite.write_clim_file(confM2R, time_counter, myvar, STdata)
                        if time_counter == confM2R.grdROMS.inittime and confM2R.grdROMS.write_init is True:
                            IOinitial.create_init_file(confM2R, time_counter, myvar, STdata)

                    if myvar in ['ssh', 'ageice', 'aice', 'hice', 'snow_thick']:
                        SSHdata = array1[0, :, :]

                        SSHdata = np.where(confM2R.grdROMS.mask_rho == 0, confM2R.grdROMS.fillval, SSHdata)
                        SSHdata = np.where((abs(SSHdata) > 100) | (SSHdata == 0), confM2R.grdROMS.fillval, SSHdata)

                        # Specific for ROMS - we set 0 where we should have fillvalue for ice otherwise ROMS blows up.
                        SSHdata = np.where(abs(SSHdata) == confM2R.grdROMS.fillval, 0, SSHdata)

                        IOwrite.write_clim_file(confM2R, time_counter, myvar, SSHdata)
                        if time_counter == confM2R.grdROMS.inittime:
                            IOinitial.create_init_file(confM2R, time_counter, myvar, SSHdata)

                    # The following are special routines used to calculate the u and v velocity
                    # of ice based on the transport, which is divided by snow and ice thickenss
                    # and then multiplied by grid size in dx or dy direction (opposite of transport).
                    if myvar in ['uice', 'vice']:
                        SSHdata = array1[0, :, :]

                        if myvar == "uice":
                            mymask = confM2R.grdROMS.mask_u
                        if myvar == "vice":
                            mymask = confM2R.grdROMS.mask_v

                        SSHdata = np.where(mymask == 0, confM2R.grdROMS.fillval, SSHdata)
                        SSHdata = np.where((abs(SSHdata) > 100) | (SSHdata == 0), confM2R.grdROMS.fillval, SSHdata)
                        SSHdata = np.where(abs(SSHdata) == confM2R.grdROMS.fillval, 0, SSHdata)

                        IOwrite.write_clim_file(confM2R, time_counter, myvar, SSHdata)
                        if time_counter == confM2R.grdROMS.inittime:
                            if myvar in ['uice', 'vice']:
                                IOinitial.create_init_file(confM2R, time_counter, myvar, SSHdata)

                    if myvar == 'uvel':
                        # Remember the u-component; the rotation below runs
                        # once the matching v-component ('vvel') arrives.
                        array2 = array1

                    if myvar == 'vvel':
                        urot, vrot = rotate(confM2R.grdROMS, confM2R.grdMODEL, data, array2, array1)

                        u, v = interpolate2uv(confM2R.grdROMS, confM2R.grdMODEL, urot, vrot)

                        Udata, Vdata, UBARdata, VBARdata = vertical_interpolation(myvar, u, v, confM2R.grdROMS,
                                                                                  confM2R.grdMODEL)

                    if myvar == 'vvel':
                        # Mask land and cap unrealistic values on all four fields.
                        Udata = np.where(confM2R.grdROMS.mask_u == 0, confM2R.grdROMS.fillval, Udata)
                        Udata = np.where(abs(Udata) > 1000, confM2R.grdROMS.fillval, Udata)
                        Vdata = np.where(confM2R.grdROMS.mask_v == 0, confM2R.grdROMS.fillval, Vdata)
                        Vdata = np.where(abs(Vdata) > 1000, confM2R.grdROMS.fillval, Vdata)
                        UBARdata = np.where(confM2R.grdROMS.mask_u == 0, confM2R.grdROMS.fillval, UBARdata)
                        UBARdata = np.where(abs(UBARdata) > 1000, confM2R.grdROMS.fillval, UBARdata)
                        VBARdata = np.where(confM2R.grdROMS.mask_v == 0, confM2R.grdROMS.fillval, VBARdata)
                        VBARdata = np.where(abs(VBARdata) > 1000, confM2R.grdROMS.fillval, VBARdata)

                        IOwrite.write_clim_file(confM2R, time_counter, myvar, Udata, Vdata, UBARdata, VBARdata)
                        if time_counter == confM2R.grdROMS.inittime:
                            IOinitial.create_init_file(confM2R, time_counter, myvar, Udata, Vdata, UBARdata, VBARdata)

                time_counter += 1
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for locally-connected layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from absl.testing import parameterized
from tensorflow.python import keras
from tensorflow.python.framework import test_util as tf_test_util
from tensorflow.python.keras import testing_utils
from tensorflow.python.platform import test
from tensorflow.python.training.rmsprop import RMSPropOptimizer
# All (data_format, padding, implementation) combinations exercised by the
# parameterized tests below: 2 formats x 2 paddings x 2 implementations,
# ordered implementation-major to match the original hand-written list.
_DATA_FORMAT_PADDING_IMPLEMENTATION = [
    {
        'data_format': data_format,
        'padding': padding,
        'implementation': implementation,
    }
    for implementation in (1, 2)
    for data_format in ('channels_first', 'channels_last')
    for padding in ('valid', 'same')
]
@tf_test_util.run_all_in_graph_and_eager_modes
class LocallyConnected1DLayersTest(test.TestCase, parameterized.TestCase):
  """Tests LocallyConnected1D across data formats, paddings, and impls."""

  @parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION)
  def test_locallyconnected_1d(self, data_format, padding, implementation):
    # Smoke-test layer construction and forward pass for each combination.
    with self.cached_session():
      num_samples = 2
      num_steps = 8
      input_dim = 5
      filter_length = 3
      filters = 4

      for strides in [1]:
        if padding == 'same' and strides != 1:
          continue
        kwargs = {
            'filters': filters,
            'kernel_size': filter_length,
            'padding': padding,
            'strides': strides,
            'data_format': data_format,
            'implementation': implementation
        }

        if padding == 'same' and implementation == 1:
          # Implementation 1 does not support 'same' padding; the layer
          # constructor is expected to raise.
          self.assertRaises(ValueError, keras.layers.LocallyConnected1D,
                            **kwargs)
        else:
          testing_utils.layer_test(
              keras.layers.LocallyConnected1D,
              kwargs=kwargs,
              input_shape=(num_samples, num_steps, input_dim))

  @parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION)
  def test_locallyconnected_1d_regularization(self, data_format, padding,
                                              implementation):
    # Verify regularizer losses are registered and constraints are stored.
    num_samples = 2
    num_steps = 8
    input_dim = 5
    filter_length = 3
    filters = 4
    kwargs = {
        'filters': filters,
        'kernel_size': filter_length,
        'kernel_regularizer': 'l2',
        'bias_regularizer': 'l2',
        'activity_regularizer': 'l2',
        'data_format': data_format,
        'implementation': implementation,
        'padding': padding
    }
    if padding == 'same' and implementation == 1:
      self.assertRaises(ValueError, keras.layers.LocallyConnected1D, **kwargs)
    else:
      with self.cached_session():
        layer = keras.layers.LocallyConnected1D(**kwargs)
        layer.build((num_samples, num_steps, input_dim))
        # Kernel + bias regularizers are registered at build time...
        self.assertEqual(len(layer.losses), 2)
        layer(
            keras.backend.variable(
                np.ones((num_samples, num_steps, input_dim))))
        # ...and the activity regularizer adds a third loss on call.
        self.assertEqual(len(layer.losses), 3)

    k_constraint = keras.constraints.max_norm(0.01)
    b_constraint = keras.constraints.max_norm(0.01)
    kwargs = {
        'filters': filters,
        'kernel_size': filter_length,
        'kernel_constraint': k_constraint,
        'bias_constraint': b_constraint,
    }
    with self.cached_session():
      layer = keras.layers.LocallyConnected1D(**kwargs)
      layer.build((num_samples, num_steps, input_dim))
      # Constraint objects must be retrievable from the built weights.
      self.assertEqual(layer.kernel.constraint, k_constraint)
      self.assertEqual(layer.bias.constraint, b_constraint)
@tf_test_util.run_all_in_graph_and_eager_modes
class LocallyConnected2DLayersTest(test.TestCase, parameterized.TestCase):
  """Tests LocallyConnected2D across data formats, paddings, and impls."""

  @parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION)
  def test_locallyconnected_2d(self, data_format, padding, implementation):
    # Smoke-test layer construction and forward pass for each combination,
    # including strided variants (skipped for 'same' padding).
    with self.cached_session():
      num_samples = 8
      filters = 3
      stack_size = 4
      num_row = 6
      num_col = 10

      for strides in [(1, 1), (2, 2)]:
        if padding == 'same' and strides != (1, 1):
          continue

        kwargs = {
            'filters': filters,
            'kernel_size': 3,
            'padding': padding,
            'kernel_regularizer': 'l2',
            'bias_regularizer': 'l2',
            'strides': strides,
            'data_format': data_format,
            'implementation': implementation
        }

        if padding == 'same' and implementation == 1:
          # Implementation 1 does not support 'same' padding; the layer
          # constructor is expected to raise.
          self.assertRaises(ValueError, keras.layers.LocallyConnected2D,
                            **kwargs)
        else:
          testing_utils.layer_test(
              keras.layers.LocallyConnected2D,
              kwargs=kwargs,
              input_shape=(num_samples, num_row, num_col, stack_size))

  @parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION)
  def test_locallyconnected_2d_channels_first(self, data_format, padding,
                                              implementation):
    # Same smoke test with default strides and no regularizers.
    with self.cached_session():
      num_samples = 8
      filters = 3
      stack_size = 4
      num_row = 6
      num_col = 10

      kwargs = {
          'filters': filters,
          'kernel_size': 3,
          'data_format': data_format,
          'implementation': implementation,
          'padding': padding
      }

      if padding == 'same' and implementation == 1:
        self.assertRaises(ValueError, keras.layers.LocallyConnected2D, **kwargs)
      else:
        testing_utils.layer_test(
            keras.layers.LocallyConnected2D,
            kwargs=kwargs,
            input_shape=(num_samples, num_row, num_col, stack_size))

  @parameterized.parameters(_DATA_FORMAT_PADDING_IMPLEMENTATION)
  def test_locallyconnected_2d_regularization(self, data_format, padding,
                                              implementation):
    # Verify regularizer losses are registered and constraints are stored.
    num_samples = 2
    filters = 3
    stack_size = 4
    num_row = 6
    num_col = 7
    kwargs = {
        'filters': filters,
        'kernel_size': 3,
        'kernel_regularizer': 'l2',
        'bias_regularizer': 'l2',
        'activity_regularizer': 'l2',
        'implementation': implementation,
        'padding': padding
    }
    if padding == 'same' and implementation == 1:
      self.assertRaises(ValueError, keras.layers.LocallyConnected2D, **kwargs)
    else:
      with self.cached_session():
        layer = keras.layers.LocallyConnected2D(**kwargs)
        layer.build((num_samples, num_row, num_col, stack_size))
        # Kernel + bias regularizers are registered at build time...
        self.assertEqual(len(layer.losses), 2)
        layer(
            keras.backend.variable(
                np.ones((num_samples, num_row, num_col, stack_size))))
        # ...and the activity regularizer adds a third loss on call.
        self.assertEqual(len(layer.losses), 3)

    k_constraint = keras.constraints.max_norm(0.01)
    b_constraint = keras.constraints.max_norm(0.01)
    kwargs = {
        'filters': filters,
        'kernel_size': 3,
        'kernel_constraint': k_constraint,
        'bias_constraint': b_constraint,
    }
    with self.cached_session():
      layer = keras.layers.LocallyConnected2D(**kwargs)
      layer.build((num_samples, num_row, num_col, stack_size))
      # Constraint objects must be retrievable from the built weights.
      self.assertEqual(layer.kernel.constraint, k_constraint)
      self.assertEqual(layer.bias.constraint, b_constraint)
@tf_test_util.run_all_in_graph_and_eager_modes
class LocallyConnectedImplementationModeTest(test.TestCase,
                                             parameterized.TestCase):
  """Tests that implementation=1 and implementation=2 behave identically."""

  @parameterized.parameters(['channels_first', 'channels_last'])
  def test_locallyconnected_implementation(self, data_format):
    """Train twin models (implementation 1 vs 2) and compare their outputs.

    Weights of the sparse (implementation=2) model are transplanted into the
    dense (implementation=1) model, so the two must agree both right after
    the copy and after a couple of identical training epochs.
    """
    with self.cached_session():
      num_samples = 4
      num_classes = 3
      num_epochs = 2

      np.random.seed(1)
      targets = np.random.randint(0, num_classes, (num_samples,))

      # Fixed spatial/channel configuration; only the width varies.
      height = 7
      filters = 2
      num_layers = 2
      for width in [1, 6]:
        inputs = get_inputs(data_format, filters, height, num_samples,
                            width)
        # width == 1 selects the 1-D layer path: kernel/stride specs are
        # 1-tuples; otherwise 2-D specs are used.
        kernel_size = (3,) if width == 1 else (3, 2)
        strides = (1,) if width == 1 else (1, 3)
        kwargs = {
            'layers': num_layers,
            'filters': filters,
            'kernel_size': kernel_size,
            'strides': strides,
            'data_format': data_format,
            'num_classes': num_classes
        }
        model_1 = get_model(implementation=1, **kwargs)
        model_2 = get_model(implementation=2, **kwargs)

        # Build models.
        model_1.train_on_batch(inputs, targets)
        model_2.train_on_batch(inputs, targets)

        # Copy weights.
        copy_model_weights(model_2, model_1)

        # Compare outputs at initialization.
        out_1 = model_1.call(inputs)
        out_2 = model_2.call(inputs)
        self.assertAllCloseAccordingToType(
            out_1, out_2, rtol=1e-5, atol=1e-5)

        # Train.
        model_1.fit(
            x=inputs, y=targets, epochs=num_epochs,
            batch_size=num_samples)
        model_2.fit(
            x=inputs, y=targets, epochs=num_epochs,
            batch_size=num_samples)

        # Compare outputs after a few training steps.
        out_1 = model_1.call(inputs)
        out_2 = model_2.call(inputs)
        self.assertAllCloseAccordingToType(out_1, out_2, atol=2e-4)

  def test_make_2d(self):
    """make_2d must agree with a numpy reshape for arbitrary split points."""
    input_shapes = [
        (0,),
        (0, 0),
        (1,),
        (2,),
        (3,),
        (1, 0),
        (0, 3),
        (1, 1),
        (1, 2),
        (3, 1),
        (2, 2),
        (3, 3),
        (1, 0, 1),
        (5, 2, 3),
        (3, 5, 6, 7, 0),
        (3, 2, 2, 4, 4),
        (1, 2, 3, 4, 7, 2),
    ]
    np.random.seed(1)
    for input_shape in input_shapes:
      inputs = np.random.normal(0, 1, input_shape)
      inputs_tf = keras.backend.variable(inputs)
      # Split anywhere from "all dims on the right" to "all on the left".
      split_dim = np.random.randint(0, inputs.ndim + 1)
      expected = np.reshape(
          inputs,
          (int(np.prod(inputs.shape[:split_dim])),
           int(np.prod(inputs.shape[split_dim:]))))
      actual = keras.backend.get_value(
          keras.layers.local.make_2d(inputs_tf, split_dim))
      self.assertAllCloseAccordingToType(expected, actual)
def get_inputs(data_format, filters, height, num_samples, width):
  """Draw a random float32 input batch for the locally-connected tests.

  Args:
    data_format: 'channels_first' or 'channels_last'.
    filters: number of channels per sample.
    height: spatial height.
    num_samples: batch size.
    width: spatial width; width == 1 selects a 1-D (no width axis) layout.

  Returns:
    A float32 ndarray of shape (num_samples,) + per-sample shape, sampled
    from a standard normal distribution.

  Raises:
    NotImplementedError: for any other data_format value.
  """
  if data_format == 'channels_first':
    sample_shape = ((filters, height) if width == 1
                    else (filters, height, width))
  elif data_format == 'channels_last':
    sample_shape = ((height, filters) if width == 1
                    else (height, width, filters))
  else:
    raise NotImplementedError(data_format)
  batch_shape = (num_samples,) + sample_shape
  return np.random.normal(0, 1, batch_shape).astype(np.float32)
def xent(y_true, y_pred):
  """Sparse softmax cross-entropy between integer labels and logits.

  Labels are flattened to a 1-D int32 tensor before being handed to the
  TF op, which expects class indices rather than one-hot rows.
  """
  flat_labels = keras.backend.reshape(y_true, (-1,))
  flat_labels = keras.backend.cast(flat_labels,
                                   keras.backend.dtypes_module.int32)
  return keras.backend.nn.sparse_softmax_cross_entropy_with_logits(
      labels=flat_labels,
      logits=y_pred)
def get_model(implementation,
              filters,
              kernel_size,
              strides,
              layers,
              num_classes,
              data_format):
  """Build and compile a small locally-connected classifier.

  The rank of `kernel_size` selects the layer type: a 1-tuple builds
  LocallyConnected1D stacks, a 2-tuple LocallyConnected2D stacks; anything
  else raises NotImplementedError. The network is `layers` locally-connected
  layers followed by Flatten and a Dense logits head, compiled with RMSProp
  and the module-level `xent` loss.
  """
  rank = len(kernel_size)
  if rank == 1:
    lc_layer = keras.layers.LocallyConnected1D
  elif rank == 2:
    lc_layer = keras.layers.LocallyConnected2D
  else:
    raise NotImplementedError(kernel_size)

  model = keras.Sequential()
  for _ in range(layers):
    model.add(lc_layer(
        padding='valid',
        kernel_initializer=keras.initializers.random_normal(),
        bias_initializer=keras.initializers.random_normal(),
        filters=filters,
        strides=strides,
        kernel_size=kernel_size,
        activation=keras.activations.relu,
        data_format=data_format,
        implementation=implementation))
  model.add(keras.layers.Flatten())
  model.add(keras.layers.Dense(num_classes))
  model.compile(
      optimizer=RMSPropOptimizer(0.01),
      metrics=[keras.metrics.categorical_accuracy],
      loss=xent)
  return model
def copy_lc_weights(lc_layer_2_from, lc_layer_1_to):
  """Transplant implementation=2 weights into an implementation=1 layer.

  The sparse (implementation=2) kernel is masked, permuted into the dense
  layout, compacted by dropping the structurally-zero entries, and reshaped
  to the dense layer's kernel shape before being installed together with
  the bias.

  Raises:
    NotImplementedError: for an unknown data_format or layer type.
  """
  lc_2_kernel, lc_2_bias = lc_layer_2_from.weights
  lc_2_kernel_masked = lc_2_kernel * lc_layer_2_from.kernel_mask

  data_format = lc_layer_2_from.data_format
  if data_format not in ('channels_first', 'channels_last'):
    raise NotImplementedError(data_format)
  channels_first = data_format == 'channels_first'
  # Axis permutation that maps the sparse kernel layout onto the dense one;
  # it depends on both the layer rank and the channel position.
  if isinstance(lc_layer_2_from, keras.layers.LocallyConnected1D):
    permutation = (3, 0, 1, 2) if channels_first else (2, 0, 1, 3)
  elif isinstance(lc_layer_2_from, keras.layers.LocallyConnected2D):
    permutation = ((4, 5, 0, 1, 2, 3) if channels_first
                   else (3, 4, 0, 1, 2, 5))
  else:
    raise NotImplementedError(lc_layer_2_from)

  lc_2_kernel_masked = keras.backend.permute_dimensions(
      lc_2_kernel_masked, permutation)
  # Keep only entries that survive the mask, then pack them into the dense
  # kernel's shape.
  lc_2_kernel_mask = keras.backend.math_ops.not_equal(
      lc_2_kernel_masked, 0)
  lc_2_kernel_flat = keras.backend.array_ops.boolean_mask(
      lc_2_kernel_masked, lc_2_kernel_mask)
  lc_2_kernel_reshaped = keras.backend.reshape(lc_2_kernel_flat,
                                               lc_layer_1_to.kernel.shape)
  lc_layer_1_to.set_weights([
      keras.backend.get_value(lc_2_kernel_reshaped),
      keras.backend.get_value(lc_2_bias),
  ])
def copy_model_weights(model_2_from, model_1_to):
  """Copy weights layer-by-layer from one model into a parallel model.

  Both models are assumed to have matching layer stacks (as built by
  `get_model` with different `implementation` values). Locally-connected
  layers are converted via `copy_lc_weights`; Dense layers are copied
  directly; layers without weights (e.g. Flatten) are skipped.
  """
  # Idiom fix: iterate the paired layers directly instead of
  # `for l in range(len(...))` with repeated indexing.
  lc_types = (keras.layers.LocallyConnected2D,
              keras.layers.LocallyConnected1D)
  for layer_2_from, layer_1_to in zip(model_2_from.layers, model_1_to.layers):
    if isinstance(layer_2_from, lc_types):
      copy_lc_weights(layer_2_from, layer_1_to)
    elif isinstance(layer_2_from, keras.layers.Dense):
      weights_2, bias_2 = layer_2_from.weights
      layer_1_to.set_weights([
          keras.backend.get_value(weights_2),
          keras.backend.get_value(bias_2),
      ])
    # Other layer types carry no weights to copy.
# Run the locally-connected layer test suite when executed as a script.
if __name__ == '__main__':
  test.main()
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Test IP module/regex."""
#
# (C) Pywikibot team, 2012-2019
#
# Distributed under the terms of the MIT license.
from __future__ import absolute_import, division, unicode_literals
from pywikibot.tools import is_IP, PY2, PYTHON_VERSION
from tests import unittest_print
from tests.aspects import unittest, TestCase
from tests.utils import expected_failure_if
class IPAddressModuleTestCase(TestCase):

    """Unit test class base for IP matching.

    Each call to ipv6test() is one subtest asserting whether is_IP()
    accepts or rejects a candidate address string.
    """

    # NOTE(review): presumably tells the test framework that no network
    # access is needed for these regex-only tests - confirm in tests.aspects.
    net = False

    def setUp(self):
        """Set up test."""
        # Counter of ipv6test() subtests run; reported in tearDown.
        self.total = 0
        super(IPAddressModuleTestCase, self).setUp()

    def tearDown(self):
        """Tear down test."""
        super(IPAddressModuleTestCase, self).tearDown()
        unittest_print('{} subtests done'.format(self.total))

    def ipv6test(self, result, ip_address):
        """Perform one ip_address test.

        Assert that is_IP(ip_address) returns the expected boolean result.
        """
        self.total += 1
        with self.subTest(ip_address=ip_address):
            # Python 2 lacks useful subTest context in failure output, so an
            # explicit message is built there; Python 3 relies on subTest.
            msg = '"{}" match should be {} - not OK'.format(
                ip_address, result) if PY2 else None
            self.assertEqual(result, is_IP(ip_address), msg)

    def test_ipaddress_module(self):
        """Test ipaddress module."""
        # test from http://download.dartware.com/thirdparty/test-ipv6-regex.pl
        self.ipv6test(False, '')  # empty string
        self.ipv6test(True, '::1')  # loopback, compressed, non-routable
        self.ipv6test(True, '::')  # unspecified, compressed, non-routable
        self.ipv6test(True, '0:0:0:0:0:0:0:1')  # loopback, full
        self.ipv6test(True, '0:0:0:0:0:0:0:0')  # unspecified, full
        self.ipv6test(True, '2001:DB8:0:0:8:800:200C:417A')  # unicast, full
        self.ipv6test(True, 'FF01:0:0:0:0:0:0:101')  # multicast, full
        # unicast, compressed
        self.ipv6test(True, '2001:DB8::8:800:200C:417A')
        self.ipv6test(True, 'FF01::101')  # multicast, compressed
        # unicast, full
        self.ipv6test(False, '2001:DB8:0:0:8:800:200C:417A:221')
        self.ipv6test(False, 'FF01::101::2')  # multicast, compressed
        self.ipv6test(True, 'fe80::217:f2ff:fe07:ed62')
        self.ipv6test(True, '2001:0000:1234:0000:0000:C1C0:ABCD:0876')
        self.ipv6test(True, '3ffe:0b00:0000:0000:0001:0000:0000:000a')
        self.ipv6test(True, 'FF02:0000:0000:0000:0000:0000:0000:0001')
        self.ipv6test(True, '0000:0000:0000:0000:0000:0000:0000:0001')
        self.ipv6test(True, '0000:0000:0000:0000:0000:0000:0000:0000')
        # leading space
        self.ipv6test(False, ' 2001:0000:1234:0000:0000:C1C0:ABCD:0876')
        # trailing space
        self.ipv6test(False, '2001:0000:1234:0000:0000:C1C0:ABCD:0876 ')
        # leading and trailing space
        self.ipv6test(False, ' 2001:0000:1234:0000:0000:C1C0:ABCD:0876 ')
        # junk after valid address
        self.ipv6test(False, '2001:0000:1234:0000:0000:C1C0:ABCD:0876 0')
        # internal space
        self.ipv6test(False, '2001:0000:1234: 0000:0000:C1C0:ABCD:0876')
        # seven segments
        self.ipv6test(False, '3ffe:0b00:0000:0001:0000:0000:000a')
        # nine segments
        self.ipv6test(False, 'FF02:0000:0000:0000:0000:0000:0000:0000:0001')
        self.ipv6test(False, '3ffe:b00::1::a')  # double '::'
        # double "::"
        self.ipv6test(False, '::1111:2222:3333:4444:5555:6666::')
        self.ipv6test(True, '2::10')
        self.ipv6test(True, 'ff02::1')
        self.ipv6test(True, 'fe80::')
        self.ipv6test(True, '2002::')
        self.ipv6test(True, '2001:db8::')
        self.ipv6test(True, '2001:0db8:1234::')
        self.ipv6test(True, '::ffff:0:0')
        self.ipv6test(True, '::1')
        self.ipv6test(True, '1:2:3:4:5:6:7:8')
        self.ipv6test(True, '1:2:3:4:5:6::8')
        self.ipv6test(True, '1:2:3:4:5::8')
        self.ipv6test(True, '1:2:3:4::8')
        self.ipv6test(True, '1:2:3::8')
        self.ipv6test(True, '1:2::8')
        self.ipv6test(True, '1::8')
        self.ipv6test(True, '1::2:3:4:5:6:7')
        self.ipv6test(True, '1::2:3:4:5:6')
        self.ipv6test(True, '1::2:3:4:5')
        self.ipv6test(True, '1::2:3:4')
        self.ipv6test(True, '1::2:3')
        self.ipv6test(True, '1::8')
        self.ipv6test(True, '::2:3:4:5:6:7:8')
        self.ipv6test(True, '::2:3:4:5:6:7')
        self.ipv6test(True, '::2:3:4:5:6')
        self.ipv6test(True, '::2:3:4:5')
        self.ipv6test(True, '::2:3:4')
        self.ipv6test(True, '::2:3')
        self.ipv6test(True, '::8')
        self.ipv6test(True, '1:2:3:4:5:6::')
        self.ipv6test(True, '1:2:3:4:5::')
        self.ipv6test(True, '1:2:3:4::')
        self.ipv6test(True, '1:2:3::')
        self.ipv6test(True, '1:2::')
        self.ipv6test(True, '1::')
        self.ipv6test(True, '1:2:3:4:5::7:8')
        self.ipv6test(False, '1:2:3::4:5::7:8')  # Double "::"
        self.ipv6test(False, '12345::6:7:8')
        self.ipv6test(True, '1:2:3:4::7:8')
        self.ipv6test(True, '1:2:3::7:8')
        self.ipv6test(True, '1:2::7:8')
        self.ipv6test(True, '1::7:8')
        # IPv4 addresses as dotted-quads
        self.ipv6test(True, '1:2:3:4:5:6:1.2.3.4')
        self.ipv6test(True, '1:2:3:4:5::1.2.3.4')
        self.ipv6test(True, '1:2:3:4::1.2.3.4')
        self.ipv6test(True, '1:2:3::1.2.3.4')
        self.ipv6test(True, '1:2::1.2.3.4')
        self.ipv6test(True, '1::1.2.3.4')
        self.ipv6test(True, '1:2:3:4::5:1.2.3.4')
        self.ipv6test(True, '1:2:3::5:1.2.3.4')
        self.ipv6test(True, '1:2::5:1.2.3.4')
        self.ipv6test(True, '1::5:1.2.3.4')
        self.ipv6test(True, '1::5:11.22.33.44')
        # IPv4 octets above 255 are invalid everywhere below.
        self.ipv6test(False, '1::5:400.2.3.4')
        self.ipv6test(False, '1::5:260.2.3.4')
        self.ipv6test(False, '1::5:256.2.3.4')
        self.ipv6test(False, '1::5:1.256.3.4')
        self.ipv6test(False, '1::5:1.2.256.4')
        self.ipv6test(False, '1::5:1.2.3.256')
        self.ipv6test(False, '1::5:300.2.3.4')
        self.ipv6test(False, '1::5:1.300.3.4')
        self.ipv6test(False, '1::5:1.2.300.4')
        self.ipv6test(False, '1::5:1.2.3.300')
        self.ipv6test(False, '1::5:900.2.3.4')
        self.ipv6test(False, '1::5:1.900.3.4')
        self.ipv6test(False, '1::5:1.2.900.4')
        self.ipv6test(False, '1::5:1.2.3.900')
        self.ipv6test(False, '1::5:300.300.300.300')
        self.ipv6test(False, '1::5:3000.30.30.30')
        self.ipv6test(False, '1::400.2.3.4')
        self.ipv6test(False, '1::260.2.3.4')
        self.ipv6test(False, '1::256.2.3.4')
        self.ipv6test(False, '1::1.256.3.4')
        self.ipv6test(False, '1::1.2.256.4')
        self.ipv6test(False, '1::1.2.3.256')
        self.ipv6test(False, '1::300.2.3.4')
        self.ipv6test(False, '1::1.300.3.4')
        self.ipv6test(False, '1::1.2.300.4')
        self.ipv6test(False, '1::1.2.3.300')
        self.ipv6test(False, '1::900.2.3.4')
        self.ipv6test(False, '1::1.900.3.4')
        self.ipv6test(False, '1::1.2.900.4')
        self.ipv6test(False, '1::1.2.3.900')
        self.ipv6test(False, '1::300.300.300.300')
        self.ipv6test(False, '1::3000.30.30.30')
        self.ipv6test(False, '::400.2.3.4')
        self.ipv6test(False, '::260.2.3.4')
        self.ipv6test(False, '::256.2.3.4')
        self.ipv6test(False, '::1.256.3.4')
        self.ipv6test(False, '::1.2.256.4')
        self.ipv6test(False, '::1.2.3.256')
        self.ipv6test(False, '::300.2.3.4')
        self.ipv6test(False, '::1.300.3.4')
        self.ipv6test(False, '::1.2.300.4')
        self.ipv6test(False, '::1.2.3.300')
        self.ipv6test(False, '::900.2.3.4')
        self.ipv6test(False, '::1.900.3.4')
        self.ipv6test(False, '::1.2.900.4')
        self.ipv6test(False, '::1.2.3.900')
        self.ipv6test(False, '::300.300.300.300')
        self.ipv6test(False, '::3000.30.30.30')
        self.ipv6test(True, 'fe80::217:f2ff:254.7.237.98')
        self.ipv6test(True, '::ffff:192.168.1.26')
        # garbage instead of "." in IPv4
        self.ipv6test(False, '2001:1:1:1:1:1:255Z255X255Y255')
        self.ipv6test(False, '::ffff:192x168.1.26')  # ditto
        self.ipv6test(True, '::ffff:192.168.1.1')
        # IPv4-compatible IPv6 address, full, deprecated
        self.ipv6test(True, '0:0:0:0:0:0:13.1.68.3')
        # IPv4-mapped IPv6 address, full
        self.ipv6test(True, '0:0:0:0:0:FFFF:129.144.52.38')
        # IPv4-compatible IPv6 address, compressed, deprecated
        self.ipv6test(True, '::13.1.68.3')
        # IPv4-mapped IPv6 address, compressed
        self.ipv6test(True, '::FFFF:129.144.52.38')
        self.ipv6test(True, 'fe80:0:0:0:204:61ff:254.157.241.86')
        self.ipv6test(True, 'fe80::204:61ff:254.157.241.86')
        self.ipv6test(True, '::ffff:12.34.56.78')
        self.ipv6test(False, '::ffff:2.3.4')
        self.ipv6test(False, '::ffff:257.1.2.3')
        self.ipv6test(False, '1.2.3.4:1111:2222:3333:4444::5555')  # Aeron
        self.ipv6test(False, '1.2.3.4:1111:2222:3333::5555')
        self.ipv6test(False, '1.2.3.4:1111:2222::5555')
        self.ipv6test(False, '1.2.3.4:1111::5555')
        self.ipv6test(False, '1.2.3.4::5555')
        self.ipv6test(False, '1.2.3.4::')
        # Testing IPv4 addresses represented as dotted-quads
        self.ipv6test(True, '::ffff:192.0.2.128')
        self.ipv6test(False, 'XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:1.2.3.4')
        self.ipv6test(False, '1111:2222:3333:4444:5555:6666:256.256.256.256')
        # Subnet mask not accepted
        # full, with prefix
        self.ipv6test(False, '2001:0DB8:0000:CD30:0000:0000:0000:0000/60')
        # compressed, with prefix
        self.ipv6test(False, '2001:0DB8::CD30:0:0:0:0/60')
        # compressed, with prefix #2
        self.ipv6test(False, '2001:0DB8:0:CD30::/60')
        # compressed, unspecified address type, non-routable
        self.ipv6test(False, '::/128')
        # compressed, loopback address type, non-routable
        self.ipv6test(False, '::1/128')
        # compressed, multicast address type
        self.ipv6test(False, 'FF00::/8')
        # compressed, link-local unicast, non-routable
        self.ipv6test(False, 'FE80::/10')
        # compressed, site-local unicast, deprecated
        self.ipv6test(False, 'FEC0::/10')
        # standard IPv4, prefix not allowed
        self.ipv6test(False, '124.15.6.89/60')
        self.ipv6test(True, 'fe80:0000:0000:0000:0204:61ff:fe9d:f156')
        self.ipv6test(True, 'fe80:0:0:0:204:61ff:fe9d:f156')
        self.ipv6test(True, 'fe80::204:61ff:fe9d:f156')
        self.ipv6test(True, '::1')
        self.ipv6test(True, 'fe80::')
        self.ipv6test(True, 'fe80::1')
        self.ipv6test(False, ':')
        self.ipv6test(True, '::ffff:c000:280')
        # Aeron supplied these test cases
        self.ipv6test(False, '1111:2222:3333:4444::5555:')
        self.ipv6test(False, '1111:2222:3333::5555:')
        self.ipv6test(False, '1111:2222::5555:')
        self.ipv6test(False, '1111::5555:')
        self.ipv6test(False, '::5555:')
        self.ipv6test(False, ':::')
        self.ipv6test(False, '1111:')
        self.ipv6test(False, ':')
        self.ipv6test(False, ':1111:2222:3333:4444::5555')
        self.ipv6test(False, ':1111:2222:3333::5555')
        self.ipv6test(False, ':1111:2222::5555')
        self.ipv6test(False, ':1111::5555')
        self.ipv6test(False, ':::5555')
        self.ipv6test(False, ':::')
        # Additional test cases
        # from https://rt.cpan.org/Public/Bug/Display.html?id=50693
        self.ipv6test(True, '2001:0db8:85a3:0000:0000:8a2e:0370:7334')
        self.ipv6test(True, '2001:db8:85a3:0:0:8a2e:370:7334')
        self.ipv6test(True, '2001:db8:85a3::8a2e:370:7334')
        self.ipv6test(True, '2001:0db8:0000:0000:0000:0000:1428:57ab')
        self.ipv6test(True, '2001:0db8:0000:0000:0000::1428:57ab')
        self.ipv6test(True, '2001:0db8:0:0:0:0:1428:57ab')
        self.ipv6test(True, '2001:0db8:0:0::1428:57ab')
        self.ipv6test(True, '2001:0db8::1428:57ab')
        self.ipv6test(True, '2001:db8::1428:57ab')
        self.ipv6test(True, '0000:0000:0000:0000:0000:0000:0000:0001')
        self.ipv6test(True, '::1')
        self.ipv6test(True, '::ffff:0c22:384e')
        self.ipv6test(True, '2001:0db8:1234:0000:0000:0000:0000:0000')
        self.ipv6test(True, '2001:0db8:1234:ffff:ffff:ffff:ffff:ffff')
        self.ipv6test(True, '2001:db8:a::123')
        self.ipv6test(True, 'fe80::')
        self.ipv6test(False, '123')
        self.ipv6test(False, 'ldkfj')
        self.ipv6test(False, '2001::FFD3::57ab')
        self.ipv6test(False, '2001:db8:85a3::8a2e:37023:7334')
        self.ipv6test(False, '2001:db8:85a3::8a2e:370k:7334')
        self.ipv6test(False, '1:2:3:4:5:6:7:8:9')
        self.ipv6test(False, '1::2::3')
        self.ipv6test(False, '1:::3:4:5')
        self.ipv6test(False, '1:2:3::4:5:6:7:8:9')
        # New from Aeron
        self.ipv6test(True, '1111:2222:3333:4444:5555:6666:7777:8888')
        self.ipv6test(True, '1111:2222:3333:4444:5555:6666:7777::')
        self.ipv6test(True, '1111:2222:3333:4444:5555:6666::')
        self.ipv6test(True, '1111:2222:3333:4444:5555::')
        self.ipv6test(True, '1111:2222:3333:4444::')
        self.ipv6test(True, '1111:2222:3333::')
        self.ipv6test(True, '1111:2222::')
        self.ipv6test(True, '1111::')
        # self.ipv6test(True, "::") #duplicate
        self.ipv6test(True, '1111:2222:3333:4444:5555:6666::8888')
        self.ipv6test(True, '1111:2222:3333:4444:5555::8888')
        self.ipv6test(True, '1111:2222:3333:4444::8888')
        self.ipv6test(True, '1111:2222:3333::8888')
        self.ipv6test(True, '1111:2222::8888')
        self.ipv6test(True, '1111::8888')
        self.ipv6test(True, '::8888')
        self.ipv6test(True, '1111:2222:3333:4444:5555::7777:8888')
        self.ipv6test(True, '1111:2222:3333:4444::7777:8888')
        self.ipv6test(True, '1111:2222:3333::7777:8888')
        self.ipv6test(True, '1111:2222::7777:8888')
        self.ipv6test(True, '1111::7777:8888')
        self.ipv6test(True, '::7777:8888')
        self.ipv6test(True, '1111:2222:3333:4444::6666:7777:8888')
        self.ipv6test(True, '1111:2222:3333::6666:7777:8888')
        self.ipv6test(True, '1111:2222::6666:7777:8888')
        self.ipv6test(True, '1111::6666:7777:8888')
        self.ipv6test(True, '::6666:7777:8888')
        self.ipv6test(True, '1111:2222:3333::5555:6666:7777:8888')
        self.ipv6test(True, '1111:2222::5555:6666:7777:8888')
        self.ipv6test(True, '1111::5555:6666:7777:8888')
        self.ipv6test(True, '::5555:6666:7777:8888')
        self.ipv6test(True, '1111:2222::4444:5555:6666:7777:8888')
        self.ipv6test(True, '1111::4444:5555:6666:7777:8888')
        self.ipv6test(True, '::4444:5555:6666:7777:8888')
        self.ipv6test(True, '1111::3333:4444:5555:6666:7777:8888')
        self.ipv6test(True, '::3333:4444:5555:6666:7777:8888')
        self.ipv6test(True, '::2222:3333:4444:5555:6666:7777:8888')
        self.ipv6test(True, '1111:2222:3333:4444:5555:6666:123.123.123.123')
        self.ipv6test(True, '1111:2222:3333:4444:5555::123.123.123.123')
        self.ipv6test(True, '1111:2222:3333:4444::123.123.123.123')
        self.ipv6test(True, '1111:2222:3333::123.123.123.123')
        self.ipv6test(True, '1111:2222::123.123.123.123')
        self.ipv6test(True, '1111::123.123.123.123')
        self.ipv6test(True, '::123.123.123.123')
        self.ipv6test(True, '1111:2222:3333:4444::6666:123.123.123.123')
        self.ipv6test(True, '1111:2222:3333::6666:123.123.123.123')
        self.ipv6test(True, '1111:2222::6666:123.123.123.123')
        self.ipv6test(True, '1111::6666:123.123.123.123')
        self.ipv6test(True, '::6666:123.123.123.123')
        self.ipv6test(True, '1111:2222:3333::5555:6666:123.123.123.123')
        self.ipv6test(True, '1111:2222::5555:6666:123.123.123.123')
        self.ipv6test(True, '1111::5555:6666:123.123.123.123')
        self.ipv6test(True, '::5555:6666:123.123.123.123')
        self.ipv6test(True, '1111:2222::4444:5555:6666:123.123.123.123')
        self.ipv6test(True, '1111::4444:5555:6666:123.123.123.123')
        self.ipv6test(True, '::4444:5555:6666:123.123.123.123')
        self.ipv6test(True, '1111::3333:4444:5555:6666:123.123.123.123')
        self.ipv6test(True, '::2222:3333:4444:5555:6666:123.123.123.123')
        # Playing with combinations of "0" and "::"
        # NB: these are all syntactically correct, but are bad form
        # because "0" adjacent to "::" should be combined into "::"
        self.ipv6test(True, '::0:0:0:0:0:0:0')
        self.ipv6test(True, '::0:0:0:0:0:0')
        self.ipv6test(True, '::0:0:0:0:0')
        self.ipv6test(True, '::0:0:0:0')
        self.ipv6test(True, '::0:0:0')
        self.ipv6test(True, '::0:0')
        self.ipv6test(True, '::0')
        self.ipv6test(True, '0:0:0:0:0:0:0::')
        self.ipv6test(True, '0:0:0:0:0:0::')
        self.ipv6test(True, '0:0:0:0:0::')
        self.ipv6test(True, '0:0:0:0::')
        self.ipv6test(True, '0:0:0::')
        self.ipv6test(True, '0:0::')
        self.ipv6test(True, '0::')
        # New invalid from Aeron
        # Invalid data
        self.ipv6test(False, 'XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX')
        # Too many components
        self.ipv6test(False, '1111:2222:3333:4444:5555:6666:7777:8888:9999')
        self.ipv6test(False, '1111:2222:3333:4444:5555:6666:7777:8888::')
        self.ipv6test(False, '::2222:3333:4444:5555:6666:7777:8888:9999')
        # Too few components
        self.ipv6test(False, '1111:2222:3333:4444:5555:6666:7777')
        self.ipv6test(False, '1111:2222:3333:4444:5555:6666')
        self.ipv6test(False, '1111:2222:3333:4444:5555')
        self.ipv6test(False, '1111:2222:3333:4444')
        self.ipv6test(False, '1111:2222:3333')
        self.ipv6test(False, '1111:2222')
        self.ipv6test(False, '1111')
        # Missing :
        self.ipv6test(False, '11112222:3333:4444:5555:6666:7777:8888')
        self.ipv6test(False, '1111:22223333:4444:5555:6666:7777:8888')
        self.ipv6test(False, '1111:2222:33334444:5555:6666:7777:8888')
        self.ipv6test(False, '1111:2222:3333:44445555:6666:7777:8888')
        self.ipv6test(False, '1111:2222:3333:4444:55556666:7777:8888')
        self.ipv6test(False, '1111:2222:3333:4444:5555:66667777:8888')
        self.ipv6test(False, '1111:2222:3333:4444:5555:6666:77778888')
        # Missing : intended for ::
        self.ipv6test(False, '1111:2222:3333:4444:5555:6666:7777:8888:')
        self.ipv6test(False, '1111:2222:3333:4444:5555:6666:7777:')
        self.ipv6test(False, '1111:2222:3333:4444:5555:6666:')
        self.ipv6test(False, '1111:2222:3333:4444:5555:')
        self.ipv6test(False, '1111:2222:3333:4444:')
        self.ipv6test(False, '1111:2222:3333:')
        self.ipv6test(False, '1111:2222:')
        self.ipv6test(False, '1111:')
        self.ipv6test(False, ':')
        self.ipv6test(False, ':8888')
        self.ipv6test(False, ':7777:8888')
        self.ipv6test(False, ':6666:7777:8888')
        self.ipv6test(False, ':5555:6666:7777:8888')
        self.ipv6test(False, ':4444:5555:6666:7777:8888')
        self.ipv6test(False, ':3333:4444:5555:6666:7777:8888')
        self.ipv6test(False, ':2222:3333:4444:5555:6666:7777:8888')
        self.ipv6test(False, ':1111:2222:3333:4444:5555:6666:7777:8888')
        # :::
        self.ipv6test(False, ':::2222:3333:4444:5555:6666:7777:8888')
        self.ipv6test(False, '1111:::3333:4444:5555:6666:7777:8888')
        self.ipv6test(False, '1111:2222:::4444:5555:6666:7777:8888')
        self.ipv6test(False, '1111:2222:3333:::5555:6666:7777:8888')
        self.ipv6test(False, '1111:2222:3333:4444:::6666:7777:8888')
        self.ipv6test(False, '1111:2222:3333:4444:5555:::7777:8888')
        self.ipv6test(False, '1111:2222:3333:4444:5555:6666:::8888')
        self.ipv6test(False, '1111:2222:3333:4444:5555:6666:7777:::')
        # Double "::"
        self.ipv6test(False, '::2222::4444:5555:6666:7777:8888')
        self.ipv6test(False, '::2222:3333::5555:6666:7777:8888')
        self.ipv6test(False, '::2222:3333:4444::6666:7777:8888')
        self.ipv6test(False, '::2222:3333:4444:5555::7777:8888')
        self.ipv6test(False, '::2222:3333:4444:5555:7777::8888')
        self.ipv6test(False, '::2222:3333:4444:5555:7777:8888::')
        self.ipv6test(False, '1111::3333::5555:6666:7777:8888')
        self.ipv6test(False, '1111::3333:4444::6666:7777:8888')
        self.ipv6test(False, '1111::3333:4444:5555::7777:8888')
        self.ipv6test(False, '1111::3333:4444:5555:6666::8888')
        self.ipv6test(False, '1111::3333:4444:5555:6666:7777::')
        self.ipv6test(False, '1111:2222::4444::6666:7777:8888')
        self.ipv6test(False, '1111:2222::4444:5555::7777:8888')
        self.ipv6test(False, '1111:2222::4444:5555:6666::8888')
        self.ipv6test(False, '1111:2222::4444:5555:6666:7777::')
        self.ipv6test(False, '1111:2222:3333::5555::7777:8888')
        self.ipv6test(False, '1111:2222:3333::5555:6666::8888')
        self.ipv6test(False, '1111:2222:3333::5555:6666:7777::')
        self.ipv6test(False, '1111:2222:3333:4444::6666::8888')
        self.ipv6test(False, '1111:2222:3333:4444::6666:7777::')
        self.ipv6test(False, '1111:2222:3333:4444:5555::7777::')
        # Too many components
        self.ipv6test(False, '1111:2222:3333:4444:5555:6666:7777:8888:1.2.3.4')
        self.ipv6test(False, '1111:2222:3333:4444:5555:6666:7777:1.2.3.4')
        self.ipv6test(False, '1111:2222:3333:4444:5555:6666::1.2.3.4')
        self.ipv6test(False, '::2222:3333:4444:5555:6666:7777:1.2.3.4')
        self.ipv6test(False, '1111:2222:3333:4444:5555:6666:1.2.3.4.5')
        # Too few components
        self.ipv6test(False, '1111:2222:3333:4444:5555:1.2.3.4')
        self.ipv6test(False, '1111:2222:3333:4444:1.2.3.4')
        self.ipv6test(False, '1111:2222:3333:1.2.3.4')
        self.ipv6test(False, '1111:2222:1.2.3.4')
        self.ipv6test(False, '1111:1.2.3.4')
        # Missing :
        self.ipv6test(False, '11112222:3333:4444:5555:6666:1.2.3.4')
        self.ipv6test(False, '1111:22223333:4444:5555:6666:1.2.3.4')
        self.ipv6test(False, '1111:2222:33334444:5555:6666:1.2.3.4')
        self.ipv6test(False, '1111:2222:3333:44445555:6666:1.2.3.4')
        self.ipv6test(False, '1111:2222:3333:4444:55556666:1.2.3.4')
        self.ipv6test(False, '1111:2222:3333:4444:5555:66661.2.3.4')
        # Missing .
        self.ipv6test(False, '1111:2222:3333:4444:5555:6666:255255.255.255')
        self.ipv6test(False, '1111:2222:3333:4444:5555:6666:255.255255.255')
        self.ipv6test(False, '1111:2222:3333:4444:5555:6666:255.255.255255')
        # Missing : intended for ::
        self.ipv6test(False, ':1.2.3.4')
        self.ipv6test(False, ':6666:1.2.3.4')
        self.ipv6test(False, ':5555:6666:1.2.3.4')
        self.ipv6test(False, ':4444:5555:6666:1.2.3.4')
        self.ipv6test(False, ':3333:4444:5555:6666:1.2.3.4')
        self.ipv6test(False, ':2222:3333:4444:5555:6666:1.2.3.4')
        self.ipv6test(False, ':1111:2222:3333:4444:5555:6666:1.2.3.4')
        # :::
        self.ipv6test(False, ':::2222:3333:4444:5555:6666:1.2.3.4')
        self.ipv6test(False, '1111:::3333:4444:5555:6666:1.2.3.4')
        self.ipv6test(False, '1111:2222:::4444:5555:6666:1.2.3.4')
        self.ipv6test(False, '1111:2222:3333:::5555:6666:1.2.3.4')
        self.ipv6test(False, '1111:2222:3333:4444:::6666:1.2.3.4')
        self.ipv6test(False, '1111:2222:3333:4444:5555:::1.2.3.4')
        # Double ::
        self.ipv6test(False, '::2222::4444:5555:6666:1.2.3.4')
        self.ipv6test(False, '::2222:3333::5555:6666:1.2.3.4')
        self.ipv6test(False, '::2222:3333:4444::6666:1.2.3.4')
        self.ipv6test(False, '::2222:3333:4444:5555::1.2.3.4')
        self.ipv6test(False, '1111::3333::5555:6666:1.2.3.4')
        self.ipv6test(False, '1111::3333:4444::6666:1.2.3.4')
        self.ipv6test(False, '1111::3333:4444:5555::1.2.3.4')
        self.ipv6test(False, '1111:2222::4444::6666:1.2.3.4')
        self.ipv6test(False, '1111:2222::4444:5555::1.2.3.4')
        self.ipv6test(False, '1111:2222:3333::5555::1.2.3.4')
        # Missing parts
        self.ipv6test(False, '::.')
        self.ipv6test(False, '::..')
        self.ipv6test(False, '::...')
        self.ipv6test(False, '::1...')
        self.ipv6test(False, '::1.2..')
        self.ipv6test(False, '::1.2.3.')
        self.ipv6test(False, '::.2..')
        self.ipv6test(False, '::.2.3.')
        self.ipv6test(False, '::.2.3.4')
        self.ipv6test(False, '::..3.')
        self.ipv6test(False, '::..3.4')
        self.ipv6test(False, '::...4')
        # Extra : in front
        self.ipv6test(False, ':1111:2222:3333:4444:5555:6666:7777::')
        self.ipv6test(False, ':1111:2222:3333:4444:5555:6666::')
        self.ipv6test(False, ':1111:2222:3333:4444:5555::')
        self.ipv6test(False, ':1111:2222:3333:4444::')
        self.ipv6test(False, ':1111:2222:3333::')
        self.ipv6test(False, ':1111:2222::')
        self.ipv6test(False, ':1111::')
        self.ipv6test(False, ':::')
        self.ipv6test(False, ':1111:2222:3333:4444:5555:6666::8888')
        self.ipv6test(False, ':1111:2222:3333:4444:5555::8888')
        self.ipv6test(False, ':1111:2222:3333:4444::8888')
        self.ipv6test(False, ':1111:2222:3333::8888')
        self.ipv6test(False, ':1111:2222::8888')
        self.ipv6test(False, ':1111::8888')
        self.ipv6test(False, ':::8888')
        self.ipv6test(False, ':1111:2222:3333:4444:5555::7777:8888')
        self.ipv6test(False, ':1111:2222:3333:4444::7777:8888')
        self.ipv6test(False, ':1111:2222:3333::7777:8888')
        self.ipv6test(False, ':1111:2222::7777:8888')
        self.ipv6test(False, ':1111::7777:8888')
        self.ipv6test(False, ':::7777:8888')
        self.ipv6test(False, ':1111:2222:3333:4444::6666:7777:8888')
        self.ipv6test(False, ':1111:2222:3333::6666:7777:8888')
        self.ipv6test(False, ':1111:2222::6666:7777:8888')
        self.ipv6test(False, ':1111::6666:7777:8888')
        self.ipv6test(False, ':::6666:7777:8888')
        self.ipv6test(False, ':1111:2222:3333::5555:6666:7777:8888')
        self.ipv6test(False, ':1111:2222::5555:6666:7777:8888')
        self.ipv6test(False, ':1111::5555:6666:7777:8888')
        self.ipv6test(False, ':::5555:6666:7777:8888')
        self.ipv6test(False, ':1111:2222::4444:5555:6666:7777:8888')
        self.ipv6test(False, ':1111::4444:5555:6666:7777:8888')
        self.ipv6test(False, ':::4444:5555:6666:7777:8888')
        self.ipv6test(False, ':1111::3333:4444:5555:6666:7777:8888')
        self.ipv6test(False, ':::3333:4444:5555:6666:7777:8888')
        self.ipv6test(False, ':::2222:3333:4444:5555:6666:7777:8888')
        self.ipv6test(False, ':1111:2222:3333:4444:5555:6666:1.2.3.4')
        self.ipv6test(False, ':1111:2222:3333:4444:5555::1.2.3.4')
        self.ipv6test(False, ':1111:2222:3333:4444::1.2.3.4')
        self.ipv6test(False, ':1111:2222:3333::1.2.3.4')
        self.ipv6test(False, ':1111:2222::1.2.3.4')
        self.ipv6test(False, ':1111::1.2.3.4')
        self.ipv6test(False, ':::1.2.3.4')
        self.ipv6test(False, ':1111:2222:3333:4444::6666:1.2.3.4')
        self.ipv6test(False, ':1111:2222:3333::6666:1.2.3.4')
        self.ipv6test(False, ':1111:2222::6666:1.2.3.4')
        self.ipv6test(False, ':1111::6666:1.2.3.4')
        self.ipv6test(False, ':::6666:1.2.3.4')
        self.ipv6test(False, ':1111:2222:3333::5555:6666:1.2.3.4')
        self.ipv6test(False, ':1111:2222::5555:6666:1.2.3.4')
        self.ipv6test(False, ':1111::5555:6666:1.2.3.4')
        self.ipv6test(False, ':::5555:6666:1.2.3.4')
        self.ipv6test(False, ':1111:2222::4444:5555:6666:1.2.3.4')
        self.ipv6test(False, ':1111::4444:5555:6666:1.2.3.4')
        self.ipv6test(False, ':::4444:5555:6666:1.2.3.4')
        self.ipv6test(False, ':1111::3333:4444:5555:6666:1.2.3.4')
        self.ipv6test(False, ':::2222:3333:4444:5555:6666:1.2.3.4')
        # Extra : at end
        self.ipv6test(False, '1111:2222:3333:4444:5555:6666:7777:::')
        self.ipv6test(False, '1111:2222:3333:4444:5555:6666:::')
        self.ipv6test(False, '1111:2222:3333:4444:5555:::')
        self.ipv6test(False, '1111:2222:3333:4444:::')
        self.ipv6test(False, '1111:2222:3333:::')
        self.ipv6test(False, '1111:2222:::')
        self.ipv6test(False, '1111:::')
        self.ipv6test(False, ':::')
        self.ipv6test(False, '1111:2222:3333:4444:5555:6666::8888:')
        self.ipv6test(False, '1111:2222:3333:4444:5555::8888:')
        self.ipv6test(False, '1111:2222:3333:4444::8888:')
        self.ipv6test(False, '1111:2222:3333::8888:')
        self.ipv6test(False, '1111:2222::8888:')
        self.ipv6test(False, '1111::8888:')
        self.ipv6test(False, '::8888:')
        self.ipv6test(False, '1111:2222:3333:4444:5555::7777:8888:')
        self.ipv6test(False, '1111:2222:3333:4444::7777:8888:')
        self.ipv6test(False, '1111:2222:3333::7777:8888:')
        self.ipv6test(False, '1111:2222::7777:8888:')
        self.ipv6test(False, '1111::7777:8888:')
        self.ipv6test(False, '::7777:8888:')
        self.ipv6test(False, '1111:2222:3333:4444::6666:7777:8888:')
        self.ipv6test(False, '1111:2222:3333::6666:7777:8888:')
        self.ipv6test(False, '1111:2222::6666:7777:8888:')
        self.ipv6test(False, '1111::6666:7777:8888:')
        self.ipv6test(False, '::6666:7777:8888:')
        self.ipv6test(False, '1111:2222:3333::5555:6666:7777:8888:')
        self.ipv6test(False, '1111:2222::5555:6666:7777:8888:')
        self.ipv6test(False, '1111::5555:6666:7777:8888:')
        self.ipv6test(False, '::5555:6666:7777:8888:')
        self.ipv6test(False, '1111:2222::4444:5555:6666:7777:8888:')
        self.ipv6test(False, '1111::4444:5555:6666:7777:8888:')
        self.ipv6test(False, '::4444:5555:6666:7777:8888:')
        self.ipv6test(False, '1111::3333:4444:5555:6666:7777:8888:')
        self.ipv6test(False, '::3333:4444:5555:6666:7777:8888:')
        self.ipv6test(False, '::2222:3333:4444:5555:6666:7777:8888:')
        # Additional cases:
        # http://crisp.tweakblogs.net/blog/2031/ipv6-validation-%28and-caveats%29.html
        self.ipv6test(True, '0:a:b:c:d:e:f::')
        # syntactically correct, but bad form (::0:... could be combined)
        self.ipv6test(True, '::0:a:b:c:d:e:f')
        self.ipv6test(True, 'a:b:c:d:e:f:0::')
        self.ipv6test(False, "':10.0.0.1")
        # Known bugs with ipaddr v2.1.10 but works with ipaddress
        self.ipv6test(False, '02001:0000:1234:0000:0000:C1C0:ABCD:0876')
        self.ipv6test(False, '2001:0000:1234:0000:00001:C1C0:ABCD:0876')

    @unittest.expectedFailure
    def test_T76286a_failures(self):
        """Test known bugs in the ipaddress module."""
        # The following fail with the ipaddress module. See T76286
        self.ipv6test(False, '1111:2222:3333:4444:5555:6666:00.00.00.00')

    @unittest.expectedFailure
    def test_T76286b_failures(self):
        """Test known bugs in the ipaddress module."""
        self.ipv6test(False, '1111:2222:3333:4444:5555:6666:000.000.000.000')

    @expected_failure_if(PYTHON_VERSION >= (3, 8))
    def test_T240060_failures(self):
        """Test known deviated behaviour in Python 3.8."""
        # Testing IPv4 addresses represented as dotted-quads
        # Leading zero's in IPv4 addresses not allowed: some systems treat the
        # leading "0" in ".086" as the start of an octal number
        # Update: The BNF in RFC-3986 explicitly defines the dec-octet
        # (for IPv4 addresses) not to have a leading zero
        self.ipv6test(False,
                      'fe80:0000:0000:0000:0204:61ff:254.157.241.086')
if __name__ == '__main__':  # pragma: no cover
    try:
        unittest.main()
    except SystemExit:
        # unittest.main() reports its result via sys.exit(); swallow the
        # SystemExit so embedding/exec'ing environments keep running.
        pass
| |
"""
The LLVM Compiler Infrastructure
This file is distributed under the University of Illinois Open Source
License. See LICENSE.TXT for details.
Provides classes used by the test results reporting infrastructure
within the LLDB test suite.
This module provides process-management support for the LLDB test
running infrastructure.
"""
# System imports
import os
import re
import signal
import subprocess
import sys
import threading
class CommunicatorThread(threading.Thread):
    """Thread that runs communicate() against a child process.

    Popen.communicate() blocks until the child closes its stdout/stderr
    pipes; running it on a dedicated daemon thread lets the owner wait
    on the supplied event with a timeout instead of blocking forever.
    """

    def __init__(self, process, event, output_file):
        """
        @param process subprocess.Popen-like child to communicate with;
        only .pid and .communicate() are used here.
        @param event threading.Event set when communicate() finishes,
        whether it succeeded or raised.
        @param output_file sink for error reports: a file-like object
        or a bare callable accepting a string; may be None to suppress
        error reporting.
        """
        super(CommunicatorThread, self).__init__()
        # Don't let this thread prevent shutdown.
        self.daemon = True
        self.process = process
        self.pid = process.pid
        self.event = event
        self.output_file = output_file
        # (stdout, stderr) from communicate(), set on success.
        self.output = None

    def run(self):
        try:
            # Communicate with the child process.
            # This will not complete until the child process terminates.
            self.output = self.process.communicate()
        except Exception as exception:  # pylint: disable=broad-except
            if self.output_file:
                # BUG FIX: the original format string advertised a pid
                # but interpolated the exception into its place; report
                # both values.
                message = (
                    "exception while using communicate() for pid {}: "
                    "{}\n".format(self.pid, exception))
                # output_file may be a file-like object or a bare
                # callable (callers pass both forms); support either.
                write = getattr(self.output_file, "write", self.output_file)
                write(message)
        finally:
            # Signal that the thread's run is complete.
            self.event.set()
# Matches gtimeout-style duration specs: digits plus an optional
# s/m/h/d unit suffix.
TIMEOUT_REGEX = re.compile(r"(^\d+)([smhd])?$")


def timeout_to_seconds(timeout):
    """Convert a timeout/gtimeout duration spec into seconds.

    @param timeout a spec such as "30", "5s", "10m", "2h" or "1d";
    may be None.
    @return None when timeout is None, otherwise the duration as a
    float number of seconds.
    """
    if timeout is None:
        return None
    match = TIMEOUT_REGEX.match(timeout)
    if not match:
        raise Exception("could not parse TIMEOUT spec '{}'".format(
            timeout))
    value = float(match.group(1))
    units = match.group(2)
    # Scale factor into seconds; a missing unit means seconds.
    per_unit = {None: 1.0, 's': 1.0, 'm': 60.0,
                'h': 60.0 * 60.0, 'd': 24 * 60.0 * 60.0}
    if units not in per_unit:
        # Unreachable while the regex and this table agree; kept as a
        # defensive guard mirroring the original.
        raise Exception("unexpected units value '{}'".format(units))
    return value * per_unit[units]
class ProcessHelper(object):
    """Platform-abstracted access to process creation and termination.

    Obtain an instance through the process_helper() factory rather than
    instantiating a subclass directly; client code should restrict
    itself to the methods declared on this base class.

    @see ProcessHelper.process_helper()
    """

    def __init__(self):
        super(ProcessHelper, self).__init__()

    @classmethod
    def process_helper(cls):
        """Return the ProcessHelper implementation for this platform.

        @return a ProcessHelper instance that does the right thing for
        the current platform.
        """
        # New platforms get an instance created and returned here.
        if os.name == "nt":
            return WindowsProcessHelper()
        # Everything else is treated as POSIX-like.
        return UnixProcessHelper()

    def create_piped_process(self, command, new_process_group=True):
        # pylint: disable=no-self-use,unused-argument
        """Create a subprocess.Popen-like child with piped stdio.

        @param command the command line, in the list form accepted by
        subprocess.Popen() (not the string form).
        @param new_process_group whether the child should live in its
        own process group.  Each OS realizes this differently; it
        provides isolation and enables terminating the whole process
        tree cleanly.
        @return a subprocess.Popen-like object.
        """
        # Derived classes supply the platform-specific mechanics.
        raise Exception("derived class must implement")

    def supports_soft_terminate(self):
        # pylint: disable=no-self-use
        """Report whether this platform offers a soft terminate.

        A soft terminate politely asks the target to shut down, with
        the caveat that the target may choose to ignore the request.
        Platforms should only claim support when the target really can
        evade the request; otherwise only hard terminate applies.

        @return True if a soft terminate mechanism exists here.
        """
        # Conservative default: no soft terminate available.
        return False

    def soft_terminate(self, popen_process, log_file=None, want_core=True):
        # pylint: disable=no-self-use,unused-argument
        """Politely ask the child process to shut down.

        Gives the child a chance to clean up and exit on its own,
        possibly producing a core file or platform equivalent
        ([mini-]crashdump, crashlog, etc.).  When the process was
        created in its own process group and the platform supports it,
        the whole tree rooted at the child is targeted.

        @param popen_process object returned by one of this class's
        process-creation methods.
        @param log_file file-like sink for error-related logging, or
        None when no such output is desired.
        @param want_core True if the caller would like a core dump (or
        the analogous crash report) from the terminated process.
        """
        popen_process.terminate()

    def hard_terminate(self, popen_process, log_file=None):
        # pylint: disable=no-self-use,unused-argument
        """Kill the child process immediately.

        The child gets no opportunity to block the request or clean up.
        When new_process_group was used at creation time and the
        platform supports it, the whole tree rooted at the child is
        targeted.

        @param popen_process object returned by one of this class's
        process-creation methods.
        @param log_file file-like sink for error-related logging, or
        None when no such output is desired.
        """
        popen_process.kill()

    def was_soft_terminate(self, returncode, with_core):
        # pylint: disable=no-self-use,unused-argument
        """Report whether a return code matches a soft terminate.

        @param returncode the exit code of a terminated process.
        @param with_core whether the code should match the
        core-generating variant of the soft terminate.
        @return True when returncode is what the system produces for a
        soft_terminate() with the given with_core argument; False
        otherwise.
        """
        if self.supports_soft_terminate():
            # A platform claiming soft-terminate support must also be
            # able to recognize it by overriding this method.
            raise Exception("platform needs to implement")
        # Without soft-terminate support this can never be True.
        return False

    def was_hard_terminate(self, returncode):
        # pylint: disable=no-self-use,unused-argument
        """Report whether a return code matches a hard terminate.

        @param returncode the exit code of a terminated process.
        @return True when returncode is what the system produces for a
        hard_terminate(); False otherwise.
        """
        raise Exception("platform needs to implement")

    def soft_terminate_signals(self):
        # pylint: disable=no-self-use
        """List the signal numbers usable for a soft terminate.

        @return a list of signal numbers, or None when not applicable.
        """
        return None

    def is_exceptional_exit(self, popen_status):
        """Report whether a Popen exit status is exceptional.

        Exceptional means e.g. death by signal on POSIX systems.
        Derived classes override this when they can detect such exits.

        @return True if popen_status represents an exceptional program
        exit; False otherwise.
        """
        return False

    def exceptional_exit_details(self, popen_status):
        """Normalize an exceptional exit status into (code, description).

        For POSIX this would be the signal number and its name.  Not
        implementing this is fine as long as is_exceptional_exit()
        always returns False.

        @return (normalized exception code, symbolic description)
        """
        raise Exception("exception_exit_details() called on unsupported class")
class UnixProcessHelper(ProcessHelper):
    """ProcessHelper implementation for POSIX-like operating systems.

    Covers anything Unix-flavored: Darwin, Linux, the BSDs, etc.
    """

    def __init__(self):
        super(UnixProcessHelper, self).__init__()

    @classmethod
    def _create_new_process_group(cls):
        """Move the calling process into its own process group."""
        os.setpgid(os.getpid(), os.getpid())

    def create_piped_process(self, command, new_process_group=True):
        # Runs between fork and exec: create a process group on request.
        preexec_func = (self._create_new_process_group
                        if new_process_group else None)

        process = subprocess.Popen(
            command,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            universal_newlines=True,  # Elicits automatic byte -> string decoding in Py3
            close_fds=True,
            preexec_fn=preexec_func)

        # Record whether this child got its own process group so the
        # termination paths know whether to target the whole group.
        process.using_process_groups = new_process_group
        return process

    def supports_soft_terminate(self):
        # POSIX does support a soft terminate via:
        # * SIGTERM (no core requested)
        # * SIGQUIT (core requested if enabled, see ulimit -c)
        return True

    @classmethod
    def _validate_pre_terminate(cls, popen_process, log_file):
        """Return True when it makes sense to signal the child."""
        if popen_process is None:
            raise ValueError("popen_process is None")

        # Anything below pid 1 cannot be a real child process.
        if popen_process.pid < 1:
            if log_file:
                log_file.write("skipping soft_terminate(): no process id")
            return False

        # The liveness short-circuit only applies when not using
        # process groups: with groups, descendants may still be alive
        # even after the direct child exited, and a timeout must still
        # kill them.
        if not popen_process.using_process_groups:
            popen_process.poll()
            if popen_process.returncode is not None:
                # Already stopped; nothing to kill.
                if log_file:
                    log_file.write(
                        "requested to terminate pid {} but it has already "
                        "terminated, returncode {}".format(
                            popen_process.pid, popen_process.returncode))
                return False

        # Good to go.
        return True

    def _kill_with_signal(self, popen_process, log_file, signum):
        """Send signum to the child (or its whole process group)."""
        if not self._validate_pre_terminate(popen_process, log_file):
            return

        try:
            # Target the whole process group when we created one,
            # otherwise just the single process.
            if popen_process.using_process_groups:
                os.killpg(popen_process.pid, signum)
            else:
                os.kill(popen_process.pid, signum)
        except OSError as error:
            import errno
            if error.errno != errno.ESRCH:
                raise
            # No such process: it may already be gone, e.g. eliminated
            # by the timeout pre-kill hook.  Safe to ignore.

    def soft_terminate(self, popen_process, log_file=None, want_core=True):
        # SIGQUIT generates a core by default; SIGTERM is the
        # traditional polite kill without one.  Both can be caught.
        signum = signal.SIGQUIT if want_core else signal.SIGTERM
        self._kill_with_signal(popen_process, log_file, signum)

    def hard_terminate(self, popen_process, log_file=None):
        self._kill_with_signal(popen_process, log_file, signal.SIGKILL)

    def was_soft_terminate(self, returncode, with_core):
        expected = signal.SIGQUIT if with_core else signal.SIGTERM
        return returncode == -expected

    def was_hard_terminate(self, returncode):
        return returncode == -signal.SIGKILL

    def soft_terminate_signals(self):
        return [signal.SIGQUIT, signal.SIGTERM]

    def is_exceptional_exit(self, popen_status):
        # A negative Popen status means death by signal on POSIX.
        return popen_status < 0

    @classmethod
    def _signal_names_by_number(cls):
        """Map signal numbers to SIG* names (SIG_* aliases excluded)."""
        return {number: name
                for name, number in sorted(signal.__dict__.items(),
                                           reverse=True)
                if name.startswith('SIG') and not name.startswith('SIG_')}

    def exceptional_exit_details(self, popen_status):
        signo = -popen_status
        return (signo, self._signal_names_by_number().get(signo, ""))
class WindowsProcessHelper(ProcessHelper):
    """ProcessHelper implementation for Windows."""

    def __init__(self):
        super(WindowsProcessHelper, self).__init__()

    def create_piped_process(self, command, new_process_group=True):
        # CREATE_NEW_PROCESS_GROUP is required for os.kill() to be able
        # to target the subprocess later on.
        creation_flags = (subprocess.CREATE_NEW_PROCESS_GROUP
                          if new_process_group else 0)

        return subprocess.Popen(
            command,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            universal_newlines=True,  # Elicits automatic byte -> string decoding in Py3
            creationflags=creation_flags)

    def was_hard_terminate(self, returncode):
        # Any non-zero exit is treated as a hard termination here.
        return returncode != 0
class ProcessDriver(object):
    """Drives a child process, notifies on important events, and can timeout.

    Clients are expected to derive from this class and override the
    on_process_started and on_process_exited methods if they want to
    hook either of those.

    This class supports timing out the child process in a platform-agnostic
    way. The on_process_exited method is informed if the exit was natural
    or if it was due to a timeout.
    """

    def __init__(self, soft_terminate_timeout=10.0):
        """
        @param soft_terminate_timeout seconds to wait for a soft
        terminate to wrap up before escalating; consider a larger value
        when core dumps are generated and take a long time to write.
        """
        super(ProcessDriver, self).__init__()
        self.process_helper = ProcessHelper.process_helper()
        self.pid = None
        # Signaled by the communicator thread when the inferior dotest
        # process's communication is complete (i.e. it has finished).
        self.done_event = threading.Event()
        self.io_thread = None
        self.process = None
        # Number of seconds to wait for the soft terminate to
        # wrap up, before moving to more drastic measures.
        self.soft_terminate_timeout = soft_terminate_timeout
        # Number of seconds to wait for the hard terminate to
        # wrap up, before giving up on the io thread. This should
        # be fast.
        self.hard_terminate_timeout = 5.0
        self.returncode = None

    # =============================================
    # Methods for subclasses to override if desired.
    # =============================================

    def on_process_started(self):
        """Called right after the child process has been created."""
        pass

    def on_process_exited(self, command, output, was_timeout, exit_status):
        """Called once the child process has been reaped (or given up on).

        @param command the command line the child was started with.
        @param output the io thread's communicate() result, or None.
        @param was_timeout True when the exit was forced by a timeout.
        @param exit_status the child's return code (may be None when
        termination failed).
        """
        pass

    def on_timeout_pre_kill(self):
        """Called after the timeout interval elapses but before killing it.

        This method is added to enable derived classes the ability to do
        something to the process prior to it being killed. For example,
        this would be a good spot to run a program that samples the process
        to see what it was doing (or not doing).

        Do not attempt to reap the process (i.e. use wait()) in this method.
        That will interfere with the kill mechanism and return code processing.
        """
        pass

    def write(self, content):
        # pylint: disable=no-self-use
        # Intended - we want derived classes to be able to override
        # this and use any self state they may contain.
        """Emit driver diagnostics; defaults to stdout."""
        sys.stdout.write(content)

    # ==============================================================
    # Operations used to drive processes. Clients will want to call
    # one of these.
    # ==============================================================

    def run_command(self, command):
        """Run command to completion (no timeout), then notify."""
        # Start up the child process and the thread that does the
        # communication pump.
        self._start_process_and_io_thread(command)

        # Wait indefinitely for the child process to finish
        # communicating.  This indicates it has closed stdout/stderr
        # pipes and is done.
        self.io_thread.join()
        self.returncode = self.process.wait()
        if self.returncode is None:
            raise Exception(
                "no exit status available for pid {} after the "
                " inferior dotest.py should have completed".format(
                    self.process.pid))

        # Notify of non-timeout exit.
        self.on_process_exited(
            command,
            self.io_thread.output,
            False,
            self.returncode)

    def run_command_with_timeout(self, command, timeout, want_core):
        """Run command, killing it (with notification) on timeout.

        @param command the command line list for the child process.
        @param timeout a gtimeout-style spec; see timeout_to_seconds().
        @param want_core True to request a core dump when soft-killing.
        """
        # Figure out how many seconds our timeout description is requesting.
        timeout_seconds = timeout_to_seconds(timeout)

        # Start up the child process and the thread that does the
        # communication pump.
        self._start_process_and_io_thread(command)

        self._wait_with_timeout(timeout_seconds, command, want_core)

    # ================
    # Internal details.
    # ================

    def _start_process_and_io_thread(self, command):
        """Create the child process and start the communication thread."""
        self.process = self.process_helper.create_piped_process(command)
        self.pid = self.process.pid
        self.on_process_started()

        # Ensure the event is cleared that is used for signaling
        # from the communication() thread when communication is
        # complete (i.e. the inferior process has finished).
        self.done_event.clear()

        # BUG FIX: pass self (which has a write() method) rather than
        # the bound method self.write.  CommunicatorThread reports
        # errors via output_file.write(...), and a bound method has no
        # .write attribute.  This also matches how self is passed as
        # log_file to the terminate helpers below.
        self.io_thread = CommunicatorThread(
            self.process, self.done_event, self)
        self.io_thread.start()

    def _attempt_soft_kill(self, want_core):
        """Stage 1: ask the child nicely, then wait a bounded time.

        @return (terminated, done_trying).
        """
        # The inferior dotest timed out. Attempt to clean it
        # with a non-drastic method (so it can clean up properly
        # and/or generate a core dump). Often the OS can't guarantee
        # that the process will really terminate after this.
        self.process_helper.soft_terminate(
            self.process,
            want_core=want_core,
            log_file=self)

        # Now wait up to a certain timeout period for the io thread
        # to say that the communication ended. If that wraps up
        # within our soft terminate timeout, we're all done here.
        self.io_thread.join(self.soft_terminate_timeout)
        if not self.io_thread.is_alive():
            # stdout/stderr were closed on the child process side. We
            # should be able to wait and reap the child process here.
            self.returncode = self.process.wait()
            # We terminated, and the done_trying result is n/a
            terminated = True
            done_trying = None
        else:
            self.write("soft kill attempt of process {} timed out "
                       "after {} seconds\n".format(
                           self.process.pid, self.soft_terminate_timeout))
            terminated = False
            done_trying = False
        return terminated, done_trying

    def _attempt_hard_kill(self):
        """Stage 2: force termination; nothing else is tried after this.

        @return (terminated, done_trying) with done_trying always True.
        """
        # Instruct the process to terminate and really force it to
        # happen. Don't give the process a chance to ignore.
        self.process_helper.hard_terminate(
            self.process,
            log_file=self)

        # Reap the child process. This should not hang as the
        # hard_kill() mechanism is supposed to really kill it.
        # Improvement option:
        # If this does ever hang, convert to a self.process.poll()
        # loop checking on self.process.returncode until it is not
        # None or the timeout occurs.
        self.returncode = self.process.wait()

        # Wait a few moments for the io thread to finish...
        self.io_thread.join(self.hard_terminate_timeout)
        if self.io_thread.is_alive():
            # ... but this is not critical if it doesn't end for some
            # reason.
            self.write(
                "hard kill of process {} timed out after {} seconds waiting "
                "for the io thread (ignoring)\n".format(
                    self.process.pid, self.hard_terminate_timeout))

        # Set if it terminated. (Set up for optional improvement above).
        terminated = self.returncode is not None
        # Nothing else to try.
        done_trying = True

        return terminated, done_trying

    def _attempt_termination(self, attempt_count, want_core):
        """Dispatch one termination attempt based on platform support."""
        if self.process_helper.supports_soft_terminate():
            # When soft termination is supported, we first try to stop
            # the process with a soft terminate. Failing that, we try
            # the hard terminate option.
            if attempt_count == 1:
                return self._attempt_soft_kill(want_core)
            elif attempt_count == 2:
                return self._attempt_hard_kill()
            else:
                # We don't have anything else to try.
                terminated = self.returncode is not None
                done_trying = True
                return terminated, done_trying
        else:
            # We only try the hard terminate option when there
            # is no soft terminate available.
            if attempt_count == 1:
                return self._attempt_hard_kill()
            else:
                # We don't have anything else to try.
                terminated = self.returncode is not None
                done_trying = True
                return terminated, done_trying

    def _wait_with_timeout(self, timeout_seconds, command, want_core):
        """Wait for completion; escalate through kill attempts on timeout."""
        # Allow up to timeout seconds for the io thread to wrap up.
        # If that completes, the child process should be done.
        completed_normally = self.done_event.wait(timeout_seconds)
        if completed_normally:
            # Reap the child process here.
            self.returncode = self.process.wait()
        else:
            # Allow derived classes to do some work after we detected
            # a timeout but before we touch the timed-out process.
            self.on_timeout_pre_kill()

        # Prepare to stop the process
        process_terminated = completed_normally
        terminate_attempt_count = 0

        # Try as many attempts as we support for trying to shut down
        # the child process if it's not already shut down.
        while not process_terminated:
            terminate_attempt_count += 1
            # Attempt to terminate.
            process_terminated, done_trying = self._attempt_termination(
                terminate_attempt_count, want_core)
            # Check if there's nothing more to try.
            if done_trying:
                # Break out of our termination attempt loop.
                break

        # At this point, we're calling it good. The process
        # finished gracefully, was shut down after one or more
        # attempts, or we failed but gave it our best effort.
        self.on_process_exited(
            command,
            self.io_thread.output,
            not completed_normally,
            self.returncode)
def patched_init(self, *args, **kwargs):
    """Popen.__init__ replacement installed by patch_up_subprocess_popen().

    Runs the saved original initializer, then attaches the condition
    variable that patched_wait()/patched_poll() use to serialize access
    to the process's exit status.
    """
    self.original_init(*args, **kwargs)
    # Initialize our condition variable that protects wait()/poll().
    self.wait_condition = threading.Condition()
def patched_wait(self, *args, **kwargs):
    """Popen.wait replacement, serialized against poll() via the condition."""
    with self.wait_condition:
        result = self.original_wait(*args, **kwargs)
        # The process finished; wake anything blocked on the condition.
        self.wait_condition.notify_all()
        return result
def patched_poll(self, *args, **kwargs):
    """Popen.poll replacement, serialized against wait() via the condition."""
    with self.wait_condition:
        result = self.original_poll(*args, **kwargs)
        if self.returncode is not None:
            # The process completed and we have the return value; wake
            # anything blocked on the condition.
            self.wait_condition.notify_all()
        return result
def patch_up_subprocess_popen():
    """Install lock-protected wait()/poll() onto subprocess.Popen.

    The originals are saved as original_* attributes so the patched
    versions can delegate to them.  NOTE(review): this mutates
    subprocess.Popen globally for the whole interpreter -- every Popen
    created after the patch is affected.
    """
    subprocess.Popen.original_init = subprocess.Popen.__init__
    subprocess.Popen.__init__ = patched_init
    subprocess.Popen.original_wait = subprocess.Popen.wait
    subprocess.Popen.wait = patched_wait
    subprocess.Popen.original_poll = subprocess.Popen.poll
    subprocess.Popen.poll = patched_poll


# Replace key subprocess.Popen() threading-unprotected methods with
# threading-protected versions.
patch_up_subprocess_popen()
| |
"""
Room class and mechanics for the Evscaperoom.
"""
from evennia import DefaultRoom, DefaultCharacter, DefaultObject
from evennia import utils
from evennia.utils.ansi import strip_ansi
from evennia import logger
from evennia.locks.lockhandler import check_lockstring
from evennia.utils.utils import lazy_property, list_to_string
from .objects import EvscaperoomObject
from .commands import CmdSetEvScapeRoom
from .state import StateHandler
from .utils import create_fantasy_word
class EvscapeRoom(EvscaperoomObject, DefaultRoom):
    """
    The room to escape from.
    """

    def at_object_creation(self):
        """
        Called once, when the room is first created.
        """
        super().at_object_creation()

        # starting state
        self.db.state = None  # name
        self.db.prev_state = None

        # this is used for tagging of all objects belonging to this
        # particular room instance, so they can be cleaned up later
        # this is accessed through the .tagcategory getter.
        self.db.tagcategory = "evscaperoom_{}".format(self.key)

        # room progress statistics
        self.db.stats = {
            "progress": 0,  # in percent
            "score": {},  # reason: score
            "max_score": 100,
            "hints_used": 0,  # total across all states
            "hints_total": 41,
            "total_achievements": 14
        }

        self.cmdset.add(CmdSetEvScapeRoom, permanent=True)
        self.log("Room created and log started.")

    @lazy_property
    def statehandler(self):
        # Lazily created handler that drives the room's puzzle states.
        return StateHandler(self)

    @property
    def state(self):
        return self.statehandler.current_state

    def log(self, message, caller=None):
        """
        Log to a file specifically for this room.
        """
        # BUG FIX: the original used f"[caller.key]: ", which logged the
        # literal text "[caller.key]" instead of the caller's key.
        caller = f"{caller.key}: " if caller else ""
        logger.log_file(
            strip_ansi(f"{caller}{message.strip()}"),
            filename=self.tagcategory + ".log")

    def score(self, new_score, reason):
        """
        We don't score individually but for everyone in room together.
        You can only be scored for a given reason once."""
        if reason not in self.db.stats['score']:
            self.log(f"score: {reason} ({new_score}pts)")
            self.db.stats['score'][reason] = new_score

    def progress(self, new_progress):
        "Progress is what we set it to be (0-100%)"
        self.log(f"progress: {new_progress}%")
        self.db.stats['progress'] = new_progress

    def achievement(self, caller, achievement, subtext=""):
        """
        Give the caller a personal achievement. You will only
        ever get one of the same type.

        Args:
            caller (Object): The receiver of the achievement.
            achievement (str): The title/name of the achievement.
            subtext (str, optional): Eventual subtext/explanation
                of the achievement.
        """
        achievements = caller.attributes.get(
            "achievements", category=self.tagcategory)
        if not achievements:
            achievements = {}
        if achievement not in achievements:
            self.log(f"achievement: {caller} earned '{achievement}' - {subtext}")
            achievements[achievement] = subtext
            caller.attributes.add("achievements", achievements, category=self.tagcategory)

    def get_all_characters(self):
        """
        Get the player characters in the room.

        Returns:
            chars (Queryset): The characters.
        """
        return DefaultCharacter.objects.filter_family(db_location=self)

    def set_flag(self, flagname):
        # assumes self.db.flags dict is initialized elsewhere (e.g. by
        # state setup) -- TODO confirm.
        self.db.flags[flagname] = True

    def unset_flag(self, flagname):
        if flagname in self.db.flags:
            del self.db.flags[flagname]

    def check_flag(self, flagname):
        return self.db.flags.get(flagname, False)

    def check_perm(self, caller, permission):
        # Evaluate the permission against a dummy lockstring.
        return check_lockstring(caller, f"dummy:perm({permission})")

    def tag_character(self, character, tag, category=None):
        """
        Tag a given character in this room.

        Args:
            character (Character): Player character to tag.
            tag (str): Tag to set.
            category (str, optional): Tag-category. If unset, use room's
                tagcategory.
        """
        category = category if category else self.db.tagcategory
        character.tags.add(tag, category=category)

    def tag_all_characters(self, tag, category=None):
        """
        Set a given tag on all players in the room.

        Args:
            tag (str): The tag to set.
            category (str, optional): If unset, will use the room's
                tagcategory.
        """
        category = category if category else self.tagcategory
        for char in self.get_all_characters():
            char.tags.add(tag, category=category)

    def character_cleanup(self, char):
        """
        Clean all custom tags/attrs on a character.
        """
        if self.tagcategory:
            char.tags.remove(category=self.tagcategory)
            char.attributes.remove(category=self.tagcategory)

    def character_exit(self, char):
        """
        Have a character exit the room - return them to the room menu.
        """
        self.log(f"EXIT: {char} left room")
        from .menu import run_evscaperoom_menu
        self.character_cleanup(char)
        char.location = char.home
        # check if room should be deleted
        if len(self.get_all_characters()) < 1:
            self.delete()
        # we must run menu after deletion so we don't include this room!
        run_evscaperoom_menu(char)

    # Evennia hooks

    def at_object_receive(self, moved_obj, source_location):
        """
        Called when an object arrives in the room. This can be used to
        sum up the situation, set tags etc.
        """
        if moved_obj.is_superuser:
            string = ("|rWARNING: You are playing as superuser. Consider |wquell|ring if "
                      "you want to \nexperience the game normally.|n")
            string = "-" * 78 + "\n" + string + "\n" + "-" * 78
            moved_obj.msg(string)
        else:
            # quell user
            if moved_obj.account:
                moved_obj.account.execute_cmd("quell")
                moved_obj.msg("(Auto-quelling while in room)")
        if utils.inherits_from(moved_obj, "evennia.objects.objects.DefaultCharacter"):
            self.log(f"JOIN: {moved_obj} joined room")
            self.state.character_enters(moved_obj)

    def at_object_leave(self, moved_obj, target_location, **kwargs):
        """
        Called when an object leaves the room; if this is a Character we need
        to clean them up and move them to the menu state.
        """
        if utils.inherits_from(moved_obj, "evennia.objects.objects.DefaultCharacter"):
            self.character_cleanup(moved_obj)
            if len(self.get_all_characters()) <= 1:
                # after this move there'll be no more characters in the room - delete the room!
                # let the garbage-collection script remove this later
                self.db.deleting = True
                # logger.log_info("DEBUG: Don't delete room when last player leaving")
            if moved_obj.account:
                moved_obj.account.execute_cmd("unquell")

    def delete(self):
        """
        Delete this room and all items related to it. Only move the players.
        """
        for char in self.get_all_characters():
            self.character_exit(char)
        for obj in self.contents:
            obj.delete()
        self.log("END: Room cleaned up and deleted")
        return super().delete()

    def return_appearance(self, looker, **kwargs):
        obj, pos = self.get_position(looker)
        pos = (f"\n|x[{self.position_prep_map[pos]} on "
               f"{obj.get_display_name(looker)}]|n" if obj else "")
        admin_only = ""
        if self.check_perm(looker, "Admin"):
            # only for admins
            objs = DefaultObject.objects.filter_family(
                db_location=self).exclude(id=looker.id)
            admin_only = "\n|xAdmin only: " + \
                list_to_string([obj.get_display_name(looker) for obj in objs])
        return f"{self.db.desc}{pos}{admin_only}"
| |
# Copyright (c) 2015, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, absolute_import
import sys
import os
import json
import click
import hashlib
import cProfile
import StringIO
import pstats
import frappe
import frappe.utils
from frappe.utils import cint
from distutils.spawn import find_executable
from functools import wraps
click.disable_unicode_literals_warning = True
def pass_context(f):
    """Decorator wrapping a click command to receive frappe's context.

    Converts the click context's obj dict into a frappe._dict before
    calling *f*, and, when the global profile flag is set, runs the
    command under cProfile and prints aggregated stats afterwards.
    """
    @wraps(f)
    def _func(ctx, *args, **kwargs):
        profile = ctx.obj['profile']
        if profile:
            pr = cProfile.Profile()
            pr.enable()

        # NOTE(review): if f() raises while profiling, the profiler is
        # never disabled and no stats are printed -- confirm this is
        # acceptable before changing.
        ret = f(frappe._dict(ctx.obj), *args, **kwargs)

        if profile:
            pr.disable()
            s = StringIO.StringIO()
            # Sort hot spots by cumulative time first.
            ps = pstats.Stats(pr, stream=s)\
                .sort_stats('cumtime', 'tottime', 'ncalls')
            ps.print_stats()
            print s.getvalue()

        return ret
    return click.pass_context(_func)
def get_single_site(context):
    """Return the single selected site, or print an error and exit(1).

    Used by commands that operate on exactly one site; any other
    selection count (zero or several) is rejected.
    """
    if not context.sites or not len(context.sites) == 1:
        print 'please select a site'
        sys.exit(1)
    site = context.sites[0]
    return site
def call_command(cmd, context):
    """Invoke the click command *cmd*, forwarding the given context obj."""
    command_context = click.Context(cmd, obj=context)
    return command_context.forward(cmd)
@click.command('new-site')
@click.argument('site')
@click.option('--db-name', help='Database name')
@click.option('--mariadb-root-username', default='root', help='Root username for MariaDB')
@click.option('--mariadb-root-password', help='Root password for MariaDB')
@click.option('--admin-password', help='Administrator password for new site', default=None)
@click.option('--verbose', is_flag=True, default=False, help='Verbose')
@click.option('--force', help='Force restore if site/database already exists', is_flag=True, default=False)
@click.option('--source_sql', help='Initiate database with a SQL file')
@click.option('--install-app', multiple=True, help='Install app after installation')
def new_site(site, mariadb_root_username=None, mariadb_root_password=None, admin_password=None, verbose=False, install_apps=None, source_sql=None, force=None, install_app=None, db_name=None):
    # NOTE(review): `install_apps` has no matching click option (only
    # --install-app feeds `install_app`), so it is always None when this
    # is invoked via the CLI -- confirm before removing it.
    "Install a new site"
    if not db_name:
        # Derive a short, deterministic database name from the site name.
        db_name = hashlib.sha1(site).hexdigest()[:10]
    frappe.init(site=site, new_site=True)
    _new_site(db_name, site, mariadb_root_username=mariadb_root_username, mariadb_root_password=mariadb_root_password, admin_password=admin_password, verbose=verbose, install_apps=install_app, source_sql=source_sql, force=force)
    if len(frappe.utils.get_sites()) == 1:
        # presumably switches the bench's current site to the new one;
        # `use` is defined elsewhere in this module -- verify.
        use(site)
def _new_site(db_name, site, mariadb_root_username=None, mariadb_root_password=None, admin_password=None, verbose=False, install_apps=None, source_sql=None,force=False, reinstall=False):
    "Install a new Frappe site"
    # Shared worker behind `new-site`, `restore` and `reinstall`:
    # creates the database, the site directories, installs frappe plus any
    # requested apps, then restores the scheduler state.
    from frappe.installer import install_db, make_site_dirs
    from frappe.installer import install_app as _install_app
    import frappe.utils.scheduler
    frappe.init(site=site)
    try:
        # enable scheduler post install?
        enable_scheduler = _is_scheduler_enabled()
    except:
        enable_scheduler = False
    install_db(root_login=mariadb_root_username, root_password=mariadb_root_password, db_name=db_name, admin_password=admin_password, verbose=verbose, source_sql=source_sql,force=force, reinstall=reinstall)
    make_site_dirs()
    # frappe itself is always installed first; set_as_patched is skipped when
    # restoring from SQL so patches re-run against the imported data
    _install_app("frappe", verbose=verbose, set_as_patched=not source_sql)
    # apps requested via site_config.json
    if frappe.conf.get("install_apps"):
        for app in frappe.conf.install_apps:
            _install_app(app, verbose=verbose, set_as_patched=not source_sql)
    # apps requested on the command line
    if install_apps:
        for app in install_apps:
            _install_app(app, verbose=verbose, set_as_patched=not source_sql)
    frappe.utils.scheduler.toggle_scheduler(enable_scheduler)
    scheduler_status = "disabled" if frappe.utils.scheduler.is_scheduler_disabled() else "enabled"
    print "*** Scheduler is", scheduler_status, "***"
    frappe.destroy()
def _is_scheduler_enabled():
    # Return True if the site's System Settings enable the scheduler.
    # Best effort: any failure (e.g. site not installed yet) reports False.
    enable_scheduler = False
    try:
        frappe.connect()
        # BUGFIX: was a bare `except:` which also swallowed SystemExit /
        # KeyboardInterrupt; `bool(...)` replaces the old `and True or False`.
        enable_scheduler = bool(cint(frappe.db.get_single_value("System Settings", "enable_scheduler")))
    except Exception:
        pass
    finally:
        # guard: connect() may have failed before a db handle existed
        if frappe.db:
            frappe.db.close()
    return enable_scheduler
@click.command('restore')
@click.argument('sql-file-path')
@click.option('--mariadb-root-username', default='root', help='Root username for MariaDB')
@click.option('--mariadb-root-password', help='Root password for MariaDB')
@click.option('--db-name', help='Database name for site in case it is a new one')
@click.option('--admin-password', help='Administrator password for new site')
@click.option('--install-app', multiple=True, help='Install app after installation')
@pass_context
def restore(context, sql_file_path, mariadb_root_username=None, mariadb_root_password=None, db_name=None, verbose=None, install_app=None, admin_password=None, force=None):
    "Restore site database from an sql file"
    # NOTE(review): the `verbose` and `force` parameters are unused; the
    # values actually forwarded below come from the shared click context.
    site = get_single_site(context)
    frappe.init(site=site)
    # prefer explicit --db-name, then the configured one, else derive it
    db_name = db_name or frappe.conf.db_name or hashlib.sha1(site).hexdigest()[:10]
    _new_site(db_name, site, mariadb_root_username=mariadb_root_username, mariadb_root_password=mariadb_root_password, admin_password=admin_password, verbose=context.verbose, install_apps=install_app, source_sql=sql_file_path, force=context.force)
@click.command('reinstall')
@pass_context
def reinstall(context):
    "Reinstall site ie. wipe all data and start over"
    site = get_single_site(context)
    try:
        frappe.init(site=site)
        frappe.connect()
        frappe.clear_cache()
        # remember which apps were installed so they are reinstalled below
        installed = frappe.get_installed_apps()
        frappe.clear_cache()
    except Exception:
        # site may be broken or not installed; proceed with a bare reinstall
        installed = []
    finally:
        if frappe.db:
            frappe.db.close()
        frappe.destroy()
    frappe.init(site=site)
    _new_site(frappe.conf.db_name, site, verbose=context.verbose, force=True, reinstall=True, install_apps=installed)
@click.command('install-app')
@click.argument('app')
@pass_context
def install_app(context, app):
    "Install a new app to site"
    from frappe.installer import install_app as _install_app
    for site in context.sites:
        frappe.init(site=site)
        frappe.connect()
        try:
            _install_app(app, verbose=context.verbose)
        finally:
            # always tear down the site context, even if the install failed
            frappe.destroy()
@click.command('list-apps')
@pass_context
def list_apps(context):
"Reinstall site ie. wipe all data and start over"
site = get_single_site(context)
frappe.init(site=site)
frappe.connect()
print "\n".join(frappe.get_installed_apps())
frappe.destroy()
@click.command('add-system-manager')
@click.argument('email')
@click.option('--first-name')
@click.option('--last-name')
@pass_context
def add_system_manager(context, email, first_name, last_name):
    "Add a new system manager to a site"
    import frappe.utils.user
    for site in context.sites:
        # NOTE(review): unlike sibling commands this calls connect(site=site)
        # without a preceding frappe.init(site=...) -- confirm connect()
        # performs the init itself.
        frappe.connect(site=site)
        try:
            frappe.utils.user.add_system_manager(email, first_name, last_name)
            frappe.db.commit()
        finally:
            frappe.destroy()
@click.command('migrate')
@click.option('--rebuild-website', help="Rebuild webpages after migration")
@pass_context
def migrate(context, rebuild_website=False):
"Run patches, sync schema and rebuild files/translations"
import frappe.modules.patch_handler
import frappe.model.sync
from frappe.utils.fixtures import sync_fixtures
import frappe.translate
from frappe.desk.notifications import clear_notifications
for site in context.sites:
print 'Migrating', site
frappe.init(site=site)
frappe.connect()
try:
prepare_for_update()
# run patches
frappe.modules.patch_handler.run_all()
# sync
frappe.model.sync.sync_all(verbose=context.verbose)
frappe.translate.clear_cache()
sync_fixtures()
clear_notifications()
finally:
frappe.publish_realtime("version-update")
frappe.destroy()
if rebuild_website:
call_command(build_website, context)
else:
call_command(sync_www, context)
def prepare_for_update():
    # Drop the cross-site global cache before running patches/sync.
    from frappe.sessions import clear_global_cache
    clear_global_cache()
@click.command('run-patch')
@click.argument('module')
@pass_context
def run_patch(context, module):
    "Run a particular patch"
    import frappe.modules.patch_handler
    for site in context.sites:
        frappe.init(site=site)
        try:
            frappe.connect()
            # context.force re-runs the patch even if already executed
            frappe.modules.patch_handler.run_single(module, force=context.force)
        finally:
            frappe.destroy()
@click.command('reload-doc')
@click.argument('module')
@click.argument('doctype')
@click.argument('docname')
@pass_context
def reload_doc(context, module, doctype, docname):
    "Reload schema for a DocType"
    for site in context.sites:
        try:
            frappe.init(site=site)
            frappe.connect()
            frappe.reload_doc(module, doctype, docname, force=context.force)
            frappe.db.commit()
        finally:
            # destroy() runs even when init/connect raised
            frappe.destroy()
@click.command('build')
@click.option('--make-copy', is_flag=True, default=False, help='Copy the files instead of symlinking')
@click.option('--verbose', is_flag=True, default=False, help='Verbose')
def build(make_copy=False, verbose=False):
    "Minify + concatenate JS and CSS files, build translations"
    import frappe.build
    import frappe
    # site-agnostic command: init with an empty site
    frappe.init('')
    frappe.build.bundle(False, make_copy=make_copy, verbose=verbose)
@click.command('watch')
def watch():
    "Watch and concatenate JS and CSS files as and when they change"
    import frappe.build
    # site-agnostic command: init with an empty site
    frappe.init('')
    frappe.build.watch(True)
@click.command('clear-cache')
@pass_context
def clear_cache(context):
    "Clear cache, doctype cache and defaults"
    import frappe.sessions
    import frappe.website.render
    from frappe.desk.notifications import clear_notifications
    for site in context.sites:
        try:
            frappe.connect(site)
            frappe.clear_cache()
            clear_notifications()
            frappe.website.render.clear_cache()
        finally:
            frappe.destroy()
@click.command('clear-website-cache')
@pass_context
def clear_website_cache(context):
    "Clear website cache"
    import frappe.website.render
    for site_name in context.sites:
        try:
            # init/connect stay inside try so destroy() runs on any failure
            frappe.init(site=site_name)
            frappe.connect()
            frappe.website.render.clear_cache()
        finally:
            frappe.destroy()
@click.command('destroy-all-sessions')
@pass_context
def destroy_all_sessions(context):
    "Clear sessions of all users (logs them out)"
    import frappe.sessions
    for site in context.sites:
        try:
            frappe.init(site=site)
            frappe.connect()
            frappe.sessions.clear_all_sessions()
            frappe.db.commit()
        finally:
            frappe.destroy()
@click.command('sync-www')
@click.option('--force', help='Rebuild all pages', is_flag=True, default=False)
@pass_context
def sync_www(context, force=False):
    "Sync files from static pages from www directory to Web Pages"
    from frappe.website import statics
    for site in context.sites:
        try:
            frappe.init(site=site)
            frappe.connect()
            # --force rebuilds every page instead of only changed ones
            statics.sync_statics(rebuild=force)
            frappe.db.commit()
        finally:
            frappe.destroy()
@click.command('build-website')
@pass_context
def build_website(context):
    "Sync statics and clear cache"
    from frappe.website import render, statics
    for site in context.sites:
        try:
            frappe.init(site=site)
            frappe.connect()
            render.clear_cache()
            # True: rebuild (see statics.sync API for the flag's meaning)
            statics.sync(verbose=context.verbose).start(True)
            frappe.db.commit()
        finally:
            frappe.destroy()
@click.command('make-docs')
@pass_context
@click.argument('app')
@click.argument('docs_version')
def make_docs(context, app, docs_version):
    "Setup docs in target folder of target app"
    from frappe.utils.setup_docs import setup_docs
    for site in context.sites:
        try:
            frappe.init(site=site)
            frappe.connect()
            make = setup_docs(app)
            make.build(docs_version)
        finally:
            frappe.destroy()
@click.command('sync-docs')
@pass_context
@click.argument('app')
def sync_docs(context, app):
    "Sync docs from /docs folder into the database (Web Page)"
    from frappe.utils.setup_docs import setup_docs
    for site in context.sites:
        try:
            frappe.init(site=site)
            frappe.connect()
            make = setup_docs(app)
            make.sync_docs()
        finally:
            frappe.destroy()
@click.command('write-docs')
@pass_context
@click.argument('app')
@click.argument('target')
@click.option('--local', default=False, is_flag=True, help='Run app locally')
def write_docs(context, app, target, local=False):
    "Setup docs in target folder of target app"
    from frappe.utils.setup_docs import setup_docs
    for site in context.sites:
        try:
            frappe.init(site=site)
            frappe.connect()
            make = setup_docs(app)
            make.make_docs(target, local)
        finally:
            frappe.destroy()
@click.command('build-docs')
@pass_context
@click.argument('app')
@click.option('--docs-version', default='current')
@click.option('--target', default=None)
@click.option('--local', default=False, is_flag=True, help='Run app locally')
@click.option('--watch', default=False, is_flag=True, help='Watch for changes and rewrite')
def build_docs(context, app, docs_version="current", target=None, local=False, watch=False):
    "Setup docs in target folder of target app"
    from frappe.utils import watch as start_watch
    if not target:
        # default target: ../docs/<app> relative to the working directory
        target = os.path.abspath(os.path.join("..", "docs", app))
    for site in context.sites:
        _build_docs_once(site, app, docs_version, target, local)
        if watch:
            # NOTE(review): trigger_make closes over the loop variable `site`
            # (late binding) -- with several sites the watcher only rebuilds
            # for the current/last one.  start_watch blocks, so with --watch
            # later sites are never reached anyway; confirm intent.
            def trigger_make(source_path, event_type):
                if "/templates/autodoc/" in source_path:
                    _build_docs_once(site, app, docs_version, target, local)
                elif ("/docs.css" in source_path
                    or "/docs/" in source_path
                    or "docs.py" in source_path):
                    _build_docs_once(site, app, docs_version, target, local, only_content_updated=True)
            apps_path = frappe.get_app_path("frappe", "..", "..")
            start_watch(apps_path, handler=trigger_make)
def _build_docs_once(site, app, docs_version, target, local, only_content_updated=False):
    # One full (or content-only) docs build for `app` on `site`.
    from frappe.utils.setup_docs import setup_docs
    try:
        frappe.init(site=site)
        frappe.connect()
        docs_maker = setup_docs(app)
        if not only_content_updated:
            # full rebuild: regenerate and sync before writing out
            docs_maker.build(docs_version)
            docs_maker.sync_docs()
        docs_maker.make_docs(target, local)
    finally:
        frappe.destroy()
@click.command('reset-perms')
@pass_context
def reset_perms(context):
    "Reset permissions for all doctypes"
    from frappe.permissions import reset_perms
    for site in context.sites:
        try:
            frappe.init(site=site)
            frappe.connect()
            # only standard, non-child doctypes
            for d in frappe.db.sql_list("""select name from `tabDocType`
                where istable=0 and custom=0"""):
                frappe.clear_cache(doctype=d)
                reset_perms(d)
        finally:
            frappe.destroy()
@click.command('execute')
@click.argument('method')
@click.option('--args')
@click.option('--kwargs')
@pass_context
def execute(context, method, args=None, kwargs=None):
"execute a function"
for site in context.sites:
try:
frappe.init(site=site)
frappe.connect()
if args:
args = eval(args)
else:
args = ()
if kwargs:
kwargs = eval(args)
else:
kwargs = {}
ret = frappe.get_attr(method)(*args, **kwargs)
if frappe.db:
frappe.db.commit()
finally:
frappe.destroy()
if ret:
print json.dumps(ret)
@click.command('celery')
@click.argument('args')
def celery(args):
    "Run a celery command"
    python = sys.executable
    # replace the current process with the celery app entry point
    os.execv(python, [python, "-m", "frappe.celery_app"] + args.split())
@click.command('trigger-scheduler-event')
@click.argument('event')
@pass_context
def trigger_scheduler_event(context, event):
    "Trigger a scheduler event"
    import frappe.utils.scheduler
    for site in context.sites:
        try:
            frappe.init(site=site)
            frappe.connect()
            # --force maps to now=True, i.e. run immediately
            frappe.utils.scheduler.trigger(site, event, now=context.force)
        finally:
            frappe.destroy()
@click.command('enable-scheduler')
@pass_context
def enable_scheduler(context):
    "Enable scheduler"
    import frappe.utils.scheduler
    for site in context.sites:
        try:
            frappe.init(site=site)
            frappe.connect()
            frappe.utils.scheduler.enable_scheduler()
            frappe.db.commit()
            print "Enabled for", site
        finally:
            frappe.destroy()
@click.command('disable-scheduler')
@pass_context
def disable_scheduler(context):
    "Disable scheduler"
    import frappe.utils.scheduler
    for site in context.sites:
        try:
            frappe.init(site=site)
            frappe.connect()
            frappe.utils.scheduler.disable_scheduler()
            frappe.db.commit()
            print "Disabled for", site
        finally:
            frappe.destroy()
@click.command('export-doc')
@click.argument('doctype')
@click.argument('docname')
@pass_context
def export_doc(context, doctype, docname):
    "Export a single document to csv"
    import frappe.modules
    for site in context.sites:
        try:
            frappe.init(site=site)
            frappe.connect()
            frappe.modules.export_doc(doctype, docname)
        finally:
            frappe.destroy()
@click.command('export-json')
@click.argument('doctype')
@click.argument('name')
@click.argument('path')
@pass_context
def export_json(context, doctype, name, path):
    "Export doclist as json to the given path, use '-' as name for Singles."
    from frappe.core.page.data_import_tool import data_import_tool
    for site in context.sites:
        try:
            frappe.init(site=site)
            frappe.connect()
            data_import_tool.export_json(doctype, path, name=name)
        finally:
            frappe.destroy()
@click.command('export-csv')
@click.argument('doctype')
@click.argument('path')
@pass_context
def export_csv(context, doctype, path):
    "Dump DocType as csv"
    from frappe.core.page.data_import_tool import data_import_tool
    for site in context.sites:
        try:
            frappe.init(site=site)
            frappe.connect()
            data_import_tool.export_csv(doctype, path)
        finally:
            frappe.destroy()
@click.command('export-fixtures')
@pass_context
def export_fixtures(context):
    "export fixtures"
    from frappe.utils.fixtures import export_fixtures
    for site in context.sites:
        try:
            frappe.init(site=site)
            frappe.connect()
            export_fixtures()
        finally:
            frappe.destroy()
@click.command('import-doc')
@click.argument('path')
@pass_context
def import_doc(context, path, force=False):
    "Import (insert/update) doclist. If the argument is a directory, all files ending with .json are imported"
    # NOTE(review): the `force` parameter is unused; overwrite comes from
    # the shared click context below.
    from frappe.core.page.data_import_tool import data_import_tool
    for site in context.sites:
        try:
            frappe.init(site=site)
            frappe.connect()
            data_import_tool.import_doc(path, overwrite=context.force)
        finally:
            frappe.destroy()
@click.command('import-csv')
@click.argument('path')
@click.option('--only-insert', default=False, is_flag=True, help='Do not overwrite existing records')
@click.option('--submit-after-import', default=False, is_flag=True, help='Submit document after importing it')
@click.option('--ignore-encoding-errors', default=False, is_flag=True, help='Ignore encoding errors while coverting to unicode')
@pass_context
def import_csv(context, path, only_insert=False, submit_after_import=False, ignore_encoding_errors=False):
    "Import CSV using data import tool"
    from frappe.core.page.data_import_tool import importer
    from frappe.utils.csvutils import read_csv_content
    site = get_single_site(context)
    # read and parse the CSV before touching the site
    with open(path, 'r') as csvfile:
        content = read_csv_content(csvfile.read())
    frappe.init(site=site)
    frappe.connect()
    try:
        importer.upload(content, submit_after_import=submit_after_import,
            ignore_encoding_errors=ignore_encoding_errors, overwrite=not only_insert,
            via_console=True)
        frappe.db.commit()
    except Exception:
        # report the failure but exit cleanly; destroy below still runs
        print frappe.get_traceback()
    frappe.destroy()
@click.command('bulk-rename')
@click.argument('doctype')
@click.argument('path')
@pass_context
def _bulk_rename(context, doctype, path):
    "Rename multiple records via CSV file"
    from frappe.model.rename_doc import bulk_rename
    from frappe.utils.csvutils import read_csv_content
    site = get_single_site(context)
    # parse the CSV first; each row drives one rename
    with open(path, 'r') as csvfile:
        rows = read_csv_content(csvfile.read())
    frappe.init(site=site)
    frappe.connect()
    bulk_rename(doctype, rows, via_console = True)
    frappe.destroy()
# translation
@click.command('build-message-files')
@pass_context
def build_message_files(context):
    "Build message files for translation"
    import frappe.translate
    for site in context.sites:
        try:
            frappe.init(site=site)
            frappe.connect()
            frappe.translate.rebuild_all_translation_files()
        finally:
            frappe.destroy()
@click.command('new-language') #, help="Create lang-code.csv for given app")
@pass_context
@click.argument('lang_code') #, help="Language code eg. en")
@click.argument('app') #, help="App name eg. frappe")
def new_language(context, lang_code, app):
    """Create lang-code.csv for given app"""
    import frappe.translate
    # dict-style access works because pass_context hands in a frappe._dict
    if not context['sites']:
        raise Exception('--site is required')
    # init site
    frappe.connect(site=context['sites'][0])
    frappe.translate.write_translations_file(app, lang_code)
    print "File created at ./apps/{app}/{app}/translations/{lang_code}.csv".format(app=app, lang_code=lang_code)
    print "You will need to add the language in frappe/data/languages.txt, if you haven't done it already."
@click.command('get-untranslated')
@click.argument('lang')
@click.argument('untranslated_file')
@click.option('--all', default=False, is_flag=True, help='Get all message strings')
@pass_context
def get_untranslated(context, lang, untranslated_file, all=None):
    "Get untranslated strings for language"
    # NOTE: option name `all` shadows the builtin; forwarded as get_all
    import frappe.translate
    site = get_single_site(context)
    try:
        frappe.init(site=site)
        frappe.connect()
        frappe.translate.get_untranslated(lang, untranslated_file, get_all=all)
    finally:
        frappe.destroy()
@click.command('update-translations')
@click.argument('lang')
@click.argument('untranslated_file')
@click.argument('translated-file')
@pass_context
def update_translations(context, lang, untranslated_file, translated_file):
    "Update translated strings"
    import frappe.translate
    site = get_single_site(context)
    try:
        frappe.init(site=site)
        frappe.connect()
        frappe.translate.update_translations(lang, untranslated_file, translated_file)
    finally:
        frappe.destroy()
@click.command('import-translations')
@click.argument('lang')
@click.argument('path')
@pass_context
def import_translations(context, lang, path):
    "Import translations from file"
    # BUGFIX: docstring (click help text) was copy-pasted from
    # `update-translations` and described the wrong command.
    import frappe.translate
    site = get_single_site(context)
    try:
        frappe.init(site=site)
        frappe.connect()
        frappe.translate.import_translations(lang, path)
    finally:
        frappe.destroy()
@click.command('set-admin-password')
@click.argument('admin-password')
@pass_context
def set_admin_password(context, admin_password):
    "Set Administrator password for a site"
    import getpass
    for site in context.sites:
        try:
            frappe.init(site=site)
            # prompt if the argument was empty
            while not admin_password:
                admin_password = getpass.getpass("Administrator's password for {0}: ".format(site))
            frappe.connect()
            frappe.db.sql("""update __Auth set `password`=password(%s)
                where user='Administrator'""", (admin_password,))
            frappe.db.commit()
            # reset so the next site prompts for its own password
            admin_password = None
        finally:
            frappe.destroy()
@click.command('mysql')
@pass_context
def mysql(context):
    "Start Mariadb console for a site"
    site = get_single_site(context)
    frappe.init(site=site)
    msq = find_executable('mysql')
    # NOTE(review): the db password is passed on the command line (visible in
    # the process list); replaces the current process via execv
    os.execv(msq, [msq, '-u', frappe.conf.db_name, '-p'+frappe.conf.db_password, frappe.conf.db_name, '-h', frappe.conf.db_host or "localhost", "-A"])
@click.command('console')
@pass_context
def console(context):
    "Start ipython console for a site"
    site = get_single_site(context)
    frappe.init(site=site)
    frappe.connect()
    frappe.local.lang = frappe.db.get_default("lang")
    import IPython
    IPython.embed()
@click.command('run-tests')
@click.option('--app')
@click.option('--doctype')
@click.option('--test', multiple=True)
@click.option('--driver')
@click.option('--module')
@pass_context
def run_tests(context, app=None, module=None, doctype=None, test=(), driver=None):
    "Run tests"
    import frappe.test_runner
    from frappe.utils import sel
    tests = test
    site = get_single_site(context)
    frappe.init(site=site)
    # NOTE(review): `and False` deliberately(?) disables selenium start-up
    # while sel.close() below still runs -- confirm before removing.
    if frappe.conf.run_selenium_tests and False:
        sel.start(context.verbose, driver)
    try:
        ret = frappe.test_runner.main(app, module, doctype, context.verbose, tests=tests, force=context.force)
        # exit code 0 when the whole suite passed
        if len(ret.failures) == 0 and len(ret.errors) == 0:
            ret = 0
    finally:
        pass
    if frappe.conf.run_selenium_tests:
        sel.close()
    sys.exit(ret)
@click.command('serve')
@click.option('--port', default=8000)
@click.option('--profile', is_flag=True, default=False)
@pass_context
def serve(context, port=None, profile=False, sites_path='.', site=None):
    "Start development web server"
    # serve the first selected site, if any (the `site` parameter is
    # overwritten here -- kept for signature compatibility)
    if not context.sites:
        site = None
    else:
        site = context.sites[0]
    import frappe.app
    # BUGFIX: honour the sites_path parameter (was hard-coded to '.');
    # the default is unchanged so CLI behaviour is identical
    frappe.app.serve(port=port, profile=profile, site=site, sites_path=sites_path)
@click.command('request')
@click.argument('args')
@pass_context
def request(context, args):
    "Run a request as an admin"
    import frappe.handler
    import frappe.api
    for site in context.sites:
        try:
            frappe.init(site=site)
            frappe.connect()
            if "?" in args:
                # parse the query string into the request form_dict
                frappe.local.form_dict = frappe._dict([a.split("=") for a in args.split("?")[-1].split("&")])
            else:
                frappe.local.form_dict = frappe._dict()
            if args.startswith("/api/method"):
                # last path segment is the dotted method name
                frappe.local.form_dict.cmd = args.split("?")[0].split("/")[-1]
            frappe.handler.execute_cmd(frappe.form_dict.cmd)
            print frappe.response
        finally:
            frappe.destroy()
@click.command('doctor')
def doctor():
    "Get diagnostic info about background workers"
    from frappe.utils.doctor import doctor as _doctor
    return _doctor()
@click.command('celery-doctor')
@click.option('--site', help='site name')
def celery_doctor(site=None):
    "Get diagnostic info about background workers"
    from frappe.utils.doctor import celery_doctor as _celery_doctor
    frappe.init('')
    return _celery_doctor(site=site)
@click.command('purge-pending-tasks')
@click.option('--site', help='site name')
@click.option('--event', default=None, help='one of "all", "weekly", "monthly", "hourly", "daily", "weekly_long", "daily_long"')
def purge_all_tasks(site=None, event=None):
"Purge any pending periodic tasks, if event option is not given, it will purge everything for the site"
from frappe.utils.doctor import purge_pending_tasks
frappe.init(site or '')
count = purge_pending_tasks(event=None, site=None)
print "Purged {} tasks".format(count)
@click.command('dump-queue-status')
def dump_queue_status():
    "Dump detailed diagnostic infomation for task queues in JSON format"
    frappe.init('')
    from frappe.utils.doctor import dump_queue_status as _dump_queue_status, inspect_queue
    print json.dumps(_dump_queue_status(), indent=1)
    inspect_queue()
@click.command('make-app')
@click.argument('destination')
@click.argument('app_name')
def make_app(destination, app_name):
    # Scaffold a new app skeleton at `destination`.
    from frappe.utils.boilerplate import make_boilerplate
    make_boilerplate(destination, app_name)
@click.command('use')
@click.argument('site')
def _use(site, sites_path='.'):
    # CLI wrapper around use() below
    use(site, sites_path=sites_path)
def use(site, sites_path='.'):
    # Record `site` as the default site by (over)writing currentsite.txt.
    currentsite_file = os.path.join(sites_path, "currentsite.txt")
    with open(currentsite_file, "w") as sitefile:
        sitefile.write(site)
@click.command('backup')
@click.option('--with-files', default=False, is_flag=True, help="Take backup with files")
@pass_context
def backup(context, with_files=False, backup_path_db=None, backup_path_files=None,
        backup_path_private_files=None, quiet=False):
    "Backup"
    # NOTE(review): `quiet` is unused; verbosity comes from the context.
    from frappe.utils.backups import scheduled_backup
    verbose = context.verbose
    for site in context.sites:
        frappe.init(site=site)
        frappe.connect()
        odb = scheduled_backup(ignore_files=not with_files, backup_path_db=backup_path_db, backup_path_files=backup_path_files, backup_path_private_files=backup_path_private_files, force=True)
        if verbose:
            from frappe.utils import now
            print "database backup taken -", odb.backup_path_db, "- on", now()
            if with_files:
                print "files backup taken -", odb.backup_path_files, "- on", now()
                print "private files backup taken -", odb.backup_path_private_files, "- on", now()
        frappe.destroy()
@click.command('remove-from-installed-apps')
@click.argument('app')
@pass_context
def remove_from_installed_apps(context, app):
    # Remove `app` from the installed-apps list (does not drop its data).
    from frappe.installer import remove_from_installed_apps
    for site in context.sites:
        try:
            frappe.init(site=site)
            frappe.connect()
            remove_from_installed_apps(app)
        finally:
            frappe.destroy()
@click.command('uninstall-app')
@click.argument('app')
@click.option('--dry-run', help='List all doctypes that will be deleted', is_flag=True, default=False)
@pass_context
def uninstall(context, app, dry_run=False):
    # Remove an app and (unless --dry-run) its doctypes from each site.
    from frappe.installer import remove_app
    for site in context.sites:
        try:
            frappe.init(site=site)
            frappe.connect()
            remove_app(app, dry_run)
        finally:
            frappe.destroy()
def move(dest_dir, site):
    # Move a site directory into dest_dir, suffixing a counter when a dump of
    # the same name already exists; returns the final destination path.
    import os
    if not os.path.isdir(dest_dir):
        raise Exception, "destination is not a directory or does not exist"
    frappe.init(site)
    old_path = frappe.utils.get_site_path()
    new_path = os.path.join(dest_dir, site)
    # check if site dump of same name already exists
    site_dump_exists = True
    count = 0
    while site_dump_exists:
        # first candidate keeps the plain name ("" suffix), then 1, 2, ...
        final_new_path = new_path + (count and str(count) or "")
        site_dump_exists = os.path.exists(final_new_path)
        count = int(count or 0) + 1
    os.rename(old_path, final_new_path)
    frappe.destroy()
    return final_new_path
@click.command('set-config')
@click.argument('key')
@click.argument('value')
@pass_context
def set_config(context, key, value):
    # Write a key/value into site_config.json for each selected site.
    from frappe.installer import update_site_config
    for site in context.sites:
        frappe.init(site=site)
        update_site_config(key, value)
        frappe.destroy()
@click.command('drop-site')
@click.argument('site')
@click.option('--root-login', default='root')
@click.option('--root-password')
def drop_site(site, root_login='root', root_password=None):
    # Back up the site, drop its database and db user, then archive the
    # site directory under ../archived_sites.
    from frappe.installer import get_current_host, make_connection
    from frappe.model.db_schema import DbManager
    from frappe.utils.backups import scheduled_backup
    frappe.init(site=site)
    frappe.connect()
    # safety net: take a full backup (with files) before destroying anything
    scheduled_backup(ignore_files=False, force=True)
    db_name = frappe.local.conf.db_name
    frappe.local.db = make_connection(root_login, root_password)
    dbman = DbManager(frappe.local.db)
    dbman.delete_user(db_name, get_current_host())
    dbman.drop_database(db_name)
    archived_sites_dir = os.path.join(frappe.get_app_path('frappe'), '..', '..', '..', 'archived_sites')
    if not os.path.exists(archived_sites_dir):
        os.mkdir(archived_sites_dir)
    move(archived_sites_dir, site)
@click.command('version')
def get_version():
    # Print "<app> <version>" for every app that declares __version__.
    frappe.init('')
    for m in sorted(frappe.get_all_apps()):
        module = frappe.get_module(m)
        if hasattr(module, "__version__"):
            print "{0} {1}".format(m, module.__version__)
# commands = [
# new_site,
# restore,
# install_app,
# run_patch,
# migrate,
# add_system_manager,
# celery
# ]
# Registry of all click commands exported to the bench CLI.
commands = [
    new_site,
    restore,
    reinstall,
    install_app,
    list_apps,
    add_system_manager,
    migrate,
    run_patch,
    reload_doc,
    build,
    watch,
    clear_cache,
    clear_website_cache,
    destroy_all_sessions,
    sync_www,
    build_website,
    make_docs,
    sync_docs,
    write_docs,
    build_docs,
    reset_perms,
    execute,
    celery,
    trigger_scheduler_event,
    enable_scheduler,
    disable_scheduler,
    export_doc,
    export_json,
    export_csv,
    export_fixtures,
    import_doc,
    import_csv,
    _bulk_rename,
    build_message_files,
    get_untranslated,
    update_translations,
    import_translations,
    set_admin_password,
    mysql,
    run_tests,
    serve,
    request,
    doctor,
    celery_doctor,
    purge_all_tasks,
    dump_queue_status,
    console,
    make_app,
    _use,
    backup,
    remove_from_installed_apps,
    uninstall,
    drop_site,
    set_config,
    get_version,
    new_language
]
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import uuid
import jsonfield2.fields
from django.conf import settings
class Migration(migrations.Migration):
    """Initial schema for this app (tables referenced as 'rai01ref.*').

    Creates the core documents (Artefact, Capacity, Requirement, Projet,
    Source, DocType, DocAttribute), the link/join tables between them, and
    then wires up the remaining foreign keys and unique constraints.

    Most models share a common set of audit columns (smNaturalCode,
    smRegStatus, smWflowStatus, smCreatedOn/By, smModifiedOn/By,
    smOwningTeam/User, smUUID), presumably inherited from a shared abstract
    base model — TODO confirm against the app's models module.

    NOTE(review): this migration appears auto-generated; edit with care,
    since Django derives historical model state from its exact content.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('protoLib', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Artefact',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),
                ('smNaturalCode', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smRegStatus', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smWflowStatus', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smCreatedOn', models.DateTimeField(null=True, auto_now_add=True)),
                ('smModifiedOn', models.DateTimeField(auto_now=True, null=True)),
                ('smUUID', models.UUIDField(editable=False, default=uuid.uuid4)),
                ('code', models.CharField(max_length=200)),
                ('description', models.TextField(blank=True, null=True)),
                # NOTE(review): default={} is a mutable default shared across
                # instances; Django convention is default=dict. Confirm the
                # model definition before changing a historical migration.
                ('info', jsonfield2.fields.JSONField(default={})),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='ArtefactCapacity',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),
                ('smNaturalCode', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smRegStatus', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smWflowStatus', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smCreatedOn', models.DateTimeField(null=True, auto_now_add=True)),
                ('smModifiedOn', models.DateTimeField(auto_now=True, null=True)),
                ('smUUID', models.UUIDField(editable=False, default=uuid.uuid4)),
                ('notes', models.TextField(blank=True, null=True)),
                ('description', models.TextField(blank=True, null=True)),
                ('isMain', models.BooleanField(default=False)),
                ('artefact', models.ForeignKey(to='rai01ref.Artefact')),
            ],
        ),
        migrations.CreateModel(
            name='ArtefactComposition',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),
                ('smNaturalCode', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smRegStatus', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smWflowStatus', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smCreatedOn', models.DateTimeField(null=True, auto_now_add=True)),
                ('smModifiedOn', models.DateTimeField(auto_now=True, null=True)),
                ('smUUID', models.UUIDField(editable=False, default=uuid.uuid4)),
                ('condition', models.TextField(blank=True, null=True)),
                ('notes', models.TextField(blank=True, null=True)),
                ('description', models.TextField(blank=True, null=True)),
                # Three FKs to Artefact: container plus an input/output pair.
                ('containerArt', models.ForeignKey(verbose_name='Artefact', related_name='artefactcomposition_set', to='rai01ref.Artefact')),
                ('inputArt', models.ForeignKey(verbose_name='in', related_name='+', to='rai01ref.Artefact')),
                ('outputArt', models.ForeignKey(blank=True, to='rai01ref.Artefact', verbose_name='out', related_name='+', null=True)),
                ('smCreatedBy', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True)),
                ('smModifiedBy', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True)),
                ('smOwningTeam', models.ForeignKey(blank=True, to='protoLib.TeamHierarchy', editable=False, related_name='+', null=True)),
                ('smOwningUser', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True)),
            ],
        ),
        migrations.CreateModel(
            name='ArtefactRequirement',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),
                ('smNaturalCode', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smRegStatus', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smWflowStatus', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smCreatedOn', models.DateTimeField(null=True, auto_now_add=True)),
                ('smModifiedOn', models.DateTimeField(auto_now=True, null=True)),
                ('smUUID', models.UUIDField(editable=False, default=uuid.uuid4)),
                ('notes', models.TextField(blank=True, null=True)),
                ('description', models.TextField(blank=True, null=True)),
                ('isMain', models.BooleanField(default=False)),
                ('artefact', models.ForeignKey(to='rai01ref.Artefact')),
            ],
        ),
        migrations.CreateModel(
            name='ArtefactSource',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),
                ('smNaturalCode', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smRegStatus', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smWflowStatus', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smCreatedOn', models.DateTimeField(null=True, auto_now_add=True)),
                ('smModifiedOn', models.DateTimeField(auto_now=True, null=True)),
                ('smUUID', models.UUIDField(editable=False, default=uuid.uuid4)),
                ('notes', models.TextField(blank=True, null=True)),
                ('description', models.TextField(blank=True, null=True)),
                ('artefact', models.ForeignKey(blank=True, to='rai01ref.Artefact', null=True)),
                ('smCreatedBy', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True)),
                ('smModifiedBy', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True)),
                ('smOwningTeam', models.ForeignKey(blank=True, to='protoLib.TeamHierarchy', editable=False, related_name='+', null=True)),
                ('smOwningUser', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Capacity',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),
                ('smNaturalCode', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smRegStatus', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smWflowStatus', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smCreatedOn', models.DateTimeField(null=True, auto_now_add=True)),
                ('smModifiedOn', models.DateTimeField(auto_now=True, null=True)),
                ('smUUID', models.UUIDField(editable=False, default=uuid.uuid4)),
                ('code', models.CharField(max_length=200)),
                ('description', models.TextField(blank=True, null=True)),
                # NOTE(review): mutable default={} — see note on Artefact.info.
                ('info', jsonfield2.fields.JSONField(default={})),
                ('copyFrom', models.ForeignKey(blank=True, to='rai01ref.Capacity', related_name='copy_set', null=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='DocAttribute',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),
                ('smNaturalCode', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smRegStatus', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smWflowStatus', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smCreatedOn', models.DateTimeField(null=True, auto_now_add=True)),
                ('smModifiedOn', models.DateTimeField(auto_now=True, null=True)),
                ('smUUID', models.UUIDField(editable=False, default=uuid.uuid4)),
                ('code', models.CharField(max_length=200)),
                ('baseType', models.CharField(blank=True, default='string', choices=[('string', 'string'), ('text', 'text'), ('bool', 'bool'), ('int', 'int'), ('sequence', 'sequence'), ('decimal', 'decimal'), ('money', 'money'), ('combo', 'combo'), ('date', 'date'), ('datetime', 'datetime'), ('time', 'time')], null=True, max_length=50)),
                ('prpLength', models.IntegerField(blank=True, null=True)),
                ('prpScale', models.IntegerField(blank=True, null=True)),
                ('vType', models.CharField(blank=True, default='string', choices=[('string', 'string'), ('text', 'text'), ('bool', 'bool'), ('int', 'int'), ('sequence', 'sequence'), ('decimal', 'decimal'), ('money', 'money'), ('combo', 'combo'), ('date', 'date'), ('datetime', 'datetime'), ('time', 'time')], null=True, max_length=50)),
                ('prpDefault', models.CharField(blank=True, null=True, max_length=50)),
                ('prpChoices', models.TextField(blank=True, null=True)),
                ('isRequired', models.BooleanField(default=False)),
                ('isSensitive', models.BooleanField(default=False)),
                ('crudType', models.CharField(blank=True, choices=[('editable', 'Default behavior'), ('readOnly', 'Never saved (rules, functions, linked, ...)'), ('insertOnly', 'Never updated (absorbed at the time of the creation field, eg shipping address'), ('updateOnly', 'Adding null or VrDefault, (fixed initial state)'), ('storeOnly', 'Never show on screen (id, json Types, etc)'), ('screenOnly', 'Calculated on the frontend')], null=True, max_length=20)),
                ('description', models.TextField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='DocType',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),
                ('smNaturalCode', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smRegStatus', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smWflowStatus', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smCreatedOn', models.DateTimeField(null=True, auto_now_add=True)),
                ('smModifiedOn', models.DateTimeField(auto_now=True, null=True)),
                ('smUUID', models.UUIDField(editable=False, default=uuid.uuid4)),
                ('document', models.CharField(choices=[('Artefact', 'Artefact'), ('Capacity', 'Capacity'), ('Requirement', 'Requirement')], max_length=11)),
                ('dtype', models.CharField(verbose_name='DocType', max_length=200)),
                ('category', models.CharField(blank=True, null=True, max_length=50)),
                ('notes', models.TextField(blank=True, null=True)),
                ('description', models.TextField(blank=True, null=True)),
                ('smCreatedBy', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True)),
                ('smModifiedBy', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True)),
                ('smOwningTeam', models.ForeignKey(blank=True, to='protoLib.TeamHierarchy', editable=False, related_name='+', null=True)),
                ('smOwningUser', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True)),
            ],
        ),
        migrations.CreateModel(
            name='ProjectArtefact',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),
                ('smNaturalCode', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smRegStatus', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smWflowStatus', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smCreatedOn', models.DateTimeField(null=True, auto_now_add=True)),
                ('smModifiedOn', models.DateTimeField(auto_now=True, null=True)),
                ('smUUID', models.UUIDField(editable=False, default=uuid.uuid4)),
                ('notes', models.TextField(blank=True, null=True)),
                ('description', models.TextField(blank=True, null=True)),
                ('artefact', models.ForeignKey(to='rai01ref.Artefact')),
            ],
        ),
        migrations.CreateModel(
            name='ProjectCapacity',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),
                ('smNaturalCode', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smRegStatus', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smWflowStatus', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smCreatedOn', models.DateTimeField(null=True, auto_now_add=True)),
                ('smModifiedOn', models.DateTimeField(auto_now=True, null=True)),
                ('smUUID', models.UUIDField(editable=False, default=uuid.uuid4)),
                ('notes', models.TextField(blank=True, null=True)),
                ('description', models.TextField(blank=True, null=True)),
                ('capacity', models.ForeignKey(to='rai01ref.Capacity')),
            ],
        ),
        migrations.CreateModel(
            name='ProjectRequirement',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),
                ('smNaturalCode', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smRegStatus', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smWflowStatus', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smCreatedOn', models.DateTimeField(null=True, auto_now_add=True)),
                ('smModifiedOn', models.DateTimeField(auto_now=True, null=True)),
                ('smUUID', models.UUIDField(editable=False, default=uuid.uuid4)),
                ('notes', models.TextField(blank=True, null=True)),
                ('description', models.TextField(blank=True, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Projet',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),
                ('smNaturalCode', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smRegStatus', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smWflowStatus', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smCreatedOn', models.DateTimeField(null=True, auto_now_add=True)),
                ('smModifiedOn', models.DateTimeField(auto_now=True, null=True)),
                ('smUUID', models.UUIDField(editable=False, default=uuid.uuid4)),
                ('code', models.CharField(max_length=200)),
                ('notes', models.TextField(blank=True, null=True)),
                ('description', models.TextField(blank=True, null=True)),
                ('smCreatedBy', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True)),
                ('smModifiedBy', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True)),
                ('smOwningTeam', models.ForeignKey(blank=True, to='protoLib.TeamHierarchy', editable=False, related_name='+', null=True)),
                ('smOwningUser', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True)),
            ],
        ),
        migrations.CreateModel(
            name='Requirement',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),
                ('smNaturalCode', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smRegStatus', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smWflowStatus', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smCreatedOn', models.DateTimeField(null=True, auto_now_add=True)),
                ('smModifiedOn', models.DateTimeField(auto_now=True, null=True)),
                ('smUUID', models.UUIDField(editable=False, default=uuid.uuid4)),
                ('code', models.CharField(max_length=200)),
                ('description', models.TextField(blank=True, null=True)),
                # NOTE(review): mutable default={} — see note on Artefact.info.
                ('info', jsonfield2.fields.JSONField(default={})),
                ('copyFrom', models.ForeignKey(blank=True, to='rai01ref.Requirement', related_name='copy_set', null=True)),
                ('docType', models.ForeignKey(blank=True, to='rai01ref.DocType', related_name='+', null=True)),
                # Self-referential parent link for requirement hierarchies.
                ('refRequirement', models.ForeignKey(blank=True, to='rai01ref.Requirement', verbose_name='Parent', related_name='ref_set', null=True)),
                ('smCreatedBy', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True)),
                ('smModifiedBy', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True)),
                ('smOwningTeam', models.ForeignKey(blank=True, to='protoLib.TeamHierarchy', editable=False, related_name='+', null=True)),
                ('smOwningUser', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Source',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, verbose_name='ID', auto_created=True)),
                ('smNaturalCode', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smRegStatus', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smWflowStatus', models.CharField(blank=True, editable=False, null=True, max_length=50)),
                ('smCreatedOn', models.DateTimeField(null=True, auto_now_add=True)),
                ('smModifiedOn', models.DateTimeField(auto_now=True, null=True)),
                ('smUUID', models.UUIDField(editable=False, default=uuid.uuid4)),
                ('code', models.CharField(max_length=200)),
                ('reference', models.CharField(blank=True, null=True, max_length=200)),
                ('notes', models.TextField(blank=True, null=True)),
                ('description', models.TextField(blank=True, null=True)),
                ('smCreatedBy', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True)),
                ('smModifiedBy', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True)),
                ('smOwningTeam', models.ForeignKey(blank=True, to='protoLib.TeamHierarchy', editable=False, related_name='+', null=True)),
                ('smOwningUser', models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True)),
            ],
        ),
        # Remaining FKs are added after all models exist to avoid forward
        # references between the CreateModel operations above.
        migrations.AddField(
            model_name='projectrequirement',
            name='projet',
            field=models.ForeignKey(to='rai01ref.Projet'),
        ),
        migrations.AddField(
            model_name='projectrequirement',
            name='requirement',
            field=models.ForeignKey(to='rai01ref.Requirement'),
        ),
        migrations.AddField(
            model_name='projectrequirement',
            name='smCreatedBy',
            field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True),
        ),
        migrations.AddField(
            model_name='projectrequirement',
            name='smModifiedBy',
            field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True),
        ),
        migrations.AddField(
            model_name='projectrequirement',
            name='smOwningTeam',
            field=models.ForeignKey(blank=True, to='protoLib.TeamHierarchy', editable=False, related_name='+', null=True),
        ),
        migrations.AddField(
            model_name='projectrequirement',
            name='smOwningUser',
            field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True),
        ),
        migrations.AddField(
            model_name='projectcapacity',
            name='projet',
            field=models.ForeignKey(to='rai01ref.Projet'),
        ),
        migrations.AddField(
            model_name='projectcapacity',
            name='smCreatedBy',
            field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True),
        ),
        migrations.AddField(
            model_name='projectcapacity',
            name='smModifiedBy',
            field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True),
        ),
        migrations.AddField(
            model_name='projectcapacity',
            name='smOwningTeam',
            field=models.ForeignKey(blank=True, to='protoLib.TeamHierarchy', editable=False, related_name='+', null=True),
        ),
        migrations.AddField(
            model_name='projectcapacity',
            name='smOwningUser',
            field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True),
        ),
        migrations.AddField(
            model_name='projectartefact',
            name='projet',
            field=models.ForeignKey(to='rai01ref.Projet'),
        ),
        migrations.AddField(
            model_name='projectartefact',
            name='smCreatedBy',
            field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True),
        ),
        migrations.AddField(
            model_name='projectartefact',
            name='smModifiedBy',
            field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True),
        ),
        migrations.AddField(
            model_name='projectartefact',
            name='smOwningTeam',
            field=models.ForeignKey(blank=True, to='protoLib.TeamHierarchy', editable=False, related_name='+', null=True),
        ),
        migrations.AddField(
            model_name='projectartefact',
            name='smOwningUser',
            field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True),
        ),
        migrations.AddField(
            model_name='docattribute',
            name='docType',
            field=models.ForeignKey(blank=True, to='rai01ref.DocType', null=True),
        ),
        migrations.AddField(
            model_name='docattribute',
            name='smCreatedBy',
            field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True),
        ),
        migrations.AddField(
            model_name='docattribute',
            name='smModifiedBy',
            field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True),
        ),
        migrations.AddField(
            model_name='docattribute',
            name='smOwningTeam',
            field=models.ForeignKey(blank=True, to='protoLib.TeamHierarchy', editable=False, related_name='+', null=True),
        ),
        migrations.AddField(
            model_name='docattribute',
            name='smOwningUser',
            field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True),
        ),
        migrations.AddField(
            model_name='capacity',
            name='docType',
            field=models.ForeignKey(blank=True, to='rai01ref.DocType', related_name='+', null=True),
        ),
        migrations.AddField(
            model_name='capacity',
            name='refCapacity',
            field=models.ForeignKey(blank=True, to='rai01ref.Capacity', verbose_name='Parent', related_name='ref_set', null=True),
        ),
        migrations.AddField(
            model_name='capacity',
            name='smCreatedBy',
            field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True),
        ),
        migrations.AddField(
            model_name='capacity',
            name='smModifiedBy',
            field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True),
        ),
        migrations.AddField(
            model_name='capacity',
            name='smOwningTeam',
            field=models.ForeignKey(blank=True, to='protoLib.TeamHierarchy', editable=False, related_name='+', null=True),
        ),
        migrations.AddField(
            model_name='capacity',
            name='smOwningUser',
            field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True),
        ),
        migrations.AddField(
            model_name='artefactsource',
            name='source',
            field=models.ForeignKey(blank=True, to='rai01ref.Source', null=True),
        ),
        migrations.AddField(
            model_name='artefactrequirement',
            name='requirement',
            field=models.ForeignKey(to='rai01ref.Requirement'),
        ),
        migrations.AddField(
            model_name='artefactrequirement',
            name='smCreatedBy',
            field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True),
        ),
        migrations.AddField(
            model_name='artefactrequirement',
            name='smModifiedBy',
            field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True),
        ),
        migrations.AddField(
            model_name='artefactrequirement',
            name='smOwningTeam',
            field=models.ForeignKey(blank=True, to='protoLib.TeamHierarchy', editable=False, related_name='+', null=True),
        ),
        migrations.AddField(
            model_name='artefactrequirement',
            name='smOwningUser',
            field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True),
        ),
        migrations.AddField(
            model_name='artefactcapacity',
            name='capacity',
            field=models.ForeignKey(to='rai01ref.Capacity'),
        ),
        migrations.AddField(
            model_name='artefactcapacity',
            name='smCreatedBy',
            field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True),
        ),
        migrations.AddField(
            model_name='artefactcapacity',
            name='smModifiedBy',
            field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True),
        ),
        migrations.AddField(
            model_name='artefactcapacity',
            name='smOwningTeam',
            field=models.ForeignKey(blank=True, to='protoLib.TeamHierarchy', editable=False, related_name='+', null=True),
        ),
        migrations.AddField(
            model_name='artefactcapacity',
            name='smOwningUser',
            field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True),
        ),
        migrations.AddField(
            model_name='artefact',
            name='capacity',
            field=models.ForeignKey(blank=True, to='rai01ref.Capacity', null=True),
        ),
        migrations.AddField(
            model_name='artefact',
            name='copyFrom',
            field=models.ForeignKey(blank=True, to='rai01ref.Artefact', related_name='copy_set', null=True),
        ),
        migrations.AddField(
            model_name='artefact',
            name='docType',
            field=models.ForeignKey(blank=True, to='rai01ref.DocType', related_name='+', null=True),
        ),
        migrations.AddField(
            model_name='artefact',
            name='refArtefact',
            field=models.ForeignKey(blank=True, to='rai01ref.Artefact', verbose_name='Parent', related_name='ref_set', null=True),
        ),
        migrations.AddField(
            model_name='artefact',
            name='requirement',
            field=models.ForeignKey(blank=True, to='rai01ref.Requirement', null=True),
        ),
        migrations.AddField(
            model_name='artefact',
            name='smCreatedBy',
            field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True),
        ),
        migrations.AddField(
            model_name='artefact',
            name='smModifiedBy',
            field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True),
        ),
        migrations.AddField(
            model_name='artefact',
            name='smOwningTeam',
            field=models.ForeignKey(blank=True, to='protoLib.TeamHierarchy', editable=False, related_name='+', null=True),
        ),
        migrations.AddField(
            model_name='artefact',
            name='smOwningUser',
            field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, editable=False, related_name='+', null=True),
        ),
        # Composite uniqueness constraints for the link tables and DocType.
        migrations.AlterUniqueTogether(
            name='projectrequirement',
            unique_together=set([('projet', 'requirement')]),
        ),
        migrations.AlterUniqueTogether(
            name='projectcapacity',
            unique_together=set([('projet', 'capacity')]),
        ),
        migrations.AlterUniqueTogether(
            name='projectartefact',
            unique_together=set([('artefact', 'projet')]),
        ),
        migrations.AlterUniqueTogether(
            name='doctype',
            unique_together=set([('document', 'dtype')]),
        ),
        migrations.AlterUniqueTogether(
            name='docattribute',
            unique_together=set([('docType', 'code')]),
        ),
        migrations.AlterUniqueTogether(
            name='artefactsource',
            unique_together=set([('source', 'artefact')]),
        ),
        migrations.AlterUniqueTogether(
            name='artefactrequirement',
            unique_together=set([('artefact', 'requirement')]),
        ),
        migrations.AlterUniqueTogether(
            name='artefactcapacity',
            unique_together=set([('artefact', 'capacity')]),
        ),
    ]
| |
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""GYP backend that generates Eclipse CDT settings files.
This backend DOES NOT generate Eclipse CDT projects. Instead, it generates XML
files that can be imported into an Eclipse CDT project. The XML file contains a
list of include paths and symbols (i.e. defines).
Because a full .cproject definition is not created by this generator, it's not
possible to properly define the include dirs and symbols for each file
individually. Instead, one set of includes/symbols is generated for the entire
project. This works fairly well (and is a vast improvement in general), but may
still result in a few indexer issues here and there.
This generator has no automated tests, so expect it to be broken.
"""
from xml.sax.saxutils import escape
import os.path
import subprocess
import gyp
import gyp.common
import gyp.msvs_emulation
import shlex
import xml.etree.cElementTree as ET
# Module-level flag read by gyp's input processing; flipped to True by
# CalculateGeneratorInputInfo() when the 'adjust_static_libraries' generator
# flag is set.
generator_wants_static_library_dependencies_adjusted = False
# Default variable substitutions for this generator (populated below).
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'PRODUCT_DIR', 'LIB_DIR', 'SHARED_LIB_DIR']:
  # Some gyp steps fail if these are empty(!), so we convert them to variables
  generator_default_variables[dirname] = '$' + dirname
# These variables are not meaningful for CDT-settings generation; map each of
# them to the empty string.
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
               'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
               'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
               'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
               'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
               'CONFIGURATION_NAME']:
  generator_default_variables[unused] = ''
# Include dirs will occasionally use the SHARED_INTERMEDIATE_DIR variable as
# part of the path when dealing with generated headers. This value will be
# replaced dynamically for each configuration.
generator_default_variables['SHARED_INTERMEDIATE_DIR'] = \
  '$SHARED_INTERMEDIATE_DIR'
def CalculateVariables(default_variables, params):
  """Seed |default_variables| with generator flags and the detected OS.

  Existing entries are never overwritten (setdefault semantics). On Windows,
  additional VS-shared configuration is pulled in via gyp.msvs_emulation.
  """
  flags = params.get('generator_flags', {})
  for name, value in flags.items():
    default_variables.setdefault(name, value)

  flavor = gyp.common.GetFlavor(params)
  default_variables.setdefault('OS', flavor)
  if flavor != 'win':
    return

  # Copy additional generator configuration data from VS, which is shared
  # by the Eclipse generator.
  import gyp.generator.msvs as msvs_generator
  _ = getattr(msvs_generator,
              'generator_additional_non_configuration_keys', [])
  _ = getattr(msvs_generator,
              'generator_additional_path_sections', [])
  gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
  """Calculate the generator specific info that gets fed to input (called by
  gyp)."""
  wants_adjust = params.get('generator_flags', {}).get(
      'adjust_static_libraries', False)
  if wants_adjust:
    global generator_wants_static_library_dependencies_adjusted
    generator_wants_static_library_dependencies_adjusted = True
def GetAllIncludeDirectories(target_list, target_dicts,
                             shared_intermediate_dirs, config_name, params,
                             compiler_path):
  """Calculate the set of include directories to be used.

  Returns:
    A list including all the include_dir's specified for every target followed
    by any include directories that were added as cflag compiler options.
  """
  gyp_includes_set = set()
  compiler_includes_list = []

  # Find compiler's default include dirs.
  if compiler_path:
    command = shlex.split(compiler_path)
    command.extend(['-E', '-xc++', '-v', '-'])
    # The compiler prints its search dirs on stderr, hence communicate()[1].
    proc = subprocess.Popen(args=command, stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output = proc.communicate()[1]
    # Extract the list of include dirs from the output, which has this format:
    #   ...
    #   #include "..." search starts here:
    #   #include <...> search starts here:
    #    /usr/include/c++/4.6
    #    /usr/local/include
    #   End of search list.
    #   ...
    in_include_list = False
    for line in output.splitlines():
      if line.startswith('#include'):
        in_include_list = True
        continue
      if line.startswith('End of search list.'):
        break
      if in_include_list:
        include_dir = line.strip()
        # Preserve first-seen order; dirs are appended, not sorted.
        if include_dir not in compiler_includes_list:
          compiler_includes_list.append(include_dir)

  flavor = gyp.common.GetFlavor(params)
  if flavor == 'win':
    generator_flags = params.get('generator_flags', {})
  for target_name in target_list:
    target = target_dicts[target_name]
    if config_name in target['configurations']:
      config = target['configurations'][config_name]

      # Look for any include dirs that were explicitly added via cflags. This
      # may be done in gyp files to force certain includes to come at the end.
      # TODO(jgreenwald): Change the gyp files to not abuse cflags for this, and
      # remove this.
      if flavor == 'win':
        msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
        cflags = msvs_settings.GetCflags(config_name)
      else:
        cflags = config['cflags']
      for cflag in cflags:
        if cflag.startswith('-I'):
          include_dir = cflag[2:]
          if include_dir not in compiler_includes_list:
            compiler_includes_list.append(include_dir)

      # Find standard gyp include dirs.
      if 'include_dirs' in config:
        include_dirs = config['include_dirs']
        for shared_intermediate_dir in shared_intermediate_dirs:
          for include_dir in include_dirs:
            include_dir = include_dir.replace('$SHARED_INTERMEDIATE_DIR',
                                              shared_intermediate_dir)
            if not os.path.isabs(include_dir):
              # Relative dirs are resolved against the .gyp file's directory.
              base_dir = os.path.dirname(target_name)
              include_dir = base_dir + '/' + include_dir
              include_dir = os.path.abspath(include_dir)
            gyp_includes_set.add(include_dir)

  # Generate a list that has all the include dirs: gyp dirs first (sorted),
  # then any compiler/cflag dirs not already present.
  all_includes_list = list(gyp_includes_set)
  all_includes_list.sort()
  for compiler_include in compiler_includes_list:
    if not compiler_include in gyp_includes_set:
      all_includes_list.append(compiler_include)

  # All done.
  return all_includes_list
def GetCompilerPath(target_list, data, options):
  """Determine a command that can be used to invoke the compiler.

  Returns:
    If this is a gyp project that has explicit make settings, try to determine
    the compiler from that. Otherwise, see if a compiler was specified via the
    CC_target environment variable.
  """
  # Prefer an explicit CC/CXX from the project's make_global_settings.
  build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
  make_settings = data[build_file].get('make_global_settings', {})
  for name, value in make_settings:
    if name in ('CC', 'CXX'):
      return os.path.join(options.toplevel_dir, value)

  # Fall back to the environment.
  for env_name in ('CC_target', 'CC', 'CXX'):
    compiler = os.environ.get(env_name)
    if compiler:
      return compiler

  # Last resort.
  return 'gcc'
def GetAllDefines(target_list, target_dicts, data, config_name, params,
                  compiler_path):
  """Calculate the defines for a project.

  Returns:
    A dict that includes explicit defines declared in gyp files along with all
    of the default defines that the compiler uses.
  """
  # Get defines declared in the gyp files.
  all_defines = {}
  flavor = gyp.common.GetFlavor(params)
  if flavor == 'win':
    generator_flags = params.get('generator_flags', {})
  for target_name in target_list:
    target = target_dicts[target_name]

    if flavor == 'win':
      # On Windows the computed defines come from the MSVS emulation layer
      # rather than from a compiler probe.
      msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
      extra_defines = msvs_settings.GetComputedDefines(config_name)
    else:
      extra_defines = []
    if config_name in target['configurations']:
      config = target['configurations'][config_name]
      target_defines = config['defines']
    else:
      target_defines = []
    for define in target_defines + extra_defines:
      # A define may be "NAME" or "NAME=VALUE"; a bare NAME defaults to "1",
      # matching the C preprocessor's behavior.
      split_define = define.split('=', 1)
      if len(split_define) == 1:
        split_define.append('1')
      if split_define[0].strip() in all_defines:
        # Already defined; first definition wins.
        continue
      all_defines[split_define[0].strip()] = split_define[1].strip()

  # Get default compiler defines (if possible).
  if flavor == 'win':
    return all_defines  # Default defines already processed in the loop above.
  if compiler_path:
    # Ask the compiler for its built-in macro definitions by preprocessing an
    # empty translation unit with -dM.  Each output line looks like
    # "#define KEY VAL" (VAL may be absent).
    command = shlex.split(compiler_path)
    command.extend(['-E', '-dM', '-'])
    cpp_proc = subprocess.Popen(args=command, cwd='.',
                                stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    cpp_output = cpp_proc.communicate()[0]
    cpp_lines = cpp_output.split('\n')
    for cpp_line in cpp_lines:
      if not cpp_line.strip():
        continue
      cpp_line_parts = cpp_line.split(' ', 2)
      key = cpp_line_parts[1]
      if len(cpp_line_parts) >= 3:
        val = cpp_line_parts[2]
      else:
        val = '1'
      all_defines[key] = val

  return all_defines
def WriteIncludePaths(out, eclipse_langs, include_dirs):
  """Write the includes section of a CDT settings export file."""
  lines = ['  <section name="org.eclipse.cdt.internal.ui.wizards.'
           'settingswizards.IncludePaths">',
           '    <language name="holder for library settings"></language>']
  for language in eclipse_langs:
    lines.append('    <language name="%s">' % language)
    lines.extend('      <includepath workspace_path="false">%s</includepath>'
                 % directory for directory in include_dirs)
    lines.append('    </language>')
  lines.append('  </section>')
  out.write('\n'.join(lines) + '\n')
def WriteMacros(out, eclipse_langs, defines):
  """Write the macros section of a CDT settings export file."""
  lines = ['  <section name="org.eclipse.cdt.internal.ui.wizards.'
           'settingswizards.Macros">',
           '    <language name="holder for library settings"></language>']
  for language in eclipse_langs:
    lines.append('    <language name="%s">' % language)
    lines.extend('      <macro><name>%s</name><value>%s</value></macro>'
                 % (escape(name), escape(defines[name]))
                 for name in sorted(defines))
    lines.append('    </language>')
  lines.append('  </section>')
  out.write('\n'.join(lines) + '\n')
def GenerateOutputForConfig(target_list, target_dicts, data, params,
                            config_name):
  """Write the CDT settings and Java classpath files for one configuration."""
  options = params['options']
  generator_flags = params.get('generator_flags', {})

  # Relative path from the source root to the output files, e.g. "out/Debug".
  build_dir = os.path.join(generator_flags.get('output_dir', 'out'),
                           config_name)
  toplevel_build = os.path.join(options.toplevel_dir, build_dir)
  # Ninja places generated files in out/Debug/gen while make uses
  # out/Debug/obj/gen as the SHARED_INTERMEDIATE_DIR; cover both.
  shared_intermediate_dirs = [os.path.join(toplevel_build, 'obj', 'gen'),
                              os.path.join(toplevel_build, 'gen')]

  settings_file = os.path.join(toplevel_build, 'eclipse-cdt-settings.xml')
  classpath_file = os.path.join(toplevel_build, 'eclipse-classpath.xml')
  GenerateCdtSettingsFile(target_list, target_dicts, data, params, config_name,
                          settings_file, options, shared_intermediate_dirs)
  GenerateClasspathFile(target_list, target_dicts, options.toplevel_dir,
                        toplevel_build, classpath_file)
def GenerateCdtSettingsFile(target_list, target_dicts, data, params,
                            config_name, out_name, options,
                            shared_intermediate_dirs):
  """Write an Eclipse CDT settings export file (include paths and macros)."""
  gyp.common.EnsureDirExists(out_name)

  eclipse_langs = ['C++ Source File', 'C Source File', 'Assembly Source File',
                   'GNU C++', 'GNU C', 'Assembly']
  compiler_path = GetCompilerPath(target_list, data, options)
  include_dirs = GetAllIncludeDirectories(target_list, target_dicts,
                                          shared_intermediate_dirs,
                                          config_name, params, compiler_path)
  defines = GetAllDefines(target_list, target_dicts, data, config_name,
                          params, compiler_path)

  with open(out_name, 'w') as out:
    out.write('<?xml version="1.0" encoding="UTF-8"?>\n')
    out.write('<cdtprojectproperties>\n')
    WriteIncludePaths(out, eclipse_langs, include_dirs)
    WriteMacros(out, eclipse_langs, defines)
    out.write('</cdtprojectproperties>\n')
def GenerateClasspathFile(target_list, target_dicts, toplevel_dir,
                          toplevel_build, out_name):
  '''Generates a classpath file suitable for symbol navigation and code
  completion of Java code (such as in Android projects) by finding all
  .java and .jar files used as action inputs.'''
  gyp.common.EnsureDirExists(out_name)
  result = ET.Element('classpath')

  def AddElements(kind, paths):
    # Normalize everything to paths relative to the top-level dir so the
    # generated entries are stable regardless of how inputs were spelled.
    rel_paths = set()
    for path in paths:
      rel_paths.add(os.path.relpath(path, toplevel_dir)
                    if os.path.isabs(path) else path)

    for path in sorted(rel_paths):
      entry = ET.SubElement(result, 'classpathentry')
      entry.set('kind', kind)
      entry.set('path', path)

  AddElements('lib', GetJavaJars(target_list, target_dicts, toplevel_dir))
  AddElements('src', GetJavaSourceDirs(target_list, target_dicts, toplevel_dir))
  # Include the standard JRE container and a dummy out folder
  AddElements('con', ['org.eclipse.jdt.launching.JRE_CONTAINER'])
  # The dummy output folder keeps Eclipse from using the default /bin folder
  # in the root of the project.
  AddElements('output', [os.path.join(toplevel_build, '.eclipse-java-build')])

  ET.ElementTree(result).write(out_name)
def GetJavaJars(target_list, target_dicts, toplevel_dir):
  '''Generates a sequence of all .jars used as inputs.'''
  for target_name in target_list:
    base_dir = os.path.dirname(target_name)
    for action in target_dicts[target_name].get('actions', []):
      for input_ in action['inputs']:
        # Only literal .jar paths; skip gyp variable references like "$(...)".
        if os.path.splitext(input_)[1] != '.jar' or input_.startswith('$'):
          continue
        if os.path.isabs(input_):
          yield input_
        else:
          yield os.path.join(base_dir, input_)
def GetJavaSourceDirs(target_list, target_dicts, toplevel_dir):
  '''Generates a sequence of all likely java package root directories.'''
  for target_name in target_list:
    base_dir = os.path.dirname(target_name)
    for action in target_dicts[target_name].get('actions', []):
      for input_ in action['inputs']:
        if os.path.splitext(input_)[1] != '.java' or input_.startswith('$'):
          continue
        dir_ = os.path.dirname(os.path.join(base_dir, input_))
        # If there is a parent 'src' or 'java' folder, navigate up to it -
        # these are canonical package root names in Chromium. This will
        # break if 'src' or 'java' exists in the package structure. This
        # could be further improved by inspecting the java file for the
        # package name if this proves to be too fragile in practice.
        candidate = dir_
        while os.path.basename(candidate) not in ('src', 'java'):
          candidate, _ = os.path.split(candidate)
          if not candidate or candidate == toplevel_dir:
            # Didn't find a known root; just return the original path.
            yield dir_
            break
        else:
          yield candidate
def GenerateOutput(target_list, target_dicts, data, params):
  """Generate an XML settings file that can be imported into a CDT project."""
  if params['options'].generator_output:
    raise NotImplementedError("--generator_output not implemented for eclipse")

  # An explicit "config" generator flag restricts output to one
  # configuration; otherwise generate for every configuration of the first
  # target (all targets share the same configuration names).
  user_config = params.get('generator_flags', {}).get('config', None)
  configs = ([user_config] if user_config
             else target_dicts[target_list[0]]['configurations'])
  for config_name in configs:
    GenerateOutputForConfig(target_list, target_dicts, data, params,
                            config_name)
| |
"""Support for the Transmission BitTorrent client API."""
from __future__ import annotations
from datetime import timedelta
import logging
from typing import List
import transmissionrpc
from transmissionrpc.error import TransmissionError
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
CONF_HOST,
CONF_ID,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_SCAN_INTERVAL,
CONF_USERNAME,
)
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.dispatcher import dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
from .const import (
ATTR_DELETE_DATA,
ATTR_TORRENT,
CONF_LIMIT,
CONF_ORDER,
DATA_UPDATED,
DEFAULT_DELETE_DATA,
DEFAULT_LIMIT,
DEFAULT_NAME,
DEFAULT_ORDER,
DEFAULT_PORT,
DEFAULT_SCAN_INTERVAL,
DOMAIN,
EVENT_DOWNLOADED_TORRENT,
EVENT_REMOVED_TORRENT,
EVENT_STARTED_TORRENT,
SERVICE_ADD_TORRENT,
SERVICE_REMOVE_TORRENT,
SERVICE_START_TORRENT,
SERVICE_STOP_TORRENT,
)
from .errors import AuthenticationError, CannotConnect, UnknownError
_LOGGER = logging.getLogger(__name__)

# Service call schemas.  Every service targets a configured Transmission
# instance by its CONF_NAME.
SERVICE_ADD_TORRENT_SCHEMA = vol.Schema(
    {vol.Required(ATTR_TORRENT): cv.string, vol.Required(CONF_NAME): cv.string}
)

SERVICE_REMOVE_TORRENT_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_NAME): cv.string,
        vol.Required(CONF_ID): cv.positive_int,
        vol.Optional(ATTR_DELETE_DATA, default=DEFAULT_DELETE_DATA): cv.boolean,
    }
)

SERVICE_START_TORRENT_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_NAME): cv.string,
        vol.Required(CONF_ID): cv.positive_int,
    }
)

SERVICE_STOP_TORRENT_SCHEMA = vol.Schema(
    {
        vol.Required(CONF_NAME): cv.string,
        vol.Required(CONF_ID): cv.positive_int,
    }
)

# Schema for one YAML-configured Transmission instance; imported into a
# config entry by async_setup below.
TRANS_SCHEMA = vol.All(
    vol.Schema(
        {
            vol.Required(CONF_HOST): cv.string,
            vol.Optional(CONF_PASSWORD): cv.string,
            vol.Optional(CONF_USERNAME): cv.string,
            vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
            vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
            vol.Optional(
                CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL
            ): cv.time_period,
        }
    )
)

CONFIG_SCHEMA = vol.Schema(
    {DOMAIN: vol.All(cv.ensure_list, [TRANS_SCHEMA])}, extra=vol.ALLOW_EXTRA
)

# Entity platforms forwarded for each config entry.
PLATFORMS = ["sensor", "switch"]
async def async_setup(hass, config):
    """Import the Transmission Component from config."""
    # Each YAML-configured instance is turned into a config entry via the
    # import flow; setup always succeeds at this stage.
    for conf in config.get(DOMAIN, []):
        hass.async_create_task(
            hass.config_entries.flow.async_init(
                DOMAIN, context={"source": SOURCE_IMPORT}, data=conf
            )
        )

    return True
async def async_setup_entry(hass, config_entry):
    """Set up the Transmission Component.

    Returns True when the client connected and was registered, False when
    authentication failed; raises ConfigEntryNotReady (from within
    ``TransmissionClient.async_setup``) when the host is unreachable.
    """
    client = TransmissionClient(hass, config_entry)
    # Only publish the client after setup succeeds.  Registering it first
    # (as this code previously did) left a stale, half-initialized client in
    # hass.data whenever setup failed or ConfigEntryNotReady was raised.
    if not await client.async_setup():
        return False
    hass.data.setdefault(DOMAIN, {})[config_entry.entry_id] = client

    return True
async def async_unload_entry(hass, config_entry):
    """Unload Transmission Entry from config_entry."""
    client = hass.data[DOMAIN].pop(config_entry.entry_id)
    if client.unsub_timer:
        client.unsub_timer()

    for platform in PLATFORMS:
        await hass.config_entries.async_forward_entry_unload(config_entry, platform)

    # The services are shared between instances; drop them only when the
    # last configured instance goes away.
    if not hass.data[DOMAIN]:
        for service in (
            SERVICE_ADD_TORRENT,
            SERVICE_REMOVE_TORRENT,
            SERVICE_START_TORRENT,
            SERVICE_STOP_TORRENT,
        ):
            hass.services.async_remove(DOMAIN, service)

    return True
async def get_api(hass, entry):
    """Get Transmission client."""
    host = entry[CONF_HOST]
    port = entry[CONF_PORT]
    username = entry.get(CONF_USERNAME)
    password = entry.get(CONF_PASSWORD)

    try:
        # The transmissionrpc client is blocking, so construct it in the
        # executor.
        api = await hass.async_add_executor_job(
            transmissionrpc.Client, host, port, username, password
        )
    except TransmissionError as error:
        message = str(error)
        if "401: Unauthorized" in message:
            _LOGGER.error("Credentials for Transmission client are not valid")
            raise AuthenticationError from error
        if "111: Connection refused" in message:
            _LOGGER.error("Connecting to the Transmission client %s failed", host)
            raise CannotConnect from error
        _LOGGER.error(error)
        raise UnknownError from error

    _LOGGER.debug("Successfully connected to %s", host)
    return api
class TransmissionClient:
    """Transmission Client Object.

    Owns the RPC connection, the polled TransmissionData, the refresh timer
    and (for the first instance) the shared torrent services.
    """

    def __init__(self, hass, config_entry):
        """Initialize the Transmission RPC API."""
        self.hass = hass
        self.config_entry = config_entry
        self.tm_api = None  # type: transmissionrpc.Client
        self._tm_data = None  # type: TransmissionData
        self.unsub_timer = None

    @property
    def api(self) -> TransmissionData:
        """Return the TransmissionData object."""
        return self._tm_data

    async def async_setup(self):
        """Set up the Transmission client.

        Returns False on authentication/unknown errors; raises
        ConfigEntryNotReady when the host cannot be reached yet.
        """
        try:
            self.tm_api = await get_api(self.hass, self.config_entry.data)
        except CannotConnect as error:
            raise ConfigEntryNotReady from error
        except (AuthenticationError, UnknownError):
            return False

        self._tm_data = TransmissionData(self.hass, self.config_entry, self.tm_api)

        # Blocking RPC calls run in the executor.
        await self.hass.async_add_executor_job(self._tm_data.init_torrent_list)
        await self.hass.async_add_executor_job(self._tm_data.update)
        self.add_options()
        self.set_scan_interval(self.config_entry.options[CONF_SCAN_INTERVAL])

        for platform in PLATFORMS:
            self.hass.async_create_task(
                self.hass.config_entries.async_forward_entry_setup(
                    self.config_entry, platform
                )
            )

        def find_instance(service):
            """Resolve the client targeted by a service call, or None.

            All four torrent services identify the instance by its configured
            name; the lookup (and its error logging) is shared here instead of
            being duplicated in every handler.
            """
            for entry in self.hass.config_entries.async_entries(DOMAIN):
                if entry.data[CONF_NAME] == service.data[CONF_NAME]:
                    return self.hass.data[DOMAIN][entry.entry_id]
            _LOGGER.error("Transmission instance is not found")
            return None

        def add_torrent(service):
            """Add new torrent to download."""
            tm_client = find_instance(service)
            if tm_client is None:
                return
            torrent = service.data[ATTR_TORRENT]
            if torrent.startswith(
                ("http", "ftp:", "magnet:")
            ) or self.hass.config.is_allowed_path(torrent):
                tm_client.tm_api.add_torrent(torrent)
                tm_client.api.update()
            else:
                _LOGGER.warning(
                    "Could not add torrent: unsupported type or no permission"
                )

        def start_torrent(service):
            """Start torrent."""
            tm_client = find_instance(service)
            if tm_client is None:
                return
            tm_client.tm_api.start_torrent(service.data[CONF_ID])
            tm_client.api.update()

        def stop_torrent(service):
            """Stop torrent."""
            tm_client = find_instance(service)
            if tm_client is None:
                return
            tm_client.tm_api.stop_torrent(service.data[CONF_ID])
            tm_client.api.update()

        def remove_torrent(service):
            """Remove torrent."""
            tm_client = find_instance(service)
            if tm_client is None:
                return
            tm_client.tm_api.remove_torrent(
                service.data[CONF_ID], delete_data=service.data[ATTR_DELETE_DATA]
            )
            tm_client.api.update()

        self.hass.services.async_register(
            DOMAIN, SERVICE_ADD_TORRENT, add_torrent, schema=SERVICE_ADD_TORRENT_SCHEMA
        )

        self.hass.services.async_register(
            DOMAIN,
            SERVICE_REMOVE_TORRENT,
            remove_torrent,
            schema=SERVICE_REMOVE_TORRENT_SCHEMA,
        )

        self.hass.services.async_register(
            DOMAIN,
            SERVICE_START_TORRENT,
            start_torrent,
            schema=SERVICE_START_TORRENT_SCHEMA,
        )

        self.hass.services.async_register(
            DOMAIN,
            SERVICE_STOP_TORRENT,
            stop_torrent,
            schema=SERVICE_STOP_TORRENT_SCHEMA,
        )

        self.config_entry.add_update_listener(self.async_options_updated)

        return True

    def add_options(self):
        """Add options for entry.

        Migrates scan interval / limit / order from entry data into entry
        options the first time the entry is set up.
        """
        if not self.config_entry.options:
            scan_interval = self.config_entry.data.get(
                CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL
            )
            limit = self.config_entry.data.get(CONF_LIMIT, DEFAULT_LIMIT)
            order = self.config_entry.data.get(CONF_ORDER, DEFAULT_ORDER)
            options = {
                CONF_SCAN_INTERVAL: scan_interval,
                CONF_LIMIT: limit,
                CONF_ORDER: order,
            }

            self.hass.config_entries.async_update_entry(
                self.config_entry, options=options
            )

    def set_scan_interval(self, scan_interval):
        """Update scan interval."""

        def refresh(event_time):
            """Get the latest data from Transmission."""
            self._tm_data.update()

        if self.unsub_timer is not None:
            self.unsub_timer()
        self.unsub_timer = async_track_time_interval(
            self.hass, refresh, timedelta(seconds=scan_interval)
        )

    @staticmethod
    async def async_options_updated(hass, entry):
        """Triggered by config entry options updates."""
        tm_client = hass.data[DOMAIN][entry.entry_id]
        tm_client.set_scan_interval(entry.options[CONF_SCAN_INTERVAL])
        await hass.async_add_executor_job(tm_client.api.update)
class TransmissionData:
    """Get the latest data and update the states."""

    def __init__(self, hass, config, api: transmissionrpc.Client):
        """Initialize the Transmission RPC API."""
        self.hass = hass
        self.config = config
        self.data = None  # type: transmissionrpc.Session
        self.available = True  # type: bool
        self._all_torrents = []  # type: List[transmissionrpc.Torrent]
        self._api = api  # type: transmissionrpc.Client
        self._completed_torrents = []  # type: List[transmissionrpc.Torrent]
        self._session = None  # type: transmissionrpc.Session
        self._started_torrents = []  # type: List[transmissionrpc.Torrent]
        self._torrents = []  # type: List[transmissionrpc.Torrent]

    @property
    def host(self):
        """Return the host name."""
        return self.config.data[CONF_HOST]

    @property
    def signal_update(self):
        """Update signal per transmission entry."""
        return f"{DATA_UPDATED}-{self.host}"

    @property
    def torrents(self) -> List[transmissionrpc.Torrent]:
        """Get the list of torrents."""
        return self._torrents

    def update(self):
        """Get the latest data from Transmission instance."""
        try:
            self.data = self._api.session_stats()
            self._torrents = self._api.get_torrents()
            self._session = self._api.get_session()

            self.check_completed_torrent()
            self.check_started_torrent()
            self.check_removed_torrent()
            _LOGGER.debug("Torrent Data for %s Updated", self.host)

            self.available = True
        except TransmissionError:
            self.available = False
            _LOGGER.error("Unable to connect to Transmission client %s", self.host)
        # Notify entities even on failure so they can reflect unavailability.
        dispatcher_send(self.hass, self.signal_update)

    def init_torrent_list(self):
        """Initialize torrent lists."""
        self._torrents = self._api.get_torrents()
        self._completed_torrents = [
            torrent for torrent in self._torrents if torrent.status == "seeding"
        ]
        self._started_torrents = [
            torrent for torrent in self._torrents if torrent.status == "downloading"
        ]

    def _check_status_transitions(self, previous, status, event_name):
        """Fire event_name for each torrent newly in *status*; return current.

        Shared by check_completed_torrent and check_started_torrent, which
        previously duplicated this diff-and-fire logic.
        """
        known_names = {torrent.name for torrent in previous}
        current = [
            torrent for torrent in self._torrents if torrent.status == status
        ]
        for torrent in current:
            if torrent.name not in known_names:
                self.hass.bus.fire(
                    event_name, {"name": torrent.name, "id": torrent.id}
                )
        return current

    def check_completed_torrent(self):
        """Get completed torrent functionality."""
        self._completed_torrents = self._check_status_transitions(
            self._completed_torrents, "seeding", EVENT_DOWNLOADED_TORRENT
        )

    def check_started_torrent(self):
        """Get started torrent functionality."""
        self._started_torrents = self._check_status_transitions(
            self._started_torrents, "downloading", EVENT_STARTED_TORRENT
        )

    def check_removed_torrent(self):
        """Get removed torrent functionality."""
        current_torrent_names = {torrent.name for torrent in self._torrents}

        for torrent in self._all_torrents:
            if torrent.name not in current_torrent_names:
                self.hass.bus.fire(
                    EVENT_REMOVED_TORRENT, {"name": torrent.name, "id": torrent.id}
                )
        self._all_torrents = self._torrents.copy()

    def start_torrents(self):
        """Start all torrents."""
        if not self._torrents:
            return
        self._api.start_all()

    def stop_torrents(self):
        """Stop all active torrents."""
        torrent_ids = [torrent.id for torrent in self._torrents]
        self._api.stop_torrent(torrent_ids)

    def set_alt_speed_enabled(self, is_enabled):
        """Set the alternative speed flag."""
        self._api.set_session(alt_speed_enabled=is_enabled)

    def get_alt_speed_enabled(self):
        """Get the alternative speed flag."""
        if self._session is None:
            return None

        return self._session.alt_speed_enabled
| |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v8.resources.types import parental_status_view
from google.ads.googleads.v8.services.types import parental_status_view_service
from .base import ParentalStatusViewServiceTransport, DEFAULT_CLIENT_INFO
class ParentalStatusViewServiceGrpcTransport(
    ParentalStatusViewServiceTransport
):
    """gRPC backend transport for ParentalStatusViewService.

    Service to manage parental status views.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """

    def __init__(
        self,
        *,
        host: str = "googleads.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        credentials_file: str = None,
        scopes: Sequence[str] = None,
        channel: grpc.Channel = None,
        api_mtls_endpoint: str = None,
        client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
        ssl_channel_credentials: grpc.ChannelCredentials = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for grpc channel. It is ignored if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
              creation failed for any reason.
        """
        self._ssl_channel_credentials = ssl_channel_credentials

        # Three mutually exclusive channel setups below: an explicit channel,
        # the deprecated mTLS path, or a plain TLS channel built here.
        if channel:
            # Sanity check: Ensure that channel and credentials are not both
            # provided.
            credentials = False

            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        elif api_mtls_endpoint:
            warnings.warn(
                "api_mtls_endpoint and client_cert_source are deprecated",
                DeprecationWarning,
            )

            host = (
                api_mtls_endpoint
                if ":" in api_mtls_endpoint
                else api_mtls_endpoint + ":443"
            )

            if credentials is None:
                credentials, _ = google.auth.default(
                    scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
                )

            # Create SSL credentials with client_cert_source or application
            # default SSL credentials.
            if client_cert_source:
                cert, key = client_cert_source()
                ssl_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
            else:
                ssl_credentials = SslCredentials().ssl_credentials

            # create a new channel. The provided one is ignored.
            self._grpc_channel = type(self).create_channel(
                host,
                credentials=credentials,
                credentials_file=credentials_file,
                ssl_credentials=ssl_credentials,
                scopes=scopes or self.AUTH_SCOPES,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            self._ssl_channel_credentials = ssl_credentials
        else:
            host = host if ":" in host else host + ":443"

            if credentials is None:
                credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)

            # create a new channel. The provided one is ignored.
            self._grpc_channel = type(self).create_channel(
                host,
                credentials=credentials,
                ssl_credentials=ssl_channel_credentials,
                scopes=self.AUTH_SCOPES,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )

        # Stub cache: populated lazily by the RPC properties below.
        self._stubs = {}  # type: Dict[str, Callable]

        # Run the base constructor.
        super().__init__(
            host=host, credentials=credentials, client_info=client_info,
        )

    @classmethod
    def create_channel(
        cls,
        host: str = "googleads.googleapis.com",
        credentials: ga_credentials.Credentials = None,
        scopes: Optional[Sequence[str]] = None,
        **kwargs,
    ) -> grpc.Channel:
        """Create and return a gRPC channel object.

        Args:
            address (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.

        Returns:
            grpc.Channel: A gRPC channel object.
        """
        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            scopes=scopes or cls.AUTH_SCOPES,
            **kwargs,
        )

    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel designed to connect to this service.
        """
        return self._grpc_channel

    @property
    def get_parental_status_view(
        self,
    ) -> Callable[
        [parental_status_view_service.GetParentalStatusViewRequest],
        parental_status_view.ParentalStatusView,
    ]:
        r"""Return a callable for the get parental status view method over gRPC.

        Returns the requested parental status view in full detail.

        List of thrown errors: `AuthenticationError <>`__
        `AuthorizationError <>`__ `HeaderError <>`__
        `InternalError <>`__ `QuotaError <>`__ `RequestError <>`__

        Returns:
            Callable[[~.GetParentalStatusViewRequest],
                    ~.ParentalStatusView]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_parental_status_view" not in self._stubs:
            self._stubs[
                "get_parental_status_view"
            ] = self.grpc_channel.unary_unary(
                "/google.ads.googleads.v8.services.ParentalStatusViewService/GetParentalStatusView",
                request_serializer=parental_status_view_service.GetParentalStatusViewRequest.serialize,
                response_deserializer=parental_status_view.ParentalStatusView.deserialize,
            )
        return self._stubs["get_parental_status_view"]
__all__ = ("ParentalStatusViewServiceGrpcTransport",)
| |
#-------------------------------------------------------------------------
# The Azure Batch Apps Python Client
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#--------------------------------------------------------------------------
"""Batch Apps Client Utility Module"""
import re
import urllib
import sys
import traceback
try:
from urllib.parse import quote as urlquote, unquote as urlunquote
from urllib.parse import urlsplit
except ImportError:
from urllib import quote as urlquote, unquote as urlunquote
from urlparse import urlsplit
import logging
LOG = logging.getLogger('batch_apps')
def parse_date_string(time_string):
    """Format datetime string into an easily comparable and REST happy form.

    :Args:
        - time_string (str): The datetime string to be formatted.

    :Returns:
        The string formatted for use with the REST API (str).
    """
    # Strip everything but the digits, then pad with zeros so the result is
    # always 17 characters (yyyymmddHHMMSSfff).
    digits = re.findall(r'\d+', time_string)
    formatted = ''.join(digits) + "000000"
    LOG.debug("Parsed date string {in_t} to "
              "{out_t}".format(in_t=time_string, out_t=formatted[0:17]))
    return formatted[0:17]
def url_from_filename(filename):
    """Percent-encode a filename for use in a URL, version independent.

    ``urlquote`` is aliased at import time to the correct implementation for
    the running Python version.

    :Args:
        - filename (str): The filename to be used in the URL.

    :Returns:
        - The correctly formatted filename (str).
    """
    return urlquote(filename)
def filename_from_url(url, ext):
    """Extract a valid filename from a URL

    :Args:
        - url (str): URL to extract the filename from.
        - ext (str): An additional file extension if necessary.
          May be ``None``.

    :Returns:
        - A valid filename.
    """
    # Take the last path segment and undo any percent-encoding.
    path = urlsplit(url).path
    encoded_name = path.rpartition('/')[2]
    filename = urlunquote(encoded_name)
    LOG.debug("Filename {fn} with extension {ex} from URL "
              "{ur}".format(fn=filename, ex=ext, ur=url))
    if ext:
        return filename + ext
    return filename
def format_dictionary(dictionary):
    """Format parameter dictionary into a sequence for REST consumption.

    :Args:
        - dictionary (dict): parameter dict in the format
          {'parameter': 'value'}.

    :Returns:
        - A generator of REST entries in the format
          [{'Name': 'parameter', 'Value': 'value'}, ...]
    """
    return (
        {"Name": str(name), "Value": str(value)}
        for name, value in dictionary.items()
    )
def valid_keys(resp_dict, search_keys):
    '''
    Version independent check that a list of keys are all present in a
    given dictionary.

    :Args:
        - resp_dict (dict): A dictionary from a server response.
        - search_keys (list): A list of keys to verify are present
          in ``resp_dict``.
    :Returns:
        - ``True`` if all keys present in ``resp_dict`` else ``False``.
    '''
    if not isinstance(resp_dict, dict):
        return False
    # The previous first branch did ``list(resp_dict) & set(search_keys)``,
    # which always raises TypeError (list has no ``&``), so only the
    # except-path ever ran.  Use the intended set intersection directly.
    matching_keys = set(search_keys).intersection(resp_dict)
    return len(matching_keys) == len(search_keys)
def get_trace(excep):
    """Retrieve the traceback of the most recently handled exception.

    :Args:
        - excep (:class:`Exception`): The exception that was thrown.
    :Returns:
        The traceback information (str), or ``None`` if it could not
        be formatted.
    """
    try:
        return traceback.format_exc()
    except AttributeError:
        return None
class Listener(object):
    """
    Process wrapper object for starting, stopping and monitoring
    background subprocesses.

    :Attributes:
        - pid (int): The process pid.
        - name (str): The process name.
        - children (list): A list of dependent :class:`.Listener` objects.
    """

    def __init__(self, process, *child):
        """Create new listener.

        :Args:
            - process (:class:`multiprocessing.Process`): The process to be
              wrapped for monitoring.
            - child (:class:`.Listener`): Any child processes that should be
              stopped before the parent process is stopped.
        """
        self._proc = process
        self.pid = self._proc.pid
        self.name = self._proc.name
        self.children = list(child)

    def working(self):
        """Check if the background process is still running.

        :Returns:
            ``True`` if the process is still running, else ``False``.
        """
        self.pid = self._proc.pid
        return self._proc.is_alive()

    def stop(self):
        """Terminate the background process.

        Children are stopped first so they are not orphaned when the
        parent terminates.
        """
        self.pid = self._proc.pid
        try:
            for child in self.children:
                child.stop()
            self._proc.terminate()
            self._proc.join()
        except OSError as exc:
            # Best-effort shutdown: an interrupted process is only logged.
            LOG.debug("Interrupted download process: {0}".format(exc))

    def listen(self, timeout=100):
        """Join the background process for a period of time.

        :Kwargs:
            - timeout (int or float): The number of seconds that the
              subprocess will be monitored. The default is 100 seconds.
              If set to ``None`` (or 0), the process will be listened
              to indefinitely.
        :Raises:
            :class:`ValueError` if an invalid timeout is passed in.
        """
        # Generalized from int-only: any real number of seconds is accepted.
        if not timeout or isinstance(timeout, (int, float)):
            self._proc.join(timeout)
        else:
            raise ValueError(
                "Invalid timeout, please set a number of seconds (int)")
| |
import numpy as np
import matplotlib.pyplot as plt
import dpmm
from test_utils import timer
from dpmm.utils import plot_ellipse, random_wish, random_invwish
from unittest import skip
@skip
@timer
def test_GaussianMeanKnownVariance():
    """Histogram prior draws against the analytic density for a visual check."""
    mu_0 = 1.1
    sigsqr_0 = 0.42
    sigsqr = 0.21
    model = dpmm.GaussianMeanKnownVariance(mu_0, sigsqr_0, sigsqr)
    samples = model.sample(size=1000)
    # Clip the x-range to the central 98% of the samples.
    xlim = np.percentile(samples, [1.0, 99.0])
    grid = np.linspace(xlim[0], xlim[1], 100)
    fig = plt.figure(figsize=(5, 3))
    axes = fig.add_subplot(111)
    axes.hist(samples, bins=30, normed=True, alpha=0.5, color='k')
    axes.set_xlim(xlim)
    axes.plot(grid, model(grid), c='k', lw=3)
    axes.set_xlabel("$\mu$")
    axes.set_ylabel("Pr($\mu$)")
    fig.tight_layout()
    axes.set_title("GaussianMeanKnownVariance")
    fig.savefig("plots/GaussianMeanKnownVariance_samples.png")
@skip
@timer
def test_InvGamma():
    """Histogram InvGamma prior draws against the analytic density."""
    alpha = 1.4
    beta = 1.3
    mu = 1.2
    model = dpmm.InvGamma(alpha, beta, mu)
    samples = model.sample(size=1000)
    # Clip the long upper tail at the 95th percentile.
    xlim = np.percentile(samples, [0.0, 95.0])
    grid = np.linspace(xlim[0], xlim[1], 100)
    fig = plt.figure(figsize=(5, 3))
    axes = fig.add_subplot(111)
    axes.hist(samples, bins=30, range=xlim, normed=True, alpha=0.5, color='k')
    axes.set_xlim(xlim)
    axes.plot(grid, model(grid), c='k', lw=3)
    axes.set_xlabel("$\sigma^2$")
    axes.set_ylabel("Pr($\sigma^2$)")
    fig.tight_layout()
    axes.set_title("InvGamma")
    fig.savefig("plots/InvGamma_samples.png")
@skip
@timer
def test_NormInvChi2():
    """Plot NormInvChi2 marginal sample histograms for mu and the variance."""
    mu_0 = 1.5
    kappa_0 = 2.3
    sigsqr_0 = 0.24
    nu_0 = 2
    model = dpmm.NormInvChi2(mu_0, kappa_0, sigsqr_0, nu_0)
    samples = model.sample(size=1000)
    mu_samples = np.array([s[0] for s in samples])
    var_samples = np.array([s[1] for s in samples])

    def _plot(values, pct, marginal, xlabel, ylabel, outfile):
        # Histogram `values` against the analytic marginal density and save.
        xlim = np.percentile(values, pct)
        fig = plt.figure(figsize=(5, 3))
        axes = fig.add_subplot(111)
        axes.hist(values, bins=30, range=xlim, normed=True, alpha=0.5, color='k')
        axes.set_xlim(xlim)
        grid = np.linspace(xlim[0], xlim[1], 100)
        axes.plot(grid, marginal(grid), c='k', lw=3)
        axes.set_xlabel(xlabel)
        axes.set_ylabel(ylabel)
        fig.tight_layout()
        axes.set_title("NormInvChi2")
        fig.savefig(outfile)

    _plot(mu_samples, [2.5, 97.5], model.marginal_mu,
          "$\mu$", "Pr($\mu$)", "plots/NormInvChi2_mu_samples.png")
    _plot(var_samples, [0.0, 95.0], model.marginal_var,
          "$\sigma^2$", "Pr($\sigma^2$)", "plots/NormInvChi2_var_samples.png")
@skip
@timer
def test_NormInvGamma():
    """Plot NormInvGamma marginal sample histograms for mu and the variance."""
    mu_0 = 1.5
    V_0 = 1.2
    a_0 = 1.24
    b_0 = 1.1
    model = dpmm.NormInvGamma(mu_0, V_0, a_0, b_0)
    samples = model.sample(size=1000)
    mu_samples = np.array([s[0] for s in samples])
    var_samples = np.array([s[1] for s in samples])

    def _plot(values, pct, marginal, xlabel, ylabel, outfile):
        # Histogram `values` against the analytic marginal density and save.
        xlim = np.percentile(values, pct)
        fig = plt.figure(figsize=(5, 3))
        axes = fig.add_subplot(111)
        axes.hist(values, bins=30, range=xlim, normed=True, alpha=0.5, color='k')
        axes.set_xlim(xlim)
        grid = np.linspace(xlim[0], xlim[1], 100)
        axes.plot(grid, marginal(grid), c='k', lw=3)
        axes.set_xlabel(xlabel)
        axes.set_ylabel(ylabel)
        fig.tight_layout()
        axes.set_title("NormInvGamma")
        fig.savefig(outfile)

    _plot(mu_samples, [2.5, 97.5], model.marginal_mu,
          "$\mu$", "Pr($\mu$)", "plots/NormInvGamma_mu_samples.png")
    _plot(var_samples, [0.0, 95.0], model.marginal_var,
          "$\sigma^2$", "Pr($\sigma^2$)", "plots/NormInvGamma_var_samples.png")
@skip
@timer
def test_NormInvWish():
    """Sanity-check NormInvWish prior sampling.

    First compares sample moments of (mu, Sigma) draws against the analytic
    expectations, then plots prior ellipse draws for several
    (kappa_0, nu_0) settings.  Python 2 module: output uses print statements.
    """
    mu_0 = np.r_[0.3, -0.2]
    d = len(mu_0)
    Lam_0 = np.linalg.inv(np.array([[2, 1.1], [1.1, 1.2]]))
    kappa_0 = 2.1
    nu_0 = 8
    model = dpmm.NormInvWish(mu_0, kappa_0, Lam_0, nu_0)
    # First check some numerics
    Nsample = 5000
    samples = model.sample(size=Nsample)
    mu_samples = [s[0] for s in samples]
    cov_samples = [s[1] for s in samples]
    mean = np.mean(mu_samples, axis=0)
    # standard error of the mean
    std = np.std(mu_samples, axis=0)/np.sqrt(Nsample)
    print "NormInvWish mu_0 = {}".format(mu_0)
    print "NormInvWish E(mu) = {} +/- {}".format(mean, std)
    mean_cov = np.mean(cov_samples, axis=0)
    std_cov = np.std(cov_samples, axis=0)/np.sqrt(Nsample)
    # E(Sigma) of the inverse-Wishart part is Lam_0^-1 / (nu_0 - d - 1)
    print "NormInvWish (Lam_0)^(-1)/(nu_0-d-1) = \n{}".format(np.linalg.inv(Lam_0)/(nu_0-d-1))
    print "NormInvWish E(Sig) = \n{}\n +/-\n{}".format(mean_cov, std_cov)
    # Now try some plots with different values of kappa_0 and nu_0
    f = plt.figure(figsize=(7, 7))
    for i, (kappa_0, nu_0) in enumerate(zip([0.4, 0.4, 6.5, 6.5],
                                            [10, 4, 10, 4])):
        model = dpmm.NormInvWish(mu_0, kappa_0, Lam_0, nu_0)
        samples = model.sample(size=25)
        ax = f.add_subplot(2, 2, i+1)
        # black: prior draws; red: the analytic expectation ellipse
        for sample in samples:
            mu, Sig = sample
            plot_ellipse(mu, Sig, ax=ax, facecolor='none', edgecolor='k', alpha=0.2)
        plot_ellipse(mu_0, np.linalg.inv(Lam_0)/(nu_0-d-1), ax=ax, facecolor='none', edgecolor='r')
        ax.set_xlim(-3, 3)
        ax.set_ylim(-3, 3)
        ax.axvline(mu_0[0], c='r', alpha=0.1)
        ax.axhline(mu_0[1], c='r', alpha=0.1)
        ax.set_title(r"$\kappa_0$={}, $\nu_0$={}".format(kappa_0, nu_0))
        print np.mean([s[1] for s in samples], axis=0)
    f.savefig("plots/NormInvWish_samples.png")
@skip
@timer
def test_random_wish():
    """Compare random_wish sample moments to the analytic Wishart mean dof*S.

    Python 2 module: output uses print statements.
    """
    dof = 3
    S = np.array([[1.0, 0.25], [0.25, 0.5]])
    Nsamples = 5000
    samples = random_wish(dof, S, size=Nsamples)
    mean = np.mean(samples, axis=0)
    # standard error of the mean
    std = np.std(samples, axis=0)/np.sqrt(Nsamples)
    print "E(wish) = \n{}".format(dof * S)
    print "<wish> = \n{}\n +/-\n{}".format(mean, std)
@skip
@timer
def test_random_invwish():
    """Compare random_invwish sample moments to the analytic inverse-Wishart
    mean S/(dof-d-1).

    Python 2 module: output uses print statements.
    """
    dof = 6
    d = 2
    S = np.array([[1.0, 0.25], [0.25, 0.5]])
    invS = np.linalg.inv(S)
    Nsamples = 5000
    samples = random_invwish(dof, invS, size=Nsamples)
    mean = np.mean(samples, axis=0)
    # standard error of the mean
    std = np.std(samples, axis=0)/np.sqrt(Nsamples)
    print "E(invwish) = \n{}".format(S/(dof-d-1))
    print "<invwish> = \n{}\n +/-\n{}".format(mean, std)
@skip
@timer
def test_ellipse_plotter():
    """Render four covariance ellipses in a 2x2 grid to plots/ellipse.png."""
    covariances = [np.array([[1.0, 0.0], [0.0, 0.25]]),
                   np.array([[0.25, 0.0], [0.0, 1.0]]),
                   np.array([[1.0, 0.8], [0.8, 1.0]]),
                   np.array([[1.0, -0.8], [-0.8, 1.0]])]
    fig = plt.figure(figsize=(7, 7))
    for panel, cov in enumerate(covariances, start=1):
        axes = fig.add_subplot(2, 2, panel)
        axes.set_xlim(-1, 1)
        axes.set_ylim(-1, 1)
        plot_ellipse([0., 0.], cov)
        axes.set_title("$\Sigma$={}".format(cov))
    fig.tight_layout()
    fig.savefig("plots/ellipse.png")
if __name__ == "__main__":
    # Run every diagnostic check when invoked as a script.
    # NOTE(review): each test is decorated with a bare @skip (no reason
    # argument); under unittest versions that require skip(reason), these
    # direct calls may not behave as intended -- confirm.
    test_GaussianMeanKnownVariance()
    test_InvGamma()
    test_NormInvChi2()
    test_NormInvGamma()
    test_NormInvWish()
    test_random_wish()
    test_random_invwish()
    test_ellipse_plotter()
| |
'''
Modular Input Script
Copyright (C) 2012 Splunk, Inc.
All Rights Reserved
'''
import sys,logging,os,time,re
import xml.dom.minidom
SPLUNK_HOME = os.environ.get("SPLUNK_HOME")
# Globals populated at runtime from the stanza configuration in do_run().
RESPONSE_HANDLER_INSTANCE = None
SPLUNK_PORT = 8089
STANZA = None
SESSION_TOKEN = None
REGEX_PATTERN = None
# dynamically load in any eggs in the app's bin directory
# NOTE(review): original comment said snmp_ta but the path is rest_ta/bin.
EGG_DIR = SPLUNK_HOME + "/etc/apps/rest_ta/bin/"
for filename in os.listdir(EGG_DIR):
    if filename.endswith(".egg"):
        sys.path.append(EGG_DIR + filename)
# Third-party imports are deferred until the egg paths are on sys.path.
import requests,json
from requests.auth import HTTPBasicAuth
from requests.auth import HTTPDigestAuth
from requests_oauthlib import OAuth1
from requests_oauthlib import OAuth2Session
from oauthlib.oauth2 import WebApplicationClient
from requests.auth import AuthBase
from splunklib.client import connect
from splunklib.client import Service
# set up logging
# NOTE(review): the bare "logging.root" expression below is a no-op.
logging.root
logging.root.setLevel(logging.ERROR)
formatter = logging.Formatter('%(levelname)s %(message)s')
# with zero args the StreamHandler writes to stderr
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logging.root.addHandler(handler)
# Introspection scheme advertised to splunkd via --scheme; declares every
# argument a "rest" input stanza accepts.
SCHEME = """<scheme>
<title>REST</title>
<description>REST API input for polling data from RESTful endpoints</description>
<use_external_validation>true</use_external_validation>
<streaming_mode>xml</streaming_mode>
<use_single_instance>false</use_single_instance>
<endpoint>
<args>
<arg name="name">
<title>REST input name</title>
<description>Name of this REST input</description>
</arg>
<arg name="endpoint">
<title>Endpoint URL</title>
<description>URL to send the HTTP GET request to</description>
<required_on_edit>false</required_on_edit>
<required_on_create>true</required_on_create>
</arg>
<arg name="auth_type">
<title>Authentication Type</title>
<description>Authentication method to use : none | basic | digest | oauth1 | oauth2 | custom</description>
<required_on_edit>false</required_on_edit>
<required_on_create>true</required_on_create>
</arg>
<arg name="auth_user">
<title>Authentication User</title>
<description>Authentication user for BASIC or DIGEST auth</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="auth_password">
<title>Authentication Password</title>
<description>Authentication password for BASIC or DIGEST auth</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oauth1_client_key">
<title>OAUTH 1 Client Key</title>
<description>OAUTH 1 client key</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oauth1_client_secret">
<title>OAUTH 1 Client Secret</title>
<description>OAUTH 1 client secret</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oauth1_access_token">
<title>OAUTH 1 Access Token</title>
<description>OAUTH 1 access token</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oauth1_access_token_secret">
<title>OAUTH 1 Access Token Secret</title>
<description>OAUTH 1 access token secret</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oauth2_token_type">
<title>OAUTH 2 Token Type</title>
<description>OAUTH 2 token type</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oauth2_access_token">
<title>OAUTH 2 Access Token</title>
<description>OAUTH 2 access token</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oauth2_expires_in">
<title>OAUTH 2 Expiration Time</title>
<description>OAUTH 2 expiration time</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oauth2_refresh_token">
<title>OAUTH 2 Refresh Token</title>
<description>OAUTH 2 refresh token</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oauth2_refresh_url">
<title>OAUTH 2 Token Refresh URL</title>
<description>OAUTH 2 token refresh URL</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oauth2_refresh_props">
<title>OAUTH 2 Token Refresh Propertys</title>
<description>OAUTH 2 token refresh propertys : : key=value,key2=value2</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oauth2_client_id">
<title>OAUTH 2 Client ID</title>
<description>OAUTH 2 client ID</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="oauth2_client_secret">
<title>OAUTH 2 Client Secret</title>
<description>OAUTH 2 client secret</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="http_header_propertys">
<title>HTTP Header Propertys</title>
<description>Custom HTTP header propertys : key=value,key2=value2</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="url_args">
<title>URL Arguments</title>
<description>Custom URL arguments : key=value,key2=value2</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="response_type">
<title>Response Type</title>
<description>Rest Data Response Type : json | xml | text</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="streaming_request">
<title>Streaming Request</title>
<description>Whether or not this is a HTTP streaming request : true | false</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="http_proxy">
<title>HTTP Proxy Address</title>
<description>HTTP Proxy Address</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="https_proxy">
<title>HTTPs Proxy Address</title>
<description>HTTPs Proxy Address</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="request_timeout">
<title>Request Timeout</title>
<description>Request Timeout in seconds</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="backoff_time">
<title>Backoff Time</title>
<description>Time in seconds to wait for retry after error or timeout</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="polling_interval">
<title>Polling Interval</title>
<description>Interval time in seconds to poll the endpoint</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="index_error_response_codes">
<title>Index Error Responses</title>
<description>Whether or not to index error response codes : true | false</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="response_handler">
<title>Response Handler</title>
<description>Python classname of custom response handler</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="response_handler_args">
<title>Response Handler Arguments</title>
<description>Response Handler arguments string , key=value,key2=value2</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="response_filter_pattern">
<title>Response Filter Pattern</title>
<description>Python Regex pattern, if present , responses must match this pattern to be indexed</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="custom_auth_handler">
<title>Custom_Auth Handler</title>
<description>Python classname of custom auth handler</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
<arg name="custom_auth_handler_args">
<title>Custom_Auth Handler Arguments</title>
<description>Custom Authentication Handler arguments string , key=value,key2=value2</description>
<required_on_edit>false</required_on_edit>
<required_on_create>false</required_on_create>
</arg>
</args>
</endpoint>
</scheme>
"""
def do_validate():
    """Validate the stanza configuration passed by splunkd.

    Currently only reads the validation payload; no checks are performed.
    """
    val_config = get_validation_config()
    # TODO: on invalid config, call print_validation_error() and sys.exit(2)
def do_run():
    """Poll the configured REST endpoint forever, streaming responses to Splunk.

    Reads the stanza configuration from splunkd, sets up authentication
    (basic | digest | oauth1 | oauth2 | custom), then loops: issue the HTTP
    GET, hand the response to the response handler, sleep, repeat.  On
    timeout or HTTP error it sleeps ``backoff_time`` and retries.
    """
    config = get_input_config()
    #setup some globals
    server_uri = config.get("server_uri")
    global SPLUNK_PORT
    global STANZA
    global SESSION_TOKEN
    # NOTE(review): assumes server_uri looks like "https://127.0.0.1:8089"
    # so that chars 18+ are the management port -- confirm for other hosts.
    SPLUNK_PORT = server_uri[18:]
    STANZA = config.get("name")
    SESSION_TOKEN = config.get("session_key")
    #params
    endpoint=config.get("endpoint")
    #none | basic | digest | oauth1 | oauth2
    auth_type=config.get("auth_type","none")
    #for basic and digest
    auth_user=config.get("auth_user")
    auth_password=config.get("auth_password")
    #for oauth1
    oauth1_client_key=config.get("oauth1_client_key")
    oauth1_client_secret=config.get("oauth1_client_secret")
    oauth1_access_token=config.get("oauth1_access_token")
    oauth1_access_token_secret=config.get("oauth1_access_token_secret")
    #for oauth2
    oauth2_token_type=config.get("oauth2_token_type","Bearer")
    oauth2_access_token=config.get("oauth2_access_token")
    oauth2_expires_in=config.get("oauth2_expires_in")
    oauth2_refresh_token=config.get("oauth2_refresh_token")
    oauth2_refresh_url=config.get("oauth2_refresh_url")
    oauth2_refresh_props_str=config.get("oauth2_refresh_props")
    oauth2_client_id=config.get("oauth2_client_id")
    oauth2_client_secret=config.get("oauth2_client_secret")
    # "key=value,key2=value2" strings are parsed into dicts below
    oauth2_refresh_props={}
    if not oauth2_refresh_props_str is None:
        oauth2_refresh_props = dict((k.strip(), v.strip()) for k,v in
                                    (item.split('=') for item in oauth2_refresh_props_str.split(',')))
    # client credentials are always included in the token-refresh kwargs
    oauth2_refresh_props['client_id'] = oauth2_client_id
    oauth2_refresh_props['client_secret'] = oauth2_client_secret
    http_header_propertys={}
    http_header_propertys_str=config.get("http_header_propertys")
    if not http_header_propertys_str is None:
        http_header_propertys = dict((k.strip(), v.strip()) for k,v in
                                     (item.split('=') for item in http_header_propertys_str.split(',')))
    url_args={}
    url_args_str=config.get("url_args")
    if not url_args_str is None:
        url_args = dict((k.strip(), v.strip()) for k,v in
                        (item.split('=') for item in url_args_str.split(',')))
    #json | xml | text
    response_type=config.get("response_type","text")
    streaming_request=int(config.get("streaming_request",0))
    http_proxy=config.get("http_proxy")
    https_proxy=config.get("https_proxy")
    proxies={}
    if not http_proxy is None:
        proxies["http"] = http_proxy
    if not https_proxy is None:
        proxies["https"] = https_proxy
    request_timeout=int(config.get("request_timeout",30))
    backoff_time=int(config.get("backoff_time",10))
    polling_interval=int(config.get("polling_interval",60))
    index_error_response_codes=int(config.get("index_error_response_codes",0))
    response_filter_pattern=config.get("response_filter_pattern")
    if response_filter_pattern:
        global REGEX_PATTERN
        REGEX_PATTERN = re.compile(response_filter_pattern)
    # instantiate the (possibly custom) response handler by class name
    response_handler_args={}
    response_handler_args_str=config.get("response_handler_args")
    if not response_handler_args_str is None:
        response_handler_args = dict((k.strip(), v.strip()) for k,v in
                                     (item.split('=') for item in response_handler_args_str.split(',')))
    response_handler=config.get("response_handler","DefaultResponseHandler")
    module = __import__("responsehandlers")
    class_ = getattr(module,response_handler)
    global RESPONSE_HANDLER_INSTANCE
    RESPONSE_HANDLER_INSTANCE = class_(**response_handler_args)
    # optionally instantiate a custom auth handler by class name
    custom_auth_handler=config.get("custom_auth_handler")
    if custom_auth_handler:
        module = __import__("authhandlers")
        class_ = getattr(module,custom_auth_handler)
        custom_auth_handler_args={}
        custom_auth_handler_args_str=config.get("custom_auth_handler_args")
        if not custom_auth_handler_args_str is None:
            custom_auth_handler_args = dict((k.strip(), v.strip()) for k,v in (item.split('=') for item in custom_auth_handler_args_str.split(',')))
        CUSTOM_AUTH_HANDLER_INSTANCE = class_(**custom_auth_handler_args)
    try:
        auth=None
        oauth2=None
        if auth_type == "basic":
            auth = HTTPBasicAuth(auth_user, auth_password)
        elif auth_type == "digest":
            auth = HTTPDigestAuth(auth_user, auth_password)
        elif auth_type == "oauth1":
            auth = OAuth1(oauth1_client_key, oauth1_client_secret,
                          oauth1_access_token ,oauth1_access_token_secret)
        elif auth_type == "oauth2":
            # the session auto-refreshes the token and persists it back to
            # Splunk via oauth2_token_updater
            token={}
            token["token_type"] = oauth2_token_type
            token["access_token"] = oauth2_access_token
            token["refresh_token"] = oauth2_refresh_token
            token["expires_in"] = oauth2_expires_in
            client = WebApplicationClient(oauth2_client_id)
            oauth2 = OAuth2Session(client, token=token,auto_refresh_url=oauth2_refresh_url,auto_refresh_kwargs=oauth2_refresh_props,token_updater=oauth2_token_updater)
        # NOTE(review): if auth_type is "custom" but no custom_auth_handler
        # was configured, CUSTOM_AUTH_HANDLER_INSTANCE is unbound here and
        # the next line raises NameError.
        elif auth_type == "custom" and CUSTOM_AUTH_HANDLER_INSTANCE:
            auth = CUSTOM_AUTH_HANDLER_INSTANCE
        req_args = {"verify" : False ,"stream" : bool(streaming_request) , "timeout" : float(request_timeout)}
        if auth:
            req_args["auth"]= auth
        if url_args:
            req_args["params"]= url_args
        if http_header_propertys:
            req_args["headers"]= http_header_propertys
        if proxies:
            req_args["proxies"]= proxies
        while True:
            # poll forever; back off on error, otherwise sleep polling_interval
            try:
                if oauth2:
                    r = oauth2.get(endpoint,**req_args)
                else:
                    r = requests.get(endpoint,**req_args)
            except requests.exceptions.Timeout,e:
                logging.error("HTTP Request Timeout error: %s" % str(e))
                time.sleep(float(backoff_time))
                continue
            try:
                r.raise_for_status()
                if streaming_request:
                    # streaming: index each non-empty line as it arrives
                    for line in r.iter_lines():
                        if line:
                            handle_output(r,line,response_type)
                else:
                    handle_output(r,r.text,response_type)
            except requests.exceptions.HTTPError,e:
                error_output = r.text
                error_http_code = r.status_code
                if index_error_response_codes:
                    error_event=""
                    # NOTE(review): int/str have no prettyPrint(); this line
                    # raises AttributeError when reached (looks copied from a
                    # pysnmp-based input) -- bug.
                    error_event += 'http_error_code = %s error_message = %s' % (error_http_code.prettyPrint(), error_output.prettyPrint())
                    print_xml_single_instance_mode(error_event)
                    sys.stdout.flush()
                logging.error("HTTP Request error: %s" % str(e))
                time.sleep(float(backoff_time))
                continue
            time.sleep(float(polling_interval))
    except RuntimeError,e:
        logging.error("Looks like an error: %s" % str(e))
        sys.exit(2)
def oauth2_token_updater(token):
    """Persist a refreshed OAuth2 token back to this input's Splunk stanza.

    Called by OAuth2Session after an automatic token refresh so the new
    access/refresh tokens survive a restart.
    """
    try:
        args = {'host':'localhost','port':SPLUNK_PORT,'token':SESSION_TOKEN}
        service = Service(**args)
        # STANZA looks like "<scheme>://<name>"; [7:] presumably strips a
        # "rest://" prefix -- confirm against inputs.conf naming.
        item = service.inputs.__getitem__(STANZA[7:])
        item.update(oauth2_access_token=token["access_token"],oauth2_refresh_token=token["refresh_token"],oauth2_expires_in=token["expires_in"])
    except RuntimeError,e:
        logging.error("Looks like an error updating the oauth2 token: %s" % str(e))
def handle_output(response,output,type):
    """Filter the response text and dispatch it to the response handler.

    :Args:
        - response: the requests Response object.
        - output: the text (or single streamed line) to be indexed.
        - type: declared response type, "json" | "xml" | "text".
    """
    try:
        # if a filter pattern is configured, drop non-matching output
        if REGEX_PATTERN:
            search_result = REGEX_PATTERN.search(output)
            if search_result == None:
                return
        RESPONSE_HANDLER_INSTANCE(response,output,type)
        sys.stdout.flush()
    except RuntimeError,e:
        logging.error("Looks like an error handle the response output: %s" % str(e))
# prints validation error data (XML-escaped) to be consumed by Splunk
def print_validation_error(s):
    print "<error><message>%s</message></error>" % encodeXMLText(s)
# prints a single event to stdout in the modular-input XML stream format
def print_xml_single_instance_mode(s):
    print "<stream><event><data>%s</data></event></stream>" % encodeXMLText(s)
# prints the raw value followed by a newline (simple, non-XML streaming)
def print_simple(s):
    print "%s\n" % s
def encodeXMLText(text):
    """Escape XML special characters in ``text`` for safe event streaming.

    ``&`` must be escaped first so the ampersands introduced by the later
    replacements are not double-escaped.

    :Args:
        - text (str): raw event text.
    :Returns:
        - The XML-escaped text (str).
    """
    # The previous replacements were identity operations (the entity names
    # had been stripped, leaving e.g. replace("&", "&")); restore the
    # standard XML entity escaping.
    text = text.replace("&", "&amp;")
    text = text.replace("\"", "&quot;")
    text = text.replace("'", "&apos;")
    text = text.replace("<", "&lt;")
    text = text.replace(">", "&gt;")
    return text
def usage():
    """Print a usage hint, log the misuse, and exit with status 2."""
    # NOTE(review): the %s placeholder is never interpolated; presumably this
    # was meant to be ``% sys.argv[0]``.
    print "usage: %s [--scheme|--validate-arguments]"
    logging.error("Incorrect Program Usage")
    sys.exit(2)
def do_scheme():
    """Print the introspection scheme XML for splunkd (--scheme mode)."""
    print SCHEME
# read XML configuration passed from splunkd; needs refactoring to support
# single instance mode
def get_input_config():
    """Parse the runtime configuration XML splunkd writes to stdin.

    :Returns:
        dict with "session_key", "server_uri", "name" (the stanza name),
        "checkpoint_dir", and one entry per stanza param.
    :Raises:
        Exception if the payload is empty or cannot be parsed.
    """
    config = {}
    try:
        # read everything from stdin
        config_str = sys.stdin.read()
        # parse the config XML
        doc = xml.dom.minidom.parseString(config_str)
        root = doc.documentElement
        session_key_node = root.getElementsByTagName("session_key")[0]
        if session_key_node and session_key_node.firstChild and session_key_node.firstChild.nodeType == session_key_node.firstChild.TEXT_NODE:
            data = session_key_node.firstChild.data
            config["session_key"] = data
        server_uri_node = root.getElementsByTagName("server_uri")[0]
        if server_uri_node and server_uri_node.firstChild and server_uri_node.firstChild.nodeType == server_uri_node.firstChild.TEXT_NODE:
            data = server_uri_node.firstChild.data
            config["server_uri"] = data
        conf_node = root.getElementsByTagName("configuration")[0]
        if conf_node:
            logging.debug("XML: found configuration")
            stanza = conf_node.getElementsByTagName("stanza")[0]
            if stanza:
                stanza_name = stanza.getAttribute("name")
                if stanza_name:
                    logging.debug("XML: found stanza " + stanza_name)
                    config["name"] = stanza_name
                    # every <param> becomes a flat config entry
                    params = stanza.getElementsByTagName("param")
                    for param in params:
                        param_name = param.getAttribute("name")
                        logging.debug("XML: found param '%s'" % param_name)
                        if param_name and param.firstChild and \
                           param.firstChild.nodeType == param.firstChild.TEXT_NODE:
                            data = param.firstChild.data
                            config[param_name] = data
                            logging.debug("XML: '%s' -> '%s'" % (param_name, data))
        checkpnt_node = root.getElementsByTagName("checkpoint_dir")[0]
        if checkpnt_node and checkpnt_node.firstChild and \
           checkpnt_node.firstChild.nodeType == checkpnt_node.firstChild.TEXT_NODE:
            config["checkpoint_dir"] = checkpnt_node.firstChild.data
        if not config:
            raise Exception, "Invalid configuration received from Splunk."
    except Exception, e:
        raise Exception, "Error getting Splunk configuration via STDIN: %s" % str(e)
    return config
# read the validation XML passed from splunkd; needs refactoring to support
# single instance mode
def get_validation_config():
    """Parse the validation XML splunkd writes to stdin.

    :Returns:
        dict mapping "stanza" to the item name, plus one entry per param.
    """
    val_data = {}
    # splunkd passes the whole validation payload on stdin
    doc = xml.dom.minidom.parseString(sys.stdin.read())
    root = doc.documentElement
    logging.debug("XML: found items")
    item_node = root.getElementsByTagName("item")[0]
    if item_node:
        logging.debug("XML: found item")
        val_data["stanza"] = item_node.getAttribute("name")
        for param in item_node.getElementsByTagName("param"):
            name = param.getAttribute("name")
            logging.debug("Found param %s" % name)
            first = param.firstChild
            if name and first and first.nodeType == first.TEXT_NODE:
                val_data[name] = first.data
    return val_data
if __name__ == '__main__':
    # Modular input entry point: splunkd invokes this script with
    # --scheme (introspection), --validate-arguments (validation),
    # or no arguments (run mode).
    if len(sys.argv) > 1:
        if sys.argv[1] == "--scheme":
            do_scheme()
        elif sys.argv[1] == "--validate-arguments":
            do_validate()
        else:
            usage()
    else:
        do_run()
    sys.exit(0)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import Action
from ._models_py3 import AdministratorDetails
from ._models_py3 import Attributes
from ._models_py3 import BackupKeyResult
from ._models_py3 import BackupSecretResult
from ._models_py3 import CertificateAttributes
from ._models_py3 import CertificateBundle
from ._models_py3 import CertificateCreateParameters
from ._models_py3 import CertificateImportParameters
from ._models_py3 import CertificateIssuerItem
from ._models_py3 import CertificateIssuerListResult
from ._models_py3 import CertificateIssuerSetParameters
from ._models_py3 import CertificateIssuerUpdateParameters
from ._models_py3 import CertificateItem
from ._models_py3 import CertificateListResult
from ._models_py3 import CertificateMergeParameters
from ._models_py3 import CertificateOperation
from ._models_py3 import CertificateOperationUpdateParameter
from ._models_py3 import CertificatePolicy
from ._models_py3 import CertificateUpdateParameters
from ._models_py3 import Contact
from ._models_py3 import Contacts
from ._models_py3 import DeletedCertificateBundle
from ._models_py3 import DeletedCertificateItem
from ._models_py3 import DeletedCertificateListResult
from ._models_py3 import DeletedKeyBundle
from ._models_py3 import DeletedKeyItem
from ._models_py3 import DeletedKeyListResult
from ._models_py3 import DeletedSecretBundle
from ._models_py3 import DeletedSecretItem
from ._models_py3 import DeletedSecretListResult
from ._models_py3 import Error
from ._models_py3 import IssuerAttributes
from ._models_py3 import IssuerBundle
from ._models_py3 import IssuerCredentials
from ._models_py3 import IssuerParameters
from ._models_py3 import JsonWebKey
from ._models_py3 import KeyAttributes
from ._models_py3 import KeyBundle
from ._models_py3 import KeyCreateParameters
from ._models_py3 import KeyImportParameters
from ._models_py3 import KeyItem
from ._models_py3 import KeyListResult
from ._models_py3 import KeyOperationResult
from ._models_py3 import KeyOperationsParameters
from ._models_py3 import KeyProperties
from ._models_py3 import KeyRestoreParameters
from ._models_py3 import KeySignParameters
from ._models_py3 import KeyUpdateParameters
from ._models_py3 import KeyVaultError
from ._models_py3 import KeyVerifyParameters
from ._models_py3 import KeyVerifyResult
from ._models_py3 import LifetimeAction
from ._models_py3 import OrganizationDetails
from ._models_py3 import PendingCertificateSigningRequestResult
from ._models_py3 import SasDefinitionAttributes
from ._models_py3 import SasDefinitionBundle
from ._models_py3 import SasDefinitionCreateParameters
from ._models_py3 import SasDefinitionItem
from ._models_py3 import SasDefinitionListResult
from ._models_py3 import SasDefinitionUpdateParameters
from ._models_py3 import SecretAttributes
from ._models_py3 import SecretBundle
from ._models_py3 import SecretItem
from ._models_py3 import SecretListResult
from ._models_py3 import SecretProperties
from ._models_py3 import SecretRestoreParameters
from ._models_py3 import SecretSetParameters
from ._models_py3 import SecretUpdateParameters
from ._models_py3 import StorageAccountAttributes
from ._models_py3 import StorageAccountCreateParameters
from ._models_py3 import StorageAccountItem
from ._models_py3 import StorageAccountRegenerteKeyParameters
from ._models_py3 import StorageAccountUpdateParameters
from ._models_py3 import StorageBundle
from ._models_py3 import StorageListResult
from ._models_py3 import SubjectAlternativeNames
from ._models_py3 import Trigger
from ._models_py3 import X509CertificateProperties
except (SyntaxError, ImportError):
from ._models import Action # type: ignore
from ._models import AdministratorDetails # type: ignore
from ._models import Attributes # type: ignore
from ._models import BackupKeyResult # type: ignore
from ._models import BackupSecretResult # type: ignore
from ._models import CertificateAttributes # type: ignore
from ._models import CertificateBundle # type: ignore
from ._models import CertificateCreateParameters # type: ignore
from ._models import CertificateImportParameters # type: ignore
from ._models import CertificateIssuerItem # type: ignore
from ._models import CertificateIssuerListResult # type: ignore
from ._models import CertificateIssuerSetParameters # type: ignore
from ._models import CertificateIssuerUpdateParameters # type: ignore
from ._models import CertificateItem # type: ignore
from ._models import CertificateListResult # type: ignore
from ._models import CertificateMergeParameters # type: ignore
from ._models import CertificateOperation # type: ignore
from ._models import CertificateOperationUpdateParameter # type: ignore
from ._models import CertificatePolicy # type: ignore
from ._models import CertificateUpdateParameters # type: ignore
from ._models import Contact # type: ignore
from ._models import Contacts # type: ignore
from ._models import DeletedCertificateBundle # type: ignore
from ._models import DeletedCertificateItem # type: ignore
from ._models import DeletedCertificateListResult # type: ignore
from ._models import DeletedKeyBundle # type: ignore
from ._models import DeletedKeyItem # type: ignore
from ._models import DeletedKeyListResult # type: ignore
from ._models import DeletedSecretBundle # type: ignore
from ._models import DeletedSecretItem # type: ignore
from ._models import DeletedSecretListResult # type: ignore
from ._models import Error # type: ignore
from ._models import IssuerAttributes # type: ignore
from ._models import IssuerBundle # type: ignore
from ._models import IssuerCredentials # type: ignore
from ._models import IssuerParameters # type: ignore
from ._models import JsonWebKey # type: ignore
from ._models import KeyAttributes # type: ignore
from ._models import KeyBundle # type: ignore
from ._models import KeyCreateParameters # type: ignore
from ._models import KeyImportParameters # type: ignore
from ._models import KeyItem # type: ignore
from ._models import KeyListResult # type: ignore
from ._models import KeyOperationResult # type: ignore
from ._models import KeyOperationsParameters # type: ignore
from ._models import KeyProperties # type: ignore
from ._models import KeyRestoreParameters # type: ignore
from ._models import KeySignParameters # type: ignore
from ._models import KeyUpdateParameters # type: ignore
from ._models import KeyVaultError # type: ignore
from ._models import KeyVerifyParameters # type: ignore
from ._models import KeyVerifyResult # type: ignore
from ._models import LifetimeAction # type: ignore
from ._models import OrganizationDetails # type: ignore
from ._models import PendingCertificateSigningRequestResult # type: ignore
from ._models import SasDefinitionAttributes # type: ignore
from ._models import SasDefinitionBundle # type: ignore
from ._models import SasDefinitionCreateParameters # type: ignore
from ._models import SasDefinitionItem # type: ignore
from ._models import SasDefinitionListResult # type: ignore
from ._models import SasDefinitionUpdateParameters # type: ignore
from ._models import SecretAttributes # type: ignore
from ._models import SecretBundle # type: ignore
from ._models import SecretItem # type: ignore
from ._models import SecretListResult # type: ignore
from ._models import SecretProperties # type: ignore
from ._models import SecretRestoreParameters # type: ignore
from ._models import SecretSetParameters # type: ignore
from ._models import SecretUpdateParameters # type: ignore
from ._models import StorageAccountAttributes # type: ignore
from ._models import StorageAccountCreateParameters # type: ignore
from ._models import StorageAccountItem # type: ignore
from ._models import StorageAccountRegenerteKeyParameters # type: ignore
from ._models import StorageAccountUpdateParameters # type: ignore
from ._models import StorageBundle # type: ignore
from ._models import StorageListResult # type: ignore
from ._models import SubjectAlternativeNames # type: ignore
from ._models import Trigger # type: ignore
from ._models import X509CertificateProperties # type: ignore
from ._key_vault_client_enums import (
ActionType,
DeletionRecoveryLevel,
JsonWebKeyCurveName,
JsonWebKeyEncryptionAlgorithm,
JsonWebKeyOperation,
JsonWebKeySignatureAlgorithm,
JsonWebKeyType,
KeyUsageType,
)
# Public names re-exported by this generated models package: model classes
# first, then the enum types.
# NOTE(review): 'StorageAccountRegenerteKeyParameters' spelling (sic) matches
# the generated model's actual class name — do not "fix" it here.
__all__ = [
    'Action',
    'AdministratorDetails',
    'Attributes',
    'BackupKeyResult',
    'BackupSecretResult',
    'CertificateAttributes',
    'CertificateBundle',
    'CertificateCreateParameters',
    'CertificateImportParameters',
    'CertificateIssuerItem',
    'CertificateIssuerListResult',
    'CertificateIssuerSetParameters',
    'CertificateIssuerUpdateParameters',
    'CertificateItem',
    'CertificateListResult',
    'CertificateMergeParameters',
    'CertificateOperation',
    'CertificateOperationUpdateParameter',
    'CertificatePolicy',
    'CertificateUpdateParameters',
    'Contact',
    'Contacts',
    'DeletedCertificateBundle',
    'DeletedCertificateItem',
    'DeletedCertificateListResult',
    'DeletedKeyBundle',
    'DeletedKeyItem',
    'DeletedKeyListResult',
    'DeletedSecretBundle',
    'DeletedSecretItem',
    'DeletedSecretListResult',
    'Error',
    'IssuerAttributes',
    'IssuerBundle',
    'IssuerCredentials',
    'IssuerParameters',
    'JsonWebKey',
    'KeyAttributes',
    'KeyBundle',
    'KeyCreateParameters',
    'KeyImportParameters',
    'KeyItem',
    'KeyListResult',
    'KeyOperationResult',
    'KeyOperationsParameters',
    'KeyProperties',
    'KeyRestoreParameters',
    'KeySignParameters',
    'KeyUpdateParameters',
    'KeyVaultError',
    'KeyVerifyParameters',
    'KeyVerifyResult',
    'LifetimeAction',
    'OrganizationDetails',
    'PendingCertificateSigningRequestResult',
    'SasDefinitionAttributes',
    'SasDefinitionBundle',
    'SasDefinitionCreateParameters',
    'SasDefinitionItem',
    'SasDefinitionListResult',
    'SasDefinitionUpdateParameters',
    'SecretAttributes',
    'SecretBundle',
    'SecretItem',
    'SecretListResult',
    'SecretProperties',
    'SecretRestoreParameters',
    'SecretSetParameters',
    'SecretUpdateParameters',
    'StorageAccountAttributes',
    'StorageAccountCreateParameters',
    'StorageAccountItem',
    'StorageAccountRegenerteKeyParameters',
    'StorageAccountUpdateParameters',
    'StorageBundle',
    'StorageListResult',
    'SubjectAlternativeNames',
    'Trigger',
    'X509CertificateProperties',
    # enum types
    'ActionType',
    'DeletionRecoveryLevel',
    'JsonWebKeyCurveName',
    'JsonWebKeyEncryptionAlgorithm',
    'JsonWebKeyOperation',
    'JsonWebKeySignatureAlgorithm',
    'JsonWebKeyType',
    'KeyUsageType',
]
| |
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import glob
import json
import sys
sys.path.insert(0, './gen-py')
lib_path = glob.glob('../../lib/py/build/lib.*')
if lib_path:
sys.path.insert(0, lib_path[0])
from myBinaryStruct.ttypes import *
from myBoolStruct.ttypes import *
from myByteStruct.ttypes import *
from myComplexStruct.ttypes import *
from myDoubleStruct.ttypes import *
from myI16Struct.ttypes import *
from myI32Struct.ttypes import *
from myMixedStruct.ttypes import *
from mySetStruct.ttypes import *
from myMapStruct.ttypes import *
from myNestedMapStruct.ttypes import *
from mySimpleStruct.ttypes import *
from myStringStruct.ttypes import *
from thrift.protocol.TSimpleJSONProtocol import TSimpleJSONProtocolFactory
from thrift.protocol.TProtocol import TProtocolException
import unittest
import time
class SimpleJSONToThriftTest(unittest.TestCase):
    """Round-trip thrift structs through TSimpleJSONProtocol.

    Each test serializes a fixture struct with the SimpleJSON protocol and
    reads it back with ``readFromJson``, asserting the result equals the
    original.  The ``*Bad`` fixtures hold out-of-range values and must make
    the round-trip raise ``TProtocolException``.
    """
    def setUp(self):
        self.binaryStruct = myBinaryStruct(a='xyzzy')
        self.boolStruct1 = myBoolStruct(a=True)
        self.boolStruct2 = myBoolStruct(a=False)
        self.byteStruct = myByteStruct(a=101)
        self.byteStructBad = myByteStruct(a=3232)
        self.complexStruct1 = myComplexStruct(
            a=mySimpleStruct(
                a=True,
                b=92,
                c=902,
                d=65536,
                e=123456789,
                f=3.1415,
                g='Whan that Aprille'
            ),
            b=[314, 15, 9, 26535],
            c={"qwerty": mySimpleStruct(c=1),
               "slippy": mySimpleStruct(a=False, b=-4, c=5)},
            e=EnumTest.EnumTwo,
            x=ExceptionTest("test")
        )
        self.complexStruct2 = myComplexStruct()
        self.doubleStruct1 = myDoubleStruct(a=-2.192)
        self.doubleStruct2 = myDoubleStruct(a=float('inf'))
        self.doubleStruct3 = myDoubleStruct(a=float('-inf'))
        self.I16Struct = myI16Struct(a=4567)
        self.I16StructBad = myI16Struct(a=0xFEDCBA987)
        self.I32Struct = myI32Struct(a=12131415)
        self.I32StructBad = myI32Struct(a=0xFFFFFFFFEDCBA)
        self.mixedStruct = myMixedStruct(
            a=[],
            b=[mySuperSimpleStruct(a=5)],
            c={'flame': -8, 'fire': -191},
            d={},
            e=set([1, 2, 3, 4]))
        self.setStruct1 = mySetStruct(a=set([4, 8, 15, 16]))
        self.setStruct2 = mySetStruct(a=set([]))
        self.setStructBad = mySetStruct(a=set([1, 0xFFFFFFFFFF, 2]))
        self.mapStruct = myMapStruct(
            stringMap={"a": "A", "b": "B"},
            boolMap={True: "True", False: "False"},
            byteMap={1: "one", 2: "two"},
            doubleMap={float("0.1"): "0.one", float("0.2"): "0.two"},
            enumMap={1: "male", 2: "female"})
        self.nestedMapStruct = myNestedMapStruct(
            maps={"1": {"1": mySimpleStruct(c=1)},
                  "2": {"2": mySimpleStruct(a=False, c=2)}})
        self.simpleStruct1 = mySimpleStruct(
            a=False,
            b=87,
            c=7880,
            d=-7880,
            e=-1,
            f=-0.1,
            g='T-bone')
        self.simpleStruct2 = mySimpleStruct(c=9)
        self.simpleStructBad = mySimpleStruct()
        self.stringStruct1 = myStringStruct(a='')
        self.stringStruct2 = myStringStruct()
        self.stringStruct3 = myStringStruct(a="foobar")
    def _serialize(self, obj):
        """Serialize *obj* to SimpleJSON via an in-memory transport."""
        # Import TTransport explicitly: previously this method relied on the
        # name leaking into the module namespace through one of the
        # ``ttypes`` star imports, which is fragile and implicit.
        from thrift.transport import TTransport
        trans = TTransport.TMemoryBuffer()
        prot = TSimpleJSONProtocolFactory().getProtocol(trans)
        obj.write(prot)
        return trans.getvalue()
    def _testStruct(self, struct, is_empty=False):
        """Round-trip *struct*; an empty struct must equal a fresh instance."""
        gen = struct.__class__()
        if not is_empty:
            self.assertNotEqual(gen, struct)
        else:
            self.assertEqual(gen, struct)
        gen.readFromJson(self._serialize(struct))
        self.assertEqual(gen, struct)
    def _testBadStruct(self, struct, is_empty=False):
        """The round-trip of *struct* must raise TProtocolException."""
        with self.assertRaises(TProtocolException):
            self._testStruct(struct, is_empty)
    def testBinaryStruct(self):
        self._testStruct(self.binaryStruct)
    def testBoolStruct(self):
        self._testStruct(self.boolStruct1)
        self._testStruct(self.boolStruct2)
    def testByteStruct(self):
        self._testStruct(self.byteStruct)
        self._testBadStruct(self.byteStructBad)
    def testComplexStruct(self):
        self._testStruct(self.complexStruct1)
        self._testStruct(self.complexStruct2, True)
    def testDoubleStruct(self):
        self._testStruct(self.doubleStruct1)
        self._testStruct(self.doubleStruct2)
        self._testStruct(self.doubleStruct3)
    def testI16Struct(self):
        self._testStruct(self.I16Struct)
        self._testBadStruct(self.I16StructBad)
    def testI32Struct(self):
        self._testStruct(self.I32Struct)
        self._testBadStruct(self.I32StructBad)
    def testMixedStruct(self):
        self._testStruct(self.mixedStruct)
    def testSetStruct(self):
        self._testStruct(self.setStruct1)
        self._testStruct(self.setStruct2)
        self._testBadStruct(self.setStructBad)
    def testMapStruct(self):
        self._testStruct(self.mapStruct)
    def testNestedMapStruct(self):
        self._testStruct(self.nestedMapStruct)
    def testSimpleStruct(self):
        self._testStruct(self.simpleStruct1)
        self._testStruct(self.simpleStruct2)
        self._testBadStruct(self.simpleStructBad, True)
    def testStringStruct(self):
        self._testStruct(self.stringStruct1)
        self._testStruct(self.stringStruct2, True)
        self._testStruct(self.stringStruct3)
def suite():
    """Collect all SimpleJSONToThriftTest cases into one test suite."""
    loaded = unittest.TestLoader().loadTestsFromTestCase(SimpleJSONToThriftTest)
    combined = unittest.TestSuite()
    combined.addTest(loaded)
    return combined
# Run the full suite with verbose output when executed as a script.
if __name__ == "__main__":
    unittest.main(defaultTest="suite",
                  testRunner=unittest.TextTestRunner(verbosity=2))
| |
import os
import sys
import time
# states for jobs
STATUS_UNDONE = "incomplete"   # not yet scheduled
STATUS_PENDING = "pending"     # scheduled, waiting on parents or a process slot
STATUS_RUNNING = "running"     # currently executing
STATUS_DONE = "done"           # finished successfully
STATUS_ERROR = "error"         # finished with an error
# Membership map of all recognized states (dict used as a set).
VALID_STATUS = {
    STATUS_UNDONE: 1,
    STATUS_PENDING: 1,
    STATUS_RUNNING: 1,
    STATUS_DONE: 1,
    STATUS_ERROR: 1,
}
# Dispatch command templates; $SCRIPT, $JOBNAME and $STATUSDIR are
# substituted by Pipeline.execJob before the command is spawned.
DEFAULT_DISPATCH = "bash $SCRIPT"
BASH_DISPATCH = "bash $SCRIPT"
LSF_DISPATCH = "bsub -o $STATUSDIR/$JOBNAME.output -K bash $SCRIPT"
# autodetect dispatch
def getDefaultDispatch():
    """Pick the dispatch command template matching the detected platform."""
    if getPlatform() == "lsf":
        return LSF_DISPATCH
    return DEFAULT_DISPATCH
def getDefaultBackground():
    """Jobs run in the background by default only on the LSF platform."""
    return getPlatform() == "lsf"
class PipelineException (Exception):
    """Error raised for pipeline-level failures; prefixes messages with 'pipeline: '."""
    def __init__(self, msg):
        super(PipelineException, self).__init__("pipeline: " + msg)
class Job:
    """One unit of work in a Pipeline, with dependency tracking.

    A job wraps either a shell command (``task`` is a string, tasktype
    "shell") or a Python callable (tasktype "function"), remembers its
    parent/child dependency links, and tracks which parents it is still
    waiting on before it may run.
    """
    def __init__(self, name, task, depends=None,
                 background=True, dispatch=DEFAULT_DISPATCH):
        # ``depends`` defaults to None rather than a mutable [] so the same
        # list object is never shared across Job instances.
        self.pid = -1
        self.name = name
        self.task = task
        self.background = background
        self.status = STATUS_UNDONE
        self.msg = ""
        self.parents = []
        self.children = []
        self.waitSet = {}   # unfinished parents (dict used as a set)
        self.dispatch = dispatch
        self.subjobs = []
        # add dependencies
        for dep in (depends or []):
            self.addDep(dep)
        # determine task type from the task object itself
        if isinstance(task, str):
            self.tasktype = "shell"
        else:
            self.tasktype = "function"
    def undo(self):
        """Mark this job and all of its descendants as incomplete."""
        self.status = STATUS_UNDONE
        for child in self.children:
            child.undo()
    def addDep(self, dep):
        """Register *dep* as a parent of this job (and self as its child)."""
        self.parents.append(dep)
        dep.children.append(self)
    def wait(self, job):
        """Record that this job must wait for *job* to finish."""
        self.waitSet[job] = 1
    def unwait(self, job):
        """Stop waiting for *job*, if we were waiting on it."""
        if job in self.waitSet:
            del self.waitSet[job]
    def setWaits(self):
        """Wait on every unfinished parent; raise if any parent errored."""
        for parent in self.parents:
            if parent.status != STATUS_DONE:
                self.wait(parent)
            if parent.status == STATUS_ERROR:
                parent.raiseError()
    def notifyChildren(self):
        """Tell all children this job is finished."""
        for child in self.children:
            child.unwait(self)
    def isWaiting(self):
        """Return True if this job is still waiting on any parent."""
        return len(self.waitSet) > 0
    def raiseError(self):
        """Raise a PipelineException naming this job."""
        raise PipelineException("job '%s' has error" % self.name)
class Pipeline:
    """Simple file-based job pipeline with dependency-ordered execution.

    Job status is persisted as one small file per job under ``statusDir`` so
    a pipeline can be resumed after interruption.  Background jobs are
    dispatched through a shell command template (see DEFAULT_DISPATCH /
    LSF_DISPATCH) via os.spawnlp; the spawned shell records 'done'/'error'
    into the job's status file when the command exits.
    NOTE(review): this module is Python 2 code (print statements,
    itervalues(), file(), xrange).
    """
    def __init__(self,
                 statusDir="pipeline",
                 background=None,
                 dispatch=None):
        self.statusDir = statusDir
        self.jobs = {}      # set of all jobs registered with Pipeline
        self.pending = {}   # set of all jobs that are ready to run
        self.pids = {}      # set of all process ids currently running
        self.isInit = False
        self.testing = False
        self.logOutput = None
        self.maxNumProc = 40  # maximum allowed number of processes
        self.needReset = False
        # None means "autodetect from platform"
        if background is None:
            self.background = getDefaultBackground()
        else:
            self.background = background
        if dispatch is None:
            self.dispatch = getDefaultDispatch()
        else:
            self.dispatch = dispatch
    def init(self):
        """Load persisted job status, resetting stale or requested state."""
        # set all job states to UNDONE
        if self.needReset:
            for job in self.jobs.itervalues():
                filename = self.getJobStatusFile(job)
                if os.path.exists(filename):
                    os.remove(filename)
            self.needReset = False
        # read in the status of all jobs
        self.readStatus(False)
        if not self.isInit:
            # clear any pending jobs
            self.pending = {}
            for job in self.jobs.itervalues():
                # jobs that were running/pending/errored go back to undone
                if job.status in [STATUS_RUNNING,
                                  STATUS_PENDING,
                                  STATUS_ERROR]:
                    self.writeJobStatus(job, STATUS_UNDONE)
            self.isInit = True
    def reinit(self):
        """Force a fresh init() on the next call path."""
        self.isInit = False
        self.init()
    def reset(self):
        """Request removal of all persisted status on the next init()."""
        self.needReset = True
    def enableTesting(self, enable=True):
        """In testing mode jobs are logged and marked done, not executed."""
        self.testing = enable
    def setLogOutput(self, out=sys.stdout):
        """Direct pipeline log messages to *out* (a writable stream)."""
        self.logOutput = out
    def setStatusDir(self, statusDir):
        """Change the status directory; only allowed before init()."""
        assert not self.isInit
        self.statusDir = statusDir
    def ensureStatusDir(self):
        """Create the status directory if it does not yet exist."""
        if not os.path.exists(self.statusDir):
            os.mkdir(self.statusDir)
    def setMaxNumProc(self, nproc):
        """Cap the number of simultaneously running background jobs."""
        self.maxNumProc = nproc
    def getJobStatusFile(self, job):
        """Path of the file holding *job*'s persisted status string."""
        return os.path.join(self.statusDir, job.name + ".status")
    def getJobOutputFile(self, job):
        """Path used for *job*'s captured output (e.g. by LSF dispatch)."""
        return os.path.join(self.statusDir, job.name + ".output")
    def getJobErrorFile(self, job):
        """Path reserved for *job*'s error output."""
        return os.path.join(self.statusDir, job.name + ".error")
    def getJobScriptFile(self, job):
        """Path of the shell script written for a background *job*."""
        return os.path.join(self.statusDir, job.name + ".script")
    def readStatus(self, retry=True):
        """Read in status information for all jobs"""
        for job in self.jobs.itervalues():
            self.readJobStatus(job, retry)
    def writeStatus(self):
        """Write status information for all jobs"""
        for job in self.jobs.values():
            self.writeJobStatus(job)
    def readJobStatus(self, job, retry=True):
        """Read *job*'s status file, retrying while it looks half-written."""
        filename = self.getJobStatusFile(job)
        while True:
            if os.path.exists(filename):
                # NOTE(review): py2 file(); handle not closed explicitly,
                # relies on refcount GC.
                infile = file(filename, "r")
                job.status = infile.read().rstrip()
            else:
                job.status = STATUS_UNDONE
            # handle the case where the status file is only partially written
            if job.status in VALID_STATUS or not retry:
                break
            else:
                # wait for a little
                time.sleep(.05)
        # still invalid after a non-retry read: normalize back to undone
        if job.status not in VALID_STATUS:
            self.writeJobStatus(job, STATUS_UNDONE)
    def writeJobStatus(self, job, status=None):
        """Persist *job*'s status (optionally setting it to *status*)."""
        self.ensureStatusDir()
        if status is not None:
            job.status = status
        out = file(self.getJobStatusFile(job), "w")
        out.write(job.status)
        out.close()
    def log(self, *text):
        """Write a log line if a log output stream is configured."""
        if self.logOutput:
            self.logOutput.write("pipeline: " + " ".join(text) + "\n")
    def add(self, name, task, depends=[], background=None, dispatch=None):
        """Register a job; *depends* is a list of already-added job names."""
        # set defaults
        if background is None:
            background = self.background
        if dispatch is None:
            dispatch = self.dispatch
        parents = []
        for dep in depends:
            try:
                parents.append(self.jobs[dep])
            except KeyError:
                raise PipelineException("unknown job '%s'" % dep)
        self.jobs[name] = Job(name, task, parents, background, dispatch)
        return name
    def addGroup(self, name, subjobnames, depends=[],
                 background=None,
                 dispatch=None):
        """Register one job that runs the named shell subjobs with '&&'."""
        # set defaults
        if background is None:
            background = self.background
        if dispatch is None:
            dispatch = self.dispatch
        parents = []
        for dep in depends:
            try:
                parents.append(self.jobs[dep])
            except KeyError:
                raise PipelineException("unknown job '%s'" % dep)
        tasks = []
        for jobname in subjobnames:
            try:
                job = self.jobs[jobname]
            except KeyError:
                raise PipelineException("unknown job '%s'" % jobname)
            assert isinstance(job.task, str), \
                "subjob '%s' task must be a command line string" % jobname
            tasks.append(job.task)
        # chain subcommands so the group fails at the first failing subjob
        task = " && ".join(map(lambda t: "( %s )" % t, tasks))
        self.jobs[name] = Job(name, task, parents, background, dispatch)
        self.jobs[name].subjobs = map(lambda x: self.jobs[x], subjobnames)
        return name
    def addGroups(self, name, subjobnames, size=1, depends=[],
                  background=None, dispatch=None):
        """Partition *subjobnames* into groups of *size*; returns group names."""
        # set defaults
        if background is None:
            background = self.background
        if dispatch is None:
            dispatch = self.dispatch
        groups = []
        j = 1
        for i in xrange(0, len(subjobnames), size):
            groups.append(self.addGroup("%s%d" % (name, j),
                                        subjobnames[i:i+size],
                                        depends,
                                        background,
                                        dispatch))
            j += 1
        return groups
    def run(self, name):
        """Schedule job *name* (and, recursively, its parents) to run."""
        self.init()
        if name in self.jobs:
            self.runJob(self.jobs[name])
        else:
            raise PipelineException("unknown job '%s'" % name)
    def undo(self, name):
        """Mark job *name* and everything depending on it as incomplete."""
        if name in self.jobs:
            self.undoJob(self.jobs[name])
        else:
            raise PipelineException("unknown job '%s'" % name)
    def addPending(self, job):
        """Mark *job* pending and remember it in the pending set."""
        self.writeJobStatus(job, STATUS_PENDING)
        self.pending[job] = 1
    def removePending(self, job, status=None):
        """Drop *job* from the pending set, optionally persisting *status*."""
        if job in self.pending:
            del self.pending[job]
        if status is not None:
            job.status = status
            self.writeJobStatus(job, job.status)
        assert job.status != STATUS_PENDING
    def finishJob(self, job):
        """Clean up after *job*: reap its process and release its children."""
        self.log("%s: END" % job.name)
        self.removePending(job)
        if job.pid != -1:
            retpid, retcode = os.waitpid(job.pid, 0)
            self.finishPid(job.pid)
        job.notifyChildren()
    def finishPid(self, pid):
        """Forget a finished process id and detach it from its job."""
        job = self.pids[pid]
        del self.pids[pid]
        job.pid = -1
    def runJob(self, job):
        """Try to run *job*, scheduling parents first; returns its status."""
        # return immediately if job is already done
        if job.status == STATUS_DONE:
            self.finishJob(job)
            return STATUS_DONE
        # do not run the job if it has an error
        elif job.status == STATUS_ERROR:
            job.raiseError()
        # do not run job if it is already running
        elif job.status == STATUS_RUNNING:
            return STATUS_RUNNING
        # ensure these are the only valid job states
        assert job.status in (STATUS_UNDONE, STATUS_PENDING), (
            "unknown job status '%s'" % job.status)
        # determine which jobs to wait for
        job.setWaits()
        # add job to pending jobs
        if job not in self.pending:
            self.addPending(job)
        # make sure all parents are run first
        for parent in job.parents:
            self.runJob(parent)
        # run job if it is waiting for no one
        # and number of processes is less than max allowed
        if (not job.isWaiting() and
            len(self.pids) < self.maxNumProc):
            self.execJob(job)
        # return job status
        return job.status
    def execJob(self, job):
        """Actually start *job*: run a function inline or dispatch a shell task."""
        self.log("%s: BEGIN" % job.name)
        # mark job as running
        self.writeJobStatus(job, STATUS_RUNNING)
        if job.tasktype == "function":
            # currently functions can not be backgrounded
            #
            if self.testing:
                print "* running job '%s' (python function)\n" % job.name
                self.writeJobStatus(job, STATUS_DONE)
                return
            # run task in main thread (BLOCK)
            if job.task():
                self.writeJobStatus(job, STATUS_DONE)
            else:
                self.writeJobStatus(job, STATUS_ERROR)
        elif job.tasktype == "shell":
            if self.testing:
                print "* running job '%s':\n%s\n" % (job.name, job.task)
                self.writeJobStatus(job, STATUS_DONE)
                return
            if job.background:
                # run task in separate process and dont block
                # save task into script file
                script = self.getJobScriptFile(job)
                out = file(script, "w")
                out.write(job.task)
                out.close()
                # expand dispatch
                dispatch = job.dispatch
                dispatch = dispatch.replace("$JOBNAME", job.name)
                dispatch = dispatch.replace("$SCRIPT", script)
                dispatch = dispatch.replace("$STATUSDIR", self.statusDir)
                statusfile = self.getJobStatusFile(job)
                # the spawned shell records the final status itself so the
                # pipeline can recover state even if this process dies
                job.pid = os.spawnlp(
                    os.P_NOWAIT, "bash", "bash", "-c",
                    """( %s ) &&
                    (echo done > '%s') ||
                    (echo error > '%s')
                    """ %
                    (
                        dispatch,
                        statusfile,
                        statusfile
                    ))
                # save process id
                self.pids[job.pid] = job
            else:
                # run task in separate process and wait for it
                if os.system(job.task) == 0:
                    self.writeJobStatus(job, STATUS_DONE)
                else:
                    self.writeJobStatus(job, STATUS_ERROR)
        else:
            raise PipelineException("unknown tasktype '%s'" % job.tasktype)
    def undoJob(self, job):
        """Make job and all depending jobs as 'incomplete'"""
        self.writeJobStatus(job, STATUS_UNDONE)
        for child in job.children:
            self.undoJob(child)
    def process(self, poll=False):
        """Run pending jobs to completion (or one pass when *poll* is True)."""
        self.init()
        while len(self.pending) > 0:
            # try to run all pending jobs
            for job in self.pending.keys():
                self.runJob(job)
            # wait for jobs to finish
            while len(self.pids) > 0:
                if not poll:
                    pid, retcode = os.waitpid(0, 0)
                else:
                    # do not hang if only polling
                    pid, retcode = os.waitpid(0, os.WNOHANG)
                if pid in self.pids:
                    job = self.pids[pid]
                    self.finishPid(pid)
                    self.readJobStatus(job)
                    # these are the only allowed states
                    assert (job.status == STATUS_DONE or
                            job.status == STATUS_ERROR)
                    if job.status == STATUS_DONE:
                        self.finishJob(job)
                        # try execute children
                        for child in job.children:
                            if child in self.pending:
                                self.runJob(child)
                    elif job.status == STATUS_ERROR:
                        job.raiseError()
                    # try to run all pending jobs
                    for job in self.pending.keys():
                        self.runJob(job)
                else:
                    print >>sys.stderr, (
                        "ERROR: unknown job with pid %d reporting" % pid)
            # do not loop if only polling
            if poll:
                return
def hasLsf():
    """Returns True only if LSF is available (``bsub`` found on PATH)."""
    return os.system("which bsub > /dev/null") == 0
PLATFORM = None
def getPlatform():
    """Return the detected batch platform, caching the result in PLATFORM."""
    global PLATFORM
    if PLATFORM is None:
        PLATFORM = "lsf" if hasLsf() else "none"
    return PLATFORM
def submit(cmd):
    """Wrap *cmd* in a blocking bsub submission when running on LSF."""
    if getPlatform() != "lsf":
        return cmd
    return "bsub -o /dev/null -K " + cmd
# testing
if __name__ == "__main__":
    # Small demo: running job5 pulls in its ancestors job1-job3.
    # job4 is registered but never scheduled by this run.
    pipeline = Pipeline()
    pipeline.add("job1", "echo job1")
    pipeline.add("job2", "echo job2 && sleep 3")
    pipeline.add("job3", "echo job3", ["job1", "job2"])
    pipeline.add("job4", "cat nosuchthing", ["job3"])
    pipeline.add("job5", "echo job5", ["job1", "job3"])
    #pipeline.undo("job1")
    pipeline.run("job5")
    pipeline.process()
| |
import os
import pandas as pd
import pytest
import subprocess
from activitysim.cli.create import get_example
@pytest.fixture(scope="module")
def est_data():
    """Provision the example_estimation_sf data set and chdir into it.

    Reuses an existing ``_test_est`` directory when present; otherwise
    downloads the example and runs activitysim once in estimation mode to
    generate the outputs these tests read.  Yields the test directory and
    restores the original working directory on teardown.
    """
    # !activitysim create -e example_estimation_sf -d _test_est
    if os.path.exists("_test_est"):
        retain_test_data = True
    else:
        retain_test_data = False
        get_example("example_estimation_sf", "_test_est")
    # %cd _test_est
    cwd = os.getcwd()
    os.chdir("_test_est")
    # !activitysim run -c configs_estimation/configs -c configs -o output -d data_sf
    if not retain_test_data:
        print(f"List of files now in {os.getcwd()}")
        subprocess.run(["find", "."])
        print(f"\n\nrunning activitysim estimation mode in {os.getcwd()}")
        subprocess.run(
            [
                "activitysim",
                "run",
                "-c",
                "configs_estimation/configs",
                "-c",
                "configs",
                "-o",
                "output",
                "-d",
                "data_sf",
            ],
        )
    else:
        print(f"reusing existing data in {os.getcwd()}")
    yield os.getcwd()
    os.chdir(cwd)
    # if not retain_test_data:
    #     os.remove("_test_est")
def _regression_check(dataframe_regression, df, basename=None):
dataframe_regression.check(
df.select_dtypes("number").drop(columns=["holdfast"], errors='ignore').clip(-9e9, 9e9),
# pandas 1.3 handles int8 dtypes as actual numbers, so holdfast needs to be dropped manually
# we're dropping it not adding to the regression check so older pandas will also work.
basename=basename,
default_tolerance=dict(atol=1e-6, rtol=1e-2)
# set a little loose, as there is sometimes a little variance in these
# results when switching backend implementations.
)
@pytest.mark.parametrize("name,method", [
    ("free_parking", "BHHH"),
    ("mandatory_tour_frequency", "SLSQP"),
    ("joint_tour_frequency", "SLSQP"),
    ("joint_tour_composition", "SLSQP"),
    ("joint_tour_participation", "SLSQP"),
    ("mandatory_tour_frequency", "BHHH"),
    ("atwork_subtour_frequency", "SLSQP"),
    ("auto_ownership", "BHHH"),
    ("trip_mode_choice", "SLSQP"),
])
def test_simple_simulate(est_data, num_regression, dataframe_regression, name, method):
    """Estimate simple-simulate component *name* with optimizer *method*.

    Regression-checks the pre/post-estimation log likelihoods and the fitted
    parameter frame against stored baselines.
    """
    from activitysim.estimation.larch import component_model
    m = component_model(name)
    m.load_data()
    # presumably repairs choice/availability inconsistencies before
    # evaluating loglike — TODO confirm against larch docs
    m.doctor(repair_ch_av='-')
    loglike_prior = m.loglike()
    r = m.maximize_loglike(method=method, options={"maxiter": 1000})
    num_regression.check(
        {"loglike_prior": loglike_prior, "loglike_converge": r.loglike},
        basename=f"test_simple_simulate_{name}_{method}_loglike",
        default_tolerance=dict(atol=1e-6, rtol=1e-3),
    )
    _regression_check(dataframe_regression, m.pf)
@pytest.mark.parametrize("name,method", [
    ("workplace_location", "SLSQP"),
    ("school_location", "SLSQP"),
    ("non_mandatory_tour_destination", "SLSQP"),
    ("atwork_subtour_destination", "BHHH"),
    ("trip_destination", "SLSQP"),
])
def test_location_model(est_data, num_regression, dataframe_regression, name, method):
    """Estimate location component *name*, checking loglikes, parameters
    and the recomputed destination size spec against stored baselines."""
    from activitysim.estimation.larch import component_model, update_size_spec
    m, data = component_model(name, return_data=True)
    m.load_data()
    loglike_prior = m.loglike()
    r = m.maximize_loglike(method=method)
    num_regression.check(
        {"loglike_prior": loglike_prior, "loglike_converge": r.loglike},
        basename=f"test_loc_{name}_loglike",
    )
    _regression_check(dataframe_regression, m.pf)
    size_spec = update_size_spec(
        m, data, result_dir=None, output_file=None,
    )
    dataframe_regression.check(
        size_spec,
        basename=f"test_loc_{name}_size_spec",
        default_tolerance=dict(atol=1e-6, rtol=1e-2)
        # set a little loose, as there is sometimes a little variance in these
        # results when switching backend implementations.
    )
@pytest.mark.parametrize("name,method", [
    ("non_mandatory_tour_scheduling", "SLSQP"),
    ("joint_tour_scheduling", "SLSQP"),
    ("atwork_subtour_scheduling", "SLSQP"),
    ("mandatory_tour_scheduling_work", "SLSQP"),
    ("mandatory_tour_scheduling_school", "SLSQP"),
])
def test_scheduling_model(est_data, num_regression, dataframe_regression, name, method):
    """Estimate scheduling component *name* and regression-check loglikes
    and fitted parameters."""
    from activitysim.estimation.larch import component_model, update_size_spec
    m, data = component_model(name, return_data=True)
    m.load_data()
    # presumably repairs choice/availability inconsistencies — TODO confirm
    m.doctor(repair_ch_av='-')
    loglike_prior = m.loglike()
    r = m.maximize_loglike(method=method)
    num_regression.check(
        {"loglike_prior": loglike_prior, "loglike_converge": r.loglike},
        basename=f"test_{name}_loglike",
    )
    _regression_check(dataframe_regression, m.pf)
def test_stop_freq_model(est_data, num_regression, dataframe_regression):
    """Estimate the stop_frequency component and regression-check results."""
    from activitysim.estimation.larch import component_model
    component_name = "stop_frequency"
    model, _data = component_model(component_name, return_data=True)
    model.load_data()
    baseline_loglike = model.loglike()
    result = model.maximize_loglike()
    num_regression.check(
        {"loglike_prior": baseline_loglike, "loglike_converge": result.loglike},
        basename=f"test_{component_name}_loglike",
    )
    _regression_check(dataframe_regression, model.pf)
def test_workplace_location(est_data, num_regression, dataframe_regression):
    """Estimate workplace_location; check loglikes, parameters and size spec."""
    from activitysim.estimation.larch import component_model, update_size_spec
    model, model_data = component_model("workplace_location", return_data=True)
    model.load_data()
    baseline_loglike = model.loglike()
    result = model.maximize_loglike(method="SLSQP")
    num_regression.check(
        {"loglike_prior": baseline_loglike, "loglike_converge": result.loglike},
        basename="test_workplace_location_loglike",
    )
    _regression_check(dataframe_regression, model.pf)
    dataframe_regression.check(
        update_size_spec(model, model_data, result_dir=None, output_file=None),
        basename="test_workplace_location_size_spec",
        default_tolerance=dict(atol=1e-6, rtol=1e-2),
    )
def test_school_location(est_data, num_regression, dataframe_regression):
    """Estimate school_location; check loglikes, parameters and size spec."""
    from activitysim.estimation.larch import component_model, update_size_spec
    model, model_data = component_model("school_location", return_data=True)
    model.load_data()
    baseline_loglike = model.loglike()
    result = model.maximize_loglike(method="BHHH")
    num_regression.check(
        {"loglike_prior": baseline_loglike, "loglike_converge": result.loglike},
        basename="test_school_location_loglike",
    )
    _regression_check(dataframe_regression, model.pf)
    dataframe_regression.check(
        update_size_spec(model, model_data, result_dir=None, output_file=None),
        basename="test_school_location_size_spec",
        default_tolerance=dict(atol=1e-6, rtol=1e-2),
    )
def test_cdap_model(est_data, num_regression, dataframe_regression):
    """Estimate the CDAP model and regression-check its results."""
    from activitysim.estimation.larch.cdap import cdap_model
    model = cdap_model()
    model.load_data()
    baseline_loglike = model.loglike()
    result = model.maximize_loglike(method="SLSQP", options={"maxiter": 1000})
    num_regression.check(
        {"loglike_prior": baseline_loglike, "loglike_converge": result.loglike},
        basename="test_cdap_model_loglike",
    )
    _regression_check(dataframe_regression, model.pf)
def test_nonmand_and_joint_tour_dest_choice(est_data, num_regression, dataframe_regression):
    """Jointly estimate the two tour-destination components and regression-check."""
    from activitysim.estimation.larch import component_model
    component_names = ("non_mandatory_tour_destination", "joint_tour_destination")
    model, _data = component_model(component_names, return_data=True)
    model.load_data()
    model.doctor(repair_ch_av="-")
    baseline_loglike = model.loglike()
    result = model.maximize_loglike(method="SLSQP", options={"maxiter": 1000})
    num_regression.check(
        {"loglike_prior": baseline_loglike, "loglike_converge": result.loglike},
        basename="test_nonmand_and_joint_tour_dest_choice_loglike",
    )
    _regression_check(dataframe_regression, model.pf)
def test_tour_and_subtour_mode_choice(est_data, num_regression, dataframe_regression):
    """Estimate tour + at-work subtour mode choice as one model group."""
    from activitysim.estimation.larch.mode_choice import (
        tour_mode_choice_model,
        atwork_subtour_mode_choice_model,
    )
    model = tour_mode_choice_model()
    # join the atwork subtour model to the master group
    model.extend(atwork_subtour_mode_choice_model())
    model.load_data()
    model.doctor(repair_ch_av="-")
    baseline_loglike = model.loglike()
    result = model.maximize_loglike(method="SLSQP", options={"maxiter": 1000})
    num_regression.check(
        {"loglike_prior": baseline_loglike, "loglike_converge": result.loglike},
        basename="test_tour_mode_choice_loglike",
    )
    _regression_check(dataframe_regression, model.pf)
def test_nonmand_tour_freq(est_data, num_regression, dataframe_regression):
    """Estimate every non-mandatory tour frequency segment and regression-check."""
    from activitysim.estimation.larch.nonmand_tour_freq import nonmand_tour_freq_model
    models = nonmand_tour_freq_model()
    baseline = {}
    # first pass: load and repair every segment, recording prior loglikes
    for segment in models:
        models[segment].load_data()
        models[segment].doctor(repair_ch_av="-")
        baseline[segment] = models[segment].loglike()
    # second pass: estimate each segment
    results = {}
    for segment in models:
        results[segment] = models[segment].maximize_loglike(
            method="SLSQP", options={"maxiter": 1000}
        )
    num_regression.check(
        {
            "loglike_prior": [v for _, v in sorted(baseline.items())],
            "loglike_converge": [v.loglike for _, v in sorted(results.items())],
        },
        basename="test_nonmand_tour_freq_loglike",
    )
    _regression_check(dataframe_regression, pd.concat([x.pf for x in models.values()]))
| |
import os
import pathlib
import stat
import sys
from io import StringIO
import pytest
import click._termui_impl
import click.utils
from click._compat import WIN
def test_echo(runner):
    """echo handles str, bytes, non-string objects, and nl=False."""
    with runner.isolation() as outstreams:
        click.echo("\N{SNOWMAN}")
        click.echo(b"\x44\x44")
        click.echo(42, nl=False)
        click.echo(b"a", nl=False)
        click.echo("\x1b[31mx\x1b[39m", nl=False)
        # note: not shadowing the ``bytes`` builtin here
        captured = outstreams[0].getvalue().replace(b"\r\n", b"\n")
    assert captured == b"\xe2\x98\x83\nDD\n42ax"

    # if wrapped, we expect bytes to survive.
    @click.command()
    def cli():
        click.echo(b"\xf6")

    result = runner.invoke(cli, [])
    assert result.stdout_bytes == b"\xf6\n"

    # Ensure we do not strip for bytes.
    with runner.isolation() as outstreams:
        click.echo(bytearray(b"\x1b[31mx\x1b[39m"), nl=False)
        assert outstreams[0].getvalue() == b"\x1b[31mx\x1b[39m"
def test_echo_custom_file():
    """echo writes to an explicit file object instead of stdout."""
    buffer = StringIO()
    click.echo("hello", file=buffer)
    assert buffer.getvalue() == "hello\n"
@pytest.mark.parametrize(
    ("styles", "ref"),
    [
        ({"fg": "black"}, "\x1b[30mx y\x1b[0m"),
        ({"fg": "red"}, "\x1b[31mx y\x1b[0m"),
        ({"fg": "green"}, "\x1b[32mx y\x1b[0m"),
        ({"fg": "yellow"}, "\x1b[33mx y\x1b[0m"),
        ({"fg": "blue"}, "\x1b[34mx y\x1b[0m"),
        ({"fg": "magenta"}, "\x1b[35mx y\x1b[0m"),
        ({"fg": "cyan"}, "\x1b[36mx y\x1b[0m"),
        ({"fg": "white"}, "\x1b[37mx y\x1b[0m"),
        ({"bg": "black"}, "\x1b[40mx y\x1b[0m"),
        ({"bg": "red"}, "\x1b[41mx y\x1b[0m"),
        ({"bg": "green"}, "\x1b[42mx y\x1b[0m"),
        ({"bg": "yellow"}, "\x1b[43mx y\x1b[0m"),
        ({"bg": "blue"}, "\x1b[44mx y\x1b[0m"),
        ({"bg": "magenta"}, "\x1b[45mx y\x1b[0m"),
        ({"bg": "cyan"}, "\x1b[46mx y\x1b[0m"),
        ({"bg": "white"}, "\x1b[47mx y\x1b[0m"),
        ({"bg": 91}, "\x1b[48;5;91mx y\x1b[0m"),
        ({"bg": (135, 0, 175)}, "\x1b[48;2;135;0;175mx y\x1b[0m"),
        ({"blink": True}, "\x1b[5mx y\x1b[0m"),
        ({"underline": True}, "\x1b[4mx y\x1b[0m"),
        ({"bold": True}, "\x1b[1mx y\x1b[0m"),
        ({"dim": True}, "\x1b[2mx y\x1b[0m"),
    ],
)
def test_styling(styles, ref):
    """style() emits the expected ANSI escape; unstyle() strips it back off."""
    assert click.style("x y", **styles) == ref
    assert click.unstyle(ref) == "x y"
@pytest.mark.parametrize(("text", "expect"), [("\x1b[?25lx y\x1b[?25h", "x y")])
def test_unstyle_other_ansi(text, expect):
    """unstyle() also removes non-SGR ANSI sequences (cursor show/hide)."""
    assert click.unstyle(text) == expect
def test_filename_formatting():
    """format_filename decodes bytes and optionally shortens to the basename."""
    for raw, expected in [
        (b"foo.txt", "foo.txt"),
        (b"/x/foo.txt", "/x/foo.txt"),
        ("/x/foo.txt", "/x/foo.txt"),
    ]:
        assert click.format_filename(raw) == expected

    assert click.format_filename("/x/foo.txt", shorten=True) == "foo.txt"

    # filesystem encoding on windows permits this.
    if not WIN:
        assert click.format_filename(b"/x/foo\xff.txt", shorten=True) == "foo\ufffd.txt"
def test_prompts(runner):
    """confirm() honors explicit y/n input and the configured default."""
    @click.command()
    def test():
        if click.confirm("Foo"):
            click.echo("yes!")
        else:
            click.echo("no :(")

    for keys, expected in [
        ("y\n", "Foo [y/N]: y\nyes!\n"),
        ("\n", "Foo [y/N]: \nno :(\n"),
        ("n\n", "Foo [y/N]: n\nno :(\n"),
    ]:
        result = runner.invoke(test, input=keys)
        assert not result.exception
        assert result.output == expected

    @click.command()
    def test_no():
        if click.confirm("Foo", default=True):
            click.echo("yes!")
        else:
            click.echo("no :(")

    for keys, expected in [
        ("y\n", "Foo [Y/n]: y\nyes!\n"),
        ("\n", "Foo [Y/n]: \nyes!\n"),
        ("n\n", "Foo [Y/n]: n\nno :(\n"),
    ]:
        result = runner.invoke(test_no, input=keys)
        assert not result.exception
        assert result.output == expected
@pytest.mark.skipif(WIN, reason="Different behavior on windows.")
def test_prompts_abort(monkeypatch, capsys):
    """A KeyboardInterrupt inside a hidden prompt surfaces as click.Abort."""
    def interrupt(_):
        raise KeyboardInterrupt()

    monkeypatch.setattr("click.termui.hidden_prompt_func", interrupt)

    try:
        click.prompt("Password", hide_input=True)
    except click.Abort:
        click.echo("Screw you.")

    out, _ = capsys.readouterr()
    assert out == "Password: \nScrew you.\n"
def _test_gen_func():
yield "a"
yield "b"
yield "c"
yield "abc"
@pytest.mark.skipif(WIN, reason="Different behavior on windows.")
@pytest.mark.parametrize("cat", ["cat", "cat ", "cat "])
@pytest.mark.parametrize(
    "test",
    [
        # We need lambda here, because pytest will
        # reuse the parameters, and then the generators
        # are already used and will not yield anymore
        ("just text\n", lambda: "just text"),
        ("iterable\n", lambda: ["itera", "ble"]),
        ("abcabc\n", lambda: _test_gen_func),
        ("abcabc\n", lambda: _test_gen_func()),
        ("012345\n", lambda: (c for c in range(6))),
    ],
)
def test_echo_via_pager(monkeypatch, capfd, cat, test):
    """echo_via_pager pipes text, iterables, and generators through $PAGER."""
    monkeypatch.setitem(os.environ, "PAGER", cat)
    monkeypatch.setattr(click._termui_impl, "isatty", lambda x: True)

    expected, make_input = test
    click.echo_via_pager(make_input())

    out, _ = capfd.readouterr()
    assert out == expected
@pytest.mark.skipif(WIN, reason="Test does not make sense on Windows.")
def test_echo_color_flag(monkeypatch, capfd):
    """The color= flag overrides tty detection when stripping ANSI styles."""
    isatty = True
    # the lambda reads ``isatty`` from this closure at call time
    monkeypatch.setattr(click._compat, "isatty", lambda x: isatty)

    plain = "foo"
    styled = click.style(plain, fg="red")
    assert styled == "\x1b[31mfoo\x1b[0m"

    # explicit color=False strips even on a tty
    click.echo(styled, color=False)
    out, _ = capfd.readouterr()
    assert out == f"{plain}\n"

    # explicit color=True keeps the escape codes
    click.echo(styled, color=True)
    out, _ = capfd.readouterr()
    assert out == f"{styled}\n"

    # default: keep styling on a tty ...
    isatty = True
    click.echo(styled)
    out, _ = capfd.readouterr()
    assert out == f"{styled}\n"

    # ... and strip it otherwise
    isatty = False
    click.echo(styled)
    out, _ = capfd.readouterr()
    assert out == f"{plain}\n"
def test_prompt_cast_default(capfd, monkeypatch):
    """A string default is coerced through the declared type on empty input."""
    monkeypatch.setattr(sys, "stdin", StringIO("\n"))
    result = click.prompt("value", default="100", type=int)
    capfd.readouterr()
    assert type(result) is int
@pytest.mark.skipif(WIN, reason="Test too complex to make work windows.")
def test_echo_writing_to_standard_error(capfd, monkeypatch):
    """echo/prompt/confirm/pause write to stderr when err=True."""
    def feed_stdin(text):
        """Emulate keyboard input."""
        monkeypatch.setattr(sys, "stdin", StringIO(text))

    click.echo("Echo to standard output")
    out, err = capfd.readouterr()
    assert (out, err) == ("Echo to standard output\n", "")

    click.echo("Echo to standard error", err=True)
    out, err = capfd.readouterr()
    assert (out, err) == ("", "Echo to standard error\n")

    feed_stdin("asdlkj\n")
    click.prompt("Prompt to stdin")
    out, err = capfd.readouterr()
    assert (out, err) == ("Prompt to stdin: ", "")

    feed_stdin("asdlkj\n")
    click.prompt("Prompt to stderr", err=True)
    out, err = capfd.readouterr()
    assert (out, err) == ("", "Prompt to stderr: ")

    feed_stdin("y\n")
    click.confirm("Prompt to stdin")
    out, err = capfd.readouterr()
    assert (out, err) == ("Prompt to stdin [y/N]: ", "")

    feed_stdin("y\n")
    click.confirm("Prompt to stderr", err=True)
    out, err = capfd.readouterr()
    assert (out, err) == ("", "Prompt to stderr [y/N]: ")

    # pause() needs a tty and a keypress
    monkeypatch.setattr(click.termui, "isatty", lambda x: True)
    monkeypatch.setattr(click.termui, "getchar", lambda: " ")

    click.pause("Pause to stdout")
    out, err = capfd.readouterr()
    assert (out, err) == ("Pause to stdout\n", "")

    click.pause("Pause to stderr", err=True)
    out, err = capfd.readouterr()
    assert (out, err) == ("", "Pause to stderr\n")
def test_echo_with_capsys(capsys):
    """click.echo output is visible to pytest's capsys capture."""
    click.echo("Capture me.")
    captured_out, _ = capsys.readouterr()
    assert captured_out == "Capture me.\n"
def test_open_file(runner):
    """open_file reads regular files and '-' (stdin) transparently."""
    @click.command()
    @click.argument("filename")
    def cli(filename):
        with click.open_file(filename) as handle:
            click.echo(handle.read())
        click.echo("meep")

    with runner.isolated_filesystem():
        with open("hello.txt", "w") as handle:
            handle.write("Cool stuff")

        res = runner.invoke(cli, ["hello.txt"])
        assert res.exception is None
        assert res.output == "Cool stuff\nmeep\n"

        res = runner.invoke(cli, ["-"], input="foobar")
        assert res.exception is None
        assert res.output == "foobar\nmeep\n"
def test_open_file_ignore_errors_stdin(runner):
    """Random bytes on stdin must not raise when errors='ignore'."""
    @click.command()
    @click.argument("filename")
    def cli(filename):
        with click.open_file(filename, errors="ignore") as handle:
            click.echo(handle.read())

    res = runner.invoke(cli, ["-"], input=os.urandom(16))
    assert res.exception is None
def test_open_file_respects_ignore(runner):
    """The errors= setting is propagated to the opened stream object."""
    with runner.isolated_filesystem():
        with open("test.txt", "w") as handle:
            handle.write("Hello world!")

        with click.open_file("test.txt", encoding="utf8", errors="ignore") as handle:
            assert handle.errors == "ignore"
def test_open_file_ignore_invalid_utf8(runner):
    """Invalid UTF-8 bytes are dropped, not raised, with errors='ignore'."""
    with runner.isolated_filesystem():
        with open("test.txt", "wb") as handle:
            handle.write(b"\xe2\x28\xa1")

        with click.open_file("test.txt", encoding="utf8", errors="ignore") as handle:
            handle.read()
def test_open_file_ignore_no_encoding(runner):
    """errors='ignore' also works without an explicit encoding."""
    with runner.isolated_filesystem():
        with open("test.bin", "wb") as handle:
            handle.write(os.urandom(16))

        with click.open_file("test.bin", errors="ignore") as handle:
            handle.read()
@pytest.mark.skipif(WIN, reason="os.chmod() is not fully supported on Windows.")
@pytest.mark.parametrize("permissions", [0o400, 0o444, 0o600, 0o644])
def test_open_file_atomic_permissions_existing_file(runner, permissions):
    """An atomic rewrite preserves the permissions of the existing file."""
    with runner.isolated_filesystem():
        with open("existing.txt", "w") as handle:
            handle.write("content")
        os.chmod("existing.txt", permissions)

        @click.command()
        @click.argument("filename")
        def cli(filename):
            click.open_file(filename, "w", atomic=True).close()

        res = runner.invoke(cli, ["existing.txt"])
        assert res.exception is None
        assert stat.S_IMODE(os.stat("existing.txt").st_mode) == permissions
@pytest.mark.skipif(WIN, reason="os.stat() is not fully supported on Windows.")
def test_open_file_atomic_permissions_new_file(runner):
    """Atomic writes create new files with umask-default permissions."""
    with runner.isolated_filesystem():
        @click.command()
        @click.argument("filename")
        def cli(filename):
            click.open_file(filename, "w", atomic=True).close()

        # Create a test file to get the expected permissions for new files
        # according to the current umask.
        with open("test.txt", "w"):
            pass
        expected = stat.S_IMODE(os.stat("test.txt").st_mode)

        res = runner.invoke(cli, ["new.txt"])
        assert res.exception is None
        assert stat.S_IMODE(os.stat("new.txt").st_mode) == expected
def test_iter_keepopenfile(tmpdir):
    """KeepOpenFile iterates the wrapped file line by line."""
    expected = [str(i) for i in range(10)]
    path = tmpdir.mkdir("testdir").join("testfile")
    path.write("\n".join(expected))
    with path.open() as handle:
        for want, got in zip(expected, click.utils.KeepOpenFile(handle)):
            assert want == got.strip()
def test_iter_lazyfile(tmpdir):
    """LazyFile opens on demand and iterates line by line."""
    expected = [str(i) for i in range(10)]
    path = tmpdir.mkdir("testdir").join("testfile")
    path.write("\n".join(expected))
    with path.open() as handle:
        with click.utils.LazyFile(handle.name) as lazy:
            for want, got in zip(expected, lazy):
                assert want == got.strip()
class MockMain:
    """Minimal stand-in for a ``__main__`` module exposing only ``__package__``."""

    __slots__ = "__package__"

    def __init__(self, package_name):
        self.__package__ = package_name
@pytest.mark.parametrize(
    ("path", "main", "expected"),
    [
        ("example.py", None, "example.py"),
        (str(pathlib.Path("/foo/bar/example.py")), None, "example.py"),
        ("example", None, "example"),
        (
            str(pathlib.Path("example/__main__.py")),
            MockMain(".example"),
            "python -m example",
        ),
        (
            str(pathlib.Path("example/cli.py")),
            MockMain(".example"),
            "python -m example.cli",
        ),
    ],
)
def test_detect_program_name(path, main, expected):
    """_detect_program_name shows a basename for scripts and a
    ``python -m pkg`` invocation when run as a package/module."""
    assert click.utils._detect_program_name(path, _main=main) == expected
| |
import ast
from textwrap import dedent
# TODO
# the pandas/numpy tests were written since that what was I originally
# testing on. They should optional so tests don't depend on them
import pandas as pd
import numpy as np
import collections
import pytest
from asttools import (
_eval,
_exec,
_convert_to_expression,
ast_source,
ast_equal,
ast_contains,
code_context_subset,
generate_getter_var,
generate_getter_lazy,
graph_walk
)
from ..graph import NodeLocation
class TestEval:
    """Behavior of the low-level _exec/_eval helpers."""

    def test_exec(self):
        """_exec runs a statement node against a namespace."""
        code = ast.parse(dedent("""
        d = 123
        """))
        ns = {}
        out = _exec(code.body[0], ns)
        assert ns['d'] == 123
        assert out is None

        # expression-statement variant of _exec returns the value
        code = ast.parse(dedent("""
        123
        """))
        ns = {}
        assert _exec(code.body[0], ns) == 123

    def test_eval(self):
        """
        _eval should only run on expressions
        """
        code = ast.parse(dedent("""
        d = 123
        """))
        with pytest.raises(Exception):
            _eval(code.body[0], {})
def test_ast_source_expression():
    """ expressions were having a problem in astor """
    source = """np.random.randn(10, 10)"""
    expr = _convert_to_expression(ast.parse(dedent(source)))
    assert ast_source(expr) == source
def test_ast_equal():
    """ast_equal compares trees structurally, including constant values."""
    code1 = ast.parse("""test(np.random.randn(10, 10))""", mode='eval')
    code2 = ast.parse("""test(np.random.randn(10, 10))""", mode='eval')
    assert ast_equal(code1, code2)

    # a differing constant breaks equality
    code3 = ast.parse("""test(np.random.randn(10, 11))""", mode='eval')
    assert not ast_equal(code1, code3)

    # a subtree can be compared against a standalone expression
    code4 = ast.parse("""np.random.randn(10, 11)""", mode='eval')
    assert ast_equal(code3.body.args[0], code4.body)
def test_ast_contains():
    """ast_contains yields matches for any contained sub-expression."""
    code1 = ast.parse("""test(np.random.randn(10, 11)) + test2 / 99""", mode='eval').body

    for fragment in [
        """np.random.randn(10, 11)""",
        "10",
        "test2",
        "np.random.randn",
        "test2/99",
    ]:
        node = ast.parse(fragment, mode='eval').body
        assert list(ast_contains(code1, node))[0]

    # False. Note that this isn't about a textual subset:
    # random.randn means nothing without np — it implies a
    # top level random module
    node = ast.parse("random.randn", mode='eval').body
    assert not list(ast_contains(code1, node))

    # test against a module.
    mod = ast.parse(dedent("""
    first_line() + 100
    bob = test(np.random.randn(10, 11)) + test2 / 99
    """))
    node = ast.parse("""np.random.randn(10, 11)""", mode='eval').body
    assert list(ast_contains(mod, node))[0]
def test_ast_contains_expression():
    """
    Test that the fragment must be an expression.
    """
    # test against a module.
    mod = ast.parse(dedent("""
    first_line() + 100
    bob = test(np.random.randn(10, 11)) + test2 / 99
    """))

    # an expression compiled as a module works fine
    node = ast.parse("""np.random.randn(10, 11)""")
    assert list(ast_contains(mod, node))[0]

    # an assignment is a no-no
    with pytest.raises(Exception, match="Fragment must be an expression"):
        node = ast.parse("""a = np.random.randn(10, 11)""")
        list(ast_contains(mod, node))
def test_ast_contains_ignore_names():
    """ignore_var_names relaxes matching of Load-context Name nodes only."""
    mod = ast.parse(dedent("""
    test(np.random.randn(10, 11))
    """))

    # np renamed to test still matches once variable names are ignored
    node = ast.parse("""test.random.randn(10, 11)""")
    assert list(ast_contains(mod, node, ignore_var_names=True))[0]

    # note that only Load(ctx.Load) ids will be ignored
    node = ast.parse("""test.text""")
    assert not list(ast_contains(mod, node, ignore_var_names=True))

    # dumb example. single Name will always match
    node = ast.parse("""anything""")
    assert list(ast_contains(mod, node, ignore_var_names=True))[0]
def test_ast_contains_ignore_names_multi():
    """
    Note that we can actually match multiple times, especially if we ignore
    names. ast_contains need to be changed to yield a generator.
    """
    mod = ast.parse(dedent("""
    (a + b) + (c + d) + (e + f)
    """))
    pattern = ast.parse("""(x + y)""")
    hits = list(ast_contains(mod, pattern, ignore_var_names=True))
    assert len(hits) == 3
def test_ast_graph_walk():
    """graph_walk visits the same nodes as ast.walk, in a stable order."""
    mod = ast.parse(dedent("""
    test(np.random.randn(10, 11))
    """))
    items = list(graph_walk(mod))
    graph_nodes = [item['node'] for item in items]

    walk_nodes = list(ast.walk(mod))
    # remove module node which the graph_walk won't have
    assert isinstance(walk_nodes.pop(0), ast.Module)

    # we should have reached the same nodes, not in same order
    assert collections.Counter(graph_nodes) == collections.Counter(walk_nodes)

    graph_types = [
        ast.Load,
        ast.Name,
        ast.Load,
        ast.Name,
        ast.Load,
        ast.Attribute,
        ast.Load,
        ast.Attribute,
        ast.Constant,
        ast.Constant,
        ast.Call,
        ast.Call,
        ast.Expr,
    ]
    # using type order to check that the type ordering is stable..
    assert [type(n) for n in graph_nodes] == graph_types
def test_code_context_subset():
    """Match a sub-expression by both AST shape and runtime values."""
    df = pd.DataFrame(np.random.randn(30, 3), columns=['a', 'bob', 'c'])
    ns = {
        'df': df,
        'c': 1,
        'pd': pd,
        'np': np
    }
    code = ast.parse(dedent("""pd.rolling_sum(np.log(df + 10), 5, min_periods=c)"""),
                     mode='eval')

    # use blah instead of df. same code.
    child_ns = dict(ns, blah=df)
    child_code = ast.parse("np.log(blah+10)")  # note that df was renamed blah

    # exact-name matching fails because of the rename
    assert not list(code_context_subset(code, ns, child_code, child_ns,
                                        ignore_var_names=False))

    # ignoring the var names should get us our match
    matches = list(code_context_subset(code, ns, child_code, child_ns,
                                       ignore_var_names=True))
    assert len(matches) == 1
    match = matches[0]
    assert match is not None
    # the match is the first positional argument of the outer call
    assert match['node'] is code.body.args[0]
    assert match['parent'] is code.body
    assert match['field_name'] == 'args'
    assert match['field_index'] == 0
def test_code_context_subset_by_value():
    """
    test that when we have multiple ast matches,
    we properly test by value.
    previously ast_contains only returned first match, and so
    code_context_subset wouldn't always return if the value match was
    on the second match
    """
    ns = {
        'a': 1,
        'b': 2,
        'c': 3,
        'd': 4
    }
    code = ast.parse(dedent("(a + b) + (c + d)"), mode='eval')
    child_code = ast.parse("x + y")

    # values line up with the first group
    res = list(code_context_subset(code, ns, child_code, {'x': 1, 'y': 2},
                                   ignore_var_names=True))
    assert ast_source(res[0]['node']) == '(a + b)'

    # values line up with the second group
    res = list(code_context_subset(code, ns, child_code, {'x': 3, 'y': 4},
                                   ignore_var_names=True))
    assert ast_source(res[0]['node']) == '(c + d)'
def test_code_context_subset_match_multi():
    """Every group whose runtime values agree is yielded as a match."""
    ns = {
        'a': 1,
        'b': 2,
        'c': 1,
        'd': 2
    }
    code = ast.parse(dedent("(a + b) + (c + d)"), mode='eval')
    child_code = ast.parse("x + y")
    res = code_context_subset(code, ns, child_code, {'x': 1, 'y': 2},
                              ignore_var_names=True)
    found = [ast_source(item['node']) for item in res]
    assert collections.Counter(found) == collections.Counter(['(a + b)', '(c + d)'])
def test_generate_getter_var():
    """The generated getter node evaluates to the stored value."""
    key = object()
    expected = 10
    node, ns = generate_getter_var(key, expected)
    assert _eval(node, ns) == expected
def test_generate_getter_lazy():
    """The lazy getter defers to ``manifest.get_obj()`` at eval time."""
    class StubManifest:
        def __init__(self, value):
            self.value = value

        def get_obj(self):
            return self.value

    expected = "TEST"
    node, ns = generate_getter_lazy(StubManifest(expected))
    assert _eval(node, ns) == expected
def test_node_location():
    """NodeLocation exposes parent/field_name/field_index like a mapping."""
    loc = NodeLocation(object(), 1, 2)
    expected_keys = ['parent', 'field_name', 'field_index']
    assert collections.Counter(loc.keys()) == collections.Counter(expected_keys)
    assert collections.Counter(list(dict(loc))) == collections.Counter(expected_keys)

    # Smoke-check that graph_walk items carry 'node' entries; the original
    # test built several unused locals (walk_nodes, gen, a break-only loop)
    # that asserted nothing — removed as dead leftover code.
    source = """
    test(np.random.randn(10, 11))
    """
    mod = ast.parse(dedent(source))
    items = list(graph_walk(mod))
    assert all('node' in item for item in items)
| |
from __future__ import division
import collections
import ctypes
import sys
import numpy
import six
from cupy import binary
from cupy import carray
from cupy import creation
from cupy import cuda
from cupy import elementwise
from cupy import flags
from cupy import indexing
from cupy import internal
from cupy import io
from cupy import linalg
from cupy import logic
from cupy import manipulation
from cupy import math
import cupy.random
from cupy import reduction
from cupy import sorting
from cupy import statistics
from cupy import util
# Re-export the random submodule under the conventional NumPy-style name.
random = cupy.random

# dtype short cut
# These are the very same objects as NumPy's scalar types, so isinstance
# checks interoperate with NumPy scalars and dtypes.

# Abstract scalar-type bases.
number = numpy.number
integer = numpy.integer
signedinteger = numpy.signedinteger
unsignedinteger = numpy.unsignedinteger
inexact = numpy.inexact
floating = numpy.floating

# Concrete scalar types named after C types.
bool_ = numpy.bool_
byte = numpy.byte
short = numpy.short
intc = numpy.intc
int_ = numpy.int_
longlong = numpy.longlong
ubyte = numpy.ubyte
ushort = numpy.ushort
uintc = numpy.uintc
uint = numpy.uint
ulonglong = numpy.ulonglong
half = numpy.half
single = numpy.single
float_ = numpy.float_
longfloat = numpy.longfloat

# Concrete scalar types named by bit width.
int8 = numpy.int8
int16 = numpy.int16
int32 = numpy.int32
int64 = numpy.int64
uint8 = numpy.uint8
uint16 = numpy.uint16
uint32 = numpy.uint32
uint64 = numpy.uint64
float16 = numpy.float16
float32 = numpy.float32
float64 = numpy.float64
class ndarray(object):
"""Multi-dimensional array on a CUDA device.
This class implements a subset of methods of :class:`numpy.ndarray`.
The difference is that this class allocates the array content on the
current GPU device.
Args:
shape (tuple of ints): Length of axes.
dtype: Data type. It must be an argument of :class:`numpy.dtype`.
memptr (cupy.cuda.MemoryPointer): Pointer to the array content head.
strides (tuple of ints): The strides for axes.
Attributes:
data (cupy.cuda.MemoryPointer): Pointer to the array content head.
base (None or cupy.ndarray): Base array from which this array is
created as a view.
"""
    def __init__(self, shape, dtype=float, memptr=None, strides=None):
        """Initialize the array, allocating device memory if none is given."""
        self._shape = tuple(shape)
        self._dtype = numpy.dtype(dtype)
        nbytes = self.nbytes
        if memptr is None:
            # No external buffer supplied: allocate on the current device.
            self.data = cuda.alloc(nbytes)
        else:
            self.data = memptr
        if strides is None:
            # Derive a C-contiguous layout, so contiguity flags are known
            # up front without inspection.
            self._strides = internal.get_contiguous_strides(
                self._shape, self.itemsize)
            self._flags = flags.C_CONTIGUOUS | flags.OWNDATA
            # An array with at most one non-unit axis (or with no data at
            # all) is Fortran-contiguous as well.
            if numpy.sum(dim != 1 for dim in shape) <= 1 or nbytes == 0:
                self._flags |= flags.F_CONTIGUOUS
        else:
            # Arbitrary strides: defer contiguity detection until the flags
            # are actually queried (see the dirty bits in the `flags`
            # property).
            self._strides = strides
            self._flags = flags.OWNDATA
            self._mark_dirty()
        self.base = None
# The definition order of attributes and methods are borrowed from the
# order of documentation at the following NumPy document.
# http://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html
# -------------------------------------------------------------------------
# Memory layout
# -------------------------------------------------------------------------
    @property
    def flags(self):
        """Object containing memory-layout information.

        It only contains ``c_contiguous``, ``f_contiguous``, and ``owndata``
        attributes. All of these are read-only. Accessing by indexes is also
        supported.

        .. seealso:: :attr:`numpy.ndarray.flags`

        """
        # Contiguity is tracked lazily: shape/stride mutations set a dirty
        # bit, and recomputation happens here on first access.
        if self._flags & flags.C_DIRTY:
            self._update_c_contiguity()
        if self._flags & flags.F_DIRTY:
            self._update_f_contiguity()
        return flags.Flags(self._flags)
@property
def shape(self):
"""Lengths of axes.
Setter of this property involves reshaping without copy. If the array
cannot be reshaped without copy, it raises an exception.
.. seealso: :attr:`numpy.ndarray.shape`
"""
return self._shape
@shape.setter
def shape(self, newshape):
newshape = internal.infer_unknown_dimension(newshape, self.size)
strides = internal.get_strides_for_nocopy_reshape(self, newshape)
if strides is None:
raise AttributeError('Incompatible shape')
self._shape = newshape
self._strides = strides
self._mark_f_dirty()
@property
def strides(self):
"""Strides of axes in bytes.
.. seealso:: :attr:`numpy.ndarray.strides`
"""
return self._strides
@property
def ndim(self):
"""Number of dimensions.
``a.ndim`` is equivalent to ``len(a.shape)``.
.. seealso:: :attr:`numpy.ndarray.ndim`
"""
return len(self._shape)
@property
def size(self):
"""Number of elements this array holds.
This is equivalent to product over the shape tuple.
.. seealso:: :attr:`numpy.ndarray.size`
"""
return internal.prod(self._shape)
@property
def itemsize(self):
"""Size of each element in bytes.
.. seealso:: :attr:`numpy.ndarray.itemsize`
"""
return self._dtype.itemsize
@property
def nbytes(self):
"""Size of whole elements in bytes.
It does not count skips between elements.
.. seealso:: :attr:`numpy.ndarray.nbytes`
"""
return self.size * self.itemsize
# -------------------------------------------------------------------------
# Data type
# -------------------------------------------------------------------------
@property
def dtype(self):
"""Dtype object of element type.
.. seealso::
`Data type objects (dtype) \
<http://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html>`_
"""
return self._dtype
# -------------------------------------------------------------------------
# Other attributes
# -------------------------------------------------------------------------
@property
def T(self):
"""Shape-reversed view of the array.
If ndim < 2, then this is just a reference to the array itself.
"""
if self.ndim < 2:
return self
else:
return self.transpose()
__array_priority__ = 100
# -------------------------------------------------------------------------
# Array interface
# -------------------------------------------------------------------------
# TODO(beam2d): Implement it
# __array_interface__
# -------------------------------------------------------------------------
# ctypes foreign function interface
# -------------------------------------------------------------------------
    @property
    def ctypes(self):
        """C representation of the array.

        This property is used for sending an array to CUDA kernels. The type of
        returned C structure is different for different dtypes and ndims. The
        definition of C type is written in ``cupy/carray.cuh``.

        .. note::
           The returned value does not have compatibility with
           :attr:`numpy.ndarray.ctypes`.

        """
        # Packs the device pointer plus size/shape/strides into the carray
        # struct expected by the kernels.
        return carray.to_carray(self.data.ptr, self.size, self._shape,
                                self._strides)
# -------------------------------------------------------------------------
# Array conversion
# -------------------------------------------------------------------------
# TODO(beam2d): Implement it
# def item(self, *args):
    def tolist(self):
        """Converts the array to a (possibly nested) Python list.

        Returns:
            list: The possibly nested Python list of array elements.

        .. seealso:: :meth:`numpy.ndarray.tolist`

        """
        # Copy to host first, then let NumPy build the nested list.
        return self.get().tolist()
# TODO(beam2d): Implement these
# def itemset(self, *args):
# def tostring(self, order='C'):
# def tobytes(self, order='C'):
def tofile(self, fid, sep='', format='%s'):
"""Writes the array to a file.
.. seealso:: :meth:`numpy.ndarray.tolist`
"""
self.get().tofile(fid, sep, format)
    def dump(self, file):
        """Dumps a pickle of the array to a file.

        Dumped file can be read back to cupy.ndarray by
        :func:`cupy.load`.

        """
        # Protocol -1 selects the highest pickle protocol available.
        six.moves.cPickle.dump(self, file, -1)
    def dumps(self):
        """Dumps a pickle of the array to a string."""
        # Protocol -1 selects the highest pickle protocol available.
        return six.moves.cPickle.dumps(self, -1)
def astype(self, dtype, copy=True):
"""Casts the array to given data type.
Args:
dtype: Type specifier.
copy (bool): If it is False and no cast happens, then this method
returns the array itself. Otherwise, a copy is returned.
Returns:
If ``copy`` is False and no cast is required, then the array itself
is returned. Otherwise, it returns a (possibly casted) copy of the
array.
.. note::
This method currently does not support ``order``, ``casting``, and
``subok`` arguments.
.. seealso:: :meth:`numpy.ndarray.astype`
"""
# TODO(beam2d): Support ordering, casting, and subok option
dtype = numpy.dtype(dtype)
if dtype == self._dtype:
if copy:
return self.copy()
else:
return self
else:
newarray = empty_like(self, dtype=dtype)
elementwise.copy(self, newarray)
return newarray
# TODO(beam2d): Implement it
# def byteswap(self, inplace=False):
    def copy(self):
        """Returns a copy of the array.

        .. seealso::
           :func:`cupy.copy` for full documentation,
           :meth:`numpy.ndarray.copy`

        """
        # TODO(beam2d): Support ordering option
        # Delegates to the module-level ``copy`` function (not this method).
        return copy(self)
    def view(self, dtype=None):
        """Returns a view of the array.

        Args:
            dtype: If this is different from the data type of the array, the
                returned view reinterpret the memory sequence as an array of
                this type.

        Returns:
            cupy.ndarray: A view of the array. A reference to the original
            array is stored at the :attr:`~ndarray.base` attribute.

        .. seealso:: :meth:`numpy.ndarray.view`

        """
        # NOTE(review): the ``dtype`` argument is accepted but never used
        # below, so dtype-reinterpreting views are not actually produced —
        # confirm whether this is intentional or unimplemented.
        # Use __new__ instead of __init__ to skip recomputation of contiguity
        v = ndarray.__new__(ndarray)
        v._dtype = self._dtype
        # A view never owns its buffer: clear OWNDATA.
        v._flags = self._flags & ~flags.OWNDATA
        v._shape = self._shape
        v._strides = self._strides
        v.data = self.data
        # Always point ``base`` at the root array that owns the memory.
        v.base = self.base if self.base is not None else self
        return v
# TODO(beam2d): Implement these
# def getfield(self, dtype, offset=0):
# def setflags(self, write=None, align=None, uic=None):
    def fill(self, value):
        """Fills the array with a scalar value.

        Args:
            value: A scalar value to fill the array content.

        .. seealso:: :meth:`numpy.ndarray.fill`

        """
        # Broadcasting element-wise copy of the scalar into every element.
        elementwise.copy(value, self, dtype=self._dtype)
# -------------------------------------------------------------------------
# Shape manipulation
# -------------------------------------------------------------------------
    def reshape(self, *shape):
        """Returns an array of a different shape and the same content.

        .. seealso::
           :func:`cupy.reshape` for full documentation,
           :meth:`numpy.ndarray.reshape`

        """
        # TODO(beam2d): Support ordering option
        # Accept both reshape(a, b) and reshape((a, b)) call styles.
        # NOTE(review): ``collections.Iterable`` was removed in Python 3.10;
        # ``collections.abc.Iterable`` is the modern spelling — confirm the
        # supported Python versions before changing (the file still targets
        # six/py2-era compatibility).
        if len(shape) == 1 and isinstance(shape[0], collections.Iterable):
            shape = tuple(shape[0])
        return reshape(self, shape)
# TODO(beam2d0: Implement it
# def resize(self, new_shape, refcheck=True):
def transpose(self, *axes):
"""Returns a view of the array with axes permuted.
.. seealso::
:func:`cupy.transpose` for full documentation,
:meth:`numpy.ndarray.reshape`
"""
return transpose(self, axes)
    def swapaxes(self, axis1, axis2):
        """Returns a view of the array with two axes swapped.

        .. seealso::
           :func:`cupy.swapaxes` for full documentation,
           :meth:`numpy.ndarray.swapaxes`

        """
        # Delegates to the module-level implementation.
        return swapaxes(self, axis1, axis2)
def flatten(self):
"""Returns a copy of the array flatten into one dimension.
It currently supports C-order only.
Returns:
cupy.ndarray: A copy of the array with one dimension.
.. seealso:: :meth:`numpy.ndarray.flatten`
"""
# TODO(beam2d): Support ordering option
if self.flags.c_contiguous:
newarray = self.copy()
else:
newarray = empty_like(self)
elementwise.copy(self, newarray)
newarray._shape = self.size,
newarray._strides = self.itemsize,
self._flags |= flags.C_CONTIGUOUS | flags.F_CONTIGUOUS
return newarray
    def ravel(self):
        """Returns an array flattend into one dimension.

        .. seealso::
           :func:`cupy.ravel` for full documentation,
           :meth:`numpy.ndarray.ravel`

        """
        # TODO(beam2d): Support ordering option
        # Delegates to the module-level implementation.
        return ravel(self)
    def squeeze(self, axis=None):
        """Returns a view with size-one axes removed.

        .. seealso::
           :func:`cupy.squeeze` for full documentation,
           :meth:`numpy.ndarray.squeeze`

        """
        # Delegates to the module-level implementation.
        return squeeze(self, axis)
# -------------------------------------------------------------------------
# Item selection and manipulation
# -------------------------------------------------------------------------
    def take(self, indices, axis=None, out=None):
        """Returns an array of elements at given indices along the axis.

        .. seealso::
           :func:`cupy.take` for full documentation,
           :meth:`numpy.ndarray.take`

        """
        # Delegates to the module-level implementation.
        return take(self, indices, axis, out)
# TODO(beam2d): Implement these
# def put(self, indices, values, mode='raise'):
# def repeat(self, repeats, axis=None):
# def choose(self, choices, out=None, mode='raise'):
# def sort(self, axis=-1, kind='quicksort', order=None):
# def argsort(self, axis=-1, kind='quicksort', order=None):
# def partition(self, kth, axis=-1, kind='introselect', order=None):
# def argpartition(self, kth, axis=-1, kind='introselect', order=None):
# def searchsorted(self, v, side='left', sorter=None):
# def nonzero(self):
# def compress(self, condition, axis=None, out=None):
    def diagonal(self, offset=0, axis1=0, axis2=1):
        """Returns a view of the specified diagonals.

        .. seealso::
           :func:`cupy.diagonal` for full documentation,
           :meth:`numpy.ndarray.diagonal`

        """
        # Delegates to the module-level implementation.
        return diagonal(self, offset, axis1, axis2)
# -------------------------------------------------------------------------
# Calculation
# -------------------------------------------------------------------------
    # The reduction methods below all delegate to the module-level
    # functions (amax, argmax, amin, argmin, clip, trace, sum, mean)
    # aliased near the end of this module.
    def max(self, axis=None, out=None, dtype=None, keepdims=False):
        """Returns the maximum along a given axis.

        .. seealso::
           :func:`cupy.amax` for full documentation,
           :meth:`numpy.ndarray.max`

        """
        return amax(
            self, axis=axis, out=out, dtype=dtype, keepdims=keepdims)

    def argmax(self, axis=None, out=None, dtype=None, keepdims=False):
        """Returns the indices of the maximum along a given axis.

        .. seealso::
           :func:`cupy.argmax` for full documentation,
           :meth:`numpy.ndarray.argmax`

        """
        return argmax(
            self, axis=axis, out=out, dtype=dtype, keepdims=keepdims)

    def min(self, axis=None, out=None, dtype=None, keepdims=False):
        """Returns the minimum along a given axis.

        .. seealso::
           :func:`cupy.amin` for full documentation,
           :meth:`numpy.ndarray.min`

        """
        return amin(
            self, axis=axis, out=out, dtype=dtype, keepdims=keepdims)

    def argmin(self, axis=None, out=None, dtype=None, keepdims=False):
        """Returns the indices of the minimum along a given axis.

        .. seealso::
           :func:`cupy.argmin` for full documentation,
           :meth:`numpy.ndarray.argmin`

        """
        return argmin(
            self, axis=axis, out=out, dtype=dtype, keepdims=keepdims)

    # TODO(beam2d): Implement it
    # def ptp(self, axis=None, out=None):

    def clip(self, a_min, a_max, out=None):
        """Returns an array with values limited to [a_min, a_max].

        .. seealso::
           :func:`cupy.clip` for full documentation,
           :meth:`numpy.ndarray.clip`

        """
        return clip(self, a_min, a_max, out=out)

    # TODO(beam2d): Implement it
    # def round(self, decimals=0, out=None):

    def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
        """Returns the sum along diagonals of the array.

        .. seealso::
           :func:`cupy.trace` for full documentation,
           :meth:`numpy.ndarray.trace`

        """
        return trace(self, offset, axis1, axis2, dtype, out)

    def sum(self, axis=None, dtype=None, out=None, keepdims=False):
        """Returns the sum along a given axis.

        .. seealso::
           :func:`cupy.sum` for full documentation,
           :meth:`numpy.ndarray.sum`

        """
        return sum(self, axis=axis, dtype=dtype, out=out, keepdims=keepdims)

    # TODO(beam2d): Implement it
    # def cumsum(self, axis=None, dtype=None, out=None):

    def mean(self, axis=None, dtype=None, out=None, keepdims=False):
        """Returns the mean along a given axis.

        .. seealso::
           :func:`cupy.mean` for full documentation,
           :meth:`numpy.ndarray.mean`

        """
        return mean(self, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
"""Returns the variance along a given axis.
.. seealso::
:func:`cupy.var` for full documentation,
:meth:`numpy.ndarray.var`
"""
return var(self, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
"""Returns the standard deviation along a given axis.
.. seealso::
:func:`cupy.std` for full documentation,
:meth:`numpy.ndarray.std`
"""
return std(self, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
    def prod(self, axis=None, dtype=None, out=None, keepdims=None):
        # NOTE(review): keepdims defaults to None here while the other
        # reductions use False — presumably treated as falsy downstream;
        # confirm against the module-level prod() implementation.
        """Returns the product along a given axis.

        .. seealso::
           :func:`cupy.prod` for full documentation,
           :meth:`numpy.ndarray.prod`

        """
        return prod(self, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
# TODO(beam2d): Implement these
# def cumprod(self, axis=None, dtype=None, out=None):
# def all(self, axis=None, out=None):
# def any(self, axis=None, out=None):
# -------------------------------------------------------------------------
# Arithmetic and comparison operations
# -------------------------------------------------------------------------
# Comparison operators:
    # Comparison operators delegate to the elementwise comparison
    # functions aliased at module level; each returns an array, not a bool.
    # NOTE(review): __eq__ is defined without __hash__; under Python 2 the
    # class stays hashable by identity — confirm that is intended.
    def __lt__(self, other):
        return less(self, other)

    def __le__(self, other):
        return less_equal(self, other)

    def __gt__(self, other):
        return greater(self, other)

    def __ge__(self, other):
        return greater_equal(self, other)

    def __eq__(self, other):
        return equal(self, other)

    def __ne__(self, other):
        return not_equal(self, other)

    # Truth value of an array (bool):
    def __nonzero__(self):
        # Delegates to __ne__, so this yields an elementwise comparison
        # result rather than a plain bool.
        return self != 0

    # Unary operations:
    def __neg__(self):
        return negative(self)

    def __pos__(self):
        return self

    def __abs__(self):
        return absolute(self)

    def __invert__(self):
        return invert(self)

    # Arithmetic:
    # Each binary operator defers to the right operand's reflected method
    # when that operand declares a higher __array_priority__ (see
    # _should_use_rop); otherwise the elementwise ufunc is applied.
    def __add__(self, other):
        if self._should_use_rop(other):
            return other.__radd__(self)
        else:
            return add(self, other)

    def __sub__(self, other):
        if self._should_use_rop(other):
            return other.__rsub__(self)
        else:
            return subtract(self, other)

    def __mul__(self, other):
        if self._should_use_rop(other):
            return other.__rmul__(self)
        else:
            return multiply(self, other)

    def __div__(self, other):
        # Python 2 classic division.
        if self._should_use_rop(other):
            return other.__rdiv__(self)
        else:
            return divide(self, other)

    def __truediv__(self, other):
        if self._should_use_rop(other):
            return other.__rtruediv__(self)
        else:
            return true_divide(self, other)

    def __floordiv__(self, other):
        if self._should_use_rop(other):
            return other.__rfloordiv__(self)
        else:
            return floor_divide(self, other)

    def __mod__(self, other):
        if self._should_use_rop(other):
            return other.__rmod__(self)
        else:
            return remainder(self, other)

    def __divmod__(self, other):
        if self._should_use_rop(other):
            return other.__rdivmod__(self)
        else:
            return elementwise._divmod(self, other)

    def __pow__(self, other, modulo=None):
        # Note that we ignore the modulo argument as well as NumPy.
        if self._should_use_rop(other):
            return other.__rpow__(self)
        else:
            return power(self, other)

    def __lshift__(self, other):
        if self._should_use_rop(other):
            return other.__rlshift__(self)
        else:
            return left_shift(self, other)

    def __rshift__(self, other):
        if self._should_use_rop(other):
            return other.__rrshift__(self)
        else:
            return right_shift(self, other)

    def __and__(self, other):
        if self._should_use_rop(other):
            return other.__rand__(self)
        else:
            return bitwise_and(self, other)

    def __or__(self, other):
        if self._should_use_rop(other):
            return other.__ror__(self)
        else:
            return bitwise_or(self, other)

    def __xor__(self, other):
        if self._should_use_rop(other):
            return other.__rxor__(self)
        else:
            return bitwise_xor(self, other)
# Arithmetic __r{op}__ (CuPy specific):
    # Reflected operators simply apply the ufunc with the operand order
    # swapped (other op self).
    def __radd__(self, other):
        return add(other, self)

    def __rsub__(self, other):
        return subtract(other, self)
def __rmul__(self, other):
if not isinstance(other, ndarray):
return multiply(other, self)
return multiply(other, self)
    def __rdiv__(self, other):
        # Python 2 reflected classic division.
        return divide(other, self)

    def __rtruediv__(self, other):
        return true_divide(other, self)

    def __rfloordiv__(self, other):
        return floor_divide(other, self)

    def __rmod__(self, other):
        return remainder(other, self)

    def __rdivmod__(self, other):
        return elementwise._divmod(other, self)

    def __rpow__(self, other):
        return power(other, self)

    def __rlshift__(self, other):
        return left_shift(other, self)

    def __rrshift__(self, other):
        return right_shift(other, self)

    def __rand__(self, other):
        return bitwise_and(other, self)

    def __ror__(self, other):
        return bitwise_or(other, self)

    def __rxor__(self, other):
        return bitwise_xor(other, self)

    # Arithmetic, in-place:
    # Each in-place operator passes self as the third (output) argument,
    # so the result is written back into this array's memory.
    def __iadd__(self, other):
        return add(self, other, self)

    def __isub__(self, other):
        return subtract(self, other, self)

    def __imul__(self, other):
        return multiply(self, other, self)

    def __idiv__(self, other):
        return divide(self, other, self)

    def __itruediv__(self, other):
        return true_divide(self, other, self)

    def __ifloordiv__(self, other):
        return floor_divide(self, other, self)

    def __imod__(self, other):
        return remainder(self, other, self)

    def __ipow__(self, other, modulo=None):
        # modulo is ignored, mirroring __pow__.
        return power(self, other, self)

    def __ilshift__(self, other):
        return left_shift(self, other, self)

    def __irshift__(self, other):
        return right_shift(self, other, self)

    def __iand__(self, other):
        return bitwise_and(self, other, self)

    def __ior__(self, other):
        return bitwise_or(self, other, self)

    def __ixor__(self, other):
        return bitwise_xor(self, other, self)

    # -------------------------------------------------------------------------
    # Special methods
    # -------------------------------------------------------------------------
    # For standard library functions:
    def __copy__(self):
        return self.copy()

    def __deepcopy__(self, memo):
        # NOTE(review): memo is ignored; copy() produces the duplicate.
        return self.copy()

    def __reduce__(self):
        # Pickles by converting to a host (numpy) array via get(); the
        # module-level array() rebuilds the device array on unpickling.
        return array, (self.get(),)

    # Basic customization:
    # cupy.ndarray does not define __new__

    def __array__(self, dtype=None):
        # Returns self unchanged when no conversion is needed; otherwise
        # casts to the requested dtype.
        if dtype is None or self._dtype == dtype:
            return self
        else:
            return self.astype(dtype)
    # TODO(beam2d): Implement it
# def __array_wrap__(self, obj):
# Container customization:
def __len__(self):
if not self._shape:
raise TypeError('len() of unsized object')
return self._shape[0]
    def __getitem__(self, slices):
        # It supports the basic indexing (by slices, ints or Ellipsis) only.
        # TODO(beam2d): Support the advanced indexing of NumPy.
        # Normalize the index into a mutable list of per-axis selectors.
        if not isinstance(slices, tuple):
            slices = [slices]
        else:
            slices = list(slices)

        if any(isinstance(s, ndarray) for s in slices):
            raise ValueError('Advanced indexing is not supported')

        # Expand ellipsis into empty slices
        n_newaxes = slices.count(newaxis)
        n_ellipses = slices.count(Ellipsis)
        if n_ellipses > 0:
            if n_ellipses > 1:
                raise ValueError('Only one Ellipsis is allowed in index')
            ellipsis = slices.index(Ellipsis)
            # Number of axes the Ellipsis stands for (newaxis entries do
            # not consume an input axis).
            ellipsis_size = self.ndim - (len(slices) - n_newaxes - 1)
            slices[ellipsis:ellipsis + 1] = [slice(None)] * ellipsis_size

        # Pad with full slices so every input axis has a selector.
        slices += [slice(None)] * (self.ndim - len(slices) + n_newaxes)

        # Create new shape and stride
        shape = []
        strides = []

        # j walks the input axes; newaxis entries do not advance it.
        j = 0
        offset = 0
        for i, s in enumerate(slices):
            if s is newaxis:
                shape.append(1)
                if j < self.ndim:
                    strides.append(self._strides[j])
                elif self.ndim > 0:
                    strides.append(self._strides[-1])
                else:
                    strides.append(self.itemsize)
            elif isinstance(s, slice):
                s = internal.complete_slice(s, self._shape[j])
                # Ceil-divide the selected extent by the step; the
                # negative-step branch mirrors the positive one.
                if s.step > 0:
                    dim = (s.stop - s.start - 1) // s.step + 1
                else:
                    dim = (s.stop - s.start + 1) // s.step + 1
                shape.append(dim)
                strides.append(self._strides[j] * s.step)
                offset += s.start * self._strides[j]
                j += 1
            elif numpy.isscalar(s):
                s = int(s)
                # NOTE(review): only the upper bound is checked here;
                # negative indices are not wrapped or range-checked —
                # confirm callers never pass them.
                if s >= self._shape[j]:
                    raise IndexError('Index %s exceeds the size %s at axis %s'
                                     % (s, self._shape[j], j))
                offset += s * self._strides[j]
                j += 1
            else:
                raise TypeError('Invalid index type: %s' % type(slices[i]))

        # Build a view sharing this array's memory at the computed offset.
        v = self.view()
        v._shape = tuple(shape)
        v._strides = tuple(strides)
        v.data = self.data + offset
        v._mark_dirty()
        return v

    def __setitem__(self, slices, value):
        # Assign through a view of the selected region.
        v = self[slices]
        if isinstance(value, ndarray):
            y, x = broadcast_arrays(v, value)
            if y._shape == x._shape and y._strides == x._strides:
                if int(y.data) == int(x.data):
                    return  # Skip since x and y are the same array
                elif y.flags.c_contiguous and x.dtype == y.dtype:
                    # Fast path: a single contiguous device-to-device copy.
                    y.data.copy_from(x.data, x.nbytes)
                    return
            # General path: elementwise copy handles strided layouts.
            elementwise.copy(x, y)
        else:
            # Scalar assignment fills the whole selected region.
            v.fill(value)
# TODO(beam2d): Implement these
# def __getslice__(self, i, j):
# def __setslice__(self, i, j, y):
# def __contains__(self, y):
# Conversion:
    # Conversions copy the array to host memory via get() and then apply
    # the corresponding builtin conversion.
    def __int__(self):
        return int(self.get())

    if sys.version_info < (3,):
        # __long__ only exists on Python 2.
        def __long__(self):
            # Avoid using long() for flake8
            return self.get().__long__()

    def __float__(self):
        return float(self.get())

    def __oct__(self):
        # Invoked by oct() on Python 2 only.
        return oct(self.get())

    def __hex__(self):
        # Invoked by hex() on Python 2 only.
        return hex(self.get())

    # String representations:
    # Both delegate to the host copy, so they match numpy's formatting.
    def __repr__(self):
        return repr(self.get())

    def __str__(self):
        return str(self.get())
# -------------------------------------------------------------------------
# Methods outside of the ndarray main documentation
# -------------------------------------------------------------------------
def dot(self, b, out=None):
"""Returns the dot product with given array.
.. seealso::
:func:`cupy.dot` for full documentation,
:meth:`numpy.ndarray.dot`
"""
return dot(self, b, out)
# -------------------------------------------------------------------------
# Cupy specific attributes and methods
# -------------------------------------------------------------------------
    @property
    def device(self):
        """CUDA device on which this array resides."""
        return self.data.device

    @property
    def _fptr(self):
        # Raw device pointer cast to double* for float64 arrays and to
        # float* otherwise.
        if self._dtype.type == numpy.float64:
            return ctypes.cast(self.data.ptr, ctypes.POINTER(ctypes.c_double))
        else:
            return ctypes.cast(self.data.ptr, ctypes.POINTER(ctypes.c_float))

    def get(self, stream=None):
        """Returns a copy of the array on host memory.

        Args:
            stream (cupy.cuda.Stream): CUDA stream object. If it is given, the
                copy runs asynchronously. Otherwise, the copy is synchronous.

        Returns:
            numpy.ndarray: Copy of the array on host memory.

        """
        # The source is made C-contiguous first so a single linear copy
        # suffices.
        a_gpu = ascontiguousarray(self)
        a_cpu = numpy.empty(self._shape, dtype=self._dtype)
        ptr = internal.get_ndarray_ptr(a_cpu)
        if stream is None:
            a_gpu.data.copy_to_host(ptr, a_gpu.nbytes)
        else:
            a_gpu.data.copy_to_host_async(ptr, a_gpu.nbytes, stream)
        return a_cpu

    def set(self, arr, stream=None):
        """Copies an array on the host memory to cuda.ndarray.

        Args:
            arr (numpy.ndarray): The source array on the host memory.
            stream (cupy.cuda.Stream): CUDA stream object. If it is given, the
                copy runs asynchronously. Otherwise, the copy is synchronous.

        """
        if not isinstance(arr, numpy.ndarray):
            raise TypeError('Only numpy.ndarray can be set to cupy.ndarray')
        if self._dtype != arr.dtype:
            raise TypeError('{} array cannot be set to {} array'.format(
                arr.dtype, self._dtype))
        if self._shape != arr.shape:
            raise ValueError('Shape mismatch')
        if not self.flags.c_contiguous:
            raise RuntimeError('Cannot set to non-contiguous array')

        # The destination is contiguous (checked above); make the source
        # contiguous too so one linear copy works.
        arr = numpy.ascontiguousarray(arr)
        ptr = internal.get_ndarray_ptr(arr)
        if stream is None:
            self.data.copy_from_host(ptr, self.nbytes)
        else:
            self.data.copy_from_host_async(ptr, self.nbytes, stream)

    def reduced_view(self, dtype=None):
        """Returns a view of the array with minimum number of dimensions.

        Args:
            dtype: Data type specifier. If it is given, then the memory
                sequence is reinterpreted as the new type.

        Returns:
            cupy.ndarray: A view of the array with reduced dimensions.

        """
        view = self.view(dtype=dtype)
        shape, strides = internal.get_reduced_dims_from_array(self)
        view._shape = shape
        view._strides = strides
        # NOTE(review): only the F-contiguity flag is invalidated here,
        # not C-contiguity — confirm get_reduced_dims_from_array preserves
        # C-contiguity.
        view._mark_f_dirty()
        return view

    def _update_c_contiguity(self):
        # Recompute the C_CONTIGUOUS flag from shape/strides and clear the
        # corresponding dirty bit.
        self._flags &= ~flags.C_CONTIGUOUS
        if internal.get_c_contiguity(self._shape, self._strides,
                                     self.itemsize):
            self._flags |= flags.C_CONTIGUOUS
        self._flags &= ~flags.C_DIRTY

    def _update_f_contiguity(self):
        # F-contiguity equals C-contiguity of the axis-reversed layout.
        self._flags &= ~flags.F_CONTIGUOUS
        if internal.get_c_contiguity(tuple(reversed(self._shape)),
                                     tuple(reversed(self._strides)),
                                     self.itemsize):
            self._flags |= flags.F_CONTIGUOUS
        self._flags &= ~flags.F_DIRTY

    def _update_contiguity(self):
        self._update_c_contiguity()
        self._update_f_contiguity()

    # Dirty bits mark the cached contiguity flags as stale after a
    # shape/stride mutation.
    def _mark_c_dirty(self):
        self._flags |= flags.C_DIRTY

    def _mark_f_dirty(self):
        self._flags |= flags.F_DIRTY

    def _mark_dirty(self):
        self._flags |= flags.C_DIRTY | flags.F_DIRTY

    def _should_use_rop(self, a):
        # Defer to the other operand's reflected operator when it declares
        # a higher __array_priority__ (numpy interoperability protocol).
        return getattr(a, '__array_priority__', 0) > self.__array_priority__
# Re-export the ufunc base class and numpy's newaxis sentinel.
ufunc = elementwise.ufunc
newaxis = numpy.newaxis  # == None

# =============================================================================
# Routines
#
# The order of these declarations are borrowed from the NumPy document:
# http://docs.scipy.org/doc/numpy/reference/routines.html
# =============================================================================

# -----------------------------------------------------------------------------
# Array creation routines
# -----------------------------------------------------------------------------
empty = creation.basic.empty
empty_like = creation.basic.empty_like
eye = creation.basic.eye
identity = creation.basic.identity
ones = creation.basic.ones
ones_like = creation.basic.ones_like
zeros = creation.basic.zeros
zeros_like = creation.basic.zeros_like
full = creation.basic.full
full_like = creation.basic.full_like

array = creation.from_data.array
asarray = creation.from_data.asarray
asanyarray = creation.from_data.asanyarray
ascontiguousarray = creation.from_data.ascontiguousarray
copy = creation.from_data.copy

arange = creation.ranges.arange
linspace = creation.ranges.linspace

diag = creation.matrix.diag
diagflat = creation.matrix.diagflat

# -----------------------------------------------------------------------------
# Array manipulation routines
# -----------------------------------------------------------------------------
copyto = manipulation.basic.copyto

reshape = manipulation.shape.reshape
ravel = manipulation.shape.ravel

rollaxis = manipulation.transpose.rollaxis
swapaxes = manipulation.transpose.swapaxes
transpose = manipulation.transpose.transpose

atleast_1d = manipulation.dims.atleast_1d
atleast_2d = manipulation.dims.atleast_2d
atleast_3d = manipulation.dims.atleast_3d
broadcast = manipulation.dims.broadcast
broadcast_arrays = manipulation.dims.broadcast_arrays
expand_dims = manipulation.dims.expand_dims
squeeze = manipulation.dims.squeeze

column_stack = manipulation.join.column_stack
concatenate = manipulation.join.concatenate
dstack = manipulation.join.dstack
hstack = manipulation.join.hstack
vstack = manipulation.join.vstack

array_split = manipulation.split.array_split
dsplit = manipulation.split.dsplit
hsplit = manipulation.split.hsplit
split = manipulation.split.split
vsplit = manipulation.split.vsplit

# -----------------------------------------------------------------------------
# Binary operations
# -----------------------------------------------------------------------------
bitwise_and = binary.elementwise.bitwise_and
bitwise_or = binary.elementwise.bitwise_or
bitwise_xor = binary.elementwise.bitwise_xor
invert = binary.elementwise.invert
left_shift = binary.elementwise.left_shift
right_shift = binary.elementwise.right_shift

# Operates on host scalars, so numpy's implementation is reused directly.
binary_repr = numpy.binary_repr

# -----------------------------------------------------------------------------
# Data type routines (borrowed from NumPy)
# -----------------------------------------------------------------------------
can_cast = numpy.can_cast
promote_types = numpy.promote_types
min_scalar_type = numpy.min_scalar_type
result_type = numpy.result_type
common_type = numpy.common_type
obj2sctype = numpy.obj2sctype

dtype = numpy.dtype
format_parser = numpy.format_parser

finfo = numpy.finfo
iinfo = numpy.iinfo
MachAr = numpy.MachAr

issctype = numpy.issctype
issubdtype = numpy.issubdtype
issubsctype = numpy.issubsctype
issubclass_ = numpy.issubclass_
find_common_type = numpy.find_common_type

typename = numpy.typename
sctype2char = numpy.sctype2char
mintypecode = numpy.mintypecode

# -----------------------------------------------------------------------------
# Optionally Scipy-accelerated routines
# -----------------------------------------------------------------------------
# TODO(beam2d): Implement it

# -----------------------------------------------------------------------------
# Discrete Fourier Transform
# -----------------------------------------------------------------------------
# TODO(beam2d): Implement it

# -----------------------------------------------------------------------------
# Indexing routines
# -----------------------------------------------------------------------------
take = indexing.indexing.take
diagonal = indexing.indexing.diagonal

# -----------------------------------------------------------------------------
# Input and output
# -----------------------------------------------------------------------------
load = io.npz.load
save = io.npz.save
savez = io.npz.savez
savez_compressed = io.npz.savez_compressed

array_repr = io.formatting.array_repr
array_str = io.formatting.array_str

base_repr = numpy.base_repr

# -----------------------------------------------------------------------------
# Linear algebra
# -----------------------------------------------------------------------------
dot = linalg.product.dot
vdot = linalg.product.vdot
inner = linalg.product.inner
outer = linalg.product.outer
tensordot = linalg.product.tensordot

trace = linalg.norm.trace

# -----------------------------------------------------------------------------
# Logic functions
# -----------------------------------------------------------------------------
isfinite = logic.content.isfinite
isinf = logic.content.isinf
isnan = logic.content.isnan

isscalar = numpy.isscalar

logical_and = logic.ops.logical_and
logical_or = logic.ops.logical_or
logical_not = logic.ops.logical_not
logical_xor = logic.ops.logical_xor

greater = logic.comparison.greater
greater_equal = logic.comparison.greater_equal
less = logic.comparison.less
less_equal = logic.comparison.less_equal
equal = logic.comparison.equal
not_equal = logic.comparison.not_equal

# -----------------------------------------------------------------------------
# Mathematical functions
# -----------------------------------------------------------------------------
sin = math.trigonometric.sin
cos = math.trigonometric.cos
tan = math.trigonometric.tan
arcsin = math.trigonometric.arcsin
arccos = math.trigonometric.arccos
arctan = math.trigonometric.arctan
hypot = math.trigonometric.hypot
arctan2 = math.trigonometric.arctan2
deg2rad = math.trigonometric.deg2rad
rad2deg = math.trigonometric.rad2deg
degrees = math.trigonometric.degrees
radians = math.trigonometric.radians

sinh = math.hyperbolic.sinh
cosh = math.hyperbolic.cosh
tanh = math.hyperbolic.tanh
arcsinh = math.hyperbolic.arcsinh
arccosh = math.hyperbolic.arccosh
arctanh = math.hyperbolic.arctanh

rint = math.rounding.rint
floor = math.rounding.floor
ceil = math.rounding.ceil
trunc = math.rounding.trunc

# NOTE: these shadow the builtins sum/prod for users of this module.
sum = math.sumprod.sum
prod = math.sumprod.prod

exp = math.explog.exp
expm1 = math.explog.expm1
exp2 = math.explog.exp2
log = math.explog.log
log10 = math.explog.log10
log2 = math.explog.log2
log1p = math.explog.log1p
logaddexp = math.explog.logaddexp
logaddexp2 = math.explog.logaddexp2

signbit = math.floating.signbit
copysign = math.floating.copysign
ldexp = math.floating.ldexp
frexp = math.floating.frexp
nextafter = math.floating.nextafter

add = math.arithmetic.add
reciprocal = math.arithmetic.reciprocal
negative = math.arithmetic.negative
multiply = math.arithmetic.multiply
divide = math.arithmetic.divide
power = math.arithmetic.power
subtract = math.arithmetic.subtract
true_divide = math.arithmetic.true_divide
floor_divide = math.arithmetic.floor_divide
fmod = math.arithmetic.fmod
# mod and remainder are aliases of the same function, as in numpy.
mod = math.arithmetic.remainder
modf = math.arithmetic.modf
remainder = math.arithmetic.remainder

clip = math.misc.clip
sqrt = math.misc.sqrt
square = math.misc.square
absolute = math.misc.absolute
sign = math.misc.sign
maximum = math.misc.maximum
minimum = math.misc.minimum
fmax = math.misc.fmax
fmin = math.misc.fmin

# -----------------------------------------------------------------------------
# Sorting, searching, and counting
# -----------------------------------------------------------------------------
argmax = sorting.search.argmax
argmin = sorting.search.argmin

# -----------------------------------------------------------------------------
# Statistics
# -----------------------------------------------------------------------------
amin = statistics.order.amin
amax = statistics.order.amax

mean = statistics.meanvar.mean
var = statistics.meanvar.var
std = statistics.meanvar.std
# CuPy specific functions
def asnumpy(a, stream=None):
    """Returns an array on the host memory from an arbitrary source array.

    Args:
        a: Arbitrary object that can be converted to :class:`numpy.ndarray`.
        stream (cupy.cuda.Stream): CUDA stream object. If it is given, the
            device-to-host copy runs asynchronously; otherwise the copy is
            synchronous. It has no effect unless ``a`` is a cupy.ndarray.

    Returns:
        numpy.ndarray: Converted array on the host memory.

    """
    # Non-device inputs are handed straight to numpy.
    if not isinstance(a, ndarray):
        return numpy.asarray(a)
    return a.get(stream=stream)
# Handle to this module itself; returned by get_array_module() below when
# any argument is a cupy.ndarray.
_cupy = sys.modules[__name__]
def get_array_module(*args):
    """Returns the array module for arguments.

    This function is used to implement CPU/GPU generic code. If at least one
    of the arguments is a :class:`cupy.ndarray` object, the :mod:`cupy`
    module is returned.

    Args:
        args: Values to determine whether NumPy or CuPy should be used.

    Returns:
        module: :mod:`cupy` or :mod:`numpy` is returned based on the types of
        the arguments.

    .. admonition:: Example

       A NumPy/CuPy generic function can be written as follows::

           def softplus(x):
               xp = cupy.get_array_module(x)
               return xp.maximum(0, x) + xp.log1p(xp.exp(-abs(x)))

    """
    # A plain loop instead of any(): return cupy as soon as a device array
    # is seen, otherwise fall through to numpy.
    for arg in args:
        if isinstance(arg, ndarray):
            return _cupy
    return numpy
# Convenience re-exports: memoization helpers and the user-facing kernel
# construction classes.
clear_memo = util.clear_memo
memoize = util.memoize

ElementwiseKernel = elementwise.ElementwiseKernel
ReductionKernel = reduction.ReductionKernel
| |
""" This module contains all of the layer classes:
InputLayer
OutputLayer
FCLayer
ConvLayer
PoolLayer
"""
import numpy as np
import theano
import theano.tensor as T
import theano.tensor.shared_randomstreams
from theano.tensor.signal import downsample
from theano.sandbox.cuda.basic_ops import gpu_contiguous
from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
from pylearn2.sandbox.cuda_convnet.pool import MaxPool
import NNlib as NNl
# Shorthand aliases used throughout this module.
Tfloat = theano.config.floatX  # configured float dtype for shared variables
Tsh = theano.shared            # shared-variable constructor
Tsig = T.nnet.sigmoid          # logistic sigmoid activation
class Layer(object):
    """Base class for network layers.

    Holds the wiring shared by every layer type: a tag naming the layer
    kind, the layer's position in the network, and the layer feeding it.
    """

    def __init__(self, input_layer, traits, _tag):
        self.tag = _tag
        self.input_layer = input_layer
        self.number = traits['number']
class InputLayer(Layer):
    """ This layer will come first in any structure definition of a network.
    It is involved in applying data augmentation, such as dropout, jitter
    and flipping.
    """

    def __init__(self, rngs, input_layer, Lshape, traits, activation=None):
        super(InputLayer, self).__init__(input_layer, traits, "Input")
        self.srng = rngs[1]
        self.out_shape = Lshape
        self.traits = traits
        self.p_retain = 1. - traits['dropout']

    def output(self, use_dropout=False, depth=0):
        """ Provides data to next layer and applies dropout """
        data = self.input_layer
        if not use_dropout:
            return data
        # Scale by 1/p_retain so the expected activation is unchanged.
        n_streams = NNl.get_num_streams(np.prod(self.out_shape))
        mask = NNl.gen_mask(self.srng, self.out_shape, self.p_retain,
                            n_streams)
        return data * (mask / self.p_retain)
class OutputLayer(Layer):
    """ This layer will come last in most structure definitions of a network.
    The cost calculations for supervised training are done here, as well as
    the actual classification of data samples.

    Attributes:
        W: The matrix of weights
        b: The vector of biases
        params: A container for easy grouping of the layer's parameters

    The methods depend on only two main arguments:
        y: The labels for your dataset
        use_dropout: Toggles dropout usage for training vs testing, which is
            propagated down through each input layer
    """

    def __init__(self, rngs, input_layer, Lshape, traits, activation):
        super(OutputLayer, self).__init__(input_layer, traits, "Output")
        self.out_shape = (Lshape[0], Lshape[1])
        self.W_shape = Lshape[1:]
        self.activation = activation
        self.l2decay = traits['l2decay']

        if len(Lshape) != 3:
            # Fixed: the old message claimed the shape must be "(2,)"
            # although this condition requires 3 entries, and it was passed
            # as two print arguments (rendered as a tuple on Python 2).
            print("Logistic regression shape must be (3,), it is: "
                  "{}".format(Lshape))

        # Initialize weights and biases (can load values later)
        self.W = NNl.gen_weights(rngs[0], self.W_shape, 0, traits['initW'])
        self.b = Tsh(np.zeros((Lshape[2],), dtype=Tfloat))
        self.params = [self.W, self.b]

    def p_y_given_x(self, use_dropout=True):
        """ Probability of a given state, using softmax, for classification """
        x = self.input_layer.output(use_dropout, depth=self.number)
        if x.ndim != 2:
            x = x.flatten(2)
        return T.nnet.softmax(T.dot(x, self.W) + self.b)

    def mse(self, y, use_dropout=True):
        """ Calculates the mean squared error between the prediction
        and the label values.
        """
        x = self.input_layer.output(use_dropout, depth=self.number)
        if x.ndim != 2:
            x = x.flatten(2)
        # Activations, for use with regression
        y_act = self.activation(T.dot(x, self.W) + self.b)
        return T.mean(T.sqr(y_act - y))

    def log_loss(self, y, use_dropout=True):
        """ Calculates the negative log loss between the predicted and
        label values.
        """
        x = self.input_layer.output(use_dropout, depth=self.number)
        if x.ndim != 2:
            x = x.flatten(2)
        y_act = self.activation(T.dot(x, self.W) + self.b)
        # y_act = T.maximum(1e-15, T.minimum(1. - 1e-15, y_act))
        loss = -(y * T.log(y_act) + (1 - y) * T.log(1 - y_act))
        return T.mean(loss)

    def negative_log_likelihood(self, y, use_dropout=True):
        """ Return the mean of the negative log-likelihood of the prediction
        of this model under a given target distribution.
        """
        logprob = T.log(self.p_y_given_x(use_dropout))
        return -T.mean(logprob[T.arange(y.shape[0]), y])

    def class_pred(self, use_dropout=False):
        """ Predict classes by the one with max probability """
        return T.argmax(self.p_y_given_x(use_dropout), axis=1)

    def errors(self, y, use_dropout=False):
        """ Calculate the total number of classification errors """
        return T.mean(T.neq(self.class_pred(use_dropout), y))
class FCLayer(Layer):
    """ A generic fully-connected layer of a neural network.
    The FCLayer also has options that allow it to be trained in an
    autoencoder, and utilize dropout and weight decay.

    Methods:
        signal: The pre-activation signal, i.e. x.W + b
        output: What the next layer will see. Applies activation and dropout.
        prop_R: Propagate 'right': a simple version of 'signal'
        prop_L: Propagate 'left': reconstruct the input from output,
            i.e. y.W.T + b'. Used for autoencoder training.
        reconstruct_mse: Mean-squared error of reconstruction. Used during
            the training as an autoencoder.

    Attributes:
        tag: Signifier for the layer type.
        rng: numpy rng (used for initialization)
        srng: Theano rng stream (used for generating dropout masks).
        input_layer: The layer which feeds into this one.
        out_shape: The shape of output this layer produces.
        number: 'n' where this layer is the nth layer in your network,
            starting from the Input as 0.
        p_retain: Probability of retaining a neuron after dropout.
        l2decay: L2 decay constant for this layer.
        activation: The non-linearity to apply to x.W + b
        d_rec: Input without dropout applied, used by autoencoder.
        best_error: Tracks the recon error during autoencoder training.
            (The Model class tracks the supervised training error)
        W: Weight matrix
        b: bias matrix
        ib: 'inverse' bias matrix (applied during prop_L)
        params: Convenient wrapper of params for calculating the gradient.
        pt_params: As above, but for autoencoder training
    """

    def __init__(self, rngs, input_layer, Lshape, traits, activation):
        super(FCLayer, self).__init__(input_layer, traits, "FC")
        self.p_retain = (1. - traits['dropout'])
        self.rng = rngs[0]
        self.srng = rngs[1]
        self.out_shape = (Lshape[0], Lshape[2])
        self.W_shape = Lshape[1:]
        self.activation = activation
        self.l2decay = traits['l2decay']
        self.d_rec = input_layer.output(False)
        self.best_error = np.inf

        if len(Lshape) != 3:
            # Fixed: was a Python-2-only `print "...",` statement
            # (inconsistent with the rest of the module) and claimed the
            # shape must be "(2,)" although this check requires 3 entries.
            print("FC layer shape must be (3,), it is: {}".format(Lshape))

        self.W = NNl.gen_weights(self.rng, self.W_shape, 0, traits['initW'])
        self.b = Tsh(np.zeros(Lshape[2], dtype=Tfloat))
        self.ib = Tsh(np.zeros(Lshape[1], dtype=Tfloat))
        self.params = [self.W, self.b, ]
        self.pt_params = [self.W, self.b, self.ib]

    def signal(self, use_dropout=False, depth=0):
        """ Raw signal from applying weights and bias, pre-activation """
        if depth > 0:
            x = self.input_layer.output(use_dropout, depth=(depth - 1))
        else:
            x = self.input_layer.output(False, depth=0)
        if x.ndim > 2:
            x = x.flatten(2)
        return T.dot(x, self.W) + self.b

    def output(self, use_dropout=False, depth=0):
        """ Apply the activation and dropout to the signal, producing
        output that will be used by subsequent layers
        """
        out = self.activation(self.signal(use_dropout=use_dropout,
                                          depth=depth))
        c_shape = self.out_shape
        if use_dropout:
            # Scale by 1/p_retain so the expected activation is unchanged.
            num_str = NNl.get_num_streams(np.prod(c_shape))
            mask = NNl.gen_mask(self.srng, c_shape, self.p_retain, num_str)
            out = out * mask / self.p_retain
        return out

    def prop_R(self, in_vectors):
        """ Simple version of 'signal' method: Right propagation """
        return T.dot(in_vectors, self.W) + self.b

    def prop_L(self, in_vectors):
        """ Apply the weight transpose and the inverse bias: Left prop. """
        return T.dot(in_vectors, self.W.T) + self.ib

    def reconstruct_mse(self, r_activation):
        """ Used for training as an auto-encoder, this compares the input
        and its reconstruction via forward and backward propagation.

        Args:
            r_activation: Activation function to apply during reconstruction.
                The choice for this depends on the distribution of your
                input, so the deeper hidden layers just use a soft ReLU in
                general, but the initial hidden layer will need to use a
                function dependent on your feature set.
        """
        x0 = self.input_layer.output()
        if x0.ndim > 2:
            x0 = x0.flatten(2)
        # Forward with dropout one level deep, then reconstruct leftwards.
        out = self.activation(self.signal(use_dropout=True, depth=1))
        xr = r_activation(self.prop_L(out))
        return T.mean(T.sum(T.sqr(x0 - xr), axis=1))
class ConvLayer(Layer):
    """Convolution layer: rasters a bank of square filters across the
    input feature maps, optionally zero-padding the borders so the
    output keeps the input's spatial size.

    Uses the pylearn2 FilterActs op (Alex Krizhevsky's cuda-convnet),
    which expects channel-major (c01b) memory layout, so the data is
    shuffled in and out of that ordering around the conv op.

    Methods:
        output: What the next layer will see (raw conv response).
    Attributes:
        tag: Signifier for the layer type.
        rng: numpy rng (used for initialization)
        l2decay: L2 decay constant for this layer.
        pad: zero-padding width for the convolution.
        W: Weight matrix (kernels, channels, filter size, filter size).
        params: Wrapper of params for calculating the gradient.
    """

    def __init__(self, rngs, input_layer, Lshape, traits, activation):
        super(ConvLayer, self).__init__(input_layer, traits, "Conv")
        self.rng = rngs[0]
        self.l2decay = traits['l2decay']
        filter_shape = Lshape[1]
        # Input channel count must agree with the filter bank's channels.
        assert Lshape[0][1] == filter_shape[1]
        self.pad = traits['padding']
        self.W = NNl.gen_weights(self.rng, filter_shape, 0, traits['initW'])
        # Convolve via Alex K.'s fast CUDA conv (courtesy of S. Dieleman):
        # shuffle bc01 -> c01b, run FilterActs, shuffle back to bc01.
        self.x = self.input_layer.output(False)
        conv_op = FilterActs(pad=self.pad, partial_sum=1)
        x_c01b = gpu_contiguous((self.x).dimshuffle(1, 2, 3, 0))
        w_c01b = gpu_contiguous((self.W).dimshuffle(1, 2, 3, 0))
        out_c01b = conv_op(x_c01b, w_c01b)
        self.conv_out = out_c01b.dimshuffle(3, 0, 1, 2)  # back to bc01
        # store parameters of this layer
        self.params = [self.W]

    def output(self, use_dropout=True, depth=0):
        """Just pass through for now"""
        return self.conv_out
class PoolLayer(Layer):
    """ This layer simply performs a MaxOut pooling, where a downsample
    factor N is specified, and for each NxN contiguous block of input the
    maximum value is taken as the output.
    """

    def __init__(self, rngs, input_layer, Lshape, traits, activation):
        super(PoolLayer, self).__init__(input_layer, traits, "Pool")
        self.pool_size = (traits['pool'], traits['pool'])
        self.activation = activation
        self.l2decay = traits['l2decay']
        # One bias per feature map (Lshape[1] is the channel count).
        self.b = Tsh(np.zeros((Lshape[1],), dtype=Tfloat))
        self.params = [self.b]

    def output(self, use_dropout=True, depth=0):
        """ Downsamples the input data and apply activation """
        conv_output = self.input_layer.output()
        # The cuda-convnet MaxPool op expects c01b ordering, hence the
        # dimshuffles on the way in and out.
        input_shuffled = (conv_output).dimshuffle(1, 2, 3, 0)
        contiguous_input = gpu_contiguous(input_shuffled)
        # NOTE(review): the pooling window is hard-coded to ds=3 while the
        # stride comes from traits['pool']. With pool != 3 this produces
        # overlapping (or gapped) pooling, which contradicts the NxN claim
        # in the class docstring -- confirm whether ds should be
        # self.pool_size[0].
        pool_op = MaxPool(ds=3, stride=self.pool_size[0])
        out_shuffled = pool_op(contiguous_input)
        pool_out = out_shuffled.dimshuffle(3, 0, 1, 2)  # c01b to bc01
        # Theano routine
        # pool_out = downsample.max_pool_2d(input=self.input_layer.output(),
        # ds=self.pool_size, ignore_border=True)
        return self.activation(pool_out + self.b.dimshuffle('x', 0, 'x', 'x'))
| |
## dea_bandindices.py
'''
Description: This file contains a set of python functions for computing
remote sensing band indices on Digital Earth Australia data.
License: The code in this notebook is licensed under the Apache License,
Version 2.0 (https://www.apache.org/licenses/LICENSE-2.0). Digital Earth
Australia data is licensed under the Creative Commons by Attribution 4.0
license (https://creativecommons.org/licenses/by/4.0/).
Contact: If you need assistance, please post a question on the Open Data
Cube Slack channel (http://slack.opendatacube.org/) or on the GIS Stack
Exchange (https://gis.stackexchange.com/questions/ask?tags=open-data-cube)
using the `open-data-cube` tag (you can view previously asked questions
here: https://gis.stackexchange.com/questions/tagged/open-data-cube).
If you would like to report an issue with this script, you can file one
on Github (https://github.com/GeoscienceAustralia/dea-notebooks/issues/new).
Last modified: March 2021
'''
# Import required packages
import warnings
import numpy as np
# Define custom functions
def calculate_indices(ds,
                      index=None,
                      collection=None,
                      custom_varname=None,
                      normalise=True,
                      drop=False,
                      inplace=False):
    """
    Takes an xarray dataset containing spectral bands, calculates one of
    a set of remote sensing indices, and adds the resulting array as a
    new variable in the original dataset.

    Note: by default, this function will create a new copy of the data
    in memory. This can be a memory-expensive operation, so to avoid
    this, set `inplace=True`.

    Last modified: March 2021

    Parameters
    ----------
    ds : xarray Dataset
        A two-dimensional or multi-dimensional array with containing the
        spectral bands required to calculate the index. These bands are
        used as inputs to calculate the selected water index.
    index : str or list of strs
        A string giving the name of the index to calculate or a list of
        strings giving the names of the indices to calculate:

        'AWEI_ns' (Automated Water Extraction Index,
                   no shadows, Feyisa 2014)
        'AWEI_sh' (Automated Water Extraction Index,
                   shadows, Feyisa 2014)
        'BAEI' (Built-Up Area Extraction Index, Bouzekri et al. 2015)
        'BAI' (Burn Area Index, Martin 1998)
        'BSI' (Bare Soil Index, Rikimaru et al. 2002)
        'BUI' (Built-Up Index, He et al. 2010)
        'CMR' (Clay Minerals Ratio, Drury 1987)
        'EVI' (Enhanced Vegetation Index, Huete 2002)
        'FMR' (Ferrous Minerals Ratio, Segal 1982)
        'IOR' (Iron Oxide Ratio, Segal 1982)
        'LAI' (Leaf Area Index, Boegh 2002)
        'MNDWI' (Modified Normalised Difference Water Index, Xu 2006)
        'MSAVI' (Modified Soil Adjusted Vegetation Index,
                 Qi et al. 1994)
        'NBI' (New Built-Up Index, Jieli et al. 2010)
        'NBR' (Normalised Burn Ratio, Lopez Garcia 1991)
        'NDBI' (Normalised Difference Built-Up Index, Zha 2003)
        'NDCI' (Normalised Difference Chlorophyll Index,
                Mishra & Mishra, 2012)
        'NDMI' (Normalised Difference Moisture Index, Gao 1996)
        'NDSI' (Normalised Difference Snow Index, Hall 1995)
        'NDTI' (Normalised Difference Tillage Index,
                Van Deventer et al. 1997)
        'NDVI' (Normalised Difference Vegetation Index, Rouse 1973)
        'NDWI' (Normalised Difference Water Index, McFeeters 1996)
        'SAVI' (Soil Adjusted Vegetation Index, Huete 1988)
        'TCB' (Tasseled Cap Brightness, Crist 1985)
        'TCG' (Tasseled Cap Greeness, Crist 1985)
        'TCW' (Tasseled Cap Wetness, Crist 1985)
        'TCB_GSO' (Tasseled Cap Brightness, Nedkov 2017)
        'TCG_GSO' (Tasseled Cap Greeness, Nedkov 2017)
        'TCW_GSO' (Tasseled Cap Wetness, Nedkov 2017)
        'WI' (Water Index, Fisher 2016)
        'kNDVI' (Non-linear Normalised Difference Vegation Index,
                 Camps-Valls et al. 2021)

    collection : str
        An string that tells the function what data collection is
        being used to calculate the index. This is necessary because
        different collections use different names for bands covering
        a similar spectra. Valid options are 'ga_ls_2' (for GA
        Landsat Collection 2), 'ga_ls_3' (for GA Landsat Collection 3)
        and 'ga_s2_1' (for GA Sentinel 2 Collection 1).
    custom_varname : str, optional
        By default, the original dataset will be returned with
        a new index variable named after `index` (e.g. 'NDVI'). To
        specify a custom name instead, you can supply e.g.
        `custom_varname='custom_name'`. Defaults to None, which uses
        `index` to name the variable.
    normalise : bool, optional
        Some coefficient-based indices (e.g. 'WI', 'BAEI', 'AWEI_ns',
        'AWEI_sh', 'TCW', 'TCG', 'TCB', 'TCW_GSO', 'TCG_GSO', 'TCB_GSO',
        'EVI', 'LAI', 'SAVI', 'MSAVI') produce different results if
        surface reflectance values are not scaled between 0.0 and 1.0
        prior to calculating the index. Setting `normalise=True` first
        scales values to a 0.0-1.0 range by dividing by 10000.0.
        Defaults to True.
    drop : bool, optional
        Provides the option to drop the original input data, thus saving
        space. if drop = True, returns only the index and its values.
    inplace: bool, optional
        If `inplace=True`, calculate_indices will modify the original
        array in-place, adding bands to the input dataset. The default
        is `inplace=False`, which will instead make a new copy of the
        original data (and use twice the memory).

    Returns
    -------
    ds : xarray Dataset
        The original xarray Dataset inputted into the function, with a
        new varible containing the remote sensing index as a DataArray.
        If drop = True, the new variable/s as DataArrays in the
        original Dataset.
    """

    # Set ds equal to a copy of itself in order to prevent the function
    # from editing the input dataset. This can prevent unexpected
    # behaviour though it uses twice as much memory.
    if not inplace:
        ds = ds.copy(deep=True)

    # Capture input band names in order to drop these if drop=True
    if drop:
        bands_to_drop = list(ds.data_vars)
        print(f'Dropping bands {bands_to_drop}')

    # Dictionary containing remote sensing index band recipes
    index_dict = {
        # Normalised Difference Vegation Index, Rouse 1973
        'NDVI': lambda ds: (ds.nir - ds.red) /
                           (ds.nir + ds.red),

        # Non-linear Normalised Difference Vegation Index,
        # Camps-Valls et al. 2021
        'kNDVI': lambda ds: np.tanh(((ds.nir - ds.red) /
                                     (ds.nir + ds.red)) ** 2),

        # Enhanced Vegetation Index, Huete 2002
        'EVI': lambda ds: ((2.5 * (ds.nir - ds.red)) /
                           (ds.nir + 6 * ds.red -
                            7.5 * ds.blue + 1)),

        # Leaf Area Index, Boegh 2002
        'LAI': lambda ds: (3.618 * ((2.5 * (ds.nir - ds.red)) /
                                    (ds.nir + 6 * ds.red -
                                     7.5 * ds.blue + 1)) - 0.118),

        # Soil Adjusted Vegetation Index, Huete 1988
        'SAVI': lambda ds: ((1.5 * (ds.nir - ds.red)) /
                            (ds.nir + ds.red + 0.5)),

        # Mod. Soil Adjusted Vegetation Index, Qi et al. 1994
        'MSAVI': lambda ds: ((2 * ds.nir + 1 -
                              ((2 * ds.nir + 1)**2 -
                               8 * (ds.nir - ds.red))**0.5) / 2),

        # Normalised Difference Moisture Index, Gao 1996
        'NDMI': lambda ds: (ds.nir - ds.swir1) /
                           (ds.nir + ds.swir1),

        # Normalised Burn Ratio, Lopez Garcia 1991
        'NBR': lambda ds: (ds.nir - ds.swir2) /
                          (ds.nir + ds.swir2),

        # Burn Area Index, Martin 1998
        'BAI': lambda ds: (1.0 / ((0.10 - ds.red) ** 2 +
                                  (0.06 - ds.nir) ** 2)),

        # Normalised Difference Chlorophyll Index,
        # (Mishra & Mishra, 2012)
        'NDCI': lambda ds: (ds.red_edge_1 - ds.red) /
                           (ds.red_edge_1 + ds.red),

        # Normalised Difference Snow Index, Hall 1995
        'NDSI': lambda ds: (ds.green - ds.swir1) /
                           (ds.green + ds.swir1),

        # Normalised Difference Tillage Index,
        # Van Deventer et al. 1997
        'NDTI': lambda ds: (ds.swir1 - ds.swir2) /
                           (ds.swir1 + ds.swir2),

        # Normalised Difference Water Index, McFeeters 1996
        'NDWI': lambda ds: (ds.green - ds.nir) /
                           (ds.green + ds.nir),

        # Modified Normalised Difference Water Index, Xu 2006
        'MNDWI': lambda ds: (ds.green - ds.swir1) /
                            (ds.green + ds.swir1),

        # Normalised Difference Built-Up Index, Zha 2003
        'NDBI': lambda ds: (ds.swir1 - ds.nir) /
                           (ds.swir1 + ds.nir),

        # Built-Up Index, He et al. 2010
        'BUI': lambda ds: ((ds.swir1 - ds.nir) /
                           (ds.swir1 + ds.nir)) -
                          ((ds.nir - ds.red) /
                           (ds.nir + ds.red)),

        # Built-up Area Extraction Index, Bouzekri et al. 2015
        'BAEI': lambda ds: (ds.red + 0.3) /
                           (ds.green + ds.swir1),

        # New Built-up Index, Jieli et al. 2010
        'NBI': lambda ds: (ds.swir1 + ds.red) / ds.nir,

        # Bare Soil Index, Rikimaru et al. 2002
        'BSI': lambda ds: ((ds.swir1 + ds.red) -
                           (ds.nir + ds.blue)) /
                          ((ds.swir1 + ds.red) +
                           (ds.nir + ds.blue)),

        # Automated Water Extraction Index (no shadows), Feyisa 2014.
        # Bug fix: the second term previously read
        # `0.25 * ds.nir * + 2.75 * ds.swir2`, which Python parses as
        # `0.25 * nir * (+2.75) * swir2` (a product, not the intended
        # `0.25*nir + 2.75*swir2` sum from Feyisa et al. 2014, Eq. 3).
        'AWEI_ns': lambda ds: (4 * (ds.green - ds.swir1) -
                               (0.25 * ds.nir + 2.75 * ds.swir2)),

        # Automated Water Extraction Index (shadows), Feyisa 2014
        'AWEI_sh': lambda ds: (ds.blue + 2.5 * ds.green -
                               1.5 * (ds.nir + ds.swir1) -
                               0.25 * ds.swir2),

        # Water Index, Fisher 2016
        'WI': lambda ds: (1.7204 + 171 * ds.green + 3 * ds.red -
                          70 * ds.nir - 45 * ds.swir1 -
                          71 * ds.swir2),

        # Tasseled Cap Wetness, Crist 1985
        'TCW': lambda ds: (0.0315 * ds.blue + 0.2021 * ds.green +
                           0.3102 * ds.red + 0.1594 * ds.nir +
                           -0.6806 * ds.swir1 + -0.6109 * ds.swir2),

        # Tasseled Cap Greeness, Crist 1985
        'TCG': lambda ds: (-0.1603 * ds.blue + -0.2819 * ds.green +
                           -0.4934 * ds.red + 0.7940 * ds.nir +
                           -0.0002 * ds.swir1 + -0.1446 * ds.swir2),

        # Tasseled Cap Brightness, Crist 1985
        'TCB': lambda ds: (0.2043 * ds.blue + 0.4158 * ds.green +
                           0.5524 * ds.red + 0.5741 * ds.nir +
                           0.3124 * ds.swir1 + -0.2303 * ds.swir2),

        # Tasseled Cap Transformations with Sentinel-2 coefficients
        # after Nedkov 2017 using Gram-Schmidt orthogonalization (GSO)
        # Tasseled Cap Wetness, Nedkov 2017
        'TCW_GSO': lambda ds: (0.0649 * ds.blue + 0.2802 * ds.green +
                               0.3072 * ds.red + -0.0807 * ds.nir +
                               -0.4064 * ds.swir1 + -0.5602 * ds.swir2),

        # Tasseled Cap Greeness, Nedkov 2017
        'TCG_GSO': lambda ds: (-0.0635 * ds.blue + -0.168 * ds.green +
                               -0.348 * ds.red + 0.3895 * ds.nir +
                               -0.4587 * ds.swir1 + -0.4064 * ds.swir2),

        # Tasseled Cap Brightness, Nedkov 2017
        'TCB_GSO': lambda ds: (0.0822 * ds.blue + 0.136 * ds.green +
                               0.2611 * ds.red + 0.5741 * ds.nir +
                               0.3882 * ds.swir1 + 0.1366 * ds.swir2),

        # Clay Minerals Ratio, Drury 1987
        'CMR': lambda ds: (ds.swir1 / ds.swir2),

        # Ferrous Minerals Ratio, Segal 1982
        'FMR': lambda ds: (ds.swir1 / ds.nir),

        # Iron Oxide Ratio, Segal 1982
        'IOR': lambda ds: (ds.red / ds.blue)
    }

    # If index supplied is not a list, convert to list. This allows us to
    # iterate through either multiple or single indices in the loop below
    indices = index if isinstance(index, list) else [index]

    # calculate for each index in the list of indices supplied (indexes)
    for index in indices:

        # Select an index function from the dictionary
        index_func = index_dict.get(str(index))

        # If no index is provided or if no function is returned due to an
        # invalid option being provided, raise an exception informing user to
        # choose from the list of valid options
        if index is None:
            raise ValueError(f"No remote sensing `index` was provided. Please "
                             "refer to the function \ndocumentation for a full "
                             "list of valid options for `index` (e.g. 'NDVI')")

        elif (index in ['WI', 'BAEI', 'AWEI_ns', 'AWEI_sh', 'TCW',
                        'TCG', 'TCB', 'TCW_GSO', 'TCG_GSO', 'TCB_GSO',
                        'EVI', 'LAI', 'SAVI', 'MSAVI']
              and not normalise):
            warnings.warn(f"\nA coefficient-based index ('{index}') normally "
                          "applied to surface reflectance values in the \n"
                          "0.0-1.0 range was applied to values in the 0-10000 "
                          "range. This can produce unexpected results; \nif "
                          "required, resolve this by setting `normalise=True`")

        elif index_func is None:
            raise ValueError(f"The selected index '{index}' is not one of the "
                             "valid remote sensing index options. \nPlease "
                             "refer to the function documentation for a full "
                             "list of valid options for `index`")

        # Rename bands to a consistent format if depending on what collection
        # is specified in `collection`. This allows the same index calculations
        # to be applied to all collections. If no collection was provided,
        # raise an exception.
        if collection is None:
            raise ValueError("'No `collection` was provided. Please specify "
                             "either 'ga_ls_2', 'ga_ls_3' or 'ga_s2_1' \nto "
                             "ensure the function calculates indices using the "
                             "correct spectral bands")

        elif collection == 'ga_ls_3':
            # Dictionary mapping full data names to simpler 'red' alias names
            bandnames_dict = {
                'nbart_nir': 'nir',
                'nbart_red': 'red',
                'nbart_green': 'green',
                'nbart_blue': 'blue',
                'nbart_swir_1': 'swir1',
                'nbart_swir_2': 'swir2',
                'nbar_red': 'red',
                'nbar_green': 'green',
                'nbar_blue': 'blue',
                'nbar_nir': 'nir',
                'nbar_swir_1': 'swir1',
                'nbar_swir_2': 'swir2'
            }

            # Rename bands in dataset to use simple names (e.g. 'red')
            bands_to_rename = {
                a: b for a, b in bandnames_dict.items() if a in ds.variables
            }

        elif collection == 'ga_s2_1':
            # Dictionary mapping full data names to simpler 'red' alias names
            bandnames_dict = {
                'nbart_red': 'red',
                'nbart_green': 'green',
                'nbart_blue': 'blue',
                'nbart_nir_1': 'nir',
                'nbart_red_edge_1': 'red_edge_1',
                'nbart_red_edge_2': 'red_edge_2',
                'nbart_swir_2': 'swir1',
                'nbart_swir_3': 'swir2',
                'nbar_red': 'red',
                'nbar_green': 'green',
                'nbar_blue': 'blue',
                'nbar_nir_1': 'nir',
                'nbar_red_edge_1': 'red_edge_1',
                'nbar_red_edge_2': 'red_edge_2',
                'nbar_swir_2': 'swir1',
                'nbar_swir_3': 'swir2'
            }

            # Rename bands in dataset to use simple names (e.g. 'red')
            bands_to_rename = {
                a: b for a, b in bandnames_dict.items() if a in ds.variables
            }

        elif collection == 'ga_ls_2':
            # Pass an empty dict as no bands need renaming
            bands_to_rename = {}

        # Raise error if no valid collection name is provided:
        else:
            raise ValueError(f"'{collection}' is not a valid option for "
                             "`collection`. Please specify either \n"
                             "'ga_ls_2', 'ga_ls_3' or 'ga_s2_1'")

        # Apply index function
        try:
            # If normalised=True, divide data by 10,000 before applying func
            mult = 10000.0 if normalise else 1.0
            index_array = index_func(ds.rename(bands_to_rename) / mult)
        except AttributeError:
            raise ValueError(f'Please verify that all bands required to '
                             f'compute {index} are present in `ds`. \n'
                             f'These bands may vary depending on the `collection` '
                             f'(e.g. the Landsat `nbart_nir` band \n'
                             f'is equivelent to `nbart_nir_1` for Sentinel 2)')

        # Add as a new variable in dataset
        output_band_name = custom_varname if custom_varname else index
        ds[output_band_name] = index_array

    # Once all indexes are calculated, drop input bands if inplace=False
    if drop and not inplace:
        ds = ds.drop(bands_to_drop)

    # If inplace == True, delete bands in-place instead of using drop
    if drop and inplace:
        for band_to_drop in bands_to_drop:
            del ds[band_to_drop]

    # Return input dataset with added water index variable
    return ds
| |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import iso8601
import six.moves.urllib.parse as urlparse
from webob import exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import exception
from nova.objects import flavor as flavor_obj
from nova.objects import instance as instance_obj
from nova.openstack.common.gettextutils import _
from nova.openstack.common import timeutils
# Policy authorizers for the two API actions exposed by this extension
# (checked at the top of `show` and `index` respectively).
authorize_show = extensions.extension_authorizer('compute',
                                                 'simple_tenant_usage:show')
authorize_list = extensions.extension_authorizer('compute',
                                                 'simple_tenant_usage:list')
def make_usage(elem):
    """Attach tenant-usage sub-elements (totals plus the per-server
    `server_usages` rows) to an XML template element.
    """
    summary_tags = ('tenant_id', 'total_local_gb_usage',
                    'total_vcpus_usage', 'total_memory_mb_usage',
                    'total_hours', 'start', 'stop')
    for tag in summary_tags:
        child = xmlutil.SubTemplateElement(elem, tag)
        child.text = tag

    server_usages = xmlutil.SubTemplateElement(elem, 'server_usages')
    server_usage = xmlutil.SubTemplateElement(server_usages, 'server_usage',
                                              selector='server_usages')
    detail_tags = ('instance_id', 'name', 'hours', 'memory_mb',
                   'local_gb', 'vcpus', 'tenant_id', 'flavor',
                   'started_at', 'ended_at', 'state', 'uptime')
    for tag in detail_tags:
        child = xmlutil.SubTemplateElement(server_usage, tag)
        child.text = tag
class SimpleTenantUsageTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for a single tenant's usage report."""

    def construct(self):
        # Serialize the 'tenant_usage' key of the response dict.
        usage_elem = xmlutil.TemplateElement('tenant_usage',
                                             selector='tenant_usage')
        make_usage(usage_elem)
        return xmlutil.MasterTemplate(usage_elem, 1)
class SimpleTenantUsagesTemplate(xmlutil.TemplateBuilder):
    """XML serialization template for the list of tenant usage reports."""

    def construct(self):
        # One 'tenant_usage' element per entry in 'tenant_usages'.
        list_root = xmlutil.TemplateElement('tenant_usages')
        usage_elem = xmlutil.SubTemplateElement(list_root, 'tenant_usage',
                                                selector='tenant_usages')
        make_usage(usage_elem)
        return xmlutil.MasterTemplate(list_root, 1)
class SimpleTenantUsageController(object):
    """Computes per-tenant resource usage over a requested time window."""

    def _hours_for(self, instance, period_start, period_stop):
        """Return the number of hours `instance` ran inside the window.

        The billed span is clamped to [period_start, period_stop]; an
        instance that terminated before the window or launched after it
        contributes zero hours.
        """
        launched_at = instance.launched_at
        terminated_at = instance.terminated_at
        if terminated_at is not None:
            if not isinstance(terminated_at, datetime.datetime):
                # NOTE(mriedem): Instance object DateTime fields are
                # timezone-aware so convert using isotime.
                terminated_at = timeutils.parse_isotime(terminated_at)

        if launched_at is not None:
            if not isinstance(launched_at, datetime.datetime):
                launched_at = timeutils.parse_isotime(launched_at)

        # nothing if the instance stopped before the usage report started
        if terminated_at and terminated_at < period_start:
            return 0

        # nothing if it started after the usage report ended
        if launched_at and launched_at > period_stop:
            return 0

        if launched_at:
            # if instance launched after period_started, don't charge for first
            start = max(launched_at, period_start)
            if terminated_at:
                # if instance stopped before period_stop, don't charge after
                stop = min(period_stop, terminated_at)
            else:
                # instance is still running, so charge them up to current time
                stop = period_stop
            dt = stop - start
            # Bug fix: one second is 1,000,000 microseconds; the previous
            # divisor of 100000.0 overstated the sub-second fraction 10x.
            seconds = (dt.days * 3600 * 24 + dt.seconds +
                       dt.microseconds / 1000000.0)

            return seconds / 3600.0
        else:
            # instance hasn't launched, so no charge
            return 0

    def _get_flavor(self, context, instance, flavors_cache):
        """Get flavor information from the instance's system_metadata,
        allowing a fallback to lookup by-id for deleted instances only.

        Returns None when the flavor no longer exists (nothing to bill).
        """
        try:
            return instance.get_flavor()
        except KeyError:
            if not instance.deleted:
                # Only support the fallback mechanism for deleted instances
                # that would have been skipped by migration #153
                raise

        # Fall back to an id-based lookup, memoized in flavors_cache so
        # repeated instances of the same flavor hit the DB only once.
        flavor_type = instance.instance_type_id
        if flavor_type in flavors_cache:
            return flavors_cache[flavor_type]

        try:
            flavor_ref = flavor_obj.Flavor.get_by_id(context, flavor_type)
            flavors_cache[flavor_type] = flavor_ref
        except exception.FlavorNotFound:
            # can't bill if there is no flavor
            flavor_ref = None

        return flavor_ref

    def _tenant_usages_for_period(self, context, period_start,
                                  period_stop, tenant_id=None, detailed=True):
        """Aggregate usage for all instances active in the window.

        Returns a list of per-tenant summary dicts; when `detailed` is
        True each summary also carries a `server_usages` list with one
        entry per instance.
        """
        instances = instance_obj.InstanceList.get_active_by_window_joined(
            context, period_start, period_stop, tenant_id,
            expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS)
        rval = {}
        flavors = {}

        for instance in instances:
            info = {}
            info['hours'] = self._hours_for(instance,
                                            period_start,
                                            period_stop)
            flavor = self._get_flavor(context, instance, flavors)
            if not flavor:
                # Flavor no longer exists: skip this instance entirely.
                continue

            info['instance_id'] = instance.uuid
            info['name'] = instance.display_name

            info['memory_mb'] = flavor.memory_mb
            info['local_gb'] = flavor.root_gb + flavor.ephemeral_gb
            info['vcpus'] = flavor.vcpus

            info['tenant_id'] = instance.project_id

            info['flavor'] = flavor.name

            # NOTE(mriedem): We need to normalize the start/end times back
            # to timezone-naive so the response doesn't change after the
            # conversion to objects.
            info['started_at'] = timeutils.normalize_time(instance.launched_at)

            info['ended_at'] = (
                timeutils.normalize_time(instance.terminated_at) if
                instance.terminated_at else None)

            if info['ended_at']:
                info['state'] = 'terminated'
            else:
                info['state'] = instance.vm_state

            now = timeutils.utcnow()

            if info['state'] == 'terminated':
                delta = info['ended_at'] - info['started_at']
            else:
                delta = now - info['started_at']

            info['uptime'] = delta.days * 24 * 3600 + delta.seconds

            if info['tenant_id'] not in rval:
                summary = {}
                summary['tenant_id'] = info['tenant_id']
                if detailed:
                    summary['server_usages'] = []
                summary['total_local_gb_usage'] = 0
                summary['total_vcpus_usage'] = 0
                summary['total_memory_mb_usage'] = 0
                summary['total_hours'] = 0
                summary['start'] = timeutils.normalize_time(period_start)
                summary['stop'] = timeutils.normalize_time(period_stop)
                rval[info['tenant_id']] = summary

            summary = rval[info['tenant_id']]
            summary['total_local_gb_usage'] += info['local_gb'] * info['hours']
            summary['total_vcpus_usage'] += info['vcpus'] * info['hours']
            summary['total_memory_mb_usage'] += (info['memory_mb'] *
                                                 info['hours'])

            summary['total_hours'] += info['hours']
            if detailed:
                summary['server_usages'].append(info)

        return rval.values()

    def _parse_datetime(self, dtstr):
        """Parse a query-string datetime; missing values default to now.

        Accepts datetime objects unchanged and tries three string
        formats (ISO with/without microseconds, space-separated).
        """
        if not dtstr:
            value = timeutils.utcnow()
        elif isinstance(dtstr, datetime.datetime):
            value = dtstr
        else:
            try:
                value = timeutils.parse_strtime(dtstr, "%Y-%m-%dT%H:%M:%S")
            except Exception:
                try:
                    value = timeutils.parse_strtime(dtstr,
                                                    "%Y-%m-%dT%H:%M:%S.%f")
                except Exception:
                    value = timeutils.parse_strtime(dtstr,
                                                    "%Y-%m-%d %H:%M:%S.%f")

        # NOTE(mriedem): Instance object DateTime fields are timezone-aware
        # so we have to force UTC timezone for comparing this datetime against
        # instance object fields and still maintain backwards compatibility
        # in the API.
        if value.utcoffset() is None:
            value = value.replace(tzinfo=iso8601.iso8601.Utc())
        return value

    def _get_datetime_range(self, req):
        """Extract (period_start, period_stop, detailed) from the request."""
        qs = req.environ.get('QUERY_STRING', '')
        env = urlparse.parse_qs(qs)
        # NOTE(lzyeval): env.get() always returns a list
        period_start = self._parse_datetime(env.get('start', [None])[0])
        period_stop = self._parse_datetime(env.get('end', [None])[0])

        if not period_start < period_stop:
            msg = _("Invalid start time. The start time cannot occur after "
                    "the end time.")
            raise exc.HTTPBadRequest(explanation=msg)

        detailed = env.get('detailed', ['0'])[0] == '1'
        return (period_start, period_stop, detailed)

    @wsgi.serializers(xml=SimpleTenantUsagesTemplate)
    def index(self, req):
        """Retrieve tenant_usage for all tenants."""
        context = req.environ['nova.context']

        authorize_list(context)

        (period_start, period_stop, detailed) = self._get_datetime_range(req)
        now = timeutils.parse_isotime(timeutils.strtime())
        if period_stop > now:
            period_stop = now
        usages = self._tenant_usages_for_period(context,
                                                period_start,
                                                period_stop,
                                                detailed=detailed)
        return {'tenant_usages': usages}

    @wsgi.serializers(xml=SimpleTenantUsageTemplate)
    def show(self, req, id):
        """Retrieve tenant_usage for a specified tenant."""
        tenant_id = id
        context = req.environ['nova.context']

        authorize_show(context, {'project_id': tenant_id})

        (period_start, period_stop, ignore) = self._get_datetime_range(req)
        now = timeutils.parse_isotime(timeutils.strtime())
        if period_stop > now:
            period_stop = now
        usage = self._tenant_usages_for_period(context,
                                               period_start,
                                               period_stop,
                                               tenant_id=tenant_id,
                                               detailed=True)
        if usage:
            usage = usage[0]
        else:
            usage = {}
        return {'tenant_usage': usage}
class Simple_tenant_usage(extensions.ExtensionDescriptor):
    """Simple tenant usage extension."""

    name = "SimpleTenantUsage"
    alias = "os-simple-tenant-usage"
    namespace = ("http://docs.openstack.org/compute/ext/"
                 "os-simple-tenant-usage/api/v1.1")
    updated = "2011-08-19T00:00:00+00:00"

    def get_resources(self):
        """Expose the single os-simple-tenant-usage API resource."""
        usage_resource = extensions.ResourceExtension(
            'os-simple-tenant-usage', SimpleTenantUsageController())
        return [usage_resource]
| |
# intervals.py is part of the 'spartan' package.
# It was written by Gus Dunn and was created on 5/3/14.
#
# Please see the license info in the root folder of this package.
"""
=================================================
intervals.py
=================================================
Purpose:
This code is intended to provide the "base" interval representation for `spartan`.
"""
from spartan.utils.misc import Bunch
__author__ = 'Gus Dunn'
import spartan.utils.errors as e
#### Helper functions ####
def interval_length(start, end):
    """Return the number of bases in the inclusive interval [start, end]."""
    return (end - start) + 1
def grow_interval(orig_start, orig_end, grow_by, add_to="both"):
    """
    Returns new interval start and end base locations after growing the interval at either
    the ``left``, ``right``, or ``both`` ends by ``grow_by`` amount.

    :param orig_start: original left bound (must be an int).
    :param orig_end: original right bound (must be an int).
    :param grow_by: number of bases to grow the edges of the interval
    :param add_to: {"left", "right", "both"}
    :raises e.InvalidOptionError: if ``add_to`` is not a valid option.
    :raises e.SanityCheckError: if merging did not yield a single interval.
    :return: (new_start, new_end)
    """
    assert isinstance(orig_start, int)
    assert isinstance(orig_end, int)

    # Bug fix: these flags were previously "assigned" with '==' (a
    # comparison, not an assignment), so grow_left/grow_right were never
    # bound and every call raised NameError below.
    if add_to == "both":
        grow_left = True
        grow_right = True
    elif add_to == "left":
        grow_left = True
        grow_right = False
    elif add_to == "right":
        grow_left = False
        grow_right = True
    else:
        raise e.InvalidOptionError(wrong_value=add_to,
                                   option_name="add_to",
                                   valid_values=('both', 'left', 'right'))

    merge_these = [(orig_start, orig_end)]

    if grow_left:
        merge_these.append(left_window_coords(grow_by, orig_start))
    if grow_right:
        merge_these.append(right_window_coords(grow_by, orig_end))

    merged = merge_intervals(merge_these)

    if len(merged) == 1:
        # NOTE(review): this returns a one-element list of tuples rather
        # than the bare (new_start, new_end) tuple the docstring promises —
        # confirm which form callers expect before changing it.
        return merged
    else:
        msg = "`grow_interval` should return a single new interval. Check your input then check the code. Would " \
              "have returned: %s" % (str(merged))
        raise e.SanityCheckError(msg)
def merge_intervals(intervals):
    """
    Returns a list of interval tuples (sorted from left to right by left bound) after overlapping intervals have been
    combined.

    Intervals are treated as inclusive base ranges, so intervals that
    overlap OR are directly adjacent (e.g. ``(1, 4)`` and ``(5, 9)``) are
    merged — `grow_interval` relies on the adjacent windows it builds
    collapsing into a single interval.

    This was previously an unimplemented stub (``pass``), which returned
    None and crashed `grow_interval` at ``len(merged)``.

    :param intervals: iterable of interval tuples
    :return: list of merged (start, end) tuples
    """
    ordered = sorted(tuple(ivl) for ivl in intervals)
    merged = []
    for start, end in ordered:
        if merged and start <= merged[-1][1] + 1:
            # Overlaps or abuts the previous interval: extend it.
            prev_start, prev_end = merged[-1]
            merged[-1] = (prev_start, max(prev_end, end))
        else:
            merged.append((start, end))
    return merged
def left_window_coords(win_size, original_left_bound):
    """
    Returns a `tuple` `(new_start, new_end)` left of original bound describing a window of length `win_size` (see
    note).

    Note: Converts any new value less than `1` to `1`.

    :param win_size: size of window to the left.
    :param original_left_bound:
    :return: new_coords
    """
    # Clamp both coordinates to the 1-based coordinate system's floor.
    start = max(original_left_bound - win_size, 1)
    end = max(original_left_bound - 1, 1)
    return (start, end)
def right_window_coords(win_size, original_right_bound):
    """
    Returns a `tuple` `(new_start, new_end)` right of original bound describing a window of length `win_size`.

    :param win_size: size of window to the right.
    :param original_right_bound:
    :return: new_coords
    """
    first_base = original_right_bound + 1
    return (first_base, original_right_bound + win_size)
def detect_overlap(coords1, coords2):
    """
    Returns `True` if `coords1` overlaps with `coords2`.

    Note: both argument lists are modified in place (values coerced to
    int and sorted ascending), matching the original behavior.

    :param coords1: `list` of two `int` numbers representing **start**, **end** coordinates of a feature
    :param coords2: `list` of two `int` numbers representing **start**, **end** coordinates of a feature
    """
    # Coerce to int and sort each pair in place.
    for pair in (coords1, coords2):
        pair[0], pair[1] = int(pair[0]), int(pair[1])
        pair.sort()

    # Compare the shorter interval against the longer one.
    if (coords1[1] - coords1[0]) <= (coords2[1] - coords2[0]):
        shorter, longer = coords1, coords2
    else:
        shorter, longer = coords2, coords1

    # Overlap <=> either endpoint of the shorter lies within the longer.
    starts_inside = longer[0] <= shorter[0] <= longer[1]
    ends_inside = longer[0] <= shorter[1] <= longer[1]
    return starts_inside or ends_inside
##########################
class SimpleFeature(object):
    """A simple inclusive interval with comparison and overlap support.

    Coordinates are stored on a `Bunch` under ``self.data``; ordering
    comparisons use the start coordinate except for ``>`` which compares
    end coordinates.
    """

    def __init__(self, start, end,):
        """
        :param start: left most coordinate.
        :param end: right most coordinate.
        """
        assert start <= end
        self.data = Bunch()
        try:
            self.data.start = start
            self.data.end = end
        except TypeError:
            pass

    def __str__(self):
        return "%s-%s" % (self.data.start, self.data.end)

    def __len__(self):
        # Inclusive length: end - start + 1.
        return interval_length(self.data.start, self.data.end)

    def __contains__(self, other):
        """True when `other` lies entirely within this feature."""
        return (self.data.start <= other.data.start
                and self.data.end >= other.data.end)

    def __cmp__(self, other):
        """
        Order features by start coordinate (Python 2 rich-compare fallback).

        :param other: an interval/feature
        :returns int:
            * `-1` if `other` should sort to the right of `self`
            * `0` if `other` should sort exactly the same as `self`
            * `1` if `other` should sort to the left of `self`
        """
        mine = self.data.start
        theirs = other.data.start
        if mine < theirs:
            return -1
        elif mine > theirs:
            return 1
        return 0

    def __eq__(self, other):
        """
        Returns `True` if `other` perfectly overlaps this feature, `False` otherwise.

        :param other: an interval/feature
        """
        return (self.data.start == other.data.start
                and self.data.end == other.data.end)

    def __gt__(self, other):
        """
        Returns `True` if `other` falls to the right even if `other` overlaps this feature,
        `False` otherwise.

        :param other: an interval/feature
        """
        return self.data.end > other.data.end

    def __lt__(self, other):
        """
        Returns `True` if `other` falls to the left even if `other` overlaps this feature,
        `False` otherwise.

        :param other: an interval/feature
        """
        return self.data.start < other.data.start

    def has_overlap(self, other):
        """True if this feature and `other` share at least one base."""
        return detect_overlap(coords1=[self.data.start, self.data.end],
                              coords2=[other.data.start, other.data.end])

    def get_flank(self, length):
        # TODO: SimpleFeature.flank
        raise NotImplementedError()

    def get_rflank(self, length):
        # TODO: SimpleFeature.rflank
        raise NotImplementedError()

    def get_lflank(self, length):
        # TODO: SimpleFeature.lflank
        raise NotImplementedError()
| |
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# Copyright 2016, 2017 Eddie Antonio Santos <easantos@ualberta.ca>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from blessings import Terminal # type: ignore
from sensibility.edit import Deletion, Edit, Insertion, Substitution
from sensibility.language import current_language
from sensibility.vocabulary import Vind
class Suggestion:
    """
    Wraps an edit as a suggestion to a fix.
    """

    # Both line and column MUST be one-indexed.
    # Caveat: by convention, column numbers are STORED zero-indexed while
    # line numbers are stored one-indexed; subclasses compensate for this.
    line: int
    column: int

    @staticmethod
    def enclose(filename: Path, fix: Edit) -> 'Suggestion':
        """Build the concrete Suggestion subclass matching the edit type."""
        token_stream = tuple(current_language.tokenize(filename.read_bytes()))
        if isinstance(fix, Insertion):
            return Insert(fix.token, fix.index, token_stream)
        if isinstance(fix, Deletion):
            return Remove(fix.index, token_stream)
        if isinstance(fix, Substitution):
            return Replace(fix, token_stream)
        raise ValueError(f"Unknown edit subclass: {fix}")

    def __str__(self) -> str:
        raise NotImplementedError('The subclass MUST implement this')
# TODO: these classes are ancient; I could fix them.
class Insert(Suggestion):
    """Suggestion that a single token be inserted into the source."""

    def __init__(self, token: Vind, pos, tokens) -> None:
        self.token = token
        assert 1 < pos < len(tokens)
        self.tokens = tokens
        # Determine if it should be an insert after or an insert before.
        # This depends on whether the token straddles a line.
        if tokens[pos - 1].line < tokens[pos].line:
            self.insert_after = True
            self.pos = pos - 1
        else:
            self.insert_after = False
            self.pos = pos

    @property
    def line(self):
        # One-indexed line of the anchor token.
        return self.tokens[self.pos].line

    @property
    def column(self):
        """
        ONE-INDEXED COLUMN
        """
        return 1 + self.tokens[self.pos].column

    @property
    def insert_before(self):
        return not self.insert_after

    def __str__(self):
        t = Terminal()
        text = current_language.to_source_text(self.token)
        # Fix: removed the unused `next_token = self.tokens[pos + 1]` lookup;
        # it was never referenced by the message and could raise IndexError
        # at the end of the token stream (the old TODO about missing bounds
        # checking).
        msg = ("try inserting '{t.bold}{text}{t.normal}' "
               "".format_map(locals()))
        line_tokens = get_token_line(self.pos, self.tokens)
        if self.insert_after:
            line = format_line(line_tokens)
            # Add an extra space BEFORE the insertion point:
            padding = ' ' * (2 + self.column)
        else:
            # Add an extra space AFTER insertion point;
            line = format_line(line_tokens,
                               insert_space_before=self.tokens[self.pos])
            padding = ' ' * (self.column)
        arrow = padding + t.bold_green('^')
        suggestion = padding + t.green(text)
        return '\n'.join((msg, line, arrow, suggestion))
class Remove(Suggestion):
    """Suggestion that the token at *pos* be deleted from the source."""

    def __init__(self, pos, tokens):
        self.pos = pos
        self.tokens = tokens

    @property
    def token(self):
        # The token slated for removal.
        return self.tokens[self.pos]

    @property
    def line(self):
        return self.token.line

    @property
    def column(self):
        """
        ONE-INDEXED COLUMN
        """
        return 1 + self.token.column

    def __str__(self):
        t = Terminal()
        text = self.token.value
        msg = ("try removing '{t.bold}{text}{t.normal}' "
               "".format_map(locals()))
        neighbours = get_token_line(self.pos, self.tokens)
        rendered = format_line(neighbours)
        padding = ' ' * (self.token.column)
        pointer = padding + t.bold_red('^')
        highlighted = padding + t.red(text)
        return '\n'.join((msg, rendered, pointer, highlighted))
class Replace(Suggestion):
    """Suggestion that one token be substituted for another."""

    def __init__(self, fix: Substitution, tokens) -> None:
        self.fix = fix
        self.tokens = tokens

    @property
    def pos(self) -> int:
        return self.fix.index

    @property
    def token(self):
        # The token currently occupying the substitution site.
        return self.tokens[self.pos]

    @property
    def line(self):
        return self.token.line

    @property
    def column(self):
        """
        ONE-INDEXED COLUMN
        """
        return 1 + self.token.column

    def __str__(self) -> str:
        t = Terminal()
        original = self.token.value
        replacement = current_language.to_source_text(self.fix.token)
        msg = (
            f"try replacing {t.bold_red}{original}{t.normal}"
            f" with {t.bold_green}{replacement}{t.normal}"
        )
        # TODO: add strikethrough to the token!
        rendered = format_line(get_token_line(self.pos, self.tokens))
        padding = ' ' * (self.token.column)
        pointer = padding + t.bold_red('^')
        proposed = padding + t.red(replacement)
        return '\n'.join((msg, rendered, pointer, proposed))
def format_fix(filename: Path, fix: Edit) -> None:
    """
    Prints a fix for the given filename.
    """
    suggestion = Suggestion.enclose(filename, fix)
    t = Terminal()
    # Use a format similar to Clang's.
    header = t.bold(f"(unknown):{suggestion.line}:{suggestion.column}:")
    print(header, suggestion)
def get_token_line(pos, tokens):
    """Return the contiguous slice of tokens on the same line as tokens[pos]."""
    target = tokens[pos].line
    lo = pos
    # Walk left while the previous token is still on the target line.
    while lo > 0 and tokens[lo - 1].line == target:
        lo -= 1
    # Walk right while the next token is still on the target line.
    hi = pos + 1
    while hi < len(tokens) and tokens[hi].line == target:
        hi += 1
    return tokens[lo:hi]
def format_line(tokens, insert_space_before=None):
    """Reconstruct one source line from tokens, padding to column positions.

    When *insert_space_before* is one of the tokens, two extra columns of
    padding are applied from that token onward (to open a visual gap).
    """
    pieces = []
    written = 0
    shift = 0
    for tok in tokens:
        if tok is insert_space_before:
            shift = 2
        gap = ' ' * (shift + tok.column - written)
        pieces.append(gap)
        written += len(gap)
        pieces.append(tok._raw)
        written += len(tok._raw)
    return ''.join(pieces)
def not_implemented():
    """Placeholder entry point: always raises NotImplementedError."""
    raise NotImplementedError()
| |
"""A clone of threading module (version 2.7.2) that always
targets real OS threads. (Unlike 'threading' which flips between
green and OS threads based on whether the monkey patching is in effect
or not).
This module is missing 'Thread' class, but includes 'Queue'.
"""
from Queue import Full, Empty
from collections import deque
import heapq
from time import time as _time, sleep as _sleep
from gevent import monkey
from gevent.hub import PY3
__all__ = ['Condition',
'Event',
'Lock',
'RLock',
'Semaphore',
'BoundedSemaphore',
'Queue',
'local',
'stack_size']
# Resolve the real (un-monkey-patched) OS-thread primitives via gevent's
# monkey.get_original, so this module always targets native threads even
# when gevent has patched the 'thread'/'_thread' module.
thread_name = '_thread' if PY3 else 'thread'
start_new_thread, Lock, get_ident, local, stack_size = monkey.get_original(thread_name, [
    'start_new_thread', 'allocate_lock', 'get_ident', '_local', 'stack_size'])
class RLock(object):
    """Reentrant lock over the raw OS-thread Lock (threading.RLock clone).

    The owning thread may acquire() repeatedly; release() must be called
    once per acquire() before another thread can take the lock.
    """
    def __init__(self):
        # __block: the underlying non-reentrant OS lock.
        # __owner/__count: which thread holds it and the recursion depth.
        self.__block = Lock()
        self.__owner = None
        self.__count = 0
    def __repr__(self):
        owner = self.__owner
        return "<%s owner=%r count=%d>" % (
            self.__class__.__name__, owner, self.__count)
    def acquire(self, blocking=1):
        # Reentrant fast path: the owner just bumps the recursion count.
        me = get_ident()
        if self.__owner == me:
            self.__count = self.__count + 1
            return 1
        # Otherwise contend for the real lock; on success record ownership.
        # Returns a truthy value on success, falsy when non-blocking failed.
        rc = self.__block.acquire(blocking)
        if rc:
            self.__owner = me
            self.__count = 1
        return rc
    __enter__ = acquire
    def release(self):
        # Only the owning thread may release; raises otherwise.
        if self.__owner != get_ident():
            raise RuntimeError("cannot release un-acquired lock")
        self.__count = count = self.__count - 1
        if not count:
            # Outermost release: drop ownership and free the real lock.
            self.__owner = None
            self.__block.release()
    def __exit__(self, t, v, tb):
        self.release()
    # Internal methods used by condition variables
    def _acquire_restore(self, count_owner):
        # Re-acquire the lock and restore the (count, owner) pair that
        # _release_save() returned — used by Condition.wait().
        count, owner = count_owner
        self.__block.acquire()
        self.__count = count
        self.__owner = owner
    def _release_save(self):
        # Fully release the lock regardless of recursion depth and return
        # the state needed to restore it after waiting.
        count = self.__count
        self.__count = 0
        owner = self.__owner
        self.__owner = None
        self.__block.release()
        return (count, owner)
    def _is_owned(self):
        # True when the calling thread currently owns this lock.
        return self.__owner == get_ident()
class Condition(object):
    """Condition variable over OS-thread locks (threading.Condition clone).

    Each blocked waiter parks on its own private Lock; notify() wakes a
    waiter by releasing that private lock.
    """
    def __init__(self, lock=None):
        # Default to an RLock so recursive holders can wait()/notify().
        if lock is None:
            lock = RLock()
        self.__lock = lock
        # Export the lock's acquire() and release() methods
        self.acquire = lock.acquire
        self.release = lock.release
        # If the lock defines _release_save() and/or _acquire_restore(),
        # these override the default implementations (which just call
        # release() and acquire() on the lock). Ditto for _is_owned().
        try:
            self._release_save = lock._release_save
        except AttributeError:
            pass
        try:
            self._acquire_restore = lock._acquire_restore
        except AttributeError:
            pass
        try:
            self._is_owned = lock._is_owned
        except AttributeError:
            pass
        # One private waiter-Lock per blocked thread, in FIFO order.
        self.__waiters = []
    def __enter__(self):
        return self.__lock.__enter__()
    def __exit__(self, *args):
        return self.__lock.__exit__(*args)
    def __repr__(self):
        return "<Condition(%s, %d)>" % (self.__lock, len(self.__waiters))
    def _release_save(self):
        # Fallback used when the wrapped lock is a plain (non-reentrant)
        # Lock without its own _release_save.
        self.__lock.release()           # No state to save
    def _acquire_restore(self, x):
        self.__lock.acquire()           # Ignore saved state
    def _is_owned(self):
        # Return True if lock is owned by current_thread.
        # This method is called only if __lock doesn't have _is_owned().
        # Heuristic: a non-blocking acquire succeeding means it was free.
        if self.__lock.acquire(0):
            self.__lock.release()
            return False
        else:
            return True
    def wait(self, timeout=None):
        # Caller must hold the lock; park on a fresh private waiter lock.
        if not self._is_owned():
            raise RuntimeError("cannot wait on un-acquired lock")
        waiter = Lock()
        waiter.acquire()
        self.__waiters.append(waiter)
        saved_state = self._release_save()
        try:    # restore state no matter what (e.g., KeyboardInterrupt)
            if timeout is None:
                # Blocks until notify() releases our waiter lock.
                waiter.acquire()
            else:
                # Balancing act:  We can't afford a pure busy loop, so we
                # have to sleep; but if we sleep the whole timeout time,
                # we'll be unresponsive.  The scheme here sleeps very
                # little at first, longer as time goes on, but never longer
                # than 20 times per second (or the timeout time remaining).
                endtime = _time() + timeout
                delay = 0.0005 # 500 us -> initial delay of 1 ms
                while True:
                    gotit = waiter.acquire(0)
                    if gotit:
                        break
                    remaining = endtime - _time()
                    if remaining <= 0:
                        break
                    delay = min(delay * 2, remaining, .05)
                    _sleep(delay)
                if not gotit:
                    # Timed out: withdraw our waiter (notify may have
                    # removed it concurrently, hence the ValueError guard).
                    try:
                        self.__waiters.remove(waiter)
                    except ValueError:
                        pass
        finally:
            self._acquire_restore(saved_state)
    def notify(self, n=1):
        # Wake up to n waiters (FIFO) by releasing their private locks.
        if not self._is_owned():
            raise RuntimeError("cannot notify on un-acquired lock")
        __waiters = self.__waiters
        waiters = __waiters[:n]
        if not waiters:
            return
        for waiter in waiters:
            waiter.release()
            try:
                __waiters.remove(waiter)
            except ValueError:
                pass
    def notify_all(self):
        self.notify(len(self.__waiters))
class Semaphore(object):
    """Counting semaphore with no upper bound (threading.Semaphore clone)."""
    # After Tim Peters' semaphore class, but not quite the same (no maximum)
    def __init__(self, value=1):
        if value < 0:
            raise ValueError("semaphore initial value must be >= 0")
        self.__cond = Condition(Lock())
        self.__value = value
    def acquire(self, blocking=1):
        # Decrement the counter, blocking while it is zero.
        # Returns True on success, False when non-blocking and exhausted.
        rc = False
        self.__cond.acquire()
        # while/else: the else branch runs only when the loop exits
        # normally (counter became positive), not via the non-blocking
        # break — so rc stays False on a failed non-blocking acquire.
        while self.__value == 0:
            if not blocking:
                break
            self.__cond.wait()
        else:
            self.__value = self.__value - 1
            rc = True
        self.__cond.release()
        return rc
    __enter__ = acquire
    def release(self):
        # Increment the counter and wake one blocked acquirer.
        self.__cond.acquire()
        self.__value = self.__value + 1
        self.__cond.notify()
        self.__cond.release()
    def __exit__(self, t, v, tb):
        self.release()
class BoundedSemaphore(Semaphore):
    """Semaphore that checks that # releases is <= # acquires"""
    def __init__(self, value=1):
        Semaphore.__init__(self, value)
        self._initial_value = value
    def release(self):
        """Release the semaphore, raising ValueError if that would push
        the counter above its initial value (i.e. more releases than
        acquires)."""
        # Fix: the parent's private __value attribute is name-mangled to
        # _Semaphore__value.  The previous spelling (self.Semaphore__value,
        # missing the leading underscore) raised AttributeError on every
        # release, so the bound was never enforced.
        if self._Semaphore__value >= self._initial_value:
            raise ValueError("Semaphore released too many times")
        return Semaphore.release(self)
class Event(object):
    """Boolean flag threads can wait on (threading.Event clone)."""
    # After Tim Peters' event class (without is_posted())
    def __init__(self):
        self.__cond = Condition(Lock())
        self.__flag = False
    def _reset_internal_locks(self):
        # private!  called by Thread._reset_internal_locks by _after_fork()
        # Re-creating the condition discards locks held by threads that no
        # longer exist in the child process.
        self.__cond.__init__()
    def is_set(self):
        # Unsynchronized read of the flag (snapshot only).
        return self.__flag
    def set(self):
        # Raise the flag and wake every waiter.
        self.__cond.acquire()
        try:
            self.__flag = True
            self.__cond.notify_all()
        finally:
            self.__cond.release()
    def clear(self):
        # Lower the flag; subsequent wait() calls will block.
        self.__cond.acquire()
        try:
            self.__flag = False
        finally:
            self.__cond.release()
    def wait(self, timeout=None):
        # Block until the flag is set or the timeout elapses; returns the
        # flag's value at wake-up (may still be False on timeout).
        self.__cond.acquire()
        try:
            if not self.__flag:
                self.__cond.wait(timeout)
            return self.__flag
        finally:
            self.__cond.release()
class Queue:
    """Create a queue object with a given maximum size.
    If maxsize is <= 0, the queue size is infinite.
    """
    def __init__(self, maxsize=0):
        self.maxsize = maxsize
        self._init(maxsize)
        # mutex must be held whenever the queue is mutating.  All methods
        # that acquire mutex must release it before returning.  mutex
        # is shared between the three conditions, so acquiring and
        # releasing the conditions also acquires and releases mutex.
        self.mutex = Lock()
        # Notify not_empty whenever an item is added to the queue; a
        # thread waiting to get is notified then.
        self.not_empty = Condition(self.mutex)
        # Notify not_full whenever an item is removed from the queue;
        # a thread waiting to put is notified then.
        self.not_full = Condition(self.mutex)
        # Notify all_tasks_done whenever the number of unfinished tasks
        # drops to zero; thread waiting to join() is notified to resume
        self.all_tasks_done = Condition(self.mutex)
        self.unfinished_tasks = 0
    def task_done(self):
        """Indicate that a formerly enqueued task is complete.
        Used by Queue consumer threads.  For each get() used to fetch a task,
        a subsequent call to task_done() tells the queue that the processing
        on the task is complete.
        If a join() is currently blocking, it will resume when all items
        have been processed (meaning that a task_done() call was received
        for every item that had been put() into the queue).
        Raises a ValueError if called more times than there were items
        placed in the queue.
        """
        self.all_tasks_done.acquire()
        try:
            unfinished = self.unfinished_tasks - 1
            if unfinished <= 0:
                if unfinished < 0:
                    raise ValueError('task_done() called too many times')
                self.all_tasks_done.notify_all()
            self.unfinished_tasks = unfinished
        finally:
            self.all_tasks_done.release()
    def join(self):
        """Blocks until all items in the Queue have been gotten and processed.
        The count of unfinished tasks goes up whenever an item is added to the
        queue. The count goes down whenever a consumer thread calls task_done()
        to indicate the item was retrieved and all work on it is complete.
        When the count of unfinished tasks drops to zero, join() unblocks.
        """
        self.all_tasks_done.acquire()
        try:
            while self.unfinished_tasks:
                self.all_tasks_done.wait()
        finally:
            self.all_tasks_done.release()
    def qsize(self):
        """Return the approximate size of the queue (not reliable!)."""
        self.mutex.acquire()
        try:
            return self._qsize()
        finally:
            self.mutex.release()
    def empty(self):
        """Return True if the queue is empty, False otherwise (not reliable!)."""
        self.mutex.acquire()
        try:
            return not self._qsize()
        finally:
            self.mutex.release()
    def full(self):
        """Return True if the queue is full, False otherwise (not reliable!)."""
        self.mutex.acquire()
        try:
            if self.maxsize <= 0:
                # Unbounded queues are never full.
                return False
            # BUG FIX: the previous test (`self.maxsize >= self._qsize()`)
            # reported "full" for any queue at-or-under capacity and fell
            # through to None otherwise.  A bounded queue is full when the
            # item count has reached the bound.
            return self._qsize() >= self.maxsize
        finally:
            self.mutex.release()
    def put(self, item, block=True, timeout=None):
        """Put an item into the queue.
        If optional args 'block' is true and 'timeout' is None (the default),
        block if necessary until a free slot is available. If 'timeout' is
        a positive number, it blocks at most 'timeout' seconds and raises
        the Full exception if no free slot was available within that time.
        Otherwise ('block' is false), put an item on the queue if a free slot
        is immediately available, else raise the Full exception ('timeout'
        is ignored in that case).
        """
        self.not_full.acquire()
        try:
            if self.maxsize > 0:
                if not block:
                    if self._qsize() >= self.maxsize:
                        raise Full
                elif timeout is None:
                    # Wait indefinitely for a free slot.
                    while self._qsize() >= self.maxsize:
                        self.not_full.wait()
                elif timeout < 0:
                    raise ValueError("'timeout' must be a positive number")
                else:
                    # Wait with a deadline, re-checking after each wake-up.
                    endtime = _time() + timeout
                    while self._qsize() >= self.maxsize:
                        remaining = endtime - _time()
                        if remaining <= 0.0:
                            raise Full
                        self.not_full.wait(remaining)
            self._put(item)
            self.unfinished_tasks += 1
            self.not_empty.notify()
        finally:
            self.not_full.release()
    def put_nowait(self, item):
        """Put an item into the queue without blocking.
        Only enqueue the item if a free slot is immediately available.
        Otherwise raise the Full exception.
        """
        return self.put(item, False)
    def get(self, block=True, timeout=None):
        """Remove and return an item from the queue.
        If optional args 'block' is true and 'timeout' is None (the default),
        block if necessary until an item is available. If 'timeout' is
        a positive number, it blocks at most 'timeout' seconds and raises
        the Empty exception if no item was available within that time.
        Otherwise ('block' is false), return an item if one is immediately
        available, else raise the Empty exception ('timeout' is ignored
        in that case).
        """
        self.not_empty.acquire()
        try:
            if not block:
                if not self._qsize():
                    raise Empty
            elif timeout is None:
                # Wait indefinitely for an item.
                while not self._qsize():
                    self.not_empty.wait()
            elif timeout < 0:
                raise ValueError("'timeout' must be a positive number")
            else:
                # Wait with a deadline, re-checking after each wake-up.
                endtime = _time() + timeout
                while not self._qsize():
                    remaining = endtime - _time()
                    if remaining <= 0.0:
                        raise Empty
                    self.not_empty.wait(remaining)
            item = self._get()
            self.not_full.notify()
            return item
        finally:
            self.not_empty.release()
    def get_nowait(self):
        """Remove and return an item from the queue without blocking.
        Only get an item if one is immediately available. Otherwise
        raise the Empty exception.
        """
        return self.get(False)
    # Override these methods to implement other queue organizations
    # (e.g. stack or priority queue).
    # These will only be called with appropriate locks held
    # Initialize the queue representation
    def _init(self, maxsize):
        self.queue = deque()
    def _qsize(self, len=len):
        return len(self.queue)
    # Put a new item in the queue
    def _put(self, item):
        self.queue.append(item)
    # Get an item from the queue
    def _get(self):
        return self.queue.popleft()
class PriorityQueue(Queue):
    '''Variant of Queue that retrieves open entries in priority order (lowest first).
    Entries are typically tuples of the form: (priority number, data).
    '''
    def _init(self, maxsize):
        # Backing store is a heap-ordered list.
        self.queue = list()
    def _qsize(self, len=len):
        # len bound as a default arg for a faster local lookup.
        return len(self.queue)
    def _put(self, item, heappush=heapq.heappush):
        heappush(self.queue, item)
    def _get(self, heappop=heapq.heappop):
        return heappop(self.queue)
class LifoQueue(Queue):
    '''Variant of Queue that retrieves most recently added entries first.'''
    def _init(self, maxsize):
        # A plain list used as a stack.
        self.queue = list()
    def _qsize(self, len=len):
        # len bound as a default arg for a faster local lookup.
        return len(self.queue)
    def _put(self, item):
        self.queue.append(item)
    def _get(self):
        return self.queue.pop()
| |
#!/usr/bin/env python
"""Management Command Tool.
Based originally on Django's one."""
from __future__ import print_function
import os
import sys
import collections
import imp
import warnings
from optparse import OptionParser, NO_DEFAULT, make_option
import traceback
from importlib import import_module
from magpy.server.database import Database
from magpy.server.validators import smart_str
import magpy
import six
from six.moves import filter
# A cache of loaded commands, so that call_command
# doesn't have to reload every time it's called.
# This is how Django does it, but globals are for losers
# So lets introduce a class one day instead.
_COMMANDS = None
def execute_from_command_line(argv=None):
    """
    A simple method that runs a ManagementUtility.
    """
    ManagementUtility(argv).execute()
def find_commands(management_dir):
    """
    Given a path to a management directory, returns a list of all the command
    names that are available.
    Returns an empty list if no commands are defined.
    """
    command_dir = os.path.join(management_dir, 'commands')
    try:
        entries = os.listdir(command_dir)
    except OSError:
        # A missing or unreadable commands directory means no commands.
        return []
    return [entry[:-3] for entry in entries
            if entry.endswith('.py') and not entry.startswith('_')]
def get_commands():
    """
    Returns a dictionary mapping command names to their callback applications.
    This works by looking for a management.commands package in magpy, and
    in each installed application -- if a commands package exists, all commands
    in that package are registered.
    The dictionary is in the format {command_name: app_name}. Key-value
    pairs from this dictionary can then be used in calls to
    load_command_class(app_name, command_name)
    If a specific version of a command must be loaded (e.g., with the
    startapp command), the instantiated module can be placed in the
    dictionary in place of the application name.
    The dictionary is cached on the first call and reused on subsequent
    calls.
    """
    global _COMMANDS  # pylint: disable-msg=W0603
    if _COMMANDS is None:
        # Find the builtin commands
        magpy_path = find_management_module('magpy')
        _COMMANDS = dict((name, 'magpy')
                         for name in find_commands(magpy_path))
        # Find the installed apps
        database = Database()
        apps = database.get_app_list()
        # Fix: identity comparison with None (was `apps == None`), which is
        # both the idiomatic form and safe against odd __eq__ overloads.
        if apps is None:
            apps = []
        # Find and load the management module for each installed app.
        for app_name in apps:
            try:
                path = find_management_module(app_name)
                _COMMANDS.update((name, app_name)
                                 for name in find_commands(path))
            except ImportError:
                pass  # No management module - ignore this app
    return _COMMANDS
def find_management_module(app_name):
    """
    Determines the path to the management module for the given app_name,
    without actually importing the application or the management module.
    Raises ImportError if the management module cannot be found for any reason.
    """
    # Search the dotted path right-to-left: pop one component at a time,
    # narrowing imp.find_module's search path to the directory found so far.
    parts = app_name.split('.')
    parts.append('management')
    parts.reverse()
    part = parts.pop()
    path = None
    # When using manage.py, the project module is added to the path,
    # loaded, then removed from the path. This means that
    # testproject.testapp.models can be loaded in future, even if
    # testproject isn't in the path. When looking for the management
    # module, we need look for the case where the project name is part
    # of the app_name but the project directory itself isn't on the path.
    # pylint: disable-msg=W0612
    try:
        fileobject, path, descr = imp.find_module(part, path)
    except ImportError as exception:
        # Tolerate a missing top-level package only when the current
        # working directory is itself named after it (running from inside
        # the project directory); otherwise re-raise.
        if os.path.basename(os.getcwd()) != part:
            raise exception
    while parts:
        # Descend into the remaining components; `path and [path] or None`
        # passes the previously-found package directory when there is one.
        part = parts.pop()
        fileobject, path, descr = \
            imp.find_module(part, path and [path] or None)
    return path
def load_command_class(app_name, name):
    """
    Given a command name and an application name, returns the Command
    class instance. All errors raised by the import process
    (ImportError, AttributeError) are allowed to propagate.
    """
    dotted_path = '%s.management.commands.%s' % (app_name, name)
    return import_module(dotted_path).Command()
class LaxOptionParser(OptionParser): # pylint: disable-msg=R0904
    """
    An option parser that doesn't raise any errors on unknown options.
    This is needed because the --settings and --pythonpath options affect
    the commands (and thus the options) that are available to the user.
    """
    def error(self, msg):
        # Swallow all parse errors: unknown options are expected during
        # this early, lax pass.
        pass
    def print_help(self): # pylint: disable-msg=W0221
        """Output nothing.
        The lax options are included in the normal option parser, so under
        normal usage, we don't need to print the lax options.
        """
        pass
    def print_lax_help(self):
        """Output the basic options available to every command.
        This just redirects to the default print_help() behavior.
        """
        OptionParser.print_help(self)
    def _process_args(self, largs, rargs, values):
        """
        Overrides OptionParser._process_args to exclusively handle default
        options and ignore args and other options.
        This overrides the behavior of the super class, which stop parsing
        at the first unrecognized option.
        """
        while rargs:
            arg = rargs[0]
            try:
                if arg[0:2] == "--" and len(arg) > 2:
                    # process a single long option (possibly with value(s))
                    # the superclass code pops the arg off rargs
                    self._process_long_opt(rargs, values)
                elif arg[:1] == "-" and len(arg) > 1:
                    # process a cluster of short options (possibly with
                    # value(s) for the last one only)
                    # the superclass code pops the arg off rargs
                    self._process_short_opts(rargs, values)
                else:
                    # it's either a non-default option or an arg
                    # either way, add it to the args list so we can keep
                    # dealing with options
                    del rargs[0]
                    raise Exception
            except:
                # Deliberate raise/except "goto": any argument optparse
                # cannot handle (including the explicit raise above) lands
                # here and is preserved in largs instead of aborting.
                largs.append(arg) # pylint: disable-msg=W0702
class ManagementUtility(object):
    """
    Encapsulates the logic of the django-admin.py and manage.py utilities.
    A ManagementUtility has a number of commands, which can be manipulated
    by editing the self.commands dictionary.
    """
    def __init__(self, argv=None):
        # Copy sys.argv so later mutation of the list cannot affect us.
        self.argv = argv or sys.argv[:]
        self.prog_name = os.path.basename(self.argv[0])
    def main_help_text(self, commands_only=False):
        """
        Returns the script's main help text, as a string.
        When commands_only is true the usage preamble is replaced by a
        plain sorted list of command names.
        """
        if commands_only:
            usage = sorted(get_commands().keys())
        else:
            usage = [
                "",
                "Type '%s help <subcommand>' for help on a "
                "specific subcommand." % self.prog_name,
                "",
                "Available subcommands:",
            ]
        # Group command names by the app that provides them.
        # (idiom fix: defaultdict(list) instead of defaultdict(lambda: []))
        commands_dict = collections.defaultdict(list)
        for name, app in six.iteritems(get_commands()):
            if app == 'django.core':
                app = 'django'
            else:
                app = app.rpartition('.')[-1]
            commands_dict[app].append(name)
        style = color_style()
        for app in sorted(commands_dict.keys()):
            usage.append("")
            usage.append(style.NOTICE("[%s]" % app))
            for name in sorted(commands_dict[app]):
                usage.append("    %s" % name)
        return '\n'.join(usage)
    def fetch_command(self, subcommand):
        """
        Tries to fetch the given subcommand, printing a message with the
        appropriate command called from the command line (usually
        "django-admin.py" or "manage.py") if it can't be found.
        """
        try:
            app_name = get_commands()[subcommand]
        except KeyError:
            sys.stderr.write("Unknown command: %r\nType '%s help'"
                             " for usage.\n" % \
                                 (subcommand, self.prog_name))
            sys.exit(1)
        if isinstance(app_name, BaseCommand):
            # If the command is already loaded, use it directly.
            klass = app_name
        else:
            klass = load_command_class(app_name, subcommand)
        return klass
    def autocomplete(self): # pylint: disable-msg=R0914
        """
        Output completion suggestions for BASH.
        The output of this function is passed to BASH's `COMREPLY` variable and
        treated as completion suggestions. `COMREPLY` expects a space
        separated string as the result.
        The `COMP_WORDS` and `COMP_CWORD` BASH environment variables are used
        to get information about the cli input. Please refer to the BASH
        man-page for more information about this variables.
        Subcommand options are saved as pairs. A pair consists of
        the long option string (e.g. '--exclude') and a boolean
        value indicating if the option requires arguments. When printing to
        stdout, a equal sign is appended to options which require arguments.
        Note: If debugging this function, it is recommended to write the debug
        output in a separate file. Otherwise the debug output will be treated
        and formatted as potential completion suggestions.
        """
        # Don't complete if user hasn't sourced bash_completion file.
        # This is found in django-trunk/extras/django_bash_completion
        if 'DJANGO_AUTO_COMPLETE' not in os.environ:
            return
        cwords = os.environ['COMP_WORDS'].split()[1:]
        cword = int(os.environ['COMP_CWORD'])
        try:
            curr = cwords[cword - 1]
        except IndexError:
            curr = ''
        # Fix: dict.keys() is a view on Python 3 and cannot be concatenated
        # with a list; materialize it first (harmless on Python 2).
        subcommands = list(get_commands().keys()) + ['help']
        options = [('--help', None)]
        # subcommand
        if cword == 1:
            debug_text = ' '.join(sorted(filter(lambda x: x.startswith(curr),
                                                subcommands)))
            print(debug_text)
        # subcommand options
        # special case: the 'help' subcommand has no options
        elif cwords[0] in subcommands and cwords[0] != 'help':
            subcommand_cls = self.fetch_command(cwords[0])
            # special case: add the names of installed apps to options
            if cwords[0] in ('dumpdata', 'sql', 'sqlall', 'sqlclear',
                             'sqlcustom', 'sqlindexes', 'sqlsequencereset', 'test'):
                try:
                    database = Database()
                    # Get the last part of the dotted path as the app name.
                    options += [(a.split('.')[-1], 0) for \
                                    a in database.get_app_list()]
                except ImportError:
                    # Fail silently if DJANGO_SETTINGS_MODULE isn't set. The
                    # user will find out once they execute the command.
                    pass
            options += [(s_opt.get_opt_string(), s_opt.nargs) for s_opt in
                        subcommand_cls.option_list]
            # filter out previously specified options from available options
            prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
            options = [opt for opt in options if opt[0] not in prev_opts]
            # filter options by current input
            options = sorted([(k, v) for k, v in \
                                  options if k.startswith(curr)])
            for option in options:
                opt_label = option[0]
                # append '=' to options which require args
                if option[1]:
                    opt_label += '='
                print(opt_label)
        # Exit non-zero so BASH treats the output as suggestions only.
        sys.exit(1)
    def execute(self):
        """
        Given the command-line arguments, this figures out which subcommand is
        being run, creates a parser appropriate to that command, and runs it.
        """
        # Preprocess options to extract --settings and --pythonpath.
        # These options could affect the commands that are available, so they
        # must be processed early.
        parser = LaxOptionParser(usage="%prog subcommand [options] [args]",
                                 version=magpy.get_version(),
                                 option_list=BaseCommand.option_list)
        self.autocomplete()
        try:
            options, args = parser.parse_args(self.argv)
            handle_default_options(options)
        except:
            # Ignore any option errors at this point; the subcommand's own
            # parser will report them properly.
            pass # pylint: disable-msg=W0702
        try:
            subcommand = self.argv[1]
        except IndexError:
            subcommand = 'help' # Display help if no arguments were given.
        if subcommand == 'help':
            if len(args) <= 2:
                parser.print_lax_help()
                sys.stdout.write(self.main_help_text() + '\n')
            elif args[2] == '--commands':
                sys.stdout.write(
                    self.main_help_text(commands_only=True) + '\n')
            else:
                self.fetch_command(args[2]).print_help(self.prog_name, args[2])
        elif subcommand == 'version':
            sys.stdout.write(parser.get_version() + '\n')
        # Special-cases: We want 'django-admin.py --version' and
        # 'django-admin.py --help' to work, for backwards compatibility.
        elif self.argv[1:] == ['--version']:
            # LaxOptionParser already takes care of printing the version.
            pass
        elif self.argv[1:] in (['--help'], ['-h']):
            parser.print_lax_help()
            sys.stdout.write(self.main_help_text() + '\n')
        else:
            self.fetch_command(subcommand).run_from_argv(self.argv)
# Base classes for writing management commands (named commands which can
# be executed through ``manage.py``).
class CommandError(Exception):
    """
    Exception class indicating a problem while executing a management
    command.
    If this exception is raised during the execution of a management
    command, it will be caught and turned into a nicely-printed error
    message to the appropriate output stream (i.e., stderr); as a
    result, raising this exception (with a sensible description of the
    error) is the preferred way to indicate that something has gone
    wrong in the execution of a command.
    """
    # Marker exception only: no behavior beyond the Exception base class.
    pass
def handle_default_options(options):
    """
    Include any default options that all commands should accept here
    so that ManagementUtility can handle them before searching for
    user commands.
    """
    settings = options.settings
    if settings:
        os.environ['DJANGO_SETTINGS_MODULE'] = settings
    pythonpath = options.pythonpath
    if pythonpath:
        # Prepend so the supplied directory wins over existing entries.
        sys.path.insert(0, pythonpath)
class BaseCommand(object):
    """Root class of every management command.

    Subclass this directly when you need full control over argument
    parsing and execution; otherwise one of the specialised subclasses
    defined in this file (``AppCommand``, ``LabelCommand``,
    ``NoArgsCommand``) is usually a better fit.

    The normal flow is:

    1. ``django-admin.py`` or ``manage.py`` loads the command class and
       invokes its ``run_from_argv()`` method.
    2. ``run_from_argv()`` builds an ``OptionParser`` via
       ``create_parser()``, parses the arguments, applies environment
       changes requested by options such as ``pythonpath``, and calls
       ``execute()`` with the parsed arguments.
    3. ``execute()`` runs ``handle()`` and writes any text it returns
       to standard output.
    4. A ``CommandError`` raised by ``handle()`` is caught by
       ``execute()`` and reported on ``stderr`` instead.

    ``handle()`` is therefore the usual starting point for subclasses.

    Attributes that tune behaviour:

    ``args``
        Human-readable summary of the accepted arguments, used in help
        messages; e.g. '<appname appname ...>'.
    ``can_import_settings``
        Whether the command requires importable Django settings;
        defaults to ``True``.
    ``help``
        Short description of the command shown in help messages.
    ``option_list``
        ``optparse`` options fed into the command's ``OptionParser``.
    """
    # Metadata about this command.
    option_list = (
        make_option('-v', '--verbosity', action='store',
            dest='verbosity', default='1',
            type='choice', choices=['0', '1', '2', '3'],
            help='Verbosity level; 0=minimal output, 1=normal output,'
            ' 2=verbose output, 3=very verbose output'),
        make_option('--settings',
            help='The Python path to a settings module, e.g. '
            '"myproject.settings.main". If this isn\'t provided, '
            'the DJANGO_SETTINGS_MODULE environment variable will'
            ' be used.'),
        make_option('--pythonpath',
            help='A directory to add to the Python path, '
            'e.g. "/home/djangoprojects/myproject".'),
        make_option('--traceback', action='store_true',
            help='Print traceback on exception'),
    )
    help = ''
    args = ''
    # Configuration shortcuts that alter various logic.
    can_import_settings = True
    requires_model_validation = True
    output_transaction = False  # Wrap the output in a "BEGIN; COMMIT;"

    def __init__(self):
        self.style = color_style()

    @staticmethod
    def get_version():
        """
        Return the Magpy version, correct for all built-in Magpy
        commands.  User-supplied commands should override this method.
        """
        return magpy.get_version()

    def usage(self, subcommand):
        """
        Return a brief usage description for this command, appending
        ``self.help`` when it is set.
        """
        base_usage = '%%prog %s [options] %s' % (subcommand, self.args)
        return '%s\n\n%s' % (base_usage, self.help) if self.help else base_usage

    def create_parser(self, prog_name, subcommand):
        """
        Create and return the ``OptionParser`` used to parse this
        command's arguments.
        """
        return OptionParser(
            prog=prog_name,
            usage=self.usage(subcommand),
            version=self.get_version(),
            option_list=self.option_list)

    def print_help(self, prog_name, subcommand):
        """Print this command's help message, derived from ``self.usage()``."""
        self.create_parser(prog_name, subcommand).print_help()

    def run_from_argv(self, argv):
        """
        Apply any requested environment changes (e.g. Python path and
        Django settings), then run this command.
        """
        parser = self.create_parser(argv[0], argv[1])
        parsed_options, parsed_args = parser.parse_args(argv[2:])
        handle_default_options(parsed_options)
        # pylint: disable-msg=W0142
        self.execute(*parsed_args, **parsed_options.__dict__)

    def execute(self, *args, **options):
        """
        Try to execute this command.

        A ``CommandError`` raised by ``handle()`` is intercepted and
        printed sensibly to stderr (or re-raised as a traceback when
        the ``traceback`` option is set), after which the process
        exits with status 1.
        """
        show_traceback = options.get('traceback', False)
        try:
            self.stdout = options.get('stdout', sys.stdout)
            self.stderr = options.get('stderr', sys.stderr)
            result = self.handle(*args, **options)
            if result:
                self.stdout.write(result)
        except CommandError as error:
            if show_traceback:
                traceback.print_exc()
            else:
                self.stderr.write(
                    smart_str(self.style.ERROR('Error: %s\n' % error)))
            sys.exit(1)

    def handle(self, *args, **options):
        """
        The actual logic of the command.  Subclasses must implement
        this method.
        """
        raise NotImplementedError()
class ImproperlyConfigured(Exception):
    """Raised when Magpy is somehow improperly configured."""
def load_models(appname):
    """Import and return the ``models`` submodule of *appname*."""
    return import_module('.models', package=appname)
class AppCommand(BaseCommand):  # pylint: disable=R0921
    """
    A management command operating on one or more installed
    applications named on the command line.

    Subclasses implement ``handle_app()``, which is invoked once per
    application, instead of ``handle()``.
    """
    args = '<appname appname ...>'

    def handle(self, *app_labels, **options):
        if not app_labels:
            raise CommandError('Enter at least one appname.')
        try:
            modules = [load_models(label) for label in app_labels]
        except (ImproperlyConfigured, ImportError) as error:
            raise CommandError(
                "%s. Are you sure your applications.installed_apps "
                "setting is correct?" % error)
        pieces = []
        for app in modules:
            piece = self.handle_app(app, **options)
            if piece:
                pieces.append(piece)
        return '\n'.join(pieces)

    def handle_app(self, app, **options):
        """
        Perform the command's actions for ``app``, the Python module
        corresponding to an application name given on the command line.
        """
        raise NotImplementedError()
class LabelCommand(BaseCommand):  # pylint: disable=R0921
    """
    A management command processing one or more arbitrary string
    arguments (labels) given on the command line.

    Subclasses implement ``handle_label()``, which is invoked once per
    label, instead of ``handle()``.  When the arguments are names of
    installed applications, use ``AppCommand`` instead.
    """
    args = '<label label ...>'
    label = 'label'

    def handle(self, *labels, **options):
        if not labels:
            raise CommandError('Enter at least one %s.' % self.label)
        results = [self.handle_label(label, **options) for label in labels]
        return '\n'.join(result for result in results if result)

    def handle_label(self, label, **options):
        """
        Perform the command's actions for ``label``, the string as
        given on the command line.
        """
        raise NotImplementedError()
class NoArgsCommand(BaseCommand):  # pylint: disable=R0921
    """
    A management command accepting no command-line arguments.

    Subclasses implement ``handle_noargs()``; ``handle()`` itself is
    overridden to reject any positional arguments with a
    ``CommandError``.
    """
    args = ''

    def handle(self, *args, **options):
        if args:
            raise CommandError("Command doesn't accept any arguments")
        return self.handle_noargs(**options)

    def handle_noargs(self, **options):
        """Perform this command's actions."""
        raise NotImplementedError()
##############################################
# Terminal colour support utilities.         #
##############################################
def supports_color():
    """
    Return True when the running system's terminal supports color,
    False otherwise.
    """
    stream = sys.stdout
    # isatty is not always implemented, #6223.
    if not (hasattr(stream, 'isatty') and stream.isatty()):
        return False
    return sys.platform not in ('win32', 'Pocket PC')
def color_style():
    """Return a Style object carrying the Django color scheme."""
    if not supports_color():
        return no_style()

    color_settings = parse_color_setting(os.environ.get('DJANGO_COLORS', ''))
    if not color_settings:
        return no_style()

    class Style:
        """A style object."""
        pass

    style = Style()
    # The nocolor palette lists every available role; use it to drive
    # population of the style from the environment's definitions.
    for role in PALETTES[NOCOLOR_PALETTE]:
        role_format = color_settings.get(role, {})
        # pylint: disable-msg=W0142
        setattr(style, role, make_style(**role_format))
    # Backwards compatibility: ERROR_OUTPUT mirrors ERROR.
    style.ERROR_OUTPUT = style.ERROR
    return style
def no_style():
    """Return a Style object that applies no colors at all."""
    class PlainStyle:
        """Style whose every role is the identity formatter."""
        def __getattr__(self, attr):
            return lambda text: text
    return PlainStyle()
# The eight standard ANSI colours, in escape-code order: colour i maps
# to SGR code '3<i>' (foreground) and '4<i>' (background).
COLOR_NAMES = (
    'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'
)
FOREGROUND = dict((name, '3%s' % index)
                  for index, name in enumerate(COLOR_NAMES))
BACKGROUND = dict((name, '4%s' % index)
                  for index, name in enumerate(COLOR_NAMES))
# SGR code that resets all display attributes.
RESET = '0'
# Display options and their SGR codes.
OPT_DICT = {
    'bold': '1', 'underscore': '4', 'blink': '5',
    'reverse': '7', 'conceal': '8'}
def colorize(text='', opts=(), **kwargs):
    """
    Return *text* enclosed in ANSI graphics codes.

    The codes applied depend on the keyword arguments 'fg' and 'bg'
    and on the contents of the *opts* tuple/list.

    Returns the RESET code if no parameters are given.

    Valid colors:
        'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'

    Valid options:
        'bold'
        'underscore'
        'blink'
        'reverse'
        'conceal'
        'noreset' - string will not be auto-terminated with the RESET code

    Examples:
        colorize('hello', fg='red', bg='blue', opts=('blink',))
        colorize()
        colorize('goodbye', opts=('underscore',))
        print colorize('first line', fg='red', opts=('noreset',))
        print 'this should be red too'
        print colorize('and so should this')
        print 'this should not be red'
    """
    # Special case: colorize('', opts=('reset',)) yields the bare RESET code.
    if text == '' and list(opts) == ['reset']:
        return '\x1b[%sm' % RESET
    codes = []
    for key, value in kwargs.items():
        if key == 'fg':
            codes.append(FOREGROUND[value])
        elif key == 'bg':
            codes.append(BACKGROUND[value])
    codes.extend(OPT_DICT[opt] for opt in opts if opt in OPT_DICT)
    if 'noreset' not in opts:
        text += '\x1b[%sm' % RESET
    return ('\x1b[%sm' % ';'.join(codes)) + text
def make_style(opts=(), **kwargs):
    """
    Return a function that calls colorize() with preset parameters.

    Example:
        bold_red = make_style(opts=('bold',), fg='red')
        print bold_red('hello')
        KEYWORD = make_style(fg='yellow')
        COMMENT = make_style(fg='blue', opts=('bold',))
    """
    def style_func(text):
        """Apply the preset colorize() parameters to *text*."""
        # pylint: disable-msg=W0142
        return colorize(text, opts, **kwargs)
    return style_func
# Named palette identifiers accepted in the DJANGO_COLORS setting.
NOCOLOR_PALETTE = 'nocolor'
DARK_PALETTE = 'dark'
LIGHT_PALETTE = 'light'
# Maps each palette name to a dict of role -> colorize() keyword
# arguments.  The 'nocolor' palette lists every available role with an
# empty format, and is used elsewhere as the canonical role list.
PALETTES = {
    NOCOLOR_PALETTE: {
        'ERROR': {},
        'NOTICE': {},
        'SQL_FIELD': {},
        'SQL_COLTYPE': {},
        'SQL_KEYWORD': {},
        'SQL_TABLE': {},
        'HTTP_INFO': {},
        'HTTP_SUCCESS': {},
        'HTTP_REDIRECT': {},
        'HTTP_NOT_MODIFIED': {},
        'HTTP_BAD_REQUEST': {},
        'HTTP_NOT_FOUND': {},
        'HTTP_SERVER_ERROR': {},
    },
    DARK_PALETTE: {
        'ERROR': {'fg': 'red', 'opts': ('bold',)},
        'NOTICE': {'fg': 'red'},
        'SQL_FIELD': {'fg': 'green', 'opts': ('bold',)},
        'SQL_COLTYPE': {'fg': 'green'},
        'SQL_KEYWORD': {'fg': 'yellow'},
        'SQL_TABLE': {'opts': ('bold',)},
        'HTTP_INFO': {'opts': ('bold',)},
        'HTTP_SUCCESS': {},
        'HTTP_REDIRECT': {'fg': 'green'},
        'HTTP_NOT_MODIFIED': {'fg': 'cyan'},
        'HTTP_BAD_REQUEST': {'fg': 'red', 'opts': ('bold',)},
        'HTTP_NOT_FOUND': {'fg': 'yellow'},
        'HTTP_SERVER_ERROR': {'fg': 'magenta', 'opts': ('bold',)},
    },
    LIGHT_PALETTE: {
        'ERROR': {'fg': 'red', 'opts': ('bold',)},
        'NOTICE': {'fg': 'red'},
        'SQL_FIELD': {'fg': 'green', 'opts': ('bold',)},
        'SQL_COLTYPE': {'fg': 'green'},
        'SQL_KEYWORD': {'fg': 'blue'},
        'SQL_TABLE': {'opts': ('bold',)},
        'HTTP_INFO': {'opts': ('bold',)},
        'HTTP_SUCCESS': {},
        'HTTP_REDIRECT': {'fg': 'green', 'opts': ('bold',)},
        'HTTP_NOT_MODIFIED': {'fg': 'green'},
        'HTTP_BAD_REQUEST': {'fg': 'red', 'opts': ('bold',)},
        'HTTP_NOT_FOUND': {'fg': 'red'},
        'HTTP_SERVER_ERROR': {'fg': 'magenta', 'opts': ('bold',)},
    }
}
# Palette used when DJANGO_COLORS is empty or unset.
DEFAULT_PALETTE = DARK_PALETTE
def parse_color_setting(config_string):
    """Parse a DJANGO_COLORS environment variable into a palette.

    The general form of a palette definition is:

        "palette;role=fg;role=fg/bg;role=fg,option,option;role=fg/bg,option,option"

    where:
        palette is a named palette; one of 'light', 'dark', or 'nocolor'.
        role is a named style used by Django.
        fg is a foreground color.
        bg is a background color.
        option is a display option.

    Specifying a named palette is the same as manually specifying the
    individual definitions for each role.  Any individual definitions
    following the palette definition will augment the base palette
    definition.

    Valid roles:
        'error', 'notice', 'sql_field', 'sql_coltype', 'sql_keyword',
        'sql_table', 'http_info', 'http_success', 'http_redirect',
        'http_not_modified', 'http_bad_request', 'http_not_found',
        'http_server_error'

    Valid colors:
        'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'

    Valid options:
        'bold', 'underscore', 'blink', 'reverse', 'conceal'

    Returns the default palette when *config_string* is empty, and
    None when the parsed result contains no colors at all.
    """
    if not config_string:
        return PALETTES[DEFAULT_PALETTE]
    # Split the color configuration into parts.
    parts = config_string.lower().split(';')
    palette = PALETTES[NOCOLOR_PALETTE].copy()
    for part in parts:
        if part in PALETTES:
            # A default palette has been specified.
            palette.update(PALETTES[part])
        elif '=' in part:
            # Process a palette-defining string.
            definition = {}
            # Break the definition into the role plus the list of
            # specific instructions.  The role must be in upper case.
            # maxsplit=1 keeps malformed input such as 'error=red=blue'
            # from raising ValueError; the bogus color simply fails the
            # COLOR_NAMES membership tests below and is ignored.
            role, instructions = part.split('=', 1)
            role = role.upper()
            styles = instructions.split(',')
            styles.reverse()
            # The first instruction can contain a slash to break apart
            # foreground and background colors (fg/bg).
            colors = styles.pop().split('/')
            colors.reverse()
            foreground = colors.pop()
            if foreground in COLOR_NAMES:
                definition['fg'] = foreground
            if colors and colors[-1] in COLOR_NAMES:
                definition['bg'] = colors[-1]
            # All remaining instructions are display options.
            opts = tuple(s for s in styles if s in OPT_DICT)
            if opts:
                definition['opts'] = opts
            # The nocolor palette has all available roles; use it as
            # the basis for determining whether the role is valid.
            if role in PALETTES[NOCOLOR_PALETTE] and definition:
                palette[role] = definition
    # If there are no colors specified, return the empty palette.
    if palette == PALETTES[NOCOLOR_PALETTE]:
        return None
    return palette
| |
# -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: add a db index on SummaryAgenda.month."""
        # Adding index on 'SummaryAgenda', fields ['month']
        db.create_index('agendas_summaryagenda', ['month'])
    def backwards(self, orm):
        """Revert the migration: drop the db index on SummaryAgenda.month."""
        # Removing index on 'SummaryAgenda', fields ['month']
        db.delete_index('agendas_summaryagenda', ['month'])
models = {
'agendas.agenda': {
'Meta': {'unique_together': "(('name', 'public_owner_name'),)", 'object_name': 'Agenda'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'editors': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'agendas'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'num_followers': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'public_owner_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['laws.Vote']", 'through': "orm['agendas.AgendaVote']", 'symmetrical': 'False'})
},
'agendas.agendabill': {
'Meta': {'unique_together': "(('agenda', 'bill'),)", 'object_name': 'AgendaBill'},
'agenda': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'agendabills'", 'to': "orm['agendas.Agenda']"}),
'bill': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'agendabills'", 'to': "orm['laws.Bill']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'reasoning': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0'})
},
'agendas.agendameeting': {
'Meta': {'unique_together': "(('agenda', 'meeting'),)", 'object_name': 'AgendaMeeting'},
'agenda': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'agendameetings'", 'to': "orm['agendas.Agenda']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meeting': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'agendacommitteemeetings'", 'to': "orm['committees.CommitteeMeeting']"}),
'reasoning': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0'})
},
'agendas.agendavote': {
'Meta': {'unique_together': "(('agenda', 'vote'),)", 'object_name': 'AgendaVote'},
'agenda': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'agendavotes'", 'to': "orm['agendas.Agenda']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'reasoning': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'vote': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'agendavotes'", 'to': "orm['laws.Vote']"})
},
'agendas.summaryagenda': {
'Meta': {'object_name': 'SummaryAgenda'},
'agenda': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'score_summaries'", 'to': "orm['agendas.Agenda']"}),
'db_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'db_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mk': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'agenda_summaries'", 'null': 'True', 'to': "orm['mks.Member']"}),
'month': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'score': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'summary_type': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'votes': ('django.db.models.fields.BigIntegerField', [], {'default': '0'})
},
'agendas.usersuggestedvote': {
'Meta': {'unique_together': "(('agenda', 'vote', 'user'),)", 'object_name': 'UserSuggestedVote'},
'agenda': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_suggested_votes'", 'to': "orm['agendas.Agenda']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reasoning': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'sent_to_editor': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'suggested_agenda_votes'", 'to': "orm['auth.User']"}),
'vote': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_suggested_agendas'", 'to': "orm['laws.Vote']"})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'committees.committee': {
'Meta': {'object_name': 'Committee'},
'aliases': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'chairpersons': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'chaired_committees'", 'blank': 'True', 'to': "orm['mks.Member']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'committees'", 'blank': 'True', 'to': "orm['mks.Member']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'portal_knesset_broadcasts_url': ('django.db.models.fields.URLField', [], {'max_length': '1000', 'blank': 'True'}),
'replacements': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'replacing_in_committees'", 'blank': 'True', 'to': "orm['mks.Member']"}),
'type': ('django.db.models.fields.CharField', [], {'default': "'committee'", 'max_length': '10'})
},
'committees.committeemeeting': {
'Meta': {'ordering': "('-date',)", 'object_name': 'CommitteeMeeting'},
'committee': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'meetings'", 'to': "orm['committees.Committee']"}),
'date': ('django.db.models.fields.DateField', [], {'db_index': 'True'}),
'date_string': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mks_attended': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'committee_meetings'", 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'protocol_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'src_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'topics': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'votes_mentioned': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'committee_meetings'", 'blank': 'True', 'to': "orm['laws.Vote']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'events.event': {
'Meta': {'object_name': 'Event'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'what': ('django.db.models.fields.TextField', [], {}),
'when': ('django.db.models.fields.DateTimeField', [], {}),
'when_over': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'when_over_guessed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'where': ('django.db.models.fields.TextField', [], {'default': "u'earth'"}),
'which_pk': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'which_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'event_for_event'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'who': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['persons.Person']", 'null': 'True', 'symmetrical': 'False'}),
'why': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
'laws.bill': {
'Meta': {'ordering': "('-stage_date', '-id')", 'object_name': 'Bill'},
'approval_vote': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'bill_approved'", 'unique': 'True', 'null': 'True', 'to': "orm['laws.Vote']"}),
'first_committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_first'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}),
'first_vote': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bills_first'", 'null': 'True', 'to': "orm['laws.Vote']"}),
'full_title': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'joiners': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_joined'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'law': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'bills'", 'null': 'True', 'to': "orm['laws.Law']"}),
'popular_name': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'popular_name_slug': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'pre_votes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_pre_votes'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['laws.Vote']"}),
'proposers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['mks.Member']"}),
'second_committee_meetings': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'bills_second'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['committees.CommitteeMeeting']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '1000'}),
'stage': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'stage_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
'laws.law': {
'Meta': {'object_name': 'Law'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'merged_into': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'duplicates'", 'null': 'True', 'to': "orm['laws.Law']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
},
'laws.vote': {
'Meta': {'ordering': "('-time', '-id')", 'object_name': 'Vote'},
'against_coalition': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'against_opposition': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'against_own_bill': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'against_party': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'against_votes_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'controversy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'for_votes_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'full_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_text_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'importance': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
'meeting_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'src_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'src_url': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'time_string': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'vote_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'votes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'votes'", 'blank': 'True', 'through': "orm['laws.VoteAction']", 'to': "orm['mks.Member']"}),
'votes_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'laws.voteaction': {
'Meta': {'object_name': 'VoteAction'},
'against_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'against_opposition': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'against_own_bill': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'against_party': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'vote': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['laws.Vote']"})
},
'mks.knesset': {
'Meta': {'object_name': 'Knesset'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'mks.member': {
'Meta': {'ordering': "['name']", 'object_name': 'Member'},
'area_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'average_monthly_committee_presence': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'average_weekly_presence_hours': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'backlinks_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'bills_stats_approved': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_first': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_pre': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_proposed': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'blog': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['planet.Blog']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'current_party': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'members'", 'null': 'True', 'to': "orm['mks.Party']"}),
'current_position': ('django.db.models.fields.PositiveIntegerField', [], {'default': '999', 'blank': 'True'}),
'current_role_descriptions': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_of_death': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'family_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_children': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'parties': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'all_members'", 'symmetrical': 'False', 'through': "orm['mks.Membership']", 'to': "orm['mks.Party']"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'place_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lat': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lon': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'residence_centrality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'residence_economy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'year_of_aliyah': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'mks.membership': {
'Meta': {'object_name': 'Membership'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Member']"}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mks.Party']"}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {'default': '999', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'mks.party': {
'Meta': {'ordering': "('-number_of_seats',)", 'unique_together': "(('knesset', 'name'),)", 'object_name': 'Party'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'knesset': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parties'", 'null': 'True', 'to': "orm['mks.Knesset']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_members': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'number_of_seats': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
'persons.person': {
'Meta': {'ordering': "('name',)", 'object_name': 'Person'},
'area_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_of_death': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'family_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'mk': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'person'", 'null': 'True', 'to': "orm['mks.Member']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_children': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'place_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lat': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lon': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'residence_centrality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'residence_economy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'titles': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'persons'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['persons.Title']"}),
'year_of_aliyah': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'persons.title': {
'Meta': {'object_name': 'Title'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'planet.blog': {
'Meta': {'ordering': "('title', 'url')", 'object_name': 'Blog'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '1024', 'db_index': 'True'})
},
'tagging.tag': {
'Meta': {'ordering': "('name',)", 'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'tagging.taggeditem': {
'Meta': {'unique_together': "(('tag', 'content_type', 'object_id'),)", 'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['tagging.Tag']"})
}
}
complete_apps = ['agendas']
| |
from zerver.lib.test_classes import WebhookTestCase
# Default stream topic used by most Bitbucket Server webhook tests.
TOPIC = "sandbox"
# Topic template for branch-scoped push events; filled via .format(branch=...).
TOPIC_BRANCH_EVENTS = "sandbox / {branch}"
class Bitbucket3HookTests(WebhookTestCase):
    """End-to-end tests for the Bitbucket Server ("bitbucket3") webhook.

    Each test replays a recorded fixture payload (named by the first
    argument to ``check_webhook``) and asserts the exact stream topic and
    message content Zulip renders for it.
    """

    STREAM_NAME = "bitbucket3"
    URL_TEMPLATE = "/api/v1/external/bitbucket3?stream={stream}&api_key={api_key}"
    WEBHOOK_DIR_NAME = "bitbucket3"

    # Diagnostics events:
    def test_ping(self) -> None:
        expected_message = (
            "Congratulations! The Bitbucket Server webhook was configured successfully!"
        )
        self.check_webhook("diagnostics_ping", "Bitbucket Server Ping", expected_message)

    def test_ping_with_user_defined_topic(self) -> None:
        self.url = self.build_webhook_url(topic="my topic")
        expected_message = (
            "Congratulations! The Bitbucket Server webhook was configured successfully!"
        )
        self.check_webhook("diagnostics_ping", "my topic", expected_message)

    # Core repo events:
    def test_commit_comment_added(self) -> None:
        expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) commented on [508d1b6](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/commits/508d1b67f1f8f3a25f543a030a7a178894aa9907):\n~~~ quote\nJust an arbitrary comment on a commit.\n~~~"""
        self.check_webhook("commit_comment_added", TOPIC, expected_message)

    def test_commit_comment_edited(self) -> None:
        expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) edited their comment on [508d1b6](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/commits/508d1b67f1f8f3a25f543a030a7a178894aa9907):\n~~~ quote\nJust an arbitrary comment on a commit. Nothing to see here...\n~~~"""
        self.check_webhook("commit_comment_edited", TOPIC, expected_message)

    def test_commit_comment_deleted(self) -> None:
        expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) deleted their comment on [508d1b6](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/commits/508d1b67f1f8f3a25f543a030a7a178894aa9907):\n~~~ quote\n~~Just an arbitrary comment on a commit. Nothing to see here...~~\n~~~"""
        self.check_webhook("commit_comment_deleted", TOPIC, expected_message)

    def test_bitbucket3_repo_forked(self) -> None:
        expected_message = """User Hemanth V. Alluri(login: [hypro999](http://139.59.64.214:7990/users/hypro999)) forked the repository into [sandbox fork](http://139.59.64.214:7990/users/hypro999/repos/sandbox-fork/browse)."""
        self.check_webhook("repo_forked", TOPIC, expected_message)

    def test_bitbucket3_repo_modified(self) -> None:
        expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) changed the name of the **sandbox** repo from **sandbox** to **sandbox v2**."""
        expected_topic = "sandbox v2"
        self.check_webhook("repo_modified", expected_topic, expected_message)

    # Repo push events:
    def test_push_add_branch(self) -> None:
        expected_message = (
            """[hypro999](http://139.59.64.214:7990/users/hypro999) created branch2 branch."""
        )
        expected_topic = TOPIC_BRANCH_EVENTS.format(branch="branch2")
        self.check_webhook("repo_push_add_branch", expected_topic, expected_message)

    def test_push_add_tag(self) -> None:
        expected_message = (
            """[hypro999](http://139.59.64.214:7990/users/hypro999) pushed tag newtag."""
        )
        self.check_webhook("repo_push_add_tag", TOPIC, expected_message)

    def test_push_delete_branch(self) -> None:
        expected_message = (
            """[hypro999](http://139.59.64.214:7990/users/hypro999) deleted branch branch2."""
        )
        expected_topic = TOPIC_BRANCH_EVENTS.format(branch="branch2")
        self.check_webhook("repo_push_delete_branch", expected_topic, expected_message)

    def test_push_delete_tag(self) -> None:
        expected_message = (
            """[hypro999](http://139.59.64.214:7990/users/hypro999) removed tag test-tag."""
        )
        self.check_webhook("repo_push_delete_tag", TOPIC, expected_message)

    def test_push_update_single_branch(self) -> None:
        expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) pushed to branch master. Head is now e68c981ef53dbab0a5ca320a2d8d80e216c70528."""
        expected_topic = TOPIC_BRANCH_EVENTS.format(branch="master")
        self.check_webhook("repo_push_update_single_branch", expected_topic, expected_message)

    def test_push_update_multiple_branches(self) -> None:
        # One payload updating two branches yields two stream messages;
        # check_webhook only inspects the last one, so post manually and
        # verify both messages.
        branch1_content = """[hypro999](http://139.59.64.214:7990/users/hypro999) pushed to branch branch1. Head is now 3980c2be32a7e23c795741d5dc1a2eecb9b85d6d."""
        master_content = """[hypro999](http://139.59.64.214:7990/users/hypro999) pushed to branch master. Head is now fc43d13cff1abb28631196944ba4fc4ad06a2cf2."""

        self.subscribe(self.test_user, self.STREAM_NAME)
        payload = self.get_body("repo_push_update_multiple_branches")
        msg = self.send_webhook_payload(
            self.test_user,
            self.url,
            payload,
            content_type="application/json",
        )

        msg = self.get_second_to_last_message()
        self.assert_stream_message(
            message=msg,
            stream_name=self.STREAM_NAME,
            topic_name=TOPIC_BRANCH_EVENTS.format(branch="branch1"),
            content=branch1_content,
        )

        msg = self.get_last_message()
        self.assert_stream_message(
            message=msg,
            stream_name=self.STREAM_NAME,
            topic_name=TOPIC_BRANCH_EVENTS.format(branch="master"),
            content=master_content,
        )

    def test_push_update_multiple_branches_with_branch_filter(self) -> None:
        self.url = self.build_webhook_url(branches="master")
        expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) pushed to branch master. Head is now fc43d13cff1abb28631196944ba4fc4ad06a2cf2."""
        expected_topic = TOPIC_BRANCH_EVENTS.format(branch="master")
        self.check_webhook("repo_push_update_multiple_branches", expected_topic, expected_message)

        self.url = self.build_webhook_url(branches="branch1")
        expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) pushed to branch branch1. Head is now 3980c2be32a7e23c795741d5dc1a2eecb9b85d6d."""
        expected_topic = TOPIC_BRANCH_EVENTS.format(branch="branch1")
        self.check_webhook("repo_push_update_multiple_branches", expected_topic, expected_message)

    # Core PR events:
    def test_pr_opened_without_reviewers(self) -> None:
        expected_topic = "sandbox / PR #1 Branch1"
        expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) opened [PR #1](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/1) from `branch1` to `master`:\n\n~~~ quote\n* Add file2.txt\r\n* Add file3.txt\n~~~"""
        self.check_webhook(
            "pull_request_opened_without_reviewers", expected_topic, expected_message
        )

    def test_pr_opened_without_description(self) -> None:
        expected_topic = "sandbox / PR #2 Add notes feature."
        expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) opened [PR #2](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/2) from `master` to `master`."""
        self.check_webhook(
            "pull_request_opened_without_description", expected_topic, expected_message
        )

    def test_pr_opened_with_two_reviewers(self) -> None:
        expected_topic = "sandbox / PR #5 Add Notes Feature"
        expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) opened [PR #5](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/5) from `master` to `master` (assigned to [shimura](http://139.59.64.214:7990/users/shimura) and [sougo](http://139.59.64.214:7990/users/sougo) for review)."""
        self.check_webhook(
            "pull_request_opened_with_two_reviewers", expected_topic, expected_message
        )

    def test_pr_opened_with_two_reviewers_and_user_defined_topic(self) -> None:
        # A user-defined topic overrides the default PR topic entirely.
        # (A dead assignment of the default topic was removed here.)
        expected_topic = "custom_topic"
        self.url = self.build_webhook_url(topic="custom_topic")
        expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) opened [PR #5 Add Notes Feature](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/5) from `master` to `master` (assigned to [shimura](http://139.59.64.214:7990/users/shimura) and [sougo](http://139.59.64.214:7990/users/sougo) for review)."""
        self.check_webhook(
            "pull_request_opened_with_two_reviewers", expected_topic, expected_message
        )

    def test_pr_opened_with_multiple_reviewers(self) -> None:
        expected_topic = "sandbox / PR #6 sample_file: Add sample_file.txt."
        expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) opened [PR #6](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/6) from `master` to `master` (assigned to [sougo](http://139.59.64.214:7990/users/sougo), [zura](http://139.59.64.214:7990/users/zura) and [shimura](http://139.59.64.214:7990/users/shimura) for review):\n\n~~~ quote\nAdd a simple text file for further testing purposes.\n~~~"""
        self.check_webhook(
            "pull_request_opened_with_multiple_reviewers", expected_topic, expected_message
        )

    def test_pr_modified(self) -> None:
        expected_topic = "sandbox / PR #1 Branch1"
        expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) modified [PR #1](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/1) from `branch1` to `master` (assigned to [shimura](http://139.59.64.214:7990/users/shimura) for review):\n\n~~~ quote\n* Add file2.txt\n* Add file3.txt\nBoth of these files would be important additions to the project!\n~~~"""
        self.check_webhook("pull_request_modified", expected_topic, expected_message)

    def test_pr_modified_with_include_title(self) -> None:
        expected_topic = "custom_topic"
        expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) modified [PR #1 Branch1](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/1) from `branch1` to `master` (assigned to [shimura](http://139.59.64.214:7990/users/shimura) for review):\n\n~~~ quote\n* Add file2.txt\n* Add file3.txt\nBoth of these files would be important additions to the project!\n~~~"""
        self.url = self.build_webhook_url(topic="custom_topic")
        self.check_webhook("pull_request_modified", expected_topic, expected_message)

    def test_pr_deleted(self) -> None:
        expected_topic = "sandbox / PR #2 Add notes feature."
        expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) deleted [PR #2](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/2)."""
        self.check_webhook("pull_request_deleted", expected_topic, expected_message)

    def test_pr_deleted_with_include_title(self) -> None:
        expected_topic = "custom_topic"
        expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) deleted [PR #2 Add notes feature.](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/2)"""
        self.url = self.build_webhook_url(topic="custom_topic")
        self.check_webhook("pull_request_deleted", expected_topic, expected_message)

    def test_pr_declined(self) -> None:
        expected_topic = "sandbox / PR #7 Crazy Idea"
        expected_message = """[zura](http://139.59.64.214:7990/users/zura) declined [PR #7](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/7)."""
        self.check_webhook("pull_request_declined", expected_topic, expected_message)

    def test_pr_merged(self) -> None:
        expected_topic = "sandbox / PR #6 sample_file: Add sample_file.txt."
        expected_message = """[zura](http://139.59.64.214:7990/users/zura) merged [PR #6](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/6)."""
        self.check_webhook("pull_request_merged", expected_topic, expected_message)

    # PR reviewer events:
    def test_pr_approved(self) -> None:
        expected_topic = "sandbox / PR #6 sample_file: Add sample_file.txt."
        expected_message = """[zura](http://139.59.64.214:7990/users/zura) approved [PR #6](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/6)."""
        self.check_webhook("pull_request_approved", expected_topic, expected_message)

    def test_pr_unapproved(self) -> None:
        expected_topic = "sandbox / PR #6 sample_file: Add sample_file.txt."
        expected_message = """[zura](http://139.59.64.214:7990/users/zura) unapproved [PR #6](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/6)."""
        self.check_webhook("pull_request_unapproved", expected_topic, expected_message)

    def test_pr_marked_as_needs_review(self) -> None:
        expected_topic = "sandbox / PR #6 sample_file: Add sample_file.txt."
        expected_message = """[zura](http://139.59.64.214:7990/users/zura) marked [PR #6](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/6) as \"needs work\"."""
        self.check_webhook("pull_request_needs_work", expected_topic, expected_message)

    def test_pr_marked_as_needs_review_and_include_title(self) -> None:
        expected_topic = "custom_topic"
        expected_message = """[zura](http://139.59.64.214:7990/users/zura) marked [PR #6 sample_file: Add sample_file.txt.](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/6) as \"needs work\"."""
        self.url = self.build_webhook_url(topic="custom_topic")
        self.check_webhook("pull_request_needs_work", expected_topic, expected_message)

    def test_pull_request_reviewer_added(self) -> None:
        expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) reassigned [PR #1](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/1) to [shimura](http://139.59.64.214:7990/users/shimura)."""
        expected_topic = "sandbox / PR #1 Branch1"
        self.check_webhook("pull_request_add_reviewer", expected_topic, expected_message)

    def test_pull_request_reviewer_added_and_include_title(self) -> None:
        expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) reassigned [PR #1 Branch1](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/1) to [shimura](http://139.59.64.214:7990/users/shimura)."""
        expected_topic = "custom_topic"
        self.url = self.build_webhook_url(topic="custom_topic")
        self.check_webhook("pull_request_add_reviewer", expected_topic, expected_message)

    def test_pull_request_reviewers_added(self) -> None:
        expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) reassigned [PR #1](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/1) to [shimura](http://139.59.64.214:7990/users/shimura) and [sougo](http://139.59.64.214:7990/users/sougo)."""
        expected_topic = "sandbox / PR #1 Branch1"
        self.check_webhook("pull_request_add_two_reviewers", expected_topic, expected_message)

    def test_pull_request_remove_all_reviewers(self) -> None:
        expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) removed all reviewers from [PR #1](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/1)."""
        expected_topic = "sandbox / PR #1 Branch1"
        self.check_webhook("pull_request_remove_reviewer", expected_topic, expected_message)

    def test_pull_request_remove_all_reviewers_with_title(self) -> None:
        expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) removed all reviewers from [PR #1 Branch1](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/1)."""
        # A user-defined topic overrides the default PR topic entirely.
        # (A dead assignment of the default topic was removed here.)
        expected_topic = "custom_topic"
        self.url = self.build_webhook_url(topic="custom_topic")
        self.check_webhook("pull_request_remove_reviewer", expected_topic, expected_message)

    # PR comment events:
    def test_pull_request_comment_added(self) -> None:
        expected_message = """[zura](http://139.59.64.214:7990/users/zura) commented on [PR #6](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/6):\n\n~~~ quote\nThis seems like a pretty good idea.\n~~~"""
        expected_topic = "sandbox / PR #6 sample_file: Add sample_file.txt."
        self.check_webhook("pull_request_comment_added", expected_topic, expected_message)

    def test_pull_request_comment_edited(self) -> None:
        expected_message = """[zura](http://139.59.64.214:7990/users/zura) edited their comment on [PR #6](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/6):\n\n~~~ quote\nThis seems like a pretty good idea. @shimura what do you think?\n~~~"""
        expected_topic = "sandbox / PR #6 sample_file: Add sample_file.txt."
        self.check_webhook("pull_request_comment_edited", expected_topic, expected_message)

    def test_pull_request_comment_deleted(self) -> None:
        expected_message = """[zura](http://139.59.64.214:7990/users/zura) deleted their comment on [PR #6](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/6):\n\n~~~ quote\n~~This seems like a pretty good idea. @shimura what do you think?~~\n~~~"""
        expected_topic = "sandbox / PR #6 sample_file: Add sample_file.txt."
        self.check_webhook("pull_request_comment_deleted", expected_topic, expected_message)
| |
# Copyright 2014 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from time import sleep
from datetime import datetime
from proboscis import SkipTest
from proboscis import test
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_raises
from proboscis.asserts import assert_true
from proboscis.asserts import assert_not_equal
from proboscis.decorators import time_out
from trove.common.utils import poll_until
from trove.tests.api.instances import assert_unprocessable
from trove.tests.api.instances import InstanceTestInfo
from trove.tests.api.instances import instance_info
from trove.tests.api.instances import TIMEOUT_INSTANCE_CREATE
from trove.tests.api.instances import TIMEOUT_INSTANCE_DELETE
from trove.tests.api.instances import WaitForGuestInstallationToFinish
from trove.tests.config import CONFIG
from trove.tests.util import create_dbaas_client
from trove.tests.util import test_config
from trove.tests.util.check import AttrCheck
from trove.tests.util.check import CollectionCheck
from trove.tests.util.check import TypeCheck
from trove.tests.util.mysql import create_mysql_connection
from trove.tests.util.users import Requirements
from troveclient.compat import exceptions
GROUP = "dbaas.api.configurations"  # proboscis test group for this module
CONFIG_NAME = "test_configuration"  # name used when creating test config groups
CONFIG_DESC = "configuration description"  # description used on creation
# Module-level state shared between the ordered proboscis test classes below;
# populated as the test flow progresses.
configuration_default = None
configuration_info = None
configuration_href = None
configuration_instance = InstanceTestInfo()
configuration_instance_id = None
# MySQL server variables exercised by the configuration tests.
sql_variables = [
    'key_buffer_size',
    'connect_timeout',
    'join_buffer_size',
]
def _is_valid_timestamp(time_string):
try:
datetime.strptime(time_string, "%Y-%m-%dT%H:%M:%S")
except ValueError:
return False
return True
# helper methods to validate configuration is applied to instance
def _execute_query(host, user_name, password, query):
    """Run *query* on the MySQL server at *host* and return the result set.

    The connection is opened and closed by the ``create_mysql_connection``
    context manager; connection failures propagate as exceptions.
    (An unreachable ``assert_true(False, ...)`` that followed the ``return``
    was removed.)
    """
    # NOTE(review): this debug print echoes the password to stdout.
    print(host, user_name, password, query)
    with create_mysql_connection(host, user_name, password) as db:
        return db.execute(query)
def _get_address(instance_id):
    """Return the first IP address the admin API reports for *instance_id*."""
    shown = instance_info.dbaas_admin.mgmt.instances.show(instance_id)
    return shown.ip[0]
def _test_configuration_is_applied_to_instance(instance, configuration_id):
    """Verify that configuration group *configuration_id* is live on *instance*.

    Checks the API-reported configuration id and the instance's membership in
    the group's instance list, then logs into MySQL as each of the instance's
    users and compares every configured variable's live value against the
    group's expected values.
    """
    if CONFIG.fake_mode:
        # SQL verification needs a real guest; fake mode has no MySQL server.
        raise SkipTest("configuration from sql does not work in fake mode")
    instance_test = instance_info.dbaas.instances.get(instance.id)
    assert_equal(configuration_id, instance_test.configuration['id'])
    if configuration_id:
        testconfig_info = instance_info.dbaas.configurations.get(
            configuration_id)
    else:
        testconfig_info = instance_info.dbaas.instance.configuration(
            instance.id)
        # NOTE(review): the next line is a no-op expression statement;
        # presumably the intent was
        # ``testconfig_info = testconfig_info['configuration']`` -- confirm
        # before relying on this branch.
        testconfig_info['configuration']
    conf_instances = instance_info.dbaas.configurations.instances(
        configuration_id)
    config_instance_ids = [inst.id for inst in conf_instances]
    assert_true(instance_test.id in config_instance_ids)
    cfg_names = testconfig_info.values.keys()
    host = _get_address(instance.id)
    for user in instance.users:
        username = user['name']
        password = user['password']
        concat_variables = "','".join(cfg_names)
        # One SHOW VARIABLES query fetches all configured names at once.
        query = ("show variables where Variable_name "
                 "in ('%s');" % concat_variables)
        actual_values = _execute_query(host, username, password, query)
        print("actual_values %s" % actual_values)
        print("testconfig_info.values %s" % testconfig_info.values)
        assert_true(len(actual_values) == len(cfg_names))

        # check the configs exist
        attrcheck = AttrCheck()
        expected_attrs = [actual_key for actual_key, actual_value in actual_values]
        attrcheck.attrs_exist(testconfig_info.values, expected_attrs,
                              msg="Configurations parameters")

        def _get_parameter_type(name):
            # Look up the parameter's declared type via the API so the
            # string value MySQL returns can be coerced before comparison.
            instance_info.dbaas.configuration_parameters.get_parameter(
                instance_info.dbaas_datastore,
                instance_info.dbaas_datastore_version,
                name)
            resp, body = instance_info.dbaas.client.last_response
            print(resp)
            print(body)
            return json.loads(body)['type']

        # check the config values are correct
        for key, value in actual_values:
            key_type = _get_parameter_type(key)
            # mysql returns 'ON' and 'OFF' for True and False respectively
            if value == 'ON':
                converted_key_value = (str(key), 1)
            elif value == 'OFF':
                converted_key_value = (str(key), 0)
            else:
                if key_type == 'integer':
                    value = int(value)
                converted_key_value = (str(key), value)
            print("converted_key_value: %s" % str(converted_key_value))
            assert_true(converted_key_value in testconfig_info.values.items())
class ConfigurationsTestBase(object):
    """Shared lookups of expected configuration data from the test config."""

    @staticmethod
    def expected_instance_datastore_configs(instance_id):
        """Given an instance retrieve the expected test configurations for
        instance's datastore.
        """
        inst = instance_info.dbaas.instances.get(instance_id)
        ds_configs = CONFIG.get(inst.datastore['type'], {})
        return ds_configs.get("configurations", {})

    @staticmethod
    def expected_default_datastore_configs():
        """Returns the expected test configurations for the default datastore
        defined in the Test Config as dbaas_datastore.
        """
        default_ds = CONFIG.get('dbaas_datastore', None)
        ds_configs = CONFIG.get(default_ds, {})
        return ds_configs.get("configurations", {})
@test(depends_on_classes=[WaitForGuestInstallationToFinish], groups=[GROUP])
class CreateConfigurations(ConfigurationsTestBase):
@test
def test_expected_configurations_parameters(self):
"""Test get expected configurations parameters."""
expected_attrs = ["configuration-parameters"]
instance_info.dbaas.configuration_parameters.parameters(
instance_info.dbaas_datastore,
instance_info.dbaas_datastore_version)
resp, body = instance_info.dbaas.client.last_response
attrcheck = AttrCheck()
config_parameters_dict = json.loads(body)
attrcheck.attrs_exist(config_parameters_dict, expected_attrs,
msg="Configurations parameters")
# sanity check that a few options are in the list
config_params_list = config_parameters_dict['configuration-parameters']
config_param_keys = []
for param in config_params_list:
config_param_keys.append(param['name'])
expected_configs = self.expected_default_datastore_configs()
expected_config_params = expected_configs.get('parameters_list')
# check for duplicate configuration parameters
msg = "check for duplicate configuration parameters"
assert_equal(len(config_param_keys), len(set(config_param_keys)), msg)
for expected_config_item in expected_config_params:
assert_true(expected_config_item in config_param_keys)
@test
def test_expected_get_configuration_parameter(self):
# tests get on a single parameter to verify it has expected attributes
param = 'key_buffer_size'
expected_config_params = ['name', 'restart_required',
'max_size', 'min_size', 'type',
'deleted', 'deleted_at',
'datastore_version_id']
instance_info.dbaas.configuration_parameters.get_parameter(
instance_info.dbaas_datastore,
instance_info.dbaas_datastore_version,
param)
resp, body = instance_info.dbaas.client.last_response
print(resp)
print(body)
attrcheck = AttrCheck()
config_parameter_dict = json.loads(body)
print(config_parameter_dict)
attrcheck.attrs_exist(config_parameter_dict, expected_config_params,
msg="Get Configuration parameter")
assert_equal(param, config_parameter_dict['name'])
@test
def test_configurations_create_invalid_values(self):
"""Test create configurations with invalid values."""
values = '{"this_is_invalid": 123}'
try:
instance_info.dbaas.configurations.create(
CONFIG_NAME,
values,
CONFIG_DESC)
except exceptions.NotFound:
resp, body = instance_info.dbaas.client.last_response
assert_equal(resp.status, 404)
@test
def test_configurations_create_invalid_value_type(self):
"""Test create configuration with invalild value type."""
values = '{"key_buffer_size": "this is a string not int"}'
assert_unprocessable(instance_info.dbaas.configurations.create,
CONFIG_NAME, values, CONFIG_DESC)
@test
def test_configurations_create_value_out_of_bounds(self):
"""Test create configuration with value out of bounds."""
expected_configs = self.expected_default_datastore_configs()
values = json.dumps(expected_configs.get('out_of_bounds_over'))
assert_unprocessable(instance_info.dbaas.configurations.create,
CONFIG_NAME, values, CONFIG_DESC)
values = json.dumps(expected_configs.get('out_of_bounds_under'))
assert_unprocessable(instance_info.dbaas.configurations.create,
CONFIG_NAME, values, CONFIG_DESC)
    @test
    def test_valid_configurations_create(self):
        # Create a configuration with valid parameters and publish the
        # result through the module-level ``configuration_info`` global,
        # which the tests that run after this one depend on.
        expected_configs = self.expected_default_datastore_configs()
        values = json.dumps(expected_configs.get('valid_values'))
        # Round-trip through JSON so the comparison below uses the same
        # parsed representation the server received.
        expected_values = json.loads(values)
        result = instance_info.dbaas.configurations.create(
            CONFIG_NAME,
            values,
            CONFIG_DESC,
            datastore=instance_info.dbaas_datastore,
            datastore_version=instance_info.dbaas_datastore_version)
        resp, body = instance_info.dbaas.client.last_response
        assert_equal(resp.status, 200)
        # Verify the response document exposes the documented field types
        # (Python 2 string types: basestring/unicode).
        with TypeCheck('Configuration', result) as configuration:
            configuration.has_field('name', basestring)
            configuration.has_field('description', basestring)
            configuration.has_field('values', dict)
            configuration.has_field('datastore_name', basestring)
            configuration.has_field('datastore_version_id', unicode)
            configuration.has_field('datastore_version_name', basestring)
        global configuration_info
        configuration_info = result
        assert_equal(configuration_info.name, CONFIG_NAME)
        assert_equal(configuration_info.description, CONFIG_DESC)
        assert_equal(configuration_info.values, expected_values)
@test(runs_after=[test_valid_configurations_create])
def test_appending_to_existing_configuration(self):
# test being able to update and insert new parameter name and values
# to an existing configuration
expected_configs = self.expected_default_datastore_configs()
values = json.dumps(expected_configs.get('appending_values'))
# ensure updated timestamp is different than created
sleep(1)
instance_info.dbaas.configurations.edit(configuration_info.id,
values)
resp, body = instance_info.dbaas.client.last_response
assert_equal(resp.status, 200)
@test(runs_after=[CreateConfigurations], groups=[GROUP])
class AfterConfigurationsCreation(ConfigurationsTestBase):
    """Tests that run once the configuration group has been created."""
    @test
    def test_assign_configuration_to_invalid_instance(self):
        # test assigning to an instance that does not exist
        # NOTE(review): if modify() raises nothing this test silently
        # passes; consider assert_raises instead — confirm intent.
        invalid_id = "invalid-inst-id"
        try:
            instance_info.dbaas.instances.modify(invalid_id,
                                                 configuration_info.id)
        except exceptions.NotFound:
            resp, body = instance_info.dbaas.client.last_response
            assert_equal(resp.status, 404)
    @test
    def test_assign_configuration_to_valid_instance(self):
        # test assigning a configuration to an instance; later tests in
        # this class depend on this assignment having happened
        print("instance_info.id: %s" % instance_info.id)
        print("configuration_info: %s" % configuration_info)
        print("configuration_info.id: %s" % configuration_info.id)
        config_id = configuration_info.id
        instance_info.dbaas.instances.modify(instance_info.id,
                                             configuration=config_id)
        resp, body = instance_info.dbaas.client.last_response
        # 202: the assignment is applied asynchronously
        assert_equal(resp.status, 202)
    @test(depends_on=[test_assign_configuration_to_valid_instance])
    def test_assign_configuration_to_instance_with_config(self):
        # test assigning a configuration to an instance that
        # already has an assigned configuration; must be a 400
        config_id = configuration_info.id
        assert_raises(exceptions.BadRequest,
                      instance_info.dbaas.instances.modify, instance_info.id,
                      configuration=config_id)
    @test(depends_on=[test_assign_configuration_to_valid_instance])
    @time_out(30)
    def test_get_configuration_details_from_instance_validation(self):
        # validate that the configuration was applied correctly to the
        # instance
        inst = instance_info.dbaas.instances.get(instance_info.id)
        configuration_id = inst.configuration['id']
        assert_not_equal(None, inst.configuration['id'])
        _test_configuration_is_applied_to_instance(instance_info,
                                                   configuration_id)
    @test
    def test_configurations_get(self):
        # test that the instance shows up on the assigned configuration
        result = instance_info.dbaas.configurations.get(configuration_info.id)
        assert_equal(configuration_info.id, result.id)
        assert_equal(configuration_info.name, result.name)
        assert_equal(configuration_info.description, result.description)
        # check the result field types (Python 2: basestring)
        with TypeCheck("configuration", result) as check:
            check.has_field("id", basestring)
            check.has_field("name", basestring)
            check.has_field("description", basestring)
            check.has_field("values", dict)
            check.has_field("created", basestring)
            check.has_field("updated", basestring)
            check.has_field("instance_count", int)
        print(result.values)
        # check for valid timestamps
        assert_true(_is_valid_timestamp(result.created))
        assert_true(_is_valid_timestamp(result.updated))
        # check that created and updated timestamps differ, since
        # test_appending_to_existing_configuration should have changed the
        # updated timestamp
        assert_not_equal(result.created, result.updated)
        assert_equal(result.instance_count, 1)
        with CollectionCheck("configuration_values", result.values) as check:
            # check each item has the correct type according to the rules
            # published by the configuration-parameters API
            for (item_key, item_val) in result.values.iteritems():
                print("item_key: %s" % item_key)
                print("item_val: %s" % item_val)
                dbaas = instance_info.dbaas
                param = dbaas.configuration_parameters.get_parameter(
                    instance_info.dbaas_datastore,
                    instance_info.dbaas_datastore_version,
                    item_key)
                if param.type == 'integer':
                    check.has_element(item_key, int)
                if param.type == 'string':
                    check.has_element(item_key, basestring)
                if param.type == 'boolean':
                    check.has_element(item_key, bool)
        # Test to make sure that another user is not able to GET this config
        reqs = Requirements(is_admin=False)
        test_auth_user = instance_info.user.auth_user
        other_user = CONFIG.users.find_user(reqs, black_list=[test_auth_user])
        other_user_tenant_id = other_user.tenant_id
        client_tenant_id = instance_info.user.tenant_id
        # pick again if the "other" user shares the owner's tenant, since
        # same-tenant access would legitimately succeed
        if other_user_tenant_id == client_tenant_id:
            other_user = CONFIG.users.find_user(reqs,
                                                black_list=[
                                                    instance_info.user.auth_user,
                                                    other_user])
        print(other_user)
        print(other_user.__dict__)
        other_client = create_dbaas_client(other_user)
        assert_raises(exceptions.NotFound, other_client.configurations.get,
                      configuration_info.id)
@test(runs_after=[AfterConfigurationsCreation], groups=[GROUP])
class ListConfigurations(ConfigurationsTestBase):
    """List/detail checks and non-dynamic parameter update behavior."""
    @test
    def test_configurations_list(self):
        # test listing configurations show up
        result = instance_info.dbaas.configurations.list()
        for conf in result:
            with TypeCheck("Configuration", conf) as check:
                check.has_field('id', basestring)
                check.has_field('name', basestring)
                check.has_field('description', basestring)
                check.has_field('datastore_version_id', basestring)
                check.has_field('datastore_version_name', basestring)
                check.has_field('datastore_name', basestring)
        # exactly one entry must match the configuration created earlier
        exists = [config for config in result if
                  config.id == configuration_info.id]
        assert_equal(1, len(exists))
        configuration = exists[0]
        assert_equal(configuration.id, configuration_info.id)
        assert_equal(configuration.name, configuration_info.name)
        assert_equal(configuration.description, configuration_info.description)
    @test
    def test_configurations_list_for_instance(self):
        # test getting an instance shows the configuration assigned shows up
        instance = instance_info.dbaas.instances.get(instance_info.id)
        assert_equal(instance.configuration['id'], configuration_info.id)
        assert_equal(instance.configuration['name'], configuration_info.name)
        # expecting two things in links, href and bookmark
        assert_equal(2, len(instance.configuration['links']))
        link = instance.configuration['links'][0]
        # publish the href for StartInstanceWithConfiguration to use
        global configuration_href
        configuration_href = link['href']
    @test
    def test_get_default_configuration_on_instance(self):
        # test the api call to get the default template of an instance exists
        result = instance_info.dbaas.instances.configuration(instance_info.id)
        global configuration_default
        configuration_default = result
        assert_not_equal(None, result.configuration)
    @test
    def test_changing_configuration_with_nondynamic_parameter(self):
        # test that changing a non-dynamic parameter is applied to instance
        # and show that the instance requires a restart
        expected_configs = self.expected_default_datastore_configs()
        values = json.dumps(expected_configs.get('nondynamic_parameter'))
        instance_info.dbaas.configurations.update(configuration_info.id,
                                                  values)
        resp, body = instance_info.dbaas.client.last_response
        # 202: the update is applied asynchronously
        assert_equal(resp.status, 202)
        instance_info.dbaas.configurations.get(configuration_info.id)
        resp, body = instance_info.dbaas.client.last_response
        assert_equal(resp.status, 200)
    @test(depends_on=[test_changing_configuration_with_nondynamic_parameter])
    @time_out(20)
    def test_waiting_for_instance_in_restart_required(self):
        # poll until the instance leaves ACTIVE, then confirm it reports
        # RESTART_REQUIRED (the non-dynamic parameter needs a restart)
        def result_is_not_active():
            instance = instance_info.dbaas.instances.get(
                instance_info.id)
            if instance.status == "ACTIVE":
                return False
            else:
                return True
        poll_until(result_is_not_active)
        instance = instance_info.dbaas.instances.get(instance_info.id)
        resp, body = instance_info.dbaas.client.last_response
        assert_equal(resp.status, 200)
        print(instance.status)
        assert_equal('RESTART_REQUIRED', instance.status)
    @test(depends_on=[test_waiting_for_instance_in_restart_required])
    def test_restart_service_should_return_active(self):
        # test that after restarting the instance it becomes active
        instance_info.dbaas.instances.restart(instance_info.id)
        resp, body = instance_info.dbaas.client.last_response
        assert_equal(resp.status, 202)
        def result_is_active():
            instance = instance_info.dbaas.instances.get(
                instance_info.id)
            if instance.status == "ACTIVE":
                return True
            else:
                # the only other acceptable transient state is REBOOT
                assert_equal("REBOOT", instance.status)
                return False
        poll_until(result_is_active)
    @test(depends_on=[test_restart_service_should_return_active])
    @time_out(30)
    def test_get_configuration_details_from_instance_validation(self):
        # validate that the configuration was applied correctly to the
        # instance
        inst = instance_info.dbaas.instances.get(instance_info.id)
        configuration_id = inst.configuration['id']
        assert_not_equal(None, inst.configuration['id'])
        _test_configuration_is_applied_to_instance(instance_info,
                                                   configuration_id)
    @test(depends_on=[test_configurations_list])
    def test_compare_list_and_details_timestamps(self):
        # compare config timestamps between list and details calls
        result = instance_info.dbaas.configurations.list()
        list_config = [config for config in result if
                       config.id == configuration_info.id]
        assert_equal(1, len(list_config))
        details_config = instance_info.dbaas.configurations.get(
            configuration_info.id)
        assert_equal(list_config[0].created, details_config.created)
        assert_equal(list_config[0].updated, details_config.updated)
@test(runs_after=[ListConfigurations], groups=[GROUP])
class StartInstanceWithConfiguration(ConfigurationsTestBase):
    """Boot a second instance with the configuration attached at create."""
    @test
    def test_start_instance_with_configuration(self):
        # test that a new instance will apply the configuration on create
        if test_config.auth_strategy == "fake":
            raise SkipTest("Skipping instance start with configuration "
                           "test for fake mode.")
        # configuration_instance is a module-level record shared with the
        # wait/delete test classes that run after this one
        global configuration_instance
        databases = []
        databases.append({"name": "firstdbconfig", "character_set": "latin2",
                          "collate": "latin2_general_ci"})
        databases.append({"name": "db2"})
        configuration_instance.databases = databases
        users = []
        users.append({"name": "liteconf", "password": "liteconfpass",
                      "databases": [{"name": "firstdbconfig"}]})
        configuration_instance.users = users
        # timestamped name keeps reruns from colliding
        configuration_instance.name = "TEST_" + str(datetime.now()) + "_config"
        flavor_href = instance_info.dbaas_flavor_href
        configuration_instance.dbaas_flavor_href = flavor_href
        configuration_instance.volume = instance_info.volume
        # configuration_href was published by
        # ListConfigurations.test_configurations_list_for_instance
        result = instance_info.dbaas.instances.create(
            configuration_instance.name,
            configuration_instance.dbaas_flavor_href,
            configuration_instance.volume,
            configuration_instance.databases,
            configuration_instance.users,
            availability_zone="nova",
            configuration=configuration_href)
        assert_equal(200, instance_info.dbaas.last_http_code)
        assert_equal("BUILD", result.status)
        configuration_instance.id = result.id
@test(runs_after=[StartInstanceWithConfiguration], groups=[GROUP])
class WaitForConfigurationInstanceToFinish(ConfigurationsTestBase):
    """Wait for the configuration-booted instance and validate its config."""
    @test
    @time_out(TIMEOUT_INSTANCE_CREATE)
    def test_instance_with_configuration_active(self):
        # wait for the instance to become active
        if test_config.auth_strategy == "fake":
            raise SkipTest("Skipping instance start with configuration "
                           "test for fake mode.")
        def result_is_active():
            instance = instance_info.dbaas.instances.get(
                configuration_instance.id)
            if instance.status == "ACTIVE":
                return True
            else:
                # BUILD is the only acceptable non-ACTIVE state here
                assert_equal("BUILD", instance.status)
                return False
        poll_until(result_is_active)
    @test(depends_on=[test_instance_with_configuration_active])
    @time_out(30)
    def test_get_configuration_details_from_instance_validation(self):
        # validate that the configuration was applied correctly to the
        # instance
        inst = instance_info.dbaas.instances.get(configuration_instance.id)
        configuration_id = inst.configuration['id']
        assert_not_equal(None, inst.configuration['id'])
        _test_configuration_is_applied_to_instance(configuration_instance,
                                                   configuration_id)
@test(runs_after=[WaitForConfigurationInstanceToFinish], groups=[GROUP])
class DeleteConfigurations(ConfigurationsTestBase):
    """Unassign, delete, and clean up the configuration and instances."""
    @test
    def test_delete_invalid_configuration_not_found(self):
        # test deleting a configuration that does not exist throws exception
        invalid_configuration_id = "invalid-config-id"
        assert_raises(exceptions.NotFound,
                      instance_info.dbaas.configurations.delete,
                      invalid_configuration_id)
    @test(depends_on=[test_delete_invalid_configuration_not_found])
    def test_delete_configuration_parameter_with_mgmt_api(self):
        # delete a parameter that is used by a test
        # connect_timeout
        ds = instance_info.dbaas_datastore
        ds_v = instance_info.dbaas_datastore_version
        version = instance_info.dbaas.datastore_versions.get(
            ds, ds_v)
        client = instance_info.dbaas_admin.mgmt_configs
        config_param_name = sql_variables[1]
        client.delete(version.id, config_param_name)
        # once deleted via the mgmt API the parameter must 404 on GET
        assert_raises(
            exceptions.NotFound,
            instance_info.dbaas.configuration_parameters.get_parameter,
            ds,
            ds_v,
            config_param_name)
    @test(depends_on=[test_delete_configuration_parameter_with_mgmt_api])
    def test_unable_delete_instance_configurations(self):
        # test deleting a configuration that is assigned to
        # an instance is not allowed.
        assert_raises(exceptions.BadRequest,
                      instance_info.dbaas.configurations.delete,
                      configuration_info.id)
    @test(depends_on=[test_unable_delete_instance_configurations])
    @time_out(30)
    def test_unassign_configuration_from_instances(self):
        # test to unassign configuration from instance
        # (configuration="" for one instance, configuration=None for the
        # other — both forms must detach the group)
        instance_info.dbaas.instances.modify(configuration_instance.id,
                                             configuration="")
        resp, body = instance_info.dbaas.client.last_response
        assert_equal(resp.status, 202)
        instance_info.dbaas.instances.get(configuration_instance.id)
        # test that config group is not removed
        instance_info.dbaas.instances.modify(instance_info.id,
                                             configuration=None)
        resp, body = instance_info.dbaas.client.last_response
        assert_equal(resp.status, 202)
        instance_info.dbaas.instances.get(instance_info.id)
        # result_has_no_configuration reads inst_info via late binding, so
        # the same closure is reused for both instances below
        def result_has_no_configuration():
            instance = instance_info.dbaas.instances.get(inst_info.id)
            if hasattr(instance, 'configuration'):
                return False
            else:
                return True
        inst_info = instance_info
        poll_until(result_has_no_configuration)
        inst_info = configuration_instance
        poll_until(result_has_no_configuration)
        # detaching a configuration leaves the instance needing a restart
        instance = instance_info.dbaas.instances.get(instance_info.id)
        assert_equal('RESTART_REQUIRED', instance.status)
    @test(depends_on=[test_unassign_configuration_from_instances])
    def test_assign_in_wrong_state(self):
        # test assigning a config to an instance in RESTART state
        assert_raises(exceptions.BadRequest,
                      instance_info.dbaas.instances.modify,
                      configuration_instance.id,
                      configuration=configuration_info.id)
    @test(depends_on=[test_assign_in_wrong_state])
    def test_no_instances_on_configuration(self):
        # test there is no configuration on the instance after unassigning
        result = instance_info.dbaas.configurations.get(configuration_info.id)
        assert_equal(configuration_info.id, result.id)
        assert_equal(configuration_info.name, result.name)
        assert_equal(configuration_info.description, result.description)
        assert_equal(result.instance_count, 0)
        print(configuration_instance.id)
        print(instance_info.id)
    @test(depends_on=[test_no_instances_on_configuration])
    def test_delete_unassigned_configuration(self):
        # test that we can delete the configuration after no instances are
        # assigned to it any longer
        instance_info.dbaas.configurations.delete(configuration_info.id)
        resp, body = instance_info.dbaas.client.last_response
        assert_equal(resp.status, 202)
    @test(depends_on=[test_unassign_configuration_from_instances])
    @time_out(120)
    def test_restart_service_after_unassign_return_active(self):
        # wait for the instance to leave ACTIVE after the unassign, then
        # confirm it reports RESTART_REQUIRED
        def result_is_not_active():
            instance = instance_info.dbaas.instances.get(
                instance_info.id)
            if instance.status == "ACTIVE":
                return False
            else:
                return True
        poll_until(result_is_not_active)
        config = instance_info.dbaas.configurations.list()
        print(config)
        instance = instance_info.dbaas.instances.get(instance_info.id)
        print(instance.__dict__)
        resp, body = instance_info.dbaas.client.last_response
        assert_equal(resp.status, 200)
        print(instance.status)
        assert_equal('RESTART_REQUIRED', instance.status)
    @test(depends_on=[test_restart_service_after_unassign_return_active])
    @time_out(120)
    def test_restart_service_should_return_active(self):
        # test that after restarting the instance it becomes active
        instance_info.dbaas.instances.restart(instance_info.id)
        resp, body = instance_info.dbaas.client.last_response
        assert_equal(resp.status, 202)
        def result_is_active():
            instance = instance_info.dbaas.instances.get(
                instance_info.id)
            if instance.status == "ACTIVE":
                return True
            else:
                # REBOOT is the only acceptable transient state here
                assert_equal("REBOOT", instance.status)
                return False
        poll_until(result_is_active)
    @test(depends_on=[test_delete_unassigned_configuration])
    @time_out(TIMEOUT_INSTANCE_DELETE)
    def test_delete_configuration_instance(self):
        # test that we can delete the instance even though there is a
        # configuration applied to the instance
        instance_info.dbaas.instances.delete(configuration_instance.id)
        assert_equal(202, instance_info.dbaas.last_http_code)
        def instance_is_gone():
            try:
                instance_info.dbaas.instances.get(configuration_instance.id)
                return False
            except exceptions.NotFound:
                return True
        poll_until(instance_is_gone)
        # final sanity check: the instance really is gone
        assert_raises(exceptions.NotFound, instance_info.dbaas.instances.get,
                      configuration_instance.id)
| |
__doc__ = """The model definitions for the pix2pix network taken from the
retina repository at https://github.com/costapt/vess2ret
"""
import os
import keras
from keras import backend as K
from keras import objectives
from keras.layers import Input, merge
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import Convolution2D, Deconvolution2D
from keras.layers.core import Activation, Dropout
from keras.layers.normalization import BatchNormalization
from keras.models import Model
from keras.optimizers import Adam
# Detect the installed Keras major version from its version string; the
# wrapper functions below branch on this flag to call the matching API.
KERAS_2 = keras.__version__[0] == '2'
try:
    # Keras 2 only layers; the import fails on Keras 1 installs.
    from keras.layers.convolutional import Conv2DTranspose
    from keras.layers.merge import Concatenate
except ImportError:
    print("Keras 2 layers could not be imported defaulting to keras1")
    KERAS_2 = False
# Default to Theano-style channels-first ('th') image ordering.
K.set_image_dim_ordering('th')
def concatenate_layers(inputs, concat_axis, mode='concat'):
    """Concatenate ``inputs`` along ``concat_axis``, bridging Keras 1/2 APIs.

    Parameters:
    - inputs: list of tensors to join;
    - concat_axis: axis along which to concatenate;
    - mode: Keras 1 merge mode; under Keras 2 only 'concat' is supported.
    """
    if not KERAS_2:
        # Keras 1 path: the generic functional merge.
        return merge(inputs=inputs, concat_axis=concat_axis, mode=mode)
    assert mode == 'concat', "Only concatenation is supported in this wrapper"
    return Concatenate(axis=concat_axis)(inputs)
def Convolution(f, k=3, s=2, border_mode='same', **kwargs):
    """Build a 2D convolution layer with a version-appropriate signature.

    Parameters:
    - f: number of filters;
    - k: square kernel size;
    - s: stride in both spatial dimensions;
    - border_mode: padding mode.
    """
    if not KERAS_2:
        # Keras 1 takes positional kernel dims and a 'subsample' stride.
        return Convolution2D(f, k, k, border_mode=border_mode,
                             subsample=(s, s), **kwargs)
    return Convolution2D(f, kernel_size=(k, k), padding=border_mode,
                         strides=(s, s), **kwargs)
def Deconvolution(f, output_shape, k=2, s=2, **kwargs):
    """Build a transposed-convolution layer for either Keras version.

    Parameters:
    - f: number of filters;
    - output_shape: full output tensor shape (required by Keras 1;
      NOTE(review): also forwarded to Conv2DTranspose as a keyword —
      confirm the installed Keras 2 version accepts it);
    - k: square kernel size;
    - s: stride in both spatial dimensions.
    """
    if not KERAS_2:
        return Deconvolution2D(f, k, k, output_shape=output_shape,
                               subsample=(s, s), **kwargs)
    return Conv2DTranspose(f,
                           kernel_size=(k, k),
                           output_shape=output_shape,
                           strides=(s, s),
                           data_format=K.image_data_format(),
                           **kwargs)
def BatchNorm(mode=2, axis=1, **kwargs):
    """Convenience method for BatchNormalization layers.

    Parameters:
    - mode: Keras 1 normalization mode (Keras 2 removed this argument,
      so it is ignored on that path);
    - axis: the feature axis to normalize over.
    """
    if KERAS_2:
        # Keras 2 dropped the 'mode' argument entirely.
        return BatchNormalization(axis=axis, **kwargs)
    else:
        # Bug fix: the caller-supplied mode was previously ignored and
        # hard-coded to 2.
        return BatchNormalization(mode=mode, axis=axis, **kwargs)
def g_unet(in_ch, out_ch, nf, batch_size=1, is_binary=False, name='unet'):
# type: (int, int, int, int, bool, str) -> keras.models.Model
"""Define a U-Net.
Input has shape in_ch x 512 x 512
Parameters:
- in_ch: the number of input channels;
- out_ch: the number of output channels;
- nf: the number of filters of the first layer;
- is_binary: if is_binary is true, the last layer is followed by a sigmoid
activation function, otherwise, a tanh is used.
>>> K.set_image_dim_ordering('th')
>>> K.image_data_format()
'channels_first'
>>> unet = g_unet(1, 2, 3, batch_size=5, is_binary=True)
TheanoShapedU-NET
>>> for ilay in unet.layers: ilay.name='_'.join(ilay.name.split('_')[:-1]) # remove layer id
>>> unet.summary() #doctest: +NORMALIZE_WHITESPACE
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input (InputLayer) (None, 1, 512, 512) 0
_________________________________________________________________
conv2d (Conv2D) (None, 3, 256, 256) 30
_________________________________________________________________
batch_normalization (BatchNo (None, 3, 256, 256) 12
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 3, 256, 256) 0
_________________________________________________________________
conv2d (Conv2D) (None, 6, 128, 128) 168
_________________________________________________________________
batch_normalization (BatchNo (None, 6, 128, 128) 24
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 6, 128, 128) 0
_________________________________________________________________
conv2d (Conv2D) (None, 12, 64, 64) 660
_________________________________________________________________
batch_normalization (BatchNo (None, 12, 64, 64) 48
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 12, 64, 64) 0
_________________________________________________________________
conv2d (Conv2D) (None, 24, 32, 32) 2616
_________________________________________________________________
batch_normalization (BatchNo (None, 24, 32, 32) 96
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 24, 32, 32) 0
_________________________________________________________________
conv2d (Conv2D) (None, 24, 16, 16) 5208
_________________________________________________________________
batch_normalization (BatchNo (None, 24, 16, 16) 96
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 24, 16, 16) 0
_________________________________________________________________
conv2d (Conv2D) (None, 24, 8, 8) 5208
_________________________________________________________________
batch_normalization (BatchNo (None, 24, 8, 8) 96
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 24, 8, 8) 0
_________________________________________________________________
conv2d (Conv2D) (None, 24, 4, 4) 5208
_________________________________________________________________
batch_normalization (BatchNo (None, 24, 4, 4) 96
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 24, 4, 4) 0
_________________________________________________________________
conv2d (Conv2D) (None, 24, 2, 2) 5208
_________________________________________________________________
batch_normalization (BatchNo (None, 24, 2, 2) 96
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 24, 2, 2) 0
_________________________________________________________________
conv2d (Conv2D) (None, 24, 1, 1) 2328
_________________________________________________________________
batch_normalization (BatchNo (None, 24, 1, 1) 96
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 24, 1, 1) 0
_________________________________________________________________
conv2d_transpose (Conv2DTran (None, 24, 2, 2) 2328
_________________________________________________________________
batch_normalization (BatchNo (None, 24, 2, 2) 96
_________________________________________________________________
dropout (Dropout) (None, 24, 2, 2) 0
_________________________________________________________________
concatenate (Concatenate) (None, 48, 2, 2) 0
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 48, 2, 2) 0
_________________________________________________________________
conv2d_transpose (Conv2DTran (None, 24, 4, 4) 4632
_________________________________________________________________
batch_normalization (BatchNo (None, 24, 4, 4) 96
_________________________________________________________________
dropout (Dropout) (None, 24, 4, 4) 0
_________________________________________________________________
concatenate (Concatenate) (None, 48, 4, 4) 0
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 48, 4, 4) 0
_________________________________________________________________
conv2d_transpose (Conv2DTran (None, 24, 8, 8) 4632
_________________________________________________________________
batch_normalization (BatchNo (None, 24, 8, 8) 96
_________________________________________________________________
dropout (Dropout) (None, 24, 8, 8) 0
_________________________________________________________________
concatenate (Concatenate) (None, 48, 8, 8) 0
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 48, 8, 8) 0
_________________________________________________________________
conv2d_transpose (Conv2DTran (None, 24, 16, 16) 4632
_________________________________________________________________
batch_normalization (BatchNo (None, 24, 16, 16) 96
_________________________________________________________________
concatenate (Concatenate) (None, 48, 16, 16) 0
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 48, 16, 16) 0
_________________________________________________________________
conv2d_transpose (Conv2DTran (None, 24, 32, 32) 4632
_________________________________________________________________
batch_normalization (BatchNo (None, 24, 32, 32) 96
_________________________________________________________________
concatenate (Concatenate) (None, 48, 32, 32) 0
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 48, 32, 32) 0
_________________________________________________________________
conv2d_transpose (Conv2DTran (None, 12, 64, 64) 2316
_________________________________________________________________
batch_normalization (BatchNo (None, 12, 64, 64) 48
_________________________________________________________________
concatenate (Concatenate) (None, 24, 64, 64) 0
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 24, 64, 64) 0
_________________________________________________________________
conv2d_transpose (Conv2DTran (None, 6, 128, 128) 582
_________________________________________________________________
batch_normalization (BatchNo (None, 6, 128, 128) 24
_________________________________________________________________
concatenate (Concatenate) (None, 12, 128, 128) 0
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 12, 128, 128) 0
_________________________________________________________________
conv2d_transpose (Conv2DTran (None, 3, 256, 256) 147
_________________________________________________________________
batch_normalization (BatchNo (None, 3, 256, 256) 12
_________________________________________________________________
concatenate (Concatenate) (None, 6, 256, 256) 0
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 6, 256, 256) 0
_________________________________________________________________
conv2d_transpose (Conv2DTran (None, 2, 512, 512) 50
_________________________________________________________________
activation (Activation) (None, 2, 512, 512) 0
=================================================================
Total params: 51,809.0
Trainable params: 51,197.0
Non-trainable params: 612.0
_________________________________________________________________
>>> K.set_image_dim_ordering('tf')
>>> K.image_data_format()
'channels_last'
>>> unet2=g_unet(3, 4, 2, batch_size=7, is_binary=False)
TensorflowShapedU-NET
>>> for ilay in unet2.layers: ilay.name='_'.join(ilay.name.split('_')[:-1]) # remove layer id
>>> unet2.summary() #doctest: +NORMALIZE_WHITESPACE
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input (InputLayer) (None, 512, 512, 3) 0
_________________________________________________________________
conv2d (Conv2D) (None, 256, 256, 2) 56
_________________________________________________________________
batch_normalization (BatchNo (None, 256, 256, 2) 1024
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 256, 256, 2) 0
_________________________________________________________________
conv2d (Conv2D) (None, 128, 128, 4) 76
_________________________________________________________________
batch_normalization (BatchNo (None, 128, 128, 4) 512
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 128, 128, 4) 0
_________________________________________________________________
conv2d (Conv2D) (None, 64, 64, 8) 296
_________________________________________________________________
batch_normalization (BatchNo (None, 64, 64, 8) 256
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 64, 64, 8) 0
_________________________________________________________________
conv2d (Conv2D) (None, 32, 32, 16) 1168
_________________________________________________________________
batch_normalization (BatchNo (None, 32, 32, 16) 128
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 32, 32, 16) 0
_________________________________________________________________
conv2d (Conv2D) (None, 16, 16, 16) 2320
_________________________________________________________________
batch_normalization (BatchNo (None, 16, 16, 16) 64
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 16, 16, 16) 0
_________________________________________________________________
conv2d (Conv2D) (None, 8, 8, 16) 2320
_________________________________________________________________
batch_normalization (BatchNo (None, 8, 8, 16) 32
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 8, 8, 16) 0
_________________________________________________________________
conv2d (Conv2D) (None, 4, 4, 16) 2320
_________________________________________________________________
batch_normalization (BatchNo (None, 4, 4, 16) 16
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 4, 4, 16) 0
_________________________________________________________________
conv2d (Conv2D) (None, 2, 2, 16) 2320
_________________________________________________________________
batch_normalization (BatchNo (None, 2, 2, 16) 8
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 2, 2, 16) 0
_________________________________________________________________
conv2d (Conv2D) (None, 1, 1, 16) 1040
_________________________________________________________________
batch_normalization (BatchNo (None, 1, 1, 16) 4
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 1, 1, 16) 0
_________________________________________________________________
conv2d_transpose (Conv2DTran (None, 2, 2, 16) 1040
_________________________________________________________________
batch_normalization (BatchNo (None, 2, 2, 16) 8
_________________________________________________________________
dropout (Dropout) (None, 2, 2, 16) 0
_________________________________________________________________
concatenate (Concatenate) (None, 2, 2, 32) 0
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 2, 2, 32) 0
_________________________________________________________________
conv2d_transpose (Conv2DTran (None, 4, 4, 16) 2064
_________________________________________________________________
batch_normalization (BatchNo (None, 4, 4, 16) 16
_________________________________________________________________
dropout (Dropout) (None, 4, 4, 16) 0
_________________________________________________________________
concatenate (Concatenate) (None, 4, 4, 32) 0
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 4, 4, 32) 0
_________________________________________________________________
conv2d_transpose (Conv2DTran (None, 8, 8, 16) 2064
_________________________________________________________________
batch_normalization (BatchNo (None, 8, 8, 16) 32
_________________________________________________________________
dropout (Dropout) (None, 8, 8, 16) 0
_________________________________________________________________
concatenate (Concatenate) (None, 8, 8, 32) 0
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 8, 8, 32) 0
_________________________________________________________________
conv2d_transpose (Conv2DTran (None, 16, 16, 16) 2064
_________________________________________________________________
batch_normalization (BatchNo (None, 16, 16, 16) 64
_________________________________________________________________
concatenate (Concatenate) (None, 16, 16, 32) 0
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 16, 16, 32) 0
_________________________________________________________________
conv2d_transpose (Conv2DTran (None, 32, 32, 16) 2064
_________________________________________________________________
batch_normalization (BatchNo (None, 32, 32, 16) 128
_________________________________________________________________
concatenate (Concatenate) (None, 32, 32, 32) 0
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 32, 32, 32) 0
_________________________________________________________________
conv2d_transpose (Conv2DTran (None, 64, 64, 8) 1032
_________________________________________________________________
batch_normalization (BatchNo (None, 64, 64, 8) 256
_________________________________________________________________
concatenate (Concatenate) (None, 64, 64, 16) 0
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 64, 64, 16) 0
_________________________________________________________________
conv2d_transpose (Conv2DTran (None, 128, 128, 4) 260
_________________________________________________________________
batch_normalization (BatchNo (None, 128, 128, 4) 512
_________________________________________________________________
concatenate (Concatenate) (None, 128, 128, 8) 0
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 128, 128, 8) 0
_________________________________________________________________
conv2d_transpose (Conv2DTran (None, 256, 256, 2) 66
_________________________________________________________________
batch_normalization (BatchNo (None, 256, 256, 2) 1024
_________________________________________________________________
concatenate (Concatenate) (None, 256, 256, 4) 0
_________________________________________________________________
leaky_re_lu (LeakyReLU) (None, 256, 256, 4) 0
_________________________________________________________________
conv2d_transpose (Conv2DTran (None, 512, 512, 4) 68
_________________________________________________________________
activation (Activation) (None, 512, 512, 4) 0
=================================================================
Total params: 26,722.0
Trainable params: 24,680.0
Non-trainable params: 2,042.0
_________________________________________________________________
"""
merge_params = {
'mode': 'concat',
'concat_axis': 1
}
if K.image_dim_ordering() == 'th':
print('TheanoShapedU-NET')
i = Input(shape=(in_ch, 512, 512))
def get_deconv_shape(samples, channels, x_dim, y_dim):
return samples, channels, x_dim, y_dim
elif K.image_dim_ordering() == 'tf':
i = Input(shape=(512, 512, in_ch))
print('TensorflowShapedU-NET')
def get_deconv_shape(samples, channels, x_dim, y_dim):
return samples, x_dim, y_dim, channels
merge_params['concat_axis'] = 3
else:
raise ValueError(
'Keras dimension ordering not supported: {}'.format(
K.image_dim_ordering()))
# in_ch x 512 x 512
conv1 = Convolution(nf)(i)
conv1 = BatchNorm()(conv1)
x = LeakyReLU(0.2)(conv1)
# nf x 256 x 256
conv2 = Convolution(nf * 2)(x)
conv2 = BatchNorm()(conv2)
x = LeakyReLU(0.2)(conv2)
# nf*2 x 128 x 128
conv3 = Convolution(nf * 4)(x)
conv3 = BatchNorm()(conv3)
x = LeakyReLU(0.2)(conv3)
# nf*4 x 64 x 64
conv4 = Convolution(nf * 8)(x)
conv4 = BatchNorm()(conv4)
x = LeakyReLU(0.2)(conv4)
# nf*8 x 32 x 32
conv5 = Convolution(nf * 8)(x)
conv5 = BatchNorm()(conv5)
x = LeakyReLU(0.2)(conv5)
# nf*8 x 16 x 16
conv6 = Convolution(nf * 8)(x)
conv6 = BatchNorm()(conv6)
x = LeakyReLU(0.2)(conv6)
# nf*8 x 8 x 8
conv7 = Convolution(nf * 8)(x)
conv7 = BatchNorm()(conv7)
x = LeakyReLU(0.2)(conv7)
# nf*8 x 4 x 4
conv8 = Convolution(nf * 8)(x)
conv8 = BatchNorm()(conv8)
x = LeakyReLU(0.2)(conv8)
# nf*8 x 2 x 2
conv9 = Convolution(nf * 8, k=2, s=1, border_mode='valid')(x)
conv9 = BatchNorm()(conv9)
x = LeakyReLU(0.2)(conv9)
# nf*8 x 1 x 1
dconv1 = Deconvolution(nf * 8,
get_deconv_shape(batch_size, nf * 8, 2, 2),
k=2, s=1)(x)
dconv1 = BatchNorm()(dconv1)
dconv1 = Dropout(0.5)(dconv1)
x = concatenate_layers([dconv1, conv8], **merge_params)
x = LeakyReLU(0.2)(x)
# nf*(8 + 8) x 2 x 2
dconv2 = Deconvolution(nf * 8,
get_deconv_shape(batch_size, nf * 8, 4, 4))(x)
dconv2 = BatchNorm()(dconv2)
dconv2 = Dropout(0.5)(dconv2)
x = concatenate_layers([dconv2, conv7], **merge_params)
x = LeakyReLU(0.2)(x)
# nf*(8 + 8) x 4 x 4
dconv3 = Deconvolution(nf * 8,
get_deconv_shape(batch_size, nf * 8, 8, 8))(x)
dconv3 = BatchNorm()(dconv3)
dconv3 = Dropout(0.5)(dconv3)
x = concatenate_layers([dconv3, conv6], **merge_params)
x = LeakyReLU(0.2)(x)
# nf*(8 + 8) x 8 x 8
dconv4 = Deconvolution(nf * 8,
get_deconv_shape(batch_size, nf * 8, 16, 16))(x)
dconv4 = BatchNorm()(dconv4)
x = concatenate_layers([dconv4, conv5], **merge_params)
x = LeakyReLU(0.2)(x)
# nf*(8 + 8) x 16 x 16
dconv5 = Deconvolution(nf * 8,
get_deconv_shape(batch_size, nf * 8, 32, 32))(x)
dconv5 = BatchNorm()(dconv5)
x = concatenate_layers([dconv5, conv4], **merge_params)
x = LeakyReLU(0.2)(x)
# nf*(8 + 8) x 32 x 32
dconv6 = Deconvolution(nf * 4,
get_deconv_shape(batch_size, nf * 4, 64, 64))(x)
dconv6 = BatchNorm()(dconv6)
x = concatenate_layers([dconv6, conv3], **merge_params)
x = LeakyReLU(0.2)(x)
# nf*(4 + 4) x 64 x 64
dconv7 = Deconvolution(nf * 2,
get_deconv_shape(batch_size, nf * 2, 128, 128))(x)
dconv7 = BatchNorm()(dconv7)
x = concatenate_layers([dconv7, conv2], **merge_params)
x = LeakyReLU(0.2)(x)
# nf*(2 + 2) x 128 x 128
dconv8 = Deconvolution(nf,
get_deconv_shape(batch_size, nf, 256, 256))(x)
dconv8 = BatchNorm()(dconv8)
x = concatenate_layers([dconv8, conv1], **merge_params)
x = LeakyReLU(0.2)(x)
# nf*(1 + 1) x 256 x 256
dconv9 = Deconvolution(out_ch,
get_deconv_shape(batch_size, out_ch, 512, 512))(x)
# out_ch x 512 x 512
act = 'sigmoid' if is_binary else 'tanh'
out = Activation(act)(dconv9)
unet = Model(i, out, name=name)
return unet
# NOTE(review): the Adam(...) default argument is constructed once at import
# time and shared by every call that relies on the default — confirm intended.
def discriminator(a_ch, b_ch, nf, opt=Adam(lr=2e-4, beta_1=0.5), name='d'):
    """Define the discriminator network.
    Parameters:
    - a_ch: the number of channels of the first image;
    - b_ch: the number of channels of the second image;
    - nf: the number of filters of the first layer.
    >>> K.set_image_dim_ordering('th')
    >>> disc=discriminator(3,4,2)
    >>> for ilay in disc.layers: ilay.name='_'.join(ilay.name.split('_')[:-1]) # remove layer id
    >>> disc.summary() #doctest: +NORMALIZE_WHITESPACE
    _________________________________________________________________
    Layer (type) Output Shape Param #
    =================================================================
    input (InputLayer) (None, 7, 512, 512) 0
    _________________________________________________________________
    conv2d (Conv2D) (None, 2, 256, 256) 128
    _________________________________________________________________
    leaky_re_lu (LeakyReLU) (None, 2, 256, 256) 0
    _________________________________________________________________
    conv2d (Conv2D) (None, 4, 128, 128) 76
    _________________________________________________________________
    leaky_re_lu (LeakyReLU) (None, 4, 128, 128) 0
    _________________________________________________________________
    conv2d (Conv2D) (None, 8, 64, 64) 296
    _________________________________________________________________
    leaky_re_lu (LeakyReLU) (None, 8, 64, 64) 0
    _________________________________________________________________
    conv2d (Conv2D) (None, 16, 32, 32) 1168
    _________________________________________________________________
    leaky_re_lu (LeakyReLU) (None, 16, 32, 32) 0
    _________________________________________________________________
    conv2d (Conv2D) (None, 1, 16, 16) 145
    _________________________________________________________________
    activation (Activation) (None, 1, 16, 16) 0
    =================================================================
    Total params: 1,813.0
    Trainable params: 1,813.0
    Non-trainable params: 0.0
    _________________________________________________________________
    """
    # The discriminator sees the A/B image pair stacked on the channel axis.
    i = Input(shape=(a_ch + b_ch, 512, 512))
    # (a_ch + b_ch) x 512 x 512
    conv1 = Convolution(nf)(i)
    x = LeakyReLU(0.2)(conv1)
    # nf x 256 x 256
    conv2 = Convolution(nf * 2)(x)
    x = LeakyReLU(0.2)(conv2)
    # nf*2 x 128 x 128
    conv3 = Convolution(nf * 4)(x)
    x = LeakyReLU(0.2)(conv3)
    # nf*4 x 64 x 64
    conv4 = Convolution(nf * 8)(x)
    x = LeakyReLU(0.2)(conv4)
    # nf*8 x 32 x 32
    conv5 = Convolution(1)(x)
    # Sigmoid output: per-patch "real" probability map (PatchGAN style output,
    # one value per 16x16 region of the input pair).
    out = Activation('sigmoid')(conv5)
    # 1 x 16 x 16
    d = Model(i, out, name=name)

    def d_loss(y_true, y_pred):
        # Plain binary cross-entropy over the flattened patch predictions.
        L = objectives.binary_crossentropy(K.batch_flatten(y_true),
                                           K.batch_flatten(y_pred))
        return L
    d.compile(optimizer=opt, loss=d_loss)
    return d
# NOTE(review): the Adam(...) default argument is constructed once at import
# time and shared by every call that relies on the default — confirm intended.
def pix2pix(atob, d, a_ch, b_ch, alpha=100, is_a_binary=False,
            is_b_binary=False, opt=Adam(lr=2e-4, beta_1=0.5), name='pix2pix'):
    # type: (...) -> keras.models.Model
    """
    Define the pix2pix network.
    :param atob:
    :param d:
    :param a_ch:
    :param b_ch:
    :param alpha:
    :param is_a_binary:
    :param is_b_binary:
    :param opt:
    :param name:
    :return:
    >>> K.set_image_dim_ordering('th')
    >>> unet = g_unet(3, 4, 2, batch_size=8, is_binary=False)
    TheanoShapedU-NET
    >>> disc=discriminator(3,4,2)
    >>> pp_net=pix2pix(unet, disc, 3, 4)
    >>> for ilay in pp_net.layers: ilay.name='_'.join(ilay.name.split('_')[:-1]) # remove layer id
    >>> pp_net.summary() #doctest: +NORMALIZE_WHITESPACE
    _________________________________________________________________
    Layer (type) Output Shape Param #
    =================================================================
    input (InputLayer) (None, 3, 512, 512) 0
    _________________________________________________________________
    (Model) (None, 4, 512, 512) 23454
    _________________________________________________________________
    concatenate (Concatenate) (None, 7, 512, 512) 0
    _________________________________________________________________
    (Model) (None, 1, 16, 16) 1813
    =================================================================
    Total params: 25,267.0
    Trainable params: 24,859.0
    Non-trainable params: 408.0
    _________________________________________________________________
    """
    a = Input(shape=(a_ch, 512, 512))
    # 'b' is only consumed inside the loss closure below, never by the graph
    # between inputs and outputs — it carries the ground-truth target.
    b = Input(shape=(b_ch, 512, 512))
    # A -> B'
    bp = atob(a)
    # Discriminator receives the pair of images
    d_in = concatenate_layers([a, bp], mode='concat', concat_axis=1)
    pix2pix = Model([a, b], d(d_in), name=name)

    def pix2pix_loss(y_true, y_pred):
        y_true_flat = K.batch_flatten(y_true)
        y_pred_flat = K.batch_flatten(y_pred)
        # Adversarial Loss
        L_adv = objectives.binary_crossentropy(y_true_flat, y_pred_flat)
        # A to B loss
        b_flat = K.batch_flatten(b)
        bp_flat = K.batch_flatten(bp)
        if is_b_binary:
            L_atob = objectives.binary_crossentropy(b_flat, bp_flat)
        else:
            # L1 reconstruction loss, weighted by alpha relative to L_adv.
            L_atob = K.mean(K.abs(b_flat - bp_flat))
        return L_adv + alpha * L_atob
    # This network is used to train the generator. Freeze the discriminator part.
    pix2pix.get_layer('d').trainable = False
    pix2pix.compile(optimizer=opt, loss=pix2pix_loss)
    return pix2pix
if __name__ == '__main__':
    import doctest
    # Choose which Keras backend the doctests exercise.
    TEST_TF = True
    if TEST_TF:
        os.environ['KERAS_BACKEND'] = 'tensorflow'
    else:
        os.environ['KERAS_BACKEND'] = 'theano'
    # NOTE(review): KERAS_BACKEND only takes effect if keras has not already
    # been imported by the time this runs — this module appears to import
    # keras at load time, so verify the switch actually works.
    # NOTE(review): doctest.testsource expects (module, name) arguments;
    # confirm passing the filename string 'models.py' behaves as intended
    # (doctest.testmod may be the intended call).
    doctest.testsource('models.py', verbose=True, optionflags=doctest.ELLIPSIS)
| |
from __future__ import unicode_literals
import logging
import sys
import types
from django import http
from django.conf import settings
from django.core import exceptions
from django.core import urlresolvers
from django.core import signals
from django.core.exceptions import MiddlewareNotUsed, PermissionDenied
from django.utils.encoding import force_text
from django.utils.module_loading import import_by_path
from django.utils import six
from django.views import debug
logger = logging.getLogger('django.request')
class BaseHandler(object):
    # Changes that are always applied to a response (in this order).
    response_fixes = [
        http.fix_location_header,
        http.conditional_content_removal,
        http.fix_IE_for_attach,
        http.fix_IE_for_vary,
    ]

    def __init__(self):
        # All middleware lists start out as None; load_middleware() fills
        # them in, and _request_middleware doubles as the "initialized" flag.
        self._request_middleware = self._view_middleware = self._template_response_middleware = self._response_middleware = self._exception_middleware = None

    def load_middleware(self):
        """
        Populate middleware lists from settings.MIDDLEWARE_CLASSES.
        Must be called after the environment is fixed (see __call__ in subclasses).
        """
        self._view_middleware = []
        self._template_response_middleware = []
        self._response_middleware = []
        self._exception_middleware = []
        request_middleware = []
        for middleware_path in settings.MIDDLEWARE_CLASSES:
            mw_class = import_by_path(middleware_path)
            try:
                mw_instance = mw_class()
            except MiddlewareNotUsed:
                # A middleware may opt out of the stack by raising in __init__.
                continue
            if hasattr(mw_instance, 'process_request'):
                request_middleware.append(mw_instance.process_request)
            if hasattr(mw_instance, 'process_view'):
                self._view_middleware.append(mw_instance.process_view)
            # Response-phase hooks run in the reverse of settings order,
            # hence insert(0, ...) instead of append().
            if hasattr(mw_instance, 'process_template_response'):
                self._template_response_middleware.insert(0, mw_instance.process_template_response)
            if hasattr(mw_instance, 'process_response'):
                self._response_middleware.insert(0, mw_instance.process_response)
            if hasattr(mw_instance, 'process_exception'):
                self._exception_middleware.insert(0, mw_instance.process_exception)
        # We only assign to this when initialization is complete as it is used
        # as a flag for initialization being complete.
        self._request_middleware = request_middleware

    def get_response(self, request):
        "Returns an HttpResponse object for the given HttpRequest"
        try:
            # Setup default url resolver for this thread, this code is outside
            # the try/except so we don't get a spurious "unbound local
            # variable" exception in the event an exception is raised before
            # resolver is set
            urlconf = settings.ROOT_URLCONF
            urlresolvers.set_urlconf(urlconf)
            resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
            try:
                response = None
                # Apply request middleware
                for middleware_method in self._request_middleware:
                    response = middleware_method(request)
                    if response:
                        break
                if response is None:
                    if hasattr(request, 'urlconf'):
                        # Reset url resolver with a custom urlconf.
                        urlconf = request.urlconf
                        urlresolvers.set_urlconf(urlconf)
                        resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
                    resolver_match = resolver.resolve(request.path_info)
                    callback, callback_args, callback_kwargs = resolver_match
                    request.resolver_match = resolver_match
                    # Apply view middleware
                    for middleware_method in self._view_middleware:
                        response = middleware_method(request, callback, callback_args, callback_kwargs)
                        if response:
                            break
                if response is None:
                    try:
                        response = callback(request, *callback_args, **callback_kwargs)
                    except Exception as e:
                        # If the view raised an exception, run it through exception
                        # middleware, and if the exception middleware returns a
                        # response, use that. Otherwise, reraise the exception.
                        for middleware_method in self._exception_middleware:
                            response = middleware_method(request, e)
                            if response:
                                break
                        if response is None:
                            raise
                # Complain if the view returned None (a common error).
                if response is None:
                    if isinstance(callback, types.FunctionType):  # FBV
                        view_name = callback.__name__
                    else:  # CBV
                        view_name = callback.__class__.__name__ + '.__call__'
                    raise ValueError("The view %s.%s didn't return an HttpResponse object." % (callback.__module__, view_name))
                # If the response supports deferred rendering, apply template
                # response middleware and then render the response
                if hasattr(response, 'render') and callable(response.render):
                    for middleware_method in self._template_response_middleware:
                        response = middleware_method(request, response)
                    response = response.render()
            except http.Http404 as e:
                logger.warning('Not Found: %s', request.path,
                               extra={
                                   'status_code': 404,
                                   'request': request
                               })
                if settings.DEBUG:
                    response = debug.technical_404_response(request, e)
                else:
                    try:
                        callback, param_dict = resolver.resolve404()
                        response = callback(request, **param_dict)
                    except:
                        # The 404 handler itself failed; fall back to a 500.
                        signals.got_request_exception.send(sender=self.__class__, request=request)
                        response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
            except PermissionDenied:
                logger.warning(
                    'Forbidden (Permission denied): %s', request.path,
                    extra={
                        'status_code': 403,
                        'request': request
                    })
                try:
                    callback, param_dict = resolver.resolve403()
                    response = callback(request, **param_dict)
                except:
                    # The 403 handler itself failed; fall back to a 500.
                    signals.got_request_exception.send(
                        sender=self.__class__, request=request)
                    response = self.handle_uncaught_exception(request,
                                                              resolver, sys.exc_info())
            except SystemExit:
                # Allow sys.exit() to actually exit. See tickets #1023 and #4701
                raise
            except:  # Handle everything else, including SuspiciousOperation, etc.
                # Get the exception info now, in case another exception is thrown later.
                signals.got_request_exception.send(sender=self.__class__, request=request)
                response = self.handle_uncaught_exception(request, resolver, sys.exc_info())
        finally:
            # Reset URLconf for this thread on the way out for complete
            # isolation of request.urlconf
            urlresolvers.set_urlconf(None)

        try:
            # Apply response middleware, regardless of the response
            for middleware_method in self._response_middleware:
                response = middleware_method(request, response)
            response = self.apply_response_fixes(request, response)
        except:  # Any exception should be gathered and handled
            signals.got_request_exception.send(sender=self.__class__, request=request)
            response = self.handle_uncaught_exception(request, resolver, sys.exc_info())

        return response

    def handle_uncaught_exception(self, request, resolver, exc_info):
        """
        Processing for any otherwise uncaught exceptions (those that will
        generate HTTP 500 responses). Can be overridden by subclasses who want
        customised 500 handling.
        Be *very* careful when overriding this because the error could be
        caused by anything, so assuming something like the database is always
        available would be an error.
        """
        if settings.DEBUG_PROPAGATE_EXCEPTIONS:
            # Bare raise re-raises the exception currently being handled;
            # this method is only called from inside an except block.
            raise

        logger.error('Internal Server Error: %s', request.path,
                     exc_info=exc_info,
                     extra={
                         'status_code': 500,
                         'request': request
                     }
                     )
        if settings.DEBUG:
            return debug.technical_500_response(request, *exc_info)

        # If Http500 handler is not installed, re-raise last exception
        if resolver.urlconf_module is None:
            six.reraise(*exc_info)
        # Return an HttpResponse that displays a friendly error message.
        callback, param_dict = resolver.resolve500()
        return callback(request, **param_dict)

    def apply_response_fixes(self, request, response):
        """
        Applies each of the functions in self.response_fixes to the request and
        response, modifying the response in the process. Returns the new
        response.
        """
        for func in self.response_fixes:
            response = func(request, response)
        return response
def get_path_info(environ):
    """
    Returns the HTTP request's PATH_INFO as a unicode string.
    """
    raw_path = environ.get('PATH_INFO', str('/'))
    # Under Python 3, WSGI environ strings arrive decoded with ISO-8859-1;
    # round-trip through bytes to recover the webserver's original octets.
    if six.PY3:
        raw_path = raw_path.encode('iso-8859-1')
    # It'd be better to implement URI-to-IRI decoding, see #19508.
    return raw_path.decode('utf-8')
def get_script_name(environ):
    """
    Returns the equivalent of the HTTP request's SCRIPT_NAME environment
    variable. If Apache mod_rewrite has been used, returns what would have been
    the script name prior to any rewriting (so it's the script name as seen
    from the client's perspective), unless the FORCE_SCRIPT_NAME setting is
    set (to anything).
    """
    forced = settings.FORCE_SCRIPT_NAME
    if forced is not None:
        return force_text(forced)
    # If Apache's mod_rewrite had a whack at the URL, Apache set either
    # SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any
    # rewrites. Unfortunately not every Web server (lighttpd!) passes this
    # information through all the time, so FORCE_SCRIPT_NAME, above, is still
    # needed.
    rewritten_url = environ.get('SCRIPT_URL', environ.get('REDIRECT_URL', str('')))
    if rewritten_url:
        # Strip the trailing PATH_INFO portion to recover the script prefix.
        trailing = environ.get('PATH_INFO', str(''))
        script_name = rewritten_url[:-len(trailing)]
    else:
        script_name = environ.get('SCRIPT_NAME', str(''))
    # Under Python 3, strings in environ are decoded with ISO-8859-1;
    # re-encode to recover the original bytestring provided by the webserver.
    if six.PY3:
        script_name = script_name.encode('iso-8859-1')
    # It'd be better to implement URI-to-IRI decoding, see #19508.
    return script_name.decode('utf-8')
| |
# Copyright (c) 2008 Nokia Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Wrapper module for e32socket (_socket), providing some additional
# facilities implemented in Python.
from e32socket import *
import os
_default_access_point = None
__all__ = ["getfqdn","getservbyname","getaddrinfo","gethostname"]
import e32socket
__all__.extend(os._get_exports_list(e32socket))
__all__ += ('_socketobject','_fileobject')
# Release 1.0 wanted the e32socket.socket object as the
# argument for some methods. To preserve compatibility,
# we accept both that and the proper type (_socketobject).
def _unwrap(sock):
    """Return the underlying native socket.

    Accepts either a _socketobject wrapper or a raw e32socket object,
    for compatibility with code written against release 1.0.
    """
    if isinstance(sock, _socketobject):
        return sock._sock
    return sock
# Bluetooth / security helpers: thin wrappers that unwrap a _socketobject
# (or accept a raw native socket) before delegating to e32socket.
def bt_advertise_service(name, socket, flag, class_):
    # Advertise an RFCOMM/OBEX service on the given listening socket.
    return e32socket.bt_advertise_service(name,_unwrap(socket),flag,class_)
def bt_obex_receive(socket, filename):
    # Receive a file over OBEX into 'filename'.
    return e32socket.bt_obex_receive(_unwrap(socket), filename)
def bt_rfcomm_get_available_server_channel(socket):
    # Query a free RFCOMM server channel for the socket.
    return e32socket.bt_rfcomm_get_available_server_channel(_unwrap(socket))
def set_security(socket,mode):
    # Set the Bluetooth security mode on the underlying native socket.
    return e32socket.set_security(_unwrap(socket),mode)
def gethostname():
    """Return the local host name.

    The platform has no meaningful resolvable host name, so a fixed
    loopback name is reported.
    """
    return "localhost"
def gethostbyaddr(addr):
    # Resolve 'addr' (host name or dotted-quad string) via the native
    # resolver. Returns (hostname, aliaslist, ipaddrlist), coercing the
    # hostname to a plain str.
    if not _isnumericipaddr(addr):
        # The native call expects a numeric address; resolve names first.
        addr = gethostbyname(addr)
    (hostname, aliaslist, ipaddrlist) = e32socket.gethostbyaddr(addr)
    return (str(hostname), aliaslist, ipaddrlist)
# Keep a reference to the native constructor before shadowing the name below.
_realsocketcall = e32socket.socket
def socket(family, type, proto=0, apo=None):
    # Factory: create the native socket and wrap it in _socketobject.
    # 'apo' is presumably the Symbian access-point object selecting the
    # bearer (GPRS/WLAN) — verify against the e32socket documentation.
    return _socketobject(_realsocketcall(family, type, proto, apo), family)
try:
_realsslcall = e32socket.ssl
except AttributeError:
pass # No ssl
else:
def ssl(sock, keyfile=None, certfile=None, hostname=None):
realsock=getattr(sock, "_sock", sock)
# Note: this is just a stopgap hack while waiting for proper SSL error handling.
# Until that time, SSL operations _will not_ raise sslerror properly as they should.
SSL_ERROR_NONE=0
SSL_ERROR_SSL=1
SSL_ERROR_WANT_READ=2
SSL_ERROR_WANT_WRITE=3
SSL_ERROR_WANT_X509_LOOKUP=4
SSL_ERROR_SYSCALL=5
SSL_ERROR_ZERO_RETURN=6
SSL_ERROR_WANT_CONNECT=7
SSL_ERROR_EOF=8
SSL_ERROR_INVALID_ERROR_CODE=9
class sslerror(Exception):
    """Raised for SSL-level socket failures (see SSL_ERROR_* codes)."""
del os
AF_UNSPEC = 0
GAI_ANY = 0
EAI_ADDRFAMILY = 1
EAI_AGAIN = 2
EAI_BADFLAGS = 3
EAI_FAIL = 4
EAI_FAMILY = 5
EAI_MEMORY = 6
EAI_NODATA = 7
EAI_NONAME = 8
EAI_SERVICE = 9
EAI_SOCKTYPE = 10
EAI_SYSTEM = 11
EAI_BADHINTS = 12
EAI_PROTOCOL = 13
EAI_MAX = 14
AI_PASSIVE = 0x00000001
AI_CANONNAME = 0x00000002
AI_NUMERICHOST = 0x00000004
def _isnumeric(value):
try:
tmp = int(value)
except:
return False
return True
def _isnumericipaddr(addr):
    """Return True when every dot-separated field of *addr* is numeric."""
    return all(_isnumeric(field) for field in addr.split('.'))
service_db = {'echo':[('tcp',7),('udp',7)],
'ftp-data':[('tcp',20),('udp',20)],
'ftp':[('tcp',21),('udp',21)],
'ssh':[('tcp',22),('udp',22)],
'telnet':[('tcp',23),('udp',23)],
'smtp':[('tcp',25),('udp',25)],
'time':[('tcp',37),('udp',37)],
'domain':[('tcp',53),('udp',53)],
'tftp':[('tcp',69),('udp',69)],
'http':[('tcp',80),('udp',80)],
'www-http':[('tcp',80),('udp',80)],
'pop2':[('tcp',109),('udp',109)],
'pop3':[('tcp',110),('udp',110)],
'sftp':[('tcp',115),('udp',115)],
'nntp':[('tcp',119),('udp',119)]}
def getservbyname(service, proto):
    # Look up a well-known port in the static service_db table (the platform
    # has no /etc/services). 'proto' is 'tcp' or 'udp'. Raises KeyError for
    # an unknown service name, and 'error' (from the e32socket star import)
    # when the service exists but not for the requested protocol.
    service_record = service_db[service.lower()]
    for x in service_record:
        if x[0] == proto:
            return x[1]
    raise error("service/proto not found")
def getaddrinfo(host, port, fam=AF_UNSPEC, socktype=GAI_ANY, proto=0, flags=0):
    # Minimal, IPv4-only reimplementation of the stdlib getaddrinfo().
    # Returns a single-element list of
    # (family, socktype, proto, canonname, (host, port)) tuples.
    if host == None and port == None:
        raise gaierror(EAI_NONAME)
    if fam not in [AF_UNSPEC, AF_INET]:
        # Only IPv4 is supported here.
        raise gaierror(EAI_FAMILY)
    if flags & AI_NUMERICHOST and host and not _isnumericipaddr(host):
        raise gaierror(EAI_NONAME)
    r_family = AF_INET
    r_socktype = GAI_ANY
    r_proto = GAI_ANY
    r_canonname = None
    r_host = None
    r_port = GAI_ANY
    # Derive socket type / protocol from the hints, validating consistency.
    if socktype == GAI_ANY:
        if proto == IPPROTO_UDP:
            r_socktype = SOCK_DGRAM
        elif proto == IPPROTO_TCP:
            r_socktype = SOCK_STREAM
    elif socktype == SOCK_DGRAM:
        if not proto in [IPPROTO_UDP, GAI_ANY]:
            raise gaierror(EAI_BADHINTS)
        r_socktype = SOCK_DGRAM
        r_proto = IPPROTO_UDP
    elif socktype == SOCK_STREAM:
        if not proto in [IPPROTO_TCP, GAI_ANY]:
            raise gaierror(EAI_BADHINTS)
        r_socktype = SOCK_STREAM
        r_proto = IPPROTO_TCP
    else:
        raise gaierror(EAI_SOCKTYPE)
    if port:
        if _isnumeric(port):
            # Numeric port: default to UDP when no socktype hint was given.
            if r_socktype == GAI_ANY:
                r_socktype = SOCK_DGRAM
                r_proto = IPPROTO_UDP
            r_port = port
        else:
            # Service name: resolve via the static table, trying TCP first
            # when no socktype hint was given.
            if r_socktype == GAI_ANY:
                r_port = getservbyname(port, 'tcp')
                if r_port:
                    r_socktype = SOCK_STREAM
                    r_proto = IPPROTO_TCP
                else:
                    r_port = getservbyname(port, 'udp')
                    if r_port:
                        r_socktype = SOCK_DGRAM
                        r_proto = IPPROTO_UDP
            elif r_socktype == SOCK_DGRAM:
                r_port = getservbyname(port, 'udp')
            elif r_socktype == SOCK_STREAM:
                r_port = getservbyname(port, 'tcp')
            if not r_port:
                raise gaierror(EAI_SERVICE)
    if not host:
        # No host given: wildcard for passive (server) sockets, loopback
        # otherwise, matching POSIX getaddrinfo semantics.
        if flags & AI_PASSIVE:
            r_host = '0.0.0.0'
        else:
            r_host = '127.0.0.1'
    elif _isnumericipaddr(host):
        r_host = host
        if flags & AI_CANONNAME:
            if flags & AI_NUMERICHOST:
                r_canonname = host
            else:
                r_canonname, aliases, ipaddrs = gethostbyaddr(host)
    else:
        r_host = gethostbyname(host)
        if flags & AI_CANONNAME:
            r_canonname = host # hmm...
    return [(r_family, r_socktype, r_proto, r_canonname, (r_host, r_port))]
def getfqdn(name=''):
    # Return a fully qualified domain name for 'name', defaulting to the
    # local host when 'name' is empty or the wildcard address. Falls back
    # to the input name unchanged when resolution fails.
    name = name.strip()
    if not name or name == '0.0.0.0':
        name = gethostname()
    try:
        hostname, aliases, ipaddrs = gethostbyaddr(name)
    except error:
        # Resolution failed; keep the caller-supplied name.
        pass
    else:
        aliases.insert(0, hostname)
        # Prefer the first name that looks fully qualified (contains a dot);
        # the for-else falls back to the primary hostname otherwise.
        for name in aliases:
            if '.' in name:
                break
        else:
            name = hostname
    return name
_socketmethods = (
'bind', 'connect_ex', 'fileno', 'listen',
'getpeername', 'getsockname', 'getsockopt', 'setsockopt',
'sendall', 'sendto', 'shutdown')
def raise_error(*args,**kwargs):
    # Stub installed over a closed socket's methods (see _socketobject.close);
    # mimics an operation on a bad file descriptor (EBADF).
    raise error(9, 'Bad file descriptor')
class _socketobject(object):
    """Public socket wrapper.

    Copies the bound methods of an _internalsocketobject onto the instance
    so that close() can later swap every method for an EBADF-raising stub.
    """
    def __init__(self, sock, family):
        self._internalsocket=_internalsocketobject(sock,family)
        self._sock=sock
        # Export every public callable of the internal socket except close
        # (close must keep running this class's own logic below).
        for k in dir(self._internalsocket):
            if not k.startswith('__') and k != 'close':
                value=getattr(self._internalsocket,k)
                if callable(value):
                    setattr(self,k,value)
    def close(self):
        # Replace each exported method with raise_error so any further use
        # fails like an operation on a closed descriptor, then drop the
        # references to let the native socket be reclaimed.
        for k in dir(self._internalsocket):
            if not k.startswith('__') and k != 'close':
                value=getattr(self._internalsocket,k)
                if callable(value):
                    setattr(self,k,raise_error)
        self._internalsocket=None
        self._sock=None
class _internalsocketobject:
    # Holds the real socket state; its bound methods are exported onto
    # _socketobject instances (see _socketobject.__init__).
    class _closedsocket:
        # Stand-in installed by close(): any attribute access raises EBADF.
        def __getattr__(self, name):
            raise error(9, 'Bad file descriptor')
    def __init__(self, sock, family=AF_UNSPEC):
        self._sock = sock
        self._blocking=True
        self._family=family
        self._connectname=None
    def close(self):
        # Drop the native socket; subsequent calls fail with EBADF via
        # _closedsocket.__getattr__.
        self._sock = self.__class__._closedsocket()
    def setblocking(self, flag):
        # NOTE(review): only records the flag; nothing in this class appears
        # to read _blocking — confirm the native layer honours it elsewhere.
        self._blocking=flag
    def accept(self, cb=None):
        # Synchronous form wraps the accepted connection; the asynchronous
        # (callback) form returns whatever the native layer returns.
        if cb == None:
            sock, addr = self._sock.accept()
            return _socketobject(sock, self._family), addr
        else:
            return self._sock.accept(cb)
    def connect(self, addr, cb=None):
        if not self._family == AF_INET or _isnumericipaddr(addr[0]):
            return self._sock.connect(addr, cb)
        else:
            # Store hostname so that it can be given to ssl().
            self._connectname=addr[0]
            return self._sock.connect((gethostbyname(addr[0]), addr[1]), cb)
    def _getconnectname(self):
        return self._connectname
    def dup(self):
        # Both wrappers share the same native socket object.
        return _socketobject(self._sock, self._family)
    def makefile(self, mode='r', bufsize=-1):
        return _fileobject(self.dup(), mode, bufsize)
    def read(self, n=1, cb=None):
        return self.recv(n,0,cb)
    def read_all(self, blocksize=1024):
        # NOTE(review): _checkerror is not defined in this file as shown —
        # confirm it exists (native attribute or elsewhere in the module).
        self._checkerror()
        data = ''
        while 1:
            fragment = self._sock.recv(blocksize)
            if not fragment:
                break
            data += fragment
        return data
    def recv(self, n, f=0, cb=None):
        return self._sock.recv(n, f, cb)
    def recvfrom(self, n, f=0, cb=None):
        return self._sock.recvfrom(n, f, cb)
    def send(self, data, f=0, cb=None):
        return self._sock.send(data, f, cb)
    # Generate trivial pass-through wrappers for the remaining socket
    # methods (Python 2 class-level exec statement).
    _s = "def %s(self, *args): return self._sock.%s(*args)\n\n"
    for _m in _socketmethods:
        exec _s % (_m, _m)
class _fileobject(object):
    """File-like wrapper over a socket with simple read/write buffering
    (Python 2 str-based implementation)."""
    def __init__(self, sock, mode, bufsize):
        self._sock = sock
        self._mode = mode
        if bufsize < 0:
            # Negative bufsize means "use the default" (512 bytes).
            bufsize = 512
        self._rbufsize = max(1, bufsize)
        self._wbufsize = bufsize
        self._wbuf = self._rbuf = ""
    def close(self):
        try:
            if self._sock:
                self.flush()
        finally:
            # Always drop the socket reference, even if flush() raised.
            self._sock = 0
    def __del__(self):
        self.close()
    def flush(self):
        # Push any buffered writes out to the socket.
        if self._wbuf:
            self._sock.sendall(self._wbuf)
            self._wbuf = ""
    def fileno(self):
        return self._sock._sock.fileno()
    def write(self, data):
        self._wbuf = self._wbuf + data
        if self._wbufsize == 1:
            # Line-buffered mode: flush whenever a newline was written.
            if '\n' in data:
                self.flush()
        else:
            if len(self._wbuf) >= self._wbufsize:
                self.flush()
    def writelines(self, list):
        # Python 2 idiom: filter() is used purely for its side effect of
        # calling sendall on every line. NOTE(review): would not execute
        # under Python 3, where filter() is lazy — confirm Python 2 only.
        filter(self._sock.sendall, list)
        self.flush()
    def read(self, n=-1):
        # Bounded read: serve from the buffer first, then the socket.
        if n >= 0:
            k = len(self._rbuf)
            if n <= k:
                data = self._rbuf[:n]
                self._rbuf = self._rbuf[n:]
                return data
            n = n - k
            L = [self._rbuf]
            self._rbuf = ""
            while n > 0:
                new = self._sock.recv(max(n, self._rbufsize))
                if not new: break
                k = len(new)
                if k > n:
                    # Keep the surplus bytes buffered for the next read.
                    L.append(new[:n])
                    self._rbuf = new[n:]
                    break
                L.append(new)
                n = n - k
            return "".join(L)
        # Unbounded read: drain until EOF, doubling the chunk size (capped
        # at 1 MiB) to limit the number of recv calls.
        k = max(512, self._rbufsize)
        L = [self._rbuf]
        self._rbuf = ""
        while 1:
            new = self._sock.recv(k)
            if not new: break
            L.append(new)
            k = min(k*2, 1024**2)
        return "".join(L)
    def readline(self, limit=-1):
        # Read until newline, EOF, or 'limit' bytes; surplus stays buffered.
        data = ""
        i = self._rbuf.find('\n')
        while i < 0 and not (0 < limit <= len(self._rbuf)):
            new = self._sock.recv(self._rbufsize)
            if not new: break
            i = new.find('\n')
            if i >= 0: i = i + len(self._rbuf)
            self._rbuf = self._rbuf + new
        if i < 0: i = len(self._rbuf)
        else: i = i+1
        if 0 <= limit < len(self._rbuf): i = limit
        data, self._rbuf = self._rbuf[:i], self._rbuf[i:]
        return data
    def readlines(self, sizehint = 0):
        # Collect lines until EOF or (approximately) sizehint bytes.
        total = 0
        list = []
        while 1:
            line = self.readline()
            if not line: break
            list.append(line)
            total += len(line)
            if sizehint and total >= sizehint:
                break
        return list
| |
#!/usr/bin/env python3
# Copyright (C) 2017-2021 The btclib developers
#
# This file is part of btclib. It is subject to the license terms in the
# LICENSE file found in the top-level directory of this distribution.
#
# No part of btclib including this file, may be copied, modified, propagated,
# or distributed except according to the terms contained in the LICENSE file.
"Tests for the `btclib.entropy` module."
import math
import secrets
from io import StringIO
from typing import List
import pytest
from btclib.exceptions import BTClibValueError
from btclib.mnemonic.entropy import (
_bits,
bin_str_entropy_from_bytes,
bin_str_entropy_from_entropy,
bin_str_entropy_from_int,
bin_str_entropy_from_random,
bin_str_entropy_from_rolls,
bin_str_entropy_from_str,
bin_str_entropy_from_wordlist_indexes,
bytes_entropy_from_str,
collect_rolls,
wordlist_indexes_from_bin_str_entropy,
)
def test_indexes() -> None:
    "Round-trip between wordlist indexes and binary-string entropy."

    # Short all-zero entropy strings map to a single index 0.
    for entropy in ("0", "00000000000"):
        assert wordlist_indexes_from_bin_str_entropy(entropy, 2048) == [0]

    # Twelve zero bits span two 11-bit words, both index 0.
    assert wordlist_indexes_from_bin_str_entropy("000000000000", 2048) == [0, 0]

    test_vector = [
        [1268, 535, 810, 685, 433, 811, 1385, 1790, 421, 570, 567, 1313],
        [0, 0, 2047, 2047, 2047, 2047, 2047, 2047, 2047, 2047, 2047, 0],
        [0, 0, 2047, 2047, 2047, 2047, 2047, 2047, 2047, 2047, 2047, 0],
    ]
    # Indexes -> entropy -> indexes must be the identity.
    for expected in test_vector:
        round_tripped = bin_str_entropy_from_wordlist_indexes(expected, 2048)
        assert wordlist_indexes_from_bin_str_entropy(round_tripped, 2048) == expected
def test_conversions() -> None:
    "Cross-check bin_str/int/bytes entropy conversions against each other."
    test_vectors = [
        "10101011" * 32,
        "00101011" * 32,
        "00000000" + "10101011" * 31,  # leading zeros must be preserved
    ]
    for raw in test_vectors:
        assert bin_str_entropy_from_str(raw) == raw
        i = int(raw, 2)
        assert bin_str_entropy_from_int(i) == raw
        # "0B…"/"0X…" string representations are accepted too
        assert bin_str_entropy_from_int(bin(i).upper()) == raw
        assert bin_str_entropy_from_int(hex(i).upper()) == raw
        b = i.to_bytes(32, byteorder="big", signed=False)
        assert bin_str_entropy_from_bytes(b) == raw
        assert bin_str_entropy_from_bytes(b.hex()) == raw
        # the generic dispatcher must agree for all three input types
        assert bin_str_entropy_from_entropy(raw) == raw
        assert bin_str_entropy_from_entropy(i) == raw
        assert bin_str_entropy_from_entropy(b) == raw
    max_bits = max(_bits)
    # an over-long string gives the same result as the string cut to max_bits
    raw = "10" + "11111111" * (max_bits // 8)
    assert bin_str_entropy_from_entropy(raw) == bin_str_entropy_from_entropy(raw[:-2])
    # entr integer has its leftmost bit set to 0
    i = 1 << max_bits - 1
    bin_str_entropy = bin_str_entropy_from_entropy(i)
    assert len(bin_str_entropy) == max_bits
    # entr integer has its leftmost bit set to 1
    i = 1 << max_bits
    bin_str_entropy = bin_str_entropy_from_entropy(i)
    assert len(bin_str_entropy) == max_bits
    # the oversized integer comes back right-shifted by one bit
    exp_i = i >> 1
    i = int(bin_str_entropy, 2)
    assert i == exp_i
    # random 255-bit integer is zero-padded up to the default 256 bits
    i = secrets.randbits(255)
    raw = bin_str_entropy_from_int(i)
    assert int(raw, 2) == i
    assert len(raw) == 256
    assert bin_str_entropy_from_str(raw) == raw
    assert bin_str_entropy_from_int(hex(i).upper()) == raw
    b = i.to_bytes(32, byteorder="big", signed=False)
    assert bin_str_entropy_from_bytes(b) == raw
    # explicit 255-bit output: same value, one character shorter
    raw2 = bin_str_entropy_from_int(i, 255)
    assert int(raw2, 2) == i
    assert len(raw2) == 255
    assert bin_str_entropy_from_str("0" + raw2) == raw
    # requesting fewer bits truncates from the left-aligned string
    raw2 = bin_str_entropy_from_str(raw, 128)
    assert len(raw2) == 128
    assert raw2 == raw[:128]
def test_exceptions() -> None:
    "Invalid bit sizes, negative entropy, and wrong input types must raise."
    bin_str_entropy216 = "00011010" * 27  # 216 bits
    bin_str_entropy214 = bin_str_entropy216[:-2]  # 214 bits
    # an explicitly allowed non-standard size is accepted
    entropy = bin_str_entropy_from_entropy(bin_str_entropy214, 214)
    assert entropy == bin_str_entropy214
    # 214 is not in [128, 160, 192, 224, 256, 512]
    err_msg = "invalid number of bits: "
    with pytest.raises(BTClibValueError, match=err_msg):
        bin_str_entropy_from_entropy(bin_str_entropy214)
    # 214 is not in [216]
    err_msg = "invalid number of bits: "
    with pytest.raises(BTClibValueError, match=err_msg):
        bin_str_entropy_from_entropy(bin_str_entropy214, 216)
    int_entropy211 = int(bin_str_entropy214, 2)  # 211 bits
    assert int_entropy211.bit_length() == 211
    # integers are zero-padded up to the requested/default size
    entropy = bin_str_entropy_from_entropy(int_entropy211, 214)
    assert entropy == bin_str_entropy214
    entropy = bin_str_entropy_from_entropy(int_entropy211, 256)
    assert len(entropy) == 256
    assert int(entropy, 2) == int_entropy211
    entropy = bin_str_entropy_from_entropy(int_entropy211)
    assert len(entropy) == 224
    assert int(entropy, 2) == int_entropy211
    err_msg = "Negative entropy: "
    with pytest.raises(BTClibValueError, match=err_msg):
        bin_str_entropy_from_entropy(-1 * int_entropy211)
    bytes_entropy216 = int_entropy211.to_bytes(27, byteorder="big", signed=False)
    # bytes input: requested sizes below the byte length truncate/differ
    entropy = bin_str_entropy_from_entropy(bytes_entropy216, 214)
    assert entropy == bin_str_entropy214
    entropy = bin_str_entropy_from_entropy(bytes_entropy216, 216)
    assert entropy != bin_str_entropy216
    err_msg = "invalid number of bits: "
    with pytest.raises(BTClibValueError, match=err_msg):
        bin_str_entropy_from_entropy(bytes_entropy216, 224)
    # unsupported input types raise
    with pytest.raises(BTClibValueError, match=err_msg):
        bin_str_entropy_from_entropy(tuple())  # type: ignore
    with pytest.raises(ValueError):
        bin_str_entropy_from_int("not an int")  # type: ignore
    with pytest.raises(TypeError):
        bin_str_entropy_from_str(3)  # type: ignore
    err_msg = "invalid number of bits: "
    with pytest.raises(BTClibValueError, match=err_msg):
        bin_str_entropy = "01" * 65  # 130 bits
        bytes_entropy_from_str(bin_str_entropy)
# Scripted stdin streams consumed by test_collect_rolls below
# (one stream per test iteration).
inputs: List[StringIO] = []
# 2 input failures, then automatic rolls with default D6
inputs.append(StringIO("3\npluto\na\n"))
# D120, then 43 automatic rolls
inputs.append(StringIO("a120\n"))
# D120, one input failure, then 43 (implausible but valid) non-automatic rolls
inputs.append(StringIO("120\npluto\n" + "64\n" * 43))
def test_collect_rolls(monkeypatch):
    "collect_rolls must detect the die used and gather enough valid rolls."
    bits = 256
    # Each scripted stdin stream (module-level `inputs`) pairs with the
    # die size collect_rolls is expected to settle on.
    for stdin_script, expected_sides in zip(inputs, (6, 120, 120)):
        monkeypatch.setattr("sys.stdin", stdin_script)
        dice_sides, dice_rolls = collect_rolls(bits)
        assert dice_sides == expected_sides
        bits_per_roll = math.floor(math.log2(expected_sides))
        usable_base = 2 ** bits_per_roll
        # every kept roll lies in the usable power-of-two range
        assert all(0 < roll <= usable_base for roll in dice_rolls)
        # exactly the minimum number of rolls needed for `bits` bits
        assert len(dice_rolls) == math.ceil(bits / bits_per_roll)
def test_bin_str_entropy_from_rolls() -> None:
    "Dice rolls -> binary entropy: extremes, lengths, and error cases."
    bits = 256
    dice_base = 20
    bits_per_roll = math.floor(math.log2(dice_base))  # D20 -> 4 bits per roll
    base = 2 ** bits_per_roll  # usable rolls are 1..base
    roll_number = math.ceil(bits / bits_per_roll)
    # all-max rolls produce all-ones entropy (extra rolls are ignored)
    rolls = [base for _ in range(roll_number)]
    bin_str = bin_str_entropy_from_rolls(bits, dice_base, rolls)
    assert bin_str == "1" * 256
    rolls = [base for _ in range(2 * roll_number)]
    bin_str = bin_str_entropy_from_rolls(bits, dice_base, rolls)
    assert bin_str == "1" * 256
    # all-ones rolls produce all-zeros entropy
    rolls = [1 for _ in range(roll_number)]
    bin_str = bin_str_entropy_from_rolls(bits, dice_base, rolls)
    assert bin_str == "0" * 256
    rolls = [1 for _ in range(2 * roll_number)]
    bin_str = bin_str_entropy_from_rolls(bits, dice_base, rolls)
    assert bin_str == "0" * 256
    # two random roll sequences give distinct 256-bit strings
    rolls = [secrets.randbelow(base) + 1 for _ in range(roll_number)]
    bin_str = bin_str_entropy_from_rolls(bits, dice_base, rolls)
    assert len(bin_str) == 256
    rolls = [secrets.randbelow(base) + 1 for _ in range(roll_number)]
    bin_str2 = bin_str_entropy_from_rolls(bits, dice_base, rolls)
    assert len(bin_str2) == 256
    assert bin_str != bin_str2
    # non-standard output sizes are honored
    bin_str = bin_str_entropy_from_rolls(bits - 1, dice_base, rolls)
    assert len(bin_str) == bits - 1
    rolls = [base for _ in range(roll_number + 1)]
    bin_str = bin_str_entropy_from_rolls(bits + 1, dice_base, rolls)
    assert len(bin_str) == bits + 1
    # roll-derived entropy can seed bin_str_entropy_from_random
    rolls = [base for _ in range(roll_number + 1)]
    bin_str_rolls = bin_str_entropy_from_rolls(bits, dice_base, rolls)
    bin_str = bin_str_entropy_from_random(bits, bin_str_rolls)
    rolls = [secrets.randbelow(base) + 1 for _ in range(roll_number - 2)]
    err_msg = "Too few rolls in the usable "  # [1-16] range, missing 2 rolls
    with pytest.raises(BTClibValueError, match=err_msg):
        bin_str_entropy_from_rolls(bits, dice_base, rolls)
    # a roll above the power-of-two base is discarded, leaving too few
    rolls = [secrets.randbelow(base) + 1 for _ in range(roll_number)]
    rolls[1] = base + 1
    err_msg = "Too few rolls in the usable "  # [1-16] range, missing 1 rolls
    with pytest.raises(BTClibValueError, match=err_msg):
        bin_str_entropy_from_rolls(bits, dice_base, rolls)
    # a roll above the dice base itself is rejected outright
    rolls = [secrets.randbelow(base) + 1 for _ in range(roll_number)]
    rolls[1] = dice_base + 1
    err_msg = "invalid roll: "  # 21 is not in [1-20]
    with pytest.raises(BTClibValueError, match=err_msg):
        bin_str_entropy_from_rolls(bits, dice_base, rolls)
    rolls = [secrets.randbelow(base) + 1 for _ in range(roll_number)]
    err_msg = "invalid dice base: "
    with pytest.raises(BTClibValueError, match=err_msg):
        bin_str_entropy_from_rolls(bits, 1, rolls)
def test_bin_str_entropy_from_random() -> None:
    "System-entropy generation: sizes, uniqueness, and hashing cap."
    for to_be_hashed in (True, False):
        bits = 256
        # repeated calls (with or without an empty seed) never repeat
        bin_str = bin_str_entropy_from_random(bits, to_be_hashed=to_be_hashed)
        assert len(bin_str) == bits
        bin_str2 = bin_str_entropy_from_random(bits, "", to_be_hashed=to_be_hashed)
        assert len(bin_str2) == bits
        assert bin_str != bin_str2
        bin_str2 = bin_str_entropy_from_random(bits, to_be_hashed=to_be_hashed)
        assert len(bin_str2) == bits
        assert bin_str != bin_str2
        bin_str2 = bin_str_entropy_from_random(bits, "", to_be_hashed=to_be_hashed)
        assert len(bin_str2) == bits
        assert bin_str != bin_str2
        bits = 512
        bin_str = bin_str_entropy_from_random(bits, to_be_hashed=to_be_hashed)
        assert len(bin_str) == bits
        # seeding with previous entropy still yields a fresh value
        bin_str2 = bin_str_entropy_from_random(bits, bin_str, to_be_hashed=to_be_hashed)
        assert len(bin_str2) == bits
        assert bin_str != bin_str2
        bin_str2 = bin_str_entropy_from_random(256, bin_str, to_be_hashed=to_be_hashed)
        assert len(bin_str2) == 256
    # 1024 bits are fine without hashing...
    bin_str = bin_str_entropy_from_random(1024, to_be_hashed=False)
    assert len(bin_str) == 1024
    # ...but exceed the default hashed-output size
    err_msg = "Too many bits required: "
    with pytest.raises(BTClibValueError, match=err_msg):
        bin_str_entropy_from_random(1024)
| |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/interfaces/interface/levels/level/packet-counters/psnp/state. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: Packet counters relating to PSNPs.
    """

    # NOTE: generated code. Every leaf below is a read-only (config: false)
    # uint32 counter (yang:counter32) and follows the same
    # _get_X/_set_X/_unset_X accessor pattern produced by pyangbind.
    __slots__ = (
        "_path_helper",
        "_extmethods",
        "__received",
        "__processed",
        "__dropped",
        "__sent",
        "__retransmit",
    )
    _yang_name = "state"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # One YANGDynClass wrapper per counter leaf, all restricted to
        # the uint32 range 0..4294967295.
        self.__received = YANGDynClass(
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..4294967295"]},
                int_size=32,
            ),
            is_leaf=True,
            yang_name="received",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter32",
            is_config=False,
        )
        self.__processed = YANGDynClass(
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..4294967295"]},
                int_size=32,
            ),
            is_leaf=True,
            yang_name="processed",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter32",
            is_config=False,
        )
        self.__dropped = YANGDynClass(
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..4294967295"]},
                int_size=32,
            ),
            is_leaf=True,
            yang_name="dropped",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter32",
            is_config=False,
        )
        self.__sent = YANGDynClass(
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..4294967295"]},
                int_size=32,
            ),
            is_leaf=True,
            yang_name="sent",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter32",
            is_config=False,
        )
        self.__retransmit = YANGDynClass(
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..4294967295"]},
                int_size=32,
            ),
            is_leaf=True,
            yang_name="retransmit",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter32",
            is_config=False,
        )

        # Standard pyangbind copy-constructor: a single positional argument
        # must expose every element of this container; its changed leaves
        # are copied over via the corresponding _set_* methods.
        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Delegate to the parent when attached; otherwise return the
        # absolute YANG path of this container.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "isis",
                "interfaces",
                "interface",
                "levels",
                "level",
                "packet-counters",
                "psnp",
                "state",
            ]

    def _get_received(self):
        """
        Getter method for received, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/psnp/state/received (yang:counter32)

        YANG Description: The number of the specified type of PDU received on the interface.
        """
        return self.__received

    def _set_received(self, v, load=False):
        """
        Setter method for received, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/psnp/state/received (yang:counter32)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_received is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_received() directly.

        YANG Description: The number of the specified type of PDU received on the interface.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=long,
                    restriction_dict={"range": ["0..4294967295"]},
                    int_size=32,
                ),
                is_leaf=True,
                yang_name="received",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="yang:counter32",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """received must be of a type compatible with yang:counter32""",
                    "defined-type": "yang:counter32",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="received", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter32', is_config=False)""",
                }
            )
        self.__received = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_received(self):
        # Reset the leaf to a fresh (unchanged) default wrapper.
        self.__received = YANGDynClass(
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..4294967295"]},
                int_size=32,
            ),
            is_leaf=True,
            yang_name="received",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter32",
            is_config=False,
        )

    def _get_processed(self):
        """
        Getter method for processed, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/psnp/state/processed (yang:counter32)

        YANG Description: The number of the specified type of PDU received on the interface
        that have been processed by the local system.
        """
        return self.__processed

    def _set_processed(self, v, load=False):
        """
        Setter method for processed, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/psnp/state/processed (yang:counter32)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_processed is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_processed() directly.

        YANG Description: The number of the specified type of PDU received on the interface
        that have been processed by the local system.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=long,
                    restriction_dict={"range": ["0..4294967295"]},
                    int_size=32,
                ),
                is_leaf=True,
                yang_name="processed",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="yang:counter32",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """processed must be of a type compatible with yang:counter32""",
                    "defined-type": "yang:counter32",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="processed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter32', is_config=False)""",
                }
            )
        self.__processed = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_processed(self):
        # Reset the leaf to a fresh (unchanged) default wrapper.
        self.__processed = YANGDynClass(
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..4294967295"]},
                int_size=32,
            ),
            is_leaf=True,
            yang_name="processed",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter32",
            is_config=False,
        )

    def _get_dropped(self):
        """
        Getter method for dropped, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/psnp/state/dropped (yang:counter32)

        YANG Description: The number of the specified type of PDU received on the interface
        that have been dropped.
        """
        return self.__dropped

    def _set_dropped(self, v, load=False):
        """
        Setter method for dropped, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/psnp/state/dropped (yang:counter32)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_dropped is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_dropped() directly.

        YANG Description: The number of the specified type of PDU received on the interface
        that have been dropped.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=long,
                    restriction_dict={"range": ["0..4294967295"]},
                    int_size=32,
                ),
                is_leaf=True,
                yang_name="dropped",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="yang:counter32",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """dropped must be of a type compatible with yang:counter32""",
                    "defined-type": "yang:counter32",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="dropped", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter32', is_config=False)""",
                }
            )
        self.__dropped = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_dropped(self):
        # Reset the leaf to a fresh (unchanged) default wrapper.
        self.__dropped = YANGDynClass(
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..4294967295"]},
                int_size=32,
            ),
            is_leaf=True,
            yang_name="dropped",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter32",
            is_config=False,
        )

    def _get_sent(self):
        """
        Getter method for sent, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/psnp/state/sent (yang:counter32)

        YANG Description: The number of the specified type of PDU that have been sent by the
        local system on the interface.
        """
        return self.__sent

    def _set_sent(self, v, load=False):
        """
        Setter method for sent, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/psnp/state/sent (yang:counter32)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_sent is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_sent() directly.

        YANG Description: The number of the specified type of PDU that have been sent by the
        local system on the interface.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=long,
                    restriction_dict={"range": ["0..4294967295"]},
                    int_size=32,
                ),
                is_leaf=True,
                yang_name="sent",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="yang:counter32",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """sent must be of a type compatible with yang:counter32""",
                    "defined-type": "yang:counter32",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="sent", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter32', is_config=False)""",
                }
            )
        self.__sent = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_sent(self):
        # Reset the leaf to a fresh (unchanged) default wrapper.
        self.__sent = YANGDynClass(
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..4294967295"]},
                int_size=32,
            ),
            is_leaf=True,
            yang_name="sent",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter32",
            is_config=False,
        )

    def _get_retransmit(self):
        """
        Getter method for retransmit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/psnp/state/retransmit (yang:counter32)

        YANG Description: The number of the specified type of PDU that that have been
        retransmitted by the local system on the interface.
        """
        return self.__retransmit

    def _set_retransmit(self, v, load=False):
        """
        Setter method for retransmit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/psnp/state/retransmit (yang:counter32)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_retransmit is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_retransmit() directly.

        YANG Description: The number of the specified type of PDU that that have been
        retransmitted by the local system on the interface.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=long,
                    restriction_dict={"range": ["0..4294967295"]},
                    int_size=32,
                ),
                is_leaf=True,
                yang_name="retransmit",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="yang:counter32",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """retransmit must be of a type compatible with yang:counter32""",
                    "defined-type": "yang:counter32",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="retransmit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter32', is_config=False)""",
                }
            )
        self.__retransmit = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_retransmit(self):
        # Reset the leaf to a fresh (unchanged) default wrapper.
        self.__retransmit = YANGDynClass(
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..4294967295"]},
                int_size=32,
            ),
            is_leaf=True,
            yang_name="retransmit",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter32",
            is_config=False,
        )

    # Public read-only views over the private leaves.
    received = __builtin__.property(_get_received)
    processed = __builtin__.property(_get_processed)
    dropped = __builtin__.property(_get_dropped)
    sent = __builtin__.property(_get_sent)
    retransmit = __builtin__.property(_get_retransmit)

    _pyangbind_elements = OrderedDict(
        [
            ("received", received),
            ("processed", processed),
            ("dropped", dropped),
            ("sent", sent),
            ("retransmit", retransmit),
        ]
    )
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/interfaces/interface/levels/level/packet-counters/psnp/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Packet counters relating to PSNPs.
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__received",
"__processed",
"__dropped",
"__sent",
"__retransmit",
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__received = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="received",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
self.__processed = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="processed",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
self.__dropped = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="dropped",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
self.__sent = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="sent",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
self.__retransmit = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="retransmit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="yang:counter32",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"interfaces",
"interface",
"levels",
"level",
"packet-counters",
"psnp",
"state",
]
# pyangbind-generated getter/setter/unsetter triplet for leaf "received".
# Edit the YANG model and regenerate rather than modifying by hand.
def _get_received(self):
    """
    Getter method for received, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/psnp/state/received (yang:counter32)

    YANG Description: The number of the specified type of PDU received on the interface.
    """
    return self.__received

def _set_received(self, v, load=False):
    """
    Setter method for received, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/psnp/state/received (yang:counter32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_received is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_received() directly.

    YANG Description: The number of the specified type of PDU received on the interface.
    """
    # Unions normalise the incoming value to their underlying type first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate/wrap the value as a 32-bit counter; raises on bad input.
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..4294967295"]},
                int_size=32,
            ),
            is_leaf=True,
            yang_name="received",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter32",
            is_config=False,
        )
    except (TypeError, ValueError):
        raise ValueError(
            {
                "error-string": """received must be of a type compatible with yang:counter32""",
                "defined-type": "yang:counter32",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="received", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter32', is_config=False)""",
            }
        )
    self.__received = t
    # Notify the parent container, when present, that a child changed.
    if hasattr(self, "_set"):
        self._set()

def _unset_received(self):
    # Reset the leaf to a fresh, unconfigured instance of its type.
    self.__received = YANGDynClass(
        base=RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..4294967295"]},
            int_size=32,
        ),
        is_leaf=True,
        yang_name="received",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="yang:counter32",
        is_config=False,
    )
# pyangbind-generated getter/setter/unsetter triplet for leaf "processed".
# Edit the YANG model and regenerate rather than modifying by hand.
def _get_processed(self):
    """
    Getter method for processed, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/psnp/state/processed (yang:counter32)

    YANG Description: The number of the specified type of PDU received on the interface
    that have been processed by the local system.
    """
    return self.__processed

def _set_processed(self, v, load=False):
    """
    Setter method for processed, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/psnp/state/processed (yang:counter32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_processed is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_processed() directly.

    YANG Description: The number of the specified type of PDU received on the interface
    that have been processed by the local system.
    """
    # Unions normalise the incoming value to their underlying type first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate/wrap the value as a 32-bit counter; raises on bad input.
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..4294967295"]},
                int_size=32,
            ),
            is_leaf=True,
            yang_name="processed",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter32",
            is_config=False,
        )
    except (TypeError, ValueError):
        raise ValueError(
            {
                "error-string": """processed must be of a type compatible with yang:counter32""",
                "defined-type": "yang:counter32",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="processed", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter32', is_config=False)""",
            }
        )
    self.__processed = t
    # Notify the parent container, when present, that a child changed.
    if hasattr(self, "_set"):
        self._set()

def _unset_processed(self):
    # Reset the leaf to a fresh, unconfigured instance of its type.
    self.__processed = YANGDynClass(
        base=RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..4294967295"]},
            int_size=32,
        ),
        is_leaf=True,
        yang_name="processed",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="yang:counter32",
        is_config=False,
    )
# pyangbind-generated getter/setter/unsetter triplet for leaf "dropped".
# Edit the YANG model and regenerate rather than modifying by hand.
def _get_dropped(self):
    """
    Getter method for dropped, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/psnp/state/dropped (yang:counter32)

    YANG Description: The number of the specified type of PDU received on the interface
    that have been dropped.
    """
    return self.__dropped

def _set_dropped(self, v, load=False):
    """
    Setter method for dropped, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/psnp/state/dropped (yang:counter32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_dropped is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_dropped() directly.

    YANG Description: The number of the specified type of PDU received on the interface
    that have been dropped.
    """
    # Unions normalise the incoming value to their underlying type first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate/wrap the value as a 32-bit counter; raises on bad input.
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..4294967295"]},
                int_size=32,
            ),
            is_leaf=True,
            yang_name="dropped",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter32",
            is_config=False,
        )
    except (TypeError, ValueError):
        raise ValueError(
            {
                "error-string": """dropped must be of a type compatible with yang:counter32""",
                "defined-type": "yang:counter32",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="dropped", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter32', is_config=False)""",
            }
        )
    self.__dropped = t
    # Notify the parent container, when present, that a child changed.
    if hasattr(self, "_set"):
        self._set()

def _unset_dropped(self):
    # Reset the leaf to a fresh, unconfigured instance of its type.
    self.__dropped = YANGDynClass(
        base=RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..4294967295"]},
            int_size=32,
        ),
        is_leaf=True,
        yang_name="dropped",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="yang:counter32",
        is_config=False,
    )
# pyangbind-generated getter/setter/unsetter triplet for leaf "sent".
# Edit the YANG model and regenerate rather than modifying by hand.
def _get_sent(self):
    """
    Getter method for sent, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/psnp/state/sent (yang:counter32)

    YANG Description: The number of the specified type of PDU that have been sent by the
    local system on the interface.
    """
    return self.__sent

def _set_sent(self, v, load=False):
    """
    Setter method for sent, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/psnp/state/sent (yang:counter32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_sent is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_sent() directly.

    YANG Description: The number of the specified type of PDU that have been sent by the
    local system on the interface.
    """
    # Unions normalise the incoming value to their underlying type first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate/wrap the value as a 32-bit counter; raises on bad input.
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..4294967295"]},
                int_size=32,
            ),
            is_leaf=True,
            yang_name="sent",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter32",
            is_config=False,
        )
    except (TypeError, ValueError):
        raise ValueError(
            {
                "error-string": """sent must be of a type compatible with yang:counter32""",
                "defined-type": "yang:counter32",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="sent", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter32', is_config=False)""",
            }
        )
    self.__sent = t
    # Notify the parent container, when present, that a child changed.
    if hasattr(self, "_set"):
        self._set()

def _unset_sent(self):
    # Reset the leaf to a fresh, unconfigured instance of its type.
    self.__sent = YANGDynClass(
        base=RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..4294967295"]},
            int_size=32,
        ),
        is_leaf=True,
        yang_name="sent",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="yang:counter32",
        is_config=False,
    )
# pyangbind-generated getter/setter/unsetter triplet for leaf "retransmit".
# Edit the YANG model and regenerate rather than modifying by hand.
def _get_retransmit(self):
    """
    Getter method for retransmit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/psnp/state/retransmit (yang:counter32)

    YANG Description: The number of the specified type of PDU that have been
    retransmitted by the local system on the interface.
    """
    return self.__retransmit

def _set_retransmit(self, v, load=False):
    """
    Setter method for retransmit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/interfaces/interface/levels/level/packet_counters/psnp/state/retransmit (yang:counter32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_retransmit is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_retransmit() directly.

    YANG Description: The number of the specified type of PDU that have been
    retransmitted by the local system on the interface.
    """
    # Unions normalise the incoming value to their underlying type first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Validate/wrap the value as a 32-bit counter; raises on bad input.
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..4294967295"]},
                int_size=32,
            ),
            is_leaf=True,
            yang_name="retransmit",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="yang:counter32",
            is_config=False,
        )
    except (TypeError, ValueError):
        raise ValueError(
            {
                "error-string": """retransmit must be of a type compatible with yang:counter32""",
                "defined-type": "yang:counter32",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="retransmit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='yang:counter32', is_config=False)""",
            }
        )
    self.__retransmit = t
    # Notify the parent container, when present, that a child changed.
    if hasattr(self, "_set"):
        self._set()

def _unset_retransmit(self):
    # Reset the leaf to a fresh, unconfigured instance of its type.
    self.__retransmit = YANGDynClass(
        base=RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..4294967295"]},
            int_size=32,
        ),
        is_leaf=True,
        yang_name="retransmit",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="yang:counter32",
        is_config=False,
    )
# Public read-only properties: these leaves are state data (config: false),
# so only the getters are exposed; values are set via the private setters.
received = __builtin__.property(_get_received)
processed = __builtin__.property(_get_processed)
dropped = __builtin__.property(_get_dropped)
sent = __builtin__.property(_get_sent)
retransmit = __builtin__.property(_get_retransmit)

# Ordered registry of this container's child elements, consumed by
# pyangbind's serialisation and iteration helpers.
_pyangbind_elements = OrderedDict(
    [
        ("received", received),
        ("processed", processed),
        ("dropped", dropped),
        ("sent", sent),
        ("retransmit", retransmit),
    ]
)
| |
# -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import socket
from .models import Response
from .packages.urllib3.poolmanager import PoolManager, proxy_from_url
from .packages.urllib3.response import HTTPResponse
from .packages.urllib3.util import Timeout as TimeoutSauce
from .packages.urllib3.util.retry import Retry
from .compat import urlparse, basestring
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
prepend_scheme_if_needed, get_auth_from_url, urldefragauth)
from .structures import CaseInsensitiveDict
from .packages.urllib3.exceptions import ConnectTimeoutError
from .packages.urllib3.exceptions import HTTPError as _HTTPError
from .packages.urllib3.exceptions import MaxRetryError
from .packages.urllib3.exceptions import ProxyError as _ProxyError
from .packages.urllib3.exceptions import ProtocolError
from .packages.urllib3.exceptions import ReadTimeoutError
from .packages.urllib3.exceptions import SSLError as _SSLError
from .packages.urllib3.exceptions import ResponseError
from .cookies import extract_cookies_to_jar
from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,
ProxyError, RetryError)
from .auth import _basic_auth_str
# Defaults for HTTPAdapter's urllib3 connection pooling.
DEFAULT_POOLBLOCK = False  # don't block when the pool has no free connection
DEFAULT_POOLSIZE = 10      # number of cached pools / connections per pool
DEFAULT_RETRIES = 0        # by default, never retry failed connections
class BaseAdapter(object):
    """The Base Transport Adapter"""

    def __init__(self):
        super(BaseAdapter, self).__init__()

    def send(self):
        """Dispatch a request. Concrete adapters must override this."""
        raise NotImplementedError

    def close(self):
        """Release any adapter-held resources. Concrete adapters must override this."""
        raise NotImplementedError
class HTTPAdapter(BaseAdapter):
    """The built-in HTTP Adapter for urllib3.

    Provides a general-case interface for Requests sessions to contact HTTP and
    HTTPS urls by implementing the Transport Adapter interface. This class will
    usually be created by the :class:`Session <Session>` class under the
    covers.

    :param pool_connections: The number of urllib3 connection pools to cache.
    :param pool_maxsize: The maximum number of connections to save in the pool.
    :param int max_retries: The maximum number of retries each connection
        should attempt. Note, this applies only to failed DNS lookups, socket
        connections and connection timeouts, never to requests where data has
        made it to the server. By default, Requests does not retry failed
        connections. If you need granular control over the conditions under
        which we retry a request, import urllib3's ``Retry`` class and pass
        that instead.
    :param pool_block: Whether the connection pool should block for connections.

    Usage::

      >>> import requests
      >>> s = requests.Session()
      >>> a = requests.adapters.HTTPAdapter(max_retries=3)
      >>> s.mount('http://', a)
    """
    __attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
                 '_pool_block']

    def __init__(self, pool_connections=DEFAULT_POOLSIZE,
                 pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
                 pool_block=DEFAULT_POOLBLOCK):
        if max_retries == DEFAULT_RETRIES:
            # Preserve the historical default: retry zero times and never
            # retry once a read has started.
            self.max_retries = Retry(0, read=False)
        else:
            self.max_retries = Retry.from_int(max_retries)
        self.config = {}
        self.proxy_manager = {}

        super(HTTPAdapter, self).__init__()

        # Saved so the adapter can be pickled and reconstructed (__setstate__).
        self._pool_connections = pool_connections
        self._pool_maxsize = pool_maxsize
        self._pool_block = pool_block

        self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)

    def __getstate__(self):
        return dict((attr, getattr(self, attr, None)) for attr in
                    self.__attrs__)

    def __setstate__(self, state):
        # Can't handle by adding 'proxy_manager' to self.__attrs__ because
        # self.poolmanager uses a lambda function, which isn't pickleable.
        self.proxy_manager = {}
        self.config = {}

        for attr, value in state.items():
            setattr(self, attr, value)

        self.init_poolmanager(self._pool_connections, self._pool_maxsize,
                              block=self._pool_block)

    def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
        """Initializes a urllib3 PoolManager.

        This method should not be called from user code, and is only
        exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        :param connections: The number of urllib3 connection pools to cache.
        :param maxsize: The maximum number of connections to save in the pool.
        :param block: Block when no free connections are available.
        :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
        """
        # save these values for pickling
        self._pool_connections = connections
        self._pool_maxsize = maxsize
        self._pool_block = block

        self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
                                       block=block, strict=True, **pool_kwargs)

    def proxy_manager_for(self, proxy, **proxy_kwargs):
        """Return urllib3 ProxyManager for the given proxy.

        This method should not be called from user code, and is only
        exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        :param proxy: The proxy to return a urllib3 ProxyManager for.
        :param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
        :returns: ProxyManager
        """
        # Managers are cached per proxy URL so each proxy gets one pool.
        if proxy not in self.proxy_manager:
            proxy_headers = self.proxy_headers(proxy)
            self.proxy_manager[proxy] = proxy_from_url(
                proxy,
                proxy_headers=proxy_headers,
                num_pools=self._pool_connections,
                maxsize=self._pool_maxsize,
                block=self._pool_block,
                **proxy_kwargs)

        return self.proxy_manager[proxy]

    def cert_verify(self, conn, url, verify, cert):
        """Verify a SSL certificate. This method should not be called from user
        code, and is only exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        :param conn: The urllib3 connection object associated with the cert.
        :param url: The requested URL.
        :param verify: Whether we should actually verify the certificate.
        :param cert: The SSL certificate to verify.
        """
        if url.lower().startswith('https') and verify:

            cert_loc = None

            # Allow self-specified cert location.
            if verify is not True:
                cert_loc = verify

            if not cert_loc:
                cert_loc = DEFAULT_CA_BUNDLE_PATH

            if not cert_loc:
                raise Exception("Could not find a suitable SSL CA certificate bundle.")

            conn.cert_reqs = 'CERT_REQUIRED'
            conn.ca_certs = cert_loc
        else:
            conn.cert_reqs = 'CERT_NONE'
            conn.ca_certs = None

        if cert:
            # A (cert, key) pair, or a single combined-file path.
            if not isinstance(cert, basestring):
                conn.cert_file = cert[0]
                conn.key_file = cert[1]
            else:
                conn.cert_file = cert

    def build_response(self, req, resp):
        """Builds a :class:`Response <requests.Response>` object from a urllib3
        response. This should not be called from user code, and is only exposed
        for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`

        :param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
        :param resp: The urllib3 response object.
        """
        response = Response()

        # Fallback to None if there's no status_code, for whatever reason.
        response.status_code = getattr(resp, 'status', None)

        # Make headers case-insensitive.
        response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))

        # Set encoding.
        response.encoding = get_encoding_from_headers(response.headers)
        response.raw = resp
        response.reason = response.raw.reason

        if isinstance(req.url, bytes):
            response.url = req.url.decode('utf-8')
        else:
            response.url = req.url

        # Add new cookies from the server.
        extract_cookies_to_jar(response.cookies, req, resp)

        # Give the Response some context.
        response.request = req
        response.connection = self

        return response

    def get_connection(self, url, proxies=None):
        """Returns a urllib3 connection for the given URL. This should not be
        called from user code, and is only exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        :param url: The URL to connect to.
        :param proxies: (optional) A Requests-style dictionary of proxies used on this request.
        """
        proxies = proxies or {}
        proxy = proxies.get(urlparse(url.lower()).scheme)

        if proxy:
            proxy = prepend_scheme_if_needed(proxy, 'http')
            proxy_manager = self.proxy_manager_for(proxy)
            conn = proxy_manager.connection_from_url(url)
        else:
            # Only scheme should be lower case
            parsed = urlparse(url)
            url = parsed.geturl()
            conn = self.poolmanager.connection_from_url(url)

        return conn

    def close(self):
        """Disposes of any internal state.

        Currently, this just closes the PoolManager, which closes pooled
        connections.
        """
        self.poolmanager.clear()

    def request_url(self, request, proxies):
        """Obtain the url to use when making the final request.

        If the message is being sent through a HTTP proxy, the full URL has to
        be used. Otherwise, we should only use the path portion of the URL.

        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
        :param proxies: A dictionary of schemes to proxy URLs.
        """
        proxies = proxies or {}
        scheme = urlparse(request.url).scheme
        proxy = proxies.get(scheme)

        # HTTPS through a proxy tunnels via CONNECT, so only the path is sent.
        if proxy and scheme != 'https':
            url = urldefragauth(request.url)
        else:
            url = request.path_url

        return url

    def add_headers(self, request, **kwargs):
        """Add any headers needed by the connection. As of v2.0 this does
        nothing by default, but is left for overriding by users that subclass
        the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        :param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
        :param kwargs: The keyword arguments from the call to send().
        """
        pass

    def proxy_headers(self, proxy):
        """Returns a dictionary of the headers to add to any request sent
        through a proxy. This works with urllib3 magic to ensure that they are
        correctly sent to the proxy, rather than in a tunnelled request if
        CONNECT is being used.

        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        :param proxies: The url of the proxy being used for this request.
        :param kwargs: Optional additional keyword arguments.
        """
        headers = {}
        username, password = get_auth_from_url(proxy)

        if username and password:
            headers['Proxy-Authorization'] = _basic_auth_str(username,
                                                             password)

        return headers

    def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
        """Sends PreparedRequest object. Returns Response object.

        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
        :param stream: (optional) Whether to stream the request content.
        :param timeout: (optional) How long to wait for the server to send
            data before giving up, as a float, or a (`connect timeout, read
            timeout <user/advanced.html#timeouts>`_) tuple.
        :type timeout: float or tuple
        :param verify: (optional) Whether to verify SSL certificates.
        :param cert: (optional) Any user-provided SSL certificate to be trusted.
        :param proxies: (optional) The proxies dictionary to apply to the request.
        """
        conn = self.get_connection(request.url, proxies)

        self.cert_verify(conn, request.url, verify, cert)
        url = self.request_url(request, proxies)
        self.add_headers(request)

        # A body with no Content-Length means we must use chunked encoding.
        chunked = not (request.body is None or 'Content-Length' in request.headers)

        if isinstance(timeout, tuple):
            try:
                connect, read = timeout
                timeout = TimeoutSauce(connect=connect, read=read)
            except ValueError as e:
                # this may raise a string formatting error.
                err = ("Invalid timeout {0}. Pass a (connect, read) "
                       "timeout tuple, or a single float to set "
                       "both timeouts to the same value".format(timeout))
                raise ValueError(err)
        else:
            timeout = TimeoutSauce(connect=timeout, read=timeout)

        try:
            if not chunked:
                # Send the request.
                resp = conn.urlopen(
                    method=request.method,
                    url=url,
                    body=request.body,
                    headers=request.headers,
                    redirect=False,
                    assert_same_host=False,
                    preload_content=False,
                    decode_content=False,
                    retries=self.max_retries,
                    timeout=timeout
                )

            else:
                # Chunked transfer: drive the low-level connection by hand,
                # writing each body chunk framed per RFC 7230 section 4.1.
                if hasattr(conn, 'proxy_pool'):
                    conn = conn.proxy_pool

                low_conn = conn._get_conn(timeout=timeout)

                try:
                    low_conn.putrequest(request.method,
                                        url,
                                        skip_accept_encoding=True)

                    for header, value in request.headers.items():
                        low_conn.putheader(header, value)

                    low_conn.endheaders()

                    for i in request.body:
                        low_conn.send(hex(len(i))[2:].encode('utf-8'))
                        low_conn.send(b'\r\n')
                        low_conn.send(i)
                        low_conn.send(b'\r\n')
                    low_conn.send(b'0\r\n\r\n')

                    r = low_conn.getresponse()
                    resp = HTTPResponse.from_httplib(
                        r,
                        pool=conn,
                        connection=low_conn,
                        preload_content=False,
                        decode_content=False
                    )
                except:
                    # If we hit any problems here, clean up the connection.
                    # Then, reraise so that we can handle the actual exception.
                    low_conn.close()
                    raise
                else:
                    # All is well, return the connection to the pool.
                    conn._put_conn(low_conn)

        except (ProtocolError, socket.error) as err:
            raise ConnectionError(err, request=request)

        except MaxRetryError as e:
            if isinstance(e.reason, ConnectTimeoutError):
                raise ConnectTimeout(e, request=request)

            if isinstance(e.reason, ResponseError):
                raise RetryError(e, request=request)

            raise ConnectionError(e, request=request)

        except _ProxyError as e:
            raise ProxyError(e)

        except (_SSLError, _HTTPError) as e:
            if isinstance(e, _SSLError):
                raise SSLError(e, request=request)
            elif isinstance(e, ReadTimeoutError):
                raise ReadTimeout(e, request=request)
            else:
                raise

        return self.build_response(request, resp)
| |
# Copyright (c) 2010-2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ali Saidi
import optparse
import sys
import m5
from m5.defines import buildEnv
from m5.objects import *
from m5.util import addToPath, fatal
addToPath('../common')
from FSConfig import *
from SysPaths import *
from Benchmarks import *
import Simulation
import CacheConfig
from Caches import *
import Options
# This scripted run needs no remote (gdb/terminal) listeners.
m5.disableAllListeners()

# This script takes option flags only; positional arguments are rejected.
parser = optparse.OptionParser()
Options.addCommonOptions(parser)
Options.addFSOptions(parser)
(options, args) = parser.parse_args()

if args:
    print "Error: script doesn't take any positional arguments"
    sys.exit(1)

# driver system CPU is always simple... note this is an assignment of
# a class, not an instance.
DriveCPUClass = AtomicSimpleCPU
drive_mem_mode = 'atomic'

# Check if KVM support has been enabled, we might need to do VM
# configuration if that's the case.
have_kvm_support = 'BaseKvmCPU' in globals()
def is_kvm_cpu(cpu_class):
    """Return True iff KVM is compiled in and cpu_class is a KVM CPU model."""
    if not have_kvm_support:
        return False
    return cpu_class != None and issubclass(cpu_class, BaseKvmCPU)
# system under test can be any CPU
(TestCPUClass, test_mem_mode, FutureClass) = Simulation.setCPUClass(options)

# Match the memories with the CPUs, the driver system always simple,
# and based on the options for the test system
DriveMemClass = SimpleMemory
TestMemClass = Simulation.setMemClass(options)

# Choose the system configuration(s): either a named benchmark entry, or a
# generic single (or dual) system built from --disk-image / --mem-size.
if options.benchmark:
    try:
        bm = Benchmarks[options.benchmark]
    except KeyError:
        print "Error benchmark %s has not been defined." % options.benchmark
        print "Valid benchmarks are: %s" % DefinedBenchmarks
        sys.exit(1)
else:
    if options.dual:
        bm = [SysConfig(disk=options.disk_image, mem=options.mem_size), SysConfig(disk=options.disk_image, mem=options.mem_size)]
    else:
        bm = [SysConfig(disk=options.disk_image, mem=options.mem_size)]
np = options.num_cpus

# Instantiate the system under test for the ISA gem5 was built with.
if buildEnv['TARGET_ISA'] == "alpha":
    test_sys = makeLinuxAlphaSystem(test_mem_mode, TestMemClass, bm[0])
elif buildEnv['TARGET_ISA'] == "mips":
    test_sys = makeLinuxMipsSystem(test_mem_mode, TestMemClass, bm[0])
elif buildEnv['TARGET_ISA'] == "sparc":
    test_sys = makeSparcSystem(test_mem_mode, TestMemClass, bm[0])
elif buildEnv['TARGET_ISA'] == "x86":
    test_sys = makeLinuxX86System(test_mem_mode, TestMemClass,
                                  options.num_cpus, bm[0])
elif buildEnv['TARGET_ISA'] == "arm":
    test_sys = makeArmSystem(test_mem_mode, options.machine_type,
                             TestMemClass, bm[0], options.dtb_filename,
                             bare_metal=options.bare_metal)
else:
    fatal("Incapable of building %s full system!", buildEnv['TARGET_ISA'])

# Create a source clock for the system and set the clock period
test_sys.clk_domain = SrcClockDomain(clock = options.sys_clock)

# Create a source clock for the CPUs and set the clock period
test_sys.cpu_clk_domain = SrcClockDomain(clock = options.cpu_clock)

if options.kernel is not None:
    test_sys.kernel = binary(options.kernel)

if options.script is not None:
    test_sys.readfile = options.script

test_sys.init_param = options.init_param

# For now, assign all the CPUs to the same clock domain
test_sys.cpu = [TestCPUClass(clk_domain=test_sys.cpu_clk_domain, cpu_id=i)
                for i in xrange(np)]

# KVM CPUs run inside a virtual machine object.
if is_kvm_cpu(TestCPUClass) or is_kvm_cpu(FutureClass):
    test_sys.vm = KvmVM()
# With caches, DMA traffic goes through an IO cache; otherwise a plain
# bridge connects the IO bus to the memory bus.
if options.caches or options.l2cache:
    # By default the IOCache runs at the system clock
    test_sys.iocache = IOCache(addr_ranges = test_sys.mem_ranges)
    test_sys.iocache.cpu_side = test_sys.iobus.master
    test_sys.iocache.mem_side = test_sys.membus.slave
else:
    test_sys.iobridge = Bridge(delay='50ns', ranges = test_sys.mem_ranges)
    test_sys.iobridge.slave = test_sys.iobus.master
    test_sys.iobridge.master = test_sys.membus.slave

# Sanity check
if options.fastmem:
    if TestCPUClass != AtomicSimpleCPU:
        fatal("Fastmem can only be used with atomic CPU!")
    if (options.caches or options.l2cache):
        fatal("You cannot use fastmem in combination with caches!")

for i in xrange(np):
    if options.fastmem:
        test_sys.cpu[i].fastmem = True
    if options.checker:
        test_sys.cpu[i].addCheckerCpu()
    test_sys.cpu[i].createThreads()

CacheConfig.config_cache(options, test_sys)

# Power/area-model knobs only exist on the detailed (out-of-order) CPU.
if options.cpu_type == "detailed":
    for i in xrange(np):
        test_sys.cpu[i].technologyNode = options.tech_node
        test_sys.cpu[i].voltage = options.voltage
        test_sys.cpu[i].noMcPAT = options.no_mcpat
        test_sys.cpu[i].yesVAR = options.yes_var
# Dual-system runs add a second, always-simple "drive" system (e.g. a
# traffic generator for networking workloads) next to the system under test.
if len(bm) == 2:
    if buildEnv['TARGET_ISA'] == 'alpha':
        drive_sys = makeLinuxAlphaSystem(drive_mem_mode, DriveMemClass, bm[1])
    elif buildEnv['TARGET_ISA'] == 'mips':
        drive_sys = makeLinuxMipsSystem(drive_mem_mode, DriveMemClass, bm[1])
    elif buildEnv['TARGET_ISA'] == 'sparc':
        drive_sys = makeSparcSystem(drive_mem_mode, DriveMemClass, bm[1])
    elif buildEnv['TARGET_ISA'] == 'x86':
        drive_sys = makeX86System(drive_mem_mode, DriveMemClass, np, bm[1])
    elif buildEnv['TARGET_ISA'] == 'arm':
        drive_sys = makeArmSystem(drive_mem_mode, options.machine_type,
                                  DriveMemClass, bm[1])

    # Create a source clock for the system and set the clock period
    drive_sys.clk_domain = SrcClockDomain(clock = options.sys_clock)

    # Create a source clock for the CPUs and set the clock period
    drive_sys.cpu_clk_domain = SrcClockDomain(clock = options.cpu_clock)

    drive_sys.cpu = DriveCPUClass(clk_domain=drive_sys.cpu_clk_domain,
                                  cpu_id=0)
    drive_sys.cpu.createThreads()
    drive_sys.cpu.createInterruptController()
    drive_sys.cpu.connectAllPorts(drive_sys.membus)
    if options.fastmem:
        drive_sys.cpu.fastmem = True
    if options.kernel is not None:
        drive_sys.kernel = binary(options.kernel)
    if is_kvm_cpu(DriveCPUClass):
        drive_sys.vm = KvmVM()

    # The drive system is uncached: a simple bridge joins its buses.
    drive_sys.iobridge = Bridge(delay='50ns',
                                ranges = drive_sys.mem_ranges)
    drive_sys.iobridge.slave = drive_sys.iobus.master
    drive_sys.iobridge.master = drive_sys.membus.slave

    drive_sys.init_param = options.init_param
    root = makeDualRoot(True, test_sys, drive_sys, options.etherdump)
elif len(bm) == 1:
    root = Root(full_system=True, system=test_sys)
else:
    print "Error I don't know how to create more than 2 systems."
    sys.exit(1)

if options.timesync:
    root.time_sync_enable = True

if options.frame_capture:
    VncServer.frame_capture = True

# Hand control to the common simulation driver.
Simulation.setWorkCountOptions(test_sys, options)
Simulation.run(options, root, test_sys, FutureClass)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for 3d convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def GetTestConfigs():
  """Get all the valid tests configs to run.

  Returns:
    all the valid test configs as tuples of data_format and use_gpu.
  """
  configs = [("NDHWC", False), ("NDHWC", True)]
  if test.is_gpu_available(cuda_only=True):
    # "NCDHW" format is only supported on CUDA.
    configs.append(("NCDHW", True))
  return configs
class Conv3DTest(test.TestCase):
  """Forward-value and gradient tests for nn_ops.conv3d."""

  def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, stride,
                            padding, data_format, use_gpu):
    """Builds a conv3d op for one device/layout configuration.

    Args:
      tensor_in_sizes: input shape as [batch, depth, rows, cols, in_channels].
      filter_in_sizes: filter shape as [depth, rows, cols, in_ch, out_ch].
      stride: a single int applied to all three spatial dims, or an
        iterable of three per-dimension strides.
      padding: "SAME" or "VALID".
      data_format: "NDHWC" or "NCDHW".
      use_gpu: whether to place the op on GPU.

    Returns:
      The conv output tensor, converted back to NDHWC ordering.
    """
    total_size_1 = 1
    total_size_2 = 1
    for s in tensor_in_sizes:
      total_size_1 *= s
    for s in filter_in_sizes:
      total_size_2 *= s
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
    x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
    with self.test_session(use_gpu=use_gpu):
      t1 = constant_op.constant(x1, shape=tensor_in_sizes)
      t2 = constant_op.constant(x2, shape=filter_in_sizes)
      # Accept either a scalar stride or one stride per spatial dimension;
      # batch and channel strides are always 1.
      if isinstance(stride, collections.Iterable):
        strides = [1] + list(stride) + [1]
      else:
        strides = [1, stride, stride, stride, 1]
      if data_format == "NCDHW":
        # Transpose both the data and the strides into NCDHW ordering.
        t1 = test_util.NHWCToNCHW(t1)
        strides = test_util.NHWCToNCHW(strides)
      conv = nn_ops.conv3d(t1, t2, strides, padding=padding,
                           data_format=data_format)
      if data_format == "NCDHW":
        # Convert back so callers can compare against NDHWC expectations.
        conv = test_util.NCHWToNHWC(conv)
      return conv

  def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride, padding,
                    expected):
    """Runs conv3d under every test config and checks the flattened output
    against 'expected'."""
    results = []
    for data_format, use_gpu in GetTestConfigs():
      result = self._SetupValuesForDevice(
          tensor_in_sizes,
          filter_in_sizes,
          stride,
          padding,
          data_format,
          use_gpu=use_gpu)
      results.append(result)
      # NOTE(review): tolerance keeps the value from the LAST loop
      # iteration and is applied to all results below — confirm intended.
      tolerance = 1e-2 if use_gpu else 1e-5
    with self.test_session() as sess:
      values = sess.run(results)
      for value in values:
        print("expected = ", expected)
        print("actual = ", value)
        self.assertAllClose(expected, value.flatten(), atol=tolerance,
                            rtol=1e-6)

  def testConv3D1x1x1Filter(self):
    expected_output = [
        30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0, 138.0, 171.0,
        204.0, 174.0, 216.0, 258.0, 210.0, 261.0, 312.0
    ]
    # These are equivalent to the Conv2D1x1 case.
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 3, 1, 3],
        filter_in_sizes=[1, 1, 1, 3, 3],
        stride=1,
        padding="VALID",
        expected=expected_output)
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 1, 3, 3],
        filter_in_sizes=[1, 1, 1, 3, 3],
        stride=1,
        padding="VALID",
        expected=expected_output)
    self._VerifyValues(
        tensor_in_sizes=[1, 1, 2, 3, 3],
        filter_in_sizes=[1, 1, 1, 3, 3],
        stride=1,
        padding="VALID",
        expected=expected_output)

  # Expected values computed using scipy's correlate function.
  def testConv3D2x2x2Filter(self):
    expected_output = [
        19554., 19962., 20370., 22110., 22590., 23070., 34890., 35730., 36570.,
        37446., 38358., 39270., 50226., 51498., 52770., 52782., 54126., 55470.
    ]
    # expected_shape = [1, 3, 1, 2, 5]
    self._VerifyValues(
        tensor_in_sizes=[1, 4, 2, 3, 3],  # b, z, y, x, fin
        filter_in_sizes=[2, 2, 2, 3, 3],  # z, y, x, fin, fout
        stride=1,
        padding="VALID",
        expected=expected_output)

  def testConv3DStrides(self):
    expected_output = [
        102., 151., 172., 193., 214., 235., 142.,
        438., 592., 613., 634., 655., 676., 394.,
        774., 1033., 1054., 1075., 1096., 1117., 646.,
        1894., 2503., 2524., 2545., 2566., 2587., 1486.,
        2230., 2944., 2965., 2986., 3007., 3028., 1738.,
        2566., 3385., 3406., 3427., 3448., 3469., 1990.,
        3686., 4855., 4876., 4897., 4918., 4939., 2830.,
        4022., 5296., 5317., 5338., 5359., 5380., 3082.,
        4358., 5737., 5758., 5779., 5800., 5821., 3334.,
    ]
    self._VerifyValues(
        tensor_in_sizes=[1, 5, 8, 7, 1],
        filter_in_sizes=[1, 2, 3, 1, 1],
        stride=[2, 3, 1],  # different stride for each spatial dimension
        padding="SAME",
        expected=expected_output)

  def testConv3D2x2x2FilterStride2(self):
    expected_output = [19554., 19962., 20370., 50226., 51498., 52770.]
    self._VerifyValues(
        tensor_in_sizes=[1, 4, 2, 3, 3],
        filter_in_sizes=[2, 2, 2, 3, 3],
        stride=2,
        padding="VALID",
        expected=expected_output)

  def testConv3DStride3(self):
    expected_output = [
        36564., 38022., 39480., 37824., 39354., 40884., 39084., 40686., 42288.,
        46644., 48678., 50712., 47904., 50010., 52116., 49164., 51342., 53520.,
        107124., 112614., 118104., 108384., 113946., 119508., 109644., 115278.,
        120912., 117204., 123270., 129336., 118464., 124602., 130740., 119724.,
        125934., 132144.
    ]
    self._VerifyValues(
        tensor_in_sizes=[1, 6, 7, 8, 2],
        filter_in_sizes=[3, 2, 1, 2, 3],
        stride=3,
        padding="VALID",
        expected=expected_output)

  def testConv3D2x2x2FilterStride2Same(self):
    expected_output = [
        19554., 19962., 20370., 10452., 10710., 10968., 50226., 51498., 52770.,
        23844., 24534., 25224.
    ]
    self._VerifyValues(
        tensor_in_sizes=[1, 4, 2, 3, 3],
        filter_in_sizes=[2, 2, 2, 3, 3],
        stride=2,
        padding="SAME",
        expected=expected_output)

  def testKernelSmallerThanStride(self):
    expected_output = [1., 3., 7., 9., 19., 21., 25., 27.]
    self._VerifyValues(
        tensor_in_sizes=[1, 3, 3, 3, 1],
        filter_in_sizes=[1, 1, 1, 1, 1],
        stride=2,
        padding="SAME",
        expected=expected_output)
    self._VerifyValues(
        tensor_in_sizes=[1, 3, 3, 3, 1],
        filter_in_sizes=[1, 1, 1, 1, 1],
        stride=2,
        padding="VALID",
        expected=expected_output)
    expected_output = [
        1484., 1592., 770., 2240., 2348., 1106., 1149., 1191., 539., 6776.,
        6884., 3122., 7532., 7640., 3458., 3207., 3249., 1421., 3005., 3035.,
        1225., 3215., 3245., 1309., 1013., 1022., 343.
    ]
    self._VerifyValues(
        tensor_in_sizes=[1, 7, 7, 7, 1],
        filter_in_sizes=[2, 2, 2, 1, 1],
        stride=3,
        padding="SAME",
        expected=expected_output)
    expected_output = [1484., 1592., 2240., 2348., 6776., 6884., 7532., 7640.]
    self._VerifyValues(
        tensor_in_sizes=[1, 7, 7, 7, 1],
        filter_in_sizes=[2, 2, 2, 1, 1],
        stride=3,
        padding="VALID",
        expected=expected_output)

  def testKernelSizeMatchesInputSize(self):
    self._VerifyValues(
        tensor_in_sizes=[1, 2, 1, 2, 1],
        filter_in_sizes=[2, 1, 2, 1, 2],
        stride=1,
        padding="VALID",
        expected=[50, 60])

  def _ConstructAndTestGradientForConfig(
      self, batch, input_shape, filter_shape, in_depth, out_depth, stride,
      padding, test_input, data_format, use_gpu):
    """Checks conv3d gradients numerically for one configuration.

    Args:
      batch: batch size.
      input_shape: spatial input shape (planes, rows, cols).
      filter_shape: spatial filter shape (planes, rows, cols).
      in_depth: number of input channels.
      out_depth: number of output channels.
      stride: scalar stride or iterable of three per-dimension strides.
      padding: "SAME" or "VALID".
      test_input: if True check the gradient w.r.t. the input, otherwise
        w.r.t. the filter.
      data_format: "NDHWC" or "NCDHW".
      use_gpu: whether to run on GPU.
    """
    input_planes, input_rows, input_cols = input_shape
    filter_planes, filter_rows, filter_cols = filter_shape
    input_shape = [batch, input_planes, input_rows, input_cols, in_depth]
    filter_shape = [
        filter_planes, filter_rows, filter_cols, in_depth, out_depth
    ]
    if isinstance(stride, collections.Iterable):
      strides = [1] + list(stride) + [1]
    else:
      strides = [1, stride, stride, stride, 1]
    # Compute the expected output shape by hand so the gradient checker
    # knows the size of dy.
    if padding == "VALID":
      output_planes = int(
          math.ceil((input_planes - filter_planes + 1.0) / strides[1]))
      output_rows = int(
          math.ceil((input_rows - filter_rows + 1.0) / strides[2]))
      output_cols = int(
          math.ceil((input_cols - filter_cols + 1.0) / strides[3]))
    else:
      output_planes = int(math.ceil(float(input_planes) / strides[1]))
      output_rows = int(math.ceil(float(input_rows) / strides[2]))
      output_cols = int(math.ceil(float(input_cols) / strides[3]))
    output_shape = [batch, output_planes, output_rows, output_cols, out_depth]
    input_size = 1
    for x in input_shape:
      input_size *= x
    filter_size = 1
    for x in filter_shape:
      filter_size *= x
    # Deterministic values in [0, 1) so runs are reproducible.
    input_data = [x * 1.0 / input_size for x in range(0, input_size)]
    filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]
    if test.is_gpu_available() and use_gpu:
      data_type = dtypes.float32
      # TODO(mjanusz): Modify gradient_checker to also provide max relative
      # error and synchronize the tolerance levels between the tests for forward
      # and backward computations.
      if test.is_gpu_available():
        tolerance = 5e-3
      else:
        # As of Aug 2016, higher tolerance is needed for some CPU architectures.
        # Runs on a single machine can also generate slightly different errors
        # because of multithreading.
        tolerance = 8e-3
    else:
      # float64 on CPU allows a much tighter tolerance.
      data_type = dtypes.float64
      tolerance = 1e-8
    with self.test_session(use_gpu=use_gpu):
      orig_input_tensor = constant_op.constant(
          input_data, shape=input_shape, dtype=data_type, name="input")
      filter_tensor = constant_op.constant(
          filter_data, shape=filter_shape, dtype=data_type, name="filter")
      if data_format == "NCDHW":
        input_tensor = test_util.NHWCToNCHW(orig_input_tensor)
        strides = test_util.NHWCToNCHW(strides)
      else:
        input_tensor = orig_input_tensor
      conv = nn_ops.conv3d(
          input_tensor, filter_tensor, strides, padding,
          data_format=data_format, name="conv")
      if data_format == "NCDHW":
        conv = test_util.NCHWToNHWC(conv)
      # Gradients are always checked against the original NDHWC input
      # tensor, regardless of data_format.
      if test_input:
        err = gradient_checker.compute_gradient_error(orig_input_tensor,
                                                      input_shape,
                                                      conv, output_shape)
      else:
        err = gradient_checker.compute_gradient_error(filter_tensor,
                                                      filter_shape, conv,
                                                      output_shape)
      print("conv3d gradient error = ", err)
      self.assertLess(err, tolerance)

  def ConstructAndTestGradient(self, **kwargs):
    """Runs the gradient check for every (data_format, use_gpu) config."""
    for data_format, use_gpu in GetTestConfigs():
      self._ConstructAndTestGradientForConfig(data_format=data_format,
                                              use_gpu=use_gpu, **kwargs)

  def testInputGradientValidPaddingStrideOne(self):
    self.ConstructAndTestGradient(
        batch=2,
        input_shape=(3, 5, 4),
        filter_shape=(3, 3, 3),
        in_depth=2,
        out_depth=3,
        stride=1,
        padding="VALID",
        test_input=True)

  def testFilterGradientValidPaddingStrideOne(self):
    self.ConstructAndTestGradient(
        batch=4,
        input_shape=(4, 6, 5),
        filter_shape=(2, 2, 2),
        in_depth=2,
        out_depth=3,
        stride=1,
        padding="VALID",
        test_input=False)

  def testInputGradientValidPaddingStrideTwo(self):
    self.ConstructAndTestGradient(
        batch=2,
        input_shape=(6, 3, 5),
        filter_shape=(3, 3, 3),
        in_depth=2,
        out_depth=3,
        stride=2,
        padding="VALID",
        test_input=True)

  def testFilterGradientValidPaddingStrideTwo(self):
    self.ConstructAndTestGradient(
        batch=2,
        input_shape=(7, 6, 5),
        filter_shape=(2, 2, 2),
        in_depth=2,
        out_depth=3,
        stride=2,
        padding="VALID",
        test_input=False)

  def testInputGradientValidPaddingStrideThree(self):
    self.ConstructAndTestGradient(
        batch=2,
        input_shape=(3, 7, 6),
        filter_shape=(3, 3, 3),
        in_depth=2,
        out_depth=3,
        stride=3,
        padding="VALID",
        test_input=True)

  def testFilterGradientValidPaddingStrideThree(self):
    self.ConstructAndTestGradient(
        batch=2,
        input_shape=(4, 4, 7),
        filter_shape=(4, 4, 4),
        in_depth=2,
        out_depth=3,
        stride=3,
        padding="VALID",
        test_input=False)

  def testInputGradientSamePaddingStrideOne(self):
    self.ConstructAndTestGradient(
        batch=2,
        input_shape=(3, 2, 2),
        filter_shape=(3, 2, 1),
        in_depth=2,
        out_depth=1,
        stride=1,
        padding="SAME",
        test_input=True)

  def testFilterGradientSamePaddingStrideOne(self):
    self.ConstructAndTestGradient(
        batch=2,
        input_shape=(3, 6, 5),
        filter_shape=(2, 2, 2),
        in_depth=2,
        out_depth=3,
        stride=1,
        padding="SAME",
        test_input=False)

  def testInputGradientSamePaddingStrideTwo(self):
    self.ConstructAndTestGradient(
        batch=2,
        input_shape=(6, 3, 4),
        filter_shape=(3, 3, 3),
        in_depth=2,
        out_depth=3,
        stride=2,
        padding="SAME",
        test_input=True)

  def testFilterGradientSamePaddingStrideTwo(self):
    self.ConstructAndTestGradient(
        batch=4,
        input_shape=(7, 3, 5),
        filter_shape=(2, 2, 2),
        in_depth=2,
        out_depth=3,
        stride=2,
        padding="SAME",
        test_input=False)

  def testInputGradientSamePaddingStrideThree(self):
    self.ConstructAndTestGradient(
        batch=2,
        input_shape=(9, 3, 6),
        filter_shape=(3, 3, 3),
        in_depth=2,
        out_depth=3,
        stride=3,
        padding="SAME",
        test_input=True)

  def testFilterGradientSamePaddingStrideThree(self):
    self.ConstructAndTestGradient(
        batch=2,
        input_shape=(9, 4, 7),
        filter_shape=(4, 4, 4),
        in_depth=2,
        out_depth=3,
        stride=3,
        padding="SAME",
        test_input=False)

  def testInputGradientSamePaddingDifferentStrides(self):
    self.ConstructAndTestGradient(
        batch=1,
        input_shape=(5, 8, 7),
        filter_shape=(1, 2, 3),
        in_depth=2,
        out_depth=3,
        stride=[2, 3, 1],
        padding="SAME",
        test_input=True)

  def testFilterGradientKernelSizeMatchesInputSize(self):
    self.ConstructAndTestGradient(
        batch=2,
        input_shape=(5, 4, 3),
        filter_shape=(5, 4, 3),
        in_depth=2,
        out_depth=3,
        stride=1,
        padding="VALID",
        test_input=False)

  def testInputGradientKernelSizeMatchesInputSize(self):
    self.ConstructAndTestGradient(
        batch=2,
        input_shape=(5, 4, 3),
        filter_shape=(5, 4, 3),
        in_depth=2,
        out_depth=3,
        stride=1,
        padding="VALID",
        test_input=True)

  # Deliberately renamed so the test runner does not pick it up
  # (presumably flaky/unsupported — confirm before re-enabling).
  def disabledtestFilterGradientSamePaddingDifferentStrides(self):
    self.ConstructAndTestGradient(
        batch=1,
        input_shape=(5, 8, 7),
        filter_shape=(1, 2, 3),
        in_depth=2,
        out_depth=3,
        stride=[2, 3, 1],
        padding="SAME",
        test_input=False)
if __name__ == "__main__":
  # Delegate test discovery and execution to the TensorFlow test runner.
  test.main()
| |
"""Main module for the Slinket component.
Responsible for the top-level processing of Slinket. Most functionality is in
the Chunk class.
"""
from __future__ import absolute_import
from __future__ import print_function
from components.common_modules.component import TarsqiComponent
from components.common_modules.utils import get_events, get_words_as_string
from library.slinket.main import SLINKET_DICTS
from library.tarsqi_constants import SLINKET
from library.main import LIBRARY
from utilities import logger
DEBUG = False
class Slinket(TarsqiComponent):
    """Class that implements the Slinket SLINK and ALINK parser. Only lexical
    alinks and slinks are found.

    Purpose clauses are not yet implemented. But note that some purpose clause
    SLINKS are already introduced in the lexically-triggered process. This is so
    for those events that discoursively tend to appear modified by a Purpose
    Clause (e.g., 'address'). The data are based on TimeBank. Conditionals are
    not implemented either.

    Instance variables:
       NAME - a string
       doctree - a TarsqiTree
       docelement - a docelement Tag
    """

    def __init__(self):
        """Initialize Slinket. Sets doctree and docelement to None, these are
        added by process_doctree()."""
        self.NAME = SLINKET
        self.doctree = None
        self.docelement = None
        # Load the Slinket dictionaries if they have not been loaded yet
        SLINKET_DICTS.load()

    def process_doctree(self, doctree):
        """Find alinks and slinks in doctree and export them to self.docelement."""
        self.doctree = doctree
        if DEBUG:
            # Dump every event node and the whole tree for inspection.
            for s in doctree:
                for e in s:
                    print(e)
                    e.print_vars()
            doctree.pp()
        self.docelement = self.doctree.docelement
        # Events must be indexed before link search can run.
        self._build_event_dictionary()
        for sentence in self.doctree:
            # print get_words_as_string(sentence)
            self._find_links(self.doctree, sentence)
        self._add_links_to_document()

    def _build_event_dictionary(self):
        """Creates a dictionary with events on the self.doctree variable and adds
        event lists (which consist of pairs of event location and event id) to
        all sentences in self.doctree."""
        self.doctree.events = {}
        for event in get_events(self.doctree):
            eid = event.attrs[LIBRARY.timeml.EID]
            self.doctree.events[eid] = event.attrs
            pos = event.dtrs[0].pos
            # The value found under POS is stored as EPOS and then POS is
            # overwritten with the token's part of speech.
            epos = self.doctree.events[eid][LIBRARY.timeml.POS]
            form = event.dtrs[0].getText()
            self.doctree.events[eid][LIBRARY.timeml.FORM] = form
            self.doctree.events[eid][LIBRARY.timeml.EPOS] = epos
            self.doctree.events[eid][LIBRARY.timeml.POS] = pos
        for sentence in self.doctree:
            sentence.set_event_list()

    def _find_links(self, doc, sentence):
        """For each event in the sentence, check whether an Alink or Slink can be
        created for it."""
        eventNum = -1
        for (eLocation, eid) in sentence.eventList:
            eventNum += 1
            event_expr = EventExpression(eid, eLocation, eventNum, doc.events[eid])
            logger.debug(event_expr.as_verbose_string())
            # An event may be both an alink and a slink trigger.
            if event_expr.can_introduce_alink():
                logger.debug("Alink candidate: " + event_expr.form)
                self._find_alink(sentence, event_expr)
            if event_expr.can_introduce_slink():
                logger.debug("Slink candidate: " + event_expr.form)
                self._find_lexically_based_slink(sentence, event_expr)

    def _find_alink(self, sentence, event_expr):
        """Try to find an alink with event_expr as the trigger, alinks are created
        as a side effect."""
        evNode = sentence[event_expr.locInSent]
        if evNode is None:
            logger.error("No node found at locInSent=%s" % event_expr.locInSent)
            return
        # Forward patterns are tried first; backward only if nothing matched.
        forwardFSAs = event_expr.alinkingContexts('forward')
        if forwardFSAs:
            alink_created = evNode.find_forward_alink(forwardFSAs)
            if not alink_created:
                backwardFSAs = event_expr.alinkingContexts('backwards')
                if backwardFSAs:
                    evNode.find_backward_alink(backwardFSAs)

    def _find_lexically_based_slink(self, sentence, event_expr):
        """Try to find lexically based Slinks for an instance of EventExpression
        using forward, backward and reporting FSA paterns. No return value, if an
        Slink is found, it will be created by the chunk that embeds the Slink
        triggering event."""
        evNode = sentence[event_expr.locInSent]
        if evNode is None:
            logger.error("No node found at locInSent=%s" % event_expr.locInSent)
            return
        slink_created = False
        logger.debug("Sentence element class: %s" % evNode.__class__.__name__)
        # Try forward, then backward, then reporting patterns, stopping as
        # soon as one of them creates a slink.
        forwardFSAs = event_expr.slinkingContexts('forward')
        if forwardFSAs:
            logger.debug("Applying FORWARD slink FSAs")
            slink_created = evNode.find_forward_slink(forwardFSAs)
            logger.debug("forward slink created = %s" % slink_created)
        if not slink_created:
            backwardFSAs = event_expr.slinkingContexts('backwards')
            if backwardFSAs:
                logger.debug("Applying BACKWARD slink FSAs")
                slink_created = evNode.find_backward_slink(backwardFSAs)
                logger.debug("backward slink created = %s" % slink_created)
        if not slink_created:
            reportingFSAs = event_expr.slinkingContexts('reporting')
            if reportingFSAs:
                logger.debug("Applying REPORTING slink FSAs")
                slink_created = evNode.find_reporting_slink(reportingFSAs)
                logger.debug("reporting slink created = %s" % slink_created)

    def _add_links_to_document(self):
        """Export all alinks and slinks collected on the tree to the document."""
        for alink in self.doctree.alinks:
            self._add_link(LIBRARY.timeml.ALINK, alink.attrs)
        for slink in self.doctree.slinks:
            self._add_link(LIBRARY.timeml.SLINK, slink.attrs)

    def _add_link(self, tagname, attrs):
        """Add the link to the TagRepository instance on the TarsqiDocument."""
        attrs[LIBRARY.timeml.ORIGIN] = SLINKET
        logger.debug("Adding %s: %s" % (tagname, attrs))
        self.doctree.tarsqidoc.tags.add_tag(tagname, -1, -1, attrs)
class EventExpression(object):
    """Class that wraps an event in a way that's convenient for Slinket.

    Instance variables:
       dict - dictionary of event attributes
       eid
       eiid
       tense
       aspect
       nf_morph - VERB, NOUN or ADJECTIVE, sometimes called epos
       polarity - optional attribute (that is, it can be None)
       modality - optional attribute (that is, it can be None)
       evClass - the event class
       pos - the part-of-speech of the event token
       form - the actual string
       locInSent - idx of node bearing event tag in the document, wrt to its
          sentence parent node.
       eventNum - position of event in sentence.eventList (needed for
          potential slinking with previous or next events in list)
    """

    def __init__(self, eid, locInSent, eventNum, event_attributes):
        """Set all attributes, using default values if appropriate. The arguments
        are: an identifier string, an integer reflecting the location of the
        event in the sentence, an integer reflecting the position of the event
        on the eventList on the sentence and a dictionary with event
        attributes."""
        self.locInSent = locInSent
        self.eventNum = eventNum
        self.dict = event_attributes
        self.eid = eid
        self.eiid = self.get_event_attribute(LIBRARY.timeml.EIID)
        self.tense = self.get_event_attribute(LIBRARY.timeml.TENSE)
        self.aspect = self.get_event_attribute(LIBRARY.timeml.ASPECT)
        self.nf_morph = self.get_event_attribute(LIBRARY.timeml.EPOS)
        self.polarity = self.get_event_attribute(LIBRARY.timeml.POL, optional=True)
        self.modality = self.get_event_attribute(LIBRARY.timeml.MOD, optional=True)
        self.evClass = self.get_event_attribute(LIBRARY.timeml.CLASS)
        self.pos = self.get_event_attribute(LIBRARY.timeml.POS)
        self.form = self.get_event_attribute(LIBRARY.timeml.FORM)

    def as_verbose_string(self):
        """Return a multi-line debug representation of the event."""
        return \
            "%s: %s\n" % (self.__class__.__name__, self.form) + \
            "\tpos=%s TENSE=%s ASPECT=%s CLASS=%s\n" \
            % (self.pos, self.tense, self.aspect, self.evClass) + \
            "\tNF_MORPH=%s MODALITY=%s POLARITY=%s\n" \
            % (self.nf_morph, self.modality, self.polarity) + \
            "\tCLASS=%s locInSent=%s eventNum=%s\n" \
            % (self.evClass, self.locInSent, self.eventNum)

    def get_event_attribute(self, attr, optional=False):
        """Return the value of an attribute 'attr' from self.dict. If the attribute
        is not in the dictionary, then (i) return a default value if there is
        one, and (ii) write an error if the attribute is not optional."""
        val = self.dict.get(attr)
        if val is None and not optional:
            logger.error("No %s attribute for current event" % attr)
        # Missing polarity defaults to positive.
        if val is None and attr == LIBRARY.timeml.POL:
            val = 'POS'
        return val

    def pp(self):
        self.pretty_print()

    def pretty_print(self):
        print(self.as_verbose_string())

    def can_introduce_alink(self):
        """Returns True if the EventExpression instance can introduce an Alink,
        False otherwise. This ability is determined by dictionary lookup."""
        form = self.form.lower()
        if self.nf_morph == LIBRARY.timeml.VERB:
            return form in SLINKET_DICTS.alinkVerbsDict
        if self.nf_morph == LIBRARY.timeml.NOUN:
            return form in SLINKET_DICTS.alinkNounsDict
        return False

    def can_introduce_slink(self):
        """Returns True if the EventExpression instance can introduce an Slink,
        False otherwise. This ability is determined by dictionary lookup."""
        form = self.form.lower()
        if self.nf_morph == LIBRARY.timeml.VERB:
            return form in SLINKET_DICTS.slinkVerbsDict
        if self.nf_morph == LIBRARY.timeml.NOUN:
            return form in SLINKET_DICTS.slinkNounsDict
        if self.nf_morph == LIBRARY.timeml.ADJECTIVE:
            return form in SLINKET_DICTS.slinkAdjsDict
        return False

    def alinkingContexts(self, key):
        """Returns the list of alink patterns from the dictionary."""
        form = self.form.lower()
        if self.nf_morph == LIBRARY.timeml.VERB:
            pattern_dictionary = SLINKET_DICTS.alinkVerbsDict
        elif self.nf_morph == LIBRARY.timeml.NOUN:
            pattern_dictionary = SLINKET_DICTS.alinkNounsDict
        else:
            # Fixed: this warning previously said "SLINKS", copied from
            # slinkingContexts, which made the log misleading.
            logger.warn("ALINKS of type " + str(key) + " for EVENT form " +
                        str(form) + " should be in the dict")
            return []
        return pattern_dictionary.get(form, {}).get(key, [])

    def slinkingContexts(self, key):
        """Returns the list of slink patterns from the dictionary."""
        form = self.form.lower()
        if self.nf_morph == LIBRARY.timeml.VERB:
            pattern_dictionary = SLINKET_DICTS.slinkVerbsDict
        elif self.nf_morph == LIBRARY.timeml.NOUN:
            pattern_dictionary = SLINKET_DICTS.slinkNounsDict
        elif self.nf_morph == LIBRARY.timeml.ADJECTIVE:
            pattern_dictionary = SLINKET_DICTS.slinkAdjsDict
        else:
            logger.warn("SLINKS of type " + str(key) + " for EVENT form " +
                        str(form) + " should be in the dict")
            return []
        return pattern_dictionary.get(form, {}).get(key, [])
| |
# Copyright 2008 The Tor Project, Inc. See LICENSE for licensing information.
# Copyright 2010 The Update Framework. See LICENSE for licensing information.
import re
import sys
import tuf
class Schema:
  """Base class for all schemas.

  A schema matches a set of possible Python objects, of types that are
  encodable in JSON."""

  def matches(self, obj):
    """Return True if 'obj' matches this schema, False if it doesn't."""
    try:
      self.check_match(obj)
      return True
    except tuf.FormatException:
      return False

  def check_match(self, obj):
    """Raise thandy.FormatException if 'obj' does not match this schema.
    Abstract method."""
    raise NotImplementedError()
class Any(Schema):
  """Schema that accepts any single object whatsoever.

  >>> s = Any()
  >>> s.matches("A String")
  True
  >>> s.matches([1, "list"])
  True
  """
  def check_match(self, obj):
    # Everything matches; there is nothing to verify.
    pass
class RE(Schema):
  """
  Matches any string that matches a given regular expression.

  >>> s = RE("h.*d")
  >>> s.matches("hello world")
  True
  >>> s.matches("Hello World")
  False
  >>> s.matches("hello world!")
  False
  >>> s.matches([33, "Hello"])
  False
  """
  def __init__(self, pat=None, modifiers=0, reObj=None, reName=None):
    """Make a new RE schema

       pat -- The pattern to match, or None if reObj is provided.
       modifiers -- Flags to use when compiling the pattern.
       reObj -- A compiled regular expression object.
       reName -- Name used in error messages; derived from 'pat' if absent.
    """
    if not reObj:
      # re.match anchors at the start only; anchor the end as well so the
      # whole string must match the pattern.
      if not pat.endswith("$"):
        pat += "$"
      reObj = re.compile(pat, modifiers)
    self._re = reObj
    # PEP 8: compare against None with 'is' / 'is not', not '==' / '!='.
    if reName is None:
      if pat is not None:
        reName = "pattern /%s/" % pat
      else:
        reName = "pattern"
    self._reName = reName

  def check_match(self, obj):
    # Non-strings never match, even if str(obj) would.
    if not isinstance(obj, basestring) or not self._re.match(obj):
      raise tuf.FormatException("%r did not match %s"
                                % (obj, self._reName))
class Str(Schema):
  """Schema accepting exactly one particular string.

  >>> s = Str("Hi")
  >>> s.matches("Hi")
  True
  >>> s.matches("Not hi")
  False
  """
  def __init__(self, val):
    # The single value this schema accepts.
    self._str = val

  def check_match(self, obj):
    if self._str != obj:
      raise tuf.FormatException("Expected %r; got %r" % (self._str, obj))
class AnyStr(Schema):
  """Schema accepting any string (byte or unicode) and nothing else.

  >>> s = AnyStr()
  >>> s.matches("")
  True
  >>> s.matches("a string")
  True
  >>> s.matches(["a"])
  False
  >>> s.matches(3)
  False
  >>> s.matches(u"a unicode string")
  True
  >>> s.matches({})
  False
  """
  def __init__(self):
    pass

  def check_match(self, obj):
    if isinstance(obj, basestring):
      return
    raise tuf.FormatException("Expected a string; got %r" % obj)
class OneOf(Schema):
  """Schema accepting any object that matches at least one alternative.

  >>> s = OneOf([ListOf(Int()), Str("Hello"), Str("bye")])
  >>> s.matches(3)
  False
  >>> s.matches("bye")
  True
  >>> s.matches([])
  True
  >>> s.matches([1,2])
  True
  >>> s.matches(["Hi"])
  False
  """
  def __init__(self, alternatives):
    self._subschemas = alternatives

  def check_match(self, obj):
    # Alternatives are tried in order; the first match wins.
    if not any(alt.matches(obj) for alt in self._subschemas):
      raise tuf.FormatException("Object matched no recognized alternative")
class AllOf(Schema):
  """Matches the intersection of a list of schemas."""

  def __init__(self, required):
    # Copy so later mutation of the caller's list cannot affect us.
    self._subschemas = list(required)

  def check_match(self, obj):
    # Every subschema must accept the object; the first failure raises.
    for schema in self._subschemas:
      schema.check_match(obj)
class ListOf(Schema):
  """
  Matches a homogenous list of some subschema.
  >>> s = ListOf(RE("(?:..)*"))
  >>> s.matches("hi")
  False
  >>> s.matches([])
  True
  >>> s.matches({})
  False
  >>> s.matches(["Hi", "this", "list", "is", "full", "of", "even", "strs"])
  True
  >>> s.matches(["This", "one", "is not"])
  False
  >>> s = ListOf(Int(), minCount=3, maxCount=10)
  >>> s.matches([3]*2)
  False
  >>> s.matches([3]*3)
  True
  >>> s.matches([3]*10)
  True
  >>> s.matches([3]*11)
  False
  """
  def __init__(self, schema, minCount=0, maxCount=sys.maxint, listName="list"):
    # listName only appears in error messages.
    self._schema = schema
    self._minCount = minCount
    self._maxCount = maxCount
    self._listName = listName

  def check_match(self, obj):
    # Tuples are accepted as lists too.
    if not isinstance(obj, (list, tuple)):
      raise tuf.FormatException("Expected %s; got %r"
                                % (self._listName, obj))
    for item in obj:
      try:
        self._schema.check_match(item)
      except tuf.FormatException, e:
        # Re-raise with the list name prepended for context.
        raise tuf.FormatException("%s in %s" % (e, self._listName))
    # Length is validated after the items, so a bad element is reported
    # before a bad length.
    if not (self._minCount <= len(obj) <= self._maxCount):
      raise tuf.FormatException("Length of %s out of range"
                                % self._listName)
class Struct(Schema):
  """
  Matches a non-homogenous list of items.
  >>> s = Struct([ListOf(AnyStr()), AnyStr(), Str("X")])
  >>> s.matches(False)
  False
  >>> s.matches("Foo")
  False
  >>> s.matches([[], "Q", "X"])
  True
  >>> s.matches([[], "Q", "D"])
  False
  >>> s.matches([[3], "Q", "X"])
  False
  >>> s.matches([[], "Q", "X", "Y"])
  False
  >>> s = Struct([Str("X")], allowMore=True)
  >>> s.matches([])
  False
  >>> s.matches(["X"])
  True
  >>> s.matches(["X", "Y"])
  True
  >>> s.matches(["X", ["Y", "Z"]])
  True
  >>> s.matches([["X"]])
  False
  >>> s = Struct([Str("X"), Int()], [Int()])
  >>> s.matches([])
  False
  >>> s.matches({})
  False
  >>> s.matches(["X"])
  False
  >>> s.matches(["X", 3])
  True
  >>> s.matches(["X", 3, 9])
  True
  >>> s.matches(["X", 3, 9, 11])
  False
  >>> s.matches(["X", 3, "A"])
  False
  """
  def __init__(self, subschemas, optschemas=None, allowMore=False,
               structName="list"):
    """subschemas are required positional fields; optschemas may be absent
    from the end; allowMore permits extra unchecked trailing items."""
    # Use a None sentinel instead of a mutable default argument ([]),
    # which would be shared across every call. Default behavior is
    # unchanged: no optional fields.
    if optschemas is None:
      optschemas = []
    self._subschemas = subschemas + optschemas
    self._min = len(subschemas)
    self._allowMore = allowMore
    self._structName = structName

  def check_match(self, obj):
    if not isinstance(obj, (list, tuple)):
      raise tuf.FormatException("Expected %s; got %r"
                                % (self._structName, obj))
    elif len(obj) < self._min:
      raise tuf.FormatException(
        "Too few fields in %s" % self._structName)
    elif len(obj) > len(self._subschemas) and not self._allowMore:
      raise tuf.FormatException(
        "Too many fields in %s" % self._structName)
    # zip stops at the shorter sequence, so extra items (allowMore) are
    # simply not checked.
    for item, schema in zip(obj, self._subschemas):
      schema.check_match(item)
class DictOf(Schema):
  """
  Matches a mapping from items matching a particular key-schema
  to items matching a value-schema.  Note that in JSON, keys must
  be strings.
  >>> s = DictOf(RE(r'[aeiou]+'), Struct([AnyStr(), AnyStr()]))
  >>> s.matches("")
  False
  >>> s.matches({})
  True
  >>> s.matches({"a": ["x", "y"], "e" : ["", ""]})
  True
  >>> s.matches({"a": ["x", 3], "e" : ["", ""]})
  False
  >>> s.matches({"a": ["x", "y"], "e" : ["", ""], "d" : ["a", "b"]})
  False
  """
  def __init__(self, keySchema, valSchema):
    self._keySchema = keySchema
    self._valSchema = valSchema

  def check_match(self, obj):
    # EAFP: anything without iteritems() is treated as "not a dict".
    # Renamed the local from 'iter' to avoid shadowing the builtin.
    try:
      items = obj.iteritems()
    except AttributeError:
      raise tuf.FormatException("Expected a dict; got %r" % obj)
    for k, v in items:
      self._keySchema.check_match(k)
      self._valSchema.check_match(v)
class Opt:
  """Helper; applied to a value in Obj to mark it optional.

  >>> s = Obj(k1=Str("X"), k2=Opt(Str("Y")))
  >>> s.matches({'k1': "X", 'k2': "Y"})
  True
  >>> s.matches({'k1': "X", 'k2': "Z"})
  False
  >>> s.matches({'k1': "X"})
  True
  """
  def __init__(self, schema):
    # The wrapped schema still applies whenever the key is present.
    self._schema = schema

  def check_match(self, obj):
    # Delegate directly to the wrapped schema.
    self._schema.check_match(obj)
class Obj(Schema):
  """
  Matches a dict from specified keys to key-specific types.  Unrecognized
  keys are allowed.
  >>> s = Obj(a=AnyStr(), bc=Struct([Int(), Int()]))
  >>> s.matches({'a':"ZYYY", 'bc':[5,9]})
  True
  >>> s.matches({'a':"ZYYY", 'bc':[5,9], 'xx':5})
  True
  >>> s.matches({'a':"ZYYY", 'bc':[5,9,3]})
  False
  >>> s.matches({'a':"ZYYY"})
  False
  """
  def __init__(self, _objname="object", **d):
    # _objname appears only in error messages; the keyword arguments map
    # each expected key to its schema (wrap a schema in Opt to make the
    # key optional).
    self._objname = _objname
    self._required = d.items()

  def check_match(self, obj):
    if not isinstance(obj, dict):
      raise tuf.FormatException("Wanted a %s; did not get a dict" %
                                self._objname)
    for k, schema in self._required:
      try:
        item = obj[k]
      except KeyError:
        # A missing key is fine only if its schema was wrapped in Opt.
        if not isinstance(schema, Opt):
          raise tuf.FormatException("Missing key %s in %s"
                                    % (k, self._objname))
      else:
        try:
          schema.check_match(item)
        except tuf.FormatException, e:
          # Re-raise with the object and key names for context.
          raise tuf.FormatException("%s in %s.%s"
                                    % (e, self._objname, k))
class TaggedObj(Schema):
  """
  Matches an object based on the value of a particular 'tag' field.
  If tagIsOptional, matches any object when the tag is missing.
  If ignoreUnrecognized, matches any object when the tag is present
  but the value is not one we know.

  >>> s = TaggedObj('tp', a=Obj(int1=Int()), b=Obj(s=AnyStr()))
  >>> s.matches(3)
  False
  >>> s.matches([])
  False
  >>> s.matches({})
  False
  >>> s.matches({'tp' : 'fred'})
  True
  >>> s.matches({'tp' : 'a'})
  False
  >>> s.matches({'tp' : 'a', 'int1': 3})
  True
  >>> s.matches({'tp' : 'a', 'int1': []})
  False
  >>> s.matches({'tp' : 'b', 'int1': 3, 's': 'tt'})
  True
  """
  def __init__(self, tagName, tagIsOptional=False, ignoreUnrecognized=True,
               **tagvals):
    self._tagName = tagName
    self._tagOpt = tagIsOptional
    self._ignoreOthers = ignoreUnrecognized
    # Maps each known tag value to the schema for the whole object.
    self._tagvals = tagvals

  def check_match(self, obj):
    try:
      tag = obj[self._tagName]
    except KeyError:
      if not self._tagOpt:
        raise tuf.FormatException("Missing tag %s on object" %
                                  self._tagName)
      return
    except TypeError:
      # obj is not subscriptable by string at all.
      raise tuf.FormatException("Got a %s, not a tagged object" %
                                type(obj))
    if not isinstance(tag, basestring):
      raise tuf.FormatException("Expected a string for %s; got a %s" % (
        self._tagName, type(tag)))
    if tag not in self._tagvals:
      if not self._ignoreOthers:
        raise tuf.FormatException("Unrecognized value %s for %s" % (
          tag, self._tagName))
      return
    # The matching schema validates the entire object, not just the tag.
    self._tagvals[tag].check_match(obj)
class Int(Schema):
  """
  Matches an integer within an (inclusive) range.

  >>> s = Int()
  >>> s.matches(99)
  True
  >>> s.matches(False)
  False
  >>> s.matches(0L)
  True
  >>> s.matches("a string")
  False
  >>> Int(lo=10, hi=30).matches(25)
  True
  >>> Int(lo=10, hi=30).matches(5)
  False
  """
  def __init__(self, lo=-sys.maxint, hi=sys.maxint):
    self._lo = lo
    self._hi = hi

  def check_match(self, obj):
    # We need to check for bool as a special case, since bool
    # is for historical reasons a subtype of int.
    if isinstance(obj, bool) or not isinstance(obj, (int, long)):
      raise tuf.FormatException("Got %r instead of an integer" % obj)
    if not (self._lo <= obj <= self._hi):
      raise tuf.FormatException("%r not in range [%r,%r]"
                                % (obj, self._lo, self._hi))
class Bool(Schema):
    """
    Matches a boolean.
    >>> s = Bool()
    >>> s.matches(True) and s.matches(False)
    True
    >>> s.matches(11)
    False
    """
    def __init__(self):
        # No configuration: only True/False ever match.
        pass
    def check_match(self, obj):
        # Guard-clause form: accept genuine bools, reject everything else
        # (including ints, which bool subclasses).
        if isinstance(obj, bool):
            return
        raise tuf.FormatException("Got %r instead of a boolean" % obj)
class Func(Schema):
    """Matches any object for which a user-supplied callable does not
    return False.  An optional base schema is checked first; note that
    only a literal False return (not merely a falsy one) is rejected.
    """
    def __init__(self, fn, baseSchema=None):
        self._fn = fn
        self._base = baseSchema
    def check_match(self, obj):
        base = self._base
        if base:
            # Delegate structural validation before the predicate runs.
            base.check_match(obj)
        if self._fn(obj) is False:
            raise tuf.FormatException("%s returned False" % self._fn)
| |
# Copyright 2011 OpenStack Foundation
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume drivers for libvirt."""
import errno
import glob
import os
import re
import time
import urllib2
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import strutils
import six
import six.moves.urllib.parse as urlparse
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova import paths
from nova.storage import linuxscsi
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import quobyte
from nova.virt.libvirt import remotefs
from nova.virt.libvirt import utils as libvirt_utils
LOG = logging.getLogger(__name__)
# Tunables for attaching volumes to libvirt guests.  All options are
# registered under the [libvirt] configuration group below.
#
# Fix: the rbd_secret_uuid help text was built from adjacent string
# literals with no separating space ("...rbd_user" "volumes"), which
# rendered as "rbd_uservolumes"; a space has been added.
volume_opts = [
    cfg.IntOpt('num_iscsi_scan_tries',
               default=5,
               help='Number of times to rescan iSCSI target to find volume'),
    cfg.IntOpt('num_iser_scan_tries',
               default=5,
               help='Number of times to rescan iSER target to find volume'),
    cfg.StrOpt('rbd_user',
               help='The RADOS client name for accessing rbd volumes'),
    cfg.StrOpt('rbd_secret_uuid',
               help='The libvirt UUID of the secret for the rbd_user '
                    'volumes'),
    cfg.StrOpt('nfs_mount_point_base',
               default=paths.state_path_def('mnt'),
               help='Directory where the NFS volume is mounted on the'
                    ' compute node'),
    cfg.StrOpt('nfs_mount_options',
               help='Mount options passed to the NFS client. See section '
                    'of the nfs man page for details'),
    cfg.StrOpt('smbfs_mount_point_base',
               default=paths.state_path_def('mnt'),
               help='Directory where the SMBFS shares are mounted on the '
                    'compute node'),
    cfg.StrOpt('smbfs_mount_options',
               default='',
               help='Mount options passed to the SMBFS client. See '
                    'mount.cifs man page for details. Note that the '
                    'libvirt-qemu uid and gid must be specified.'),
    cfg.IntOpt('num_aoe_discover_tries',
               default=3,
               help='Number of times to rediscover AoE target to find volume'),
    cfg.StrOpt('glusterfs_mount_point_base',
               default=paths.state_path_def('mnt'),
               help='Directory where the glusterfs volume is mounted on the '
                    'compute node'),
    cfg.BoolOpt('iscsi_use_multipath',
                default=False,
                help='Use multipath connection of the iSCSI volume'),
    cfg.BoolOpt('iser_use_multipath',
                default=False,
                help='Use multipath connection of the iSER volume'),
    cfg.StrOpt('scality_sofs_config',
               help='Path or URL to Scality SOFS configuration file'),
    cfg.StrOpt('scality_sofs_mount_point',
               default='$state_path/scality',
               help='Base dir where Scality SOFS shall be mounted'),
    cfg.ListOpt('qemu_allowed_storage_drivers',
                default=[],
                help='Protocols listed here will be accessed directly '
                     'from QEMU. Currently supported protocols: [gluster]'),
    cfg.StrOpt('quobyte_mount_point_base',
               default=paths.state_path_def('mnt'),
               help='Directory where the Quobyte volume is mounted on the '
                    'compute node'),
    cfg.StrOpt('quobyte_client_cfg',
               help='Path to a Quobyte Client configuration file.'),
    cfg.StrOpt('iscsi_iface',
               deprecated_name='iscsi_transport',
               help='The iSCSI transport iface to use to connect to target in '
                    'case offload support is desired. Supported transports '
                    'are be2iscsi, bnx2i, cxgb3i, cxgb4i, qla4xxx and ocs. '
                    'Default format is transport_name.hwaddress and can be '
                    'generated manually or via iscsiadm -m iface'),
               # iser is also supported, but use LibvirtISERVolumeDriver
               # instead
]
CONF = cfg.CONF
CONF.register_opts(volume_opts, 'libvirt')
class LibvirtBaseVolumeDriver(object):
    """Base class for volume drivers."""
    def __init__(self, connection, is_block_dev):
        # connection: the libvirt compute driver (exposes _host helpers
        # used below for secret management and version lookup).
        self.connection = connection
        # is_block_dev: feeds pick_disk_driver_name() when building config.
        self.is_block_dev = is_block_dev
    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt.

        Builds a LibvirtConfigGuestDisk from disk_info plus optional
        block-size, QoS and access-mode data found in
        connection_info['data'].  Raises InvalidVolumeAccessMode for an
        access_mode other than 'ro'/'rw'.
        """
        conf = vconfig.LibvirtConfigGuestDisk()
        conf.driver_name = libvirt_utils.pick_disk_driver_name(
            self.connection._host.get_version(),
            self.is_block_dev
        )
        conf.source_device = disk_info['type']
        conf.driver_format = "raw"
        conf.driver_cache = "none"
        conf.target_dev = disk_info['dev']
        conf.target_bus = disk_info['bus']
        conf.serial = connection_info.get('serial')
        # Support for block size tuning
        data = {}
        if 'data' in connection_info:
            data = connection_info['data']
        if 'logical_block_size' in data:
            conf.logical_block_size = data['logical_block_size']
        if 'physical_block_size' in data:
            conf.physical_block_size = data['physical_block_size']
        # Extract rate_limit control parameters
        if 'qos_specs' in data and data['qos_specs']:
            tune_opts = ['total_bytes_sec', 'read_bytes_sec',
                         'write_bytes_sec', 'total_iops_sec',
                         'read_iops_sec', 'write_iops_sec']
            specs = data['qos_specs']
            if isinstance(specs, dict):
                # NOTE(review): iteritems() is Python 2 only; consistent
                # with the rest of this file.
                for k, v in specs.iteritems():
                    if k in tune_opts:
                        # Map e.g. 'read_iops_sec' -> conf.disk_read_iops_sec.
                        new_key = 'disk_' + k
                        setattr(conf, new_key, v)
            else:
                LOG.warn(_LW('Unknown content in connection_info/'
                             'qos_specs: %s'), specs)
        # Extract access_mode control parameters
        if 'access_mode' in data and data['access_mode']:
            access_mode = data['access_mode']
            if access_mode in ('ro', 'rw'):
                conf.readonly = access_mode == 'ro'
            else:
                LOG.error(_LE('Unknown content in '
                              'connection_info/access_mode: %s'),
                          access_mode)
                raise exception.InvalidVolumeAccessMode(
                    access_mode=access_mode)
        return conf
    def _get_secret_uuid(self, conf, password=None):
        # Reuse an existing libvirt secret for (protocol, name) or create
        # one holding the given password; return its UUID string.
        secret = self.connection._host.find_secret(conf.source_protocol,
                                                   conf.source_name)
        if secret is None:
            secret = self.connection._host.create_secret(conf.source_protocol,
                                                         conf.source_name,
                                                         password)
        return secret.UUIDString()
    def _delete_secret_by_name(self, connection_info):
        # Remove the libvirt secret created for an iSCSI attachment.
        # rbd secrets are configured statically (rbd_secret_uuid), so
        # they are deliberately left alone.
        source_protocol = connection_info['driver_volume_type']
        netdisk_properties = connection_info['data']
        if source_protocol == 'rbd':
            return
        elif source_protocol == 'iscsi':
            usage_type = 'iscsi'
            usage_name = ("%(target_iqn)s/%(target_lun)s" %
                          netdisk_properties)
            self.connection._host.delete_secret(usage_type, usage_name)
    def connect_volume(self, connection_info, disk_info):
        """Connect the volume. Returns xml for libvirt."""
        pass
    def disconnect_volume(self, connection_info, disk_dev):
        """Disconnect the volume."""
        pass
class LibvirtVolumeDriver(LibvirtBaseVolumeDriver):
    """Class for volumes backed by local file."""
    def __init__(self, connection):
        # Local files are presented to the guest as block devices.
        super(LibvirtVolumeDriver, self).__init__(connection,
                                                  is_block_dev=True)
    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt."""
        conf = super(LibvirtVolumeDriver, self).get_config(connection_info,
                                                           disk_info)
        conf.source_type = "block"
        conf.source_path = connection_info['data']['device_path']
        return conf
class LibvirtFakeVolumeDriver(LibvirtBaseVolumeDriver):
    """Driver to attach fake volumes to libvirt."""
    def __init__(self, connection):
        super(LibvirtFakeVolumeDriver, self).__init__(connection,
                                                      is_block_dev=True)
    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt."""
        conf = super(LibvirtFakeVolumeDriver, self).get_config(
            connection_info, disk_info)
        # Stub network disk: protocol and name are placeholders only.
        conf.source_type = "network"
        conf.source_protocol = "fake"
        conf.source_name = "fake"
        return conf
class LibvirtNetVolumeDriver(LibvirtBaseVolumeDriver):
    """Driver to attach Network volumes to libvirt."""
    def __init__(self, connection):
        super(LibvirtNetVolumeDriver,
              self).__init__(connection, is_block_dev=False)
    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt.

        Handles rbd and iscsi network disks.  Authentication settings
        come from nova config for rbd, from CHAP properties for iscsi,
        and otherwise fall back to values supplied by the volume
        service in connection_info['data'].
        """
        conf = super(LibvirtNetVolumeDriver,
                     self).get_config(connection_info, disk_info)
        netdisk_properties = connection_info['data']
        conf.source_type = "network"
        conf.source_protocol = connection_info['driver_volume_type']
        conf.source_name = netdisk_properties.get('name')
        conf.source_hosts = netdisk_properties.get('hosts', [])
        conf.source_ports = netdisk_properties.get('ports', [])
        auth_enabled = netdisk_properties.get('auth_enabled')
        if (conf.source_protocol == 'rbd' and
                CONF.libvirt.rbd_secret_uuid):
            conf.auth_secret_uuid = CONF.libvirt.rbd_secret_uuid
            auth_enabled = True  # Force authentication locally
            if CONF.libvirt.rbd_user:
                conf.auth_username = CONF.libvirt.rbd_user
        if conf.source_protocol == 'iscsi':
            try:
                conf.source_name = ("%(target_iqn)s/%(target_lun)s" %
                                    netdisk_properties)
                target_portal = netdisk_properties['target_portal']
            except KeyError:
                raise exception.NovaException(_("Invalid volume source data"))
            ip, port = utils.parse_server_string(target_portal)
            if ip == '' or port == '':
                raise exception.NovaException(_("Invalid target_lun"))
            conf.source_hosts = [ip]
            conf.source_ports = [port]
            if netdisk_properties.get('auth_method') == 'CHAP':
                auth_enabled = True
                conf.auth_secret_type = 'iscsi'
                password = netdisk_properties.get('auth_password')
                # Creates (or reuses) a libvirt secret for the CHAP password.
                conf.auth_secret_uuid = self._get_secret_uuid(conf, password)
        if auth_enabled:
            # Fall back to values from the volume service for anything the
            # branches above did not already set.
            conf.auth_username = (conf.auth_username or
                                  netdisk_properties['auth_username'])
            conf.auth_secret_type = (conf.auth_secret_type or
                                     netdisk_properties['secret_type'])
            conf.auth_secret_uuid = (conf.auth_secret_uuid or
                                     netdisk_properties['secret_uuid'])
        return conf
    def disconnect_volume(self, connection_info, disk_dev):
        """Detach the volume from instance_name."""
        super(LibvirtNetVolumeDriver,
              self).disconnect_volume(connection_info, disk_dev)
        # Clean up any libvirt secret created in get_config (iscsi only).
        self._delete_secret_by_name(connection_info)
class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver):
    """Driver to attach Network volumes to libvirt."""
    # Offloaded open-iscsi transports accepted for iscsi_iface; iSER has
    # its own driver (LibvirtISERVolumeDriver) and is rejected here.
    supported_transports = ['be2iscsi', 'bnx2i', 'cxgb3i',
                            'cxgb4i', 'qla4xxx', 'ocs']
    def __init__(self, connection):
        super(LibvirtISCSIVolumeDriver, self).__init__(connection,
                                                       is_block_dev=True)
        self.num_scan_tries = CONF.libvirt.num_iscsi_scan_tries
        self.use_multipath = CONF.libvirt.iscsi_use_multipath
        if CONF.libvirt.iscsi_iface:
            self.transport = CONF.libvirt.iscsi_iface
        else:
            self.transport = 'default'
    def _get_transport(self):
        # Validate lazily on each use so a misconfigured iface degrades
        # to the default tcp transport instead of failing.
        if self._validate_transport(self.transport):
            return self.transport
        else:
            return 'default'
    def _validate_transport(self, transport_iface):
        """Check that given iscsi_iface uses only supported transports
        Accepted transport names for provided iface param are
        be2iscsi, bnx2i, cxgb3i, cxgb4i, qla4xxx and ocs. iSER uses it's
        own separate driver. Note the difference between transport and
        iface; unlike iscsi_tcp/iser, this is not one and the same for
        offloaded transports, where the default format is
        transport_name.hwaddress
        """
        # We can support iser here as well, but currently reject it as the
        # separate iser driver has not yet been deprecated.
        if transport_iface == 'default':
            return True
        # Will return (6) if iscsi_iface file was not found, or (2) if iscsid
        # could not be contacted
        out = self._run_iscsiadm_bare(['-m',
                                       'iface',
                                       '-I',
                                       transport_iface],
                                      check_exit_code=[0, 2, 6])[0] or ""
        LOG.debug("iscsiadm %(iface)s configuration: stdout=%(out)s",
                  {'iface': transport_iface, 'out': out})
        # NOTE(review): a blank line in `out` would make data[0] raise
        # IndexError; assumes iscsiadm never emits empty lines — confirm.
        for data in [line.split() for line in out.splitlines()]:
            if data[0] == 'iface.transport_name':
                if data[2] in self.supported_transports:
                    return True
        LOG.warn(_LW("No useable transport found for iscsi iface %s. "
                     "Falling back to default transport"),
                 transport_iface)
        return False
    def _run_iscsiadm(self, iscsi_properties, iscsi_command, **kwargs):
        # Run iscsiadm in node mode against the target described by
        # iscsi_properties; returns (stdout, stderr).
        check_exit_code = kwargs.pop('check_exit_code', 0)
        (out, err) = utils.execute('iscsiadm', '-m', 'node', '-T',
                                   iscsi_properties['target_iqn'],
                                   '-p', iscsi_properties['target_portal'],
                                   *iscsi_command, run_as_root=True,
                                   check_exit_code=check_exit_code)
        msg = ('iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s' %
               {'command': iscsi_command, 'out': out, 'err': err})
        # NOTE(bpokorny): iscsi_command can contain passwords so we need to
        # sanitize the password in the message.
        LOG.debug(strutils.mask_password(msg))
        return (out, err)
    def _iscsiadm_update(self, iscsi_properties, property_key, property_value,
                         **kwargs):
        # Convenience wrapper for "iscsiadm --op update -n key -v value".
        iscsi_command = ('--op', 'update', '-n', property_key,
                         '-v', property_value)
        return self._run_iscsiadm(iscsi_properties, iscsi_command, **kwargs)
    def _get_target_portals_from_iscsiadm_output(self, output):
        # return both portals and iqns
        #
        # as we are parsing a command line utility, allow for the
        # possibility that additional debug data is spewed in the
        # stream, and only grab actual ip / iqn lines.
        targets = []
        for data in [line.split() for line in output.splitlines()]:
            if len(data) == 2 and data[1].startswith('iqn.'):
                targets.append(data)
        return targets
    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt."""
        conf = super(LibvirtISCSIVolumeDriver,
                     self).get_config(connection_info, disk_info)
        conf.source_type = "block"
        conf.source_path = connection_info['data']['device_path']
        return conf
    @utils.synchronized('connect_volume')
    def connect_volume(self, connection_info, disk_info):
        """Attach the volume to instance_name.

        Logs into the portal(s), waits for the /dev/disk/by-path node
        to appear (retrying with rescans), and stores the resulting
        device path (multipath device when enabled) back into
        connection_info['data']['device_path'].
        """
        iscsi_properties = connection_info['data']
        # multipath installed, discovering other targets if available
        # multipath should be configured on the nova-compute node,
        # in order to fit storage vendor
        out = None
        if self.use_multipath:
            out = self._run_iscsiadm_discover(iscsi_properties)
            # There are two types of iSCSI multipath devices. One which shares
            # the same iqn between multiple portals, and the other which use
            # different iqns on different portals. Try to identify the type by
            # checking the iscsiadm output if the iqn is used by multiple
            # portals. If it is, it's the former, so use the supplied iqn.
            # Otherwise, it's the latter, so try the ip,iqn combinations to
            # find the targets which constitutes the multipath device.
            ips_iqns = self._get_target_portals_from_iscsiadm_output(out)
            same_portal = False
            all_portals = set()
            match_portals = set()
            for ip, iqn in ips_iqns:
                all_portals.add(ip)
                if iqn == iscsi_properties['target_iqn']:
                    match_portals.add(ip)
            if len(all_portals) == len(match_portals):
                same_portal = True
            for ip, iqn in ips_iqns:
                props = iscsi_properties.copy()
                props['target_portal'] = ip.split(",")[0]
                if not same_portal:
                    props['target_iqn'] = iqn
                self._connect_to_iscsi_portal(props)
            self._rescan_iscsi()
        else:
            self._connect_to_iscsi_portal(iscsi_properties)
            # Detect new/resized LUNs for existing sessions
            self._run_iscsiadm(iscsi_properties, ("--rescan",))
        host_device = self._get_host_device(iscsi_properties)
        # The /dev/disk/by-path/... node is not always present immediately
        # TODO(justinsb): This retry-with-delay is a pattern, move to utils?
        tries = 0
        disk_dev = disk_info['dev']
        # Check host_device only when transport is used, since otherwise it is
        # directly derived from properties. Only needed for unit tests
        while ((self._get_transport() != "default" and not host_device)
               or not os.path.exists(host_device)):
            if tries >= self.num_scan_tries:
                raise exception.NovaException(_("iSCSI device not found at %s")
                                              % (host_device))
            LOG.warn(_LW("ISCSI volume not yet found at: %(disk_dev)s. "
                         "Will rescan & retry. Try number: %(tries)s"),
                     {'disk_dev': disk_dev, 'tries': tries})
            # The rescan isn't documented as being necessary(?), but it helps
            self._run_iscsiadm(iscsi_properties, ("--rescan",))
            # For offloaded open-iscsi transports, host_device cannot be
            # guessed unlike iscsi_tcp where it can be obtained from
            # properties, so try and get it again.
            if not host_device and self._get_transport() != "default":
                host_device = self._get_host_device(iscsi_properties)
            tries = tries + 1
            if not host_device or not os.path.exists(host_device):
                # Quadratic backoff between rescans.
                time.sleep(tries ** 2)
        if tries != 0:
            LOG.debug("Found iSCSI node %(disk_dev)s "
                      "(after %(tries)s rescans)",
                      {'disk_dev': disk_dev,
                       'tries': tries})
        if self.use_multipath:
            # we use the multipath device instead of the single path device
            self._rescan_multipath()
            multipath_device = self._get_multipath_device_name(host_device)
            if multipath_device is not None:
                host_device = multipath_device
                connection_info['data']['multipath_id'] = \
                    multipath_device.split('/')[-1]
        connection_info['data']['device_path'] = host_device
    def _run_iscsiadm_discover(self, iscsi_properties):
        """Run sendtargets discovery, setting up discovery CHAP auth in
        the discovery DB first when the properties require it.
        """
        def run_iscsiadm_update_discoverydb():
            return utils.execute(
                'iscsiadm',
                '-m', 'discoverydb',
                '-t', 'sendtargets',
                '-p', iscsi_properties['target_portal'],
                '--op', 'update',
                '-n', "discovery.sendtargets.auth.authmethod",
                '-v', iscsi_properties['discovery_auth_method'],
                '-n', "discovery.sendtargets.auth.username",
                '-v', iscsi_properties['discovery_auth_username'],
                '-n', "discovery.sendtargets.auth.password",
                '-v', iscsi_properties['discovery_auth_password'],
                run_as_root=True)
        out = None
        if iscsi_properties.get('discovery_auth_method'):
            try:
                run_iscsiadm_update_discoverydb()
            except processutils.ProcessExecutionError as exc:
                # iscsiadm returns 6 for "db record not found"
                if exc.exit_code == 6:
                    # Create the record first, then retry the update.
                    (out, err) = utils.execute(
                        'iscsiadm',
                        '-m', 'discoverydb',
                        '-t', 'sendtargets',
                        '-p', iscsi_properties['target_portal'],
                        '--op', 'new',
                        run_as_root=True)
                    run_iscsiadm_update_discoverydb()
                else:
                    raise
            out = self._run_iscsiadm_bare(
                ['-m',
                 'discoverydb',
                 '-t',
                 'sendtargets',
                 '-p',
                 iscsi_properties['target_portal'],
                 '--discover'],
                check_exit_code=[0, 255])[0] or ""
        else:
            out = self._run_iscsiadm_bare(
                ['-m',
                 'discovery',
                 '-t',
                 'sendtargets',
                 '-p',
                 iscsi_properties['target_portal']],
                check_exit_code=[0, 255])[0] or ""
        return out
    @utils.synchronized('connect_volume')
    def disconnect_volume(self, connection_info, disk_dev):
        """Detach the volume from instance_name.

        Only logs out of the target when no other LUNs from it remain
        in use; otherwise just deletes the kernel device node.
        """
        iscsi_properties = connection_info['data']
        host_device = self._get_host_device(iscsi_properties)
        multipath_device = None
        if self.use_multipath:
            if 'multipath_id' in iscsi_properties:
                multipath_device = ('/dev/mapper/%s' %
                                    iscsi_properties['multipath_id'])
            else:
                multipath_device = self._get_multipath_device_name(host_device)
        super(LibvirtISCSIVolumeDriver,
              self).disconnect_volume(connection_info, disk_dev)
        if self.use_multipath and multipath_device:
            return self._disconnect_volume_multipath_iscsi(iscsi_properties,
                                                           multipath_device)
        # NOTE(vish): Only disconnect from the target if no luns from the
        #             target are in use.
        device_byname = ("ip-%s-iscsi-%s-lun-" %
                         (iscsi_properties['target_portal'],
                          iscsi_properties['target_iqn']))
        devices = self.connection._get_all_block_devices()
        devices = [dev for dev in devices if (device_byname in dev
                                              and
                                              dev.startswith(
                                                  '/dev/disk/by-path/'))]
        if not devices:
            self._disconnect_from_iscsi_portal(iscsi_properties)
        elif host_device not in devices:
            # Delete device if LUN is not in use by another instance
            self._delete_device(host_device)
    def _delete_device(self, device_path):
        # Ask the kernel (via sysfs) to drop the SCSI device node.
        device_name = os.path.basename(os.path.realpath(device_path))
        delete_control = '/sys/block/' + device_name + '/device/delete'
        if os.path.exists(delete_control):
            # Copy '1' from stdin to the device delete control file
            utils.execute('cp', '/dev/stdin', delete_control,
                          process_input='1', run_as_root=True)
        else:
            LOG.warn(_LW("Unable to delete volume device %s"), device_name)
    def _remove_multipath_device_descriptor(self, disk_descriptor):
        disk_descriptor = disk_descriptor.replace('/dev/mapper/', '')
        try:
            # multipath -f flushes (removes) the named map.
            self._run_multipath(['-f', disk_descriptor],
                                check_exit_code=[0, 1])
        except processutils.ProcessExecutionError as exc:
            # Because not all cinder drivers need to remove the dev mapper,
            # here just logs a warning to avoid affecting those drivers in
            # exceptional cases.
            # NOTE(review): exc.message is Python 2 only — consistent with
            # the rest of this module.
            LOG.warn(_LW('Failed to remove multipath device descriptor '
                         '%(dev_mapper)s. Exception message: %(msg)s')
                     % {'dev_mapper': disk_descriptor,
                        'msg': exc.message})
    def _disconnect_volume_multipath_iscsi(self, iscsi_properties,
                                           multipath_device):
        """Tear down a multipath attachment, logging out of portals only
        when no other multipath device still uses the same targets.
        """
        self._rescan_iscsi()
        self._rescan_multipath()
        block_devices = self.connection._get_all_block_devices()
        devices = []
        for dev in block_devices:
            if "/mapper/" in dev:
                devices.append(dev)
            else:
                mpdev = self._get_multipath_device_name(dev)
                if mpdev:
                    devices.append(mpdev)
        # Do a discovery to find all targets.
        # Targets for multiple paths for the same multipath device
        # may not be the same.
        out = self._run_iscsiadm_discover(iscsi_properties)
        # Extract targets for the current multipath device.
        ips_iqns = []
        entries = self._get_iscsi_devices()
        for ip, iqn in self._get_target_portals_from_iscsiadm_output(out):
            ip_iqn = "%s-iscsi-%s" % (ip.split(",")[0], iqn)
            for entry in entries:
                entry_ip_iqn = entry.split("-lun-")[0]
                if entry_ip_iqn[:3] == "ip-":
                    entry_ip_iqn = entry_ip_iqn[3:]
                elif entry_ip_iqn[:4] == "pci-":
                    # Look at an offset of len('pci-0000:00:00.0')
                    offset = entry_ip_iqn.find("ip-", 16, 21)
                    entry_ip_iqn = entry_ip_iqn[(offset + 3):]
                if (ip_iqn != entry_ip_iqn):
                    continue
                entry_real_path = os.path.realpath("/dev/disk/by-path/%s" %
                                                   entry)
                entry_mpdev = self._get_multipath_device_name(entry_real_path)
                if entry_mpdev == multipath_device:
                    ips_iqns.append([ip, iqn])
                    break
        if not devices:
            # disconnect if no other multipath devices
            self._disconnect_mpath(iscsi_properties, ips_iqns)
            return
        # Get a target for all other multipath devices
        other_iqns = [self._get_multipath_iqn(device)
                      for device in devices]
        # Get all the targets for the current multipath device
        current_iqns = [iqn for ip, iqn in ips_iqns]
        in_use = False
        for current in current_iqns:
            if current in other_iqns:
                in_use = True
                break
        # If no other multipath device attached has the same iqn
        # as the current device
        if not in_use:
            # disconnect if no other multipath devices with same iqn
            self._disconnect_mpath(iscsi_properties, ips_iqns)
            return
        elif multipath_device not in devices:
            # delete the devices associated w/ the unused multipath
            self._delete_mpath(iscsi_properties, multipath_device, ips_iqns)
        # else do not disconnect iscsi portals,
        # as they are used for other luns,
        # just remove multipath mapping device descriptor
        self._remove_multipath_device_descriptor(multipath_device)
        return
    def _connect_to_iscsi_portal(self, iscsi_properties):
        # NOTE(vish): If we are on the same host as nova volume, the
        #             discovery makes the target so we don't need to
        #             run --op new. Therefore, we check to see if the
        #             target exists, and if we get 255 (Not Found), then
        #             we run --op new. This will also happen if another
        #             volume is using the same target.
        try:
            self._run_iscsiadm(iscsi_properties, ())
        except processutils.ProcessExecutionError as exc:
            # iscsiadm returns 21 for "No records found" after version 2.0-871
            if exc.exit_code in [21, 255]:
                self._reconnect(iscsi_properties)
            else:
                raise
        if iscsi_properties.get('auth_method'):
            self._iscsiadm_update(iscsi_properties,
                                  "node.session.auth.authmethod",
                                  iscsi_properties['auth_method'])
            self._iscsiadm_update(iscsi_properties,
                                  "node.session.auth.username",
                                  iscsi_properties['auth_username'])
            self._iscsiadm_update(iscsi_properties,
                                  "node.session.auth.password",
                                  iscsi_properties['auth_password'])
        # duplicate logins crash iscsiadm after load,
        # so we scan active sessions to see if the node is logged in.
        out = self._run_iscsiadm_bare(["-m", "session"],
                                      run_as_root=True,
                                      check_exit_code=[0, 1, 21])[0] or ""
        portals = [{'portal': p.split(" ")[2], 'iqn': p.split(" ")[3]}
                   for p in out.splitlines() if p.startswith("tcp:")]
        stripped_portal = iscsi_properties['target_portal'].split(",")[0]
        if len(portals) == 0 or len([s for s in portals
                                     if stripped_portal ==
                                     s['portal'].split(",")[0]
                                     and
                                     s['iqn'] ==
                                     iscsi_properties['target_iqn']]
                                    ) == 0:
            try:
                self._run_iscsiadm(iscsi_properties,
                                   ("--login",),
                                   check_exit_code=[0, 255])
            except processutils.ProcessExecutionError as err:
                # as this might be one of many paths,
                # only set successful logins to startup automatically
                if err.exit_code in [15]:
                    # Exit code 15: session already exists — still mark
                    # the node for automatic startup.
                    self._iscsiadm_update(iscsi_properties,
                                          "node.startup",
                                          "automatic")
                    return
            self._iscsiadm_update(iscsi_properties,
                                  "node.startup",
                                  "automatic")
    def _disconnect_from_iscsi_portal(self, iscsi_properties):
        # Disable auto-startup, log out, then delete the node record.
        self._iscsiadm_update(iscsi_properties, "node.startup", "manual",
                              check_exit_code=[0, 21, 255])
        self._run_iscsiadm(iscsi_properties, ("--logout",),
                           check_exit_code=[0, 21, 255])
        self._run_iscsiadm(iscsi_properties, ('--op', 'delete'),
                           check_exit_code=[0, 21, 255])
    def _get_multipath_device_name(self, single_path_device):
        # Map a single-path device to its /dev/mapper/<name> alias via
        # "multipath -ll", or None when it is not part of a multipath map.
        device = os.path.realpath(single_path_device)
        out = self._run_multipath(['-ll',
                                   device],
                                  check_exit_code=[0, 1])[0]
        mpath_line = [line for line in out.splitlines()
                      if "scsi_id" not in line]  # ignore udev errors
        if len(mpath_line) > 0 and len(mpath_line[0]) > 0:
            return "/dev/mapper/%s" % mpath_line[0].split(" ")[0]
        return None
    def _get_iscsi_devices(self):
        # List by-path entries that belong to iSCSI sessions (plain tcp
        # entries start with "ip-", offloaded ones with "pci-...ip-").
        try:
            devices = list(os.walk('/dev/disk/by-path'))[0][-1]
        except IndexError:
            return []
        iscsi_devs = []
        for entry in devices:
            if (entry.startswith("ip-") or
                    (entry.startswith('pci-') and 'ip-' in entry)):
                iscsi_devs.append(entry)
        return iscsi_devs
    def _delete_mpath(self, iscsi_properties, multipath_device, ips_iqns):
        entries = self._get_iscsi_devices()
        # Loop through ips_iqns to construct all paths
        iqn_luns = []
        for ip, iqn in ips_iqns:
            iqn_lun = '%s-lun-%s' % (iqn,
                                     iscsi_properties.get('target_lun', 0))
            iqn_luns.append(iqn_lun)
        for dev in ['/dev/disk/by-path/%s' % dev for dev in entries]:
            for iqn_lun in iqn_luns:
                if iqn_lun in dev:
                    self._delete_device(dev)
        self._rescan_multipath()
    def _disconnect_mpath(self, iscsi_properties, ips_iqns):
        # Log out of every (portal, iqn) pair backing the multipath map.
        for ip, iqn in ips_iqns:
            props = iscsi_properties.copy()
            props['target_portal'] = ip
            props['target_iqn'] = iqn
            self._disconnect_from_iscsi_portal(props)
        self._rescan_multipath()
    def _get_multipath_iqn(self, multipath_device):
        # Reverse-map a /dev/mapper device to the iqn of one of its paths.
        entries = self._get_iscsi_devices()
        for entry in entries:
            entry_real_path = os.path.realpath("/dev/disk/by-path/%s" % entry)
            entry_multipath = self._get_multipath_device_name(entry_real_path)
            if entry_multipath == multipath_device:
                return entry.split("iscsi-")[1].split("-lun")[0]
        return None
    def _run_iscsiadm_bare(self, iscsi_command, **kwargs):
        # Run iscsiadm without the node-mode boilerplate of _run_iscsiadm.
        check_exit_code = kwargs.pop('check_exit_code', 0)
        (out, err) = utils.execute('iscsiadm',
                                   *iscsi_command,
                                   run_as_root=True,
                                   check_exit_code=check_exit_code)
        LOG.debug("iscsiadm %(command)s: stdout=%(out)s stderr=%(err)s",
                  {'command': iscsi_command, 'out': out, 'err': err})
        return (out, err)
    def _run_multipath(self, multipath_command, **kwargs):
        check_exit_code = kwargs.pop('check_exit_code', 0)
        (out, err) = utils.execute('multipath',
                                   *multipath_command,
                                   run_as_root=True,
                                   check_exit_code=check_exit_code)
        LOG.debug("multipath %(command)s: stdout=%(out)s stderr=%(err)s",
                  {'command': multipath_command, 'out': out, 'err': err})
        return (out, err)
    def _rescan_iscsi(self):
        # Rescan both node records and live sessions for new LUNs.
        self._run_iscsiadm_bare(('-m', 'node', '--rescan'),
                                check_exit_code=[0, 1, 21, 255])
        self._run_iscsiadm_bare(('-m', 'session', '--rescan'),
                                check_exit_code=[0, 1, 21, 255])
    def _rescan_multipath(self):
        # "multipath -r": force reload of the multipath maps.
        self._run_multipath(['-r'], check_exit_code=[0, 1, 21])
    def _get_host_device(self, transport_properties):
        """Find device path in devtemfs."""
        device = ("ip-%s-iscsi-%s-lun-%s" %
                  (transport_properties['target_portal'],
                   transport_properties['target_iqn'],
                   transport_properties.get('target_lun', 0)))
        if self._get_transport() == "default":
            return ("/dev/disk/by-path/%s" % device)
        else:
            # Offloaded transports prepend a pci-... prefix, so glob for
            # any entry ending with the expected suffix.
            host_device = None
            look_for_device = glob.glob('/dev/disk/by-path/*%s' % device)
            if look_for_device:
                host_device = look_for_device[0]
            return host_device
    def _reconnect(self, iscsi_properties):
        # Note: iscsiadm does not support changing iface.iscsi_ifacename
        # via --op update, so we do this at creation time
        self._run_iscsiadm(iscsi_properties,
                           ('--interface', self._get_transport(),
                            '--op', 'new'))
class LibvirtISERVolumeDriver(LibvirtISCSIVolumeDriver):
    """Driver to attach Network volumes to libvirt."""
    def __init__(self, connection):
        super(LibvirtISERVolumeDriver, self).__init__(connection)
        # Override the iSCSI tunables with their iSER counterparts.
        self.num_scan_tries = CONF.libvirt.num_iser_scan_tries
        self.use_multipath = CONF.libvirt.iser_use_multipath
    def _get_transport(self):
        # iSER always uses its own transport; no tcp fallback.
        return 'iser'
    def _get_multipath_iqn(self, multipath_device):
        # Same as the iSCSI variant, but by-path entries carry an
        # "iser-" marker instead of "iscsi-".
        for entry in self._get_iscsi_devices():
            real_path = os.path.realpath("/dev/disk/by-path/%s" % entry)
            if self._get_multipath_device_name(real_path) == multipath_device:
                return entry.split("iser-")[1].split("-lun")[0]
        return None
class LibvirtNFSVolumeDriver(LibvirtBaseVolumeDriver):
    """Class implements libvirt part of volume driver for NFS."""
    def __init__(self, connection):
        """Create back-end to nfs."""
        super(LibvirtNFSVolumeDriver,
              self).__init__(connection, is_block_dev=False)
    def _get_device_path(self, connection_info):
        # <mount_point_base>/<hash of export>/<volume name>
        path = os.path.join(CONF.libvirt.nfs_mount_point_base,
                            utils.get_hash_str(connection_info['data']['export']))
        path = os.path.join(path, connection_info['data']['name'])
        return path
    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt."""
        conf = super(LibvirtNFSVolumeDriver,
                     self).get_config(connection_info, disk_info)
        conf.source_type = 'file'
        conf.source_path = connection_info['data']['device_path']
        conf.driver_format = connection_info['data'].get('format', 'raw')
        return conf
    def connect_volume(self, connection_info, disk_info):
        """Connect the volume. Returns xml for libvirt."""
        options = connection_info['data'].get('options')
        self._ensure_mounted(connection_info['data']['export'], options)
        connection_info['data']['device_path'] = \
            self._get_device_path(connection_info)
    def disconnect_volume(self, connection_info, disk_dev):
        """Disconnect the volume."""
        export = connection_info['data']['export']
        mount_path = os.path.join(CONF.libvirt.nfs_mount_point_base,
                                  utils.get_hash_str(export))
        try:
            utils.execute('umount', mount_path, run_as_root=True)
        except processutils.ProcessExecutionError as exc:
            # Best-effort: a busy share just means another instance still
            # uses it.  NOTE(review): exc.message is Python 2 only.
            if ('device is busy' in exc.message or
                    'target is busy' in exc.message):
                LOG.debug("The NFS share %s is still in use.", export)
            else:
                LOG.exception(_LE("Couldn't unmount the NFS share %s"), export)
    def _ensure_mounted(self, nfs_export, options=None):
        """@type nfs_export: string
           @type options: string
        """
        mount_path = os.path.join(CONF.libvirt.nfs_mount_point_base,
                                  utils.get_hash_str(nfs_export))
        if not libvirt_utils.is_mounted(mount_path, nfs_export):
            self._mount_nfs(mount_path, nfs_export, options, ensure=True)
        return mount_path
    def _mount_nfs(self, mount_path, nfs_share, options=None, ensure=False):
        """Mount nfs export to mount path."""
        utils.execute('mkdir', '-p', mount_path)
        # Construct the NFS mount command.
        nfs_cmd = ['mount', '-t', 'nfs']
        if CONF.libvirt.nfs_mount_options is not None:
            nfs_cmd.extend(['-o', CONF.libvirt.nfs_mount_options])
        if options:
            nfs_cmd.extend(options.split(' '))
        nfs_cmd.extend([nfs_share, mount_path])
        try:
            utils.execute(*nfs_cmd, run_as_root=True)
        except processutils.ProcessExecutionError as exc:
            # With ensure=True an already-mounted share is not an error.
            if ensure and 'already mounted' in exc.message:
                LOG.warn(_LW("%s is already mounted"), nfs_share)
            else:
                raise
class LibvirtSMBFSVolumeDriver(LibvirtBaseVolumeDriver):
    """Class implements libvirt part of volume driver for SMBFS."""
    def __init__(self, connection):
        super(LibvirtSMBFSVolumeDriver,
              self).__init__(connection, is_block_dev=False)
        # Captures "user="/"username=" mount options, dropping any
        # leading DOMAIN\ prefix from the value.
        self.username_regex = re.compile(
            r"(user(?:name)?)=(?:[^ ,]+\\)?([^ ,]+)")
    def _get_mount_path(self, smbfs_share):
        """Return the local directory where smbfs_share gets mounted."""
        return os.path.join(CONF.libvirt.smbfs_mount_point_base,
                            utils.get_hash_str(smbfs_share))
    def _get_device_path(self, connection_info):
        """Return the volume file path beneath the share's mount point."""
        data = connection_info['data']
        return os.path.join(self._get_mount_path(data['export']),
                            data['name'])
    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt."""
        conf = super(LibvirtSMBFSVolumeDriver,
                     self).get_config(connection_info, disk_info)
        data = connection_info['data']
        conf.source_type = 'file'
        conf.driver_cache = 'writethrough'
        conf.source_path = data['device_path']
        conf.driver_format = data.get('format', 'raw')
        return conf
    def connect_volume(self, connection_info, disk_info):
        """Connect the volume."""
        smbfs_share = connection_info['data']['export']
        mount_path = self._get_mount_path(smbfs_share)
        if not libvirt_utils.is_mounted(mount_path, smbfs_share):
            remotefs.mount_share(
                mount_path, smbfs_share, export_type='cifs',
                options=self._parse_mount_options(connection_info))
        connection_info['data']['device_path'] = \
            self._get_device_path(connection_info)
    def disconnect_volume(self, connection_info, disk_dev):
        """Disconnect the volume."""
        smbfs_share = connection_info['data']['export']
        remotefs.unmount_share(self._get_mount_path(smbfs_share),
                               smbfs_share)
    def _parse_mount_options(self, connection_info):
        """Merge per-connection and configured mount options, forcing a
        guest login when no username option is present and stripping any
        domain prefix when one is.
        """
        mount_options = " ".join(
            [connection_info['data'].get('options', ''),
             CONF.libvirt.smbfs_mount_options])
        if not self.username_regex.findall(mount_options):
            mount_options += ' -o username=guest'
        else:
            # Remove the Domain Name from user name
            mount_options = self.username_regex.sub(r'\1=\2', mount_options)
        return mount_options.strip(", ").split(' ')
class LibvirtAOEVolumeDriver(LibvirtBaseVolumeDriver):
    """Driver to attach AoE volumes to libvirt."""

    def __init__(self, connection):
        super(LibvirtAOEVolumeDriver,
              self).__init__(connection, is_block_dev=True)

    def _aoe_discover(self):
        """Call aoe-discover (aoe-tools) AoE Discover."""
        (out, err) = utils.execute('aoe-discover',
                                   run_as_root=True, check_exit_code=0)
        return (out, err)

    def _aoe_revalidate(self, aoedev):
        """Revalidate the LUN Geometry (When an AoE ID is reused)."""
        (out, err) = utils.execute('aoe-revalidate', aoedev,
                                   run_as_root=True, check_exit_code=0)
        return (out, err)

    def _get_device_name(self, connection_info):
        """Return the AoE device name, e.g. 'e<shelf>.<lun>'."""
        shelf = connection_info['data']['target_shelf']
        lun = connection_info['data']['target_lun']
        return 'e%s.%s' % (shelf, lun)

    def _get_device_path(self, connection_info):
        """Return the /dev/etherd node for the AoE device."""
        return '/dev/etherd/%s' % self._get_device_name(connection_info)

    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt."""
        conf = super(LibvirtAOEVolumeDriver,
                     self).get_config(connection_info, disk_info)
        conf.source_type = "block"
        conf.source_path = connection_info['data']['device_path']
        return conf

    def connect_volume(self, connection_info, mount_device):
        """Discover (or revalidate) the AoE device and record its path."""
        # Reuse the path helpers instead of rebuilding the strings here.
        aoedev = self._get_device_name(connection_info)
        aoedevpath = self._get_device_path(connection_info)

        if os.path.exists(aoedevpath):
            # NOTE(jbr_): If aoedevpath already exists, revalidate the LUN.
            self._aoe_revalidate(aoedev)
        else:
            # NOTE(jbr_): If aoedevpath does not exist, do a discover.
            self._aoe_discover()

        # NOTE(jbr_): Device path is not always present immediately
        def _wait_for_device_discovery(aoedevpath, mount_device):
            tries = self.tries
            if os.path.exists(aoedevpath):
                raise loopingcall.LoopingCallDone()

            if self.tries >= CONF.libvirt.num_aoe_discover_tries:
                raise exception.NovaException(_("AoE device not found at %s") %
                                              (aoedevpath))
            LOG.warn(_LW("AoE volume not yet found at: %(aoedevpath)s. "
                         "Try number: %(tries)s"),
                     {'aoedevpath': aoedevpath, 'tries': tries})

            self._aoe_discover()
            self.tries = self.tries + 1

        self.tries = 0
        timer = loopingcall.FixedIntervalLoopingCall(
            _wait_for_device_discovery, aoedevpath, mount_device)
        timer.start(interval=2).wait()

        tries = self.tries
        if tries != 0:
            LOG.debug("Found AoE device %(aoedevpath)s "
                      "(after %(tries)s rediscover)",
                      {'aoedevpath': aoedevpath,
                       'tries': tries})

        connection_info['data']['device_path'] = \
            self._get_device_path(connection_info)
class LibvirtGlusterfsVolumeDriver(LibvirtBaseVolumeDriver):
    """Class implements libvirt part of volume driver for GlusterFS."""

    def __init__(self, connection):
        """Create back-end to glusterfs."""
        super(LibvirtGlusterfsVolumeDriver,
              self).__init__(connection, is_block_dev=False)

    def _get_device_path(self, connection_info):
        """Return the volume file path inside the mounted export."""
        path = os.path.join(CONF.libvirt.glusterfs_mount_point_base,
                            utils.get_hash_str(
                                connection_info['data']['export']))
        path = os.path.join(path, connection_info['data']['name'])
        return path

    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt."""
        conf = super(LibvirtGlusterfsVolumeDriver,
                     self).get_config(connection_info, disk_info)

        data = connection_info['data']

        if 'gluster' in CONF.libvirt.qemu_allowed_storage_drivers:
            # qemu talks the gluster protocol directly instead of going
            # through a local FUSE mount.
            vol_name = data['export'].split('/')[1]
            source_host = data['export'].split('/')[0][:-1]

            conf.source_ports = ['24007']
            conf.source_type = 'network'
            conf.source_protocol = 'gluster'
            conf.source_hosts = [source_host]
            conf.source_name = '%s/%s' % (vol_name, data['name'])
        else:
            conf.source_type = 'file'
            conf.source_path = connection_info['data']['device_path']

        conf.driver_format = connection_info['data'].get('format', 'raw')

        return conf

    def connect_volume(self, connection_info, mount_device):
        """Mount the export unless qemu accesses gluster directly."""
        data = connection_info['data']

        if 'gluster' not in CONF.libvirt.qemu_allowed_storage_drivers:
            self._ensure_mounted(data['export'], data.get('options'))
            connection_info['data']['device_path'] = \
                self._get_device_path(connection_info)

    def disconnect_volume(self, connection_info, disk_dev):
        """Disconnect the volume."""

        if 'gluster' in CONF.libvirt.qemu_allowed_storage_drivers:
            # Nothing was mounted for the direct-access case.
            return

        export = connection_info['data']['export']
        mount_path = os.path.join(CONF.libvirt.glusterfs_mount_point_base,
                                  utils.get_hash_str(export))

        try:
            utils.execute('umount', mount_path, run_as_root=True)
        except processutils.ProcessExecutionError as exc:
            # NOTE: stringify the exception instead of using the
            # Python 2-only ``exc.message`` attribute.
            if 'target is busy' in six.text_type(exc):
                LOG.debug("The GlusterFS share %s is still in use.", export)
            else:
                LOG.exception(_LE("Couldn't unmount the GlusterFS share %s"),
                              export)

    def _ensure_mounted(self, glusterfs_export, options=None):
        """Mount the export if needed and return its local mount path.

        :param glusterfs_export: export specification (string)
        :param options: extra mount options (string), if any
        """
        mount_path = os.path.join(CONF.libvirt.glusterfs_mount_point_base,
                                  utils.get_hash_str(glusterfs_export))
        if not libvirt_utils.is_mounted(mount_path, glusterfs_export):
            self._mount_glusterfs(mount_path, glusterfs_export,
                                  options, ensure=True)
        return mount_path

    def _mount_glusterfs(self, mount_path, glusterfs_share,
                         options=None, ensure=False):
        """Mount glusterfs export to mount path."""
        utils.execute('mkdir', '-p', mount_path)

        gluster_cmd = ['mount', '-t', 'glusterfs']
        if options is not None:
            gluster_cmd.extend(options.split(' '))
        gluster_cmd.extend([glusterfs_share, mount_path])

        try:
            utils.execute(*gluster_cmd, run_as_root=True)
        except processutils.ProcessExecutionError as exc:
            # NOTE: stringify the exception instead of using the
            # Python 2-only ``exc.message`` attribute.
            if ensure and 'already mounted' in six.text_type(exc):
                LOG.warn(_LW("%s is already mounted"), glusterfs_share)
            else:
                raise
class LibvirtFibreChannelVolumeDriver(LibvirtBaseVolumeDriver):
    """Driver to attach Fibre Channel Network volumes to libvirt."""

    def __init__(self, connection):
        super(LibvirtFibreChannelVolumeDriver,
              self).__init__(connection, is_block_dev=False)

    def _get_pci_num(self, hba):
        """Return the PCI id component that precedes the hostN entry.

        Returns None if it cannot be determined from the HBA info.
        """
        # NOTE(walter-boring)
        # device path is in format of
        # /sys/devices/pci0000:00/0000:00:03.0/0000:05:00.3/host2/fc_host/host2
        # sometimes an extra entry exists before the host2 value
        # we always want the value prior to the host2 value
        pci_num = None
        if hba is not None:
            if "device_path" in hba:
                index = 0
                device_path = hba['device_path'].split('/')
                for value in device_path:
                    if value.startswith('host'):
                        break
                    index = index + 1

                if index > 0:
                    pci_num = device_path[index - 1]

        return pci_num

    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt."""
        conf = super(LibvirtFibreChannelVolumeDriver,
                     self).get_config(connection_info, disk_info)
        conf.source_type = "block"
        conf.source_path = connection_info['data']['device_path']
        return conf

    def _get_possible_devices(self, wwnports):
        """Compute the possible valid fiber channel device options.

        :param wwnports: possible wwn addresses. Can either be string
        or list of strings.

        :returns: list of (pci_id, wwn) tuples

        Given one or more wwn (mac addresses for fiber channel) ports
        do the matrix math to figure out a set of pci device, wwn
        tuples that are potentially valid (they won't all be). This
        provides a search space for the device connection.
        """
        # the wwn (think mac addresses for fiber channel devices) can
        # either be a single value or a list. Normalize it to a list
        # for further operations.
        wwns = []
        if isinstance(wwnports, list):
            for wwn in wwnports:
                wwns.append(str(wwn))
        elif isinstance(wwnports, six.string_types):
            wwns.append(str(wwnports))

        raw_devices = []
        hbas = libvirt_utils.get_fc_hbas_info()
        for hba in hbas:
            pci_num = self._get_pci_num(hba)
            if pci_num is not None:
                for wwn in wwns:
                    target_wwn = "0x%s" % wwn.lower()
                    raw_devices.append((pci_num, target_wwn))
        return raw_devices

    @utils.synchronized('connect_volume')
    def connect_volume(self, connection_info, disk_info):
        """Attach the volume to instance_name."""
        fc_properties = connection_info['data']
        mount_device = disk_info["dev"]

        possible_devs = self._get_possible_devices(fc_properties['target_wwn'])

        # map the raw device possibilities to possible host device paths
        host_devices = []
        for device in possible_devs:
            pci_num, target_wwn = device
            host_device = ("/dev/disk/by-path/pci-%s-fc-%s-lun-%s" %
                           (pci_num,
                            target_wwn,
                            fc_properties.get('target_lun', 0)))
            host_devices.append(host_device)

        if len(host_devices) == 0:
            # this is empty because we don't have any FC HBAs
            msg = _("We are unable to locate any Fibre Channel devices")
            raise exception.NovaException(msg)

        # The /dev/disk/by-path/... node is not always present immediately
        # We only need to find the first device. Once we see the first device
        # multipath will have any others.
        def _wait_for_device_discovery(host_devices, mount_device):
            tries = self.tries
            for device in host_devices:
                LOG.debug("Looking for Fibre Channel dev %(device)s",
                          {'device': device})
                if os.path.exists(device):
                    self.host_device = device
                    # get the /dev/sdX device. This is used
                    # to find the multipath device.
                    self.device_name = os.path.realpath(device)
                    raise loopingcall.LoopingCallDone()

            if self.tries >= CONF.libvirt.num_iscsi_scan_tries:
                msg = _("Fibre Channel device not found.")
                raise exception.NovaException(msg)

            LOG.warn(_LW("Fibre volume not yet found at: %(mount_device)s. "
                         "Will rescan & retry. Try number: %(tries)s"),
                     {'mount_device': mount_device, 'tries': tries})

            linuxscsi.rescan_hosts(libvirt_utils.get_fc_hbas_info())
            self.tries = self.tries + 1

        self.host_device = None
        self.device_name = None
        self.tries = 0
        timer = loopingcall.FixedIntervalLoopingCall(
            _wait_for_device_discovery, host_devices, mount_device)
        timer.start(interval=2).wait()

        tries = self.tries
        if self.host_device is not None and self.device_name is not None:
            LOG.debug("Found Fibre Channel volume %(mount_device)s "
                      "(after %(tries)s rescans)",
                      {'mount_device': mount_device,
                       'tries': tries})

        # see if the new drive is part of a multipath
        # device. If so, we'll use the multipath device.
        mdev_info = linuxscsi.find_multipath_device(self.device_name)
        if mdev_info is not None:
            LOG.debug("Multipath device discovered %(device)s",
                      {'device': mdev_info['device']})
            device_path = mdev_info['device']
            connection_info['data']['device_path'] = device_path
            connection_info['data']['devices'] = mdev_info['devices']
            connection_info['data']['multipath_id'] = mdev_info['id']
        else:
            # we didn't find a multipath device.
            # so we assume the kernel only sees 1 device
            device_path = self.host_device
            device_info = linuxscsi.get_device_info(self.device_name)
            connection_info['data']['device_path'] = device_path
            connection_info['data']['devices'] = [device_info]

    @utils.synchronized('connect_volume')
    def disconnect_volume(self, connection_info, mount_device):
        """Detach the volume from instance_name."""
        super(LibvirtFibreChannelVolumeDriver,
              self).disconnect_volume(connection_info, mount_device)

        # If this is a multipath device, we need to search again
        # and make sure we remove all the devices. Some of them
        # might not have shown up at attach time.
        if 'multipath_id' in connection_info['data']:
            multipath_id = connection_info['data']['multipath_id']
            mdev_info = linuxscsi.find_multipath_device(multipath_id)
            devices = mdev_info['devices']
            LOG.debug("devices to remove = %s", devices)
        else:
            # only needed when multipath-tools work improperly
            devices = connection_info['data'].get('devices', [])
            # Pass ``devices`` as a lazy formatting argument instead of
            # eagerly %-interpolating it into the translated message.
            LOG.warn(_LW("multipath-tools probably work improperly. "
                         "devices to remove = %s."), devices)

        # There may have been more than 1 device mounted
        # by the kernel for this volume. We have to remove
        # all of them
        for device in devices:
            linuxscsi.remove_device(device)
class LibvirtScalityVolumeDriver(LibvirtBaseVolumeDriver):
    """Scality SOFS Nova driver. Provide hypervisors with access
    to sparse files on SOFS.
    """

    def __init__(self, connection):
        """Create back-end to SOFS and check connection."""
        super(LibvirtScalityVolumeDriver,
              self).__init__(connection, is_block_dev=False)

    def _get_device_path(self, connection_info):
        # The volume's sparse file lives under the single SOFS mount point.
        path = os.path.join(CONF.libvirt.scality_sofs_mount_point,
                            connection_info['data']['sofs_path'])
        return path

    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt."""
        conf = super(LibvirtScalityVolumeDriver,
                     self).get_config(connection_info, disk_info)
        conf.source_type = 'file'
        conf.source_path = connection_info['data']['device_path']

        # The default driver cache policy is 'none', and this causes
        # qemu/kvm to open the volume file with O_DIRECT, which is
        # rejected by FUSE (on kernels older than 3.3). Scality SOFS
        # is FUSE based, so we must provide a more sensible default.
        conf.driver_cache = 'writethrough'

        return conf

    def connect_volume(self, connection_info, disk_info):
        """Connect the volume. Returns xml for libvirt."""
        # Fail early with a clear error before attempting the mount.
        self._check_prerequisites()
        self._mount_sofs()
        connection_info['data']['device_path'] = \
            self._get_device_path(connection_info)

    def _check_prerequisites(self):
        """Sanity checks before attempting to mount SOFS."""

        # config is mandatory
        config = CONF.libvirt.scality_sofs_config
        if not config:
            msg = _LW("Value required for 'scality_sofs_config'")
            LOG.warn(msg)
            raise exception.NovaException(msg)

        # config can be a file path or a URL, check it
        if urlparse.urlparse(config).scheme == '':
            # turn local path into URL
            config = 'file://%s' % config
        try:
            # Quick reachability probe with a short timeout so attach
            # does not hang on an unreachable config URL.
            urllib2.urlopen(config, timeout=5).close()
        except urllib2.URLError as e:
            msg = _LW("Cannot access 'scality_sofs_config': %s") % e
            LOG.warn(msg)
            raise exception.NovaException(msg)

        # mount.sofs must be installed
        if not os.access('/sbin/mount.sofs', os.X_OK):
            msg = _LW("Cannot execute /sbin/mount.sofs")
            LOG.warn(msg)
            raise exception.NovaException(msg)

    def _mount_sofs(self):
        """Mount SOFS on the configured mount point, if not mounted yet."""
        config = CONF.libvirt.scality_sofs_config
        mount_path = CONF.libvirt.scality_sofs_mount_point
        sysdir = os.path.join(mount_path, 'sys')

        if not os.path.isdir(mount_path):
            utils.execute('mkdir', '-p', mount_path)
        # The presence of <mount_path>/sys is used as the "already
        # mounted" indicator, both before and after the mount attempt.
        if not os.path.isdir(sysdir):
            utils.execute('mount', '-t', 'sofs', config, mount_path,
                          run_as_root=True)
        if not os.path.isdir(sysdir):
            msg = _LW("Cannot mount Scality SOFS, check syslog for errors")
            LOG.warn(msg)
            raise exception.NovaException(msg)
class LibvirtGPFSVolumeDriver(LibvirtBaseVolumeDriver):
    """Class for volumes backed by gpfs volume."""

    def __init__(self, connection):
        super(LibvirtGPFSVolumeDriver, self).__init__(
            connection, is_block_dev=False)

    def get_config(self, connection_info, disk_info):
        """Returns xml for libvirt."""
        conf = super(LibvirtGPFSVolumeDriver, self).get_config(
            connection_info, disk_info)
        conf.source_type = "file"
        conf.source_path = connection_info['data']['device_path']
        return conf
class LibvirtQuobyteVolumeDriver(LibvirtBaseVolumeDriver):
    """Class implements libvirt part of volume driver for Quobyte."""

    def __init__(self, connection):
        """Create back-end to Quobyte."""
        super(LibvirtQuobyteVolumeDriver,
              self).__init__(connection, is_block_dev=False)

    def get_config(self, connection_info, disk_info):
        """Returns the disk config for libvirt."""
        conf = super(LibvirtQuobyteVolumeDriver,
                     self).get_config(connection_info, disk_info)
        data = connection_info['data']
        conf.source_protocol = quobyte.SOURCE_PROTOCOL
        conf.source_type = quobyte.SOURCE_TYPE
        conf.driver_cache = quobyte.DRIVER_CACHE
        conf.driver_io = quobyte.DRIVER_IO
        conf.driver_format = data.get('format', 'raw')

        quobyte_volume = self._normalize_url(data['export'])
        path = os.path.join(self._get_mount_point_for_share(quobyte_volume),
                            data['name'])
        conf.source_path = path

        return conf

    @utils.synchronized('connect_volume')
    def connect_volume(self, connection_info, disk_info):
        """Connect the volume."""
        data = connection_info['data']
        quobyte_volume = self._normalize_url(data['export'])
        mount_path = self._get_mount_point_for_share(quobyte_volume)
        mounted = libvirt_utils.is_mounted(mount_path,
                                           quobyte.SOURCE_PROTOCOL
                                           + '@' + quobyte_volume)
        if mounted:
            try:
                os.stat(mount_path)
            except OSError as exc:
                if exc.errno == errno.ENOTCONN:
                    # Stale FUSE mount left behind by a dead client;
                    # unmount so it can be remounted cleanly below.
                    mounted = False
                    LOG.info(_LI('Fixing previous mount %s which was not'
                                 ' unmounted correctly.'), mount_path)
                    quobyte.umount_volume(mount_path)

        if not mounted:
            quobyte.mount_volume(quobyte_volume,
                                 mount_path,
                                 CONF.libvirt.quobyte_client_cfg)

        quobyte.validate_volume(mount_path)

    @utils.synchronized('connect_volume')
    def disconnect_volume(self, connection_info, disk_dev):
        """Disconnect the volume."""
        quobyte_volume = self._normalize_url(connection_info['data']['export'])
        mount_path = self._get_mount_point_for_share(quobyte_volume)

        # Use SOURCE_PROTOCOL for consistency with connect_volume
        # instead of a hard-coded 'quobyte' literal.
        if libvirt_utils.is_mounted(mount_path,
                                    quobyte.SOURCE_PROTOCOL
                                    + '@' + quobyte_volume):
            quobyte.umount_volume(mount_path)
        else:
            LOG.info(_LI("Trying to disconnected unmounted volume at %s"),
                     mount_path)

    def _normalize_url(self, export):
        """Strip the 'quobyte://' scheme prefix from export, if present."""
        protocol = quobyte.SOURCE_PROTOCOL + "://"
        if export.startswith(protocol):
            export = export[len(protocol):]
        return export

    def _get_mount_point_for_share(self, quobyte_volume):
        """Return mount point for Quobyte volume.

        :param quobyte_volume: Example: storage-host/openstack-volumes
        """
        return os.path.join(CONF.libvirt.quobyte_mount_point_base,
                            utils.get_hash_str(quobyte_volume))
| |
"""
Test Selection
--------------
Test selection is handled by a Selector. The test loader calls the
appropriate selector method for each object it encounters that it
thinks may be a test.
"""
import logging
import os
import unittest
from nose.config import Config
from nose.util import split_test_name, src, getfilename, getpackage, ispackage
log = logging.getLogger(__name__)
__all__ = ['Selector', 'defaultSelector', 'TestAddress']
# for efficiency and easier mocking
op_join = os.path.join
op_basename = os.path.basename
op_exists = os.path.exists
op_splitext = os.path.splitext
op_isabs = os.path.isabs
op_abspath = os.path.abspath
class Selector(object):
    """Core test selector. Examines test candidates and determines whether,
    given the specified configuration, the test candidate should be selected
    as a test.
    """
    def __init__(self, config):
        # A missing config falls back to nose's defaults.
        if config is None:
            config = Config()
        self.configure(config)

    def configure(self, config):
        """Cache the config attributes consulted on every decision."""
        self.config = config
        self.exclude = config.exclude
        self.ignoreFiles = config.ignoreFiles
        self.include = config.include
        self.plugins = config.plugins
        self.match = config.testMatch

    def matches(self, name):
        """Does the name match my requirements?

        To match, a name must match config.testMatch OR config.include
        and it must not match config.exclude
        """
        # The list comprehensions collect truthy regex matches; an empty
        # list (nothing matched) is falsy, so boolean logic over the
        # lists expresses "any include matched" / "no exclude matched".
        return ((self.match.search(name)
                 or (self.include and
                     [_f for _f in [inc.search(name) for inc in self.include] if _f]))
                and ((not self.exclude)
                     or not [_f for _f in [exc.search(name) for exc in self.exclude] if _f]
                ))

    def wantClass(self, cls):
        """Is the class a wanted test class?

        A class must be a unittest.TestCase subclass, or match test name
        requirements. Classes that start with _ are always excluded.
        """
        # An explicit __test__ attribute overrides all name matching.
        declared = getattr(cls, '__test__', None)
        if declared is not None:
            wanted = declared
        else:
            wanted = (not cls.__name__.startswith('_')
                      and (issubclass(cls, unittest.TestCase)
                           or self.matches(cls.__name__)))

        # A plugin's non-None answer always wins.
        plug_wants = self.plugins.wantClass(cls)
        if plug_wants is not None:
            log.debug("Plugin setting selection of %s to %s", cls, plug_wants)
            wanted = plug_wants
        log.debug("wantClass %s? %s", cls, wanted)
        return wanted

    def wantDirectory(self, dirname):
        """Is the directory a wanted test directory?

        All package directories match, so long as they do not match exclude.
        All other directories must match test requirements.
        """
        tail = op_basename(dirname)
        if ispackage(dirname):
            wanted = (not self.exclude
                      or not [_f for _f in [exc.search(tail) for exc in self.exclude] if _f])
        else:
            # Non-package dirs must look like tests, or be listed as
            # source dirs in the config.
            wanted = (self.matches(tail)
                      or (self.config.srcDirs
                          and tail in self.config.srcDirs))

        plug_wants = self.plugins.wantDirectory(dirname)
        if plug_wants is not None:
            log.debug("Plugin setting selection of %s to %s",
                      dirname, plug_wants)
            wanted = plug_wants
        log.debug("wantDirectory %s? %s", dirname, wanted)
        return wanted

    def wantFile(self, file):
        """Is the file a wanted test file?

        The file must be a python source file and match testMatch or
        include, and not match exclude. Files that match ignore are *never*
        wanted, regardless of plugin, testMatch, include or exclude settings.
        """
        # never, ever load files that match anything in ignore
        # (.* _* and *setup*.py by default)
        base = op_basename(file)
        ignore_matches = [ ignore_this for ignore_this in self.ignoreFiles
                           if ignore_this.search(base) ]
        if ignore_matches:
            log.debug('%s matches ignoreFiles pattern; skipped',
                      base)
            return False
        if not self.config.includeExe and os.access(file, os.X_OK):
            log.info('%s is executable; skipped', file)
            return False
        dummy, ext = op_splitext(base)
        pysrc = ext == '.py'

        wanted = pysrc and self.matches(base)
        plug_wants = self.plugins.wantFile(file)
        if plug_wants is not None:
            log.debug("plugin setting want %s to %s", file, plug_wants)
            wanted = plug_wants
        log.debug("wantFile %s? %s", file, wanted)
        return wanted

    def wantFunction(self, function):
        """Is the function a test function?"""
        try:
            # compat_func_name, when present, presumably carries the
            # original name of a wrapped function — prefer it. TODO confirm
            # against nose.case.
            if hasattr(function, 'compat_func_name'):
                funcname = function.compat_func_name
            else:
                funcname = function.__name__
        except AttributeError:
            # not a function
            return False
        declared = getattr(function, '__test__', None)
        if declared is not None:
            wanted = declared
        else:
            wanted = not funcname.startswith('_') and self.matches(funcname)
        plug_wants = self.plugins.wantFunction(function)
        if plug_wants is not None:
            wanted = plug_wants
        log.debug("wantFunction %s? %s", function, wanted)
        return wanted

    def wantMethod(self, method):
        """Is the method a test method?"""
        try:
            method_name = method.__name__
        except AttributeError:
            # not a method
            return False
        if method_name.startswith('_'):
            # never collect 'private' methods
            return False
        declared = getattr(method, '__test__', None)
        if declared is not None:
            wanted = declared
        else:
            wanted = self.matches(method_name)
        plug_wants = self.plugins.wantMethod(method)
        if plug_wants is not None:
            wanted = plug_wants
        log.debug("wantMethod %s? %s", method, wanted)
        return wanted

    def wantModule(self, module):
        """Is the module a test module?

        The tail of the module name must match test requirements. One exception:
        we always want __main__.
        """
        declared = getattr(module, '__test__', None)
        if declared is not None:
            wanted = declared
        else:
            # Only the last dotted component is matched against the
            # test-name patterns.
            wanted = self.matches(module.__name__.split('.')[-1]) \
                     or module.__name__ == '__main__'
        plug_wants = self.plugins.wantModule(module)
        if plug_wants is not None:
            wanted = plug_wants
        log.debug("wantModule %s? %s", module, wanted)
        return wanted
# Public alias (exported via __all__) for the default selector class.
defaultSelector = Selector
class TestAddress(object):
    """A test address represents a user's request to run a particular
    test. The user may specify a filename or module (or neither),
    and/or a callable (a class, function, or method). The naming
    format for test addresses is:

    filename_or_module:callable

    Filenames that are not absolute will be made absolute relative to
    the working dir.

    The filename or module part will be considered a module name if it
    doesn't look like a file, that is, if it doesn't exist on the file
    system and it doesn't contain any directory separators and it
    doesn't end in .py.

    Callables may be a class name, function name, method name, or
    class.method specification.
    """
    def __init__(self, name, workingDir=None):
        if workingDir is None:
            workingDir = os.getcwd()
        self.name = name
        self.workingDir = workingDir
        # Split "filename_or_module:callable"; any part may be None.
        self.filename, self.module, self.call = split_test_name(name)
        log.debug('Test name %s resolved to file %s, module %s, call %s',
                  name, self.filename, self.module, self.call)
        if self.filename is None:
            # No filename given: derive one from the module, if any.
            if self.module is not None:
                self.filename = getfilename(self.module, self.workingDir)
        if self.filename:
            # src() presumably maps compiled (.pyc/.pyo) files back to
            # their source file — see nose.util; then anchor relative
            # paths at the working dir.
            self.filename = src(self.filename)
            if not op_isabs(self.filename):
                self.filename = op_abspath(op_join(workingDir,
                                                   self.filename))
            if self.module is None:
                self.module = getpackage(self.filename)
        log.debug(
            'Final resolution of test name %s: file %s module %s call %s',
            name, self.filename, self.module, self.call)

    def totuple(self):
        # Canonical (filename, module, call) form of the address.
        return (self.filename, self.module, self.call)

    def __str__(self):
        return self.name

    def __repr__(self):
        return "%s: (%s, %s, %s)" % (self.name, self.filename,
                                     self.module, self.call)
| |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from novaclient import exceptions as nova_exceptions
from sahara import conductor as cond
from sahara import context
from sahara.service import direct_engine as e
from sahara.service import ops
from sahara.tests.unit import base
import sahara.utils.crypto as c
from sahara.utils import general as g
conductor = cond.API
class AbstractInstanceTest(base.SaharaWithDbTestCase):
    """Base test case wiring up the nova/conductor mocks shared below."""

    def setUp(self):
        super(AbstractInstanceTest, self).setUp()

        self.engine = e.DirectEngine()

        # Treat all conductor resource types as passthrough so test
        # objects round-trip unchanged.
        self.is_passthrough_patcher = mock.patch(
            'sahara.conductor.resource.Resource._is_passthrough_type')
        self.is_passthrough_patcher.start().return_value = True

        # Replace the nova client with a mock; no server groups exist
        # by default.
        self.novaclient_patcher = mock.patch(
            'sahara.utils.openstack.nova.client')
        self.nova = _create_nova_mock(self.novaclient_patcher.start())
        self.nova.server_groups.findall.return_value = []

        # Empty userdata template keeps instance creation simple.
        self.get_userdata_patcher = mock.patch(
            'sahara.utils.remote.get_userdata_template')
        self.get_userdata_patcher.start().return_value = ''

    def tearDown(self):
        # Stop patchers in reverse order of start.
        self.get_userdata_patcher.stop()
        self.novaclient_patcher.stop()
        self.is_passthrough_patcher.stop()

        super(AbstractInstanceTest, self).tearDown()
class TestClusterRollBack(AbstractInstanceTest):
    """Verifies failed provisioning rolls the cluster back to 0 instances."""

    @mock.patch('sahara.service.direct_engine.DirectEngine._check_if_deleted')
    @mock.patch('sahara.service.ops._prepare_provisioning')
    @mock.patch('sahara.service.ops.INFRA')
    def test_cluster_creation_with_errors(self, infra, prepare,
                                          deleted_checker):
        # Route the mocked INFRA entry points at the real engine so the
        # actual create/rollback code paths run.
        infra.create_cluster.side_effect = self.engine.create_cluster
        infra.rollback_cluster.side_effect = self.engine.rollback_cluster

        node_groups = [_make_ng_dict('test_group', 'test_flavor',
                                     ['data node', 'task tracker'], 2)]

        cluster = _create_cluster_mock(node_groups, [])
        prepare.return_value = (context.ctx(), cluster, mock.Mock())

        # First instance boots fine, the second fails -> triggers rollback.
        self.nova.servers.create.side_effect = [_mock_instance(1),
                                                MockException("test")]

        self.nova.servers.list.return_value = [_mock_instance(1)]

        deleted_checker.return_value = True

        ops._provision_cluster(cluster.id)

        ctx = context.ctx()
        cluster_obj = conductor.cluster_get_all(ctx)[0]
        # Rollback should leave no instances behind.
        self.assertEqual(0, len(cluster_obj.node_groups[0].instances))
class NodePlacementTest(AbstractInstanceTest):
    """Verifies the scheduler hints nova receives for anti-affinity."""

    @staticmethod
    def _expected_create_call(name, userdata, hints):
        """Build the nova servers.create call expected for one instance."""
        return mock.call(name,
                         "initial",
                         "test_flavor",
                         scheduler_hints=hints,
                         userdata=userdata,
                         key_name='user_keypair',
                         security_groups=None,
                         availability_zone=None)

    def _expected_calls(self, names, userdata, hints):
        """Expected servers.create calls for the given instance names."""
        return [self._expected_create_call(name, userdata, hints)
                for name in names]

    def test_one_node_groups_and_one_affinity_group(self):
        self.nova.server_groups.create.return_value = mock.Mock(id='123')
        node_groups = [_make_ng_dict('test_group', 'test_flavor',
                                     ['data node'], 2)]
        cluster = _create_cluster_mock(node_groups, ["data node"])
        self.engine._create_instances(cluster)
        userdata = _generate_user_data_script(cluster)

        # Both instances carry the anti-affinity server-group hint.
        self.nova.servers.create.assert_has_calls(
            self._expected_calls(["test_cluster-test_group-001",
                                  "test_cluster-test_group-002"],
                                 userdata, {'group': "123"}),
            any_order=False)

        cluster_obj = conductor.cluster_get_all(context.ctx())[0]
        self.assertEqual(2, len(cluster_obj.node_groups[0].instances))

    def test_one_node_groups_and_no_affinity_group(self):
        self.nova.server_groups.create.return_value = mock.Mock(id='123')
        node_groups = [_make_ng_dict('test_group', 'test_flavor',
                                     ['data node', 'task tracker'], 2)]
        cluster = _create_cluster_mock(node_groups, [])
        self.engine._create_instances(cluster)
        userdata = _generate_user_data_script(cluster)

        # No anti-affinity processes -> no scheduler hints at all.
        self.nova.servers.create.assert_has_calls(
            self._expected_calls(["test_cluster-test_group-001",
                                  "test_cluster-test_group-002"],
                                 userdata, None),
            any_order=False)

        cluster_obj = conductor.cluster_get_all(context.ctx())[0]
        self.assertEqual(2, len(cluster_obj.node_groups[0].instances))

    def test_two_node_groups_and_one_affinity_group(self):
        self.nova.server_groups.create.return_value = mock.Mock(id='123')
        node_groups = [_make_ng_dict("test_group_1", "test_flavor",
                                     ["data node", "test tracker"], 2),
                       _make_ng_dict("test_group_2", "test_flavor",
                                     ["data node", "test tracker"], 1)]
        cluster = _create_cluster_mock(node_groups, ["data node"])
        self.engine._create_instances(cluster)
        userdata = _generate_user_data_script(cluster)

        # All three instances share the single server-group hint.
        self.nova.servers.create.assert_has_calls(
            self._expected_calls(['test_cluster-test_group_1-001',
                                  'test_cluster-test_group_1-002',
                                  'test_cluster-test_group_2-001'],
                                 userdata, {'group': "123"}),
            any_order=False)

        cluster_obj = conductor.cluster_get_all(context.ctx())[0]
        total = (len(cluster_obj.node_groups[0].instances)
                 + len(cluster_obj.node_groups[1].instances))
        self.assertEqual(3, total)
class IpManagementTest(AbstractInstanceTest):
    """Covers floating IP assignment when use_floating_ips is disabled."""

    def setUp(self):
        super(IpManagementTest, self).setUp()
        self.engine = e.DirectEngine()

    def test_ip_assignment_use_no_floating(self):
        self.override_config("use_floating_ips", False)

        groups = [
            _make_ng_dict("test_group_1", "test_flavor",
                          ["data node", "test tracker"], 2, 'pool'),
            _make_ng_dict("test_group_2", "test_flavor",
                          ["name node", "test tracker"], 1),
        ]
        cluster = _create_cluster_mock(groups, ["data node"])
        self.engine._create_instances(cluster)

        ctx = context.ctx()
        refreshed = conductor.cluster_get(ctx, cluster)
        self.engine._assign_floating_ips(g.get_instances(refreshed))

        # Only the group with a floating_ip_pool (2 instances) gets IPs.
        self.nova.floating_ips.create.assert_has_calls(
            [mock.call("pool"), mock.call("pool")])
        self.assertEqual(2, self.nova.floating_ips.create.call_count,
                         "Not expected floating IPs number found.")
class ShutdownClusterTest(AbstractInstanceTest):
    """Verifies shutdown releases both floating IPs and servers."""

    @mock.patch('sahara.service.direct_engine.DirectEngine._check_if_deleted')
    @mock.patch('sahara.service.direct_engine.DirectEngine.'
                '_map_security_groups')
    def test_delete_floating_ips(self, map_mock, deleted_checker):
        node_groups = [_make_ng_dict("test_group_1", "test_flavor",
                                     ["data node", "test tracker"], 2, 'pool')]

        map_mock.return_value = []

        ctx = context.ctx()
        cluster = _create_cluster_mock(node_groups, ["datanode"])
        self.engine._create_instances(cluster)

        # Re-read the cluster so instance records are present.
        cluster = conductor.cluster_get(ctx, cluster)
        instances_list = g.get_instances(cluster)

        self.engine._assign_floating_ips(instances_list)

        deleted_checker.return_value = True

        self.engine._shutdown_instances(cluster)
        # Both instances had a floating IP; both must be deleted.
        self.assertEqual(2, self.nova.floating_ips.delete.call_count,
                         "Not expected floating IPs number found in delete")
        self.assertEqual(2, self.nova.servers.delete.call_count,
                         "Not expected")
def _make_ng_dict(name, flavor, processes, count, floating_ip_pool=None):
ng_dict = {'name': name, 'flavor_id': flavor, 'node_processes': processes,
'count': count, 'image_username': 'root'}
if floating_ip_pool:
ng_dict.update({"floating_ip_pool": floating_ip_pool})
return ng_dict
def _create_cluster_mock(node_groups, aa):
    """Persist and return a test cluster built from node_groups, with the
    given anti-affinity process list."""
    keypair = mock.Mock()
    keypair.public_key = "123"

    cluster_dict = {
        'name': 'test_cluster',
        'plugin_name': 'mock_plugin',
        'hadoop_version': 'mock_version',
        'default_image_id': 'initial',
        'user_keypair_id': 'user_keypair',
        'anti_affinity': aa,
        '_user_kp': keypair,
        'private_key': c.generate_key_pair()[0],
        'node_groups': node_groups,
    }
    return conductor.cluster_create(context.ctx(), cluster_dict)
def _mock_instance(id):
server = mock.Mock()
server.id = id
server.instance_id = id
server.status = 'ACTIVE'
server.networks = ["n1", "n2"]
server.addresses = {'n1': [{'OS-EXT-IPS:type': 'fixed',
'addr': "{0}.{0}.{0}.{0}" .format(id)}],
'n2': [{'OS-EXT-IPS:type': 'floating',
'addr': "{0}.{0}.{0}.{0}" .format(id)}]}
server.add_floating_ip.side_effect = [True, True, True]
return server
def _mock_ip(id):
ip = mock.Mock()
ip.id = id
ip.ip = "{0}.{0}.{0}.{0}" .format(id)
return ip
def _mock_instances(count):
    """Return `count` fake servers with string ids "1" .. str(count)."""
    servers = []
    for idx in range(count):
        servers.append(_mock_instance(str(idx + 1)))
    return servers
def _mock_ips(count):
    """Return `count` fake floating IPs with string ids "1" .. str(count)."""
    ips = []
    for idx in range(count):
        ips.append(_mock_ip(str(idx + 1)))
    return ips
def _generate_user_data_script(cluster):
script_template = """#!/bin/bash
echo "%(public_key)s" >> %(user_home)s/.ssh/authorized_keys\n
# ====== COMMENT OUT Defaults requiretty in /etc/sudoers ========
sed '/^Defaults requiretty*/ s/^/#/' -i /etc/sudoers\n
"""
return script_template % {
"public_key": cluster.management_public_key,
"user_home": "/root/"
}
def _create_nova_mock(novaclient):
    """Wire a fake nova client onto the patched novaclient factory."""
    fake_nova = mock.Mock()
    novaclient.return_value = fake_nova

    # Up to four servers / floating IPs can be created per test.
    fake_nova.servers.create.side_effect = _mock_instances(4)
    fake_nova.servers.get.return_value = _mock_instance(1)
    fake_nova.floating_ips.create.side_effect = _mock_ips(4)
    fake_nova.floating_ips.findall.return_value = _mock_ips(1)
    fake_nova.floating_ips.delete.side_effect = _mock_deletes(2)

    image_mock = mock.Mock()
    image_mock.username = "root"
    fake_nova.images.get = lambda _image_id: image_mock
    return fake_nova
def _mock_deletes(count):
    """Return `count` delete side effects (see _mock_delete for values)."""
    effects = []
    for call_no in range(1, count + 1):
        effects.append(_mock_delete(call_no))
    return effects
def _mock_delete(id):
    """Side effect for floating_ips.delete: None (success) for the first
    call, a NotFound instance (raised by mock's side_effect) afterwards."""
    if id != 1:
        return nova_exceptions.NotFound(code=404)
    return None
class MockException(Exception):
    """Exception type available to the tests in this module (usage not
    shown in this chunk)."""
    pass
| |
"""Routines related to PyPI, indexes"""
# The following comment should be removed at some point in the future.
# mypy: strict-optional=False
import functools
import itertools
import logging
import re
from typing import FrozenSet, Iterable, List, Optional, Set, Tuple, Union
from pip._vendor.packaging import specifiers
from pip._vendor.packaging.tags import Tag
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.packaging.version import _BaseVersion
from pip._vendor.packaging.version import parse as parse_version
from pip._internal.exceptions import (
BestVersionAlreadyInstalled,
DistributionNotFound,
InvalidWheelFilename,
UnsupportedWheel,
)
from pip._internal.index.collector import LinkCollector, parse_links
from pip._internal.models.candidate import InstallationCandidate
from pip._internal.models.format_control import FormatControl
from pip._internal.models.link import Link
from pip._internal.models.search_scope import SearchScope
from pip._internal.models.selection_prefs import SelectionPreferences
from pip._internal.models.target_python import TargetPython
from pip._internal.models.wheel import Wheel
from pip._internal.req import InstallRequirement
from pip._internal.utils._log import getLogger
from pip._internal.utils.filetypes import WHEEL_EXTENSION
from pip._internal.utils.hashes import Hashes
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import build_netloc
from pip._internal.utils.packaging import check_requires_python
from pip._internal.utils.unpacking import SUPPORTED_EXTENSIONS
# Names re-exported as this module's public API.
__all__ = ["FormatControl", "BestCandidateResult", "PackageFinder"]

logger = getLogger(__name__)

# A wheel build tag: either the empty tuple or (build number, remainder).
BuildTag = Union[Tuple[()], Tuple[int, str]]
# Shape of the tuple produced by CandidateEvaluator._sort_key.
CandidateSortingKey = Tuple[int, int, int, _BaseVersion, Optional[int], BuildTag]
def _check_link_requires_python(
    link: Link,
    version_info: Tuple[int, int, int],
    ignore_requires_python: bool = False,
) -> bool:
    """
    Return whether the given Python version is compatible with a link's
    "Requires-Python" value.

    :param version_info: A 3-tuple of ints representing the Python
        major-minor-micro version to check.
    :param ignore_requires_python: Whether to ignore the "Requires-Python"
        value if the given Python version isn't compatible.
    """
    try:
        is_compatible = check_requires_python(
            link.requires_python,
            version_info=version_info,
        )
    except specifiers.InvalidSpecifier:
        # A malformed specifier is logged and treated as "no constraint".
        logger.debug(
            "Ignoring invalid Requires-Python (%r) for link: %s",
            link.requires_python,
            link,
        )
        return True

    if is_compatible:
        return True

    version = ".".join(map(str, version_info))
    if not ignore_requires_python:
        logger.verbose(
            "Link requires a different Python (%s not in: %r): %s",
            version,
            link.requires_python,
            link,
        )
        return False

    logger.debug(
        "Ignoring failed Requires-Python check (%s not in: %r) for link: %s",
        version,
        link.requires_python,
        link,
    )
    return True
class LinkEvaluator:

    """
    Responsible for evaluating links for a particular project.
    """

    # Matches a trailing "-pyX.Y" marker in a version fragment.
    _py_version_re = re.compile(r"-py([123]\.?[0-9]?)$")

    # Don't include an allow_yanked default value to make sure each call
    # site considers whether yanked releases are allowed. This also causes
    # that decision to be made explicit in the calling code, which helps
    # people when reading the code.
    def __init__(
        self,
        project_name: str,
        canonical_name: str,
        formats: FrozenSet[str],
        target_python: TargetPython,
        allow_yanked: bool,
        ignore_requires_python: Optional[bool] = None,
    ) -> None:
        """
        :param project_name: The user supplied package name.
        :param canonical_name: The canonical package name.
        :param formats: The formats allowed for this package. Should be a set
            with 'binary' or 'source' or both in it.
        :param target_python: The target Python interpreter to use when
            evaluating link compatibility. This is used, for example, to
            check wheel compatibility, as well as when checking the Python
            version, e.g. the Python version embedded in a link filename
            (or egg fragment) and against an HTML link's optional PEP 503
            "data-requires-python" attribute.
        :param allow_yanked: Whether files marked as yanked (in the sense
            of PEP 592) are permitted to be candidates for install.
        :param ignore_requires_python: Whether to ignore incompatible
            PEP 503 "data-requires-python" values in HTML links. Defaults
            to False.
        """
        if ignore_requires_python is None:
            ignore_requires_python = False

        self._allow_yanked = allow_yanked
        self._canonical_name = canonical_name
        self._ignore_requires_python = ignore_requires_python
        self._formats = formats
        self._target_python = target_python

        self.project_name = project_name

    def evaluate_link(self, link: Link) -> Tuple[bool, Optional[str]]:
        """
        Determine whether a link is a candidate for installation.

        :return: A tuple (is_candidate, result), where `result` is (1) a
            version string if `is_candidate` is True, and (2) if
            `is_candidate` is False, an optional string to log the reason
            the link fails to qualify.
        """
        version = None
        # Yanked files (PEP 592) are only candidates when explicitly allowed.
        if link.is_yanked and not self._allow_yanked:
            reason = link.yanked_reason or "<none given>"
            return (False, f"yanked for reason: {reason}")

        # Prefer the name/version info from an explicit egg fragment; fall
        # back to splitting the filename into stem + extension.
        if link.egg_fragment:
            egg_info = link.egg_fragment
            ext = link.ext
        else:
            egg_info, ext = link.splitext()
            if not ext:
                return (False, "not a file")
            if ext not in SUPPORTED_EXTENSIONS:
                return (False, f"unsupported archive format: {ext}")
        if "binary" not in self._formats and ext == WHEEL_EXTENSION:
            reason = "No binaries permitted for {}".format(self.project_name)
            return (False, reason)
        # Special case: ".zip" links whose path mentions "macosx10" are
        # rejected outright.
        if "macosx10" in link.path and ext == ".zip":
            return (False, "macosx10 one")
        if ext == WHEEL_EXTENSION:
            try:
                wheel = Wheel(link.filename)
            except InvalidWheelFilename:
                return (False, "invalid wheel filename")
            if canonicalize_name(wheel.name) != self._canonical_name:
                reason = "wrong project name (not {})".format(self.project_name)
                return (False, reason)

            supported_tags = self._target_python.get_tags()
            if not wheel.supported(supported_tags):
                # Include the wheel's tags in the reason string to
                # simplify troubleshooting compatibility issues.
                file_tags = wheel.get_formatted_file_tags()
                reason = (
                    "none of the wheel's tags ({}) are compatible "
                    "(run pip debug --verbose to show compatible tags)".format(
                        ", ".join(file_tags)
                    )
                )
                return (False, reason)

            version = wheel.version

        # This should be up by the self.ok_binary check, but see issue 2700.
        if "source" not in self._formats and ext != WHEEL_EXTENSION:
            reason = f"No sources permitted for {self.project_name}"
            return (False, reason)

        # Non-wheel links: recover the version from the name-version fragment.
        if not version:
            version = _extract_version_from_fragment(
                egg_info,
                self._canonical_name,
            )
        if not version:
            reason = f"Missing project version for {self.project_name}"
            return (False, reason)

        # Strip a trailing "-pyX.Y" marker and require it to match the
        # target Python version.
        match = self._py_version_re.search(version)
        if match:
            version = version[: match.start()]
            py_version = match.group(1)
            if py_version != self._target_python.py_version:
                return (False, "Python version is incorrect")

        supports_python = _check_link_requires_python(
            link,
            version_info=self._target_python.py_version_info,
            ignore_requires_python=self._ignore_requires_python,
        )
        if not supports_python:
            # Return None for the reason text to suppress calling
            # _log_skipped_link().
            return (False, None)

        logger.debug("Found link %s, version: %s", link, version)

        return (True, version)
def filter_unallowed_hashes(
    candidates: List[InstallationCandidate],
    hashes: Hashes,
    project_name: str,
) -> List[InstallationCandidate]:
    """
    Return a new candidate list with hash-mismatching entries removed.

    If at least one candidate has an allowed hash, the result contains every
    candidate that either matches or carries no hash at all; otherwise the
    input candidates are returned unchanged (as a copy).

    Keeping the no-hash candidates when there is a match allows a warning to
    be logged if a more preferred candidate has no hash. Returning everything
    when nothing matches lets pip report the hash of the candidate that would
    otherwise have been installed, so the user can update their requirements
    file with the desired hash.
    """
    if not hashes:
        logger.debug(
            "Given no hashes to check %s links for project %r: "
            "discarding no candidates",
            len(candidates),
            project_name,
        )
        # Hand back a copy, never the caller's own list.
        return list(candidates)

    matches_or_no_digest = []
    non_matches = []  # kept only for the log message below
    match_count = 0
    for candidate in candidates:
        link = candidate.link
        if link.has_hash:
            if link.is_hash_allowed(hashes=hashes):
                match_count += 1
            else:
                non_matches.append(candidate)
                continue
        # No digest at all, or an allowed one: keep the candidate.
        matches_or_no_digest.append(candidate)

    if match_count:
        filtered = matches_or_no_digest
    else:
        # No matches at all: return a copy of the original list.
        filtered = list(candidates)

    if len(filtered) == len(candidates):
        discard_message = "discarding no candidates"
    else:
        discard_message = "discarding {} non-matches:\n {}".format(
            len(non_matches),
            "\n ".join(str(candidate.link) for candidate in non_matches),
        )

    logger.debug(
        "Checked %s links for project %r against %s hashes "
        "(%s matches, %s no digest): %s",
        len(candidates),
        project_name,
        hashes.digest_count,
        match_count,
        len(matches_or_no_digest) - match_count,
        discard_message,
    )

    return filtered
class CandidatePreferences:
    """
    Bundles the user-selectable knobs used when filtering and sorting
    InstallationCandidate objects.
    """

    def __init__(
        self,
        prefer_binary: bool = False,
        allow_all_prereleases: bool = False,
    ) -> None:
        """
        :param prefer_binary: Whether wheels should sort above sources.
        :param allow_all_prereleases: Whether to allow all pre-releases.
        """
        self.prefer_binary = prefer_binary
        self.allow_all_prereleases = allow_all_prereleases
class BestCandidateResult:
    """A collection of candidates, returned by `PackageFinder.find_best_candidate`.

    This class is only intended to be instantiated by CandidateEvaluator's
    `compute_best_candidate()` method.
    """

    def __init__(
        self,
        candidates: List[InstallationCandidate],
        applicable_candidates: List[InstallationCandidate],
        best_candidate: Optional[InstallationCandidate],
    ) -> None:
        """
        :param candidates: A sequence of all available candidates found.
        :param applicable_candidates: The applicable candidates.
        :param best_candidate: The most preferred candidate found, or None
            if no applicable candidates were found.
        """
        # Invariants: the applicable set is drawn from the full set, and
        # the best candidate (when present) is one of the applicable ones.
        assert set(applicable_candidates) <= set(candidates)
        if best_candidate is None:
            assert not applicable_candidates
        else:
            assert best_candidate in applicable_candidates

        self.best_candidate = best_candidate
        self._candidates = candidates
        self._applicable_candidates = applicable_candidates

    def iter_all(self) -> Iterable[InstallationCandidate]:
        """Iterate through all candidates."""
        return iter(self._candidates)

    def iter_applicable(self) -> Iterable[InstallationCandidate]:
        """Iterate through the applicable candidates."""
        return iter(self._applicable_candidates)
class CandidateEvaluator:

    """
    Responsible for filtering and sorting candidates for installation based
    on what tags are valid.
    """

    @classmethod
    def create(
        cls,
        project_name: str,
        target_python: Optional[TargetPython] = None,
        prefer_binary: bool = False,
        allow_all_prereleases: bool = False,
        specifier: Optional[specifiers.BaseSpecifier] = None,
        hashes: Optional[Hashes] = None,
    ) -> "CandidateEvaluator":
        """Create a CandidateEvaluator object.

        :param target_python: The target Python interpreter to use when
            checking compatibility. If None (the default), a TargetPython
            object will be constructed from the running Python.
        :param specifier: An optional object implementing `filter`
            (e.g. `packaging.specifiers.SpecifierSet`) to filter applicable
            versions.
        :param hashes: An optional collection of allowed hashes.
        """
        if target_python is None:
            target_python = TargetPython()
        if specifier is None:
            # An empty SpecifierSet imposes no version constraint.
            specifier = specifiers.SpecifierSet()

        supported_tags = target_python.get_tags()

        return cls(
            project_name=project_name,
            supported_tags=supported_tags,
            specifier=specifier,
            prefer_binary=prefer_binary,
            allow_all_prereleases=allow_all_prereleases,
            hashes=hashes,
        )

    def __init__(
        self,
        project_name: str,
        supported_tags: List[Tag],
        specifier: specifiers.BaseSpecifier,
        prefer_binary: bool = False,
        allow_all_prereleases: bool = False,
        hashes: Optional[Hashes] = None,
    ) -> None:
        """
        :param supported_tags: The PEP 425 tags supported by the target
            Python in order of preference (most preferred first).
        """
        self._allow_all_prereleases = allow_all_prereleases
        self._hashes = hashes
        self._prefer_binary = prefer_binary
        self._project_name = project_name
        self._specifier = specifier
        self._supported_tags = supported_tags
        # Since the index of the tag in the _supported_tags list is used
        # as a priority, precompute a map from tag to index/priority to be
        # used in wheel.find_most_preferred_tag.
        self._wheel_tag_preferences = {
            tag: idx for idx, tag in enumerate(supported_tags)
        }

    def get_applicable_candidates(
        self,
        candidates: List[InstallationCandidate],
    ) -> List[InstallationCandidate]:
        """
        Return the applicable candidates from a list of candidates.
        """
        # Using None infers from the specifier instead.
        allow_prereleases = self._allow_all_prereleases or None
        specifier = self._specifier
        versions = {
            str(v)
            for v in specifier.filter(
                # We turn the version object into a str here because otherwise
                # when we're debundled but setuptools isn't, Python will see
                # packaging.version.Version and
                # pkg_resources._vendor.packaging.version.Version as different
                # types. This way we'll use a str as a common data interchange
                # format. If we stop using the pkg_resources provided specifier
                # and start using our own, we can drop the cast to str().
                (str(c.version) for c in candidates),
                prereleases=allow_prereleases,
            )
        }

        # Again, converting version to str to deal with debundling.
        applicable_candidates = [c for c in candidates if str(c.version) in versions]

        filtered_applicable_candidates = filter_unallowed_hashes(
            candidates=applicable_candidates,
            hashes=self._hashes,
            project_name=self._project_name,
        )

        return sorted(filtered_applicable_candidates, key=self._sort_key)

    def _sort_key(self, candidate: InstallationCandidate) -> CandidateSortingKey:
        """
        Function to pass as the `key` argument to a call to sorted() to sort
        InstallationCandidates by preference.

        Returns a tuple such that tuples sorting as greater using Python's
        default comparison operator are more preferred.

        The preference is as follows:

        First and foremost, candidates with allowed (matching) hashes are
        always preferred over candidates without matching hashes. This is
        because e.g. if the only candidate with an allowed hash is yanked,
        we still want to use that candidate.

        Second, excepting hash considerations, candidates that have been
        yanked (in the sense of PEP 592) are always less preferred than
        candidates that haven't been yanked. Then:

        If not finding wheels, they are sorted by version only.
        If finding wheels, then the sort order is by version, then:
          1. existing installs
          2. wheels ordered via Wheel.support_index_min(self._supported_tags)
          3. source archives
        If prefer_binary was set, then all wheels are sorted above sources.

        Note: it was considered to embed this logic into the Link
              comparison operators, but then different sdist links
              with the same version, would have to be considered equal
        """
        valid_tags = self._supported_tags
        support_num = len(valid_tags)
        build_tag: BuildTag = ()
        binary_preference = 0
        link = candidate.link
        if link.is_wheel:
            # can raise InvalidWheelFilename
            wheel = Wheel(link.filename)
            try:
                # Negate so a more-preferred (lower-index) tag sorts higher.
                pri = -(
                    wheel.find_most_preferred_tag(
                        valid_tags, self._wheel_tag_preferences
                    )
                )
            except ValueError:
                raise UnsupportedWheel(
                    "{} is not a supported wheel for this platform. It "
                    "can't be sorted.".format(wheel.filename)
                )
            if self._prefer_binary:
                binary_preference = 1
            if wheel.build_tag is not None:
                # NOTE(review): assumes the build tag starts with digits, so
                # `match` is never None here — confirm against Wheel parsing.
                match = re.match(r"^(\d+)(.*)$", wheel.build_tag)
                build_tag_groups = match.groups()
                build_tag = (int(build_tag_groups[0]), build_tag_groups[1])
        else:  # sdist
            # Every source archive sorts below any supported wheel.
            pri = -(support_num)
        has_allowed_hash = int(link.is_hash_allowed(self._hashes))
        yank_value = -1 * int(link.is_yanked)  # -1 for yanked.
        return (
            has_allowed_hash,
            yank_value,
            binary_preference,
            candidate.version,
            pri,
            build_tag,
        )

    def sort_best_candidate(
        self,
        candidates: List[InstallationCandidate],
    ) -> Optional[InstallationCandidate]:
        """
        Return the best candidate per the instance's sort order, or None if
        no candidate is acceptable.
        """
        if not candidates:
            return None
        best_candidate = max(candidates, key=self._sort_key)
        return best_candidate

    def compute_best_candidate(
        self,
        candidates: List[InstallationCandidate],
    ) -> BestCandidateResult:
        """
        Compute and return a `BestCandidateResult` instance.
        """
        applicable_candidates = self.get_applicable_candidates(candidates)

        best_candidate = self.sort_best_candidate(applicable_candidates)

        return BestCandidateResult(
            candidates,
            applicable_candidates=applicable_candidates,
            best_candidate=best_candidate,
        )
class PackageFinder:
    """This finds packages.

    This is meant to match easy_install's technique for looking for
    packages, by reading pages and looking for appropriate links.
    """

    def __init__(
        self,
        link_collector: LinkCollector,
        target_python: TargetPython,
        allow_yanked: bool,
        use_deprecated_html5lib: bool,
        format_control: Optional[FormatControl] = None,
        candidate_prefs: Optional[CandidatePreferences] = None,
        ignore_requires_python: Optional[bool] = None,
    ) -> None:
        """
        This constructor is primarily meant to be used by the create() class
        method and from tests.

        :param format_control: A FormatControl object, used to control
            the selection of source packages / binary packages when consulting
            the index and links.
        :param candidate_prefs: Options to use when creating a
            CandidateEvaluator object.
        """
        if candidate_prefs is None:
            candidate_prefs = CandidatePreferences()

        format_control = format_control or FormatControl(set(), set())

        self._allow_yanked = allow_yanked
        self._candidate_prefs = candidate_prefs
        self._ignore_requires_python = ignore_requires_python
        self._link_collector = link_collector
        self._target_python = target_python
        self._use_deprecated_html5lib = use_deprecated_html5lib

        self.format_control = format_control

        # These are boring links that have already been logged somehow.
        self._logged_links: Set[Link] = set()

    # Don't include an allow_yanked default value to make sure each call
    # site considers whether yanked releases are allowed. This also causes
    # that decision to be made explicit in the calling code, which helps
    # people when reading the code.
    @classmethod
    def create(
        cls,
        link_collector: LinkCollector,
        selection_prefs: SelectionPreferences,
        target_python: Optional[TargetPython] = None,
        *,
        use_deprecated_html5lib: bool,
    ) -> "PackageFinder":
        """Create a PackageFinder.

        :param selection_prefs: The candidate selection preferences, as a
            SelectionPreferences object.
        :param target_python: The target Python interpreter to use when
            checking compatibility. If None (the default), a TargetPython
            object will be constructed from the running Python.
        """
        if target_python is None:
            target_python = TargetPython()

        candidate_prefs = CandidatePreferences(
            prefer_binary=selection_prefs.prefer_binary,
            allow_all_prereleases=selection_prefs.allow_all_prereleases,
        )

        return cls(
            candidate_prefs=candidate_prefs,
            link_collector=link_collector,
            target_python=target_python,
            allow_yanked=selection_prefs.allow_yanked,
            format_control=selection_prefs.format_control,
            ignore_requires_python=selection_prefs.ignore_requires_python,
            use_deprecated_html5lib=use_deprecated_html5lib,
        )

    @property
    def target_python(self) -> TargetPython:
        return self._target_python

    @property
    def search_scope(self) -> SearchScope:
        # Search scope lives on the link collector; this is a pass-through.
        return self._link_collector.search_scope

    @search_scope.setter
    def search_scope(self, search_scope: SearchScope) -> None:
        self._link_collector.search_scope = search_scope

    @property
    def find_links(self) -> List[str]:
        return self._link_collector.find_links

    @property
    def index_urls(self) -> List[str]:
        return self.search_scope.index_urls

    @property
    def trusted_hosts(self) -> Iterable[str]:
        # Render each (host, port) origin as a "host:port" netloc string.
        for host_port in self._link_collector.session.pip_trusted_origins:
            yield build_netloc(*host_port)

    @property
    def allow_all_prereleases(self) -> bool:
        return self._candidate_prefs.allow_all_prereleases

    def set_allow_all_prereleases(self) -> None:
        self._candidate_prefs.allow_all_prereleases = True

    @property
    def prefer_binary(self) -> bool:
        return self._candidate_prefs.prefer_binary

    def set_prefer_binary(self) -> None:
        self._candidate_prefs.prefer_binary = True

    def make_link_evaluator(self, project_name: str) -> LinkEvaluator:
        # Build a per-project evaluator honoring this finder's format
        # control and Python-compatibility settings.
        canonical_name = canonicalize_name(project_name)
        formats = self.format_control.get_allowed_formats(canonical_name)

        return LinkEvaluator(
            project_name=project_name,
            canonical_name=canonical_name,
            formats=formats,
            target_python=self._target_python,
            allow_yanked=self._allow_yanked,
            ignore_requires_python=self._ignore_requires_python,
        )

    def _sort_links(self, links: Iterable[Link]) -> List[Link]:
        """
        Returns elements of links in order, non-egg links first, egg links
        second, while eliminating duplicates
        """
        eggs, no_eggs = [], []
        seen: Set[Link] = set()
        for link in links:
            if link not in seen:
                seen.add(link)
                if link.egg_fragment:
                    eggs.append(link)
                else:
                    no_eggs.append(link)
        return no_eggs + eggs

    def _log_skipped_link(self, link: Link, reason: str) -> None:
        # Each skipped link is logged at most once per finder instance.
        if link not in self._logged_links:
            # Put the link at the end so the reason is more visible and because
            # the link string is usually very long.
            logger.debug("Skipping link: %s: %s", reason, link)
            self._logged_links.add(link)

    def get_install_candidate(
        self, link_evaluator: LinkEvaluator, link: Link
    ) -> Optional[InstallationCandidate]:
        """
        If the link is a candidate for install, convert it to an
        InstallationCandidate and return it. Otherwise, return None.
        """
        is_candidate, result = link_evaluator.evaluate_link(link)
        if not is_candidate:
            # A falsy reason (None) means "skip silently" per evaluate_link.
            if result:
                self._log_skipped_link(link, reason=result)
            return None

        # When is_candidate is True, `result` is the version string.
        return InstallationCandidate(
            name=link_evaluator.project_name,
            link=link,
            version=result,
        )

    def evaluate_links(
        self, link_evaluator: LinkEvaluator, links: Iterable[Link]
    ) -> List[InstallationCandidate]:
        """
        Convert links that are candidates to InstallationCandidate objects.
        """
        candidates = []
        for link in self._sort_links(links):
            candidate = self.get_install_candidate(link_evaluator, link)
            if candidate is not None:
                candidates.append(candidate)

        return candidates

    def process_project_url(
        self, project_url: Link, link_evaluator: LinkEvaluator
    ) -> List[InstallationCandidate]:
        """Fetch one project page and return its installation candidates."""
        logger.debug(
            "Fetching project page and analyzing links: %s",
            project_url,
        )
        html_page = self._link_collector.fetch_page(project_url)
        if html_page is None:
            # Page could not be fetched/parsed; contribute no candidates.
            return []

        page_links = list(parse_links(html_page, self._use_deprecated_html5lib))

        with indent_log():
            package_links = self.evaluate_links(
                link_evaluator,
                links=page_links,
            )

        return package_links

    # NOTE(review): lru_cache on an instance method keys on and retains
    # `self` for the cache's lifetime; presumably acceptable because the
    # finder lives for the whole invocation — confirm.
    @functools.lru_cache(maxsize=None)
    def find_all_candidates(self, project_name: str) -> List[InstallationCandidate]:
        """Find all available InstallationCandidate for project_name

        This checks index_urls and find_links.
        All versions found are returned as an InstallationCandidate list.

        See LinkEvaluator.evaluate_link() for details on which files
        are accepted.
        """
        link_evaluator = self.make_link_evaluator(project_name)

        collected_sources = self._link_collector.collect_sources(
            project_name=project_name,
            candidates_from_page=functools.partial(
                self.process_project_url,
                link_evaluator=link_evaluator,
            ),
        )

        page_candidates_it = itertools.chain.from_iterable(
            source.page_candidates()
            for sources in collected_sources
            for source in sources
            if source is not None
        )
        page_candidates = list(page_candidates_it)

        file_links_it = itertools.chain.from_iterable(
            source.file_links()
            for sources in collected_sources
            for source in sources
            if source is not None
        )
        file_candidates = self.evaluate_links(
            link_evaluator,
            sorted(file_links_it, reverse=True),
        )

        if logger.isEnabledFor(logging.DEBUG) and file_candidates:
            paths = []
            for candidate in file_candidates:
                assert candidate.link.url  # we need to have a URL
                try:
                    paths.append(candidate.link.file_path)
                except Exception:
                    paths.append(candidate.link.url)  # it's not a local file

            logger.debug("Local files found: %s", ", ".join(paths))

        # This is an intentional priority ordering
        return file_candidates + page_candidates

    def make_candidate_evaluator(
        self,
        project_name: str,
        specifier: Optional[specifiers.BaseSpecifier] = None,
        hashes: Optional[Hashes] = None,
    ) -> CandidateEvaluator:
        """Create a CandidateEvaluator object to use."""
        candidate_prefs = self._candidate_prefs
        return CandidateEvaluator.create(
            project_name=project_name,
            target_python=self._target_python,
            prefer_binary=candidate_prefs.prefer_binary,
            allow_all_prereleases=candidate_prefs.allow_all_prereleases,
            specifier=specifier,
            hashes=hashes,
        )

    @functools.lru_cache(maxsize=None)
    def find_best_candidate(
        self,
        project_name: str,
        specifier: Optional[specifiers.BaseSpecifier] = None,
        hashes: Optional[Hashes] = None,
    ) -> BestCandidateResult:
        """Find matches for the given project and specifier.

        :param specifier: An optional object implementing `filter`
            (e.g. `packaging.specifiers.SpecifierSet`) to filter applicable
            versions.

        :return: A `BestCandidateResult` instance.
        """
        candidates = self.find_all_candidates(project_name)
        candidate_evaluator = self.make_candidate_evaluator(
            project_name=project_name,
            specifier=specifier,
            hashes=hashes,
        )
        return candidate_evaluator.compute_best_candidate(candidates)

    def find_requirement(
        self, req: InstallRequirement, upgrade: bool
    ) -> Optional[InstallationCandidate]:
        """Try to find a Link matching req

        Expects req, an InstallRequirement and upgrade, a boolean
        Returns a InstallationCandidate if found,
        Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise
        """
        hashes = req.hashes(trust_internet=False)
        best_candidate_result = self.find_best_candidate(
            req.name,
            specifier=req.specifier,
            hashes=hashes,
        )
        best_candidate = best_candidate_result.best_candidate

        installed_version: Optional[_BaseVersion] = None
        if req.satisfied_by is not None:
            installed_version = req.satisfied_by.version

        def _format_versions(cand_iter: Iterable[InstallationCandidate]) -> str:
            # This repeated parse_version and str() conversion is needed to
            # handle different vendoring sources from pip and pkg_resources.
            # If we stop using the pkg_resources provided specifier and start
            # using our own, we can drop the cast to str().
            return (
                ", ".join(
                    sorted(
                        {str(c.version) for c in cand_iter},
                        key=parse_version,
                    )
                )
                or "none"
            )

        if installed_version is None and best_candidate is None:
            logger.critical(
                "Could not find a version that satisfies the requirement %s "
                "(from versions: %s)",
                req,
                _format_versions(best_candidate_result.iter_all()),
            )

            raise DistributionNotFound(
                "No matching distribution found for {}".format(req)
            )

        # The installed version wins ties (<=), so equal versions do not
        # trigger a reinstall.
        best_installed = False
        if installed_version and (
            best_candidate is None or best_candidate.version <= installed_version
        ):
            best_installed = True

        if not upgrade and installed_version is not None:
            if best_installed:
                logger.debug(
                    "Existing installed version (%s) is most up-to-date and "
                    "satisfies requirement",
                    installed_version,
                )
            else:
                logger.debug(
                    "Existing installed version (%s) satisfies requirement "
                    "(most up-to-date version is %s)",
                    installed_version,
                    best_candidate.version,
                )
            return None

        if best_installed:
            # We have an existing version, and its the best version
            logger.debug(
                "Installed version (%s) is most up-to-date (past versions: %s)",
                installed_version,
                _format_versions(best_candidate_result.iter_applicable()),
            )
            raise BestVersionAlreadyInstalled

        logger.debug(
            "Using version %s (newest of versions: %s)",
            best_candidate.version,
            _format_versions(best_candidate_result.iter_applicable()),
        )
        return best_candidate
def _find_name_version_sep(fragment: str, canonical_name: str) -> int:
    """Locate the dash that separates name from version in *fragment*.

    :param fragment: A <package>-<version> filename "fragment" (stem) or
        egg fragment.
    :param canonical_name: The package's canonical name.

    Needed because the canonicalized name does not necessarily have the
    same length as the name part of the fragment. An example::

        >>> fragment = 'foo__bar-1.0'
        >>> canonical_name = 'foo-bar'
        >>> _find_name_version_sep(fragment, canonical_name)
        8
    """
    # The name part may itself contain dashes, so probe every dash: the
    # first position whose prefix canonicalizes to the expected name is
    # the separator.
    for index, char in enumerate(fragment):
        if char == "-" and canonicalize_name(fragment[:index]) == canonical_name:
            return index
    raise ValueError(f"{fragment} does not match {canonical_name}")
def _extract_version_from_fragment(fragment: str, canonical_name: str) -> Optional[str]:
    """Parse the version out of a <package>-<version> filename "fragment"
    (stem) or egg fragment, or return None if it cannot be recovered.

    :param fragment: The string to parse. E.g. foo-2.1
    :param canonical_name: The canonicalized name of the package this
        belongs to.
    """
    try:
        sep = _find_name_version_sep(fragment, canonical_name)
    except ValueError:
        return None
    version = fragment[sep + 1:]
    # An empty version part is reported as "no version found".
    return version or None
| |
#--------Include modules---------------
from copy import copy
import rospy
from visualization_msgs.msg import Marker
from std_msgs.msg import String
from geometry_msgs.msg import Point
from os import system
from random import random
from numpy import array
from numpy import floor,ceil
from numpy import delete
from numpy import concatenate
from numpy import vstack
from numpy import linalg as LA
from math import copysign
from numpy import where
from numpy import logical_and as AND
from numpy import all as All
from scipy.optimize import minimize
# Nearest function-------------------------------------
def Nearest(V, x):
    """Return the row index of the 2-D array V whose row is nearest
    (Euclidean norm) to point x.

    Fix: the original left ``result`` unassigned when V had no rows (or
    every row was at least the sentinel distance away), raising NameError
    on return.  We now default to index 0.
    """
    best = 1000000  # sentinel starting distance kept from the original
    result = 0
    for i in range(V.shape[0]):
        d = LA.norm(V[i, :] - x)
        if d < best:
            best = d
            result = i
    return result
# Steer function-------------------------------------
def myfun(x, x0, x1, eta):
    """Objective for Steer: Euclidean distance from candidate point x to
    the target x1.

    x0 and eta are unused here; they ride along because the optimizer
    forwards the whole parameter tuple to the objective.
    """
    candidate = array([x[0], x[1]])
    return LA.norm(candidate - x1)
def Steer(x0, x1, eta):
    """Step from x0 toward x1 by at most eta.

    Minimizes the distance to x1 (via myfun) with COBYLA, subject to the
    new point staying within radius eta of x0.
    """
    params = (x0, x1, eta)

    def within_radius(x):
        # Inequality constraint (>= 0 feasible): eta minus the distance
        # travelled from the start point.
        pt = array([x[0], x[1]])
        start = params[0]
        radius = params[2]
        return -LA.norm(pt - start) + radius

    constraint = {'type': 'ineq',
                  'fun': within_radius}
    res = minimize(myfun, [x0[0], x0[1]], args=params,
                   constraints=constraint, method='COBYLA',
                   options={'disp': False})
    return array([res.x[0], res.x[1]])
# gridValue function-------------------------------------
def gridValue(mapData, Xp):
    """Return the occupancy-grid value at world point Xp.

    mapData is an OccupancyGrid-like message (info.resolution,
    info.origin.position, info.width, data).  Map data convention:
    100 occupied, -1 unknown, 0 free.  Points outside the stored map
    report 100 (occupied).
    """
    resolution = mapData.info.resolution
    Xstartx = mapData.info.origin.position.x
    Xstarty = mapData.info.origin.position.y
    width = mapData.info.width
    Data = mapData.data
    # Row-major cell index of the point relative to the map origin.
    index = (floor((Xp[1] - Xstarty) / resolution) * width) + (floor((Xp[0] - Xstartx) / resolution))
    # Fix: a point below/left of the origin produced a negative index that
    # silently wrapped around to the end of Data; treat it as out-of-map.
    if 0 <= int(index) < len(Data):
        return Data[int(index)]
    return 100
# gridCheck function-------------------------------------
def gridCheck(mapData, Xp):
    """Return 0 if the grid cell at world point Xp is free, else 1.

    A cell counts as occupied (1) when its value is anything other than 0,
    so unknown cells (-1) are treated as obstacles.  Points outside the
    stored map also return 1.
    """
    resolution = mapData.info.resolution
    Xstartx = mapData.info.origin.position.x
    Xstarty = mapData.info.origin.position.y
    width = mapData.info.width
    Data = mapData.data
    # Row-major cell index of the point relative to the map origin.
    index = (floor((Xp[1] - Xstarty) / resolution) * width) + (floor((Xp[0] - Xstartx) / resolution))
    # Fix: negative indices (point outside the map) previously wrapped
    # around to the far end of Data; out-of-map now counts as occupied.
    if 0 <= int(index) < len(Data):
        return 0 if Data[int(index)] == 0 else 1
    return 1
# ObstacleFree function-------------------------------------
def ObstacleFree(xnear, xnew, mapsub):
    """Return 1 if the straight segment xnear -> xnew crosses only free
    cells of the occupancy grid, else 0.

    The segment is sampled by repeatedly Steer-ing half a cell at a time;
    the endpoint itself is checked last.
    """
    rez = mapsub.info.resolution * 0.5
    stepz = int(ceil(LA.norm(xnew - xnear)) / rez)
    sample = xnear
    free = 1
    for _ in range(stepz):
        sample = Steer(sample, xnew, rez)
        if gridCheck(mapsub, sample) != 0:
            free = 0
    if gridCheck(mapsub, xnew) != 0:
        free = 0
    return free
# Find function-------------------------------------
def Find(E,x):
    # Return the row index of edge matrix E whose *child* endpoint equals
    # point x, or 0 when no row matches.  NOTE(review): 0 is also a valid
    # row index, so "not found" is indistinguishable from "first row".
    # Rows are assumed to be [parent_x, parent_y, child_x, child_y] —
    # TODO confirm against the tree-building caller.
    if not All(array([E.shape]).shape==array([1,1])):
        # Element-wise equality masks against both coordinates of x.
        yy=E==x[1]
        xx=E==x[0]
        # A row matches when column 2 equals x[0] AND column 3 equals x[1].
        m=AND(yy[:,3], xx[:,2])
        m=where(m==True)
        if len(m[0])>0:
            return m[0][0]
        else:
            return 0
    # NOTE(review): when E is still a bare 1-D edge the shape test fails
    # and the function implicitly returns None.
# Near function-------------------------------------
def Near(V, xnew, r):
    """Return the rows of V within Euclidean distance r of xnew.

    A dummy [0, 0] row seeds the stack and is removed at the end, exactly
    as in the original; when nothing is within r the dummy never becomes a
    2-D stack, so the result collapses to the 1-D array([0]) quirk of the
    original implementation.
    """
    neighbours = array([0, 0])
    for row in range(V.shape[0]):
        candidate = V[row, :]
        if LA.norm(candidate - xnew) <= r:
            neighbours = vstack((neighbours, candidate))
    return delete(neighbours, (0), axis=0)
# Cost function-------------------------------------
def Cost(E,xn):
    # Path cost of node xn: sum of Euclidean edge lengths walked from xn
    # back to the tree root (the parent point stored in E's first row).
    x=xn
    if All(array([E.shape]).shape==array([1,1])):
        # E is still a bare 1-D edge: the tree has no real edges yet.
        c=0
    else:
        xinit=E[0,0:2]
        c=0
        while not All(x==xinit):
            # Find() yields the row whose child endpoint is x; its first
            # two columns hold that node's parent point.
            xp=E[Find(E,x),0:2]
            c+=LA.norm(x-xp)
            x=xp
            # NOTE(review): if x is absent from E, Find() returns 0 and this
            # loop only terminates if row 0 leads back to xinit — TODO confirm.
    return c
# prepEdges function
def prepEdges(E):
    """Flatten the edge matrix E into a list of Point messages.

    E rows are [x0, y0, x1, y1]; the output alternates the two endpoint
    columns of every edge (x0, x1, x0, x1, ... paired with y0, y1, ...),
    matching the original column-delete shuffle, which is the layout
    expected by a LINE_LIST visualization marker.  Returns an empty list
    while E is still a bare 1-D edge.
    """
    points = []
    if not All(array([E.shape]).shape == array([1, 1])):
        # Keep columns [x0, x1] and [y0, y1] respectively, then interleave.
        xs = delete(delete(E, (1), axis=1), (2), axis=1).flatten()
        ys = delete(delete(E, (0), axis=1), (1), axis=1).flatten()
        for px, py in zip(xs, ys):
            pt = Point()
            pt.x = px
            pt.y = py
            points.append(pt)
    return points
# Assigner 3 robots------------------------------------------------------------------------------------------------------------------------
def assigner3(goal,x_new,client1,client2,client3,listener):
    # Dispatch exploration goal x_new to whichever of the three robots is
    # available and closest.  States 2,3,4,5,9 are presumably the actionlib
    # terminal GoalStatus values (preempted/succeeded/aborted/rejected/
    # lost), i.e. "previous goal finished, robot free" — TODO confirm
    # against actionlib.GoalStatus.
    clientstate1=client1.get_state()
    clientstate2=client2.get_state()
    clientstate3=client3.get_state()
    # Availability weight: 1 when free, a huge penalty otherwise so a busy
    # robot can never win the distance comparison below.
    if clientstate1==2 or clientstate1==3 or clientstate1==4 or clientstate1==5 or clientstate1==9:
        aval1=1
    else:
        aval1=10000000
    if clientstate2==2 or clientstate2==3 or clientstate2==4 or clientstate2==5 or clientstate2==9:
        aval2=1
    else:
        aval2=10000000
    if clientstate3==2 or clientstate3==3 or clientstate3==4 or clientstate3==5 or clientstate3==9:
        aval3=1
    else:
        aval3=10000000
    # Current robot positions, all expressed in the shared /robot_1/map frame.
    (trans1,rot) = listener.lookupTransform('/robot_1/map', '/robot_1/base_link', rospy.Time(0))
    (trans2,rot) = listener.lookupTransform('/robot_1/map', '/robot_2/base_link', rospy.Time(0))
    (trans3,rot) = listener.lookupTransform('/robot_1/map', '/robot_3/base_link', rospy.Time(0))
    # Weighted cost = distance to the goal scaled by availability.
    dist1=LA.norm(array([ trans1[0],trans1[1] ])-x_new)*aval1
    dist2=LA.norm(array([ trans2[0],trans2[1] ])-x_new)*aval2
    dist3=LA.norm(array([ trans3[0],trans3[1] ])-x_new)*aval3
    alldist=[dist1,dist2,dist3]
    # if no robot is available wait
    # NOTE(review): alldist (with the aval weights baked in) was computed
    # before this busy-wait, so the robot chosen below can be based on
    # stale distances/availability once a robot frees up — TODO confirm
    # this is intended.
    while aval1==aval2==aval3==10000000:
        clientstate1=client1.get_state()
        clientstate2=client2.get_state()
        clientstate3=client3.get_state()
        if clientstate1==2 or clientstate1==3 or clientstate1==4 or clientstate1==5 or clientstate1==9:
            aval1=1
        else:
            aval1=10000000
        if clientstate2==2 or clientstate2==3 or clientstate2==4 or clientstate2==5 or clientstate2==9:
            aval2=1
        else:
            aval2=10000000
        if clientstate3==2 or clientstate3==3 or clientstate3==4 or clientstate3==5 or clientstate3==9:
            aval3=1
        else:
            aval3=10000000
    # Fill in the move_base goal at the sampled point.
    goal.target_pose.pose.position.x=x_new[0]
    goal.target_pose.pose.position.y=x_new[1]
    goal.target_pose.pose.orientation.w = 1.0
    #send command to the lowest cost available robot
    if min(alldist)==dist1 and aval1==1:
        client1.send_goal(goal)
        #client1.wait_for_result()
        #client1.get_result()
    elif min(alldist)==dist2 and aval2==1:
        client2.send_goal(goal)
        #client2.wait_for_result()
        #client2.get_result()
    elif min(alldist)==dist3 and aval3==1:
        client3.send_goal(goal)
        #client3.wait_for_result()
        #client3.get_result()
    return 0
# Assigner 1 robots------------------------------------------------------------------------------------------------------------------------
def assigner1(goal, x_new, client1, listener):
    """Send `goal` to client1 if its previous goal has finished.

    x_new and listener are unused; they are kept for signature parity with
    the multi-robot assigners.  States 2..5 and 9 are treated as "robot
    available".
    """
    state = client1.get_state()
    if state in (2, 3, 4, 5, 9):
        client1.send_goal(goal)
    return 0
# Assigner 1 robots opecv detector------------------------------------------------------------------------------------------------------------------------
def assigner1new(goal, x_new, client1, listener):
    """Fill `goal` with target point x_new and send it to client1 when the
    robot is available (listener is unused; kept for signature parity)."""
    goal.target_pose.pose.position.x = x_new[0]
    goal.target_pose.pose.position.y = x_new[1]
    goal.target_pose.pose.orientation.w = 1.0
    if client1.get_state() in (2, 3, 4, 5, 9):
        client1.send_goal(goal)
    return 0
#-------------RRT frontier
# oObstacleFree function-------------------------------------
def ObstacleFree2(xnear, xnew, mapsub):
    """Classify the straight segment xnear -> xnew against the occupancy grid.

    Returns:
        -1 if the path touches unknown (-1) cells (frontier candidate),
         0 if the path touches occupied (100) cells (occupied wins over
           unknown),
         1 if every sampled cell is free.
    """
    rez = mapsub.info.resolution * 0.5
    stepz = int(ceil(LA.norm(xnew - xnear)) / rez)
    xi = xnear
    obs = 0
    unk = 0
    # Sample along the segment half a cell at a time.
    for c in range(0, stepz):
        xi = Steer(xi, xnew, rez)
        if gridValue(mapsub, xi) == 100:
            obs = 1
        if gridValue(mapsub, xi) == -1:
            unk = 1
    if gridValue(mapsub, xnew) == 100:
        obs = 1
    # Fix: the endpoint's unknown-check previously re-tested xi (the last
    # interior sample) instead of xnew, so an unknown endpoint was missed.
    if gridValue(mapsub, xnew) == -1:
        unk = 1
    if unk == 1:
        out = -1
    if obs == 1:
        out = 0
    if obs != 1 and unk != 1:
        out = 1
    return out
# assigner1rrtfront(goal,frontiers,client1,listener) ----------------------------------------------------
def Nearest2(V, x):
    """Return the index of the element of sequence V nearest (Euclidean
    norm) to x.

    Fix: the original left ``result`` unassigned when V was empty (or every
    element was at least the sentinel distance away), raising NameError on
    return.  We now default to index 0.
    """
    best = 1000000  # sentinel starting distance kept from the original
    result = 0
    for i in range(len(V)):
        d = LA.norm(V[i] - x)
        if d < best:
            best = d
            result = i
    return result
def assigner1rrtfront(goal, frontiers, client1, listener):
    """Send the frontier nearest to the robot as the next goal, when the
    robot is available.

    Pops the chosen frontier and returns the reduced array; returns
    `frontiers` unchanged when the robot is busy or no frontiers remain.
    """
    clientstate1 = client1.get_state()
    # actionlib states 2..5 and 9: previous goal finished, robot available.
    if clientstate1 == 2 or clientstate1 == 3 or clientstate1 == 4 or clientstate1 == 5 or clientstate1 == 9:
        if len(frontiers) > 0:
            (tran, rot) = listener.lookupTransform('/robot_1/map', '/robot_1/base_link', rospy.Time(0))
            xp = [array([tran[0], tran[1]])]
            row = Nearest2(frontiers, xp)
            nextfrontier = frontiers[row]
            frontiers = delete(frontiers, (row), axis=0)
            goal.target_pose.pose.position.x = nextfrontier[0]
            goal.target_pose.pose.position.y = nextfrontier[1]
            goal.target_pose.pose.orientation.w = 1.0
            # Fix: use the function-call form of print, which behaves
            # identically under Python 2 for a single argument but keeps the
            # module importable under Python 3 (the old statement form was
            # Python-2-only syntax).
            print("exploration goal sent")
            client1.send_goal(goal)
    return frontiers
| |
import math
import re
import numpy as np
from .utilities import unittest
import cantera as ct
from . import utilities
class TestReactor(utilities.CanteraTest):
    """Tests for ct.Reactor networks: construction, walls, flow devices,
    solver tolerances, and conservation checks.

    Subclasses override ``reactorClass`` to rerun the whole suite against
    other reactor types (see TestIdealGasReactor).
    """
    # Reactor type under test; overridden by subclasses.
    reactorClass = ct.Reactor

    def make_reactors(self, independent=True, n_reactors=2,
                      T1=300, P1=101325, X1='O2:1.0',
                      T2=300, P2=101325, X2='O2:1.0'):
        # Build a ReactorNet holding one or two reactors.  `independent`
        # controls whether the second reactor gets its own Solution object
        # or shares gas1's state.
        self.net = ct.ReactorNet()
        self.gas1 = ct.Solution('h2o2.xml')
        self.gas1.TPX = T1, P1, X1
        self.r1 = self.reactorClass(self.gas1)
        self.net.add_reactor(self.r1)
        if independent:
            self.gas2 = ct.Solution('h2o2.xml')
        else:
            self.gas2 = self.gas1
        if n_reactors >= 2:
            self.gas2.TPX = T2, P2, X2
            self.r2 = self.reactorClass(self.gas2)
            self.net.add_reactor(self.r2)

    def add_wall(self, **kwargs):
        # Install a wall between the two reactors and remember it on self.
        self.w = ct.Wall(self.r1, self.r2, **kwargs)
        return self.w

    def test_verbose(self):
        self.make_reactors(independent=False, n_reactors=1)
        self.assertFalse(self.net.verbose)
        self.net.verbose = True
        self.assertTrue(self.net.verbose)

    def test_insert(self):
        # A reactor with no contents raises until a phase is inserted.
        R = self.reactorClass()
        with self.assertRaises(Exception):
            R.T
        with self.assertRaises(Exception):
            R.kinetics.net_production_rates
        g = ct.Solution('h2o2.xml')
        g.TP = 300, 101325
        R.insert(g)
        self.assertNear(R.T, 300)
        self.assertEqual(len(R.kinetics.net_production_rates), g.n_species)

    def test_volume(self):
        R = self.reactorClass(volume=11)
        self.assertEqual(R.volume, 11)
        R.volume = 9
        self.assertEqual(R.volume, 9)

    def test_names(self):
        # Auto-generated names carry consecutive integer suffixes.
        self.make_reactors()
        pattern = re.compile(r'(\d+)')
        digits1 = pattern.search(self.r1.name).group(0)
        digits2 = pattern.search(self.r2.name).group(0)
        self.assertEqual(int(digits2), int(digits1) + 1)
        self.r1.name = 'hello'
        self.assertEqual(self.r1.name, 'hello')

    def test_component_index(self):
        # Species state variables follow the reactor's non-species variables.
        self.make_reactors(n_reactors=1)
        self.net.step()
        N0 = self.net.n_vars - self.gas1.n_species
        for i, name in enumerate(self.gas1.species_names):
            self.assertEqual(i + N0, self.r1.component_index(name))

    def test_disjoint(self):
        T1, P1 = 300, 101325
        T2, P2 = 500, 300000
        self.make_reactors(T1=T1, T2=T2, P1=P1, P2=P2)
        self.net.advance(1.0)
        # Nothing should change from the initial condition
        self.assertNear(T1, self.gas1.T)
        self.assertNear(T2, self.gas2.T)
        self.assertNear(P1, self.gas1.P)
        self.assertNear(P2, self.gas2.P)

    def test_disjoint2(self):
        T1, P1 = 300, 101325
        T2, P2 = 500, 300000
        self.make_reactors(T1=T1, T2=T2, P1=P1, P2=P2, independent=False)
        self.net.advance(1.0)
        # Nothing should change from the initial condition
        self.assertNear(T1, self.r1.T)
        self.assertNear(T2, self.r2.T)
        self.assertNear(P1, self.r1.thermo.P)
        self.assertNear(P2, self.r2.thermo.P)

    def test_timestepping(self):
        self.make_reactors()
        tStart = 0.3
        tEnd = 10.0
        dt_max = 0.07
        t = tStart
        self.net.set_max_time_step(dt_max)
        self.net.set_initial_time(tStart)
        self.assertNear(self.net.time, tStart)
        # Every internal step must respect the max-time-step setting.
        while t < tEnd:
            tPrev = t
            t = self.net.step()
            self.assertTrue(t - tPrev <= 1.0001 * dt_max)
            self.assertNear(t, self.net.time)
        #self.assertNear(self.net.time, tEnd)

    def test_equalize_pressure(self):
        # A movable wall equalizes pressure but not temperature.
        self.make_reactors(P1=101325, P2=300000)
        self.add_wall(K=0.1, A=1.0)
        self.assertEqual(len(self.r1.walls), 1)
        self.assertEqual(len(self.r2.walls), 1)
        self.assertEqual(self.r1.walls[0], self.w)
        self.assertEqual(self.r2.walls[0], self.w)
        self.net.advance(1.0)
        self.assertNear(self.net.time, 1.0)
        self.assertNear(self.gas1.P, self.gas2.P)
        self.assertNotAlmostEqual(self.r1.T, self.r2.T)

    def test_tolerances(self):
        # Looser tolerances should require fewer integrator steps.
        # NOTE(review): the inner function is declared as integrate(atol,
        # rtol) but assigns rtol from the second argument; the call sites
        # below (n_rtol / n_atol) look argument-swapped relative to their
        # names — TODO confirm, the assertions hold either way.
        def integrate(atol, rtol):
            P0 = 10 * ct.one_atm
            T0 = 1100
            X0 = 'H2:1.0, O2:0.5, AR:8.0'
            self.make_reactors(n_reactors=1, T1=T0, P1=P0, X1=X0)
            self.net.rtol = rtol
            self.net.atol = atol
            self.assertEqual(self.net.rtol, rtol)
            self.assertEqual(self.net.atol, atol)
            tEnd = 1.0
            nSteps = 0
            t = 0
            while t < tEnd:
                t = self.net.step()
                nSteps += 1
            return nSteps
        n_baseline = integrate(1e-10, 1e-20)
        n_rtol = integrate(5e-7, 1e-20)
        n_atol = integrate(1e-10, 1e-6)
        self.assertTrue(n_baseline > n_rtol)
        self.assertTrue(n_baseline > n_atol)

    def test_heat_transfer1(self):
        # Connected reactors reach thermal equilibrium after some time
        self.make_reactors(T1=300, T2=1000)
        self.add_wall(U=500, A=1.0)
        self.net.advance(10.0)
        self.assertNear(self.net.time, 10.0)
        self.assertNear(self.r1.T, self.r2.T, 5e-7)
        self.assertNotAlmostEqual(self.r1.thermo.P, self.r2.thermo.P)

    def test_heat_transfer2(self):
        # Result should be the same if (m * cp) / (U * A) is held constant
        self.make_reactors(T1=300, T2=1000)
        self.add_wall(U=200, A=1.0)
        self.net.advance(1.0)
        T1a = self.r1.T
        T2a = self.r2.T
        # Halve volumes, U, and A: the temperature histories should match.
        self.make_reactors(T1=300, T2=1000)
        self.r1.volume = 0.25
        self.r2.volume = 0.25
        w = self.add_wall(U=100, A=0.5)
        self.assertNear(w.heat_transfer_coeff * w.area * (self.r1.T - self.r2.T),
                        w.qdot(0))
        self.net.advance(1.0)
        self.assertNear(w.heat_transfer_coeff * w.area * (self.r1.T - self.r2.T),
                        w.qdot(1.0))
        T1b = self.r1.T
        T2b = self.r2.T
        self.assertNear(T1a, T1b)
        self.assertNear(T2a, T2b)

    def test_equilibrium_UV(self):
        # Adiabatic, constant volume combustion should proceed to equilibrum
        # at constant internal energy and volume.
        P0 = 10 * ct.one_atm
        T0 = 1100
        X0 = 'H2:1.0, O2:0.5, AR:8.0'
        self.make_reactors(n_reactors=1, T1=T0, P1=P0, X1=X0)
        self.net.advance(1.0)
        gas = ct.Solution('h2o2.xml')
        gas.TPX = T0, P0, X0
        gas.equilibrate('UV')
        self.assertNear(self.r1.T, gas.T)
        self.assertNear(self.r1.thermo.density, gas.density)
        self.assertNear(self.r1.thermo.P, gas.P)
        self.assertArrayNear(self.r1.thermo.X, gas.X)

    def test_equilibrium_HP(self):
        # Adiabatic, constant pressure combustion should proceed to equilibrum
        # at constant enthalpy and pressure.
        P0 = 10 * ct.one_atm
        T0 = 1100
        X0 = 'H2:1.0, O2:0.5, AR:8.0'
        gas1 = ct.Solution('h2o2.xml')
        gas1.TPX = T0, P0, X0
        r1 = ct.IdealGasConstPressureReactor(gas1)
        net = ct.ReactorNet()
        net.add_reactor(r1)
        net.advance(1.0)
        gas2 = ct.Solution('h2o2.xml')
        gas2.TPX = T0, P0, X0
        gas2.equilibrate('HP')
        self.assertNear(r1.T, gas2.T)
        self.assertNear(r1.thermo.P, P0)
        self.assertNear(r1.thermo.density, gas2.density)
        self.assertArrayNear(r1.thermo.X, gas2.X)

    def test_wall_velocity(self):
        # A prescribed wall velocity transfers volume between the reactors.
        self.make_reactors()
        A = 0.2
        V1 = 2.0
        V2 = 5.0
        self.r1.volume = V1
        self.r2.volume = V2
        self.add_wall(A=A)
        # Triangular velocity profile: integrates to 1.0 over [0, 2].
        def v(t):
            if 0 < t <= 1:
                return t
            elif 1 <= t <= 2:
                return 2 - t
            else:
                return 0.0
        self.w.set_velocity(v)
        self.net.advance(1.0)
        self.assertNear(self.w.vdot(1.0), 1.0 * A, 1e-7)
        self.net.advance(2.0)
        self.assertNear(self.w.vdot(2.0), 0.0, 1e-7)
        self.assertNear(self.r1.volume, V1 + 1.0 * A, 1e-7)
        self.assertNear(self.r2.volume, V2 - 1.0 * A, 1e-7)

    def test_disable_energy(self):
        # With the energy equation off, r1 stays at its initial temperature.
        self.make_reactors(T1=500)
        self.r1.energy_enabled = False
        self.add_wall(A=1.0, U=2500)
        self.net.advance(11.0)
        self.assertNear(self.r1.T, 500)
        self.assertNear(self.r2.T, 500)

    def test_heat_flux_func(self):
        # Check energy bookkeeping against the analytic integral of a
        # prescribed heat flux.
        self.make_reactors(T1=500, T2=300)
        self.r1.volume = 0.5
        U1a = self.r1.volume * self.r1.density * self.r1.thermo.u
        U2a = self.r2.volume * self.r2.density * self.r2.thermo.u
        V1a = self.r1.volume
        V2a = self.r2.volume
        self.add_wall(A=0.3)
        self.w.set_heat_flux(lambda t: 90000 * (1 - t**2) if t <= 1.0 else 0.0)
        # Integral of 90000*(1-t^2) over [0,1] is 60000; times area 0.3.
        Q = 0.3 * 60000
        self.net.advance(1.1)
        U1b = self.r1.volume * self.r1.density * self.r1.thermo.u
        U2b = self.r2.volume * self.r2.density * self.r2.thermo.u
        self.assertNear(V1a, self.r1.volume)
        self.assertNear(V2a, self.r2.volume)
        self.assertNear(U1a - Q, U1b, 1e-6)
        self.assertNear(U2a + Q, U2b, 1e-6)

    def test_mass_flow_controller(self):
        # A time-windowed MFC should add exactly 0.1 kg over its window.
        self.make_reactors(n_reactors=1)
        gas2 = ct.Solution('h2o2.xml')
        gas2.TPX = 300, 10*101325, 'H2:1.0'
        reservoir = ct.Reservoir(gas2)
        mfc = ct.MassFlowController(reservoir, self.r1)
        mfc.set_mass_flow_rate(lambda t: 0.1 if 0.2 <= t < 1.2 else 0.0)
        self.assertEqual(len(reservoir.inlets), 0)
        self.assertEqual(len(reservoir.outlets), 1)
        self.assertEqual(reservoir.outlets[0], mfc)
        self.assertEqual(len(self.r1.outlets), 0)
        self.assertEqual(len(self.r1.inlets), 1)
        self.assertEqual(self.r1.inlets[0], mfc)
        ma = self.r1.volume * self.r1.density
        Ya = self.r1.Y
        self.net.rtol = 1e-11
        self.net.set_max_time_step(0.05)
        self.net.advance(2.5)
        mb = self.r1.volume * self.r1.density
        Yb = self.r1.Y
        self.assertNear(ma + 0.1, mb)
        self.assertArrayNear(ma * Ya + 0.1 * gas2.Y, mb * Yb)

    def test_valve1(self):
        # Linear valve: mdot proportional to the pressure difference;
        # total mass and element masses are conserved.
        self.make_reactors(P1=10*ct.one_atm, X1='AR:1.0', X2='O2:1.0')
        self.net.rtol = 1e-12
        valve = ct.Valve(self.r1, self.r2)
        k = 2e-5
        valve.set_valve_coeff(k)
        self.assertEqual(self.r1.outlets, self.r2.inlets)
        self.assertTrue(self.r1.energy_enabled)
        self.assertTrue(self.r2.energy_enabled)
        self.assertNear((self.r1.thermo.P - self.r2.thermo.P) * k,
                        valve.mdot(0))
        m1a = self.r1.thermo.density * self.r1.volume
        m2a = self.r2.thermo.density * self.r2.volume
        Y1a = self.r1.thermo.Y
        Y2a = self.r2.thermo.Y
        self.net.advance(0.1)
        m1b = self.r1.thermo.density * self.r1.volume
        m2b = self.r2.thermo.density * self.r2.volume
        self.assertNear((self.r1.thermo.P - self.r2.thermo.P) * k,
                        valve.mdot(0.1))
        self.assertNear(m1a+m2a, m1b+m2b)
        Y1b = self.r1.thermo.Y
        Y2b = self.r2.thermo.Y
        self.assertArrayNear(m1a*Y1a + m2a*Y2a, m1b*Y1b + m2b*Y2b, atol=1e-10)
        self.assertArrayNear(Y1a, Y1b)

    def test_valve2(self):
        # Similar to test_valve1, but by disabling the energy equation
        # (constant T) we can compare with an analytical solution for
        # the mass of each reactor as a function of time
        self.make_reactors(P1=10*ct.one_atm)
        self.net.rtol = 1e-11
        self.r1.energy_enabled = False
        self.r2.energy_enabled = False
        valve = ct.Valve(self.r1, self.r2)
        k = 2e-5
        valve.set_valve_coeff(k)
        self.assertFalse(self.r1.energy_enabled)
        self.assertFalse(self.r2.energy_enabled)
        m1a = self.r1.thermo.density * self.r1.volume
        m2a = self.r2.thermo.density * self.r2.volume
        P1a = self.r1.thermo.P
        P2a = self.r2.thermo.P
        Y1 = self.r1.Y
        # Coefficients of the linear ODE dm2/dt = A - B*m2 at constant T,
        # whose solution is checked pointwise below.
        A = k * P1a * (1 + m2a/m1a)
        B = k * (P1a/m1a + P2a/m2a)
        for t in np.linspace(1e-5, 0.5):
            self.net.advance(t)
            m1 = self.r1.thermo.density * self.r1.volume
            m2 = self.r2.thermo.density * self.r2.volume
            self.assertNear(m2, (m2a - A/B) * np.exp(-B * t) + A/B)
            self.assertNear(m1a+m2a, m1+m2)
        self.assertArrayNear(self.r1.Y, Y1)

    def test_valve3(self):
        # This case specifies a non-linear relationship between pressure drop
        # and flow rate.
        self.make_reactors(P1=10*ct.one_atm, X1='AR:0.5, O2:0.5',
                           X2='O2:1.0')
        self.net.rtol = 1e-12
        self.net.atol = 1e-20
        valve = ct.Valve(self.r1, self.r2)
        mdot = lambda dP: 5e-3 * np.sqrt(dP) if dP > 0 else 0.0
        valve.set_valve_coeff(mdot)
        Y1 = self.r1.Y
        kO2 = self.gas1.species_index('O2')
        kAr = self.gas1.species_index('AR')
        # Total mass of species k summed over both reactors.
        def speciesMass(k):
            return self.r1.Y[k] * self.r1.mass + self.r2.Y[k] * self.r2.mass
        mO2 = speciesMass(kO2)
        mAr = speciesMass(kAr)
        t = 0
        while t < 1.0:
            t = self.net.step()
            p1 = self.r1.thermo.P
            p2 = self.r2.thermo.P
            self.assertNear(mdot(p1-p2), valve.mdot(t))
            self.assertArrayNear(Y1, self.r1.Y)
            self.assertNear(speciesMass(kAr), mAr)
            self.assertNear(speciesMass(kO2), mO2)

    def test_valve_errors(self):
        self.make_reactors()
        res = ct.Reservoir()
        with self.assertRaises(RuntimeError):
            # Must assign contents of both reactors before creating Valve
            v = ct.Valve(self.r1, res)
        v = ct.Valve(self.r1, self.r2)
        with self.assertRaises(RuntimeError):
            # inlet and outlet cannot be reassigned
            v._install(self.r2, self.r1)

    def test_pressure_controller(self):
        # PressureController flow = master MFC flow + K * dP.
        self.make_reactors(n_reactors=1)
        g = ct.Solution('h2o2.xml')
        g.TPX = 500, 2*101325, 'H2:1.0'
        inlet_reservoir = ct.Reservoir(g)
        g.TP = 300, 101325
        outlet_reservoir = ct.Reservoir(g)
        mfc = ct.MassFlowController(inlet_reservoir, self.r1)
        mdot = lambda t: np.exp(-100*(t-0.5)**2)
        mfc.set_mass_flow_rate(mdot)
        pc = ct.PressureController(self.r1, outlet_reservoir)
        pc.set_master(mfc)
        pc.set_pressure_coeff(1e-5)
        t = 0
        while t < 1.0:
            t = self.net.step()
            self.assertNear(mdot(t), mfc.mdot(t))
            dP = self.r1.thermo.P - outlet_reservoir.thermo.P
            self.assertNear(mdot(t) + 1e-5 * dP, pc.mdot(t))

    def test_pressure_controller_errors(self):
        # mdot() raises unless both a master device and K are configured.
        self.make_reactors()
        res = ct.Reservoir(self.gas1)
        mfc = ct.MassFlowController(res, self.r1, mdot=0.6)
        p = ct.PressureController(self.r1, self.r2, master=mfc, K=0.5)
        with self.assertRaises(RuntimeError):
            p = ct.PressureController(self.r1, self.r2, K=0.5)
            p.mdot(0.0)
        with self.assertRaises(RuntimeError):
            p = ct.PressureController(self.r1, self.r2, master=mfc)
            p.mdot(0.0)
        with self.assertRaises(RuntimeError):
            p = ct.PressureController(self.r1, self.r2)
            p.mdot(0.0)

    def test_set_initial_time(self):
        # The solution should be invariant to a shifted initial time.
        self.make_reactors(P1=10*ct.one_atm, X1='AR:1.0', X2='O2:1.0')
        self.net.rtol = 1e-12
        valve = ct.Valve(self.r1, self.r2)
        mdot = lambda dP: 5e-3 * np.sqrt(dP) if dP > 0 else 0.0
        valve.set_valve_coeff(mdot)
        t0 = 0.0
        tf = t0 + 0.5
        self.net.advance(tf)
        self.assertNear(self.net.time, tf)
        p1a = self.r1.thermo.P
        p2a = self.r2.thermo.P
        self.make_reactors(P1=10*ct.one_atm, X1='AR:1.0', X2='O2:1.0')
        self.net.rtol = 1e-12
        valve = ct.Valve(self.r1, self.r2)
        mdot = lambda dP: 5e-3 * np.sqrt(dP) if dP > 0 else 0.0
        valve.set_valve_coeff(mdot)
        t0 = 0.2
        self.net.set_initial_time(t0)
        tf = t0 + 0.5
        self.net.advance(tf)
        self.assertNear(self.net.time, tf)
        p1b = self.r1.thermo.P
        p2b = self.r2.thermo.P
        self.assertNear(p1a, p1b)
        self.assertNear(p2a, p2b)

    def test_reinitialize(self):
        # Resetting reactor states via syncState reproduces the first run.
        self.make_reactors(T1=300, T2=1000, independent=False)
        self.add_wall(U=200, A=1.0)
        self.net.advance(1.0)
        T1a = self.r1.T
        T2a = self.r2.T
        self.r1.thermo.TD = 300, None
        self.r1.syncState()
        self.r2.thermo.TD = 1000, None
        self.r2.syncState()
        self.assertNear(self.r1.T, 300)
        self.assertNear(self.r2.T, 1000)
        self.net.advance(2.0)
        T1b = self.r1.T
        T2b = self.r2.T
        self.assertNear(T1a, T1b)
        self.assertNear(T2a, T2b)

    def test_unpicklable(self):
        self.make_reactors()
        import pickle
        with self.assertRaises(NotImplementedError):
            pickle.dumps(self.r1)
        with self.assertRaises(NotImplementedError):
            pickle.dumps(self.net)

    def test_uncopyable(self):
        self.make_reactors()
        import copy
        with self.assertRaises(NotImplementedError):
            copy.copy(self.r1)
        with self.assertRaises(NotImplementedError):
            copy.copy(self.net)

    def test_invalid_property(self):
        # Setting or getting undeclared attributes must fail.
        self.make_reactors()
        for x in (self.r1, self.net):
            with self.assertRaises(AttributeError):
                x.foobar = 300
            with self.assertRaises(AttributeError):
                x.foobar

    def test_bad_kwarg(self):
        self.reactorClass(name='ok')
        with self.assertRaises(TypeError):
            r1 = self.reactorClass(foobar=3.14)
class TestIdealGasReactor(TestReactor):
    """Rerun the full TestReactor suite against ct.IdealGasReactor."""
    reactorClass = ct.IdealGasReactor
class TestWellStirredReactorIgnition(utilities.CanteraTest):
    """ Ignition (or not) of a well-stirred reactor """

    def setup(self, T0, P0, mdot_fuel, mdot_ox):
        # Build a classic WSR: fuel + oxidizer reservoirs feeding a
        # constant-volume combustor, with a linear valve to the exhaust.
        self.gas = ct.Solution('gri30.xml')
        # fuel inlet
        self.gas.TPX = T0, P0, "CH4:1.0"
        self.fuel_in = ct.Reservoir(self.gas)
        # oxidizer inlet
        self.gas.TPX = T0, P0, "N2:3.76, O2:1.0"
        self.oxidizer_in = ct.Reservoir(self.gas)
        # reactor, initially filled with N2
        self.gas.TPX = T0, P0, "N2:1.0"
        self.combustor = ct.IdealGasReactor(self.gas)
        self.combustor.volume = 1.0
        # outlet
        self.exhaust = ct.Reservoir(self.gas)
        # connect the reactor to the reservoirs
        self.fuel_mfc = ct.MassFlowController(self.fuel_in, self.combustor)
        self.fuel_mfc.set_mass_flow_rate(mdot_fuel)
        self.oxidizer_mfc = ct.MassFlowController(self.oxidizer_in, self.combustor)
        self.oxidizer_mfc.set_mass_flow_rate(mdot_ox)
        self.valve = ct.Valve(self.combustor, self.exhaust)
        self.valve.set_valve_coeff(1.0)
        self.net = ct.ReactorNet()
        self.net.add_reactor(self.combustor)
        self.net.max_err_test_fails = 10

    def integrate(self, tf):
        # Step to tf, recording the combustor temperature history.
        t = 0.0
        times = []
        T = []
        i = 0
        while t < tf:
            i += 1
            t = self.net.step()
            times.append(t)
            T.append(self.combustor.T)
        return times, T

    def test_nonreacting(self):
        # With chemistry disabled, T stays at T0 and the steady-state CH4
        # mass fraction is just the inlet flow ratio.
        mdot_f = 1.0
        mdot_o = 5.0
        T0 = 900.0
        self.setup(T0, 10*ct.one_atm, mdot_f, mdot_o)
        self.gas.set_multiplier(0.0)
        t,T = self.integrate(100.0)
        for i in range(len(t)):
            self.assertNear(T[i], T0, rtol=1e-5)
        self.assertNear(self.combustor.thermo['CH4'].Y,
                        mdot_f / (mdot_o + mdot_f))

    def test_ignition1(self):
        self.setup(900.0, 10*ct.one_atm, 1.0, 5.0)
        t,T = self.integrate(10.0)
        self.assertTrue(T[-1] > 1200) # mixture ignited
        # Ignition time: first crossing of the temperature midpoint.
        for i in range(len(t)):
            if T[i] > 0.5 * (T[0] + T[-1]):
                tIg = t[i]
                break
        # regression test; no external basis for this result
        self.assertNear(tIg, 2.2249, 1e-3)

    def test_ignition2(self):
        self.setup(900.0, 10*ct.one_atm, 1.0, 20.0)
        t,T = self.integrate(10.0)
        self.assertTrue(T[-1] > 1200) # mixture ignited
        for i in range(len(t)):
            if T[i] > 0.5 * (T[0] + T[-1]):
                tIg = t[i]
                break
        # regression test; no external basis for this result
        self.assertNear(tIg, 1.4856, 1e-3)

    def test_ignition3(self):
        # Very lean case: blowout, no ignition.
        self.setup(900.0, 10*ct.one_atm, 1.0, 80.0)
        self.net.set_max_time_step(0.5)
        t,T = self.integrate(100.0)
        self.assertTrue(T[-1] < 910) # mixture did not ignite
class TestConstPressureReactor(utilities.CanteraTest):
    """
    The constant pressure reactor should give essentially the same results as
    as a regular "Reactor" with a wall with a very high expansion rate
    coefficient.
    """
    # Reactor type compared against the high-K-wall Reactor; overridden
    # by TestIdealGasConstPressureReactor.
    reactorClass = ct.ConstPressureReactor

    def create_reactors(self, add_Q=False, add_mdot=False, add_surf=False):
        # r1: regular reactor with a fast-expanding wall to a reservoir
        # (approximating constant P); r2: the reactor class under test.
        self.gas = ct.Solution('gri30.xml')
        self.gas.TPX = 900, 25*ct.one_atm, 'CO:0.5, H2O:0.2'
        self.gas1 = ct.Solution('gri30.xml')
        self.gas1.ID = 'gas'
        self.gas2 = ct.Solution('gri30.xml')
        self.gas2.ID = 'gas'
        resGas = ct.Solution('gri30.xml')
        solid = ct.Solution('diamond.xml', 'diamond')
        T0 = 1200
        P0 = 25*ct.one_atm
        X0 = 'CH4:0.5, H2O:0.2, CO:0.3'
        self.gas1.TPX = T0, P0, X0
        self.gas2.TPX = T0, P0, X0
        self.r1 = ct.IdealGasReactor(self.gas1)
        self.r2 = self.reactorClass(self.gas2)
        self.r1.volume = 0.2
        self.r2.volume = 0.2
        resGas.TP = T0 - 300, P0
        env = ct.Reservoir(resGas)
        U = 300 if add_Q else 0
        # K=1e3 on w1 makes r1 track the reservoir pressure closely.
        self.w1 = ct.Wall(self.r1, env, K=1e3, A=0.1, U=U)
        self.w2 = ct.Wall(self.r2, env, A=0.1, U=U)
        if add_mdot:
            mfc1 = ct.MassFlowController(env, self.r1, mdot=0.05)
            mfc2 = ct.MassFlowController(env, self.r2, mdot=0.05)
        if add_surf:
            # Identical surface chemistry on both walls' left faces.
            self.interface1 = ct.Interface('diamond.xml', 'diamond_100',
                                           (self.gas1, solid))
            self.interface2 = ct.Interface('diamond.xml', 'diamond_100',
                                           (self.gas2, solid))
            C = np.zeros(self.interface1.n_species)
            C[0] = 0.3
            C[4] = 0.7
            self.w1.left.kinetics = self.interface1
            self.w2.left.kinetics = self.interface2
            self.w1.left.coverages = C
            self.w2.left.coverages = C
        self.net1 = ct.ReactorNet([self.r1])
        self.net2 = ct.ReactorNet([self.r2])
        self.net1.set_max_time_step(0.05)
        self.net2.set_max_time_step(0.05)
        self.net2.max_err_test_fails = 10

    def test_component_index(self):
        # Gas species come first after the non-species variables; surface
        # species occupy the tail of the state vector.
        self.create_reactors(add_surf=True)
        for (gas,net,iface,r) in ((self.gas1, self.net1, self.interface1, self.r1),
                                  (self.gas2, self.net2, self.interface2, self.r2)):
            net.step()
            N0 = net.n_vars - gas.n_species - iface.n_species
            N1 = net.n_vars - iface.n_species
            for i, name in enumerate(gas.species_names):
                self.assertEqual(i + N0, r.component_index(name))
            for i, name in enumerate(iface.species_names):
                self.assertEqual(i + N1, r.component_index(name))

    def integrate(self, surf=False):
        # Advance both networks in lockstep and compare states throughout.
        for t in np.arange(0.5, 50, 1.0):
            self.net1.advance(t)
            self.net2.advance(t)
            self.assertArrayNear(self.r1.thermo.Y, self.r2.thermo.Y,
                                 rtol=5e-4, atol=1e-6)
            self.assertNear(self.r1.T, self.r2.T, rtol=1e-5)
            self.assertNear(self.r1.thermo.P, self.r2.thermo.P, rtol=1e-6)
            if surf:
                self.assertArrayNear(self.w1.left.coverages,
                                     self.w2.left.coverages,
                                     rtol=1e-4, atol=1e-8)

    def test_closed(self):
        self.create_reactors()
        self.integrate()

    def test_with_heat_transfer(self):
        self.create_reactors(add_Q=True)
        self.integrate()

    def test_with_mdot(self):
        self.create_reactors(add_mdot=True)
        self.integrate()

    def test_with_surface_reactions(self):
        if (not ct.__sundials_version__ and
            self.reactorClass == ct.ConstPressureReactor):
            raise unittest.SkipTest("Disabled until there is an interface for "
                "setting the max_err_test_fails parameter for the old CVODE")
        self.create_reactors(add_surf=True)
        self.net1.atol = self.net2.atol = 1e-18
        self.net1.rtol = self.net2.rtol = 1e-9
        self.integrate(surf=True)
class TestIdealGasConstPressureReactor(TestConstPressureReactor):
    """Rerun the constant-pressure comparison suite against
    ct.IdealGasConstPressureReactor."""
    reactorClass = ct.IdealGasConstPressureReactor
class TestFlowReactor(utilities.CanteraTest):
    """Tests for the plug-flow ct.FlowReactor."""

    def test_nonreacting(self):
        # With inert flow, the velocity is constant and distance = v*t.
        g = ct.Solution('h2o2.xml')
        g.TPX = 300, 101325, 'O2:1.0'
        r = ct.FlowReactor(g)
        r.mass_flow_rate = 10
        net = ct.ReactorNet()
        net.add_reactor(r)
        t = 0
        v0 = r.speed
        self.assertNear(v0, 10 / r.density)
        while t < 10.0:
            t = net.step()
            self.assertNear(v0, r.speed)
            self.assertNear(r.distance, v0 * t)

    @unittest.skipUnless(ct.__sundials_version__,
                         "Disabled until there is an interface for setting the "
                         "max_err_test_fails parameter for the old CVODE")
    def test_reacting(self):
        # With chemistry on, the reported speed should match the finite-
        # difference slope of distance vs. time at every step.
        g = ct.Solution('gri30.xml')
        g.TPX = 1400, 20*101325, 'CO:1.0, H2O:1.0'
        r = ct.FlowReactor(g)
        r.mass_flow_rate = 10
        net = ct.ReactorNet()
        net.add_reactor(r)
        net.atol = 1e-18
        net.rtol = 1e-9
        net.max_err_test_fails = 10
        t = 0
        self.assertNear(r.speed, 10 / r.density)
        while t < 1.0:
            t1 = net.time
            x1 = r.distance
            t = net.step()
            v = (r.distance - x1) / (net.time - t1)
            self.assertNear(r.speed, v, 1e-3)
class TestWallKinetics(utilities.CanteraTest):
    """Tests for heterogeneous (surface) chemistry attached to walls."""

    def make_reactors(self):
        # Two small reactors joined by a unit-area wall; surface kinetics
        # are attached to one face per test.
        self.net = ct.ReactorNet()
        self.gas = ct.Solution('diamond.xml', 'gas')
        self.solid = ct.Solution('diamond.xml', 'diamond')
        self.interface = ct.Interface('diamond.xml', 'diamond_100',
                                      (self.gas, self.solid))
        self.r1 = ct.IdealGasReactor(self.gas)
        self.r1.volume = 0.01
        self.net.add_reactor(self.r1)
        self.r2 = ct.IdealGasReactor(self.gas)
        self.r2.volume = 0.01
        self.net.add_reactor(self.r2)
        self.w = ct.Wall(self.r1, self.r2)
        self.w.area = 1.0

    def test_coverages(self):
        # Coverages can be set by dict or by string; left- and right-face
        # attachment must give the same integrated result, and the face
        # without kinetics must reject coverage access.
        self.make_reactors()
        self.w.left.kinetics = self.interface
        self.w.left.coverages = {'c6HH':0.3, 'c6HM':0.7}
        self.assertNear(self.w.left.coverages[0], 0.3)
        self.assertNear(self.w.left.coverages[1], 0.0)
        self.assertNear(self.w.left.coverages[4], 0.7)
        self.net.advance(1e-5)
        C_left = self.w.left.coverages
        self.assertEqual(self.w.right.kinetics, None)
        with self.assertRaises(Exception):
            self.w.right.coverages
        self.make_reactors()
        self.w.right.kinetics = self.interface
        self.w.right.coverages = 'c6HH:0.3, c6HM:0.7'
        self.assertNear(self.w.right.coverages[0], 0.3)
        self.assertNear(self.w.right.coverages[4], 0.7)
        self.assertEqual(self.w.left.kinetics, None)
        with self.assertRaises(Exception):
            self.w.left.coverages
        self.net.advance(1e-5)
        C_right = self.w.right.coverages
        self.assertNear(sum(C_left), 1.0)
        self.assertArrayNear(C_left, C_right)

    def test_coverages_regression1(self):
        # Test with energy equation disabled
        # Compares the coverage/state history against a stored reference
        # profile ('../data/...'); regression only, no external basis.
        self.make_reactors()
        self.r1.energy_enabled = False
        self.r2.energy_enabled = False
        self.w.left.kinetics = self.interface
        C = np.zeros(self.interface.n_species)
        C[0] = 0.3
        C[4] = 0.7
        self.w.left.coverages = C
        self.assertArrayNear(self.w.left.coverages, C)
        data = []
        test_file = 'test_coverages_regression1.csv'
        reference_file = '../data/WallKinetics-coverages-regression1.csv'
        data = []
        for t in np.linspace(1e-6, 1e-3):
            self.net.advance(t)
            data.append([t, self.r1.T, self.r1.thermo.P, self.r1.mass] +
                        list(self.r1.thermo.X) + list(self.w.left.coverages))
        np.savetxt(test_file, data, delimiter=',')
        bad = utilities.compareProfiles(reference_file, test_file,
                                        rtol=1e-5, atol=1e-9, xtol=1e-12)
        self.assertFalse(bool(bad), bad)

    def test_coverages_regression2(self):
        # Test with energy equation enabled
        # Same regression comparison as regression1, with energy on.
        self.make_reactors()
        self.w.left.kinetics = self.interface
        C = np.zeros(self.interface.n_species)
        C[0] = 0.3
        C[4] = 0.7
        self.w.left.coverages = C
        self.assertArrayNear(self.w.left.coverages, C)
        data = []
        test_file = 'test_coverages_regression2.csv'
        reference_file = '../data/WallKinetics-coverages-regression2.csv'
        data = []
        for t in np.linspace(1e-6, 1e-3):
            self.net.advance(t)
            data.append([t, self.r1.T, self.r1.thermo.P, self.r1.mass] +
                        list(self.r1.thermo.X) + list(self.w.left.coverages))
        np.savetxt(test_file, data, delimiter=',')
        bad = utilities.compareProfiles(reference_file, test_file,
                                        rtol=1e-5, atol=1e-9, xtol=1e-12)
        self.assertFalse(bool(bad), bad)
@unittest.skipUnless(ct.__sundials_version__,
                     "Sensitivity calculations require Sundials")
class TestReactorSensitivities(utilities.CanteraTest):
    """Tests of sensitivity-coefficient bookkeeping for reactor networks.

    Checks the shape of the sensitivity matrix, the naming/ordering of
    sensitivity parameters, and that results are invariant under permutations
    of the order in which parameters and reactors are registered.
    """

    def test_sensitivities1(self):
        # Single gas-phase reactor: verify parameter counting and the shape
        # of the sensitivity matrix returned after integration.
        net = ct.ReactorNet()
        gas = ct.Solution('gri30.xml')
        gas.TPX = 1300, 20*101325, 'CO:1.0, H2:0.1, CH4:0.1, H2O:0.5'
        r1 = ct.IdealGasReactor(gas)
        net.add_reactor(r1)

        # No parameters registered until add_sensitivity_reaction is called
        self.assertEqual(net.n_sensitivity_params, 0)
        r1.add_sensitivity_reaction(40)
        r1.add_sensitivity_reaction(41)

        net.advance(0.1)
        self.assertEqual(net.n_sensitivity_params, 2)
        # n_vars = species count plus the non-species state variables that
        # precede the first species component in the state vector
        self.assertEqual(net.n_vars,
                         gas.n_species + r1.component_index(gas.species_name(0)))
        S = net.sensitivities()
        self.assertEqual(S.shape, (net.n_vars, net.n_sensitivity_params))

    def test_sensitivities2(self):
        # Two reactors joined by a wall with surface chemistry; one parameter
        # on the wall surface, one on the second reactor's gas phase.
        net = ct.ReactorNet()

        gas1 = ct.Solution('diamond.xml', 'gas')
        solid = ct.Solution('diamond.xml', 'diamond')
        interface = ct.Interface('diamond.xml', 'diamond_100',
                                 (gas1, solid))
        r1 = ct.IdealGasReactor(gas1)
        net.add_reactor(r1)
        # Tighter tolerances for the sensitivity equations
        net.atol_sensitivity = 1e-10
        net.rtol_sensitivity = 1e-8

        gas2 = ct.Solution('h2o2.xml')
        gas2.TPX = 900, 101325, 'H2:0.1, OH:1e-7, O2:0.1, AR:1e-5'
        r2 = ct.IdealGasReactor(gas2)
        net.add_reactor(r2)

        w = ct.Wall(r1, r2)
        w.area = 1.5
        w.left.kinetics = interface

        # Initial surface coverages: species 0 at 0.3, species 4 at 0.7
        C = np.zeros(interface.n_species)
        C[0] = 0.3
        C[4] = 0.7

        w.left.coverages = C
        w.left.add_sensitivity_reaction(2)
        r2.add_sensitivity_reaction(18)

        # Step through a sequence of temperature milestones in r2
        for T in (901, 905, 910, 950, 1500):
            while r2.T < T:
                net.step()

        S = net.sensitivities()

        # number of non-species variables in each reactor
        Ns = r1.component_index(gas1.species_name(0))

        # Index of first variable corresponding to r2
        K2 = Ns + gas1.n_species + interface.n_species

        # Constant volume should generate zero sensitivity coefficient
        self.assertArrayNear(S[1,:], np.zeros(2))
        self.assertArrayNear(S[K2+1,:], np.zeros(2))

        # Sensitivity coefficients for the disjoint reactors should be zero
        self.assertNear(np.linalg.norm(S[Ns:K2,1]), 0.0, atol=1e-5)
        self.assertNear(np.linalg.norm(S[K2+Ns:,0]), 0.0, atol=1e-5)

    def _test_parameter_order1(self, reactorClass):
        # Single reactor, changing the order in which parameters are added
        gas = ct.Solution('h2o2.xml')

        def setup():
            # Fresh network/reactor with the gas reset to the same state
            net = ct.ReactorNet()
            gas.TPX = 900, 101325, 'H2:0.1, OH:1e-7, O2:0.1, AR:1e-5'
            r = reactorClass(gas)
            net.add_reactor(r)
            return r, net

        def integrate(r, net):
            # Step until the reactor heats past 910 K, then collect S
            while r.T < 910:
                net.step()
            return net.sensitivities()

        r1,net1 = setup()
        params1 = [2,10,18,19]
        for p in params1:
            r1.add_sensitivity_reaction(p)
        S1 = integrate(r1, net1)

        # Expected parameter name: "<reactor name>: <reaction equation>"
        pname = lambda r,i: '%s: %s' % (r.name, gas.reaction_equation(i))
        for i,p in enumerate(params1):
            self.assertEqual(pname(r1,p), net1.sensitivity_parameter_name(i))

        # Same reactions registered in a different order
        r2,net2 = setup()
        params2 = [19,10,2,18]
        for p in params2:
            r2.add_sensitivity_reaction(p)
        S2 = integrate(r2, net2)
        for i,p in enumerate(params2):
            self.assertEqual(pname(r2,p), net2.sensitivity_parameter_name(i))

        # Columns of S1 and S2 must agree once permuted to match reactions
        for i,j in enumerate((2,1,3,0)):
            self.assertArrayNear(S1[:,i], S2[:,j])

    def test_parameter_order1a(self):
        self._test_parameter_order1(ct.IdealGasReactor)

    def test_parameter_order1b(self):
        self._test_parameter_order1(ct.IdealGasConstPressureReactor)

    def test_parameter_order2(self):
        # Multiple reactors, changing the order in which parameters are added
        gas = ct.Solution('h2o2.xml')

        def setup(reverse=False):
            # Two reactors with slightly different initial states; 'reverse'
            # flips the order in which they are added to the network.
            net = ct.ReactorNet()
            gas1 = ct.Solution('h2o2.xml')
            gas1.TPX = 900, 101325, 'H2:0.1, OH:1e-7, O2:0.1, AR:1e-5'
            rA = ct.IdealGasReactor(gas1)

            gas2 = ct.Solution('h2o2.xml')
            gas2.TPX = 920, 101325, 'H2:0.1, OH:1e-7, O2:0.1, AR:0.5'
            rB = ct.IdealGasReactor(gas2)
            if reverse:
                net.add_reactor(rB)
                net.add_reactor(rA)
            else:
                net.add_reactor(rA)
                net.add_reactor(rB)

            return rA, rB, net

        def integrate(r, net):
            net.advance(1e-4)
            return net.sensitivities()

        S = []

        for reverse in (True,False):
            rA1,rB1,net1 = setup(reverse)
            params1 = [(rA1,2),(rA1,19),(rB1,10),(rB1,18)]
            for r,p in params1:
                r.add_sensitivity_reaction(p)
            S.append(integrate(rA1, net1))

            pname = lambda r,i: '%s: %s' % (r.name, gas.reaction_equation(i))
            for i,(r,p) in enumerate(params1):
                self.assertEqual(pname(r,p), net1.sensitivity_parameter_name(i))

            # Same parameters registered in a permuted order
            rA2,rB2,net2 = setup(reverse)
            params2 = [(rB2,10),(rA2,19),(rB2,18),(rA2,2)]
            for r,p in params2:
                r.add_sensitivity_reaction(p)
            S.append(integrate(rA2, net2))

            for i,(r,p) in enumerate(params2):
                self.assertEqual(pname(r,p), net2.sensitivity_parameter_name(i))

        # Check that the results reflect the changed parameter ordering
        for a,b in ((0,1), (2,3)):
            for i,j in enumerate((3,1,0,2)):
                self.assertArrayNear(S[a][:,i], S[b][:,j])

        # Check that results are consistent after changing the order that
        # reactors are added to the network
        # NOTE(review): 'r' here is the loop variable leaked from the last
        # 'for r,p in params2' loop above — intentional-looking but fragile.
        N = gas.n_species + r.component_index(gas.species_name(0))
        self.assertArrayNear(S[0][:N], S[2][N:], 1e-5, 1e-5)
        self.assertArrayNear(S[0][N:], S[2][:N], 1e-5, 1e-5)
        self.assertArrayNear(S[1][:N], S[3][N:], 1e-5, 1e-5)
        self.assertArrayNear(S[1][N:], S[3][:N], 1e-5, 1e-5)

    def test_parameter_order3(self):
        # Test including reacting surfaces
        gas1 = ct.Solution('diamond.xml', 'gas')
        solid = ct.Solution('diamond.xml', 'diamond')
        interface = ct.Interface('diamond.xml', 'diamond_100',
                                 (gas1, solid))

        gas2 = ct.Solution('h2o2.xml')

        def setup(order):
            # 'order' encodes two independent permutations: bit 0 selects the
            # wall construction order, bit 1 selects the reactor add order.
            gas1.TPX = 1200, 1e3, 'H:0.002, H2:1, CH4:0.01, CH3:0.0002'
            gas2.TPX = 900, 101325, 'H2:0.1, OH:1e-7, O2:0.1, AR:1e-5'
            net = ct.ReactorNet()
            rA = ct.IdealGasReactor(gas1)
            rB = ct.IdealGasReactor(gas2)

            if order % 2 == 0:
                wA = ct.Wall(rA, rB)
                wB = ct.Wall(rB, rA)
            else:
                wB = ct.Wall(rB, rA)
                wA = ct.Wall(rA, rB)

            wA.left.kinetics = interface
            wB.right.kinetics = interface

            wA.area = 0.1
            wB.area = 10

            # Different initial coverages on each reacting surface
            C1 = np.zeros(interface.n_species)
            C2 = np.zeros(interface.n_species)
            C1[0] = 0.3
            C1[4] = 0.7

            C2[0] = 0.9
            C2[4] = 0.1
            wA.left.coverages = C1
            wB.right.coverages = C2

            if order // 2 == 0:
                net.add_reactor(rA)
                net.add_reactor(rB)
            else:
                net.add_reactor(rB)
                net.add_reactor(rA)

            return rA,rB,wA,wB,net

        def integrate(r, net):
            net.advance(1e-4)
            return net.sensitivities()

        S = []

        for order in range(4):
            # First run: parameters registered in one order
            rA,rB,wA,wB,net = setup(order)
            for (obj,k) in [(rB,2), (rB,18), (wA.left,2),
                            (wA.left,0), (wB.right,2)]:
                obj.add_sensitivity_reaction(k)
            integrate(rB, net)
            S.append(net.sensitivities())

            # Second run: same parameters, permuted registration order
            rA,rB,wA,wB,net = setup(order)
            for (obj,k) in [(wB.right,2), (wA.left,2), (rB,18),
                            (wA.left,0), (rB,2)]:
                obj.add_sensitivity_reaction(k)
            integrate(rB, net)
            S.append(net.sensitivities())

        # Each pair of runs must agree after permuting columns to match
        for a,b in [(0,1),(2,3),(4,5),(6,7)]:
            for i,j in enumerate((4,2,1,3,0)):
                self.assertArrayNear(S[a][:,i], S[b][:,j], 1e-2, 1e-3)
class CombustorTestImplementation(object):
    """
    These tests are based on the sample:

        interfaces/cython/cantera/examples/reactors/combustor.py

    with some simplifications so that they run faster and produce more
    consistent output.

    Kept separate from unittest.TestCase (see the concrete subclass) so the
    methods can also be run directly to regenerate the reference data file.
    """

    # Reference profiles; regenerated by calling
    # test_integrateWithAdvance(saveReference=True)
    referenceFile = '../data/CombustorTest-integrateWithAdvance.csv'

    def setUp(self):
        """Build the combustor network: fuel/oxidizer/igniter reservoirs
        feeding one reactor, with a valve to an exhaust reservoir.

        NOTE: self.gas is reused and re-set via TPX before each reservoir is
        created, so the statement order here is significant.
        """
        self.gas = ct.Solution('h2o2.xml')

        # create a reservoir for the fuel inlet, and set to pure methane.
        self.gas.TPX = 300.0, ct.one_atm, 'H2:1.0'
        fuel_in = ct.Reservoir(self.gas)
        fuel_mw = self.gas.mean_molecular_weight

        # Oxidizer inlet
        self.gas.TPX = 300.0, ct.one_atm, 'O2:1.0, AR:3.0'
        oxidizer_in = ct.Reservoir(self.gas)
        oxidizer_mw = self.gas.mean_molecular_weight

        # to ignite the fuel/air mixture, we'll introduce a pulse of radicals.
        # The steady-state behavior is independent of how we do this, so we'll
        # just use a stream of pure atomic hydrogen.
        self.gas.TPX = 300.0, ct.one_atm, 'H:1.0'
        self.igniter = ct.Reservoir(self.gas)

        # create the combustor, and fill it in initially with a diluent
        self.gas.TPX = 300.0, ct.one_atm, 'AR:1.0'
        self.combustor = ct.IdealGasReactor(self.gas)

        # create a reservoir for the exhaust
        self.exhaust = ct.Reservoir(self.gas)

        # compute fuel and air mass flow rates
        factor = 0.1
        oxidizer_mdot = 4 * factor*oxidizer_mw
        fuel_mdot = factor*fuel_mw

        # The igniter will use a time-dependent igniter mass flow rate.
        def igniter_mdot(t, t0=0.1, fwhm=0.05, amplitude=0.1):
            # Gaussian pulse centered at t0 with the given FWHM
            return amplitude * math.exp(-(t-t0)**2 * 4 * math.log(2) / fwhm**2)

        # create and install the mass flow controllers. Controllers
        # m1 and m2 provide constant mass flow rates, and m3 provides
        # a short Gaussian pulse only to ignite the mixture
        m1 = ct.MassFlowController(fuel_in, self.combustor, mdot=fuel_mdot)
        m2 = ct.MassFlowController(oxidizer_in, self.combustor, mdot=oxidizer_mdot)
        m3 = ct.MassFlowController(self.igniter, self.combustor, mdot=igniter_mdot)

        # put a valve on the exhaust line to regulate the pressure
        self.v = ct.Valve(self.combustor, self.exhaust, K=1.0)

        # the simulation only contains one reactor
        self.sim = ct.ReactorNet([self.combustor])

    def test_integrateWithStep(self):
        """Integrate with the solver's own steps; compare against reference."""
        tnow = 0.0
        tfinal = 0.25
        self.data = []
        while tnow < tfinal:
            tnow = self.sim.step()
            self.data.append([tnow, self.combustor.T] +
                             list(self.combustor.thermo.X))

        self.assertTrue(tnow >= tfinal)
        bad = utilities.compareProfiles(self.referenceFile, self.data,
                                        rtol=1e-3, atol=1e-9)
        self.assertFalse(bad, bad)

    def test_integrateWithAdvance(self, saveReference=False):
        """Advance on a fixed time grid; compare against (or regenerate)
        the reference profile file."""
        self.data = []
        for t in np.linspace(0, 0.25, 101)[1:]:
            self.sim.advance(t)
            self.data.append([t, self.combustor.T] +
                             list(self.combustor.thermo.X))

        if saveReference:
            np.savetxt(self.referenceFile, np.array(self.data), '%11.6e', ', ')
        else:
            bad = utilities.compareProfiles(self.referenceFile, self.data,
                                            rtol=1e-6, atol=1e-12)
            self.assertFalse(bad, bad)
class WallTestImplementation(object):
    """
    These tests are based on the sample:

        interfaces/cython/cantera/examples/reactors/reactor2.py

    with some simplifications so that they run faster and produce more
    consistent output.

    Kept separate from unittest.TestCase (see the concrete subclass) so the
    methods can also be run directly to regenerate the reference data file.
    """

    # Reference profiles; regenerated by calling
    # test_integrateWithAdvance(saveReference=True)
    referenceFile = '../data/WallTest-integrateWithAdvance.csv'

    def setUp(self):
        """Build two reactors coupled by a movable/conducting wall, plus a
        second wall representing heat loss to an environment reservoir."""
        # reservoir to represent the environment
        self.gas0 = ct.Solution('air.xml')
        self.gas0.TP = 300, ct.one_atm
        self.env = ct.Reservoir(self.gas0)

        # reactor to represent the side filled with Argon
        self.gas1 = ct.Solution('air.xml')
        self.gas1.TPX = 1000.0, 30*ct.one_atm, 'AR:1.0'
        self.r1 = ct.Reactor(self.gas1)

        # reactor to represent the combustible mixture
        self.gas2 = ct.Solution('h2o2.xml')
        self.gas2.TPX = 500.0, 1.5*ct.one_atm, 'H2:0.5, O2:1.0, AR:10.0'
        self.r2 = ct.Reactor(self.gas2)

        # Wall between the two reactors
        self.w1 = ct.Wall(self.r2, self.r1, A=1.0, K=2e-4, U=400.0)

        # Wall to represent heat loss to the environment
        self.w2 = ct.Wall(self.r2, self.env, A=1.0, U=2000.0)

        # Create the reactor network
        self.sim = ct.ReactorNet([self.r1, self.r2])

    def test_integrateWithStep(self):
        """Integrate with the solver's own steps; compare against reference."""
        tnow = 0.0
        tfinal = 0.01
        self.data = []
        while tnow < tfinal:
            tnow = self.sim.step()
            self.data.append([tnow,
                              self.r1.T, self.r2.T,
                              self.r1.thermo.P, self.r2.thermo.P,
                              self.r1.volume, self.r2.volume])

        self.assertTrue(tnow >= tfinal)
        bad = utilities.compareProfiles(self.referenceFile, self.data,
                                        rtol=1e-3, atol=1e-8)
        self.assertFalse(bad, bad)

    def test_integrateWithAdvance(self, saveReference=False):
        """Advance on a fixed time grid; compare against (or regenerate)
        the reference profile file."""
        self.data = []
        for t in np.linspace(0, 0.01, 200)[1:]:
            self.sim.advance(t)
            self.data.append([t,
                              self.r1.T, self.r2.T,
                              self.r1.thermo.P, self.r2.thermo.P,
                              self.r1.volume, self.r2.volume])

        if saveReference:
            np.savetxt(self.referenceFile, np.array(self.data), '%11.6e', ', ')
        else:
            bad = utilities.compareProfiles(self.referenceFile, self.data,
                                            rtol=2e-5, atol=1e-9)
            self.assertFalse(bad, bad)
# Keep the implementations separate from the unittest-derived class
# so that they can be run independently to generate the reference data files.
class CombustorTest(CombustorTestImplementation, unittest.TestCase):
    """Concrete unittest binding for the combustor reference simulation."""
class WallTest(WallTestImplementation, unittest.TestCase):
    """Concrete unittest binding for the two-reactor wall simulation."""
| |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Journal.ccn_code'
db.add_column('journalmanager_journal', 'ccn_code',
self.gf('django.db.models.fields.CharField')(default='', max_length=64, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Journal.ccn_code'
db.delete_column('journalmanager_journal', 'ccn_code')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'journalmanager.aheadpressrelease': {
'Meta': {'object_name': 'AheadPressRelease', '_ormbases': ['journalmanager.PressRelease']},
'journal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'press_releases'", 'to': "orm['journalmanager.Journal']"}),
'pressrelease_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['journalmanager.PressRelease']", 'unique': 'True', 'primary_key': 'True'})
},
'journalmanager.article': {
'Meta': {'object_name': 'Article'},
'aid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'article_type': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'articles_linkage_is_pending': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
'doi': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '2048', 'db_index': 'True'}),
'domain_key': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '2048', 'db_index': 'False'}),
'es_is_dirty': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'es_updated_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_aop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'issn_epub': ('django.db.models.fields.CharField', [], {'max_length': '9', 'db_index': 'True'}),
'issn_ppub': ('django.db.models.fields.CharField', [], {'max_length': '9', 'db_index': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'articles'", 'null': 'True', 'to': "orm['journalmanager.Issue']"}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'articles'", 'null': 'True', 'to': "orm['journalmanager.Journal']"}),
'journal_title': ('django.db.models.fields.CharField', [], {'max_length': '512', 'db_index': 'True'}),
'related_articles': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['journalmanager.Article']", 'null': 'True', 'through': "orm['journalmanager.ArticlesLinkage']", 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'xml': ('scielomanager.custom_fields.XMLSPSField', [], {}),
'xml_version': ('django.db.models.fields.CharField', [], {'max_length': '9'})
},
'journalmanager.articleslinkage': {
'Meta': {'object_name': 'ArticlesLinkage'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link_to': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'referrers'", 'to': "orm['journalmanager.Article']"}),
'link_type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'referrer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'links_to'", 'to': "orm['journalmanager.Article']"})
},
'journalmanager.collection': {
'Meta': {'ordering': "['name']", 'object_name': 'Collection'},
'acronym': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}),
'address': ('django.db.models.fields.TextField', [], {}),
'address_complement': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'address_number': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'collection': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'user_collection'", 'to': "orm['auth.User']", 'through': "orm['journalmanager.UserCollections']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'name_slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'})
},
'journalmanager.institution': {
'Meta': {'ordering': "['name']", 'object_name': 'Institution'},
'acronym': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}),
'address': ('django.db.models.fields.TextField', [], {}),
'address_complement': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'address_number': ('django.db.models.fields.CharField', [], {'max_length': '8'}),
'cel': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'complement': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'})
},
'journalmanager.issue': {
'Meta': {'ordering': "('created', 'id')", 'object_name': 'Issue'},
'cover': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'ctrl_vocabulary': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'editorial_standard': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_marked_up': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Journal']"}),
'label': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'blank': 'True'}),
'publication_end_month': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'publication_start_month': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'publication_year': ('django.db.models.fields.IntegerField', [], {}),
'section': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Section']", 'symmetrical': 'False', 'blank': 'True'}),
'spe_text': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'suppl_text': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'total_documents': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'regular'", 'max_length': '15'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'use_license': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.UseLicense']", 'null': 'True'}),
'volume': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'})
},
'journalmanager.issuetitle': {
'Meta': {'object_name': 'IssueTitle'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'issue': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Issue']"}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Language']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'journalmanager.journal': {
'Meta': {'ordering': "('title', 'id')", 'object_name': 'Journal'},
'abstract_keyword_languages': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'abstract_keyword_languages'", 'symmetrical': 'False', 'to': "orm['journalmanager.Language']"}),
'acronym': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'ccn_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'}),
'collections': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Collection']", 'through': "orm['journalmanager.Membership']", 'symmetrical': 'False'}),
'copyrighter': ('django.db.models.fields.CharField', [], {'max_length': '254'}),
'cover': ('scielomanager.custom_fields.ContentTypeRestrictedFileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'enjoy_creator'", 'to': "orm['auth.User']"}),
'ctrl_vocabulary': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'current_ahead_documents': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '3', 'null': 'True', 'blank': 'True'}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'editor_journal'", 'null': 'True', 'to': "orm['auth.User']"}),
'editor_address': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'editor_address_city': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'editor_address_country': ('scielo_extensions.modelfields.CountryField', [], {'max_length': '2'}),
'editor_address_state': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'editor_address_zip': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'editor_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'editor_name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'editor_phone1': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'editor_phone2': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'editorial_standard': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'eletronic_issn': ('django.db.models.fields.CharField', [], {'max_length': '9', 'db_index': 'True'}),
'final_num': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'final_vol': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'final_year': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'frequency': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index_coverage': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'init_num': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'init_vol': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'init_year': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
'is_indexed_aehci': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_indexed_scie': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_indexed_ssci': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Language']", 'symmetrical': 'False'}),
'logo': ('scielomanager.custom_fields.ContentTypeRestrictedFileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'medline_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'medline_title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'national_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'max_length': '254', 'null': 'True', 'blank': 'True'}),
'other_previous_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'previous_ahead_documents': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '3', 'null': 'True', 'blank': 'True'}),
'previous_title': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'prev_title'", 'null': 'True', 'to': "orm['journalmanager.Journal']"}),
'print_issn': ('django.db.models.fields.CharField', [], {'max_length': '9', 'db_index': 'True'}),
'pub_level': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'publication_city': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'publisher_country': ('scielo_extensions.modelfields.CountryField', [], {'max_length': '2'}),
'publisher_name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'publisher_state': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'scielo_issn': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'secs_code': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'short_title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'db_index': 'True'}),
'sponsor': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'journal_sponsor'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['journalmanager.Sponsor']"}),
'study_areas': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'journals_migration_tmp'", 'null': 'True', 'to': "orm['journalmanager.StudyArea']"}),
'subject_categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'journals'", 'null': 'True', 'to': "orm['journalmanager.SubjectCategory']"}),
'subject_descriptors': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}),
'title_iso': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'}),
'twitter_user': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'url_journal': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'url_online_submission': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'use_license': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.UseLicense']"})
},
'journalmanager.journalmission': {
'Meta': {'object_name': 'JournalMission'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'missions'", 'to': "orm['journalmanager.Journal']"}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Language']", 'null': 'True'})
},
'journalmanager.journaltimeline': {
'Meta': {'object_name': 'JournalTimeline'},
'collection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Collection']"}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'statuses'", 'to': "orm['journalmanager.Journal']"}),
'reason': ('django.db.models.fields.TextField', [], {'default': "''"}),
'since': ('django.db.models.fields.DateTimeField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '16'})
},
'journalmanager.journaltitle': {
'Meta': {'object_name': 'JournalTitle'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'other_titles'", 'to': "orm['journalmanager.Journal']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'journalmanager.language': {
'Meta': {'ordering': "['name']", 'object_name': 'Language'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iso_code': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'journalmanager.membership': {
'Meta': {'unique_together': "(('journal', 'collection'),)", 'object_name': 'Membership'},
'collection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Collection']"}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Journal']"}),
'reason': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'since': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'inprogress'", 'max_length': '16'})
},
'journalmanager.pendedform': {
'Meta': {'object_name': 'PendedForm'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'form_hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pending_forms'", 'to': "orm['auth.User']"}),
'view_name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'journalmanager.pendedvalue': {
'Meta': {'object_name': 'PendedValue'},
'form': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'data'", 'to': "orm['journalmanager.PendedForm']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'journalmanager.pressrelease': {
'Meta': {'object_name': 'PressRelease'},
'doi': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'journalmanager.pressreleasearticle': {
'Meta': {'object_name': 'PressReleaseArticle'},
'article_pid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'press_release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'articles'", 'to': "orm['journalmanager.PressRelease']"})
},
'journalmanager.pressreleasetranslation': {
'Meta': {'object_name': 'PressReleaseTranslation'},
'content': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Language']"}),
'press_release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'to': "orm['journalmanager.PressRelease']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'journalmanager.regularpressrelease': {
'Meta': {'object_name': 'RegularPressRelease', '_ormbases': ['journalmanager.PressRelease']},
'issue': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'press_releases'", 'to': "orm['journalmanager.Issue']"}),
'pressrelease_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['journalmanager.PressRelease']", 'unique': 'True', 'primary_key': 'True'})
},
'journalmanager.section': {
'Meta': {'ordering': "('id',)", 'object_name': 'Section'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '21', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'journal': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Journal']"}),
'legacy_code': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
'journalmanager.sectiontitle': {
'Meta': {'ordering': "['title']", 'object_name': 'SectionTitle'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Language']"}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'titles'", 'to': "orm['journalmanager.Section']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'journalmanager.sponsor': {
'Meta': {'ordering': "['name']", 'object_name': 'Sponsor', '_ormbases': ['journalmanager.Institution']},
'collections': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['journalmanager.Collection']", 'symmetrical': 'False'}),
'institution_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['journalmanager.Institution']", 'unique': 'True', 'primary_key': 'True'})
},
'journalmanager.studyarea': {
'Meta': {'object_name': 'StudyArea'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'study_area': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'journalmanager.subjectcategory': {
'Meta': {'object_name': 'SubjectCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'term': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_index': 'True'})
},
'journalmanager.translateddata': {
'Meta': {'object_name': 'TranslatedData'},
'field': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'translation': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'})
},
'journalmanager.uselicense': {
'Meta': {'ordering': "['license_code']", 'object_name': 'UseLicense'},
'disclaimer': ('django.db.models.fields.TextField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'license_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'reference_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'journalmanager.usercollections': {
'Meta': {'unique_together': "(('user', 'collection'),)", 'object_name': 'UserCollections'},
'collection': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['journalmanager.Collection']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_manager': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'journalmanager.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'email_notifications': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tz': ('django.db.models.fields.CharField', [], {'default': "'America/Sao_Paulo'", 'max_length': '150'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['journalmanager']
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_django-behaviors
------------
Tests for `django-behaviors` behaviors module.
"""
from django.contrib.auth import get_user_model
from django.test import override_settings
from django.utils import timezone
from django.core.exceptions import ObjectDoesNotExist
from test_plus.test import TestCase
from datetime import timedelta
from .models import (AuthoredMock, EditoredMock, PublishedMock,
ReleasedMock, SluggedMock, NonUniqueSluggedMock,
TimestampedMock, StoreDeletedMock)
class TestAuthored(TestCase):
    """Tests for the ``Authored`` behavior via ``AuthoredMock``."""

    @classmethod
    def setUpTestData(cls):
        # One user and one authored record shared by every test method.
        cls.author = get_user_model().objects.create(
            username='u1', email='u1@example.com', password='password')
        cls.mock = AuthoredMock.objects.create(author=cls.author)

    def setUp(self):
        # Undo any in-memory mutations left behind by a previous test.
        self.author.refresh_from_db()
        self.mock.refresh_from_db()

    def test_author_field_label(self):
        label = self.mock._meta.get_field('author').verbose_name
        self.assertEqual(label, 'author')

    def test_author_exists(self):
        self.assertIsNotNone(self.mock.author)
        self.assertEqual(self.mock.author, self.author)

    def test_author_related_name(self):
        query_name = self.mock._meta.get_field('author').related_query_name()
        self.assertEqual(query_name, 'tests_authoredmock_author')
class TestEditored(TestCase):
    """Tests for the ``Editored`` behavior via ``EditoredMock``."""

    @classmethod
    def setUpTestData(cls):
        # The editor field is optional, so the mock starts without one.
        cls.editor = get_user_model().objects.create(
            username='u1', email='u1@example.com', password='password')
        cls.mock = EditoredMock.objects.create()

    def setUp(self):
        # Undo any in-memory mutations left behind by a previous test.
        self.editor.refresh_from_db()
        self.mock.refresh_from_db()

    def test_editor_field_label(self):
        label = self.mock._meta.get_field('editor').verbose_name
        self.assertEqual(label, 'editor')

    def test_editor_doesnt_exist(self):
        self.assertIsNone(self.mock.editor)

    def test_editor_exists(self):
        self.mock.editor = self.editor
        self.mock.save()
        self.assertIsNotNone(self.mock.editor)
        self.assertEqual(self.mock.editor, self.editor)

    def test_editor_related_name(self):
        query_name = self.mock._meta.get_field('editor').related_query_name()
        self.assertEqual(query_name, 'tests_editoredmock_editor')
class TestPublished(TestCase):
    """Tests for the ``Published`` behavior via ``PublishedMock``."""

    @classmethod
    def setUpTestData(cls):
        cls.mock = PublishedMock.objects.create()

    def setUp(self):
        # Reset the shared instance between test methods.
        self.mock.refresh_from_db()

    def test_draft_true_by_default(self):
        # A new record starts in draft state.
        self.assertTrue(self.mock.draft)

    def test_published_property(self):
        self.mock.publication_status = PublishedMock.PUBLISHED
        self.mock.save()
        self.assertTrue(self.mock.published)
class TestReleased(TestCase):
    """Tests for the ``Released`` behavior via ``ReleasedMock``."""

    @classmethod
    def setUpTestData(cls):
        # One record without a release date, one released "now".
        cls.mock = ReleasedMock.objects.create()
        cls.mock2 = ReleasedMock.objects.create(release_date=timezone.now())

    def setUp(self):
        # Reset the shared instances between test methods.
        self.mock.refresh_from_db()
        self.mock2.refresh_from_db()

    def test_nullable_release_date(self):
        self.assertIsNone(self.mock.release_date)

    def test_nulled_release_date_released_is_false(self):
        self.assertFalse(self.mock.released)

    def test_future_release_date_released_is_false(self):
        next_week = timezone.now() + timedelta(weeks=1)
        self.mock.release_date = next_week
        self.mock.save()
        self.assertFalse(self.mock.released)

    def test_past_release_date_released_is_true(self):
        self.mock.release_date = timezone.now()
        self.mock.save()
        self.assertTrue(self.mock.released)

    def test_release_on_no_date_provided(self):
        # release_on() without an argument releases immediately.
        self.mock.release_on()
        self.assertTrue(self.mock.released)

    def test_release_on_future_date_provided(self):
        next_week = timezone.now() + timedelta(weeks=1)
        self.mock.release_on(next_week)
        self.assertFalse(self.mock.released)

    def test_release_on_past_date_provided(self):
        self.mock.release_on(timezone.now())
        self.assertTrue(self.mock.released)
class TestSlugged(TestCase):
    """Tests for the ``Slugged`` behavior via ``SluggedMock``."""

    @classmethod
    def setUpTestData(cls):
        # Three titles that all slugify to the same base slug.
        cls.mock = SluggedMock.objects.create(title="Slugged Title")
        cls.mock2 = SluggedMock.objects.create(title="Slugged TITLE")
        cls.mock3 = SluggedMock.objects.create(title="SLUGGED Title")

    def setUp(self):
        # Reset every shared instance between test methods.
        for instance in (self.mock, self.mock2, self.mock3):
            instance.refresh_from_db()

    def test_title_field_slugged(self):
        self.assertEqual(self.mock.slug, "slugged-title")

    def test_generate_unique_slug(self):
        # Colliding titles receive an incrementing numeric suffix.
        self.assertEqual(self.mock.slug, "slugged-title")
        self.assertEqual(self.mock2.slug, "slugged-title-1")
        self.assertEqual(self.mock3.slug, "slugged-title-2")
@override_settings(UNIQUE_SLUG_BEHAVIOR=False)
class TestNonUniqueSlugged(TestCase):
    """With ``UNIQUE_SLUG_BEHAVIOR`` disabled, colliding titles share a slug."""

    @classmethod
    def setUpTestData(cls):
        # Three titles that all slugify to the same base slug.
        cls.mock = NonUniqueSluggedMock.objects.create(title="Slugged Title")
        cls.mock2 = NonUniqueSluggedMock.objects.create(title="Slugged TITLE")
        cls.mock3 = NonUniqueSluggedMock.objects.create(title="SLUGGED Title")

    def setUp(self):
        # Reset every shared instance between test methods.
        for instance in (self.mock, self.mock2, self.mock3):
            instance.refresh_from_db()

    def test_generate_non_unique_slug(self):
        # No uniquifying suffix is appended for any of the three rows.
        for instance in (self.mock, self.mock2, self.mock3):
            self.assertEqual(instance.slug, "slugged-title")
class TestTimestamped(TestCase):
    """Tests for the ``Timestamped`` behavior via ``TimestampedMock``."""

    @classmethod
    def setUpTestData(cls):
        cls.mock = TimestampedMock.objects.create()

    def setUp(self):
        # Reset the shared instance between test methods.
        self.mock.refresh_from_db()

    def test_timestamp_changed_initially_false(self):
        # A freshly-created record has never been modified.
        self.assertFalse(self.mock.changed)

    def test_timestamp_changed_after_save(self):
        self.mock.save()
        self.assertTrue(self.mock.changed)
class TestStoreDeleted(TestCase):
    """Tests for the ``StoreDeleted`` behavior via ``StoreDeletedMock``."""

    @classmethod
    def setUpTestData(cls):
        cls.mock_to_delete = StoreDeletedMock.objects.create()
        cls.mock_to_restore = StoreDeletedMock.objects.create()

    def setUp(self):
        # Refresh BOTH class-level instances so in-memory state mutated by
        # one test (delete()/restore()) cannot leak into the next.
        # Fix: the original only refreshed mock_to_delete, leaving
        # mock_to_restore stale relative to the (rolled-back) database.
        self.mock_to_delete.refresh_from_db()
        self.mock_to_restore.refresh_from_db()

    def test_delete_model(self):
        self.mock_to_delete.delete()
        self.assertIsNotNone(self.mock_to_delete.deleted)

    def test_restore_model(self):
        self.mock_to_restore.delete()
        self.mock_to_restore.restore()
        self.assertIsNone(self.mock_to_restore.deleted)

    def test_delete_not_created_object_raises_exception(self):
        # delete() on an unsaved instance (no pk) must raise.
        mock = StoreDeletedMock()
        self.assertIsNone(mock.pk)
        with self.assertRaises(ObjectDoesNotExist) as raises_context:
            mock.delete()
        self.assertIsNotNone(raises_context)

    def test_restore_not_created_object_raises_exception(self):
        # restore() on an unsaved instance (no pk) must raise.
        mock = StoreDeletedMock()
        self.assertIsNone(mock.pk)
        with self.assertRaises(ObjectDoesNotExist) as raises_context:
            mock.restore()
        self.assertIsNotNone(raises_context)

    def test_is_deleted_property_returns_true_when_delete_object(self):
        self.mock_to_delete.delete()
        self.assertTrue(self.mock_to_delete.is_deleted)

    def test_is_deleted_property_returns_false_when_restore_object(self):
        self.mock_to_restore.delete()
        self.mock_to_restore.restore()
        self.assertFalse(self.mock_to_restore.is_deleted)

    def test_is_deleted_property_returns_false_when_create_object(self):
        mock = StoreDeletedMock()
        mock.save()
        self.assertFalse(mock.is_deleted)
| |
import numbers
import numpy as np
import torch
import torch.autograd
import _ext
import _extc
import error_checking as ec
from kernels import KERNELS, KERNEL_NAMES
MAX_FLOAT = float(np.finfo(np.float32).max)
class ParticleProjection(torch.nn.Module):
    """ The particle projection layer. Projects the given set of particles onto
    a camera image plane. For each particle, this layer finds its location on
    the image plane, then adds a small circular Gaussian centered at that location
    to the image. The contributions from all particles are added together into
    a final image. Note that unlike the other layers in this package, this layer
    only works with 3D particles.
    """
    def __init__(self, camera_fl, camera_size, filter_std, filter_scale):
        """ Initialize a ParticleProjection layer.
        Arguments:
            -camera_fl: The camera focal length in pixels (all pixels are
                        assumed to be square. This layer does not simulate
                        any image warping e.g. radial distortion).
            -camera_size: 2-tuple with the image width and height in pixels.
            -filter_std: The standard deviation of the Gaussian that is
                         added at each pixel location.
            -filter_scale: Before adding the Gaussian for an individual
                           particle, it is scaled by this value.
        """
        super(ParticleProjection, self).__init__()
        # Validate all constructor arguments: strictly positive, integral
        # for the image dimensions, real for the remaining scalars.
        self.camera_size = ec.make_list(camera_size, 2, "camera_size",
            "%s > 0", "isinstance(%s, numbers.Integral)")
        self.camera_fl = ec.check_conditions(camera_fl, "camera_fl",
            "%s > 0", "isinstance(%s, numbers.Real)")
        self.filter_std = ec.check_conditions(filter_std, "filter_std",
            "%s > 0", "isinstance(%s, numbers.Real)")
        self.filter_scale = ec.check_conditions(filter_scale, "filter_scale",
            "%s > 0", "isinstance(%s, numbers.Real)")
        # Fallback depth mask of shape (1, H, W) filled with MAX_FLOAT,
        # i.e. "nothing occludes any particle". Used (and lazily resized to
        # the batch size) when forward() receives depth_mask=None.
        self.register_buffer("empty_depth_mask",
            torch.ones(1, self.camera_size[1], self.camera_size[0])*MAX_FLOAT)

    def _rotationMatrixFromQuaternion(self, quat):
        """ Convert a batch of quaternions (Bx4, xyzw order) to Bx3x3
        rotation matrices wrapped in a no-grad Variable.

        The standard rotation matrix is:

            1 - 2*qy2 - 2*qz2   2*qx*qy - 2*qz*qw   2*qx*qz + 2*qy*qw
            2*qx*qy + 2*qz*qw   1 - 2*qx2 - 2*qz2   2*qy*qz - 2*qx*qw
            2*qx*qz - 2*qy*qw   2*qy*qz + 2*qx*qw   1 - 2*qx2 - 2*qy2

        Note the assignments below store the TRANSPOSE of this matrix,
        because forward() multiplies row-vector locations on the left
        (torch.bmm(locs, rot)), which is equivalent to applying the matrix
        above to column vectors.
        """
        # Operate on raw tensor data; no gradient flows through this path.
        quat = quat.data
        qx = quat[:, 0]
        qy = quat[:, 1]
        qz = quat[:, 2]
        qw = quat[:, 3]
        qx2 = qx*qx
        qxqy = qx*qy
        qxqz = qx*qz
        qxqw = qx*qw
        qy2 = qy*qy
        qyqz = qy*qz
        qyqw = qy*qw
        qz2 = qz*qz
        qzqw = qz*qw
        ret = quat.new(quat.size()[0], 3, 3)
        ret[:, 0, 0] = 1 - 2*qy2 - 2*qz2
        ret[:, 1, 0] = 2*qxqy - 2*qzqw
        ret[:, 2, 0] = 2*qxqz + 2*qyqw
        ret[:, 0, 1] = 2*qxqy + 2*qzqw
        ret[:, 1, 1] = 1 - 2*qx2 - 2*qz2
        ret[:, 2, 1] = 2*qyqz - 2*qxqw
        ret[:, 0, 2] = 2*qxqz - 2*qyqw
        ret[:, 1, 2] = 2*qyqz + 2*qxqw
        ret[:, 2, 2] = 1 - 2*qx2 - 2*qy2
        return torch.autograd.Variable(ret, requires_grad=False)

    def forward(self, locs, camera_pose, camera_rot, depth_mask=None):
        """ Forward pass for the particle projection. Takes in the set of
        particles and outputs an image.
        Arguments:
            -locs: A BxNx3 tensor where B is the batch size, N is the number
                   of particles, and 3 is the dimensionality of the
                   particles' coordinate space (this layer currently only
                   supports 3D projections).
            -camera_pose: A Bx3 tensor containing the camera translation.
            -camera_rot: A Bx4 tensor containing the camera rotation as a
                         quaternion in xyzw format.
            -depth_mask: An optional BxHxW tensor where W and H are the
                         camera image width and height respectively. If not
                         None, then this is used to compute occlusions. The
                         value in each pixel in the depth_mask should be
                         the distance to the first object. Any particles
                         further away than that value will not be projected
                         onto the output image.
        Returns: A BxHxW tensor of the projected particles.
        """
        # Error checking.
        batch_size = locs.size()[0]
        N = locs.size()[1]
        ec.check_tensor_dims(locs, "locs", (batch_size, N, 3))
        ec.check_tensor_dims(camera_pose, "camera_pose", (batch_size, 3))
        ec.check_tensor_dims(camera_rot, "camera_rot", (batch_size, 4))
        if depth_mask is not None:
            ec.check_tensor_dims(depth_mask, "depth_mask", (batch_size,
                self.camera_size[1], self.camera_size[0]))
            depth_mask = depth_mask.contiguous()
        else:
            # No mask given: use the all-MAX_FLOAT buffer ("no occluders"),
            # growing/shrinking it in place to match the batch size.
            if self.empty_depth_mask.size()[0] != batch_size:
                self.empty_depth_mask.resize_(
                    batch_size, self.camera_size[1], self.camera_size[0])
                self.empty_depth_mask.fill_(MAX_FLOAT)
            depth_mask = torch.autograd.Variable(
                self.empty_depth_mask, requires_grad=False)
            if locs.is_cuda:
                depth_mask = depth_mask.cuda()
        # Let's transform the particles to camera space here.
        locs = locs - camera_pose.unsqueeze(1)
        # Ensure the rotation quaternion is normalized.
        camera_rot = camera_rot / \
            torch.sqrt(torch.sum(camera_rot**2, 1, keepdim=True))
        # Invert the rotation: negate x, y, z (the conjugate of a unit
        # quaternion is its inverse).
        inv = camera_rot.data.new(1, 4)
        inv[0, 0] = -1
        inv[0, 1] = -1
        inv[0, 2] = -1
        inv[0, 3] = 1
        inv = torch.autograd.Variable(inv, requires_grad=False)
        camera_rot = camera_rot*inv
        rot = self._rotationMatrixFromQuaternion(camera_rot)
        # Rotate the locs into camera space.
        try:
            # There's a bug that causes this to fail on the first call when using cuda.
            # To fix that, just call it again.
            locs = torch.bmm(locs, rot)
        except RuntimeError:
            locs = torch.bmm(locs, rot)
        locs = locs.contiguous()
        # Delegate the actual splatting to the C/CUDA extension via the
        # autograd Function below.
        proj = _ParticleProjectionFunction(self.camera_fl, self.camera_size, self.filter_std,
            self.filter_scale)
        ret = proj(locs, depth_mask)
        return ret
"""
INTERNAL FUNCTIONS
"""
class _ParticleProjectionFunction(torch.autograd.Function):
    # NOTE(review): old-style, stateful autograd Function — hyper-parameters
    # live on the instance and one instance is constructed per call. This
    # predates the static, ctx-based Function API.
    def __init__(self, camera_fl, camera_size, filter_std, filter_scale):
        super(_ParticleProjectionFunction, self).__init__()
        self.camera_fl = camera_fl
        self.camera_size = camera_size
        self.filter_std = filter_std
        self.filter_scale = filter_scale

    def forward(self, locs, depth_mask):
        # Keep the inputs around for the backward pass.
        self.save_for_backward(locs, depth_mask)
        batch_size = locs.size()[0]
        # Output image: one (H, W) plane per batch element, zero-initialized
        # and accumulated into by the extension kernels.
        ret = locs.new(batch_size, self.camera_size[1], self.camera_size[0])
        ret.fill_(0)
        if locs.is_cuda:
            # CUDA extension signals failure via a falsy return value.
            if not _extc.spnc_particleprojection_forward(locs, self.camera_fl,
                    self.filter_std, self.filter_scale, depth_mask, ret):
                raise Exception("Cuda error")
        else:
            _ext.spn_particleprojection_forward(locs, self.camera_fl,
                    self.filter_std, self.filter_scale, depth_mask, ret)
        return ret

    def backward(self, grad_output):
        locs, depth_mask = self.saved_tensors
        ret_locs = grad_output.new(locs.size())
        ret_locs.fill_(0)
        # No gradient is computed w.r.t. the depth mask; zeros are returned
        # only to match forward()'s argument list.
        ret_depth_mask = grad_output.new(depth_mask.size())
        ret_depth_mask.fill_(0)
        if grad_output.is_cuda:
            if not _extc.spnc_particleprojection_backward(locs,
                    self.camera_fl, self.filter_std, self.filter_scale, depth_mask, grad_output, ret_locs):
                raise Exception("Cuda error")
        else:
            _ext.spn_particleprojection_backward(locs,
                    self.camera_fl, self.filter_std, self.filter_scale, depth_mask, grad_output, ret_locs)
        return (ret_locs,
                ret_depth_mask,)
| |
from nose.tools import eq_, ok_, raises, assert_true
from wtforms import fields
from flask_admin import form
from flask_admin._compat import as_unicode
from flask_admin._compat import iteritems
from flask_admin.contrib.sqla import ModelView, filters
from flask_babelex import Babel
from . import setup
from datetime import datetime, time, date
class CustomModelView(ModelView):
    """ModelView that accepts arbitrary view options as keyword arguments."""

    def __init__(self, model, session,
                 name=None, category=None, endpoint=None, url=None,
                 **kwargs):
        # Apply every extra keyword as an attribute before the base class
        # inspects the configuration during construction.
        for key, value in iteritems(kwargs):
            setattr(self, key, value)
        super(CustomModelView, self).__init__(model, session, name, category,
                                              endpoint, url)
def create_models(db):
    """Declare the two SQLAlchemy models used throughout these tests.

    Model1 covers a spread of scalar column types (strings, text, bool,
    enum, date/time/datetime); Model2 adds numeric columns and a foreign
    key to Model1. Creates the tables and returns (Model1, Model2).
    """
    class Model1(db.Model):
        def __init__(self, test1=None, test2=None, test3=None, test4=None,
                     bool_field=False, date_field=None, time_field=None,
                     datetime_field=None, enum_field=None):
            self.test1 = test1
            self.test2 = test2
            self.test3 = test3
            self.test4 = test4
            self.bool_field = bool_field
            self.date_field = date_field
            self.time_field = time_field
            self.datetime_field = datetime_field
            self.enum_field = enum_field

        id = db.Column(db.Integer, primary_key=True)
        test1 = db.Column(db.String(20))
        test2 = db.Column(db.Unicode(20))
        test3 = db.Column(db.Text)
        test4 = db.Column(db.UnicodeText)
        bool_field = db.Column(db.Boolean)
        enum_field = db.Column(db.Enum('model1_v1', 'model1_v2'), nullable=True)
        date_field = db.Column(db.Date)
        time_field = db.Column(db.Time)
        datetime_field = db.Column(db.DateTime)

        # String representations used by the admin UI (e.g. option labels).
        def __unicode__(self):
            return self.test1

        def __str__(self):
            return self.test1

    class Model2(db.Model):
        def __init__(self, string_field=None, int_field=None, bool_field=None,
                     model1=None, float_field=None):
            self.string_field = string_field
            self.int_field = int_field
            self.bool_field = bool_field
            self.model1 = model1
            self.float_field = float_field

        id = db.Column(db.Integer, primary_key=True)
        string_field = db.Column(db.String)
        int_field = db.Column(db.Integer)
        bool_field = db.Column(db.Boolean)
        enum_field = db.Column(db.Enum('model2_v1', 'model2_v2'), nullable=True)
        float_field = db.Column(db.Float)

        # Relation
        model1_id = db.Column(db.Integer, db.ForeignKey(Model1.id))
        model1 = db.relationship(Model1, backref='model2')

    db.create_all()
    return Model1, Model2
def fill_db(db, Model1, Model2):
    """Insert the shared fixture rows used by the list/filter tests.

    Covers plain strings, linked Model2 rows, date/time/datetime pairs
    (one newer, one older), both enum values, and one mostly-empty row.
    """
    m1_a = Model1('test1_val_1', 'test2_val_1', bool_field=True)
    m1_b = Model1('test1_val_2', 'test2_val_2')
    m1_c = Model1('test1_val_3', 'test2_val_3')
    m1_d = Model1('test1_val_4', 'test2_val_4')

    # NOTE: insertion order matches the original fixture exactly, so the
    # auto-increment ids other tests rely on are unchanged.
    fixtures = [m1_a, m1_b, m1_c, m1_d]
    fixtures += [
        Model2('test2_val_1', model1=m1_a, float_field=None),
        Model2('test2_val_2', model1=m1_b, float_field=None),
        Model2('test2_val_3', int_field=5000, float_field=25.9),
        Model2('test2_val_4', int_field=9000, float_field=75.5),
    ]
    fixtures += [
        Model1('date_obj1', date_field=date(2014, 11, 17)),
        Model1('timeonly_obj1', time_field=time(11, 10, 9)),
        Model1('datetime_obj1', datetime_field=datetime(2014, 4, 3, 1, 9, 0)),
        Model1('date_obj2', date_field=date(2013, 10, 16)),
        Model1('timeonly_obj2', time_field=time(10, 9, 8)),
        Model1('datetime_obj2', datetime_field=datetime(2013, 3, 2, 0, 8, 0)),
        Model1('enum_obj1', enum_field="model1_v1"),
        Model1('enum_obj2', enum_field="model1_v2"),
        Model1(test2="empty_obj"),
    ]

    db.session.add_all(fixtures)
    db.session.commit()
def test_model():
    """End-to-end smoke test of a plain ModelView: introspected metadata,
    generated forms, and the list/create/edit/delete views."""
    app, db, admin = setup()
    Model1, Model2 = create_models(db)
    db.create_all()

    view = CustomModelView(Model1, db.session)
    admin.add_view(view)

    # Metadata introspected from the model.
    eq_(view.model, Model1)
    eq_(view.name, 'Model1')
    eq_(view.endpoint, 'model1')
    eq_(view._primary_key, 'id')

    # All four text columns are sortable by default.
    ok_('test1' in view._sortable_columns)
    ok_('test2' in view._sortable_columns)
    ok_('test3' in view._sortable_columns)
    ok_('test4' in view._sortable_columns)

    ok_(view._create_form_class is not None)
    ok_(view._edit_form_class is not None)
    # Search and filters are off unless explicitly configured.
    eq_(view._search_supported, False)
    eq_(view._filters, None)

    # Verify form
    eq_(view._create_form_class.test1.field_class, fields.StringField)
    eq_(view._create_form_class.test2.field_class, fields.StringField)
    eq_(view._create_form_class.test3.field_class, fields.TextAreaField)
    eq_(view._create_form_class.test4.field_class, fields.TextAreaField)

    # Make some test clients
    client = app.test_client()

    rv = client.get('/admin/model1/')
    eq_(rv.status_code, 200)

    rv = client.get('/admin/model1/new/')
    eq_(rv.status_code, 200)

    # A successful create redirects (302) back to the list view.
    rv = client.post('/admin/model1/new/',
                     data=dict(test1='test1large',
                               test2='test2',
                               time_field=time(0,0,0)))
    eq_(rv.status_code, 302)

    model = db.session.query(Model1).first()
    eq_(model.test1, u'test1large')
    eq_(model.test2, u'test2')
    eq_(model.test3, u'')
    eq_(model.test4, u'')

    rv = client.get('/admin/model1/')
    eq_(rv.status_code, 200)
    ok_(u'test1large' in rv.data.decode('utf-8'))

    url = '/admin/model1/edit/?id=%s' % model.id
    rv = client.get(url)
    eq_(rv.status_code, 200)

    # verify that midnight does not show as blank
    ok_(u'00:00:00' in rv.data.decode('utf-8'))

    rv = client.post(url,
                     data=dict(test1='test1small', test2='test2large'))
    eq_(rv.status_code, 302)

    model = db.session.query(Model1).first()
    eq_(model.test1, 'test1small')
    eq_(model.test2, 'test2large')
    eq_(model.test3, '')
    eq_(model.test4, '')

    # Deleting the only row empties the table.
    url = '/admin/model1/delete/?id=%s' % model.id
    rv = client.post(url)
    eq_(rv.status_code, 302)
    eq_(db.session.query(Model1).count(), 0)
@raises(Exception)
def test_no_pk():
    """Registering a model without a primary key must raise."""
    app, db, admin = setup()

    class Model(db.Model):
        # Deliberately declared without a primary key column.
        test = db.Column(db.Integer)

    admin.add_view(CustomModelView(Model))
def test_list_columns():
    """column_list restricts the list view; column_labels renames headers."""
    app, db, admin = setup()
    Model1, Model2 = create_models(db)

    view = CustomModelView(Model1, db.session,
                           column_list=['test1', 'test3'],
                           column_labels=dict(test1='Column1'))
    admin.add_view(view)

    eq_(len(view._list_columns), 2)
    eq_(view._list_columns, [('test1', 'Column1'), ('test3', 'Test3')])

    client = app.test_client()
    body = client.get('/admin/model1/').data.decode('utf-8')
    # The custom label renders; the excluded column's header does not.
    ok_('Column1' in body)
    ok_('Test2' not in body)
def test_exclude_columns():
    """column_exclude_list removes the named columns from the list view."""
    app, db, admin = setup()
    Model1, Model2 = create_models(db)

    view = CustomModelView(
        Model1, db.session,
        column_exclude_list=['test2', 'test4', 'enum_field', 'date_field',
                             'time_field', 'datetime_field'])
    admin.add_view(view)

    eq_(view._list_columns,
        [('test1', 'Test1'), ('test3', 'Test3'), ('bool_field', 'Bool Field')])

    client = app.test_client()
    body = client.get('/admin/model1/').data.decode('utf-8')
    ok_('Test1' in body)
    ok_('Test2' not in body)
def test_column_searchable_list():
    """Full-text search over directly configured columns."""
    app, db, admin = setup()
    Model1, Model2 = create_models(db)

    view = CustomModelView(Model2, db.session,
                           column_searchable_list=['string_field', 'int_field'])
    admin.add_view(view)

    eq_(view._search_supported, True)
    eq_(len(view._search_fields), 2)
    # Both entries resolve to actual columns with the configured names.
    for field, _filters in view._search_fields:
        ok_(isinstance(field, db.Column))
    eq_(view._search_fields[0][0].name, 'string_field')
    eq_(view._search_fields[1][0].name, 'int_field')

    db.session.add_all([Model2('model1-test', 5000),
                        Model2('model2-test', 9000)])
    db.session.commit()

    client = app.test_client()

    body = client.get('/admin/model2/?search=model1').data.decode('utf-8')
    ok_('model1-test' in body)
    ok_('model2-test' not in body)

    body = client.get('/admin/model2/?search=9000').data.decode('utf-8')
    ok_('model1-test' not in body)
    ok_('model2-test' in body)
def test_complex_searchable_list():
    """Search across a relation, configured both as a dotted string and as
    a column object."""
    app, db, admin = setup()
    Model1, Model2 = create_models(db)

    view = CustomModelView(Model2, db.session,
                           column_searchable_list=['model1.test1'])
    admin.add_view(view)

    parent1 = Model1('model1-test1-val')
    parent2 = Model1('model1-test2-val')
    db.session.add_all([parent1, parent2,
                        Model2('model2-test1-val', model1=parent1),
                        Model2('model2-test2-val', model1=parent2)])
    db.session.commit()

    client = app.test_client()

    # test relation string - 'model1.test1'
    body = client.get('/admin/model2/?search=model1-test1').data.decode('utf-8')
    ok_('model2-test1-val' in body)
    ok_('model2-test2-val' not in body)

    view2 = CustomModelView(Model1, db.session,
                            column_searchable_list=[Model2.string_field])
    admin.add_view(view2)

    # test relation object - Model2.string_field
    body = client.get('/admin/model1/?search=model2-test1').data.decode('utf-8')
    ok_('model1-test1-val' in body)
    ok_('model1-test2-val' not in body)
def test_complex_searchable_list_missing_children():
    """A dotted search column with no related rows must not break search."""
    app, db, admin = setup()
    Model1, Model2 = create_models(db)

    view = CustomModelView(Model1, db.session,
                           column_searchable_list=['test1',
                                                   'model2.string_field'])
    admin.add_view(view)

    # A row with no Model2 children at all.
    db.session.add(Model1('magic string'))
    db.session.commit()

    client = app.test_client()
    body = client.get('/admin/model1/?search=magic').data.decode('utf-8')
    ok_('magic string' in body)
def test_column_editable_list():
    """In-line (x-editable) editing: field rendering, the ajax update
    endpoint, its error cases, and editing a relation column."""
    app, db, admin = setup()
    Model1, Model2 = create_models(db)

    view = CustomModelView(Model1, db.session,
                           column_editable_list=[
                               'test1', 'enum_field'])
    admin.add_view(view)

    fill_db(db, Model1, Model2)

    client = app.test_client()

    # Test in-line edit field rendering
    rv = client.get('/admin/model1/')
    data = rv.data.decode('utf-8')
    ok_('data-role="x-editable"' in data)

    # Form - Test basic in-line edit functionality
    # (field keys are '<column>-<row id>')
    rv = client.post('/admin/model1/ajax/update/', data={
        'test1-1': 'change-success-1',
    })
    data = rv.data.decode('utf-8')
    ok_('Record was successfully saved.' == data)

    # ensure the value has changed
    rv = client.get('/admin/model1/')
    data = rv.data.decode('utf-8')
    ok_('change-success-1' in data)

    # Test validation error
    rv = client.post('/admin/model1/ajax/update/', data={
        'enum_field-1': 'problematic-input',
    })
    eq_(rv.status_code, 500)

    # Test invalid primary key
    rv = client.post('/admin/model1/ajax/update/', data={
        'test1-1000': 'problematic-input',
    })
    data = rv.data.decode('utf-8')
    eq_(rv.status_code, 500)

    # Test editing column not in column_editable_list
    rv = client.post('/admin/model1/ajax/update/', data={
        'test2-1': 'problematic-input',
    })
    data = rv.data.decode('utf-8')
    eq_(rv.status_code, 500)

    # Test in-line editing for relations
    view = CustomModelView(Model2, db.session,
                           column_editable_list=[
                               'model1'])
    admin.add_view(view)

    # Point row 1's relation at Model1 id 3 ('test1_val_3' from fill_db).
    rv = client.post('/admin/model2/ajax/update/', data={
        'model1-1': '3',
    })
    data = rv.data.decode('utf-8')
    ok_('Record was successfully saved.' == data)

    # confirm the value has changed
    rv = client.get('/admin/model2/')
    data = rv.data.decode('utf-8')
    ok_('test1_val_3' in data)
def test_column_filters():
    """End-to-end coverage of column_filters.

    For each supported column type (string, integer, float, date, datetime,
    time, enum, bool) this checks: the generated (index, operation) list,
    the flt<pos>_<index>=<value> URL scheme, invalid-value handling,
    filters on joined/related columns, named (human-readable) filter URLs,
    and a custom filter object on a relation.

    Note: the test accumulates views on one admin instance and depends on
    the exact request order below.
    """
    app, db, admin = setup()

    Model1, Model2 = create_models(db)

    view = CustomModelView(
        Model1, db.session,
        column_filters=['test1']
    )
    admin.add_view(view)

    client = app.test_client()

    eq_(len(view._filters), 7)

    eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Test1']],
        [
            (0, u'contains'),
            (1, u'not contains'),
            (2, u'equals'),
            (3, u'not equal'),
            (4, u'empty'),
            (5, u'in list'),
            (6, u'not in list'),
        ])

    # Test filter that references property
    view = CustomModelView(Model2, db.session,
                           column_filters=['model1'])

    # A relation filter expands to one group per column of the target model.
    eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Model1 / Test1']],
        [
            (0, u'contains'),
            (1, u'not contains'),
            (2, u'equals'),
            (3, u'not equal'),
            (4, u'empty'),
            (5, u'in list'),
            (6, u'not in list'),
        ])

    eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Model1 / Test2']],
        [
            (7, u'contains'),
            (8, u'not contains'),
            (9, u'equals'),
            (10, u'not equal'),
            (11, u'empty'),
            (12, u'in list'),
            (13, u'not in list'),
        ])

    eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Model1 / Test3']],
        [
            (14, u'contains'),
            (15, u'not contains'),
            (16, u'equals'),
            (17, u'not equal'),
            (18, u'empty'),
            (19, u'in list'),
            (20, u'not in list'),
        ])

    eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Model1 / Test4']],
        [
            (21, u'contains'),
            (22, u'not contains'),
            (23, u'equals'),
            (24, u'not equal'),
            (25, u'empty'),
            (26, u'in list'),
            (27, u'not in list'),
        ])

    eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Model1 / Bool Field']],
        [
            (28, u'equals'),
            (29, u'not equal'),
        ])

    eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Model1 / Enum Field']],
        [
            (30, u'equals'),
            (31, u'not equal'),
            (32, u'empty'),
            (33, u'in list'),
            (34, u'not in list'),
        ])

    # Test filter with a dot (single related column, not the whole relation)
    view = CustomModelView(Model2, db.session,
                           column_filters=['model1.bool_field'])

    eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Model1 / Bool Field']],
        [
            (0, 'equals'),
            (1, 'not equal'),
        ])

    # Test column_labels on filters
    view = CustomModelView(Model2, db.session,
                           column_filters=['model1.bool_field', 'string_field'],
                           column_labels={
                               'model1.bool_field': 'Test Filter #1',
                               'string_field': 'Test Filter #2',
                           })

    eq_(list(view._filter_groups.keys()), [u'Test Filter #1', u'Test Filter #2'])

    fill_db(db, Model1, Model2)

    # Test contains (flt0_0 is the 'contains' filter per the list above)
    rv = client.get('/admin/model1/?flt0_0=test1_val_1')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    # the filter value is always in "data"
    # need to check a different column than test1 for the expected row
    ok_('test2_val_1' in data)
    ok_('test1_val_2' not in data)

    # Test NOT IN filter
    rv = client.get('/admin/model1/?flt0_6=test1_val_1')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test1_val_2' in data)
    ok_('test2_val_1' not in data)

    # Test string filter
    view = CustomModelView(Model1, db.session,
                           column_filters=['test1'], endpoint='_strings')
    admin.add_view(view)

    eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Test1']],
        [
            (0, 'contains'),
            (1, 'not contains'),
            (2, 'equals'),
            (3, 'not equal'),
            (4, 'empty'),
            (5, 'in list'),
            (6, 'not in list'),
        ])

    # string - contains (flt0_0)
    rv = client.get('/admin/_strings/?flt0_0=test1_val_1')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test2_val_1' in data)
    ok_('test1_val_2' not in data)

    # string - not contains (flt0_1)
    rv = client.get('/admin/_strings/?flt0_1=test1_val_1')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test2_val_1' not in data)
    ok_('test1_val_2' in data)

    # string - equals (flt0_2)
    rv = client.get('/admin/_strings/?flt0_2=test1_val_1')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test2_val_1' in data)
    ok_('test1_val_2' not in data)

    # string - not equal (flt0_3)
    rv = client.get('/admin/_strings/?flt0_3=test1_val_1')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test2_val_1' not in data)
    ok_('test1_val_2' in data)

    # string - empty
    rv = client.get('/admin/_strings/?flt0_4=1')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('empty_obj' in data)
    ok_('test1_val_1' not in data)
    ok_('test1_val_2' not in data)

    # string - not empty
    rv = client.get('/admin/_strings/?flt0_4=0')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('empty_obj' not in data)
    ok_('test1_val_1' in data)
    ok_('test1_val_2' in data)

    # string - in list (comma-separated, URL-encoded as %2C)
    rv = client.get('/admin/_strings/?flt0_5=test1_val_1%2Ctest1_val_2')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test2_val_1' in data)
    ok_('test2_val_2' in data)
    ok_('test1_val_3' not in data)
    ok_('test1_val_4' not in data)

    # string - not in list
    rv = client.get('/admin/_strings/?flt0_6=test1_val_1%2Ctest1_val_2')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test2_val_1' not in data)
    ok_('test2_val_2' not in data)
    ok_('test1_val_3' in data)
    ok_('test1_val_4' in data)

    # Test integer filter
    view = CustomModelView(Model2, db.session,
                           column_filters=['int_field'])
    admin.add_view(view)

    eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Int Field']],
        [
            (0, 'equals'),
            (1, 'not equal'),
            (2, 'greater than'),
            (3, 'smaller than'),
            (4, 'empty'),
            (5, 'in list'),
            (6, 'not in list'),
        ])

    # integer - equals
    rv = client.get('/admin/model2/?flt0_0=5000')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test2_val_3' in data)
    ok_('test2_val_4' not in data)

    # integer - equals - test validation
    rv = client.get('/admin/model2/?flt0_0=badval')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('Invalid Filter Value' in data)

    # integer - not equal
    rv = client.get('/admin/model2/?flt0_1=5000')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test2_val_3' not in data)
    ok_('test2_val_4' in data)

    # integer - greater
    rv = client.get('/admin/model2/?flt0_2=6000')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test2_val_3' not in data)
    ok_('test2_val_4' in data)

    # integer - smaller
    rv = client.get('/admin/model2/?flt0_3=6000')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test2_val_3' in data)
    ok_('test2_val_4' not in data)

    # integer - empty
    rv = client.get('/admin/model2/?flt0_4=1')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test2_val_1' in data)
    ok_('test2_val_2' in data)
    ok_('test2_val_3' not in data)
    ok_('test2_val_4' not in data)

    # integer - not empty
    rv = client.get('/admin/model2/?flt0_4=0')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test2_val_1' not in data)
    ok_('test2_val_2' not in data)
    ok_('test2_val_3' in data)
    ok_('test2_val_4' in data)

    # integer - in list
    rv = client.get('/admin/model2/?flt0_5=5000%2C9000')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test2_val_1' not in data)
    ok_('test2_val_2' not in data)
    ok_('test2_val_3' in data)
    ok_('test2_val_4' in data)

    # integer - in list - test validation
    rv = client.get('/admin/model2/?flt0_5=5000%2Cbadval')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('Invalid Filter Value' in data)

    # integer - not in list
    rv = client.get('/admin/model2/?flt0_6=5000%2C9000')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test2_val_1' in data)
    ok_('test2_val_2' in data)
    ok_('test2_val_3' not in data)
    ok_('test2_val_4' not in data)

    # Test float filter
    view = CustomModelView(Model2, db.session, column_filters=['float_field'],
                           endpoint="_float")
    admin.add_view(view)

    eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Float Field']],
        [
            (0, 'equals'),
            (1, 'not equal'),
            (2, 'greater than'),
            (3, 'smaller than'),
            (4, 'empty'),
            (5, 'in list'),
            (6, 'not in list'),
        ])

    # float - equals
    rv = client.get('/admin/_float/?flt0_0=25.9')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test2_val_3' in data)
    ok_('test2_val_4' not in data)

    # float - equals - test validation
    rv = client.get('/admin/_float/?flt0_0=badval')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('Invalid Filter Value' in data)

    # float - not equal
    rv = client.get('/admin/_float/?flt0_1=25.9')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test2_val_3' not in data)
    ok_('test2_val_4' in data)

    # float - greater
    rv = client.get('/admin/_float/?flt0_2=60.5')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test2_val_3' not in data)
    ok_('test2_val_4' in data)

    # float - smaller
    rv = client.get('/admin/_float/?flt0_3=60.5')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test2_val_3' in data)
    ok_('test2_val_4' not in data)

    # float - empty
    rv = client.get('/admin/_float/?flt0_4=1')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test2_val_1' in data)
    ok_('test2_val_2' in data)
    ok_('test2_val_3' not in data)
    ok_('test2_val_4' not in data)

    # float - not empty
    rv = client.get('/admin/_float/?flt0_4=0')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test2_val_1' not in data)
    ok_('test2_val_2' not in data)
    ok_('test2_val_3' in data)
    ok_('test2_val_4' in data)

    # float - in list
    rv = client.get('/admin/_float/?flt0_5=25.9%2C75.5')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test2_val_1' not in data)
    ok_('test2_val_2' not in data)
    ok_('test2_val_3' in data)
    ok_('test2_val_4' in data)

    # float - in list - test validation
    rv = client.get('/admin/_float/?flt0_5=25.9%2Cbadval')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('Invalid Filter Value' in data)

    # float - not in list
    rv = client.get('/admin/_float/?flt0_6=25.9%2C75.5')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test2_val_1' in data)
    ok_('test2_val_2' in data)
    ok_('test2_val_3' not in data)
    ok_('test2_val_4' not in data)

    # Test filters to joined table field
    view = CustomModelView(
        Model2, db.session,
        endpoint='_model2',
        column_filters=['model1.bool_field'],
        column_list=[
            'string_field',
            'model1.id',
            'model1.bool_field',
        ]
    )
    admin.add_view(view)

    rv = client.get('/admin/_model2/?flt1_0=1')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test2_val_1' in data)
    ok_('test2_val_2' not in data)
    ok_('test2_val_3' not in data)
    ok_('test2_val_4' not in data)

    # Test human readable URLs (named_filter_urls uses flt<pos>_<name>_<op>)
    view = CustomModelView(
        Model1, db.session,
        column_filters=['test1'],
        endpoint='_model3',
        named_filter_urls=True
    )
    admin.add_view(view)

    rv = client.get('/admin/_model3/?flt1_test1_equals=test1_val_1')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test1_val_1' in data)
    ok_('test1_val_2' not in data)

    # Test date, time, and datetime filters
    view = CustomModelView(Model1, db.session,
                           column_filters=['date_field', 'datetime_field', 'time_field'],
                           endpoint="_datetime")
    admin.add_view(view)

    eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Date Field']],
        [
            (0, 'equals'),
            (1, 'not equal'),
            (2, 'greater than'),
            (3, 'smaller than'),
            (4, 'between'),
            (5, 'not between'),
            (6, 'empty'),
        ])

    eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Datetime Field']],
        [
            (7, 'equals'),
            (8, 'not equal'),
            (9, 'greater than'),
            (10, 'smaller than'),
            (11, 'between'),
            (12, 'not between'),
            (13, 'empty'),
        ])

    eq_([(f['index'], f['operation']) for f in view._filter_groups[u'Time Field']],
        [
            (14, 'equals'),
            (15, 'not equal'),
            (16, 'greater than'),
            (17, 'smaller than'),
            (18, 'between'),
            (19, 'not between'),
            (20, 'empty'),
        ])

    # date - equals
    rv = client.get('/admin/_datetime/?flt0_0=2014-11-17')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('date_obj1' in data)
    ok_('date_obj2' not in data)

    # date - not equal
    rv = client.get('/admin/_datetime/?flt0_1=2014-11-17')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('date_obj1' not in data)
    ok_('date_obj2' in data)

    # date - greater
    rv = client.get('/admin/_datetime/?flt0_2=2014-11-16')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('date_obj1' in data)
    ok_('date_obj2' not in data)

    # date - smaller
    rv = client.get('/admin/_datetime/?flt0_3=2014-11-16')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('date_obj1' not in data)
    ok_('date_obj2' in data)

    # date - between ("<start> to <end>")
    rv = client.get('/admin/_datetime/?flt0_4=2014-11-13+to+2014-11-20')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('date_obj1' in data)
    ok_('date_obj2' not in data)

    # date - not between
    rv = client.get('/admin/_datetime/?flt0_5=2014-11-13+to+2014-11-20')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('date_obj1' not in data)
    ok_('date_obj2' in data)

    # date - empty
    rv = client.get('/admin/_datetime/?flt0_6=1')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test1_val_1' in data)
    ok_('date_obj1' not in data)
    ok_('date_obj2' not in data)

    # date - not empty
    rv = client.get('/admin/_datetime/?flt0_6=0')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test1_val_1' not in data)
    ok_('date_obj1' in data)
    ok_('date_obj2' in data)

    # datetime - equals
    rv = client.get('/admin/_datetime/?flt0_7=2014-04-03+01%3A09%3A00')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('datetime_obj1' in data)
    ok_('datetime_obj2' not in data)

    # datetime - not equal
    rv = client.get('/admin/_datetime/?flt0_8=2014-04-03+01%3A09%3A00')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('datetime_obj1' not in data)
    ok_('datetime_obj2' in data)

    # datetime - greater
    rv = client.get('/admin/_datetime/?flt0_9=2014-04-03+01%3A08%3A00')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('datetime_obj1' in data)
    ok_('datetime_obj2' not in data)

    # datetime - smaller
    rv = client.get('/admin/_datetime/?flt0_10=2014-04-03+01%3A08%3A00')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('datetime_obj1' not in data)
    ok_('datetime_obj2' in data)

    # datetime - between
    rv = client.get('/admin/_datetime/?flt0_11=2014-04-02+00%3A00%3A00+to+2014-11-20+23%3A59%3A59')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('datetime_obj1' in data)
    ok_('datetime_obj2' not in data)

    # datetime - not between
    rv = client.get('/admin/_datetime/?flt0_12=2014-04-02+00%3A00%3A00+to+2014-11-20+23%3A59%3A59')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('datetime_obj1' not in data)
    ok_('datetime_obj2' in data)

    # datetime - empty
    rv = client.get('/admin/_datetime/?flt0_13=1')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test1_val_1' in data)
    ok_('datetime_obj1' not in data)
    ok_('datetime_obj2' not in data)

    # datetime - not empty
    rv = client.get('/admin/_datetime/?flt0_13=0')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test1_val_1' not in data)
    ok_('datetime_obj1' in data)
    ok_('datetime_obj2' in data)

    # time - equals
    rv = client.get('/admin/_datetime/?flt0_14=11%3A10%3A09')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('timeonly_obj1' in data)
    ok_('timeonly_obj2' not in data)

    # time - not equal
    rv = client.get('/admin/_datetime/?flt0_15=11%3A10%3A09')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('timeonly_obj1' not in data)
    ok_('timeonly_obj2' in data)

    # time - greater
    rv = client.get('/admin/_datetime/?flt0_16=11%3A09%3A09')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('timeonly_obj1' in data)
    ok_('timeonly_obj2' not in data)

    # time - smaller
    rv = client.get('/admin/_datetime/?flt0_17=11%3A09%3A09')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('timeonly_obj1' not in data)
    ok_('timeonly_obj2' in data)

    # time - between
    rv = client.get('/admin/_datetime/?flt0_18=10%3A40%3A00+to+11%3A50%3A59')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('timeonly_obj1' in data)
    ok_('timeonly_obj2' not in data)

    # time - not between
    rv = client.get('/admin/_datetime/?flt0_19=10%3A40%3A00+to+11%3A50%3A59')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('timeonly_obj1' not in data)
    ok_('timeonly_obj2' in data)

    # time - empty
    rv = client.get('/admin/_datetime/?flt0_20=1')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test1_val_1' in data)
    ok_('timeonly_obj1' not in data)
    ok_('timeonly_obj2' not in data)

    # time - not empty
    rv = client.get('/admin/_datetime/?flt0_20=0')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test1_val_1' not in data)
    ok_('timeonly_obj1' in data)
    ok_('timeonly_obj2' in data)

    # Test enum filter
    view = CustomModelView(Model1, db.session,
                           column_filters=['enum_field'],
                           endpoint="_enumfield")
    admin.add_view(view)

    # enum - equals
    rv = client.get('/admin/_enumfield/?flt0_0=model1_v1')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('enum_obj1' in data)
    ok_('enum_obj2' not in data)

    # enum - not equal
    rv = client.get('/admin/_enumfield/?flt0_1=model1_v1')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('enum_obj1' not in data)
    ok_('enum_obj2' in data)

    # enum - empty
    rv = client.get('/admin/_enumfield/?flt0_2=1')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test1_val_1' in data)
    ok_('enum_obj1' not in data)
    ok_('enum_obj2' not in data)

    # enum - not empty
    rv = client.get('/admin/_enumfield/?flt0_2=0')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test1_val_1' not in data)
    ok_('enum_obj1' in data)
    ok_('enum_obj2' in data)

    # enum - in list
    rv = client.get('/admin/_enumfield/?flt0_3=model1_v1%2Cmodel1_v2')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test1_val_1' not in data)
    ok_('enum_obj1' in data)
    ok_('enum_obj2' in data)

    # enum - not in list
    rv = client.get('/admin/_enumfield/?flt0_4=model1_v1%2Cmodel1_v2')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test1_val_1' in data)
    ok_('enum_obj1' not in data)
    ok_('enum_obj2' not in data)

    # Test single custom filter on relation
    view = CustomModelView(Model2, db.session,
                           column_filters = [
                               filters.FilterEqual(Model1.test1, "Test1")
                           ], endpoint='_relation_test')
    admin.add_view(view)

    rv = client.get('/admin/_relation_test/?flt1_0=test1_val_1')
    data = rv.data.decode('utf-8')
    ok_('test1_val_1' in data)
    ok_('test1_val_2' not in data)
def test_url_args():
    """List-view query-string handling: paging, sorting, full-text search
    (including the '^' prefix-anchor) and the flt<n>/flt<n>v filter args."""
    app, db, admin = setup()

    Model1, Model2 = create_models(db)

    view = CustomModelView(Model1, db.session,
                           page_size=2,
                           column_searchable_list=['test1'],
                           column_filters=['test1'])
    admin.add_view(view)

    db.session.add(Model1('data1'))
    db.session.add(Model1('data2'))
    db.session.add(Model1('data3'))
    db.session.add(Model1('data4'))
    db.session.commit()

    client = app.test_client()

    # page_size=2, so the first page holds data1/data2 only
    rv = client.get('/admin/model1/')
    data = rv.data.decode('utf-8')
    ok_('data1' in data)
    ok_('data3' not in data)

    # page (0-based; page=1 is the second page)
    rv = client.get('/admin/model1/?page=1')
    data = rv.data.decode('utf-8')
    ok_('data1' not in data)
    ok_('data3' in data)

    # sort
    rv = client.get('/admin/model1/?sort=0&desc=1')
    data = rv.data.decode('utf-8')
    ok_('data1' not in data)
    ok_('data3' in data)
    ok_('data4' in data)

    # search
    rv = client.get('/admin/model1/?search=data1')
    data = rv.data.decode('utf-8')
    ok_('data1' in data)
    ok_('data2' not in data)

    rv = client.get('/admin/model1/?search=^data1')
    data = rv.data.decode('utf-8')
    ok_('data2' not in data)

    # like
    rv = client.get('/admin/model1/?flt0=0&flt0v=data1')
    data = rv.data.decode('utf-8')
    ok_('data1' in data)

    # not like
    rv = client.get('/admin/model1/?flt0=1&flt0v=data1')
    data = rv.data.decode('utf-8')
    ok_('data2' in data)
def test_non_int_pk():
    """CRUD round-trip for a model whose primary key is a string."""
    app, db, admin = setup()

    class Model(db.Model):
        id = db.Column(db.String, primary_key=True)
        test = db.Column(db.String)

    db.create_all()

    view = CustomModelView(Model, db.session, form_columns=['id', 'test'])
    admin.add_view(view)

    client = app.test_client()

    rv = client.get('/admin/model/')
    eq_(rv.status_code, 200)

    # Create a row; a successful POST redirects back to the list view.
    rv = client.post('/admin/model/new/',
                     data=dict(id='test1', test='test2'))
    eq_(rv.status_code, 302)

    rv = client.get('/admin/model/')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test1' in data)

    # Edit view must resolve the string PK from the query string.
    rv = client.get('/admin/model/edit/?id=test1')
    eq_(rv.status_code, 200)
    data = rv.data.decode('utf-8')
    ok_('test2' in data)
def test_form_columns():
    """form_columns / form_excluded_columns control which fields the
    scaffolded form gets; relation columns become QuerySelectField."""
    app, db, admin = setup()

    class Model(db.Model):
        id = db.Column(db.String, primary_key=True)
        int_field = db.Column(db.Integer)
        datetime_field = db.Column(db.DateTime)
        text_field = db.Column(db.UnicodeText)
        excluded_column = db.Column(db.String)

    class ChildModel(db.Model):
        id = db.Column(db.String, primary_key=True)
        model_id = db.Column(db.Integer, db.ForeignKey(Model.id))
        model = db.relationship(Model, backref='backref')

    db.create_all()

    view1 = CustomModelView(Model, db.session, endpoint='view1',
                            form_columns=('int_field', 'text_field'))
    view2 = CustomModelView(Model, db.session, endpoint='view2',
                            form_excluded_columns=('excluded_column',))
    view3 = CustomModelView(ChildModel, db.session, endpoint='view3')

    form1 = view1.create_form()
    form2 = view2.create_form()
    form3 = view3.create_form()

    # Whitelisted fields present, everything else dropped.
    ok_('int_field' in form1._fields)
    ok_('text_field' in form1._fields)
    ok_('datetime_field' not in form1._fields)

    ok_('excluded_column' not in form2._fields)

    # Relation scaffolds to a QuerySelectField.
    ok_(type(form3.model).__name__ == 'QuerySelectField')

    # TODO: form_args
def test_form_override():
    """form_overrides should replace the generated field class per column."""
    app, db, admin = setup()

    class Model(db.Model):
        id = db.Column(db.String, primary_key=True)
        test = db.Column(db.String)

    db.create_all()

    plain = CustomModelView(Model, db.session, endpoint='view1')
    overridden = CustomModelView(Model, db.session, endpoint='view2',
                                 form_overrides=dict(test=fields.FileField))
    admin.add_view(plain)
    admin.add_view(overridden)

    # Default scaffolding yields a StringField; the override swaps it.
    eq_(plain._create_form_class.test.field_class, fields.StringField)
    eq_(overridden._create_form_class.test.field_class, fields.FileField)
def test_form_onetoone():
    """A one-to-one relation (uselist=False backref) must render a
    single-select widget on both sides of the relationship."""
    app, db, admin = setup()

    class Model1(db.Model):
        id = db.Column(db.Integer, primary_key=True)
        test = db.Column(db.String)

    class Model2(db.Model):
        id = db.Column(db.Integer, primary_key=True)
        model1_id = db.Column(db.Integer, db.ForeignKey(Model1.id))
        model1 = db.relationship(Model1, backref=db.backref('model2', uselist=False))

    db.create_all()

    view1 = CustomModelView(Model1, db.session, endpoint='view1')
    view2 = CustomModelView(Model2, db.session, endpoint='view2')
    admin.add_view(view1)
    admin.add_view(view2)

    model1 = Model1(test='test')
    model2 = Model2(model1=model1)
    db.session.add(model1)
    db.session.add(model2)
    db.session.commit()

    # Sanity check the ORM wiring in both directions.
    eq_(model1.model2, model2)
    eq_(model2.model1, model1)

    # Neither side's widget may allow multiple selection.
    eq_(view1._create_form_class.model2.kwargs['widget'].multiple, False)
    eq_(view2._create_form_class.model1.kwargs['widget'].multiple, False)
def test_relations():
    """Placeholder for relation-handling tests (not implemented yet)."""
    # TODO: test relations
    pass
def test_on_model_change_delete():
    """on_model_change must run for both create and edit (uppercasing
    test1 here), and on_model_delete must fire before deletion."""
    app, db, admin = setup()
    Model1, _ = create_models(db)
    db.create_all()

    class ModelView(CustomModelView):
        def on_model_change(self, form, model, is_created):
            model.test1 = model.test1.upper()

        def on_model_delete(self, model):
            # Record on the view so the test can observe the hook fired.
            self.deleted = True

    view = ModelView(Model1, db.session)
    admin.add_view(view)

    client = app.test_client()

    # Create: hook uppercases the stored value.
    client.post('/admin/model1/new/',
                data=dict(test1='test1large', test2='test2'))

    model = db.session.query(Model1).first()
    eq_(model.test1, 'TEST1LARGE')

    # Edit: hook runs again on update.
    url = '/admin/model1/edit/?id=%s' % model.id
    client.post(url, data=dict(test1='test1small', test2='test2large'))

    model = db.session.query(Model1).first()
    eq_(model.test1, 'TEST1SMALL')

    # Delete: hook sets the flag.
    url = '/admin/model1/delete/?id=%s' % model.id
    client.post(url)
    ok_(view.deleted)
def test_multiple_delete():
    """The bulk 'delete' list action removes every selected row."""
    app, db, admin = setup()
    M1, _ = create_models(db)

    db.session.add_all([M1('a'), M1('b'), M1('c')])
    db.session.commit()
    eq_(M1.query.count(), 3)

    # NOTE(review): uses plain ModelView while sibling tests use
    # CustomModelView -- presumably imported at module level; confirm.
    view = ModelView(M1, db.session)
    admin.add_view(view)

    client = app.test_client()

    # POST the action with all three row ids; success redirects (302).
    rv = client.post('/admin/model1/action/', data=dict(action='delete', rowid=[1, 2, 3]))
    eq_(rv.status_code, 302)
    eq_(M1.query.count(), 0)
def test_default_sort():
    """Rows come back ordered by column_default_sort ('test1' ascending)."""
    app, db, admin = setup()
    M1, _ = create_models(db)

    # Insert deliberately out of order.
    db.session.add_all([M1(value) for value in ('c', 'b', 'a')])
    db.session.commit()
    eq_(M1.query.count(), 3)

    view = CustomModelView(M1, db.session, column_default_sort='test1')
    admin.add_view(view)

    _, rows = view.get_list(0, None, None, None, None)

    eq_(len(rows), 3)
    for position, expected in enumerate('abc'):
        eq_(rows[position].test1, expected)
def test_complex_sort():
    """Sorting by a related column, given either as a dotted string
    ('model1.test1') or as an InstrumentedAttribute (M2.string_field)."""
    app, db, admin = setup()
    M1, M2 = create_models(db)

    m1 = M1('b')
    db.session.add(m1)
    db.session.add(M2('c', model1=m1))

    m2 = M1('a')
    db.session.add(m2)
    db.session.add(M2('c', model1=m2))

    db.session.commit()

    # test sorting on relation string - 'model1.test1'
    view = CustomModelView(M2, db.session,
                           column_list = ['string_field', 'model1.test1'],
                           column_sortable_list = ['model1.test1'])
    admin.add_view(view)

    client = app.test_client()

    rv = client.get('/admin/model2/?sort=1')
    eq_(rv.status_code, 200)

    # test sorting on relation object - M2.string_field
    view2 = CustomModelView(M1, db.session,
                            column_list = ['model2.string_field'],
                            column_sortable_list = [M2.string_field])
    admin.add_view(view2)

    client = app.test_client()

    rv = client.get('/admin/model1/?sort=1')
    eq_(rv.status_code, 200)

    data = rv.data.decode('utf-8')
    ok_('Sort by' in data)
def test_default_complex_sort():
    """column_default_sort accepts a dotted related-column path and the
    list comes back ordered by it."""
    app, db, admin = setup()
    M1, M2 = create_models(db)

    m1 = M1('b')
    db.session.add(m1)
    db.session.add(M2('c', model1=m1))

    m2 = M1('a')
    db.session.add(m2)
    db.session.add(M2('c', model1=m2))

    db.session.commit()

    view = CustomModelView(M2, db.session, column_default_sort='model1.test1')
    admin.add_view(view)

    _, data = view.get_list(0, None, None, None, None)

    eq_(len(data), 2)
    eq_(data[0].model1.test1, 'a')
    eq_(data[1].model1.test1, 'b')
def test_extra_fields():
    """form_extra_fields adds a field; without form_columns it renders
    after the model's own fields."""
    app, db, admin = setup()

    Model1, _ = create_models(db)

    view = CustomModelView(
        Model1, db.session,
        form_extra_fields={
            'extra_field': fields.StringField('Extra Field')
        }
    )
    admin.add_view(view)

    client = app.test_client()

    rv = client.get('/admin/model1/new/')
    eq_(rv.status_code, 200)

    # Check presence and order
    data = rv.data.decode('utf-8')
    ok_('Extra Field' in data)
    pos1 = data.find('Extra Field')
    pos2 = data.find('Test1')
    # Model field 'Test1' comes before the appended extra field.
    ok_(pos2 < pos1)
def test_extra_field_order():
    """With form_columns, an extra field can be placed before model
    fields -- ordering follows form_columns, not declaration order."""
    app, db, admin = setup()

    Model1, _ = create_models(db)

    view = CustomModelView(
        Model1, db.session,
        form_columns=('extra_field', 'test1'),
        form_extra_fields={
            'extra_field': fields.StringField('Extra Field')
        }
    )
    admin.add_view(view)

    client = app.test_client()

    rv = client.get('/admin/model1/new/')
    eq_(rv.status_code, 200)

    # Check presence and order
    data = rv.data.decode('utf-8')
    pos1 = data.find('Extra Field')
    pos2 = data.find('Test1')
    # 'Extra Field' must now precede 'Test1'.
    ok_(pos2 > pos1)
def test_modelview_localization():
    """Render the list and create views under every supported locale to
    catch translation/template errors; report the failing locale."""
    def test_locale(locale):
        try:
            app, db, admin = setup()
            app.config['BABEL_DEFAULT_LOCALE'] = locale
            babel = Babel(app)

            Model1, _ = create_models(db)

            view = CustomModelView(
                Model1, db.session,
                column_filters=['test1', 'bool_field', 'date_field', 'datetime_field', 'time_field']
            )

            admin.add_view(view)

            client = app.test_client()

            rv = client.get('/admin/model1/')
            eq_(rv.status_code, 200)

            rv = client.get('/admin/model1/new/')
            eq_(rv.status_code, 200)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
            # pass through; test failures are still reported and re-raised.
            print("Error on the following locale:", locale)
            raise

    locales = ['en', 'cs', 'de', 'es', 'fa', 'fr', 'pt', 'ru', 'zh_CN', 'zh_TW']
    for locale in locales:
        test_locale(locale)
def test_custom_form_base():
    """form_base_class must be used as the base of the scaffolded form."""
    app, db, admin = setup()

    class TestForm(form.BaseForm):
        pass

    Model1, _ = create_models(db)

    view = CustomModelView(Model1, db.session, form_base_class=TestForm)
    admin.add_view(view)

    # Scaffolding still picks up the model's columns...
    ok_(hasattr(view._create_form_class, 'test1'))

    # ...and instances of the generated form inherit from the custom base.
    generated = view.create_form()
    ok_(isinstance(generated, TestForm))
def test_ajax_fk():
    """form_ajax_refs on a many-to-one relation: loader lookup, widget
    rendering, the AJAX lookup endpoint, and form submission."""
    app, db, admin = setup()

    Model1, Model2 = create_models(db)

    view = CustomModelView(
        Model2, db.session,
        url='view',
        form_ajax_refs={
            'model1': {
                'fields': ('test1', 'test2')
            }
        }
    )
    admin.add_view(view)

    ok_(u'model1' in view._form_ajax_refs)

    model = Model1(u'first')
    model2 = Model1(u'foo', u'bar')
    db.session.add_all([model, model2])
    db.session.commit()

    # Check loader
    loader = view._form_ajax_refs[u'model1']
    mdl = loader.get_one(model.id)
    eq_(mdl.test1, model.test1)

    items = loader.get_list(u'fir')
    eq_(len(items), 1)
    eq_(items[0].id, model.id)

    # Search also matches the secondary field ('test2' == 'bar').
    items = loader.get_list(u'bar')
    eq_(len(items), 1)
    eq_(items[0].test1, u'foo')

    # Check form generation
    form = view.create_form()
    eq_(form.model1.__class__.__name__, u'AjaxSelectField')

    with app.test_request_context('/admin/view/'):
        ok_(u'value=""' not in form.model1())

        form.model1.data = model
        # NOTE(review): the inner double quotes around "first" look like they
        # may have been HTML-escaped (&quot;) originally and lost in transit;
        # verify against the rendered widget output.
        ok_(u'data-json="[%s, "first"]"' % model.id in form.model1())
        ok_(u'value="1"' in form.model1())

    # Check querying
    client = app.test_client()

    req = client.get(u'/admin/view/ajax/lookup/?name=model1&query=foo')
    eq_(req.data.decode('utf-8'), u'[[%s, "foo"]]' % model2.id)

    # Check submitting
    req = client.post('/admin/view/new/', data={u'model1': as_unicode(model.id)})
    mdl = db.session.query(Model2).first()

    ok_(mdl is not None)
    ok_(mdl.model1 is not None)
    eq_(mdl.model1.id, model.id)
    eq_(mdl.model1.test1, u'first')
def test_ajax_fk_multi():
    """form_ajax_refs on a many-to-many (secondary table) relation:
    multi-select field generation and submission."""
    app, db, admin = setup()

    class Model1(db.Model):
        __tablename__ = 'model1'

        id = db.Column(db.Integer, primary_key=True)
        name = db.Column(db.String(20))

        def __str__(self):
            return self.name

    table = db.Table('m2m', db.Model.metadata,
                     db.Column('model1_id', db.Integer, db.ForeignKey('model1.id')),
                     db.Column('model2_id', db.Integer, db.ForeignKey('model2.id'))
                     )

    class Model2(db.Model):
        __tablename__ = 'model2'

        id = db.Column(db.Integer, primary_key=True)
        name = db.Column(db.String(20))

        model1_id = db.Column(db.Integer(), db.ForeignKey(Model1.id))
        model1 = db.relationship(Model1, backref='models2', secondary=table)

    db.create_all()

    view = CustomModelView(
        Model2, db.session,
        url='view',
        form_ajax_refs={
            'model1': {
                'fields': ['name']
            }
        }
    )
    admin.add_view(view)

    ok_(u'model1' in view._form_ajax_refs)

    model = Model1(name=u'first')
    db.session.add_all([model, Model1(name=u'foo')])
    db.session.commit()

    # Check form generation (m2m yields the *multiple* variant)
    form = view.create_form()
    eq_(form.model1.__class__.__name__, u'AjaxSelectMultipleField')

    with app.test_request_context('/admin/view/'):
        ok_(u'data-json="[]"' in form.model1())

        form.model1.data = [model]
        # NOTE(review): inner quotes around "first" may be missing their
        # HTML escaping (&quot;) -- confirm against rendered widget output.
        ok_(u'data-json="[[1, "first"]]"' in form.model1())

    # Check submitting
    client = app.test_client()
    client.post('/admin/view/new/', data={u'model1': as_unicode(model.id)})
    mdl = db.session.query(Model2).first()

    ok_(mdl is not None)
    ok_(mdl.model1 is not None)
    eq_(len(mdl.model1), 1)
def test_safe_redirect():
    """After creating a record, the ?url= return target is honored only
    when it is same-origin; external URLs are replaced with a safe local
    default."""
    app, db, admin = setup()
    Model1, _ = create_models(db)
    db.create_all()

    view = CustomModelView(Model1, db.session)
    admin.add_view(view)

    client = app.test_client()

    # Same-origin url= survives into the redirect query string.
    rv = client.post('/admin/model1/new/?url=http://localhost/admin/model2view/',
                     data=dict(test1='test1large', test2='test2'))

    eq_(rv.status_code, 302)
    assert_true(rv.location.startswith('http://localhost/admin/model1/edit/'))
    assert_true('url=http%3A%2F%2Flocalhost%2Fadmin%2Fmodel2view%2F' in rv.location)
    assert_true('id=1' in rv.location)

    # External url= is discarded in favor of the local list view.
    rv = client.post('/admin/model1/new/?url=http://google.com/evil/',
                     data=dict(test1='test1large', test2='test2'))

    eq_(rv.status_code, 302)
    assert_true(rv.location.startswith('http://localhost/admin/model1/edit/'))
    assert_true('url=%2Fadmin%2Fmodel1%2F' in rv.location)
    assert_true('id=2' in rv.location)
def test_simple_list_pager():
    """With simple_list_pager on, get_list must skip the count query and
    report the total as None."""
    app, db, admin = setup()
    Model1, _ = create_models(db)
    db.create_all()

    class TestModelView(CustomModelView):
        simple_list_pager = True

        def get_count_query(self):
            # Must never be reached when the simple pager is enabled.
            assert False

    view = TestModelView(Model1, db.session)
    admin.add_view(view)

    count, _ = view.get_list(0, None, None, None, None)
    assert_true(count is None)
def test_advanced_joins():
    """Internal join machinery: _get_field_with_path resolves dotted paths
    (including two-hop ones) and _apply_path_joins reuses already-applied
    joins instead of duplicating them."""
    app, db, admin = setup()

    class Model1(db.Model):
        id = db.Column(db.Integer, primary_key=True)
        val1 = db.Column(db.String(20))
        test = db.Column(db.String(20))

    class Model2(db.Model):
        id = db.Column(db.Integer, primary_key=True)
        val2 = db.Column(db.String(20))

        model1_id = db.Column(db.Integer, db.ForeignKey(Model1.id))
        model1 = db.relationship(Model1, backref='model2')

    class Model3(db.Model):
        id = db.Column(db.Integer, primary_key=True)
        val2 = db.Column(db.String(20))

        model2_id = db.Column(db.Integer, db.ForeignKey(Model2.id))
        model2 = db.relationship(Model2, backref='model3')

    view1 = CustomModelView(Model1, db.session)
    admin.add_view(view1)

    view2 = CustomModelView(Model2, db.session)
    admin.add_view(view2)

    view3 = CustomModelView(Model3, db.session)
    admin.add_view(view3)

    # Test joins
    attr, path = view2._get_field_with_path('model1.val1')
    eq_(attr, Model1.val1)
    eq_(path, [Model2.model1])

    attr, path = view1._get_field_with_path('model2.val2')
    eq_(attr, Model2.val2)
    # Identity comparison: the path element is the very same attribute object.
    eq_(id(path[0]), id(Model1.model2))

    # Two-hop dotted path.
    attr, path = view3._get_field_with_path('model2.model1.val1')
    eq_(attr, Model1.val1)
    eq_(path, [Model3.model2, Model2.model1])

    # Test how joins are applied
    query = view3.get_query()

    joins = {}
    q1, joins, alias = view3._apply_path_joins(query, joins, path)
    ok_((True, Model3.model2) in joins)
    ok_((True, Model2.model1) in joins)
    ok_(alias is not None)

    # Check if another join would use same path
    attr, path = view2._get_field_with_path('model1.test')
    q2, joins, alias = view2._apply_path_joins(query, joins, path)

    eq_(len(joins), 2)
    for p in q2._join_entities:
        ok_(p in q1._join_entities)

    ok_(alias is not None)

    # Check if normal properties are supported by _get_field_with_path
    attr, path = view2._get_field_with_path(Model1.test)
    eq_(attr, Model1.test)
    eq_(path, [Model1.__table__])

    q3, joins, alias = view2._apply_path_joins(view2.get_query(), joins, path)
    eq_(len(joins), 3)
    ok_(alias is None)
def test_multipath_joins():
    """A model with two relationships to the same target must still render."""
    app, db, admin = setup()

    class Model1(db.Model):
        id = db.Column(db.Integer, primary_key=True)
        val1 = db.Column(db.String(20))
        test = db.Column(db.String(20))

    class Model2(db.Model):
        id = db.Column(db.Integer, primary_key=True)
        val2 = db.Column(db.String(20))

        # Two foreign keys into Model1 create an ambiguous (multi-path) join.
        first_id = db.Column(db.Integer, db.ForeignKey(Model1.id))
        first = db.relationship(Model1, backref='first', foreign_keys=[first_id])

        second_id = db.Column(db.Integer, db.ForeignKey(Model1.id))
        second = db.relationship(Model1, backref='second', foreign_keys=[second_id])

    db.create_all()

    # Filtering along one of the ambiguous paths must not break the list view.
    admin.add_view(CustomModelView(Model2, db.session, filters=['first.test']))

    response = app.test_client().get('/admin/model2/')
    eq_(response.status_code, 200)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Recurrent Neural Network estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib.estimator.python.estimator import extenders
from tensorflow.contrib.feature_column.python.feature_column import sequence_feature_column as seq_fc
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.canned import optimizers
from tensorflow.python.feature_column import feature_column as feature_column_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import core as core_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variable_scope
from tensorflow.python.summary import summary
from tensorflow.python.training import optimizer as optimizer_lib
from tensorflow.python.training import training_util
# The defaults are historical artifacts of the initial implementation, but seem
# reasonable choices.
_DEFAULT_LEARNING_RATE = 0.05
_DEFAULT_CLIP_NORM = 5.0

# Maps the user-facing cell-type strings to their `tf.nn.rnn_cell` classes.
_CELL_TYPES = {'basic_rnn': rnn_cell.BasicRNNCell,
               'lstm': rnn_cell.BasicLSTMCell,
               'gru': rnn_cell.GRUCell}

# Indicates no value was provided by the user to a kwarg.
USE_DEFAULT = object()
def _single_rnn_cell(num_units, cell_type):
  """Constructs a single RNN cell with `num_units` hidden units.

  Args:
    num_units: Integer number of hidden units for the cell.
    cell_type: A subclass of `tf.nn.rnn_cell.RNNCell`, or one of the strings
      in `_CELL_TYPES` (`'basic_rnn'`, `'lstm'`, `'gru'`).

  Returns:
    An instance of the requested `RNNCell` subclass.

  Raises:
    ValueError: If `cell_type` is neither a supported string nor an
      `RNNCell` subclass.
  """
  cell_type = _CELL_TYPES.get(cell_type, cell_type)
  # `issubclass` raises TypeError when its first argument is not a class
  # (e.g. an unrecognized string falls through `.get` unchanged), so check
  # `isinstance(cell_type, type)` first to always raise the documented
  # ValueError instead.
  if not isinstance(cell_type, type) or not issubclass(cell_type,
                                                       rnn_cell.RNNCell):
    raise ValueError('Supported cell types are {}; got {}'.format(
        list(_CELL_TYPES.keys()), cell_type))
  return cell_type(num_units=num_units)
def _make_rnn_cell_fn(num_units, cell_type='basic_rnn'):
  """Builds an `rnn_cell_fn` for the canned RNN Estimators.

  Args:
    num_units: Iterable of integer hidden-unit counts, one per RNN layer.
    cell_type: A subclass of `tf.nn.rnn_cell.RNNCell` or a string specifying
      the cell type. Supported strings are: `'basic_rnn'`, `'lstm'`, and
      `'gru'`.

  Returns:
    A function mapping a `tf.estimator.ModeKeys` value to an instance
    derived from `tf.nn.rnn_cell.RNNCell`.

  Raises:
    ValueError: If cell_type is not supported.
  """

  def rnn_cell_fn(mode):
    # `mode` is part of the rnn_cell_fn contract (user-supplied functions may
    # vary behavior per mode, e.g. dropout), but this canned builder ignores it.
    del mode
    layers = [_single_rnn_cell(units, cell_type) for units in num_units]
    # A single layer is returned bare; stacks are wrapped in a MultiRNNCell.
    return layers[0] if len(layers) == 1 else rnn_cell.MultiRNNCell(layers)

  return rnn_cell_fn
def _concatenate_context_input(sequence_input, context_input):
  """Replicates `context_input` across all timesteps of `sequence_input`.

  Expands dimension 1 of `context_input` then tiles it `sequence_length` times.
  This value is appended to `sequence_input` on dimension 2 and the result is
  returned.

  Args:
    sequence_input: A `Tensor` of dtype `float32` and shape `[batch_size,
      padded_length, d0]`.
    context_input: A `Tensor` of dtype `float32` and shape `[batch_size, d1]`.

  Returns:
    A `Tensor` of dtype `float32` and shape `[batch_size, padded_length,
    d0 + d1]`.

  Raises:
    ValueError: If `sequence_input` does not have rank 3 or `context_input` does
      not have rank 2.
  """
  # Graph-mode rank/dtype assertions; they only fire because they are wired in
  # as control dependencies of the ops below.
  seq_rank_check = check_ops.assert_rank(
      sequence_input,
      3,
      message='sequence_input must have rank 3',
      data=[array_ops.shape(sequence_input)])
  seq_type_check = check_ops.assert_type(
      sequence_input,
      dtypes.float32,
      message='sequence_input must have dtype float32; got {}.'.format(
          sequence_input.dtype))
  ctx_rank_check = check_ops.assert_rank(
      context_input,
      2,
      message='context_input must have rank 2',
      data=[array_ops.shape(context_input)])
  ctx_type_check = check_ops.assert_type(
      context_input,
      dtypes.float32,
      message='context_input must have dtype float32; got {}.'.format(
          context_input.dtype))
  with ops.control_dependencies(
      [seq_rank_check, seq_type_check, ctx_rank_check, ctx_type_check]):
    padded_length = array_ops.shape(sequence_input)[1]
    # [batch, d1] -> [batch, 1, d1], tiled to [batch, padded_length, d1].
    tiled_context_input = array_ops.tile(
        array_ops.expand_dims(context_input, 1),
        array_ops.concat([[1], [padded_length], [1]], 0))
  # Append the replicated context features to every timestep.
  return array_ops.concat([sequence_input, tiled_context_input], 2)
def _select_last_activations(activations, sequence_lengths):
  """Selects the nth set of activations for each n in `sequence_length`.

  Returns a `Tensor` of shape `[batch_size, k]`. If `sequence_length` is not
  `None`, then `output[i, :] = activations[i, sequence_length[i] - 1, :]`. If
  `sequence_length` is `None`, then `output[i, :] = activations[i, -1, :]`.

  Args:
    activations: A `Tensor` with shape `[batch_size, padded_length, k]`.
    sequence_lengths: A `Tensor` with shape `[batch_size]` or `None`.

  Returns:
    A `Tensor` of shape `[batch_size, k]`.
  """
  with ops.name_scope(
      'select_last_activations', values=[activations, sequence_lengths]):
    activations_shape = array_ops.shape(activations)
    batch_size = activations_shape[0]
    padded_length = activations_shape[1]
    output_units = activations_shape[2]
    if sequence_lengths is None:
      # No lengths given: every row uses its final (padded) timestep.
      sequence_lengths = padded_length
    # Flatten [batch, time, k] -> [batch * time, k] and gather one row per
    # example at flat offset i * padded_length + sequence_lengths[i] - 1.
    start_indices = math_ops.to_int64(
        math_ops.range(batch_size) * padded_length)
    last_indices = start_indices + sequence_lengths - 1
    reshaped_activations = array_ops.reshape(
        activations, [batch_size * padded_length, output_units])
    last_activations = array_ops.gather(reshaped_activations, last_indices)
    # Restore static shape information lost by the dynamic reshape/gather.
    last_activations.set_shape([activations.shape[0], activations.shape[2]])
    return last_activations
def _rnn_logit_fn_builder(output_units, rnn_cell_fn, sequence_feature_columns,
                          context_feature_columns, input_layer_partitioner):
  """Function builder for a rnn logit_fn.

  Args:
    output_units: An int indicating the dimension of the logit layer.
    rnn_cell_fn: A function with one argument, a `tf.estimator.ModeKeys`, and
      returns an object of type `tf.nn.rnn_cell.RNNCell`.
    sequence_feature_columns: An iterable containing the `FeatureColumn`s
      that represent sequential input.
    context_feature_columns: An iterable containing the `FeatureColumn`s
      that represent contextual input.
    input_layer_partitioner: Partitioner for input layer.

  Returns:
    A logit_fn (see below).

  Raises:
    ValueError: If output_units is not an int.
  """
  if not isinstance(output_units, int):
    raise ValueError('output_units must be an int. Given type: {}'.format(
        type(output_units)))

  def rnn_logit_fn(features, mode):
    """Recurrent Neural Network logit_fn.

    Args:
      features: This is the first item returned from the `input_fn`
        passed to `train`, `evaluate`, and `predict`. This should be a
        single `Tensor` or `dict` of same.
      mode: Optional. Specifies if this is training, evaluation or prediction.
        See `ModeKeys`.

    Returns:
      A `Tensor` representing the logits.
    """
    with variable_scope.variable_scope(
        'sequence_input_layer',
        values=tuple(six.itervalues(features)),
        partitioner=input_layer_partitioner):
      # Build the per-timestep input tensor; `sequence_length` carries the
      # true (unpadded) length of each example.
      sequence_input, sequence_length = seq_fc.sequence_input_layer(
          features=features, feature_columns=sequence_feature_columns)
      summary.histogram('sequence_length', sequence_length)

      if context_feature_columns:
        # Context (non-sequential) features are replicated across timesteps
        # and concatenated onto the sequence input.
        context_input = feature_column_lib.input_layer(
            features=features,
            feature_columns=context_feature_columns)
        sequence_input = _concatenate_context_input(sequence_input,
                                                    context_input)

    cell = rnn_cell_fn(mode)
    # Ignore output state.
    rnn_outputs, _ = rnn.dynamic_rnn(
        cell=cell,
        inputs=sequence_input,
        sequence_length=sequence_length,
        dtype=dtypes.float32,
        time_major=False)
    # Only the activation at the last real (unpadded) timestep feeds the
    # logits layer.
    last_activations = _select_last_activations(rnn_outputs, sequence_length)

    with variable_scope.variable_scope('logits', values=(rnn_outputs,)):
      logits = core_layers.dense(
          last_activations,
          units=output_units,
          activation=None,
          kernel_initializer=init_ops.glorot_uniform_initializer())
    return logits

  return rnn_logit_fn
def _rnn_model_fn(features,
                  labels,
                  mode,
                  head,
                  rnn_cell_fn,
                  sequence_feature_columns,
                  context_feature_columns,
                  optimizer='Adagrad',
                  input_layer_partitioner=None,
                  config=None):
  """Recurrent Neural Net model_fn.

  Args:
    features: dict of `Tensor` and `SparseTensor` objects returned from
      `input_fn`.
    labels: `Tensor` of shape [batch_size, 1] or [batch_size] with labels.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    head: A `head_lib._Head` instance.
    rnn_cell_fn: A function with one argument, a `tf.estimator.ModeKeys`, and
      returns an object of type `tf.nn.rnn_cell.RNNCell`.
    sequence_feature_columns: Iterable containing `FeatureColumn`s that
      represent sequential model inputs.
    context_feature_columns: Iterable containing `FeatureColumn`s that
      represent model inputs not associated with a specific timestep.
    optimizer: String, `tf.Optimizer` object, or callable that creates the
      optimizer to use for training. If not specified, will use the Adagrad
      optimizer with a default learning rate of 0.05 and gradient clip norm of
      5.0.
    input_layer_partitioner: Partitioner for input layer. Defaults
      to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.
    config: `RunConfig` object to configure the runtime settings.

  Returns:
    An `EstimatorSpec` instance.

  Raises:
    ValueError: If mode or optimizer is invalid, or features has the wrong type.
  """
  if not isinstance(features, dict):
    raise ValueError('features should be a dictionary of `Tensor`s. '
                     'Given type: {}'.format(type(features)))

  # If user does not provide an optimizer instance, use the optimizer specified
  # by the string with default learning rate and gradient clipping.
  if not isinstance(optimizer, optimizer_lib.Optimizer):
    optimizer = optimizers.get_optimizer_instance(
        optimizer, learning_rate=_DEFAULT_LEARNING_RATE)
    optimizer = extenders.clip_gradients_by_norm(optimizer, _DEFAULT_CLIP_NORM)

  num_ps_replicas = config.num_ps_replicas if config else 0
  partitioner = partitioned_variables.min_max_variable_partitioner(
      max_partitions=num_ps_replicas)
  with variable_scope.variable_scope(
      'rnn',
      values=tuple(six.itervalues(features)),
      partitioner=partitioner):
    input_layer_partitioner = input_layer_partitioner or (
        partitioned_variables.min_max_variable_partitioner(
            max_partitions=num_ps_replicas,
            min_slice_size=64 << 20))

    # The logit_fn encapsulates the whole feature -> RNN -> logits pipeline.
    logit_fn = _rnn_logit_fn_builder(
        output_units=head.logits_dimension,
        rnn_cell_fn=rnn_cell_fn,
        sequence_feature_columns=sequence_feature_columns,
        context_feature_columns=context_feature_columns,
        input_layer_partitioner=input_layer_partitioner)
    logits = logit_fn(features=features, mode=mode)

    def _train_op_fn(loss):
      """Returns the op to optimize the loss."""
      return optimizer.minimize(
          loss,
          global_step=training_util.get_global_step())

    # The head turns logits into the mode-appropriate EstimatorSpec
    # (loss/train op, eval metrics, or predictions).
    return head.create_estimator_spec(
        features=features,
        mode=mode,
        labels=labels,
        train_op_fn=_train_op_fn,
        logits=logits)
def _assert_rnn_cell_fn(rnn_cell_fn, num_units, cell_type):
  """Validates the cell arguments and returns a usable `rnn_cell_fn`."""
  if rnn_cell_fn:
    # A custom rnn_cell_fn is mutually exclusive with num_units/cell_type.
    if num_units or cell_type != USE_DEFAULT:
      raise ValueError(
          'num_units and cell_type must not be specified when using rnn_cell_fn'
      )
    return rnn_cell_fn
  # No custom function given: build one from num_units and (defaulted)
  # cell_type.
  effective_cell_type = 'basic_rnn' if cell_type == USE_DEFAULT else cell_type
  return _make_rnn_cell_fn(num_units, effective_cell_type)
class RNNClassifier(estimator.Estimator):
  """A classifier for TensorFlow RNN models.

  Trains a recurrent neural network model to classify instances into one of
  multiple classes.

  Example:

  ```python
  token_sequence = sequence_categorical_column_with_hash_bucket(...)
  token_emb = embedding_column(categorical_column=token_sequence, ...)

  estimator = RNNClassifier(
      sequence_feature_columns=[token_emb],
      num_units=[32, 16], cell_type='lstm')

  # Input builders
  def input_fn_train: # returns x, y
    pass
  estimator.train(input_fn=input_fn_train, steps=100)

  def input_fn_eval: # returns x, y
    pass
  metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)
  def input_fn_predict: # returns x, None
    pass
  predictions = estimator.predict(input_fn=input_fn_predict)
  ```

  Input of `train` and `evaluate` should have following features,
  otherwise there will be a `KeyError`:

  * if `weight_column` is not `None`, a feature with
    `key=weight_column` whose value is a `Tensor`.
  * for each `column` in `sequence_feature_columns`:
    - a feature with `key=column.name` whose `value` is a `SparseTensor`.
  * for each `column` in `context_feature_columns`:
    - if `column` is a `_CategoricalColumn`, a feature with `key=column.name`
      whose `value` is a `SparseTensor`.
    - if `column` is a `_WeightedCategoricalColumn`, two features: the first
      with `key` the id column name, the second with `key` the weight column
      name. Both features' `value` must be a `SparseTensor`.
    - if `column` is a `_DenseColumn`, a feature with `key=column.name`
      whose `value` is a `Tensor`.

  Loss is calculated by using softmax cross entropy.

  @compatibility(eager)
  Estimators are not compatible with eager execution.
  @end_compatibility
  """

  def __init__(self,
               sequence_feature_columns,
               context_feature_columns=None,
               num_units=None,
               cell_type=USE_DEFAULT,
               rnn_cell_fn=None,
               model_dir=None,
               n_classes=2,
               weight_column=None,
               label_vocabulary=None,
               optimizer='Adagrad',
               input_layer_partitioner=None,
               config=None):
    """Initializes a `RNNClassifier` instance.

    Args:
      sequence_feature_columns: An iterable containing the `FeatureColumn`s
        that represent sequential input. All items in the set should either be
        sequence columns (e.g. `sequence_numeric_column`) or constructed from
        one (e.g. `embedding_column` with `sequence_categorical_column_*` as
        input).
      context_feature_columns: An iterable containing the `FeatureColumn`s
        for contextual input. The data represented by these columns will be
        replicated and given to the RNN at each timestep. These columns must be
        instances of classes derived from `_DenseColumn` such as
        `numeric_column`, not the sequential variants.
      num_units: Iterable of integer number of hidden units per RNN layer. If
        set, `cell_type` must also be specified and `rnn_cell_fn` must be
        `None`.
      cell_type: A subclass of `tf.nn.rnn_cell.RNNCell` or a string specifying
        the cell type. Supported strings are: `'basic_rnn'`, `'lstm'`, and
        `'gru'`. If set, `num_units` must also be specified and `rnn_cell_fn`
        must be `None`.
      rnn_cell_fn: A function with one argument, a `tf.estimator.ModeKeys`, and
        returns an object of type `tf.nn.rnn_cell.RNNCell` that will be used to
        construct the RNN. If set, `num_units` and `cell_type` cannot be set.
        This is for advanced users who need additional customization beyond
        `num_units` and `cell_type`. Note that `tf.nn.rnn_cell.MultiRNNCell` is
        needed for stacked RNNs.
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator to
        continue training a previously saved model.
      n_classes: Number of label classes. Defaults to 2, namely binary
        classification. Must be > 1.
      weight_column: A string or a `_NumericColumn` created by
        `tf.feature_column.numeric_column` defining feature column representing
        weights. It is used to down weight or boost examples during training. It
        will be multiplied by the loss of the example. If it is a string, it is
        used as a key to fetch weight tensor from the `features`. If it is a
        `_NumericColumn`, raw tensor is fetched by key `weight_column.key`,
        then weight_column.normalizer_fn is applied on it to get weight tensor.
      label_vocabulary: A list of strings represents possible label values. If
        given, labels must be string type and have any value in
        `label_vocabulary`. If it is not given, that means labels are
        already encoded as integer or float within [0, 1] for `n_classes=2` and
        encoded as integer values in {0, 1,..., n_classes-1} for `n_classes`>2 .
        Also there will be errors if vocabulary is not provided and labels are
        string.
      optimizer: An instance of `tf.Optimizer` or string specifying optimizer
        type. Defaults to Adagrad optimizer.
      input_layer_partitioner: Optional. Partitioner for input layer. Defaults
        to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.
      config: `RunConfig` object to configure the runtime settings.

    Raises:
      ValueError: If `num_units`, `cell_type`, and `rnn_cell_fn` are not
        compatible.
    """
    # Validate the mutually exclusive cell arguments and resolve them into a
    # single cell-construction function.
    rnn_cell_fn = _assert_rnn_cell_fn(rnn_cell_fn, num_units, cell_type)

    if n_classes == 2:
      # Binary classification: sigmoid cross-entropy head.
      head = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(  # pylint: disable=protected-access
          weight_column=weight_column,
          label_vocabulary=label_vocabulary)
    else:
      # Multi-class classification: softmax cross-entropy head.
      head = head_lib._multi_class_head_with_softmax_cross_entropy_loss(  # pylint: disable=protected-access
          n_classes, weight_column=weight_column,
          label_vocabulary=label_vocabulary)

    def _model_fn(features, labels, mode, config):
      return _rnn_model_fn(
          features=features,
          labels=labels,
          mode=mode,
          head=head,
          rnn_cell_fn=rnn_cell_fn,
          sequence_feature_columns=tuple(sequence_feature_columns or []),
          context_feature_columns=tuple(context_feature_columns or []),
          optimizer=optimizer,
          input_layer_partitioner=input_layer_partitioner,
          config=config)

    super(RNNClassifier, self).__init__(
        model_fn=_model_fn, model_dir=model_dir, config=config)
class RNNEstimator(estimator.Estimator):
  """An Estimator for TensorFlow RNN models with user-specified head.

  Example:

  ```python
  token_sequence = sequence_categorical_column_with_hash_bucket(...)
  token_emb = embedding_column(categorical_column=token_sequence, ...)

  estimator = RNNEstimator(
      head=tf.contrib.estimator.regression_head(),
      sequence_feature_columns=[token_emb],
      num_units=[32, 16], cell_type='lstm')

  # Or with custom RNN cell:
  def rnn_cell_fn(mode):
    cells = [ tf.contrib.rnn.LSTMCell(size) for size in [32, 16] ]
    if mode == tf.estimator.ModeKeys.TRAIN:
      cells = [ tf.contrib.rnn.DropoutWrapper(cell, input_keep_prob=0.5)
                for cell in cells ]
    return tf.contrib.rnn.MultiRNNCell(cells)

  estimator = RNNEstimator(
      head=tf.contrib.estimator.regression_head(),
      sequence_feature_columns=[token_emb],
      rnn_cell_fn=rnn_cell_fn)

  # Input builders
  def input_fn_train: # returns x, y
    pass
  estimator.train(input_fn=input_fn_train, steps=100)

  def input_fn_eval: # returns x, y
    pass
  metrics = estimator.evaluate(input_fn=input_fn_eval, steps=10)
  def input_fn_predict: # returns x, None
    pass
  predictions = estimator.predict(input_fn=input_fn_predict)
  ```

  Input of `train` and `evaluate` should have following features,
  otherwise there will be a `KeyError`:

  * if the head's `weight_column` is not `None`, a feature with
    `key=weight_column` whose value is a `Tensor`.
  * for each `column` in `sequence_feature_columns`:
    - a feature with `key=column.name` whose `value` is a `SparseTensor`.
  * for each `column` in `context_feature_columns`:
    - if `column` is a `_CategoricalColumn`, a feature with `key=column.name`
      whose `value` is a `SparseTensor`.
    - if `column` is a `_WeightedCategoricalColumn`, two features: the first
      with `key` the id column name, the second with `key` the weight column
      name. Both features' `value` must be a `SparseTensor`.
    - if `column` is a `_DenseColumn`, a feature with `key=column.name`
      whose `value` is a `Tensor`.

  Loss and predicted output are determined by the specified head.

  @compatibility(eager)
  Estimators are not compatible with eager execution.
  @end_compatibility
  """

  def __init__(self,
               head,
               sequence_feature_columns,
               context_feature_columns=None,
               num_units=None,
               cell_type=USE_DEFAULT,
               rnn_cell_fn=None,
               model_dir=None,
               optimizer='Adagrad',
               input_layer_partitioner=None,
               config=None):
    """Initializes a `RNNEstimator` instance.

    Args:
      head: A `_Head` instance constructed with a method such as
        `tf.contrib.estimator.multi_label_head`. This specifies the model's
        output and loss function to be optimized.
      sequence_feature_columns: An iterable containing the `FeatureColumn`s
        that represent sequential input. All items in the set should either be
        sequence columns (e.g. `sequence_numeric_column`) or constructed from
        one (e.g. `embedding_column` with `sequence_categorical_column_*` as
        input).
      context_feature_columns: An iterable containing the `FeatureColumn`s
        for contextual input. The data represented by these columns will be
        replicated and given to the RNN at each timestep. These columns must be
        instances of classes derived from `_DenseColumn` such as
        `numeric_column`, not the sequential variants.
      num_units: Iterable of integer number of hidden units per RNN layer. If
        set, `cell_type` must also be specified and `rnn_cell_fn` must be
        `None`.
      cell_type: A subclass of `tf.nn.rnn_cell.RNNCell` or a string specifying
        the cell type. Supported strings are: `'basic_rnn'`, `'lstm'`, and
        `'gru'`. If set, `num_units` must also be specified and `rnn_cell_fn`
        must be `None`.
      rnn_cell_fn: A function with one argument, a `tf.estimator.ModeKeys`, and
        returns an object of type `tf.nn.rnn_cell.RNNCell` that will be used to
        construct the RNN. If set, `num_units` and `cell_type` cannot be set.
        This is for advanced users who need additional customization beyond
        `num_units` and `cell_type`. Note that `tf.nn.rnn_cell.MultiRNNCell` is
        needed for stacked RNNs.
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator to
        continue training a previously saved model.
      optimizer: An instance of `tf.Optimizer` or string specifying optimizer
        type. Defaults to Adagrad optimizer.
      input_layer_partitioner: Optional. Partitioner for input layer. Defaults
        to `min_max_variable_partitioner` with `min_slice_size` 64 << 20.
      config: `RunConfig` object to configure the runtime settings.

    Raises:
      ValueError: If `num_units`, `cell_type`, and `rnn_cell_fn` are not
        compatible.
    """
    # Validate the mutually exclusive cell arguments and resolve them into a
    # single cell-construction function.
    rnn_cell_fn = _assert_rnn_cell_fn(rnn_cell_fn, num_units, cell_type)

    def _model_fn(features, labels, mode, config):
      return _rnn_model_fn(
          features=features,
          labels=labels,
          mode=mode,
          head=head,
          rnn_cell_fn=rnn_cell_fn,
          sequence_feature_columns=tuple(sequence_feature_columns or []),
          context_feature_columns=tuple(context_feature_columns or []),
          optimizer=optimizer,
          input_layer_partitioner=input_layer_partitioner,
          config=config)

    super(RNNEstimator, self).__init__(
        model_fn=_model_fn, model_dir=model_dir, config=config)
| |
# This file is adapted from https://github.com/ray-project/ray/blob/master
# /examples/rl_pong/driver.py
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# play Pong https://gist.github.com/karpathy/a4166c7fe253700972fcbc77e4ea32c5.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import time
import gym
import numpy as np
import ray
from bigdl.orca import init_orca_context, stop_orca_context
from bigdl.orca import OrcaContext
# Force a UTF-8 locale for the worker processes.
os.environ["LANG"] = "C.UTF-8"

# Define some hyperparameters.

# The number of hidden layer neurons.
H = 200
# Step size used when applying the RMSProp update.
learning_rate = 1e-4
# Discount factor for reward.
gamma = 0.99
# The decay factor for RMSProp leaky sum of grad^2.
decay_rate = 0.99
# The input dimensionality: 80x80 grid.
D = 80 * 80
def sigmoid(x):
    """Squash *x* (scalar or ndarray) into the interval (0, 1)."""
    neg_exp = np.exp(-x)
    return 1.0 / (1.0 + neg_exp)
def preprocess(img):
    """Preprocess 210x160x3 uint8 frame into 6400 (80x80) 1D float vector.

    Args:
        img: A 210x160x3 uint8 Atari Pong frame.

    Returns:
        A flat float64 vector of length 6400 with 1.0 for foreground pixels
        (paddles, ball) and 0.0 for background.
    """
    # Crop the image.
    img = img[35:195]
    # Downsample by factor of 2 (keep only one color channel).
    img = img[::2, ::2, 0]
    # Erase background (background type 1).
    img[img == 144] = 0
    # Erase background (background type 2).
    img[img == 109] = 0
    # Set everything else (paddles, ball) to 1.
    img[img != 0] = 1
    # `np.float` was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # `float` yields the same dtype (float64).
    return img.astype(float).ravel()
def discount_rewards(r):
    """Compute discounted returns for a 1D float array of per-step rewards."""
    discounted = np.zeros_like(r)
    running_add = 0
    # Walk the episode backwards, accumulating gamma-discounted reward.
    for t in reversed(range(r.size)):
        if r[t] != 0:
            # A non-zero reward marks a game boundary (Pong specific!), so
            # the running sum is reset before incorporating it.
            running_add = 0
        running_add = running_add * gamma + r[t]
        discounted[t] = running_add
    return discounted
# defines the policy network
# x is a vector that holds the preprocessed pixel information
def policy_forward(x, model):
# neurons in the hidden layer (W1) can detect various game senarios
h = np.dot(model["W1"], x) # compute hidden layer neuron activations
h[h < 0] = 0 # ReLU nonlinearity. threhold at zero
# weights in W2 can then decide if each case we should go UP or DOWN
logp = np.dot(model["W2"], h) # compuate the log probability of going up
p = sigmoid(logp)
# Return probability of taking action 2, and hidden state.
return p, h
def policy_backward(eph, epx, epdlogp, model):
    """Backward pass through the policy network.

    Policy-gradient recipe: roll out episodes, take the gradient of the
    sampled actions, scale by the (advantage-modulated) score and sum over
    timesteps.

    Args:
        eph: Array of intermediate hidden states, one row per timestep.
        epx: Array of network inputs, one row per timestep.
        epdlogp: Array of modulated log-probability gradients.
        model: Dict holding the weight matrices "W1" and "W2".

    Returns:
        Dict of gradients for "W1" and "W2".
    """
    grad_w2 = eph.T.dot(epdlogp).ravel()
    hidden_grad = np.outer(epdlogp, model["W2"])
    hidden_grad[eph <= 0] = 0  # backprop through the ReLU
    grad_w1 = hidden_grad.T.dot(epx)
    return {"W1": grad_w1, "W2": grad_w2}
@ray.remote
class PongEnv(object):
    """Ray actor that plays one Pong episode and returns policy gradients."""

    def __init__(self):
        # Tell numpy to only use one core. If we don't do this, each actor may
        # try to use all of the cores and the resulting contention may result
        # in no speedup over the serial version. Note that if numpy is using
        # OpenBLAS, then you need to set OPENBLAS_NUM_THREADS=1, and you
        # probably need to do it from the command line (so it happens before
        # numpy is imported).
        os.environ["MKL_NUM_THREADS"] = "1"
        self.env = gym.make("Pong-v0")

    def compute_gradient(self, model):
        """Play one full episode with `model` and compute policy gradients.

        Args:
            model: Dict of weight matrices, {'W1': W1, 'W2': W2}.

        Returns:
            A tuple `(grads, reward_sum)`: the gradient dict produced by
            `policy_backward` and the episode's total reward.
        """
        # model = {'W1':W1, 'W2':W2}
        # given a model, run for one episode and return the parameter
        # to be updated and sum(reward)
        # Reset the game.
        observation = self.env.reset()
        # Note that prev_x is used in computing the difference frame.
        prev_x = None
        xs, hs, dlogps, drs = [], [], [], []
        reward_sum = 0
        done = False
        while not done:
            cur_x = preprocess(observation)
            x = cur_x - prev_x if prev_x is not None else np.zeros(D)
            prev_x = cur_x
            # feed difference frames into the network
            # so that it can detect motion
            aprob, h = policy_forward(x, model)
            # Sample an action.
            action = 2 if np.random.uniform() < aprob else 3
            # The observation.
            xs.append(x)
            # The hidden state.
            hs.append(h)
            y = 1 if action == 2 else 0  # A "fake label".
            # The gradient that encourages the action that was taken to be
            # taken (see http://cs231n.github.io/neural-networks-2/#losses if
            # confused).
            dlogps.append(y - aprob)
            observation, reward, done, info = self.env.step(action)
            reward_sum += reward
            # Record reward (has to be done after we call step() to get reward
            # for previous action).
            drs.append(reward)
        # Stack per-timestep records into episode matrices for backprop.
        epx = np.vstack(xs)
        eph = np.vstack(hs)
        epdlogp = np.vstack(dlogps)
        epr = np.vstack(drs)
        # Reset the array memory.
        xs, hs, dlogps, drs = [], [], [], []
        # Compute the discounted reward backward through time.
        discounted_epr = discount_rewards(epr)
        # Standardize the rewards to be unit normal (helps control the gradient
        # estimator variance).
        discounted_epr -= np.mean(discounted_epr)
        discounted_epr /= np.std(discounted_epr)
        # Modulate the gradient with advantage (the policy gradient magic
        # happens right here).
        epdlogp *= discounted_epr
        return policy_backward(eph, epx, epdlogp, model), reward_sum
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Train an RL agent")
    parser.add_argument('--cluster_mode', type=str, default="local",
                        help='The mode for the Spark cluster. local, yarn or spark-submit.')
    parser.add_argument("--batch_size", default=10, type=int,
                        help="The number of roll-outs to do per batch.")
    parser.add_argument("--iterations", default=-1, type=int,
                        help="The number of model updates to perform. By "
                             "default, training will not terminate.")
    parser.add_argument("--slave_num", type=int, default=2,
                        help="The number of slave nodes")
    parser.add_argument("--executor_cores", type=int, default=8,
                        help="The number of driver's cpu cores you want to use."
                             "You can change it depending on your own cluster setting.")
    parser.add_argument("--executor_memory", type=str, default="10g",
                        help="The size of slave(executor)'s memory you want to use."
                             "You can change it depending on your own cluster setting.")
    parser.add_argument("--driver_memory", type=str, default="2g",
                        help="The size of driver's memory you want to use."
                             "You can change it depending on your own cluster setting.")
    parser.add_argument("--driver_cores", type=int, default=8,
                        help="The number of driver's cpu cores you want to use."
                             "You can change it depending on your own cluster setting.")
    parser.add_argument("--extra_executor_memory_for_ray", type=str, default="20g",
                        help="The extra executor memory to store some data."
                             "You can change it depending on your own cluster setting.")
    parser.add_argument("--object_store_memory", type=str, default="4g",
                        help="The memory to store data on local."
                             "You can change it depending on your own cluster setting.")
    args = parser.parse_args()
    cluster_mode = args.cluster_mode
    # Initialize an Orca context for the requested cluster mode; this also
    # boots Ray so that PongEnv actors can be created below.
    if cluster_mode.startswith("yarn"):
        sc = init_orca_context(cluster_mode=cluster_mode,
                               cores=args.executor_cores,
                               memory=args.executor_memory,
                               init_ray_on_spark=True,
                               num_executors=args.slave_num,
                               driver_memory=args.driver_memory,
                               driver_cores=args.driver_cores,
                               extra_executor_memory_for_ray=args.extra_executor_memory_for_ray,
                               object_store_memory=args.object_store_memory)
        ray_ctx = OrcaContext.get_ray_context()
    elif cluster_mode == "local":
        sc = init_orca_context(cores=args.driver_cores)
        ray_ctx = OrcaContext.get_ray_context()
    elif cluster_mode == "spark-submit":
        sc = init_orca_context(cluster_mode=cluster_mode)
        ray_ctx = OrcaContext.get_ray_context()
    else:
        # BUG FIX: previously this branch only printed a message and fell
        # through, which later crashed with a NameError on `sc` and an
        # uninitialized Ray runtime. Fail fast with a clear error instead.
        raise ValueError(
            "init_orca_context failed. cluster_mode should be one of 'local', "
            "'yarn' and 'spark-submit' but got " + cluster_mode)
    batch_size = args.batch_size
    # Run the reinforcement learning.
    running_reward = None
    batch_num = 1
    model = {}
    # "Xavier" initialization (H and D are module-level layer sizes).
    model["W1"] = np.random.randn(H, D) / np.sqrt(D)
    model["W2"] = np.random.randn(H) / np.sqrt(H)
    # Update buffers that add up gradients over a batch.
    grad_buffer = {k: np.zeros_like(v) for k, v in model.items()}
    # Update the rmsprop memory.
    rmsprop_cache = {k: np.zeros_like(v) for k, v in model.items()}
    # One remote environment actor per roll-out in a batch.
    actors = [PongEnv.remote() for _ in range(batch_size)]
    iteration = 0
    # iterations == -1 (the default) makes this loop run forever.
    while iteration != args.iterations:
        iteration += 1
        model_id = ray.put(model)
        actions = []
        # Launch tasks to compute gradients from multiple rollouts in parallel.
        start_time = time.time()
        # Run roll-outs batch_size times.
        for i in range(batch_size):
            # compute_gradient returns two variables, so action_id is a list
            action_id = actors[i].compute_gradient.remote(model_id)
            actions.append(action_id)
        for i in range(batch_size):
            # wait for one actor to finish its operation;
            # action_id is the ready object id
            action_id, actions = ray.wait(actions)
            grad, reward_sum = ray.get(action_id[0])
            # Accumulate the gradient of each weight parameter over batch.
            for k in model:
                grad_buffer[k] += grad[k]
            # Exponential moving average of the episode reward.
            running_reward = (reward_sum if running_reward is None else
                              running_reward * 0.99 + reward_sum * 0.01)
        end_time = time.time()
        print("Batch {} computed {} rollouts in {} seconds, "
              "running mean is {}".format(batch_num, batch_size,
                                          end_time - start_time,
                                          running_reward))
        # Apply the accumulated gradients once per batch with RMSProp
        # (decay_rate and learning_rate are module-level hyperparameters).
        for k, v in model.items():
            g = grad_buffer[k]
            rmsprop_cache[k] = (decay_rate * rmsprop_cache[k] + (1 - decay_rate) * g ** 2)
            model[k] += learning_rate * g / (np.sqrt(rmsprop_cache[k]) + 1e-5)
            # Reset the batch gradient buffer.
            grad_buffer[k] = np.zeros_like(v)
        batch_num += 1
    stop_orca_context()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
# Licensed to the BBC under a Contributor Agreement: RJL
"""\
===========================================
Peer-to-Peer Streaming System (client part)
===========================================
This example demonstrates the use of BitTorrent and HTTP to download, share
and reconstruct a data stream in real-time.
It expects a webserver hosting a folder that contains:
- meta.txt (a file containing the number of chunks/torrents in the stream
so far as a decimal, ASCII string)
- 1.torrent
- 2.torrent
- ...
- 123.torrent (if meta.txt contained "123")
Only this metainfo is downloaded using HTTP. The stream itself is downloaded
(and uploaded to other downloaders) using BitTorrent.
Other users must upload the stream's chunks using BitTorrent for this demo
to work.
To listen to/view the stream, just point your favourite media player
(say, XMMS) at the reconstructed file after it's been downloading for a minute
or so.
"""
import time

from Axon.Component import component
from Axon.Ipc import producerFinished, shutdown

from Kamaelia.Chassis.Pipeline import pipeline
from Kamaelia.File.Writing import SimpleFileWriter

from Kamaelia.Community.RJL.Kamaelia.File.TriggeredFileReader import TriggeredFileReader
from Kamaelia.Community.RJL.Kamaelia.Protocol.HTTP.HTTPClient import SimpleHTTPClient
from Kamaelia.Community.RJL.Kamaelia.Protocol.Torrent.TorrentPatron import TorrentPatron
from Kamaelia.Community.RJL.Kamaelia.Protocol.Torrent.TorrentIPC import TIPCNewTorrentCreated, TIPCTorrentStatusUpdate
from Kamaelia.Community.RJL.Kamaelia.Util.Clock import CheapAndCheerfulClock
from Kamaelia.Community.RJL.Kamaelia.Util.DataSource import TriggeredSource
class StreamReconstructor(component):
    """\
    StreamReconstructor()

    This component receives reports on the status/completion of BitTorrent
    downloads from a TorrentPatron instance. It keeps a record of the
    order in which torrents were started and waits until the first is
    finished. It then outputs the filename of this torrent and removes
    it from its list. Then it waits for the second torrent (now the first
    on the list) to finish downloading, then outputs its filename and so on.
    If later torrents finish before earlier ones, their filenames are not
    output until all their predecessors have finished.
    The purpose of this is to output the names of files whose contents should
    be concatenated to a master file to reconstruct the stream.
    """
    def main(self):
        # List of [torrentid, savefolder] pairs in start order; index 0 is
        # always the oldest torrent that has not yet completed.
        torrents = []
        while 1:
            yield 1
            while self.dataReady("inbox"):
                msg = self.recv("inbox")
                if isinstance(msg, TIPCNewTorrentCreated):
                    torrents.append([msg.torrentid, msg.savefolder]) # add the new torrent to the list of known torrents
                elif isinstance(msg, TIPCTorrentStatusUpdate):
                    # if the status update is about the oldest torrent that
                    # has not been completed prior to now, then...
                    if len(torrents) > 0 and msg.torrentid == torrents[0][0]:
                        # if this oldest torrent is now complete
                        if msg.statsdictionary.get("fractionDone",0) == 1:
                            # forward on the name of the file downloaded in this torrent
                            self.send(torrents[0][1], "outbox")
                            torrents.pop(0) # and remove it from our list of torrents that we care about
            while self.dataReady("control"):
                msg = self.recv("control")
                if isinstance(msg, shutdown) or isinstance(msg, producerFinished):
                    # if we are being told to shutdown then do so,
                    # notifying downstream components first
                    self.send(producerFinished(self), "signal")
                    return
            # Nothing waiting on either inbox: sleep until a message arrives.
            self.pause()
class PartsFilenameGenerator(component):
    """\
    PartsFilenameGenerator()

    Arguments:
    - prefix   -- string prepended to the id of a torrent to make its URL
    - [suffix] -- string appended to the id of a torrent to make its URL,
                  defaults to ".torrent"

    Generates the URLs of the .torrents that make up the stream from
    reports of the total number of chunks/torrents in the stream that
    are received on "inbox".

    e.g. Assuming it was created as
    PartsFilenameGenerator("http://www.example.com/", ".torrent"),
    send it "3" and it will output (one message listed per line):
    - "http://www.example.com/1.torrent"
    - "http://www.example.com/2.torrent"
    - "http://www.example.com/3.torrent"
    Then send it "3" again and it will output nothing.
    Now send it "5" and it will output:
    - "http://www.example.com/4.torrent"
    - "http://www.example.com/5.torrent"
    """
    # BUG FIXES relative to the previous revision:
    # - missing ":" on the __init__ def line (SyntaxError)
    # - super() arguments were reversed: super(self, cls) -> super(cls, self)
    # - main() initialized `highestseensofar` but used `highestsofar`
    # - stray ":" after self.recv("control") (SyntaxError)
    def __init__(self, prefix, suffix = ".torrent"):
        super(PartsFilenameGenerator, self).__init__()
        self.prefix = prefix
        self.suffix = suffix
    def main(self):
        highestsofar = 0 # we have not outputted any torrent URLs so far
        while 1:
            yield 1
            while self.dataReady("inbox"):
                msg = int(self.recv("inbox"))
                # output the URLs of all the torrents whose numbers are > the
                # number of last torrent output and <= the value of message received
                while highestsofar < msg:
                    highestsofar += 1
                    self.send(self.prefix + str(highestsofar) + self.suffix, "outbox")
            while self.dataReady("control"):
                msg = self.recv("control")
                if isinstance(msg, shutdown) or isinstance(msg, producerFinished):
                    self.send(producerFinished(self), "signal")
                    return
            self.pause()
def P2PStreamer(torrentsfolder):
    """\
    Build and return a pipeline component that reconstructs the P2P stream,
    outputting the stream's raw contents chunk by chunk.

    Arguments:
    - torrentsfolder, e.g. "http://my.server.example.org/radioFoo/"
    """
    # Create a pipeline of components whose net result is to output the contents of a certain URL
    # (torrentsfolder + metafilename) every 60 seconds (the contents at the time of output, i.e.
    # it fetches the page every 60 seconds).
    poller = pipeline(
        # This generates a message every 60 seconds to wake TriggeredSource
        # allowing us to poll the meta file without busy-waiting.
        CheapAndCheerfulClock(60.0),
        # This sends the string (torrentsfolder + "meta.txt") every time it receives a message
        # This string will be the URL of the meta file on the torrent hosting website
        # e.g. "http://my.server.example.org/radioFoo/meta.txt"
        TriggeredSource(torrentsfolder + "meta.txt"),
        # SimpleHTTPClient retrieves the resource specified by the message it receives,
        # which will be URL string.
        # i.e. It fetches the page whose URL is (torrentsfolder + "meta.txt") (the string
        # produced by TriggeredSource) and forwards on the contents of that page.
        # The contents of that particular page will always be a number
        # (in the form of a decimal ASCII string) which represents the number of
        # 'chunks' of the stream that exist
        SimpleHTTPClient()
    )
    # As a whole, streamer acts like a normal streaming client, outputting the contents of
    # a stream to its outbox, although in much larger chunks with longer in between chunks
    # than for a typical stream.
    streamer = pipeline(
        # fetch the P2P-stream meta file every 60 seconds and send its contents on
        poller,
        # PartsFilenameGenerator uses the number retrieved by poller
        # i.e. the number of chunks/torrents in the stream
        # to generate the URLs of all the .torrent files
        # (torrent metadata files) that make up the stream.
        # (They will have been named 1.torrent,
        # 2.torrent, 3.torrent ... etc. on the server).
        PartsFilenameGenerator(torrentsfolder, ".torrent"),
        # Download these .torrent files (each message received by resourcefetcher
        # will be the URL of one .torrent file it should download). The
        # contents of the page downloaded is forwarded on to the next component.
        # NOTE: this downloads the .torrent file (metadata about part of the
        # stream) not the stream itself
        SimpleHTTPClient(),
        # now use BitTorrent to download the stream itself using the
        # metadata retrieved from .torrent files (each has information about a
        # section of the stream - a section itself is typically a few MB of data)
        # (TorrentPatron is a BitTorrent client component)
        TorrentPatron(),
        # output the names of the chunks of the stream as soon as they and
        # all previous chunks have been downloaded
        StreamReconstructor(),
        # read the contents of these chunks (files)
        TriggeredFileReader(),
    )
    return streamer
if __name__ == '__main__':
    # ask the user from which website we should get the stream's metadata
    # e.g. "http://my.server.example.org/radioFoo/"
    torrentsfolder = raw_input("P2P-stream meta folder (URL): ")
    # BUG FIX: previously this read `streamer = P2PStreamer(...)`, making it a
    # keyword argument followed by a positional one -- a SyntaxError. The
    # pipeline just takes the streamer positionally.
    pipeline(
        # fetch the stream using BitTorrent and HTTP - see above for details
        P2PStreamer(torrentsfolder),
        # write the stream to a file on disk
        SimpleFileWriter("myreconstructedstream.mp3")
    ).run()
| |
"""Statistical Language Processing tools. (Chapter 23)
We define Unigram and Ngram text models, use them to generate random text,
and show the Viterbi algorithm for segmentation of letters into words.
Then we show a very simple Information Retrieval system, and an example
working on a tiny sample of Unix manual pages."""
from math import log, exp
import re
from Other_AIMA_Scripts import search
from utils import *
class CountingProbDist(probability.ProbDist):
    """A probability distribution formed by observing and counting examples.
    If P is an instance of this class and o
    is an observed value, then there are 3 main operations:
    p.add(o) increments the count for observation o by 1.
    p.sample() returns a random element from the distribution.
    p[o] returns the probability for o (as in a regular ProbDist)."""
    def __init__(self, observations=[], default=0):
        """Create a distribution, and optionally add in some observations.
        By default this is an unsmoothed distribution, but saying default=1,
        for example, gives you add-one smoothing."""
        # dictionary: observation -> count (with `default` as initial count);
        # table: cumulative-count pairs used for sampling;
        # needs_recompute: marks table/n_obs as stale after new observations.
        # (`update` and `DefaultDict` presumably come from the star import of
        # utils -- confirm.)
        update(self, dictionary=DefaultDict(default), needs_recompute=False,
               table=[], n_obs=0)
        for o in observations:
            self.add(o)
    def add(self, o):
        """Add an observation o to the distribution."""
        self.dictionary[o] += 1
        self.n_obs += 1
        self.needs_recompute = True
    def sample(self):
        """Return a random sample from the distribution, or None if empty."""
        if self.needs_recompute: self._recompute()
        if self.n_obs == 0:
            return None
        # Binary-search the cumulative-count table for a uniform random draw.
        # (NOTE(review): `bisect` and `random` are not imported here; they are
        # assumed to arrive via the utils star import -- confirm.)
        i = bisect.bisect_left(self.table, (1 + random.randrange(self.n_obs),))
        (count, o) = self.table[i]
        return o
    def __getitem__(self, item):
        """Return an estimate of the probability of item."""
        if self.needs_recompute: self._recompute()
        # _recompute stores n_obs as float, so this is true division.
        return self.dictionary[item] / self.n_obs
    def __len__(self):
        if self.needs_recompute: self._recompute()
        return self.n_obs
    def top(self, n):
        "Return (count, obs) tuples for the n most frequent observations."
        items = [(v, k) for (k, v) in self.dictionary.items()]
        items.sort(); items.reverse()
        return items[0:n]
    def _recompute(self):
        """Recompute the total count n_obs and the table of entries."""
        n_obs = 0
        table = []
        for (o, count) in self.dictionary.items():
            n_obs += count
            table.append((n_obs, o))
        update(self, n_obs=float(n_obs), table=table, needs_recompute=False)
#______________________________________________________________________________
class UnigramTextModel(CountingProbDist):
    """A discrete probability distribution over single words.

    Supports add, sample, and P[word] exactly like CountingProbDist, and can
    additionally generate a random n-word text with P.samples(n)."""
    def samples(self, n):
        "Return a string of n words, drawn at random from the model."
        drawn = [self.sample() for _ in range(n)]
        return ' '.join(drawn)
class NgramTextModel(CountingProbDist):
    """This is a discrete probability distribution over n-tuples of words.
    You can add, sample or get P[(word1, ..., wordn)]. The method P.samples(n)
    builds up an n-word sequence; P.add_text and P.add_sequence add data."""
    def __init__(self, n, observation_sequence=[]):
        ## In addition to the dictionary of n-tuples, cond_prob is a
        ## mapping from (w1, ..., wn-1) to P(wn | w1, ... wn-1)
        CountingProbDist.__init__(self)
        self.n = n
        self.cond_prob = DefaultDict(CountingProbDist())
        self.add_sequence(observation_sequence)
    ## sample, __len__, __getitem__ inherited from CountingProbDist
    ## Note they deal with tuples, not strings, as inputs
    def add(self, ngram):
        """Count 1 for P[(w1, ..., wn)] and for P(wn | (w1, ..., wn-1))"""
        CountingProbDist.add(self, ngram)
        self.cond_prob[ngram[:-1]].add(ngram[-1])
    def add_sequence(self, words):
        """Add each of the tuple words[i:i+n], using a sliding window.
        Prefix some copies of the empty word, '', to make the start work."""
        n = self.n
        words = ['',] * (n-1) + words
        # BUG FIX: the upper bound must be len(words) - n + 1; with the old
        # range(len(words) - n) the final n-gram -- the one ending at the last
        # word of the sequence -- was never counted.
        for i in range(len(words) - n + 1):
            self.add(tuple(words[i:i+n]))
    def samples(self, nwords):
        """Build up a random sample of text nwords long, drawing each word
        from the conditional distribution given the previous n-1 words."""
        n = self.n
        nminus1gram = ('',) * (n-1)
        output = []
        while len(output) < nwords:
            wn = self.cond_prob[nminus1gram].sample()
            if wn:
                output.append(wn)
                nminus1gram = nminus1gram[1:] + (wn,)
            else: ## Cannot continue, so restart.
                nminus1gram = ('',) * (n-1)
        return ' '.join(output)
#______________________________________________________________________________
def viterbi_segment(text, P):
    """Find the best segmentation of the string of characters, given the
    UnigramTextModel P. Returns (list of words, probability)."""
    # best[i]  = probability of the best segmentation of text[0:i]
    # words[i] = last word of that best segmentation (ending at position i)
    n = len(text)
    words = [''] + list(text)
    best = [1.0] + [0.0] * n
    # Dynamic programming over every candidate word text[start:end].
    for end in range(n + 1):
        for start in range(end):
            candidate = text[start:end]
            score = P[candidate] * best[end - len(candidate)]
            if score >= best[end]:
                best[end] = score
                words[end] = candidate
    # Walk backward through `words` to recover the best word sequence.
    sequence = []
    i = len(words) - 1
    while i > 0:
        sequence.insert(0, words[i])
        i -= len(words[i])
    # Return the sequence of best words and the overall probability.
    return sequence, best[-1]
#______________________________________________________________________________
class IRSystem:
    """A very simple Information Retrieval System, as discussed in Sect. 23.2.
    The constructor s = IRSystem('the a') builds an empty system with two
    stopwords. Next, index several documents with s.index_document(text, url).
    Then ask queries with s.query('query words', n) to retrieve the top n
    matching documents. Queries are literal words from the document,
    except that stopwords are ignored, and there is one special syntax:
    The query "learn: man cat", for example, runs "man cat" and indexes it."""
    def __init__(self, stopwords='the a of'):
        """Create an IR System. Optionally specify stopwords."""
        ## index is a map of {word: {docid: count}}, where docid is an int,
        ## indicating the index into the documents list.
        ## (`update` and `DefaultDict` come from utils; `words` is the
        ## tokenizer defined later in this module.)
        update(self, index=DefaultDict(DefaultDict(0)),
               stopwords=set(words(stopwords)), documents=[])
    def index_collection(self, filenames):
        "Index a whole collection of files."
        for filename in filenames:
            self.index_document(open(filename).read(), filename)
    def index_document(self, text, url):
        "Index the text of a document."
        ## For now, use first line for title
        title = text[:text.index('\n')].strip()
        docwords = words(text)
        docid = len(self.documents)
        self.documents.append(Document(title, url, len(docwords)))
        # Count each non-stopword occurrence under this document's id.
        for word in docwords:
            if word not in self.stopwords:
                self.index[word][docid] += 1
    def query(self, query_text, n=10):
        """Return a list of n (score, docid) pairs for the best matches.
        Also handle the special syntax for 'learn: command'."""
        if query_text.startswith("learn:"):
            # NOTE(review): this runs the rest of the query as a shell command
            # via os.popen and indexes its output -- only safe with trusted
            # input; `os` is presumably star-imported from utils -- confirm.
            doctext = os.popen(query_text[len("learn:"):], 'r').read()
            self.index_document(doctext, query_text)
            return []
        qwords = [w for w in words(query_text) if w not in self.stopwords]
        # Only documents containing the rarest query word can rank; use its
        # posting list as the candidate set.
        shortest = argmin(qwords, lambda w: len(self.index[w]))
        docs = self.index[shortest]
        results = [(sum([self.score(w, d) for w in qwords]), d) for d in docs]
        results.sort(); results.reverse()
        return results[:n]
    def score(self, word, docid):
        "Compute a score for this word on this docid."
        ## There are many options; here we take a very simple approach:
        ## log term frequency, dampened by document length.
        return (math.log(1 + self.index[word][docid])
                / math.log(1 + self.documents[docid].nwords))
    def present(self, results):
        "Present the results as a list."
        for (score, d) in results:
            doc = self.documents[d]
            print ("%5.2f|%25s | %s" % (100 * score, doc.url, doc.title[:45]))
    def present_results(self, query_text, n=10):
        "Get results for the query and present them."
        self.present(self.query(query_text, n))
class UnixConsultant(IRSystem):
    """A trivial IR system indexing a small collection of Unix man pages
    found under ../data/man/."""
    def __init__(self):
        IRSystem.__init__(self, stopwords="how do i the a of")
        import os
        man_dir = '../data/man/'
        filenames = [man_dir + name for name in os.listdir(man_dir)]
        self.index_collection(filenames)
class Document:
    """Metadata for a document: title and url; maybe add others later."""
    def __init__(self, title, url, nwords):
        # Set the attributes directly (equivalent to utils.update(self, ...)).
        self.title = title
        self.url = url
        self.nwords = nwords
def words(text, reg=re.compile('[a-z0-9]+')):
    """Return a list of the words in text, ignoring punctuation and
    converting everything to lowercase (to canonicalize).
    >>> words("``EGAD!'' Edgar cried.")
    ['egad', 'edgar', 'cried']
    """
    # The pattern is compiled once, at definition time, and reused.
    lowered = text.lower()
    return reg.findall(lowered)
def canonicalize(text):
    """Return a canonical text: only lowercase letters and blanks.
    >>> canonicalize("``EGAD!'' Edgar cried.")
    'egad edgar cried'
    """
    tokens = words(text)
    return ' '.join(tokens)
#______________________________________________________________________________
## Example application (not in book): decode a cipher.
## A cipher is a code that substitutes one character for another.
## A shift cipher is a rotation of the letters in the alphabet,
## such as the famous rot13, which maps A to N, B to M, etc.
#### Encoding
def shift_encode(plaintext, n):
    """Encode text with a shift cipher that moves each letter up by n letters.
    >>> shift_encode('abc z', 1)
    'bcd a'
    """
    rotated = alphabet[n:] + alphabet[:n]
    return encode(plaintext, rotated)
def rot13(plaintext):
    """Encode text by rotating letters by 13 spaces in the alphabet.
    rot13 is its own inverse, since 13 + 13 = 26.
    >>> rot13('hello')
    'uryyb'
    >>> rot13(rot13('hello'))
    'hello'
    """
    return shift_encode(plaintext, 13)
def encode(plaintext, code):
    """Encode text, using a code which is a permutation of the alphabet.

    Both lowercase and uppercase letters are mapped through the permutation;
    all other characters pass through unchanged.
    """
    # BUG FIX: string.maketrans only exists on Python 2; on Python 3 the
    # equivalent is str.maketrans. Support both.
    try:
        from string import maketrans  # Python 2
    except ImportError:
        maketrans = str.maketrans  # Python 3
    trans = maketrans(alphabet + alphabet.upper(), code + code.upper())
    return plaintext.translate(trans)
alphabet = 'abcdefghijklmnopqrstuvwxyz'
def bigrams(text):
    """Return a list of all adjacent pairs in text (letters or words).
    >>> bigrams('this')
    ['th', 'hi', 'is']
    >>> bigrams(['this', 'is', 'a', 'test'])
    [['this', 'is'], ['is', 'a'], ['a', 'test']]
    """
    pairs = []
    for start in range(len(text) - 1):
        pairs.append(text[start:start + 2])
    return pairs
#### Decoding a Shift (or Caesar) Cipher
class ShiftDecoder:
    """Decoder for shift (Caesar) ciphers.

    There are only 26 possible encodings, so we can try all of them and
    return the one with the highest probability according to a letter-bigram
    model trained on sample text."""
    def __init__(self, training_text):
        training_text = canonicalize(training_text)
        # Add-one smoothed bigram counts over the training text.
        self.P2 = CountingProbDist(bigrams(training_text), default=1)
    def score(self, plaintext):
        "Score text by how common its letter pairs are under the model."
        total = 1.0
        for pair in bigrams(plaintext):
            total *= self.P2[pair]
        return total
    def decode(self, ciphertext):
        "Return the shift decoding of text with the best score."
        return argmax(all_shifts(ciphertext), self.score)
def all_shifts(text):
    "Return a list of all 26 possible encodings of text by a shift cipher."
    return [shift_encode(text, offset) for offset in range(len(alphabet))]
#### Decoding a General Permutation Cipher
class PermutationDecoder:
    """This is a much harder problem than the shift decoder. There are 26!
    permutations, so we can't try them all. Instead we have to search.
    We want to search well, but there are many things to consider:
    Unigram probabilities (E is the most common letter); Bigram probabilities
    (TH is the most common bigram); word probabilities (I and A are the most
    common one-letter words, etc.); etc.
    We could represent a search state as a permutation of the 26 letters,
    and alter the solution through hill climbing. With an initial guess
    based on unigram probabilities, this would probably fare well. However,
    I chose instead to have an incremental representation. A state is
    represented as a letter-to-letter map; for example {'z': 'e'} to
    represent that 'z' will be translated to 'e'.
    """
    def __init__(self, training_text, ciphertext=None):
        """Train word/letter/bigram models; optionally decode right away."""
        self.Pwords = UnigramTextModel(words(training_text))
        self.P1 = UnigramTextModel(training_text)  # By letter
        self.P2 = NgramTextModel(2, training_text)  # By letter pair
        if ciphertext:
            # BUG FIX: this used to be `return self.decode(ciphertext)`;
            # returning a non-None value from __init__ raises TypeError.
            # Decode eagerly and keep the result on the instance instead.
            self.decoding = self.decode(ciphertext)
    def decode(self, ciphertext):
        "Search for a decoding of the ciphertext."
        self.ciphertext = ciphertext
        problem = PermutationDecoderProblem(decoder=self)
        return search.best_first_tree_search(problem, self.score)
    def score(self, ciphertext, code):
        """Score is product of word scores, unigram scores, and bigram scores.
        This can get very small, so we use logs and exp.

        NOTE(review): this (ciphertext, code) signature does not match what
        best_first_tree_search passes to its evaluation function, and no
        module-level `decode` function exists (`encode` is the closest
        candidate) -- this search integration looks unfinished; confirm
        before relying on it.
        """
        text = decode(ciphertext, code)
        logP = (sum([log(self.Pwords[word]) for word in words(text)]) +
                sum([log(self.P1[c]) for c in text]) +
                sum([log(self.P2[b]) for b in bigrams(text)]))
        return exp(logP)
class PermutationDecoderProblem(search.Problem):
    """Search problem over incremental cipher-letter assignments.
    A state is a dict mapping cipher letters to plaintext letters, per the
    representation described in PermutationDecoder's docstring.

    NOTE(review): this class looks unfinished (see the #???? marker below):
    `successors` builds `succs` but never returns it, and `cipherchar` is
    not defined anywhere in scope.
    """
    def __init__(self, initial=None, goal=None, decoder=None):
        self.initial = initial or {}
        self.decoder = decoder
    def successors(self, state):
        ## Find the most probable plaintext letter not yet used in this state.
        p, plainchar = max([(self.decoder.P1[c], c)
                            for c in alphabet if c not in state])
        succs = [extend(state, plainchar, cipherchar)] #???? `cipherchar` undefined; succs never returned
    def goal_test(self, state):
        "We're done when we get all 26 letters assigned."
        return len(state) >= 26
#______________________________________________________________________________
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from nose.tools import * # noqa; PEP8 asserts
from webtest_plus import TestApp
import mock
import datetime
import httplib as http
from flask import Flask
from modularodm import Q
from werkzeug.wrappers import BaseResponse
from framework import auth
from framework.auth import cas
from framework.sessions import Session
from framework.exceptions import HTTPError
from tests.base import OsfTestCase, assert_is_redirect
from tests.factories import (
UserFactory, UnregUserFactory, AuthFactory,
ProjectFactory, NodeFactory, AuthUserFactory, PrivateLinkFactory
)
from framework.auth import User, Auth
from framework.auth.decorators import must_be_logged_in
from website import mails
from website import settings
from website.util import web_url_for
from website.project.decorators import (
must_have_permission, must_be_contributor,
must_have_addon, must_be_addon_authorizer,
)
class TestAuthUtils(OsfTestCase):
    """Integration tests for registration, email confirmation, and user
    lookup in the framework.auth utilities."""
    def test_unreg_user_can_register(self):
        # Registering an unregistered user should leave a confirmation token
        # for their email address.
        user = UnregUserFactory()
        auth.register_unconfirmed(
            username=user.username,
            password='gattaca',
            fullname='Rosie',
        )
        assert_true(user.get_confirmation_token(user.username))
    @mock.patch('framework.auth.views.mails.send_mail')
    def test_confirm_email(self, mock_mail):
        user = UnregUserFactory()
        auth.register_unconfirmed(
            username=user.username,
            password='gattaca',
            fullname='Rosie',
        )
        token = user.get_confirmation_token(user.username)
        # First confirmation visit (unauthenticated): redirected to login.
        res = self.app.get('/confirm/{}/{}'.format(user._id, token), allow_redirects=False)
        res = res.follow()
        assert_equal(res.status_code, 302)
        assert_in('login?service=', res.location)
        user.reload()
        # Exactly one welcome email was sent, addressed to the new user.
        assert_equal(len(mock_mail.call_args_list), 1)
        empty, kwargs = mock_mail.call_args
        kwargs['user'].reload()
        assert_equal(empty, ())
        assert_equal(kwargs, {
            'user': user,
            'mimetype': 'html',
            'mail': mails.WELCOME,
            'to_addr': user.username,
        })
        # Second visit while authenticated: redirected to the dashboard,
        # and no additional email is sent.
        self.app.set_cookie(settings.COOKIE_NAME, user.get_or_create_cookie())
        res = self.app.get('/confirm/{}/{}'.format(user._id, token))
        res = res.follow()
        assert_equal(res.status_code, 302)
        assert_in('dashboard', res.location)
        assert_equal(len(mock_mail.call_args_list), 1)
        # The most recently modified session for the user records exactly
        # one status message.
        session = Session.find(
            Q('data.auth_user_id', 'eq', user._id)
        ).sort(
            '-date_modified'
        ).limit(1)[0]
        assert_equal(len(session.data['status']), 1)
    def test_get_user_by_id(self):
        user = UserFactory()
        assert_equal(User.load(user._id), user)
    def test_get_user_by_email(self):
        user = UserFactory()
        assert_equal(auth.get_user(email=user.username), user)
    def test_get_user_with_wrong_password_returns_false(self):
        # A wrong password must not authenticate the user.
        user = UserFactory.build()
        user.set_password('killerqueen')
        assert_false(
            auth.get_user(email=user.username, password='wrong')
        )
class TestAuthObject(OsfTestCase):
    """Sanity checks for the Auth object and its factory."""
    def test_repr(self):
        # repr() of an Auth instance should mention its user.
        instance = AuthFactory()
        assert_in(str(instance.user), repr(instance))
    def test_factory(self):
        # The factory attaches a real User to the Auth it builds.
        built = AuthFactory()
        assert_true(isinstance(built.user, auth.User))
    def test_from_kwargs(self):
        # from_kwargs picks up both the user and the view-only key.
        someone = UserFactory()
        constructed = Auth.from_kwargs({'view_only': 'mykey'}, {'user': someone})
        assert_equal(constructed.user, someone)
        assert_equal(constructed.private_key, 'mykey')
    def test_logged_in(self):
        # logged_in is True exactly when a user is present.
        someone = UserFactory()
        assert_true(Auth(user=someone).logged_in)
        assert_false(Auth(user=None).logged_in)
class TestPrivateLink(OsfTestCase):
    """View-only ("private link") access to a non-public project."""
    def setUp(self):
        super(TestPrivateLink, self).setUp()
        # Minimal Flask app with one contributor-protected route.
        self.flaskapp = Flask('testing_private_links')
        @self.flaskapp.route('/project/<pid>/')
        @must_be_contributor
        def project_get(**kwargs):
            return 'success', 200
        self.app = TestApp(self.flaskapp)
        self.user = AuthUserFactory()
        self.project = ProjectFactory(is_public=False)
        # Attach a private (view-only) link to the project.
        self.link = PrivateLinkFactory()
        self.link.nodes.append(self.project)
        self.link.save()
    @mock.patch('website.project.decorators.Auth.from_kwargs')
    def test_has_private_link_key(self, mock_from_kwargs):
        # An anonymous request carrying a valid view-only key is let through.
        mock_from_kwargs.return_value = Auth(user=None)
        res = self.app.get('/project/{0}'.format(self.project._primary_key),
                           {'view_only': self.link.key})
        # The request URL lacks the route's trailing slash, so follow the
        # resulting redirect before asserting.
        res = res.follow()
        assert_equal(res.status_code, 200)
        assert_equal(res.body, 'success')
    @mock.patch('website.project.decorators.Auth.from_kwargs')
    def test_does_not_have_key(self, mock_from_kwargs):
        # Without a valid key, an anonymous request is redirected away.
        mock_from_kwargs.return_value = Auth(user=None)
        res = self.app.get('/project/{0}'.format(self.project._primary_key),
                           {'key': None})
        assert_is_redirect(res)
# Flask app for testing view decorators
decoratorapp = Flask('decorators')
# Minimal view guarded by must_be_contributor; it returns whichever node the
# decorator resolved (child 'node' or 'parent') so tests can inspect it.
@must_be_contributor
def view_that_needs_contributor(**kwargs):
    return kwargs.get('node') or kwargs.get('parent')
class AuthAppTestCase(OsfTestCase):
    """Base case that pushes a request context for ``decoratorapp``.

    Decorators under test call into Flask request machinery, so each test
    runs inside a pushed test_request_context.
    """
    def setUp(self):
        # NOTE(review): does not call super().setUp(); presumably intentional
        # to skip OsfTestCase's app setup — confirm.
        self.ctx = decoratorapp.test_request_context()
        self.ctx.push()
    def tearDown(self):
        self.ctx.pop()
class TestMustBeContributorDecorator(AuthAppTestCase):
    """Behavior of @must_be_contributor for contributors, strangers and guests."""
    def setUp(self):
        super(TestMustBeContributorDecorator, self).setUp()
        self.contrib = AuthUserFactory()
        self.project = ProjectFactory()
        self.project.add_contributor(self.contrib, auth=Auth(self.project.creator))
        self.project.save()
    def test_must_be_contributor_when_user_is_contributor(self):
        """A contributor passes the decorator and the view receives the node."""
        result = view_that_needs_contributor(
            pid=self.project._primary_key,
            user=self.contrib)
        assert_equal(result, self.project)
    def test_must_be_contributor_when_user_is_not_contributor_raises_error(self):
        """A non-contributor is rejected with an HTTPError."""
        non_contributor = AuthUserFactory()
        with assert_raises(HTTPError):
            view_that_needs_contributor(
                pid=self.project._primary_key,
                user=non_contributor
            )
    def test_must_be_contributor_no_user(self):
        """An anonymous request is redirected to the CAS login page."""
        res = view_that_needs_contributor(
            pid=self.project._primary_key,
            user=None,
        )
        assert_is_redirect(res)
        # redirects to login url
        redirect_url = res.headers['Location']
        login_url = cas.get_login_url(service_url='http://localhost/')
        assert_equal(redirect_url, login_url)
    def test_must_be_contributor_parent_admin(self):
        """Admin on the parent project may access a child node they do not own."""
        user = UserFactory()
        node = NodeFactory(parent=self.project, creator=user)
        res = view_that_needs_contributor(
            pid=self.project._id,
            nid=node._id,
            user=self.project.creator,
        )
        assert_equal(res, node)
    def test_must_be_contributor_parent_write(self):
        """Write (non-admin) permission on the parent is NOT enough: 403."""
        user = UserFactory()
        node = NodeFactory(parent=self.project, creator=user)
        # Downgrade the creator from admin to read/write only.
        self.project.set_permissions(self.project.creator, ['read', 'write'])
        self.project.save()
        with assert_raises(HTTPError) as exc_info:
            view_that_needs_contributor(
                pid=self.project._id,
                nid=node._id,
                user=self.project.creator,
            )
        assert_equal(exc_info.exception.code, 403)
@must_be_logged_in
def protected(**kwargs):
    """Dummy view reachable only by an authenticated user."""
    return 'open sesame'
@must_have_permission('dance')
def thriller(**kwargs):
    """Dummy view reachable only by a user holding the 'dance' permission."""
    return 'chiller'
class TestPermissionDecorators(AuthAppTestCase):
    """@must_be_logged_in and @must_have_permission, with Auth resolution mocked."""
    @mock.patch('framework.auth.decorators.Auth.from_kwargs')
    def test_must_be_logged_in_decorator_with_user(self, mock_from_kwargs):
        """Logged-in user: the wrapped view executes without error."""
        user = UserFactory()
        mock_from_kwargs.return_value = Auth(user=user)
        protected()
    @mock.patch('framework.auth.decorators.Auth.from_kwargs')
    def test_must_be_logged_in_decorator_with_no_user(self, mock_from_kwargs):
        """No user: decorator returns a redirect response to CAS login."""
        mock_from_kwargs.return_value = Auth()
        resp = protected()
        assert_true(isinstance(resp, BaseResponse))
        login_url = cas.get_login_url(service_url='http://localhost/')
        assert_in(login_url, resp.headers.get('location'))
    @mock.patch('website.project.decorators._kwargs_to_nodes')
    @mock.patch('framework.auth.decorators.Auth.from_kwargs')
    def test_must_have_permission_true(self, mock_from_kwargs, mock_to_nodes):
        """User with the permission: view executes."""
        project = ProjectFactory()
        project.add_permission(project.creator, 'dance')
        mock_from_kwargs.return_value = Auth(user=project.creator)
        mock_to_nodes.return_value = (None, project)
        thriller(node=project)
    @mock.patch('website.project.decorators._kwargs_to_nodes')
    @mock.patch('framework.auth.decorators.Auth.from_kwargs')
    def test_must_have_permission_false(self, mock_from_kwargs, mock_to_nodes):
        """Logged-in user without the permission: 403 FORBIDDEN."""
        project = ProjectFactory()
        mock_from_kwargs.return_value = Auth(user=project.creator)
        mock_to_nodes.return_value = (None, project)
        with assert_raises(HTTPError) as ctx:
            thriller(node=project)
        assert_equal(ctx.exception.code, http.FORBIDDEN)
    @mock.patch('website.project.decorators._kwargs_to_nodes')
    @mock.patch('framework.auth.decorators.Auth.from_kwargs')
    def test_must_have_permission_not_logged_in(self, mock_from_kwargs, mock_to_nodes):
        """Anonymous user: 401 UNAUTHORIZED rather than 403."""
        project = ProjectFactory()
        mock_from_kwargs.return_value = Auth()
        mock_to_nodes.return_value = (None, project)
        with assert_raises(HTTPError) as ctx:
            thriller(node=project)
        assert_equal(ctx.exception.code, http.UNAUTHORIZED)
def needs_addon_view(**kwargs):
    """Minimal stand-in view; addon decorator tests wrap this and check
    whether the call is allowed through to produce 'openaddon'."""
    response = 'openaddon'
    return response
class TestMustHaveAddonDecorator(AuthAppTestCase):
    """@must_have_addon for both node-level and user-level addons."""
    def setUp(self):
        super(TestMustHaveAddonDecorator, self).setUp()
        self.project = ProjectFactory()
    @mock.patch('website.project.decorators._kwargs_to_nodes')
    def test_must_have_addon_node_true(self, mock_kwargs_to_nodes):
        """Node with the addon enabled: view runs."""
        mock_kwargs_to_nodes.return_value = (None, self.project)
        self.project.add_addon('github', auth=None)
        decorated = must_have_addon('github', 'node')(needs_addon_view)
        res = decorated()
        assert_equal(res, 'openaddon')
    @mock.patch('website.project.decorators._kwargs_to_nodes')
    def test_must_have_addon_node_false(self, mock_kwargs_to_nodes):
        """Node without the addon: HTTPError."""
        mock_kwargs_to_nodes.return_value = (None, self.project)
        self.project.delete_addon('github', auth=None)
        decorated = must_have_addon('github', 'node')(needs_addon_view)
        with assert_raises(HTTPError):
            decorated()
    @mock.patch('framework.auth.decorators.Auth.from_kwargs')
    def test_must_have_addon_user_true(self, mock_current_user):
        """User with the addon enabled: view runs."""
        mock_current_user.return_value = Auth(self.project.creator)
        self.project.creator.add_addon('github')
        decorated = must_have_addon('github', 'user')(needs_addon_view)
        res = decorated()
        assert_equal(res, 'openaddon')
    @mock.patch('framework.auth.decorators.Auth.from_kwargs')
    def test_must_have_addon_user_false(self, mock_current_user):
        """User without the addon: HTTPError."""
        mock_current_user.return_value = Auth(self.project.creator)
        self.project.creator.delete_addon('github')
        decorated = must_have_addon('github', 'user')(needs_addon_view)
        with assert_raises(HTTPError):
            decorated()
class TestMustBeAddonAuthorizerDecorator(AuthAppTestCase):
    """@must_be_addon_authorizer: only the user who authorized the node addon passes."""
    def setUp(self):
        super(TestMustBeAddonAuthorizerDecorator, self).setUp()
        self.project = ProjectFactory()
        self.decorated = must_be_addon_authorizer('github')(needs_addon_view)
    @mock.patch('website.project.decorators._kwargs_to_nodes')
    @mock.patch('framework.auth.decorators.Auth.from_kwargs')
    def test_must_be_authorizer_true(self, mock_get_current_user, mock_kwargs_to_nodes):
        """Current user owns the user settings linked to the node addon: allowed."""
        # Mock
        mock_get_current_user.return_value = Auth(self.project.creator)
        mock_kwargs_to_nodes.return_value = (None, self.project)
        # Setup
        self.project.add_addon('github', auth=None)
        node_settings = self.project.get_addon('github')
        self.project.creator.add_addon('github')
        user_settings = self.project.creator.get_addon('github')
        node_settings.user_settings = user_settings
        # Test
        res = self.decorated()
        assert_equal(res, 'openaddon')
    def test_must_be_authorizer_false(self):
        """Node addon authorized by a different user: HTTPError."""
        # Setup
        self.project.add_addon('github', auth=None)
        node_settings = self.project.get_addon('github')
        user2 = UserFactory()
        user2.add_addon('github')
        user_settings = user2.get_addon('github')
        node_settings.user_settings = user_settings
        # Test
        with assert_raises(HTTPError):
            self.decorated()
    def test_must_be_authorizer_no_user_settings(self):
        """Node addon present but never authorized: HTTPError."""
        self.project.add_addon('github', auth=None)
        with assert_raises(HTTPError):
            self.decorated()
    def test_must_be_authorizer_no_node_settings(self):
        """Node has no addon at all: HTTPError."""
        with assert_raises(HTTPError):
            self.decorated()
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
"""Tests for classes defining properties of ground domains, e.g. ZZ, QQ, ZZ[x] ... """
from sympy import S, sqrt, sin, oo, Poly, Float, Rational
from sympy.abc import x, y, z
from sympy.polys.domains import ZZ, QQ, RR, CC, FF, GF, EX
from sympy.polys.domains.realfield import RealField
from sympy.polys.rings import ring
from sympy.polys.fields import field
from sympy.polys.polyerrors import (
UnificationFailed,
GeneratorsError,
CoercionFailed,
NotInvertible,
DomainError)
from sympy.polys.polyutils import illegal
from sympy.utilities.pytest import raises
# Shared algebraic extension field QQ(sqrt(2), sqrt(3)) used across tests.
ALG = QQ.algebraic_field(sqrt(2), sqrt(3))
def unify(K0, K1):
    """Shorthand for K0.unify(K1), used heavily in the unification tests."""
    return K0.unify(K1)
def test_Domain_unify():
    """Pairwise unification of base domains always yields the wider domain."""
    F3 = GF(3)
    assert unify(F3, F3) == F3
    assert unify(F3, ZZ) == ZZ
    assert unify(F3, QQ) == QQ
    assert unify(F3, ALG) == ALG
    assert unify(F3, RR) == RR
    assert unify(F3, CC) == CC
    assert unify(F3, ZZ[x]) == ZZ[x]
    assert unify(F3, ZZ.frac_field(x)) == ZZ.frac_field(x)
    assert unify(F3, EX) == EX
    assert unify(ZZ, F3) == ZZ
    assert unify(ZZ, ZZ) == ZZ
    assert unify(ZZ, QQ) == QQ
    assert unify(ZZ, ALG) == ALG
    assert unify(ZZ, RR) == RR
    assert unify(ZZ, CC) == CC
    assert unify(ZZ, ZZ[x]) == ZZ[x]
    assert unify(ZZ, ZZ.frac_field(x)) == ZZ.frac_field(x)
    assert unify(ZZ, EX) == EX
    assert unify(QQ, F3) == QQ
    assert unify(QQ, ZZ) == QQ
    assert unify(QQ, QQ) == QQ
    assert unify(QQ, ALG) == ALG
    assert unify(QQ, RR) == RR
    assert unify(QQ, CC) == CC
    assert unify(QQ, ZZ[x]) == QQ[x]
    assert unify(QQ, ZZ.frac_field(x)) == QQ.frac_field(x)
    assert unify(QQ, EX) == EX
    assert unify(RR, F3) == RR
    assert unify(RR, ZZ) == RR
    assert unify(RR, QQ) == RR
    assert unify(RR, ALG) == RR
    assert unify(RR, RR) == RR
    assert unify(RR, CC) == CC
    assert unify(RR, ZZ[x]) == RR[x]
    assert unify(RR, ZZ.frac_field(x)) == RR.frac_field(x)
    assert unify(RR, EX) == EX
    assert RR[x].unify(ZZ.frac_field(y)) == RR.frac_field(x, y)
    assert unify(CC, F3) == CC
    assert unify(CC, ZZ) == CC
    assert unify(CC, QQ) == CC
    assert unify(CC, ALG) == CC
    assert unify(CC, RR) == CC
    assert unify(CC, CC) == CC
    assert unify(CC, ZZ[x]) == CC[x]
    assert unify(CC, ZZ.frac_field(x)) == CC.frac_field(x)
    assert unify(CC, EX) == EX
    assert unify(ZZ[x], F3) == ZZ[x]
    assert unify(ZZ[x], ZZ) == ZZ[x]
    assert unify(ZZ[x], QQ) == QQ[x]
    assert unify(ZZ[x], ALG) == ALG[x]
    assert unify(ZZ[x], RR) == RR[x]
    assert unify(ZZ[x], CC) == CC[x]
    assert unify(ZZ[x], ZZ[x]) == ZZ[x]
    assert unify(ZZ[x], ZZ.frac_field(x)) == ZZ.frac_field(x)
    assert unify(ZZ[x], EX) == EX
    assert unify(ZZ.frac_field(x), F3) == ZZ.frac_field(x)
    assert unify(ZZ.frac_field(x), ZZ) == ZZ.frac_field(x)
    assert unify(ZZ.frac_field(x), QQ) == QQ.frac_field(x)
    assert unify(ZZ.frac_field(x), ALG) == ALG.frac_field(x)
    assert unify(ZZ.frac_field(x), RR) == RR.frac_field(x)
    assert unify(ZZ.frac_field(x), CC) == CC.frac_field(x)
    assert unify(ZZ.frac_field(x), ZZ[x]) == ZZ.frac_field(x)
    assert unify(ZZ.frac_field(x), ZZ.frac_field(x)) == ZZ.frac_field(x)
    assert unify(ZZ.frac_field(x), EX) == EX
    # EX (expression domain) absorbs everything.
    assert unify(EX, F3) == EX
    assert unify(EX, ZZ) == EX
    assert unify(EX, QQ) == EX
    assert unify(EX, ALG) == EX
    assert unify(EX, RR) == EX
    assert unify(EX, CC) == EX
    assert unify(EX, ZZ[x]) == EX
    assert unify(EX, ZZ.frac_field(x)) == EX
    assert unify(EX, EX) == EX
def test_Domain_unify_composite():
    """Unification of poly rings / frac fields merges generators and ground domains."""
    # ring vs. ground domain
    assert unify(ZZ.poly_ring(x), ZZ) == ZZ.poly_ring(x)
    assert unify(ZZ.poly_ring(x), QQ) == QQ.poly_ring(x)
    assert unify(QQ.poly_ring(x), ZZ) == QQ.poly_ring(x)
    assert unify(QQ.poly_ring(x), QQ) == QQ.poly_ring(x)
    assert unify(ZZ, ZZ.poly_ring(x)) == ZZ.poly_ring(x)
    assert unify(QQ, ZZ.poly_ring(x)) == QQ.poly_ring(x)
    assert unify(ZZ, QQ.poly_ring(x)) == QQ.poly_ring(x)
    assert unify(QQ, QQ.poly_ring(x)) == QQ.poly_ring(x)
    assert unify(ZZ.poly_ring(x, y), ZZ) == ZZ.poly_ring(x, y)
    assert unify(ZZ.poly_ring(x, y), QQ) == QQ.poly_ring(x, y)
    assert unify(QQ.poly_ring(x, y), ZZ) == QQ.poly_ring(x, y)
    assert unify(QQ.poly_ring(x, y), QQ) == QQ.poly_ring(x, y)
    assert unify(ZZ, ZZ.poly_ring(x, y)) == ZZ.poly_ring(x, y)
    assert unify(QQ, ZZ.poly_ring(x, y)) == QQ.poly_ring(x, y)
    assert unify(ZZ, QQ.poly_ring(x, y)) == QQ.poly_ring(x, y)
    assert unify(QQ, QQ.poly_ring(x, y)) == QQ.poly_ring(x, y)
    # fraction field vs. ground domain
    assert unify(ZZ.frac_field(x), ZZ) == ZZ.frac_field(x)
    assert unify(ZZ.frac_field(x), QQ) == QQ.frac_field(x)
    assert unify(QQ.frac_field(x), ZZ) == QQ.frac_field(x)
    assert unify(QQ.frac_field(x), QQ) == QQ.frac_field(x)
    assert unify(ZZ, ZZ.frac_field(x)) == ZZ.frac_field(x)
    assert unify(QQ, ZZ.frac_field(x)) == QQ.frac_field(x)
    assert unify(ZZ, QQ.frac_field(x)) == QQ.frac_field(x)
    assert unify(QQ, QQ.frac_field(x)) == QQ.frac_field(x)
    assert unify(ZZ.frac_field(x, y), ZZ) == ZZ.frac_field(x, y)
    assert unify(ZZ.frac_field(x, y), QQ) == QQ.frac_field(x, y)
    assert unify(QQ.frac_field(x, y), ZZ) == QQ.frac_field(x, y)
    assert unify(QQ.frac_field(x, y), QQ) == QQ.frac_field(x, y)
    assert unify(ZZ, ZZ.frac_field(x, y)) == ZZ.frac_field(x, y)
    assert unify(QQ, ZZ.frac_field(x, y)) == QQ.frac_field(x, y)
    assert unify(ZZ, QQ.frac_field(x, y)) == QQ.frac_field(x, y)
    assert unify(QQ, QQ.frac_field(x, y)) == QQ.frac_field(x, y)
    # ring vs. ring: generator sets are merged
    assert unify(ZZ.poly_ring(x), ZZ.poly_ring(x)) == ZZ.poly_ring(x)
    assert unify(ZZ.poly_ring(x), QQ.poly_ring(x)) == QQ.poly_ring(x)
    assert unify(QQ.poly_ring(x), ZZ.poly_ring(x)) == QQ.poly_ring(x)
    assert unify(QQ.poly_ring(x), QQ.poly_ring(x)) == QQ.poly_ring(x)
    assert unify(ZZ.poly_ring(x, y), ZZ.poly_ring(x)) == ZZ.poly_ring(x, y)
    assert unify(ZZ.poly_ring(x, y), QQ.poly_ring(x)) == QQ.poly_ring(x, y)
    assert unify(QQ.poly_ring(x, y), ZZ.poly_ring(x)) == QQ.poly_ring(x, y)
    assert unify(QQ.poly_ring(x, y), QQ.poly_ring(x)) == QQ.poly_ring(x, y)
    assert unify(ZZ.poly_ring(x), ZZ.poly_ring(x, y)) == ZZ.poly_ring(x, y)
    assert unify(ZZ.poly_ring(x), QQ.poly_ring(x, y)) == QQ.poly_ring(x, y)
    assert unify(QQ.poly_ring(x), ZZ.poly_ring(x, y)) == QQ.poly_ring(x, y)
    assert unify(QQ.poly_ring(x), QQ.poly_ring(x, y)) == QQ.poly_ring(x, y)
    assert unify(ZZ.poly_ring(x, y), ZZ.poly_ring(x, z)) == ZZ.poly_ring(x, y, z)
    assert unify(ZZ.poly_ring(x, y), QQ.poly_ring(x, z)) == QQ.poly_ring(x, y, z)
    assert unify(QQ.poly_ring(x, y), ZZ.poly_ring(x, z)) == QQ.poly_ring(x, y, z)
    assert unify(QQ.poly_ring(x, y), QQ.poly_ring(x, z)) == QQ.poly_ring(x, y, z)
    # field vs. field
    assert unify(ZZ.frac_field(x), ZZ.frac_field(x)) == ZZ.frac_field(x)
    assert unify(ZZ.frac_field(x), QQ.frac_field(x)) == QQ.frac_field(x)
    assert unify(QQ.frac_field(x), ZZ.frac_field(x)) == QQ.frac_field(x)
    assert unify(QQ.frac_field(x), QQ.frac_field(x)) == QQ.frac_field(x)
    assert unify(ZZ.frac_field(x, y), ZZ.frac_field(x)) == ZZ.frac_field(x, y)
    assert unify(ZZ.frac_field(x, y), QQ.frac_field(x)) == QQ.frac_field(x, y)
    assert unify(QQ.frac_field(x, y), ZZ.frac_field(x)) == QQ.frac_field(x, y)
    assert unify(QQ.frac_field(x, y), QQ.frac_field(x)) == QQ.frac_field(x, y)
    assert unify(ZZ.frac_field(x), ZZ.frac_field(x, y)) == ZZ.frac_field(x, y)
    assert unify(ZZ.frac_field(x), QQ.frac_field(x, y)) == QQ.frac_field(x, y)
    assert unify(QQ.frac_field(x), ZZ.frac_field(x, y)) == QQ.frac_field(x, y)
    assert unify(QQ.frac_field(x), QQ.frac_field(x, y)) == QQ.frac_field(x, y)
    assert unify(ZZ.frac_field(x, y), ZZ.frac_field(x, z)) == ZZ.frac_field(x, y, z)
    assert unify(ZZ.frac_field(x, y), QQ.frac_field(x, z)) == QQ.frac_field(x, y, z)
    assert unify(QQ.frac_field(x, y), ZZ.frac_field(x, z)) == QQ.frac_field(x, y, z)
    assert unify(QQ.frac_field(x, y), QQ.frac_field(x, z)) == QQ.frac_field(x, y, z)
    # ring vs. field: the field wins; note the ground domain collapses to ZZ
    # whenever one side is a ZZ-based composite.
    assert unify(ZZ.poly_ring(x), ZZ.frac_field(x)) == ZZ.frac_field(x)
    assert unify(ZZ.poly_ring(x), QQ.frac_field(x)) == ZZ.frac_field(x)
    assert unify(QQ.poly_ring(x), ZZ.frac_field(x)) == ZZ.frac_field(x)
    assert unify(QQ.poly_ring(x), QQ.frac_field(x)) == QQ.frac_field(x)
    assert unify(ZZ.poly_ring(x, y), ZZ.frac_field(x)) == ZZ.frac_field(x, y)
    assert unify(ZZ.poly_ring(x, y), QQ.frac_field(x)) == ZZ.frac_field(x, y)
    assert unify(QQ.poly_ring(x, y), ZZ.frac_field(x)) == ZZ.frac_field(x, y)
    assert unify(QQ.poly_ring(x, y), QQ.frac_field(x)) == QQ.frac_field(x, y)
    assert unify(ZZ.poly_ring(x), ZZ.frac_field(x, y)) == ZZ.frac_field(x, y)
    assert unify(ZZ.poly_ring(x), QQ.frac_field(x, y)) == ZZ.frac_field(x, y)
    assert unify(QQ.poly_ring(x), ZZ.frac_field(x, y)) == ZZ.frac_field(x, y)
    assert unify(QQ.poly_ring(x), QQ.frac_field(x, y)) == QQ.frac_field(x, y)
    assert unify(ZZ.poly_ring(x, y), ZZ.frac_field(x, z)) == ZZ.frac_field(x, y, z)
    assert unify(ZZ.poly_ring(x, y), QQ.frac_field(x, z)) == ZZ.frac_field(x, y, z)
    assert unify(QQ.poly_ring(x, y), ZZ.frac_field(x, z)) == ZZ.frac_field(x, y, z)
    assert unify(QQ.poly_ring(x, y), QQ.frac_field(x, z)) == QQ.frac_field(x, y, z)
    # field vs. ring (mirrored argument order)
    assert unify(ZZ.frac_field(x), ZZ.poly_ring(x)) == ZZ.frac_field(x)
    assert unify(ZZ.frac_field(x), QQ.poly_ring(x)) == ZZ.frac_field(x)
    assert unify(QQ.frac_field(x), ZZ.poly_ring(x)) == ZZ.frac_field(x)
    assert unify(QQ.frac_field(x), QQ.poly_ring(x)) == QQ.frac_field(x)
    assert unify(ZZ.frac_field(x, y), ZZ.poly_ring(x)) == ZZ.frac_field(x, y)
    assert unify(ZZ.frac_field(x, y), QQ.poly_ring(x)) == ZZ.frac_field(x, y)
    assert unify(QQ.frac_field(x, y), ZZ.poly_ring(x)) == ZZ.frac_field(x, y)
    assert unify(QQ.frac_field(x, y), QQ.poly_ring(x)) == QQ.frac_field(x, y)
    assert unify(ZZ.frac_field(x), ZZ.poly_ring(x, y)) == ZZ.frac_field(x, y)
    assert unify(ZZ.frac_field(x), QQ.poly_ring(x, y)) == ZZ.frac_field(x, y)
    assert unify(QQ.frac_field(x), ZZ.poly_ring(x, y)) == ZZ.frac_field(x, y)
    assert unify(QQ.frac_field(x), QQ.poly_ring(x, y)) == QQ.frac_field(x, y)
    assert unify(ZZ.frac_field(x, y), ZZ.poly_ring(x, z)) == ZZ.frac_field(x, y, z)
    assert unify(ZZ.frac_field(x, y), QQ.poly_ring(x, z)) == ZZ.frac_field(x, y, z)
    assert unify(QQ.frac_field(x, y), ZZ.poly_ring(x, z)) == ZZ.frac_field(x, y, z)
    assert unify(QQ.frac_field(x, y), QQ.poly_ring(x, z)) == QQ.frac_field(x, y, z)
def test_Domain_unify_algebraic():
    """Algebraic fields unify by merging their generating extensions."""
    sqrt5 = QQ.algebraic_field(sqrt(5))
    sqrt7 = QQ.algebraic_field(sqrt(7))
    sqrt57 = QQ.algebraic_field(sqrt(5), sqrt(7))
    assert sqrt5.unify(sqrt7) == sqrt57
    assert sqrt5.unify(sqrt5[x, y]) == sqrt5[x, y]
    assert sqrt5[x, y].unify(sqrt5) == sqrt5[x, y]
    assert sqrt5.unify(sqrt5.frac_field(x, y)) == sqrt5.frac_field(x, y)
    assert sqrt5.frac_field(x, y).unify(sqrt5) == sqrt5.frac_field(x, y)
    assert sqrt5.unify(sqrt7[x, y]) == sqrt57[x, y]
    assert sqrt5[x, y].unify(sqrt7) == sqrt57[x, y]
    assert sqrt5.unify(sqrt7.frac_field(x, y)) == sqrt57.frac_field(x, y)
    assert sqrt5.frac_field(x, y).unify(sqrt7) == sqrt57.frac_field(x, y)
def test_Domain_unify_with_symbols():
    """unify_with_symbols raises when the extra symbols clash with generators."""
    raises(UnificationFailed, lambda: ZZ[x, y].unify_with_symbols(ZZ, (y, z)))
    raises(UnificationFailed, lambda: ZZ.unify_with_symbols(ZZ[x, y], (y, z)))
def test_Domain__contains__():
    """``in`` membership tests across every domain kind and element kind."""
    # integers belong to every numeric domain
    assert (0 in EX) is True
    assert (0 in ZZ) is True
    assert (0 in QQ) is True
    assert (0 in RR) is True
    assert (0 in CC) is True
    assert (0 in ALG) is True
    assert (0 in ZZ[x, y]) is True
    assert (0 in QQ[x, y]) is True
    assert (0 in RR[x, y]) is True
    assert (-7 in EX) is True
    assert (-7 in ZZ) is True
    assert (-7 in QQ) is True
    assert (-7 in RR) is True
    assert (-7 in CC) is True
    assert (-7 in ALG) is True
    assert (-7 in ZZ[x, y]) is True
    assert (-7 in QQ[x, y]) is True
    assert (-7 in RR[x, y]) is True
    assert (17 in EX) is True
    assert (17 in ZZ) is True
    assert (17 in QQ) is True
    assert (17 in RR) is True
    assert (17 in CC) is True
    assert (17 in ALG) is True
    assert (17 in ZZ[x, y]) is True
    assert (17 in QQ[x, y]) is True
    assert (17 in RR[x, y]) is True
    # non-integer rationals are excluded from ZZ-based domains
    assert (Rational(-1, 7) in EX) is True
    assert (Rational(-1, 7) in ZZ) is False
    assert (Rational(-1, 7) in QQ) is True
    assert (Rational(-1, 7) in RR) is True
    assert (Rational(-1, 7) in CC) is True
    assert (Rational(-1, 7) in ALG) is True
    assert (Rational(-1, 7) in ZZ[x, y]) is False
    assert (Rational(-1, 7) in QQ[x, y]) is True
    assert (Rational(-1, 7) in RR[x, y]) is True
    assert (Rational(3, 5) in EX) is True
    assert (Rational(3, 5) in ZZ) is False
    assert (Rational(3, 5) in QQ) is True
    assert (Rational(3, 5) in RR) is True
    assert (Rational(3, 5) in CC) is True
    assert (Rational(3, 5) in ALG) is True
    assert (Rational(3, 5) in ZZ[x, y]) is False
    assert (Rational(3, 5) in QQ[x, y]) is True
    assert (Rational(3, 5) in RR[x, y]) is True
    # floats with integral value count as integers
    assert (3.0 in EX) is True
    assert (3.0 in ZZ) is True
    assert (3.0 in QQ) is True
    assert (3.0 in RR) is True
    assert (3.0 in CC) is True
    assert (3.0 in ALG) is True
    assert (3.0 in ZZ[x, y]) is True
    assert (3.0 in QQ[x, y]) is True
    assert (3.0 in RR[x, y]) is True
    assert (3.14 in EX) is True
    assert (3.14 in ZZ) is False
    assert (3.14 in QQ) is True
    assert (3.14 in RR) is True
    assert (3.14 in CC) is True
    assert (3.14 in ALG) is True
    assert (3.14 in ZZ[x, y]) is False
    assert (3.14 in QQ[x, y]) is True
    assert (3.14 in RR[x, y]) is True
    # infinities are in no exact domain
    assert (oo in ALG) is False
    assert (oo in ZZ[x, y]) is False
    assert (oo in QQ[x, y]) is False
    assert (-oo in ZZ) is False
    assert (-oo in QQ) is False
    assert (-oo in ALG) is False
    assert (-oo in ZZ[x, y]) is False
    assert (-oo in QQ[x, y]) is False
    # irrational radicals: only inexact/EX domains accept them; note sqrt(7)
    # is not in ALG because ALG = QQ(sqrt(2), sqrt(3)) does not contain it
    assert (sqrt(7) in EX) is True
    assert (sqrt(7) in ZZ) is False
    assert (sqrt(7) in QQ) is False
    assert (sqrt(7) in RR) is True
    assert (sqrt(7) in CC) is True
    assert (sqrt(7) in ALG) is False
    assert (sqrt(7) in ZZ[x, y]) is False
    assert (sqrt(7) in QQ[x, y]) is False
    assert (sqrt(7) in RR[x, y]) is True
    assert (2*sqrt(3) + 1 in EX) is True
    assert (2*sqrt(3) + 1 in ZZ) is False
    assert (2*sqrt(3) + 1 in QQ) is False
    assert (2*sqrt(3) + 1 in RR) is True
    assert (2*sqrt(3) + 1 in CC) is True
    assert (2*sqrt(3) + 1 in ALG) is True
    assert (2*sqrt(3) + 1 in ZZ[x, y]) is False
    assert (2*sqrt(3) + 1 in QQ[x, y]) is False
    assert (2*sqrt(3) + 1 in RR[x, y]) is True
    # transcendental values
    assert (sin(1) in EX) is True
    assert (sin(1) in ZZ) is False
    assert (sin(1) in QQ) is False
    assert (sin(1) in RR) is True
    assert (sin(1) in CC) is True
    assert (sin(1) in ALG) is False
    assert (sin(1) in ZZ[x, y]) is False
    assert (sin(1) in QQ[x, y]) is False
    assert (sin(1) in RR[x, y]) is True
    # polynomial expressions need a composite domain with the right generators
    assert (x**2 + 1 in EX) is True
    assert (x**2 + 1 in ZZ) is False
    assert (x**2 + 1 in QQ) is False
    assert (x**2 + 1 in RR) is False
    assert (x**2 + 1 in CC) is False
    assert (x**2 + 1 in ALG) is False
    assert (x**2 + 1 in ZZ[x]) is True
    assert (x**2 + 1 in QQ[x]) is True
    assert (x**2 + 1 in RR[x]) is True
    assert (x**2 + 1 in ZZ[x, y]) is True
    assert (x**2 + 1 in QQ[x, y]) is True
    assert (x**2 + 1 in RR[x, y]) is True
    assert (x**2 + y**2 in EX) is True
    assert (x**2 + y**2 in ZZ) is False
    assert (x**2 + y**2 in QQ) is False
    assert (x**2 + y**2 in RR) is False
    assert (x**2 + y**2 in CC) is False
    assert (x**2 + y**2 in ALG) is False
    assert (x**2 + y**2 in ZZ[x]) is False
    assert (x**2 + y**2 in QQ[x]) is False
    assert (x**2 + y**2 in RR[x]) is False
    assert (x**2 + y**2 in ZZ[x, y]) is True
    assert (x**2 + y**2 in QQ[x, y]) is True
    assert (x**2 + y**2 in RR[x, y]) is True
    # rational functions do not live in a polynomial ring
    assert (Rational(3, 2)*x/(y + 1) - z in QQ[x, y, z]) is False
def test_Domain_get_ring():
    """has_assoc_Ring / get_ring return the associated ring of each domain."""
    assert ZZ.has_assoc_Ring is True
    assert QQ.has_assoc_Ring is True
    assert ZZ[x].has_assoc_Ring is True
    assert QQ[x].has_assoc_Ring is True
    assert ZZ[x, y].has_assoc_Ring is True
    assert QQ[x, y].has_assoc_Ring is True
    assert ZZ.frac_field(x).has_assoc_Ring is True
    assert QQ.frac_field(x).has_assoc_Ring is True
    assert ZZ.frac_field(x, y).has_assoc_Ring is True
    assert QQ.frac_field(x, y).has_assoc_Ring is True
    assert EX.has_assoc_Ring is False
    assert RR.has_assoc_Ring is False
    assert ALG.has_assoc_Ring is False
    assert ZZ.get_ring() == ZZ
    assert QQ.get_ring() == ZZ
    assert ZZ[x].get_ring() == ZZ[x]
    assert QQ[x].get_ring() == QQ[x]
    assert ZZ[x, y].get_ring() == ZZ[x, y]
    assert QQ[x, y].get_ring() == QQ[x, y]
    assert ZZ.frac_field(x).get_ring() == ZZ[x]
    assert QQ.frac_field(x).get_ring() == QQ[x]
    assert ZZ.frac_field(x, y).get_ring() == ZZ[x, y]
    assert QQ.frac_field(x, y).get_ring() == QQ[x, y]
    assert EX.get_ring() == EX
    assert RR.get_ring() == RR
    # XXX: This should also be like RR
    raises(DomainError, lambda: ALG.get_ring())
def test_Domain_get_field():
    """has_assoc_Field / get_field return the associated field of each domain."""
    assert EX.has_assoc_Field is True
    assert ZZ.has_assoc_Field is True
    assert QQ.has_assoc_Field is True
    assert RR.has_assoc_Field is True
    assert ALG.has_assoc_Field is True
    assert ZZ[x].has_assoc_Field is True
    assert QQ[x].has_assoc_Field is True
    assert ZZ[x, y].has_assoc_Field is True
    assert QQ[x, y].has_assoc_Field is True
    assert EX.get_field() == EX
    assert ZZ.get_field() == QQ
    assert QQ.get_field() == QQ
    assert RR.get_field() == RR
    assert ALG.get_field() == ALG
    assert ZZ[x].get_field() == ZZ.frac_field(x)
    assert QQ[x].get_field() == QQ.frac_field(x)
    assert ZZ[x, y].get_field() == ZZ.frac_field(x, y)
    assert QQ[x, y].get_field() == QQ.frac_field(x, y)
def test_Domain_get_exact():
    """get_exact maps inexact domains (RR) to QQ and fixes exact ones."""
    assert EX.get_exact() == EX
    assert ZZ.get_exact() == ZZ
    assert QQ.get_exact() == QQ
    assert RR.get_exact() == QQ
    assert ALG.get_exact() == ALG
    assert ZZ[x].get_exact() == ZZ[x]
    assert QQ[x].get_exact() == QQ[x]
    assert ZZ[x, y].get_exact() == ZZ[x, y]
    assert QQ[x, y].get_exact() == QQ[x, y]
    assert ZZ.frac_field(x).get_exact() == ZZ.frac_field(x)
    assert QQ.frac_field(x).get_exact() == QQ.frac_field(x)
    assert ZZ.frac_field(x, y).get_exact() == ZZ.frac_field(x, y)
    assert QQ.frac_field(x, y).get_exact() == QQ.frac_field(x, y)
def test_Domain_convert():
    """Conversion of floats and ring elements into a domain."""
    # Tiny floats convert to an exact rational, not zero.
    assert QQ.convert(10e-52) == QQ(1684996666696915, 1684996666696914987166688442938726917102321526408785780068975640576)
    R, x = ring("x", ZZ)
    assert ZZ.convert(x - x) == 0
    assert ZZ.convert(x - x, R.to_domain()) == 0
def test_PolynomialRing__init():
    """A polynomial ring with zero generators is constructible."""
    R, = ring("", ZZ)
    assert ZZ.poly_ring() == R.to_domain()
def test_FractionField__init():
    """A fraction field with zero generators is constructible."""
    F, = field("", ZZ)
    assert ZZ.frac_field() == F.to_domain()
def test_inject():
    """inject adds generators; re-injecting an existing one is an error."""
    assert ZZ.inject(x, y, z) == ZZ[x, y, z]
    assert ZZ[x].inject(y, z) == ZZ[x, y, z]
    assert ZZ.frac_field(x).inject(y, z) == ZZ.frac_field(x, y, z)
    raises(GeneratorsError, lambda: ZZ[x].inject(x))
def test_Domain_map():
    """Domain.map converts flat and nested sequences element-wise."""
    seq = ZZ.map([1, 2, 3, 4])
    assert all(ZZ.of_type(elt) for elt in seq)
    seq = ZZ.map([[1, 2, 3, 4]])
    assert all(ZZ.of_type(elt) for elt in seq[0]) and len(seq) == 1
def test_Domain___eq__():
    """Domain equality compares both generators and ground domain."""
    assert (ZZ[x, y] == ZZ[x, y]) is True
    assert (QQ[x, y] == QQ[x, y]) is True
    assert (ZZ[x, y] == QQ[x, y]) is False
    assert (QQ[x, y] == ZZ[x, y]) is False
    assert (ZZ.frac_field(x, y) == ZZ.frac_field(x, y)) is True
    assert (QQ.frac_field(x, y) == QQ.frac_field(x, y)) is True
    assert (ZZ.frac_field(x, y) == QQ.frac_field(x, y)) is False
    assert (QQ.frac_field(x, y) == ZZ.frac_field(x, y)) is False
    # A default-precision RealField equals the prebuilt RR.
    assert RealField()[x] == RR[x]
def test_Domain__algebraic_field():
    """algebraic_field builds QQ-extensions and composes extensions."""
    alg = ZZ.algebraic_field(sqrt(2))
    assert alg.ext.minpoly == Poly(x**2 - 2)
    assert alg.dom == QQ
    alg = QQ.algebraic_field(sqrt(2))
    assert alg.ext.minpoly == Poly(x**2 - 2)
    assert alg.dom == QQ
    # Extending QQ(sqrt(2)) by sqrt(3) gives a degree-4 primitive element.
    alg = alg.algebraic_field(sqrt(3))
    assert alg.ext.minpoly == Poly(x**4 - 10*x**2 + 1)
    assert alg.dom == QQ
def test_PolynomialRing_from_FractionField():
    """Converting field elements to a ring works only when the denominator
    is a unit of the ground domain; otherwise None is returned."""
    F, x,y = field("x,y", ZZ)
    R, X,Y = ring("x,y", ZZ)
    f = (x**2 + y**2)/(x + 1)
    g = (x**2 + y**2)/4
    h = x**2 + y**2
    # Polynomial denominator: no ring representation.
    assert R.to_domain().from_FractionField(f, F.to_domain()) is None
    assert R.to_domain().from_FractionField(g, F.to_domain()) == X**2/4 + Y**2/4
    assert R.to_domain().from_FractionField(h, F.to_domain()) == X**2 + Y**2
    F, x,y = field("x,y", QQ)
    R, X,Y = ring("x,y", QQ)
    f = (x**2 + y**2)/(x + 1)
    g = (x**2 + y**2)/4
    h = x**2 + y**2
    assert R.to_domain().from_FractionField(f, F.to_domain()) is None
    assert R.to_domain().from_FractionField(g, F.to_domain()) == X**2/4 + Y**2/4
    assert R.to_domain().from_FractionField(h, F.to_domain()) == X**2 + Y**2
def test_FractionField_from_PolynomialRing():
    """QQ-ring elements convert to a ZZ fraction field by clearing denominators."""
    R, x,y = ring("x,y", QQ)
    F, X,Y = field("x,y", ZZ)
    f = 3*x**2 + 5*y**2
    g = x**2/3 + y**2/5
    assert F.to_domain().from_PolynomialRing(f, R.to_domain()) == 3*X**2 + 5*Y**2
    assert F.to_domain().from_PolynomialRing(g, R.to_domain()) == (5*X**2 + 3*Y**2)/15
def test_FF_of_type():
    """of_type distinguishes elements of different finite fields."""
    assert FF(3).of_type(FF(3)(1)) is True
    assert FF(5).of_type(FF(5)(3)) is True
    assert FF(5).of_type(FF(7)(3)) is False
def test___eq__():
    """Composites over different ground domains compare unequal."""
    assert not QQ[x] == ZZ[x]
    assert not QQ.frac_field(x) == ZZ.frac_field(x)
def test_RealField_from_sympy():
    """RR.convert handles sympy exact numbers, Floats, and evaluable exprs."""
    assert RR.convert(S.Zero) == RR.dtype(0)
    assert RR.convert(S(0.0)) == RR.dtype(0.0)
    assert RR.convert(S.One) == RR.dtype(1)
    assert RR.convert(S(1.0)) == RR.dtype(1.0)
    assert RR.convert(sin(1)) == RR.dtype(sin(1).evalf())
def test_not_in_any_domain():
    """Illegal values (nan, infinities, symbols) are rejected by every domain."""
    check = illegal + [x] + [
        float(i) for i in illegal if i != S.ComplexInfinity]
    for dom in (ZZ, QQ, RR, CC, EX):
        for i in check:
            # EX is the one domain that does accept a bare symbol.
            if i == x and dom == EX:
                continue
            assert i not in dom, (i, dom)
            raises(CoercionFailed, lambda: dom.convert(i))
def test_ModularInteger():
    """Arithmetic, comparison and inversion of FF(p) modular integers.

    Each assertion rebinds ``a`` to the result of one operation and then
    checks both its type (stays in the field) and its reduced value.
    """
    F3 = FF(3)
    # construction from ints and from field elements reduces mod 3
    a = F3(0)
    assert isinstance(a, F3.dtype) and a == 0
    a = F3(1)
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(2)
    assert isinstance(a, F3.dtype) and a == 2
    a = F3(3)
    assert isinstance(a, F3.dtype) and a == 0
    a = F3(4)
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(F3(0))
    assert isinstance(a, F3.dtype) and a == 0
    a = F3(F3(1))
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(F3(2))
    assert isinstance(a, F3.dtype) and a == 2
    a = F3(F3(3))
    assert isinstance(a, F3.dtype) and a == 0
    a = F3(F3(4))
    assert isinstance(a, F3.dtype) and a == 1
    # unary minus
    a = -F3(1)
    assert isinstance(a, F3.dtype) and a == 2
    a = -F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    # addition, including mixed int operands on either side
    a = 2 + F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(2) + 2
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(2) + F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(2) + F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    # subtraction
    a = 3 - F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(3) - 2
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(3) - F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(3) - F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    # multiplication
    a = 2*F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(2)*2
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(2)*F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(2)*F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    # division (multiplication by modular inverse)
    a = 2/F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(2)/2
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(2)/F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(2)/F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    # modulo
    a = 1 % F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(1) % 2
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(1) % F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(1) % F3(2)
    assert isinstance(a, F3.dtype) and a == 1
    # exponentiation, including huge and negative exponents
    a = F3(2)**0
    assert isinstance(a, F3.dtype) and a == 1
    a = F3(2)**1
    assert isinstance(a, F3.dtype) and a == 2
    a = F3(2)**2
    assert isinstance(a, F3.dtype) and a == 1
    F7 = FF(7)
    a = F7(3)**100000000000
    assert isinstance(a, F7.dtype) and a == 4
    a = F7(3)**-100000000000
    assert isinstance(a, F7.dtype) and a == 2
    a = F7(3)**S(2)
    assert isinstance(a, F7.dtype) and a == 2
    # truthiness follows the reduced value
    assert bool(F3(3)) is False
    assert bool(F3(4)) is True
    # inverses in FF(5)
    F5 = FF(5)
    a = F5(1)**(-1)
    assert isinstance(a, F5.dtype) and a == 1
    a = F5(2)**(-1)
    assert isinstance(a, F5.dtype) and a == 3
    a = F5(3)**(-1)
    assert isinstance(a, F5.dtype) and a == 2
    a = F5(4)**(-1)
    assert isinstance(a, F5.dtype) and a == 4
    # comparisons work on reduced representatives
    assert (F5(1) < F5(2)) is True
    assert (F5(1) <= F5(2)) is True
    assert (F5(1) > F5(2)) is False
    assert (F5(1) >= F5(2)) is False
    assert (F5(3) < F5(2)) is False
    assert (F5(3) <= F5(2)) is False
    assert (F5(3) > F5(2)) is True
    assert (F5(3) >= F5(2)) is True
    assert (F5(1) < F5(7)) is True
    assert (F5(1) <= F5(7)) is True
    assert (F5(1) > F5(7)) is False
    assert (F5(1) >= F5(7)) is False
    assert (F5(3) < F5(7)) is False
    assert (F5(3) <= F5(7)) is False
    assert (F5(3) > F5(7)) is True
    assert (F5(3) >= F5(7)) is True
    assert (F5(1) < 2) is True
    assert (F5(1) <= 2) is True
    assert (F5(1) > 2) is False
    assert (F5(1) >= 2) is False
    assert (F5(3) < 2) is False
    assert (F5(3) <= 2) is False
    assert (F5(3) > 2) is True
    assert (F5(3) >= 2) is True
    assert (F5(1) < 7) is True
    assert (F5(1) <= 7) is True
    assert (F5(1) > 7) is False
    assert (F5(1) >= 7) is False
    assert (F5(3) < 7) is False
    assert (F5(3) <= 7) is False
    assert (F5(3) > 7) is True
    assert (F5(3) >= 7) is True
    # zero (and multiples of p) have no inverse; FF needs a valid modulus
    raises(NotInvertible, lambda: F5(0)**(-1))
    raises(NotInvertible, lambda: F5(5)**(-1))
    raises(ValueError, lambda: FF(0))
    raises(ValueError, lambda: FF(2.1))
def test_QQ_int():
    """int() of a QQ element truncates toward zero, even for huge operands."""
    assert int(QQ(2**2000, 3**1250)) == 455431
    assert int(QQ(2**100, 3)) == 422550200076076467165567735125
def test_RR_double():
    """Small doubles survive conversion to RR without underflowing to zero."""
    assert RR(3.14) > 1e-50
    assert RR(1e-13) > 1e-50
    assert RR(1e-14) > 1e-50
    assert RR(1e-15) > 1e-50
    assert RR(1e-20) > 1e-50
    assert RR(1e-40) > 1e-50
def test_RR_Float():
    """RR conversion respects the precision of the target RealField."""
    f1 = Float("1.01")
    f2 = Float("1.0000000000000000000001")
    assert f1._prec == 53
    assert f2._prec == 80
    assert RR(f1)-1 > 1e-50
    assert RR(f2)-1 < 1e-50  # RR's precision is lower than f2's
    RR2 = RealField(prec=f2._prec)
    assert RR2(f1)-1 > 1e-50
    assert RR2(f2)-1 > 1e-50  # RR's precision is equal to f2's
def test_CC_double():
    """Small doubles survive conversion to CC in both real and imaginary parts."""
    assert CC(3.14).real > 1e-50
    assert CC(1e-13).real > 1e-50
    assert CC(1e-14).real > 1e-50
    assert CC(1e-15).real > 1e-50
    assert CC(1e-20).real > 1e-50
    assert CC(1e-40).real > 1e-50
    assert CC(3.14j).imag > 1e-50
    assert CC(1e-13j).imag > 1e-50
    assert CC(1e-14j).imag > 1e-50
    assert CC(1e-15j).imag > 1e-50
    assert CC(1e-20j).imag > 1e-50
    assert CC(1e-40j).imag > 1e-50
| |
#!/usr/bin/env python
"""
Web Service Simulator
@author: Matthew Kennard <matthew.kennard@oneresult.co.uk>
"""
import BaseHTTPServer
import sys
import os
import time
import random
import re
import types
from urlparse import urlparse, parse_qs
from optparse import OptionParser
import ConfigParser
import string
import subprocess
import tempfile
import traceback
import StringIO
import code
from fsevents import Observer
from fsevents import Stream
# Address the server will listen on
IP_ADDRESS = '127.0.0.1'
# Port the server will listen on (80 will require being run as root)
PORT = 8000
call_handler = None
httpd = None
class ParameterMatch(object):
    """
    Represents one v_<key> = [!~]<value> entry from a call definition.

    Decides whether a submitted key/value pair satisfies this expected
    parameter.  The match may be inverted (succeeds when the value is
    anything *but* the configured one) or optional (succeeds when no
    value was supplied at all).
    """
    def __init__(self, key, value, inverse_match=False, optional=False):
        """
        @param key: parameter name this matcher applies to
        @param value: the value considered a match
        @param inverse_match: succeed when the value is NOT `value`
        @param optional: succeed when the value is absent (None)
        """
        self.key = key
        self.value = value
        self.inverse_match = inverse_match
        self.optional = optional
    def match(self, possible_key, possible_value):
        """
        @return: True when possible_value satisfies this matcher
        (honouring inverse_match and optional), False otherwise
        """
        if self.key != possible_key:
            return False
        if possible_value is None and self.optional:
            return True
        # XOR the comparison with the inversion flag: a normal matcher
        # wants equality, an inverted one wants inequality.
        return (self.value == possible_value) != self.inverse_match
class Response(object):
    """
    Corresponds to a particular response that a call might give.

    A Call owns an ordered list of these; the first response whose
    parameter matchers all accept the request data answers the request.
    """
    def __init__(self, name, response_string, content_type, status, parameters):
        """
        @param name: The name of the call
        @param response_string: The response string which should be returned
        if the response matches
        @param content_type: MIME type of the response, e.g. text/json
        @param status: The HTTP status code which should be returned if the
        response matches
        @param parameters: List of ParameterMatch objects which must be
        matched for the response to match
        """
        self.name = name
        self.response_string = response_string
        self.content_type = content_type
        self.status = status
        self.parameters = parameters
    def generate_response(self, data_dict):
        # Static responses ignore the request data; subclasses use it.
        return (self.status, self.response_string, self.content_type)
    def match(self, data_dict):
        """
        Try and match a response against POST and GET values
        @param data_dict: Dictionary taken from the POST and GET values
        @return: (HTTP status code, response string, content type). If this
        response does not match the parameters passed in the data_dict then
        the status code will be 0 (the "no match" sentinel checked by
        Call.handle)
        """
        for parameter in self.parameters:
            value = data_dict.get(parameter.key)
            # parse_qs yields one-element lists; compare the first item only
            if type(value) == types.ListType:
                value = value[0]
            if not parameter.match(parameter.key, value):
                print '%s does not match %s' % (parameter.key, value)
                return (0, '', 'text/plain')
        # TODO: Substitute data_dict into response_string
        return self.generate_response(data_dict)
class ResponseCommand(Response):
    """Response whose body is produced by running a shell command."""
    def __init__(self, name, response_command, content_type, status, parameters, working_dir):
        """
        @param name: The name of the call
        @param response_command: A shell command which will be run the stdout
        from which will be returned. The data_dict will be used to substitute
        into the command
        @param content_type: e.g. text/json
        @param status: The HTTP status code which should be returned if the
        response matches
        @param parameters: List of ParameterMatch objects which must be
        matched for the response to match
        @param working_dir: Directory the command is executed in (the call
        definition's directory)
        """
        self.name = name
        # Template so $key placeholders can be filled from request data
        self.response_command = string.Template(response_command)
        self.content_type = content_type
        self.status = status
        self.parameters = parameters
        self.working_dir = working_dir
    def generate_response(self, data_dict):
        # safe_substitute leaves unknown $placeholders untouched instead of
        # raising, so a request missing a key still produces a runnable command
        command = self.response_command.safe_substitute(data_dict)
        p = subprocess.Popen(command,
                             cwd=self.working_dir,
                             shell=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        if p.returncode == 0:
            return (self.status, stdout, self.content_type)
        else:
            # A failing command is reported and treated as "no match"
            # (status 0) so Call.handle can fall through to a 500
            print 'Command "%s" returned non-zero exit status: %d' % (command, p.returncode)
            print stdout
            print stderr
            return (0, '', 'text/plain')
class PythonResult(object):
    """Mutable result holder handed to embedded Python responses.

    The executed snippet may overwrite `status` and `content_type`
    to change what the HTTP layer sends back.
    """
    def __init__(self):
        # Start from a plain successful response with no explicit type.
        self.status = 200
        self.content_type = None
class ResponsePython(Response):
    """Response whose body is produced by executing embedded Python code."""
    def __init__(self, name, response_python, content_type, status, parameters, working_dir):
        """
        @param name: The name of the call
        @param response_python: The Python code to execute. The data_dict will be available
        in the global variable data
        @param content_type: e.g. text/json
        @param status: The HTTP status code which should be returned if the
        response matches
        @param parameters: List of ParameterMatch objects which must be
        matched for the response to match
        @param working_dir: Directory of the call definition (kept for parity
        with ResponseCommand; not referenced when executing the code)
        """
        self.name = name
        self.response_python = self.process_python(response_python)
        self.content_type = content_type
        self.status = status
        self.parameters = parameters
        self.working_dir = working_dir
    def process_python(self, python):
        # Since config files are trimmed of whitespace normal Python indentation
        # won't work. So instead of spaces .'s are used. If a line is prefixed
        # with .'s then these should be converted to whitespace
        return re.sub('^(\.*)', lambda match: ' ' * len(match.group(0)), python, flags=re.MULTILINE)
    def generate_response(self, data_dict):
        # Unwrap the one-element lists produced by parse_qs so the embedded
        # code sees plain strings in `data`
        data = {}
        for k, v in data_dict.items():
            if type(v) == types.ListType:
                data[k] = v[0]
            else:
                data[k] = v
        # If the Python code wants to return anything then it will need to write
        # to the stream out
        out = StringIO.StringIO()
        result = PythonResult()
        # The Python code can modify the status code and content type by modifying result
        result.content_type = self.content_type
        result.status = self.status
        interpreter = code.InteractiveConsole({'data': data, 'out': out, 'result': result})
        for line in self.response_python.split('\n'):
            interpreter.push(line)
        # Trailing blank line closes any still-open block in the console
        interpreter.push('\n')
        return (result.status, out.getvalue(), result.content_type)
class Call(object):
    """
    A call represents a web service API call and is defined in a call file
    """
    def __init__(self, call_definition_file):
        """
        @param call_definition_file: The file defining the call
        """
        # self.path holds the raw path string during parsing and is
        # replaced by a compiled regex at the end of read_definition_file
        self.path = None
        self.path_placeholders = []
        self.method = None
        self.timeout = 0
        self.timeout_perc = 0
        self.responses = []
        self.read_definition_file(call_definition_file)
    def match(self, path, method):
        """
        @return: True if this call matches a particular path and HTTP method
        """
        return self.path.match(path) and method == self.method
    def handle(self, path, body, data_dict):
        """
        Handle the call given the HTTP body and parameters (data_dict)
        @param path: Request path, used to extract placeholder values
        @param body: HTTP body
        @param data_dict: GET, POST and path values
        @return: (HTTP status code, response string, content type). If no
        responses match given the data_dict then a HTTP status 500 internal
        server error will be returned
        """
        # Add any placeholders as parameters to data_dict
        placeholder_values = self.path.match(path).groups()
        for i, placeholder_value in enumerate(placeholder_values):
            data_dict[self.path_placeholders[i]] = placeholder_value
        # First response reporting a non-zero status wins
        for response in self.responses:
            status, response_string, content_type = response.match(data_dict)
            if status > 0:
                # See if a timeout should be simulated
                if random.random() < self.timeout_perc:
                    print 'Simulating timeout'
                    time.sleep(self.timeout)
                return (status, response_string, content_type)
        print '=' * 80
        print 'Error returning 500'
        print data_dict
        return (500, 'Internal server error', 'text/plain')
    def read_definition_file(self, call_definition_file):
        """
        Read the call definition file and use it to initialise this object
        @param call_definition_file: Path to the file defining the call
        """
        # Constructor defaults apply when a section omits the option
        config = ConfigParser.RawConfigParser(
            {'timeout': 0,
             'content_type': 'text/plain',
             }
        )
        config.read(call_definition_file)
        # Read call section
        self.path = config.get('call', 'path')
        self.method = config.get('call', 'method')
        self.timeout = config.getfloat('call', 'timeout')
        if self.timeout:
            self.timeout_perc = config.getfloat('call', 'timeout_perc')
        else:
            self.timeout = 0
        response_names = [response.strip() for response in config.get('call', 'responses').split(',')]
        working_dir = os.path.dirname(call_definition_file)
        # Read the individual response sections
        for response_name in response_names:
            response = None
            response_string = None
            response_command = None
            response_python = None
            content_type = config.get(response_name, 'content_type')
            # A response body may be given inline, as a shell command, as a
            # file to read, or as embedded Python code (first option wins)
            if config.has_option(response_name, 'response'):
                response_string = config.get(response_name, 'response')
            elif config.has_option(response_name, 'response_command'):
                response_command = config.get(response_name, 'response_command')
            elif config.has_option(response_name, 'response_file'):
                response_file = config.get(response_name, 'response_file')
                if response_file is not None:
                    # Relative to the call definition's own directory
                    path = os.path.join(working_dir, response_file)
                    response_string = open(path, 'rb').read()
                else:
                    print 'Response file does not exist!'
                    sys.exit(1)
            elif config.has_option(response_name, 'response_python'):
                response_python = config.get(response_name, 'response_python')
            status = config.getint(response_name, 'status')
            parameters = []
            # v_<key> options define the parameters that must match; a '!'
            # value prefix inverts the match and '~' makes it optional
            for name, value in config.items(response_name):
                if not name.startswith('v_'):
                    continue
                key = name[2:]
                inverse_match = False
                optional = False
                if value.startswith('!'):
                    inverse_match = True
                    value = value[1:]
                elif value.startswith('~'):
                    optional = True
                    value = value[1:]
                parameter = ParameterMatch(key, value, inverse_match, optional)
                parameters.append(parameter)
            if response_string is not None:
                response = Response(response_name, response_string, content_type, status, parameters)
            elif response_command is not None:
                response = ResponseCommand(response_name, response_command, content_type, status, parameters, working_dir)
            elif response_python is not None:
                response = ResponsePython(response_name, response_python, content_type, status, parameters, working_dir)
            self.responses.append(response)
        # Convert the path into a regular expression: each $placeholder
        # becomes a capture group, and handle() feeds the captured text back
        # into data_dict under the placeholder's name
        self.path_placeholders = []
        for placeholder in re.findall('\$([A-Za-z_][A-Za-z0-9_\.]*)', self.path):
            self.path = self.path.replace('$' + placeholder, '([A-Za-z0-9@_\.]+)')
            self.path_placeholders.append(placeholder)
        self.path = re.compile(self.path)
class CallHandler(object):
"""
Passes the HTTP request to the appropriate Call to be handled. If no
appropriate Call can be found then a 500 internal server error is returned
"""
def __init__(self, call_definition_dir):
"""
@param call_definition_dir: The set of calls which the server will
handle
"""
self.call_definition_dir = call_definition_dir
self.calls = []
self.read_call_definitions()
def read_call_definitions(self):
"""
Read in all the call definitions
"""
for definition_file in os.listdir(self.call_definition_dir):
if definition_file.startswith('.'):
continue
definition_path = os.path.join(self.call_definition_dir, definition_file)
try:
self.calls.append(Call(definition_path))
except:
print 'Error reading call definition %s' % (definition_path, )
traceback.print_exc()
def handle_call(self, command, path, body, data_dict):
"""
Handle a web service API call
@param command: HTTP command - e.g. GET
@param path: path, e.g. /login
@param body: the request body
@param data_dict: A dictionary of GET and POST values
"""
for call in self.calls:
if call.match(path, command):
return call.handle(path, body, data_dict)
break
print '=' * 80
print 'Error returning 500'
print data_dict
return (500, 'Internal server error', 'text/plain')
class HttpRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """
    Handles the HTTP requests and passes them off to the call handler
    """
    def handle_request(self):
        # NOTE(review): despite the name this is a plain helper, invoked
        # explicitly by the do_* methods below for every HTTP verb.
        command = self.command
        path = self.path
        content_length = self.headers.getheader('content-length')
        # For writing body to a temporary file
        # (delete=False: the file is deliberately left behind so response
        # handlers can read it via the _body_file parameter)
        tmp = tempfile.NamedTemporaryFile(delete=False)
        if content_length:
            body = self.rfile.read(int(content_length))
            data_dict = parse_qs(body)
        else:
            data_dict = {}
            body = ''
        # Write body to a temporary file
        data_dict['_body_file'] = tmp.name
        tmp.write(body)
        tmp.close()
        # Update the data dictionary with the query parameters
        data_dict.update(parse_qs(urlparse(path).query))
        path_minus_params = urlparse(path).path.strip('/')
        # call_handler is the module-level CallHandler set up in main()
        status, response_string, content_type = call_handler.handle_call(command, path_minus_params, body, data_dict)
        self.send_response(status)
        self.send_header('Content-type', content_type)
        self.end_headers()
        self.wfile.write(response_string)
        self.wfile.write('\n')
        self.wfile.close()
    def do_GET(self):
        self.handle_request()
    def do_POST(self):
        self.handle_request()
    def do_PUT(self):
        self.handle_request()
    def do_DELETE(self):
        self.handle_request()
def definition_change(subpath, mask):
    """
    If it is detected that one of the call definition files has changed,
    one has been deleted or one has been added then this function will
    restart the server

    @param subpath: The path where the change was detected
    @param mask: fsevents change mask (unused)
    """
    # Ignore files which end in .db
    if subpath.endswith('.db'):
        return
    print 'Definition file changed in %s' % (subpath, )
    print 'Restarting server...'
    httpd.shutdown()
    # Replace the current process with a fresh copy of this script so all
    # call definitions are re-read from scratch
    os.execv(sys.executable, [sys.executable] + sys.argv)
def main():
    """Parse command-line options, watch the call definition directory for
    changes, and serve HTTP requests until interrupted."""
    usage = 'usage: %prog [options] CALL_DIRECTORY'
    parser = OptionParser(usage=usage)
    parser.add_option('-p', '--port', dest='port',
                      help='port to listen on')
    parser.add_option('-a', '--address', dest='address',
                      help='address to listen on')
    (options, args) = parser.parse_args()
    if len(args) != 1:
        parser.print_help()
        sys.exit(1)
    call_definition_path = args[0]
    # Fall back to the module-level defaults when not given on the CLI
    if options.port:
        port = int(options.port)
    else:
        port = PORT
    if options.address:
        ip_address = options.address
    else:
        ip_address = IP_ADDRESS
    # Monitor the call definition path to restart the
    # server if any of the files change, or new ones
    # are added
    observer = Observer()
    observer.start()
    stream = Stream(definition_change, call_definition_path)
    observer.schedule(stream)
    # The request handler and the restart hook reach these via globals
    global call_handler
    call_handler = CallHandler(call_definition_path)
    server_class = BaseHTTPServer.HTTPServer
    global httpd
    httpd = server_class((ip_address, port), HttpRequestHandler)
    print 'WebServiceSimulator started'
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        pass
    finally:
        print 'Shutting down web service simulator'
        httpd.server_close()
        sys.exit(0)


if __name__ == '__main__':
    main()
| |
import json
import os
import unittest
import mock
from diff_tripleo_builds import diff_builds
# execute w/ python -m unittest
# sys.path.append(os.path.abspath('..'))
class TestDiffTripleOBuilds(unittest.TestCase):
    """Exercises diff_builds.DiffBuilds package/container diffing against
    small inline package sets and the JSON/HTML fixtures under ./data."""

    def setUp(self):
        self.diff = diff_builds.DiffBuilds()
        # from get_logs, nice_list
        self.control_list = {'fribidi-1.0.4-8.el8',
                             'fribidi-1.0.5-9.el8',
                             'fribidi-1.0.5-11.el8',
                             'python3-pyasn1-modules-0.4.6-3.el8.noarch',
                             'lvm2-8:2.03.08-3.el8',
                             'python3-pip-wheel-9.0.3-16.el8',
                             'foo-8:2.8.08-3.el8'
                             }
        self.test_list = {'fribidi-1.0.5-8.el8',
                          'python3-pyasn1-modules-0.4.6-3.el8.noarch',
                          'lvm2-8:2.04.08-3.el8',
                          'python3-pip-wheel-9.0.3-16.el8',
                          'bar-8:2.8.08-3.el8'
                          }
        self.version_list = {'foobar-1.0.0-2.el8',
                             'foobar-1.0.1-2.el8',
                             'foobar-1.1.0-2.el8',
                             'foobar-1.1.0-99.el8'
                             }
        # NOTE(review): empty dict, not set(); it is only used for
        # membership tests so the distinction is harmless here
        self.ignore_packages_empty = {}
        self.rpms_control_json = {}
        self.rpms_test_json = {}
        # Fixture files live next to this test module
        full_path = os.path.dirname(os.path.abspath(__file__))
        with open(full_path + '/data/rpms_control.json') as json_file:
            self.rpms_control_json = json.load(json_file)
        with open(full_path + '/data/rpms_test.json') as json_file:
            self.rpms_test_json = json.load(json_file)
        with open(full_path + '/data/container_rpms') as file:
            self.container_rpms = file.read()
        with open(full_path + '/data/upstream_container_list') as file:
            self.upstream_container_html = file.read()
        with open(full_path + '/data/upstream2_container_list') as file:
            self.upstream2_container_html = file.read()
        with open(full_path + '/data/downstream_container_list') as file:
            self.downstream_container_html = file.read()

    def _mock_response(
            self,
            status=200,
            content="CONTENT",
            json_data=None,
            raise_for_status=None):
        """Build a mock requests response with the given status/content."""
        mock_resp = mock.Mock()
        # mock raise_for_status call w/optional error
        mock_resp.raise_for_status = mock.Mock()
        if raise_for_status:
            mock_resp.raise_for_status.side_effect = raise_for_status
        # set status code and content
        mock_resp.status_code = status
        mock_resp.content = content
        # add json data if provided
        if json_data:
            mock_resp.json = mock.Mock(
                return_value=json_data
            )
        return mock_resp

    @mock.patch('requests.get')
    def test_upstream_containers_dir(self, mock_get):
        # the containers directory is rendered in
        # different ways upstream/downstream
        html = self.upstream_container_html
        mock_resp = self._mock_response(content=html)
        mock_get.return_value = mock_resp
        containers = self.diff.get_directory_list("foo", "undercloud")
        self.assertEqual(len(containers), 72)
        self.assertEqual(containers[0], 'container-puppet-crond')
        self.assertEqual(containers[71], 'tempest_init_logs')

    @mock.patch('requests.get')
    def test_upstream_containers2_dir(self, mock_get):
        # the containers directory is rendered in
        # different ways upstream/downstream
        html = self.upstream2_container_html
        mock_resp = self._mock_response(content=html)
        mock_get.return_value = mock_resp
        containers = self.diff.get_directory_list("foo", "undercloud")
        self.assertEqual(len(containers), 70)
        self.assertEqual(containers[0], 'container-puppet-crond')
        self.assertEqual(containers[69], 'tempest_init_logs')

    @mock.patch('requests.get')
    def test_downstream_containers_dir(self, mock_get):
        # the containers directory is rendered in
        # different ways upstream/downstream
        html = self.downstream_container_html
        mock_resp = self._mock_response(content=html)
        mock_get.return_value = mock_resp
        containers = self.diff.get_directory_list("foo", "undercloud")
        self.assertEqual(len(containers), 38)
        self.assertEqual(containers[0], 'create_swift_temp_url_key')
        self.assertEqual(containers[37], 'zaqar_websocket')

    def test_parse_container_rpms(self):
        # start after the log file has been split
        # into container info and rpm_info
        dict_of_containers = {}
        container_info_temp = self.container_rpms.split("\n")
        dict_of_containers["test_container"] = container_info_temp
        parsed_list = self.diff.process_containers_step2(dict_of_containers)
        self.assertEqual(len(parsed_list['test_container']), 40)

    def test_parse_list_control(self):
        # parse_list groups (epoch, version, release) tuples by package name
        result = self.diff.parse_list(self.control_list)
        self.assertEqual(len(self.control_list), 7)
        self.assertEqual(len(result), 5)
        self.assertIn(('', '1.0.4', '8.el8'), result['fribidi'])
        self.assertIn(('', '1.0.5', '11.el8'), result['fribidi'])
        self.assertIn(('', '1.0.5', '9.el8'), result['fribidi'])

    def test_parse_list_test(self):
        result = self.diff.parse_list(self.test_list)
        self.assertEqual(len(self.test_list), 5)
        self.assertEqual(len(result), 5)

    def test_find_highest_version(self):
        packages = self.diff.parse_list(
            self.control_list, )
        result = self.diff.find_highest_version(packages)
        self.assertEqual(len(result), 5)
        self.assertNotIn('1.0.4', result['fribidi'])
        self.assertEqual(['1.0.5', '1.0.5-11.el8'], result['fribidi'])

    def test_find_highest_version_cpaas_edition(self):
        packages = self.diff.parse_list(
            self.control_list, )
        result = self.diff.find_highest_version(packages, cpaas=True)
        self.assertEqual(len(result), 5)
        self.assertNotIn('1.0.4', result['fribidi'])
        # 1.0.5-11 is really the highest but the cpaas diff ignores
        # the -11 and -9 are equal.
        self.assertIn('1.0.5', result['fribidi'])

    def test_nvr(self):
        packages = self.diff.parse_list(
            self.version_list, )
        result = self.diff.find_highest_version(packages)
        self.assertEqual(len(packages), 1)
        self.assertEqual(['1.1.0', '1.1.0-99.el8'], result['foobar'])

    def test_diff_packages(self):
        c_packages = self.diff.parse_list(
            self.control_list, self.ignore_packages_empty)
        t_packages = self.diff.parse_list(
            self.test_list, self.ignore_packages_empty)
        c_highest = self.diff.find_highest_version(c_packages)
        t_highest = self.diff.find_highest_version(t_packages)
        package_diff = self.diff.diff_packages(
            c_highest, t_highest)
        # ensure package diff has the right packages
        self.assertEqual(len(package_diff), 4)
        self.assertIn('foo', package_diff.keys())
        self.assertIn('bar', package_diff.keys())
        self.assertIn('lvm2', package_diff.keys())
        # ensure package in control but not test is correct
        self.assertEqual(['2.8.08', '8-2.8.08-3.el8'], package_diff['foo'][0])
        self.assertEqual(['0', 'not installed'], package_diff['foo'][1])
        # ensure package in test but not control is correct
        self.assertEqual(['0', 'not installed'], package_diff['bar'][0])
        self.assertEqual(['2.8.08', '8-2.8.08-3.el8'], package_diff['bar'][1])

    def test_ignore_packages(self):
        ignore_packages = {"fribidi", "python3-pyasn1-modules",
                           "lvm2", "python3-pip-wheel"
                           }
        c_packages = self.diff.parse_list(
            self.control_list, ignore_packages=ignore_packages)
        t_packages = self.diff.parse_list(
            self.test_list, ignore_packages=ignore_packages)
        c_highest = self.diff.find_highest_version(c_packages)
        t_highest = self.diff.find_highest_version(t_packages)
        package_diff = self.diff.diff_packages(
            c_highest, t_highest)
        package_foo = [['2.8.08', '8-2.8.08-3.el8'], ['0', 'not installed']]
        package_bar = [['0', 'not installed'], ['2.8.08', '8-2.8.08-3.el8']]
        self.assertEqual(len(package_diff), 2)
        self.assertEqual(package_foo, package_diff['foo'])
        self.assertEqual(package_bar, package_diff['bar'])

    def test_compose_dir(self):
        control_list = self.rpms_control_json
        test_list = self.rpms_test_json
        c_packages = self.diff.parse_compose(control_list)
        t_packages = self.diff.parse_compose(test_list)
        c_packages = self.diff.parse_list(c_packages)
        t_packages = self.diff.parse_list(t_packages)
        c_highest = self.diff.find_highest_version(c_packages)
        t_highest = self.diff.find_highest_version(t_packages)
        package_diff = self.diff.diff_packages(c_highest, t_highest)
        self.assertEqual(len(package_diff), 1045)


if __name__ == '__main__':
    unittest.main()
| |
'''
neutralContour.py
Copyright (C) 2010 Jeff Berry
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
'''
import sys, os, math, subprocess
import LabelWindow
#import matplotlib.pylab as p
from numpy import *
from mpl_toolkits.mplot3d.axes3d import Axes3D
from matplotlib import cm
from matplotlib.figure import Figure
from matplotlib.backends.backend_gtkagg import FigureCanvasGTKAgg as FigureCanvas
from scipy.io.wavfile import read as wavread
import pygtk
pygtk.require("2.0")
import gtk
import gtk.glade
import gobject
class NeutralTongue:
    """GTK viewer for traced ultrasound tongue-contour files.

    Builds up to four stacked panels: a 3-D linguagram of the raw
    contours, a neutral-subtracted polar surface, the audio waveform
    and a Praat-generated spectrogram.
    """
    def __init__(self, contours, neutral, SHOW_LINGUAGRAM, SHOW_NEUTRAL, SHOW_WAVEFORM, SHOW_SPECTROGRAM):
        '''center points determined by transforming the point (426, 393) several times
        with peterotron, and taking the average.

        @param contours: .csv file of traced contours (32 points per frame)
        @param neutral: .csv file holding the neutral tongue traces
        @param SHOW_*: booleans selecting which panels to build
        '''
        self.static_dir = os.getcwd() + '/'
        #self.centerX = 710
        #self.centerY = 638
        # these come from hand tuning to find the smallest range of y values of polar mags
        self.centerX = 665
        self.centerY = 525
        self.gladefile = self.static_dir + "LinguaViewer.glade"
        self.wTree = gtk.glade.XML(self.gladefile, "window1")
        self.win = self.wTree.get_widget("window1")
        self.win.set_title(contours)
        self.title = contours
        self.mainVBox = self.wTree.get_widget("vbox1")
        dic = { "on_window1_destroy": self.onDestroy,
                "on_tbPlay_clicked" : self.playSound,
                "on_tbSave_clicked" : self.onSave,
                "on_tbLabel_clicked": self.onLabel}
        self.wTree.signal_autoconnect(dic)
        self.X, self.Y = self.loadContours(contours)
        # audio is expected alongside the contour file, same basename
        self.wavname = contours[:-4] + ".wav"
        #Linguagram
        if (SHOW_LINGUAGRAM == True):
            x1 = array(self.X)
            y1 = array(self.Y)
            # Z holds the frame index for every one of the 32 points
            Z = []
            for i in range(len(self.X)):
                zs = []
                for j in range(32):
                    zs.append(i+1)
                Z.append(zs)
            z1 = array(Z)
            self.fig = Figure()
            canvas = FigureCanvas(self.fig)
            #ax = Axes3D(self.fig, rect=[-.23,-.2,1.447,1.4])
            ax = self.fig.add_subplot(1, 1, 1, projection='3d')
            self.fig.subplots_adjust(left=-0.23, bottom=0, right=1.215, top=1)
            ax.mouse_init()
            surf = ax.plot_surface(z1, -x1, -y1, rstride=1, cstride=1, cmap=cm.jet)
            ax.view_init(90,-90)
            canvas.show()
            canvas.set_size_request(600, 200)
            self.mainVBox.pack_start(canvas, True, True)
        #Neutral
        if (SHOW_NEUTRAL == True):
            cx, cy = self.getNeutral(neutral)
            cmags = self.makePolar(cx, cy)
            M = self.batchConvert2Polar(self.X, self.Y)
            #D = self.batchGetMinD(M, cmags)
            # fake point indices 1..32 for the x axis of the surface
            fakeX = []
            for i in range(len(M)):
                xs = []
                for j in range(1,33):
                    xs.append(j)
                fakeX.append(xs)
            x1 = array(fakeX)
            y1 = array(M)
            Z = []
            for i in range(len(M)):
                zs = []
                for j in range(32):
                    zs.append(i)
                Z.append(zs)
            z1 = array(Z)
            self.fig3 = Figure()
            canvas3 = FigureCanvas(self.fig3)
            ax = self.fig3.add_subplot(1, 1, 1, projection='3d')
            self.fig3.subplots_adjust(left=-0.23, bottom=0, right=1.215, top=1)
            ax.mouse_init()
            ax.plot_surface(z1, -x1, y1, rstride=1, cstride=1, cmap=cm.jet)
            ax.view_init(90,-90)
            canvas3.show()
            canvas3.set_size_request(600, 200)
            self.mainVBox.pack_start(canvas3, True, True)
        #Waveform
        # windowsize grows by 200px for each of the waveform/spectrogram panels
        windowsize = 0
        self.fig2 = Figure()
        canvas2 = FigureCanvas(self.fig2)
        if (SHOW_WAVEFORM == True):
            fs, snd = wavread(self.wavname)
            chan = snd[:,0]
            t=array(range(len(chan)))/float(fs);
            if SHOW_SPECTROGRAM == True:
                wavax = self.fig2.add_subplot(2, 1, 1)
            else:
                wavax = self.fig2.add_subplot(1, 1, 1)
            wavax.plot(t,chan,'black');
            wavax.set_xlim(0,max(t))
            windowsize += 200
        #Spectrogram
        if (SHOW_SPECTROGRAM == True):
            '''This calls Praat to get the spectrogram and adds it to the viewer'''
            specname = contours[:-4] + '.Spectrogram'
            cleanname = contours[:-4] + '.clean'
            cmd = ['/Applications/Praat.app/Contents/MacOS/Praat', self.static_dir + 'makeSpec.praat', self.wavname, specname]
            proc = subprocess.Popen(cmd)
            status = proc.wait()
            cmd2 = ['bash', self.static_dir + 'cleanspec.sh', specname, cleanname]
            proc2 = subprocess.Popen(cmd2)
            status2 = proc2.wait()
            # last line of the cleaned file gives the matrix dimensions
            f = open(cleanname, 'r').readlines()
            last = len(f)-1
            x = f[last].split('\t')
            rows = int(x[0])
            cols = int(x[1])
            img = zeros((rows, cols))
            for i in range(len(f)):
                x = f[i][:-1].split('\t')
                img[int(x[0])-1,int(x[1])-1] = float(x[2])
            img = log(img)
            if SHOW_WAVEFORM == True:
                specax = self.fig2.add_subplot(2, 1, 2)
            else:
                specax = self.fig2.add_subplot(1, 1, 1)
            specax.imshow(img, cmap=cm.gray_r, origin='lower', aspect='auto')
            windowsize += 200
        # show it
        if (SHOW_WAVEFORM == True) or (SHOW_SPECTROGRAM == True):
            canvas2.show()
            canvas2.set_size_request(600, windowsize)
            self.mainVBox.pack_start(canvas2, True, True)
        self.SHOW_LINGUAGRAM = SHOW_LINGUAGRAM
        self.SHOW_NEUTRAL = SHOW_NEUTRAL
        self.SHOW_WAVEFORM = SHOW_WAVEFORM
        self.SHOW_SPECTROGRAM = SHOW_SPECTROGRAM
        self.windowsize = windowsize

    def playSound(self, event):
        """Play the associated .wav via Praat."""
        cmd = ['/Applications/Praat.app/Contents/MacOS/Praat', self.static_dir + 'playSound.praat', self.wavname]
        proc = subprocess.Popen(cmd)
        status = proc.wait()

    def onSave(self, event):
        """Prompt for a filename and save a montage of the visible panels."""
        fc = gtk.FileChooserDialog(title='Save Image File', parent=None,
                                   action=gtk.FILE_CHOOSER_ACTION_SAVE,
                                   buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                                            gtk.STOCK_SAVE, gtk.RESPONSE_OK))
        g_directory = fc.get_current_folder()
        fc.set_current_folder(g_directory)
        fc.set_default_response(gtk.RESPONSE_OK)
        fc.set_select_multiple(False)
        response = fc.run()
        if response == gtk.RESPONSE_OK:
            savename = fc.get_filename()
            g_directory = fc.get_current_folder()
            self.saveNcrop(savename)
        fc.destroy()

    def saveNcrop(self, savename):
        """Render each visible figure to tmpN.png, crop them with
        ImageMagick and montage them vertically into savename."""
        # clear stale temporaries from a previous save
        if os.path.exists("tmp1.png"):
            p = subprocess.Popen(['rm', 'tmp1.png'])
            p.wait()
        if os.path.exists("tmp2.png"):
            p = subprocess.Popen(['rm', 'tmp2.png'])
            p.wait()
        if os.path.exists("tmp3.png"):
            p = subprocess.Popen(['rm', 'tmp3.png'])
            p.wait()
        if (self.SHOW_LINGUAGRAM == True):
            self.fig.savefig("tmp1.png", format="png", pad_inches=0)
            resize = ['convert', 'tmp1.png', '-resize', '800x250!', 'tmp1.png']
            p = subprocess.Popen(resize)
            p.wait()
            chop = ['convert', 'tmp1.png', '-gravity', 'South', '-chop', '0x40', 'tmp1.png']
            p = subprocess.Popen(chop)
            s = p.wait()
        if (self.SHOW_NEUTRAL == True):
            self.fig3.savefig("tmp2.png", format="png", pad_inches=0)
            resize = ['convert', 'tmp2.png', '-resize', '800x250!', 'tmp2.png']
            p = subprocess.Popen(resize)
            p.wait()
            chop = ['convert', 'tmp2.png', '-gravity', 'South', '-chop', '0x40', 'tmp2.png']
            p = subprocess.Popen(chop)
            s = p.wait()
            if (self.SHOW_LINGUAGRAM == True):
                # extra top chop so the two 3-D panels butt together cleanly
                chop = ['convert', 'tmp2.png', '-chop', '0x40', 'tmp2.png']
                p = subprocess.Popen(chop)
                s = p.wait()
        if (self.SHOW_WAVEFORM == True) or (self.SHOW_SPECTROGRAM == True):
            self.fig2.savefig("tmp3.png", format="png", pad_inches=0)
            chop = ['convert', 'tmp3.png', '-chop', '0x40', 'tmp3.png']
            p = subprocess.Popen(chop)
            s = p.wait()
        cmd = ['montage', 'tmp*.png', '-mode', 'Concatenate', '-tile', '1x', savename]
        proc = subprocess.Popen(cmd)
        status = proc.wait()
        cmd = ['convert', savename, '-resize', '600x', savename]
        proc = subprocess.Popen(cmd)
        status = proc.wait()

    def onLabel(self, event):
        """Save the current view as the label-window background and open
        the labelling window in place of this one."""
        self.saveNcrop('labelwindowbackground.png')
        self.win.destroy()
        #LabelWindow.LabelWindow('labelwindowbackground.png', self.title, len(self.X))
        LabelWindow.LabelWindow([self.title], self.SHOW_LINGUAGRAM, self.SHOW_NEUTRAL, self.SHOW_WAVEFORM, self.SHOW_SPECTROGRAM)

    def onDestroy(self, event):
        self.win.destroy()

    def getNeutral(self, infile):
        '''Finds the neutral tongue by averaging the values of the neutral tongue
        traces.

        Assumes line 0 is a header and point i of trace t sits on line
        i + 32*t (1-based points) -- TODO confirm against the tracer output.
        '''
        f = open(infile, 'r').readlines()
        xaves = []
        yaves = []
        for i in range(1,33):
            # Fix: xs/ys used to be re-created inside the inner loop, so
            # only the final trace ever contributed to the "average".
            xs = []
            ys = []
            for j in range(i,len(f),32):
                l = f[j][:-1].split('\t')
                # NOTE(review): eval() on file contents -- acceptable for
                # trusted lab data, unsafe on untrusted input
                xs.append(eval(l[2]))
                ys.append(eval(l[3]))
            xaves.append(sum(xs)/len(xs))
            yaves.append(sum(ys)/len(ys))
        return xaves, yaves

    def makePolar(self, ContourX, ContourY):
        """Distance of each contour point from (self.centerX, self.centerY)."""
        mags = []
        for i in range(len(ContourX)):
            dist = math.sqrt((ContourX[i]-self.centerX)**2 + (ContourY[i]-self.centerY)**2)
            mags.append(dist)
        return mags

    def testPolar(self, x, y, cx, cy):
        '''Use this to find better center coords for polar transform.
        '''
        mags = []
        for i in range(len(x)):
            dist = math.sqrt((x[i]-cx)**2 + (y[i]-cy)**2)
            mags.append(dist)
        #p.plot(range(32), mags)

    def loadContours(self, infile):
        '''Opens a .csv file and returns contents as matrices of x and y vectors --
        1 column of the x matrix corresponds to 1 column of y matrix, to make a single
        frame.
        '''
        f = open(infile, 'r').readlines()
        X = []
        Y = []
        for i in range(0,len(f),32):
            xs = []
            ys = []
            for j in range(32):
                l = f[i+j][:-1].split('\t')
                # NOTE(review): eval() on file contents -- trusted data only
                xs.append(eval(l[2]))
                ys.append(eval(l[3]))
            X.append(xs)
            Y.append(ys)
        return X, Y

    def vertDist(self, Y1, Y2):
        """Element-wise difference Y1[i] - Y2[i]."""
        ds = []
        for i in range(len(Y1)):
            ds.append(Y1[i]-Y2[i])
        return ds

    def subtractMinD(self, Contour):
        """Shrink every value toward zero by the smallest absolute value
        found in the contour.

        NOTE(review): the 1000 seed assumes no contour's minimum absolute
        value exceeds 1000 -- appears safe for this pixel-scale data.
        """
        ds = []
        minD = 1000
        for i in range(len(Contour)):
            if abs(Contour[i]) < minD:
                minD = abs(Contour[i])
        for j in range(len(Contour)):
            if Contour[j] < 0:
                ds.append(Contour[j]+minD)
            else:
                ds.append(Contour[j]-minD)
        return ds

    #def plotC(self, Contour):
    #    p.plot(range(len(Contour)), Contour)

    def batchConvert2Polar(self, X, Y):
        """makePolar applied to every frame."""
        M = []
        for i in range(len(X)):
            M.append(self.makePolar(X[i],Y[i]))
        return M

    def batchGetMinD(self, M, center):
        """Neutral-subtracted, min-normalised magnitudes for every frame."""
        D = []
        for i in range(len(M)):
            D.append(self.subtractMinD(self.vertDist(M[i], center)))
        return D

    def getFrame(self, filenames, token):
        """Return the lowest frame number listed for token in the index
        file (lines presumably look like 'token/..._<frame>xxxx' -- the
        frame is the second '_'-separated field after trimming 5 chars)."""
        f = open(filenames, 'r').readlines()
        frames = []
        for i in f:
            x = i.split('/')
            if x[0] == token:
                y = i[:-5].split('_')
                frames.append(int(y[1]))
        return min(frames)
if __name__ == "__main__":
    #demo(sys.argv[1], 'neutral.csv')
    # Launch the viewer on the contour file named on the command line with
    # all four panels (linguagram, neutral, waveform, spectrogram) enabled.
    NeutralTongue(sys.argv[1], 'neutral.csv', True, True, True, True)
    gtk.main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.