gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# -*- coding: utf-8 -*-
import sys, os, re

# Check Sphinx version.
import sphinx


def _version_tuple(version_string):
    """Return the leading numeric components of a version string as a tuple.

    Comparing tuples of ints avoids the pitfalls of lexicographic string
    comparison (e.g. "1.11" < "1.2" as strings, but (1, 11) > (1, 2)).
    """
    return tuple(int(part) for part in re.findall(r'\d+', version_string)[:2])


if _version_tuple(sphinx.__version__) < (1, 1):
    raise RuntimeError("Sphinx 1.1 or newer required")

needs_sphinx = '1.1'
# -----------------------------------------------------------------------------
# General configuration
# -----------------------------------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
sys.path.insert(0, os.path.abspath('../sphinxext'))

extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax', 'numpydoc',
              'sphinx.ext.intersphinx', 'sphinx.ext.coverage',
              'sphinx.ext.autosummary']

# Determine if the matplotlib has a recent enough version of the
# plot_directive.
try:
    from matplotlib.sphinxext import plot_directive
except ImportError:
    use_matplotlib_plot_directive = False
else:
    try:
        # plot_directive.__version__ is an integer revision of the directive
        # itself, not the matplotlib release version.
        use_matplotlib_plot_directive = (plot_directive.__version__ >= 2)
    except AttributeError:
        use_matplotlib_plot_directive = False

if use_matplotlib_plot_directive:
    extensions.append('matplotlib.sphinxext.plot_directive')
else:
    # The documentation build depends on the plot directive; abort early
    # rather than produce a build with missing figures.
    raise RuntimeError("You need a recent enough version of matplotlib")
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General substitutions.
project = 'SciPy'
copyright = '2008-2014, The Scipy community'

# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
import scipy
# Short X.Y version: collapse development suffixes like ".dev-<hash>" to ".dev".
version = re.sub(r'\.dev-.*$', r'.dev', scipy.__version__)
# The full version, including alpha/beta/rc/dev tags.
release = scipy.__version__

# print() call form instead of the Python-2-only print statement, so this
# configuration also runs under Python 3.
print("Scipy (VERSION %s)" % (version,))
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
#unused_docs = []

# The reST default role (used for this markup: `text`) to use for all documents.
# "autolink" (from numpydoc) turns bare `name` references into links to the
# documented object when possible.
default_role = "autolink"

# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_dirs = []

# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -----------------------------------------------------------------------------
# HTML output
# -----------------------------------------------------------------------------

# The scipy-sphinx-theme checkout (git submodule) next to this source tree.
themedir = os.path.join(os.pardir, 'scipy-sphinx-theme', '_theme')
if os.path.isdir(themedir):
    html_theme = 'scipy'
    html_theme_path = [themedir]

    # 'tags' is injected into conf.py by Sphinx itself; passing "-t scipyorg"
    # on the sphinx-build command line selects the scipy.org site build.
    if 'scipyorg' in tags:
        # Build for the scipy.org website
        html_theme_options = {
            "edit_link": True,
            "sidebar": "right",
            "scipy_org_logo": True,
            "rootlinks": [("http://scipy.org/", "Scipy.org"),
                          ("http://docs.scipy.org/", "Docs")]
        }
    else:
        # Default build
        html_theme_options = {
            "edit_link": False,
            "sidebar": "left",
            "scipy_org_logo": False,
            "rootlinks": []
        }
        html_logo = '_static/scipyshiny_small.png'
        html_sidebars = {'index': 'indexsidebar.html'}
else:
    # Build without scipy.org sphinx theme present
    if 'scipyorg' in tags:
        raise RuntimeError("Get the scipy-sphinx-theme first, "
                           "via git submodule init & update")
    else:
        html_style = 'scipy_fallback.css'
        html_logo = '_static/scipyshiny_small.png'
        html_sidebars = {'index': 'indexsidebar.html'}

html_title = "%s v%s Reference Guide" % (project, version)
html_static_path = ['_static']
html_last_updated_fmt = '%b %d, %Y'

html_additional_pages = {}
html_use_modindex = True
html_copy_source = False
html_file_suffix = '.html'

htmlhelp_basename = 'scipy'

# Options for the pngmath extension (math rendered to PNG images).
pngmath_use_preview = True
pngmath_dvipng_args = ['-gamma', '1.5', '-D', '96', '-bg', 'Transparent']
# -----------------------------------------------------------------------------
# LaTeX output
# -----------------------------------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
_stdauthor = 'Written by the SciPy community'
latex_documents = [
    ('index', 'scipy-ref.tex', 'SciPy Reference Guide', _stdauthor, 'manual'),
    # ('user/index', 'scipy-user.tex', 'SciPy User Guide',
    #  _stdauthor, 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\usepackage{amsmath}
\DeclareUnicodeCharacter{00A0}{\nobreakspace}
% In the parameters etc. sections, align uniformly, and adjust label emphasis
\usepackage{expdlist}
\let\latexdescription=\description
\let\endlatexdescription=\enddescription
\renewenvironment{description}%
{\begin{latexdescription}[\setleftmargin{60pt}\breaklabel\setlabelstyle{\bfseries\itshape}]}%
{\end{latexdescription}}
% Make Examples/etc section headers smaller and more compact
\makeatletter
\titleformat{\paragraph}{\normalsize\normalfont\bfseries\itshape}%
{\py@NormalColor}{0em}{\py@NormalColor}{\py@NormalColor}
\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
\makeatother
% Save vertical space in parameter lists and elsewhere
\makeatletter
\renewenvironment{quote}%
{\list{}{\topsep=0pt%
\parsep \z@ \@plus\p@}%
\item\relax}%
{\endlist}
\makeatother
% Fix footer/header
\renewcommand{\chaptermark}[1]{\markboth{\MakeUppercase{\thechapter.\ #1}}{}}
\renewcommand{\sectionmark}[1]{\markright{\MakeUppercase{\thesection.\ #1}}}
'''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
latex_use_modindex = False


# -----------------------------------------------------------------------------
# Intersphinx configuration
# -----------------------------------------------------------------------------
# Map external documentation sets so :ref:/:py: cross-references resolve.
intersphinx_mapping = {
    'http://docs.python.org/dev': None,
    'http://docs.scipy.org/doc/numpy': None,
}
# -----------------------------------------------------------------------------
# Numpy extensions
# -----------------------------------------------------------------------------

# If we want to do a phantom import from an XML file for all autodocs
phantom_import_file = 'dump.xml'

# Generate plots for example sections
numpydoc_use_plots = True


# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------

# NOTE(review): this is a lexicographic string comparison of version numbers.
# It holds for every Sphinx version newer than the "1.1" minimum enforced at
# the top of the file, but a parsed tuple comparison would be more robust.
if sphinx.__version__ >= "0.7":
    import glob
    # Generate stub pages for every autosummary entry found in the .rst files.
    autosummary_generate = glob.glob("*.rst")


# -----------------------------------------------------------------------------
# Coverage checker
# -----------------------------------------------------------------------------
# Whitespace-separated regex lists; matching names are skipped by
# sphinx.ext.coverage when reporting undocumented objects.
coverage_ignore_modules = r"""
""".split()
coverage_ignore_functions = r"""
test($|_) (some|all)true bitwise_not cumproduct pkgload
generic\.
""".split()
coverage_ignore_classes = r"""
""".split()

coverage_c_path = []
coverage_c_regexes = {}
coverage_ignore_c_items = {}
#------------------------------------------------------------------------------
# Plot
#------------------------------------------------------------------------------

# Code prepended to every plot directive example; seeding the RNG keeps the
# generated figures reproducible across documentation builds.
plot_pre_code = """
import numpy as np
np.random.seed(123)
"""
plot_include_source = True
plot_formats = [('png', 96), 'pdf']
plot_html_show_formats = False

import math
# Golden ratio, used below to give figures pleasing proportions.
phi = (math.sqrt(5) + 1)/2

font_size = 13*72/96.0  # 13 px

plot_rcparams = {
    'font.size': font_size,
    'axes.titlesize': font_size,
    'axes.labelsize': font_size,
    'xtick.labelsize': font_size,
    'ytick.labelsize': font_size,
    'legend.fontsize': font_size,
    'figure.figsize': (3*phi, 3),
    'figure.subplot.bottom': 0.2,
    'figure.subplot.left': 0.2,
    'figure.subplot.right': 0.9,
    'figure.subplot.top': 0.85,
    'figure.subplot.wspace': 0.4,
    'text.usetex': False,
}

# NOTE(review): with the current matplotlib check above, a missing plot
# directive raises before reaching here, so this branch appears unreachable;
# kept for older fallback behavior.
if not use_matplotlib_plot_directive:
    import matplotlib
    matplotlib.rcParams.update(plot_rcparams)
# -----------------------------------------------------------------------------
# Source code links
# -----------------------------------------------------------------------------
import inspect
from os.path import relpath, dirname

# Prefer the Sphinx-bundled linkcode extension, then a standalone copy, then
# the numpydoc one; the first that imports successfully is used.
for name in ['sphinx.ext.linkcode', 'linkcode', 'numpydoc.linkcode']:
    try:
        __import__(name)
        extensions.append(name)
        break
    except ImportError:
        pass
else:
    # print() call form instead of the Python-2-only print statement, so this
    # configuration also runs under Python 3.
    print("NOTE: linkcode extension not found -- no links to source generated")
def linkcode_resolve(domain, info):
    """
    Determine the URL corresponding to a Python object.

    Parameters
    ----------
    domain : str
        Sphinx domain of the object; only 'py' is handled.
    info : dict
        Contains 'module' and 'fullname' identifying the object.

    Returns
    -------
    str or None
        GitHub URL of the object's source, or None if it cannot be located.
    """
    if domain != 'py':
        return None

    modname = info['module']
    fullname = info['fullname']

    submod = sys.modules.get(modname)
    if submod is None:
        return None

    # Walk the attribute path down to the object itself. Narrow exception
    # types replace the original bare "except:" clauses, which would also
    # have swallowed KeyboardInterrupt/SystemExit.
    obj = submod
    for part in fullname.split('.'):
        try:
            obj = getattr(obj, part)
        except AttributeError:
            return None

    try:
        fn = inspect.getsourcefile(obj)
    except TypeError:
        # Builtins/extension objects have no Python source file.
        fn = None
    if not fn:
        # Fall back to the source file of the defining module (useful for
        # objects such as ufuncs that have no source of their own).
        try:
            fn = inspect.getsourcefile(sys.modules[obj.__module__])
        except (TypeError, AttributeError, KeyError):
            fn = None
    if not fn:
        return None

    try:
        source, lineno = inspect.findsource(obj)
    except (IOError, OSError, TypeError):
        lineno = None

    if lineno:
        # findsource returns a 0-based index; GitHub anchors are 1-based.
        linespec = "#L%d" % (lineno + 1)
    else:
        linespec = ""

    fn = relpath(fn, start=dirname(scipy.__file__))

    if 'dev' in scipy.__version__:
        return "http://github.com/scipy/scipy/blob/master/scipy/%s%s" % (
            fn, linespec)
    else:
        return "http://github.com/scipy/scipy/blob/v%s/scipy/%s%s" % (
            scipy.__version__, fn, linespec)
| |
"""
Tests for finder.
"""
from django.template.response import TemplateResponse
from django.test import RequestFactory
from mock import Mock, patch
from tests import case
class FinderDecoratorTest(case.DBTestCase):
    """Tests for the finder view decorator."""
    @property
    def finder(self):
        """The decorator under test."""
        from moztrap.view.lists.decorators import finder
        return finder

    def on_response(self, response, decorator=None, request=None):
        """Apply given decorator to dummy view, return given response."""
        decorator = decorator or self.finder(Mock())
        request = request or RequestFactory().get("/")

        @decorator
        def view(request):
            return response

        return view(request)

    def on_template_response(self, context, **kwargs):
        """Run TemplateResponse with given context through decorated view."""
        request = kwargs.setdefault("request", RequestFactory().get("/"))
        res = TemplateResponse(request, "some/template.html", context)
        return self.on_response(res, **kwargs)

    def test_returns_non_template_response(self):
        """Returns a non-TemplateResponse unmodified, without error."""
        res = self.on_response("blah")
        self.assertEqual(res, "blah")

    def test_uses_wraps(self):
        """Preserves docstring and name of original view func."""
        @self.finder(Mock())
        def myview(request, some_id):
            """docstring"""
        # __name__/__doc__ replace the Python-2-only func_name/func_doc
        # aliases so the test also passes under Python 3.
        self.assertEqual(myview.__name__, "myview")
        self.assertEqual(myview.__doc__, "docstring")

    def test_passes_on_args(self):
        """Arguments are passed on to original view func."""
        record = []

        @self.finder(Mock())
        def myview(request, *args, **kwargs):
            record.extend([args, kwargs])

        myview(RequestFactory().get("/"), "a", b=2)
        self.assertEqual(record, [("a",), {"b": 2}])

    @patch("moztrap.view.lists.finder.render")
    def test_ajax(self, render):
        """Ajax response is rendered column template."""
        render.return_value = "some HTML"
        MockFinder = Mock()
        f = MockFinder.return_value
        f.column_template.return_value = "some/finder/_column.html"
        f.objects.return_value = ["some", "objects"]

        req = RequestFactory().get(
            "/some/url",
            {"finder": "1", "col": "things", "id": "2"},
            HTTP_X_REQUESTED_WITH="XMLHttpRequest")
        res = self.on_template_response(
            {}, request=req, decorator=self.finder(MockFinder))

        self.assertEqual(res, "some HTML")
        # render is called as render(request, template, context); compare
        # everything after the request argument.
        self.assertEqual(
            render.call_args[0][1:],
            (
                "some/finder/_column.html",
                {
                    "colname": "things",
                    "finder": {
                        "finder": f,
                        "things": ["some", "objects"]
                    }
                }
            )
        )
        f.column_template.assert_called_with("things")
        f.objects.assert_called_with("things", "2")

    def test_no_ajax(self):
        """Non-ajax response has finder with top-column objects in context."""
        MockFinder = Mock()
        f = MockFinder.return_value
        top_col = Mock()
        top_col.name = "top"
        f.columns = [top_col]
        f.objects.return_value = ["some", "objects"]

        res = self.on_template_response({}, decorator=self.finder(MockFinder))

        self.assertIs(res.context_data["finder"]["finder"], f)
        self.assertEqual(
            res.context_data["finder"]["top"],
            ["some", "objects"]
        )
        f.objects.assert_called_with("top")
class FinderTest(case.DBTestCase):
    """Tests for Finder."""
    @property
    def ManageFinder(self):
        """ManageFinder; a sample finder subclass to exercise Finder."""
        from moztrap.view.manage.finders import ManageFinder
        return ManageFinder

    def test_columns_by_name(self):
        """Index of columns by name."""
        f = self.ManageFinder()
        # Sort the (key, column-name) pairs so the assertion is independent
        # of dict iteration order.
        self.assertEqual(
            sorted((n, c.name) for (n, c) in f.columns_by_name.items()),
            [
                ("products", "products"),
                ("productversions", "productversions"),
                ("runs", "runs"),
                ("suites", "suites"),
            ]
        )

    def test_parent_columns(self):
        """Maps column name to parent column."""
        f = self.ManageFinder()
        self.assertEqual(
            sorted((n, c.name) for (n, c) in f.parent_columns.items()),
            [
                ("productversions", "products"),
                ("runs", "productversions"),
                ("suites", "runs"),
            ]
        )

    def test_child_columns(self):
        """Maps column name to child column."""
        f = self.ManageFinder()
        self.assertEqual(
            sorted((n, c.name) for (n, c) in f.child_columns.items()),
            [
                ("products", "productversions"),
                ("productversions", "runs"),
                ("runs", "suites")
            ]
        )

    def test_columns_by_model(self):
        """Index of columns by model."""
        f = self.ManageFinder()
        # Model classes are not orderable, so sort by column name instead.
        self.assertEqual(
            sorted(
                ((m, c.name) for (m, c) in f.columns_by_model.items()),
                key=lambda o: o[1]
            ),
            [
                (self.model.Product, "products"),
                (self.model.ProductVersion, "productversions"),
                (self.model.Run, "runs"),
                (self.model.Suite, "suites"),
            ]
        )

    def test_column_template(self):
        """Joins finder base template to column template name."""
        f = self.ManageFinder()
        self.assertEqual(f.column_template("runs"), "manage/finder/_runs.html")

    def test_bad_column_name(self):
        """Bad column name raises ValueError."""
        f = self.ManageFinder()
        with self.assertRaises(ValueError):
            f.column_template("doesnotexist")

    def test_goto_url(self):
        """Goto url is manage url for child objects, filtered by parent."""
        f = self.ManageFinder()
        obj = self.model.Suite(pk=2)
        self.assertEqual(f.goto_url(obj), "/manage/cases/?filter-suite=2")

    def test_goto_url_bad_object(self):
        """Goto url returns None if given object from unknown class."""
        f = self.ManageFinder()
        self.assertEqual(f.goto_url(Mock()), None)

    def test_child_column_for_obj(self):
        """Returns child column name for given object."""
        f = self.ManageFinder()
        obj = self.model.Product()
        child_col = f.child_column_for_obj(obj)
        self.assertEqual(child_col, "productversions")

    def test_child_column_for_bad_obj(self):
        """Returns None if obj isn't of a model class in this finder."""
        f = self.ManageFinder()
        child_col = f.child_column_for_obj(Mock())
        self.assertEqual(child_col, None)

    def test_child_column_for_last_obj(self):
        """Returns None if given object from final-column class."""
        f = self.ManageFinder()
        obj = self.model.Suite()
        child_col = f.child_column_for_obj(obj)
        self.assertEqual(child_col, None)

    def test_child_query_url(self):
        """Returns ajax query url for list of child objects in next column."""
        f = self.ManageFinder()
        obj = self.model.Run(pk=5)
        url = f.child_query_url(obj)
        self.assertEqual(url, "?finder=1&col=suites&id=5")

    def test_child_query_url_none(self):
        """Returns None for final column."""
        f = self.ManageFinder()
        obj = self.model.Suite(pk=5)
        url = f.child_query_url(obj)
        self.assertEqual(url, None)

    def test_objects(self):
        """Without parent, objects is just pass-through to column objects."""
        f = self.ManageFinder()
        p = self.F.ProductFactory.create()
        objects = f.objects("products")
        self.assertEqual(list(objects), [p])

    def test_objects_of_parent(self):
        """With parent, objects filters by parent."""
        f = self.ManageFinder()
        pv = self.F.ProductVersionFactory.create()
        # A second, unrelated version must be filtered out of the result.
        self.F.ProductVersionFactory.create()
        objects = f.objects("productversions", pv.product.pk)
        self.assertEqual(list(objects), [pv])

    def test_parent_via_m2m(self):
        """Parent filtering also works via m2m relationship."""
        f = self.ManageFinder()
        rs = self.F.RunSuiteFactory.create()
        # An unattached suite must be filtered out of the result.
        self.F.SuiteFactory.create()
        objects = f.objects("suites", rs.run.pk)
        self.assertEqual(list(objects), [rs.suite])

    def test_no_parent_relationship(self):
        """If no relationship to parent model is found, raises ValueError."""
        from moztrap.view.lists.finder import Finder, Column

        # Run has no FK/m2m pointing at Product, so parent filtering fails.
        class BadFinder(Finder):
            columns = [
                Column(
                    "products",
                    "_products.html",
                    self.model.Product.objects.all()
                ),
                Column("runs", "_runs.html", self.model.Run.objects.all()),
            ]

        f = BadFinder()
        with self.assertRaises(ValueError):
            f.objects("runs", 1)

    def test_objects_of_no_parent(self):
        """Passing in parent for top column raises ValueError."""
        f = self.ManageFinder()
        with self.assertRaises(ValueError):
            f.objects("products", 3)

    def test_objects_bad_col(self):
        """Asking for objects of bad column raises ValueError."""
        f = self.ManageFinder()
        with self.assertRaises(ValueError):
            f.objects("doesnotexist")
class ColumnTest(case.DBTestCase):
    """Tests for finder Column."""

    @property
    def column(self):
        """The Column class under test."""
        from moztrap.view.lists.finder import Column
        return Column

    def test_objects(self):
        """Objects method is just .all() on given queryset."""
        queryset = Mock()
        col = self.column("thing", "_things.html", queryset)
        self.assertIs(col.objects(), queryset.all.return_value)

    @patch("moztrap.view.lists.finder.filter_url")
    def test_goto_url(self, filter_url):
        """goto_url method calls filter_url if goto is given."""
        col = self.column("thing", "_things.html", Mock(), "goto_name")
        target = Mock()
        result = col.goto_url(target)
        self.assertIs(result, filter_url.return_value)
        filter_url.assert_called_with("goto_name", target)

    def test_no_goto_url(self):
        """goto_url method just returns None if no goto is given."""
        col = self.column("thing", "_things.html", Mock())
        self.assertIs(col.goto_url(Mock()), None)
| |
#!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2013 Keita Kita
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# mount_image_partitions
#
# A script that mount or unmount partitions in a disk image.
import argparse
import collections
import os.path
import subprocess
import sys
import fdisk_output_parser
class OperationFailedError(Exception):
    """Raised when a loop-device operation fails; wraps the root cause."""

    def __init__(self, cause):
        # Keep the underlying exception so callers can inspect it.
        self._cause = cause

    @property
    def cause(self):
        """The original exception that triggered this error."""
        return self._cause
def detach_loopback_device(loopback_device_file):
    # Best-effort detach of a single loop device; subprocess.call ignores
    # losetup's exit status (unlike check_output used elsewhere).
    subprocess.call(['losetup', '-d', loopback_device_file])
def attach_partitions(
        partitions, loop_device_file_prefix, loop_device_start_number,
        image_file):
    u'''
    Attach partitions in an image file to loop devices.

    Arguments:
        partitions : A list of attaching Partitions.
        loop_device_file_prefix : A prefix of loop device file.
        loop_device_start_number : Start number of loop device.
        image_file : An image file that contains the partitions.
    Return:
        A dictionary that maps loop device to attached partition.
    Raise:
        OperationFailedError : When attaching partitions are failed.
    '''
    loop_device_number = loop_device_start_number
    # Ordered so results print in the same order the partitions were attached.
    attached_partition_map = collections.OrderedDict()
    try:
        # Attach partitions.
        for attaching_partition in partitions:
            loop_device_file = loop_device_file_prefix + \
                str(loop_device_number)
            subprocess.check_output([
                'losetup', '-o', str(attaching_partition.start_offset_bytes),
                '--sizelimit', str(attaching_partition.end_offset_bytes),
                loop_device_file, image_file], stderr=subprocess.STDOUT)
            attached_partition_map[loop_device_file] = attaching_partition
            loop_device_number += 1
    # "except X as e" replaces the Python-2-only "except X, e" syntax.
    except subprocess.CalledProcessError as e:
        try:
            # Roll back: detach everything attached so far before re-raising.
            # Iterating the dict directly replaces the Python-2-only
            # iterkeys().
            for loop_device_file in attached_partition_map:
                detach_loopback_device(loop_device_file)
        except subprocess.CalledProcessError as cleanup_error:
            raise OperationFailedError(cleanup_error)
        raise OperationFailedError(e)
    return attached_partition_map
def print_attaching_result(loop_device_partition_map):
    u'''
    Print the result of attaching partitions.

    Argument:
        loop_device_partition_map : A dictionary that maps loop device file to
            Partition.
    '''
    # print() call form replaces the Python-2-only print statement; items()
    # replaces the Python-2-only iteritems(); enumerate replaces the manual
    # counter.
    print('number, loop device, system of partition')
    for number, (loop_device_file, partition) in enumerate(
            loop_device_partition_map.items()):
        print('%d, %s, %s' % (number, loop_device_file, partition.system))
def detach_partitions(
        partitions_count, loop_device_file_prefix, loop_device_start_number):
    u'''
    Detach partitions.

    Arguments:
        partitions_count : A count of attached partitions.
        loop_device_file_prefix : A prefix of loop device file.
        loop_device_start_number : Start number of loop device.
    Return:
        A list of detached loop device files.
    Raise:
        OperationFailedError : When detaching partitions are failed.
    '''
    detached_loop_device_files = []
    try:
        # Detach the same consecutive range of loop devices that
        # attach_partitions would have used.
        for loop_device_number in range(
                loop_device_start_number,
                loop_device_start_number + partitions_count):
            loop_device_file = loop_device_file_prefix + \
                str(loop_device_number)
            subprocess.check_output(
                ['losetup', '-d', loop_device_file], stderr=subprocess.STDOUT)
            detached_loop_device_files.append(loop_device_file)
    # "except X as e" replaces the Python-2-only "except X, e" syntax.
    except subprocess.CalledProcessError as e:
        raise OperationFailedError(e)
    return detached_loop_device_files
def print_detaching_result(detached_loop_device_files):
    u'''
    Print the result of detaching loop device files.

    Argument:
        detached_loop_device_files : A list of detached loop device files.
    '''
    # print() call form replaces the Python-2-only print statement so the
    # script also runs under Python 3.
    print(u'Detached loop device :')
    for loop_device_file in detached_loop_device_files:
        print(loop_device_file)
def detect_partitons(image_file):
    u'''
    Detect partitions in the image file.

    Argument:
        image_file : An image file.
    Return:
        A list of Partition in the image file.
    Raise:
        OperationFailedError : When detection partitions are failed.
    '''
    # NOTE: the name keeps its historical typo ("partitons") because callers
    # reference it; renaming would break the public interface.
    try:
        # Find a free loop device because the loop device with the start
        # number is used when detach is requested.
        loop_device_file_for_detecting = subprocess.check_output(
            ['losetup', '-f'], stderr=subprocess.STDOUT).strip()
    # "except X as e" replaces the Python-2-only "except X, e" syntax below.
    except subprocess.CalledProcessError as e:
        raise OperationFailedError(e)

    try:
        # Attach the image file.
        subprocess.check_output(
            ['losetup', loop_device_file_for_detecting, image_file],
            stderr=subprocess.STDOUT)
        # Get fdisk output.
        fdisk_output = subprocess.check_output(
            ['fdisk', '-lu', loop_device_file_for_detecting],
            stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        # Do not leave the detection loop device attached on failure.
        detach_loopback_device(loop_device_file_for_detecting)
        raise OperationFailedError(e)

    try:
        # Detach from the loop device for detecting partitions.
        detach_loopback_device(loop_device_file_for_detecting)
    except subprocess.CalledProcessError as e:
        raise OperationFailedError(e)

    try:
        return fdisk_output_parser.detect_partitions(fdisk_output)
    except fdisk_output_parser.ParseError as e:
        raise OperationFailedError(e)
def main(loop_device_file_prefix, loop_device_start_number, is_attach,
         image_file):
    u'''
    Attach or detach the partitions of image_file to/from loop devices.

    Exits with status 1 if the image file does not exist.
    '''
    # Check the image file is available.
    # If it is not available, exit with help message.
    if not os.path.exists(image_file):
        # sys.stderr.write works under both Python 2 and 3, unlike the
        # Python-2-only "print >>sys.stderr" statement it replaces.
        sys.stderr.write(u'Image file is not found : ' + image_file + u'\n')
        sys.exit(1)

    # Detect partitions in the image file.
    partitions = detect_partitons(image_file)

    # If attach is requested, attach partitions; if detach is requested,
    # detach them. Then print the result.
    if is_attach:
        result = attach_partitions(
            partitions, loop_device_file_prefix, loop_device_start_number,
            image_file)
        print_attaching_result(result)
    else:
        result = detach_partitions(
            len(partitions), loop_device_file_prefix, loop_device_start_number)
        print_detaching_result(result)
def create_command_line_parser():
    """Build the argparse parser for the attach/detach command line."""
    arg_parser = argparse.ArgumentParser(
        description=
        u'Attach or detach partitions in a disk image to loop devices.')

    # Optional flags first, then the positional image file.
    arg_parser.add_argument(
        '-l', '--loopdevice', dest='loop_device_files_prefix',
        default=u'/dev/loop', help=u'Prefix of loop device files.')
    arg_parser.add_argument(
        '-s', '--start_number', dest='loop_device_start_number',
        type=int, default=0, help=u'Start number of loop device files.')
    # Attach is the default mode; -d flips the flag off.
    arg_parser.add_argument(
        '-d', '--detach', dest='is_attach', action='store_false',
        default=True, help=u'Detach partitions.')
    arg_parser.add_argument(
        'image_file', metavar='IMAGE_FILE', nargs='?',
        help="Path of an image file.")

    return arg_parser
if __name__ == '__main__':
    # Parse command line arguments.
    parser = create_command_line_parser()
    arguments = parser.parse_args()

    # Call main function if image file is available.
    # If image file is not available, exit with help message.
    if arguments.image_file:
        try:
            main(
                arguments.loop_device_files_prefix,
                arguments.loop_device_start_number,
                arguments.is_attach,
                arguments.image_file)
        # "except X as e" replaces the Python-2-only "except X, e" syntax;
        # sys.stderr.write replaces the Python-2-only "print >>" statement.
        except OperationFailedError as e:
            cause_exception = e.cause
            if isinstance(cause_exception, subprocess.CalledProcessError):
                # Show the captured command output plus the error summary.
                sys.stderr.write(
                    cause_exception.output + str(cause_exception) + '\n')
            else:
                sys.stderr.write(str(cause_exception) + '\n')
            sys.exit(1)
    else:
        parser.print_help()
        sys.exit(1)
| |
'''
Created on 2016/3/24
:author: hubo
'''
from vlcp.config.config import defaultconfig
import vlcp.service.kvdb.storage as storage
import vlcp.service.kvdb.redisnotifier as redisnotifier
from vlcp.server.module import depend, Module, call_api, api
import vlcp.utils.dataobject as dataobj
from vlcp.event.runnable import RoutineContainer
from vlcp.event.event import Event, withIndices, M_
from time import time
from copy import deepcopy
from vlcp.event.core import QuitException, syscall_removequeue
import itertools
from vlcp.utils.dataobject import AlreadyExistsException, UniqueKeyReference,\
MultiKeyReference, DataObjectSet, UniqueKeySet, WeakReferenceObject,\
MultiKeySet, ReferenceObject, request_context
from contextlib import closing
import functools
import copy
from vlcp.utils.exceptions import AsyncTransactionLockException, StaleResultException,\
TransactionRetryExceededException, TransactionTimeoutException, WalkKeyNotRetrieved
try:
from itertools import izip
except ImportError:
izip = zip
@withIndices()
class RetrieveRequestSend(Event):
    # Internal event used to wake the update main loop when new retrieve
    # requests have been queued.
    pass
@withIndices('id')
class RetrieveReply(Event):
    # Reply event for a queued request; the 'id' index identifies which
    # request is being answered.
    pass
def _str(b):
if isinstance(b, str):
return b
elif isinstance(b, bytes):
return b.decode('utf-8')
else:
return str(b)
def _str2(b):
if isinstance(b, str):
return b
elif isinstance(b, bytes):
return b.decode('utf-8')
elif hasattr(b, 'getkey'):
return b.getkey()
else:
return str(b)
class _NeedMoreKeysException(Exception):
    # NOTE(review): name suggests it signals that additional keys must be
    # retrieved before an operation can complete; raising sites are outside
    # this chunk -- confirm against the rest of the module.
    pass
@defaultconfig
@depend(storage.KVStorage, redisnotifier.UpdateNotifier)
class ObjectDB(Module):
    """
    Abstract transaction layer for KVDB
    """
    # Run as a service module.
    service = True
    # Priority for object update event
    _default_objectupdatepriority = 450
    # Enable debugging mode for updater: all updaters will be called for an extra time
    # to make sure it does not crash with multiple calls
    _default_debuggingupdater = False
def __init__(self, server):
    """Initialize internal state and register the public APIs."""
    Module.__init__(self, server)
    # Cache of retrieved objects, keyed by storage key.
    self._managed_objs = {}
    # key -> set of request ids watching that key.
    self._watches = {}
    # request id -> set of keys it watches (reverse index of _watches).
    self._requestids = {}
    # Set of all keys currently watched/managed.
    self._watchedkeys = set()
    # Queue of pending requests consumed by the update loop.
    self._requests = []
    # Monotonic counter for transactions.
    self._transactno = 0
    # Staleness flag; set elsewhere when cached data may be out of date.
    self._stale = False
    # Keys that must be re-retrieved on the next update round.
    self._updatekeys = set()
    # key -> last seen (createtime, updateversion) pair from notifications.
    self._update_version = {}
    self._cache = None
    self._pending_gc = 0
    # All public APIs are served on this routine container; its main loop
    # is the _update coroutine.
    self.apiroutine = RoutineContainer(self.scheduler)
    self.apiroutine.main = self._update
    self.routines.append(self.apiroutine)
    self.create_api(api(self.mget, self.apiroutine),
                    api(self.get, self.apiroutine),
                    api(self.mgetonce, self.apiroutine),
                    api(self.getonce, self.apiroutine),
                    api(self.mwatch, self.apiroutine),
                    api(self.watch, self.apiroutine),
                    api(self.munwatch, self.apiroutine),
                    api(self.unwatch, self.apiroutine),
                    api(self.unwatchall, self.apiroutine),
                    api(self.transact, self.apiroutine),
                    api(self.watchlist),
                    api(self.walk, self.apiroutine),
                    api(self.gettimestamp, self.apiroutine),
                    api(self.asynctransact, self.apiroutine),
                    api(self.writewalk, self.apiroutine),
                    api(self.asyncwritewalk, self.apiroutine)
                    )
def _set_watch(self, key, requestid):
    # Register a two-way mapping: key -> watching request ids, and
    # request id -> watched keys, so either side can be cleaned up later.
    self._watches.setdefault(key, set()).add(requestid)
    self._requestids.setdefault(requestid, set()).add(key)
def _remove_watch(self, key, requestid):
    # Drop requestid's watch on key; prune empty entries on both sides of
    # the two-way mapping so the indexes do not accumulate empty sets.
    s = self._watches.get(key)
    if s:
        s.discard(requestid)
        if not s:
            del self._watches[key]
    s = self._requestids.get(requestid)
    if s:
        s.discard(key)
        if not s:
            del self._requestids[requestid]
def _remove_all_watches(self, requestid):
    # Remove every watch registered by requestid, pruning empty key entries
    # from the forward index, then drop the reverse-index entry itself.
    s = self._requestids.get(requestid)
    if s is not None:
        for k in s:
            s2 = self._watches.get(k)
            if s2:
                s2.discard(requestid)
                if not s2:
                    del self._watches[k]
        del self._requestids[requestid]
async def load(self, container):
    """Set up the update sub-queue and the change notifier, then load."""
    # Process DataObjectUpdateEvent in a dedicated sub-queue with a
    # configurable priority (see _default_objectupdatepriority).
    self.scheduler.queue.addSubQueue(\
        self.objectupdatepriority, dataobj.DataObjectUpdateEvent.createMatcher(), 'dataobjectupdate')
    # Notifier delivers key-change notifications from other nodes/modules.
    self._notifier = await call_api(container, 'updatenotifier', 'createnotifier')
    await Module.load(self, container)
    self.routines.append(self._notifier)
async def unload(self, container, force=False):
    """Tear down the sub-queue created in load() before unloading."""
    await container.syscall(syscall_removequeue(self.scheduler.queue, 'dataobjectupdate'))
    await Module.unload(self, container, force=force)
async def _update(self):
timestamp = '%012x' % (int(time() * 1000),) + '-'
notification_matcher = self._notifier.notification_matcher(False)
def copywithkey(obj, key):
newobj = deepcopy(obj)
if hasattr(newobj, 'setkey'):
newobj.setkey(key)
return newobj
def getversion(obj):
if obj is None:
return (0, -1)
else:
return (getattr(obj, 'kvdb_createtime', 0), getattr(obj, 'kvdb_updateversion', 0))
def isnewer(obj, version):
if obj is None:
return version[1] != -1
else:
return getversion(obj) > version
request_matcher = RetrieveRequestSend.createMatcher()
def onupdate(event, matcher):
update_keys = self._watchedkeys.intersection([_str(k) for k in event.keys])
self._updatekeys.update(update_keys)
if event.extrainfo:
for k,v in zip(event.keys, event.extrainfo):
k = _str(k)
if k in update_keys:
v = tuple(v)
oldv = self._update_version.get(k, (0, -1))
if oldv < v:
self._update_version[k] = v
else:
for k in event.keys:
try:
del self._update_version[_str(k)]
except KeyError:
pass
async def updateinner():
processing_requests = []
# New managed keys
retrieve_list = set()
orig_retrieve_list = set()
retrieveonce_list = set()
orig_retrieveonce_list = set()
processing_request_ids = set()
# Retrieved values are stored in update_result before merging into current storage
update_result = {}
# key => [(walker_func, (original_keys, rid)), ...]
walkers = {}
# Use the loop count as a revision identifier, then the valid revisions of the value
# in update_result is a range, from the last loop count the value changed
# (or -1 if not changed), to the last loop count the value is retrieved
#
# each walker can only walk on keys that shares at least one revision to ensure the
# values are consistent. If no revision could be shared, all the keys must be retrieved
# again to get a consistent view
revision_min = {}
revision_max = {}
self._loopCount = 0
# A request-id -> retrieve set dictionary to store the saved keys
savelist = {}
# (start_key, walker_func, rid) => set(used_keys)
walker_used_keys = {}
# used_key => [(start_key, walker_func, (original_keys, rid)), ...]
used_key_ref = {}
def _update_walker_ref(start_key, walker, original_keys, rid, used_keys):
    # Maintain the two-way index between a (start_key, walker, rid) walk and
    # the set of keys it touched, so that a later change to any used key can
    # invalidate exactly the cached walks that depend on it.
    old_used_keys = walker_used_keys.get((start_key, walker, rid), ())
    for k in old_used_keys:
        if k not in used_keys:
            # Key no longer used by this walk: drop its back-reference entry
            old_list = used_key_ref[k]
            for i, v in enumerate(old_list):
                if v[0] == start_key and v[1] == walker and v[2][1] == rid:
                    break
            else:
                # no matching entry recorded; nothing to remove
                continue
            # remove entry i (slice assignment shifts the tail left by one)
            old_list[i:] = old_list[i+1:]
    for k in used_keys:
        if k not in old_used_keys:
            # Newly used key: add a back-reference for future invalidation
            used_key_ref.setdefault(k, []).append((start_key, walker, (original_keys, rid)))
    walker_used_keys[(start_key, walker, rid)] = set(used_keys)
# (start_key, walker, rid) => cached_result
finished_walkers = {}
def _dirty_walkers(new_values):
    # Invalidate cached walker results that depend on any key whose value
    # has just changed, forcing those walks to re-run in the next loop.
    for k in new_values:
        if k in used_key_ref:
            for start_key, walker, (_, rid) in used_key_ref[k]:
                finished_walkers.pop((start_key, walker, rid), None)
async def updateloop():
while (retrieve_list or self._updatekeys or self._requests):
# default walker, default walker cached, customized walker, customized walker cached
_performance_counters = [0, 0, 0, 0]
# Updated keys
update_list = set()
if self._loopCount >= 10 and not retrieve_list:
if not self._updatekeys:
break
elif self._loopCount >= 100:
# Too many updates, we must stop to respond
self._logger.warning("There are still database updates after 100 loops of mget, respond with potential inconsistent values")
break
if self._updatekeys:
update_list.update(self._updatekeys)
self._updatekeys.clear()
if self._requests:
# Processing requests
for r in self._requests:
if r[2] == 'unwatch':
try:
for k in r[0]:
self._remove_watch(k, r[3])
# Do not need to wait
except Exception as exc:
await self.apiroutine.wait_for_send(RetrieveReply(r[1], exception = exc))
else:
await self.apiroutine.wait_for_send(RetrieveReply(r[1], result = None))
elif r[2] == 'unwatchall':
if r[3] in processing_request_ids:
# unwatch a processing request
# pend this request until all requests are processed
processing_requests.append(r)
else:
try:
self._remove_all_watches(r[3])
except Exception as exc:
await self.apiroutine.wait_for_send(RetrieveReply(r[1], exception = exc))
else:
await self.apiroutine.wait_for_send(RetrieveReply(r[1], result = None))
elif r[2] == 'watch':
retrieve_list.update(r[0])
orig_retrieve_list.update(r[0])
for k in r[0]:
self._set_watch(k, r[3])
processing_requests.append(r)
processing_request_ids.add(r[3])
elif r[2] == 'get':
retrieve_list.update(r[0])
orig_retrieve_list.update(r[0])
processing_requests.append(r)
processing_request_ids.add(r[3])
elif r[2] == 'walk':
retrieve_list.update(r[0])
processing_requests.append(r)
for k,v in r[3].items():
walkers.setdefault(k, []).append((v, (r[0], r[1])))
processing_request_ids.add(r[4])
else:
retrieveonce_list.update(r[0])
orig_retrieveonce_list.update(r[0])
processing_requests.append(r)
del self._requests[:]
if retrieve_list:
watch_keys = tuple(k for k in retrieve_list if k not in self._watchedkeys)
# Add watch_keys to notification
if watch_keys:
for k in watch_keys:
if k in update_result:
self._update_version[k] = getversion(update_result[k])
await self._notifier.add_listen(*watch_keys)
self._watchedkeys.update(watch_keys)
get_list_set = update_list.union(itertools.chain((k for k in retrieve_list
if k not in update_result and k not in self._managed_objs),
(k for k in retrieveonce_list
if k not in update_result and k not in self._managed_objs)))
get_list = list(get_list_set)
new_values = set()
if get_list:
try:
result, self._cache = await call_api(
self.apiroutine,
'kvstorage',
'mgetwithcache',
{'keys': get_list, 'cache': self._cache}
)
except QuitException:
raise
except Exception:
# Serve with cache
if not self._stale:
self._logger.warning('KVStorage retrieve failed, serve with cache', exc_info = True)
self._stale = True
# Discard all retrieved results
update_result.clear()
# Retry update later
self._updatekeys.update(update_list)
#break
revision_min.clear()
revision_max.clear()
else:
self._stale = False
for k,v in izip(get_list, result):
# Update revision information
revision_max[k] = self._loopCount
if k not in update_result:
if k not in self._managed_objs:
# A newly retrieved key
revision_min[k] = self._loopCount
old_value = None
else:
old_value = self._managed_objs[k]
else:
old_value = update_result[k]
# Check if the value is changed
if old_value is not v and getversion(old_value) != getversion(v):
revision_min[k] = self._loopCount
new_values.add(k)
else:
if k not in revision_min:
revision_min[k] = -1
if old_value is not v:
if v is not None and hasattr(v, 'setkey'):
v.setkey(k)
if k in self._watchedkeys and k not in self._update_version:
self._update_version[k] = getversion(v)
update_result.update(zip(get_list, result))
# Disable cache for walkers with updated keys
_dirty_walkers(new_values)
# All keys which should be retrieved in next loop
new_retrieve_list = set()
# Keys which should be retrieved in next loop for a single walk
new_retrieve_keys = set()
# Keys that are used in current walk will be retrieved again in next loop
used_keys = set()
# We separate the data with revisions to prevent inconsistent result
def create_walker(orig_key, strict=True):
    # Build a walk(key) function rooted at orig_key. Each walk narrows a
    # shared revision window so that every value returned during one walk
    # belongs to at least one common retrieval revision (a consistent view).
    revision_range = [revision_min.get(orig_key, -1), revision_max.get(orig_key, -1)]
    def _walk_with_revision(key):
        if hasattr(key, 'getkey'):
            key = key.getkey()
        key = _str(key)
        if key not in self._watchedkeys:
            # This key is not retrieved, raise a KeyError, and record this key
            new_retrieve_keys.add(key)
            raise WalkKeyNotRetrieved(key)
        elif self._stale:
            # Storage unreachable: serve from the local cache without
            # revision checks; still schedule missing keys for retrieval
            if key not in self._managed_objs:
                new_retrieve_keys.add(key)
            used_keys.add(key)
            return self._managed_objs.get(key)
        elif key not in update_result and key not in self._managed_objs:
            # This key is not retrieved, raise a KeyError, and record this key
            new_retrieve_keys.add(key)
            raise WalkKeyNotRetrieved(key)
        # Check revision: intersect this key's revision window with the
        # window accumulated by the walk so far
        current_revision = (
            max(revision_min.get(key, -1), revision_range[0]),
            min(revision_max.get(key, -1), revision_range[1])
        )
        if current_revision[1] < current_revision[0]:
            # revisions cannot match — the values seen so far and this key
            # never coexisted in one retrieval; force a re-retrieve
            new_retrieve_keys.add(key)
            if strict:
                used_keys.add(key)
            raise WalkKeyNotRetrieved(key)
        else:
            # update revision range
            revision_range[:] = current_revision
        if key in update_result:
            used_keys.add(key)
            return update_result[key]
        else:
            used_keys.add(key)
            return self._managed_objs[key]
    return _walk_with_revision
_default_walker_dup_check = set()
def default_walker(key, obj, walk, _circle_detect = None):
    # Default walk strategy: recursively follow every internal reference of
    # obj (kvdb_internalref) so referenced objects are retrieved together.
    # _circle_detect guards against reference cycles.
    if _circle_detect is None:
        _circle_detect = set()
    if key in _circle_detect:
        return
    else:
        _circle_detect.add(key)
    if hasattr(obj, 'kvdb_internalref'):
        rl = obj.kvdb_internalref()
        for k in rl:
            try:
                newobj = walk(k)
            except KeyError:
                # WalkKeyNotRetrieved — key will be retrieved in a later
                # loop; skip it for now
                pass
            else:
                if newobj is not None:
                    default_walker(k, newobj, walk, _circle_detect)
def _do_default_walker(k):
    # Run the default (internal-reference) walk rooted at k, once per loop
    # (_default_walker_dup_check) and with result caching (finished_walkers;
    # a default walk is cached under the (k, None, None) key).
    if k not in _default_walker_dup_check:
        _default_walker_dup_check.add(k)
        _performance_counters[0] += 1
        if (k, None, None) not in finished_walkers:
            v = update_result.get(k)
            if v is not None:
                new_retrieve_keys.clear()
                used_keys.clear()
                # non-strict walker: missing keys do not force re-walk of k
                default_walker(k, v, create_walker(k, False))
                if new_retrieve_keys:
                    # Incomplete walk: retrieve the missing keys and walk again
                    new_retrieve_list.update(new_retrieve_keys)
                    self._updatekeys.update(used_keys)
                    self._updatekeys.add(k)
                else:
                    _all_used_keys = used_keys.union([k])
                    _update_walker_ref(k, None, None, None, _all_used_keys)
                    finished_walkers[(k, None, None)] = None
            else:
                # Value missing/deleted: the walk trivially succeeds with
                # only k itself used
                _update_walker_ref(k, None, None, None, [k])
                finished_walkers[(k, None, None)] = None
        else:
            _performance_counters[1] += 1
for k in orig_retrieve_list:
_do_default_walker(k)
savelist.clear()
for k,ws in walkers.items():
# k: the walker key
# ws: list of [walker_func, (request_original_keys, rid)]
# Retry every walker, starts with k, with the value of v
if k in update_result:
# The value is newly retrieved
v = update_result.get(k)
else:
# Use the stored value
v = self._managed_objs.get(k)
if ws:
for w,r in list(ws):
# w: walker_func
# r: (request_original_keys, rid)
# Custom walker
_performance_counters[2] += 1
_cache_key = (k, w, r[1])
if _cache_key in finished_walkers:
_performance_counters[3] += 1
savelist.setdefault(r[1], set()).update(finished_walkers[_cache_key])
else:
_local_save_list = set()
def save(key):
    # Record *key* into this walk's result set; only the start key or a key
    # already visited through walk() in this run may be saved.
    if hasattr(key, 'getkey'):
        key = key.getkey()
    key = _str(key)
    if key != k and key not in used_keys:
        raise ValueError('Cannot save a key without walk')
    _local_save_list.add(key)
try:
new_retrieve_keys.clear()
used_keys.clear()
w(k, v, create_walker(k), save)
except Exception as exc:
# if one walker failed, the whole request is failed, remove all walkers
self._logger.warning("A walker raises an exception which rolls back the whole walk process. "
"walker = %r, start key = %r, new_retrieve_keys = %r, used_keys = %r",
w, k, new_retrieve_keys, used_keys, exc_info=True)
for orig_k in r[0]:
if orig_k in walkers:
walkers[orig_k][:] = [(w0, r0) for w0,r0 in walkers[orig_k] if r0[1] != r[1]]
processing_requests[:] = [r0 for r0 in processing_requests if r0[1] != r[1]]
savelist.pop(r[1])
await self.apiroutine.wait_for_send(RetrieveReply(r[1], exception = exc))
else:
savelist.setdefault(r[1], set()).update(_local_save_list)
if new_retrieve_keys:
new_retrieve_list.update(new_retrieve_keys)
self._updatekeys.update(used_keys)
self._updatekeys.add(k)
else:
_all_used_keys = used_keys.union([k])
_update_walker_ref(k, w, r[0], r[1], _all_used_keys)
finished_walkers[_cache_key] = _local_save_list
for save in savelist.values():
for k in save:
_do_default_walker(k)
retrieve_list.clear()
retrieveonce_list.clear()
retrieve_list.update(new_retrieve_list)
self._logger.debug("Loop %d: %d default walker (%d cached), %d customized walker (%d cached)",
self._loopCount,
*_performance_counters)
self._loopCount += 1
if self._stale:
watch_keys = tuple(k for k in retrieve_list if k not in self._watchedkeys)
if watch_keys:
await self._notifier.add_listen(*watch_keys)
self._watchedkeys.update(watch_keys)
break
while True:
await self.apiroutine.with_callback(updateloop(), onupdate, notification_matcher)
if self._loopCount >= 100 or self._stale:
break
# If some updated result is newer than the notification version, we should wait for the notification
should_wait = False
for k,v in update_result.items():
if k in self._watchedkeys:
oldv = self._update_version.get(k)
if oldv is not None and isnewer(v, oldv):
should_wait = True
break
if should_wait:
timeout, ev, m = await self.apiroutine.wait_with_timeout(0.2, notification_matcher)
if timeout:
break
else:
onupdate(ev, m)
else:
break
# Update result
send_events = []
self._transactno += 1
transactid = '%s%016x' % (timestamp, self._transactno)
update_objs = []
for k,v in update_result.items():
if k in self._watchedkeys:
if v is None:
oldv = self._managed_objs.get(k)
if oldv is not None:
if hasattr(oldv, 'kvdb_detach'):
oldv.kvdb_detach()
update_objs.append((k, oldv, dataobj.DataObjectUpdateEvent.DELETED))
else:
update_objs.append((k, None, dataobj.DataObjectUpdateEvent.DELETED))
del self._managed_objs[k]
else:
oldv = self._managed_objs.get(k)
if oldv is not None:
if oldv != v:
if oldv and hasattr(oldv, 'kvdb_update'):
oldv.kvdb_update(v)
update_objs.append((k, oldv, dataobj.DataObjectUpdateEvent.UPDATED))
else:
if hasattr(oldv, 'kvdb_detach'):
oldv.kvdb_detach()
self._managed_objs[k] = v
update_objs.append((k, v, dataobj.DataObjectUpdateEvent.UPDATED))
else:
self._managed_objs[k] = v
update_objs.append((k, v, dataobj.DataObjectUpdateEvent.UPDATED))
for k in update_result.keys():
v = self._managed_objs.get(k)
if v is not None and hasattr(v, 'kvdb_retrievefinished'):
v.kvdb_retrievefinished(self._managed_objs)
allkeys = tuple(k for k,_,_ in update_objs)
send_events.extend((dataobj.DataObjectUpdateEvent(k, transactid, t, object = v, allkeys = allkeys) for k,v,t in update_objs))
# Process requests
unwatchall = []
for r in processing_requests:
if r[2] == 'get':
objs = [self._managed_objs.get(k) for k in r[0]]
for k,v in zip(r[0], objs):
if v is not None:
self._set_watch(k, r[3])
result = [o.create_reference() if o is not None and hasattr(o, 'create_reference') else o
for o in objs]
elif r[2] == 'watch':
result = [(v.create_reference() if hasattr(v, 'create_reference') else v)
if v is not None else dataobj.ReferenceObject(k)
for k,v in ((k,self._managed_objs.get(k)) for k in r[0])]
elif r[2] == 'walk':
saved_keys = list(savelist.get(r[1], []))
for k in saved_keys:
self._set_watch(k, r[4])
objs = [self._managed_objs.get(k) for k in saved_keys]
result = (saved_keys,
[o.create_reference() if hasattr(o, 'create_reference') else o
if o is not None else dataobj.ReferenceObject(k)
for k,o in zip(saved_keys, objs)])
elif r[2] == 'unwatchall':
# Remove watches after all results are processed
unwatchall.append(r[3])
result = None
else:
result = [copywithkey(update_result.get(k, self._managed_objs.get(k)), k) for k in r[0]]
send_events.append(RetrieveReply(r[1], result = result, stale = self._stale))
for requestid in unwatchall:
self._remove_all_watches(requestid)
async def output_result():
    # Deliver all pending reply/update events; run under with_callback so
    # incoming notifications are still consumed while sending.
    for e in send_events:
        await self.apiroutine.wait_for_send(e)
await self.apiroutine.with_callback(output_result(), onupdate, notification_matcher)
self._pending_gc += 1
async def _gc():
    # Use DFS to remove unwatched objects: mark everything reachable from a
    # watched key through internal references, then sweep the rest.
    mark_set = set()
    def dfs(k):
        if k in mark_set:
            return
        mark_set.add(k)
        v = self._managed_objs.get(k)
        if v is not None and hasattr(v, 'kvdb_internalref'):
            for k2 in v.kvdb_internalref():
                dfs(k2)
    for k in self._watches.keys():
        dfs(k)
    remove_keys = self._watchedkeys.difference(mark_set)
    if remove_keys:
        # Stop listening before dropping local state so no notification for
        # a removed key arrives afterwards
        self._watchedkeys.difference_update(remove_keys)
        await self._notifier.remove_listen(*tuple(remove_keys))
        for k in remove_keys:
            if k in self._managed_objs:
                del self._managed_objs[k]
            if k in self._update_version:
                del self._update_version[k]
    if self._cache is not None:
        self._cache.gc(self._managed_objs)
    self._pending_gc = 0
while True:
if not self._updatekeys and not self._requests:
if self._pending_gc >= 10:
await self.apiroutine.with_callback(_gc(), onupdate, notification_matcher)
continue
elif self._pending_gc:
timeout, ev, m = await self.apiroutine.wait_with_timeout(1, notification_matcher, request_matcher)
if timeout:
await self.apiroutine.with_callback(_gc(), onupdate, notification_matcher)
continue
else:
ev, m = await M_(notification_matcher, request_matcher)
if m is notification_matcher:
onupdate(ev, m)
await updateinner()
async def mget(self, keys, requestid, nostale = False):
    """Retrieve multiple objects and manage them under *requestid*.

    Returns a list of references to the objects. Raises
    ``StaleResultException`` when ``nostale`` is set and the result may be
    served from an outdated cache.
    """
    normalized = tuple(_str2(k) for k in keys)
    first_in_queue = not self._requests
    token = object()
    self._requests.append((normalized, token, 'get', requestid))
    if first_in_queue:
        # First queued request: wake up the processing loop
        await self.apiroutine.wait_for_send(RetrieveRequestSend())
    reply = await RetrieveReply.createMatcher(token)
    if hasattr(reply, 'exception'):
        raise reply.exception
    if nostale and reply.stale:
        raise StaleResultException(reply.result)
    return reply.result
async def get(self, key, requestid, nostale = False):
    """Retrieve and manage a single object.

    Returns a reference to the object, or None if it does not exist.
    """
    (reference,) = await self.mget([key], requestid, nostale)
    return reference
async def mgetonce(self, keys, nostale = False):
    """One-shot retrieval: return copies of multiple objects without managing them.

    Referenced objects are not retrieved. Raises ``StaleResultException``
    when ``nostale`` is set and the result may be outdated.
    """
    normalized = tuple(_str2(k) for k in keys)
    first_in_queue = not self._requests
    token = object()
    self._requests.append((normalized, token, 'getonce'))
    if first_in_queue:
        # First queued request: wake up the processing loop
        await self.apiroutine.wait_for_send(RetrieveRequestSend())
    reply = await RetrieveReply.createMatcher(token)
    if hasattr(reply, 'exception'):
        raise reply.exception
    if nostale and reply.stale:
        raise StaleResultException(reply.result)
    return reply.result
async def getonce(self, key, nostale = False):
    """Fetch a single object without managing it.

    Returns a copy of the object, or None if it does not exist; referenced
    objects are not retrieved.
    """
    copies = await self.mgetonce([key], nostale)
    return copies[0]
async def watch(self, key, requestid, nostale = False):
    """Return a reference for *key* whether or not the object currently exists.

    Use ``reference.isdeleted()`` to test existence, and
    ``reference.wait(container)`` to wait for the object to be created.
    """
    references = await self.mwatch([key], requestid, nostale)
    return references[0]
async def mwatch(self, keys, requestid, nostale = False):
    """Return references for every key in *keys*; see ``watch()``."""
    normalized = tuple(_str2(k) for k in keys)
    first_in_queue = not self._requests
    token = object()
    self._requests.append((normalized, token, 'watch', requestid))
    if first_in_queue:
        # First queued request: wake up the processing loop
        await self.apiroutine.wait_for_send(RetrieveRequestSend())
    reply = await RetrieveReply.createMatcher(token)
    if hasattr(reply, 'exception'):
        raise reply.exception
    if nostale and reply.stale:
        raise StaleResultException(reply.result)
    return reply.result
async def unwatch(self, key, requestid):
    """Stop managing a single key for *requestid*."""
    await self.munwatch([key], requestid)
async def unwatchall(self, requestid):
    """Drop every watch that is owned by *requestid*."""
    first_in_queue = not self._requests
    token = object()
    self._requests.append(((), token, 'unwatchall', requestid))
    if first_in_queue:
        # First queued request: wake up the processing loop
        await self.apiroutine.wait_for_send(RetrieveRequestSend())
    reply = await RetrieveReply.createMatcher(token)
    if hasattr(reply, 'exception'):
        raise reply.exception
async def munwatch(self, keys, requestid):
    """Stop managing multiple keys for *requestid*."""
    normalized = tuple(_str2(k) for k in keys)
    first_in_queue = not self._requests
    token = object()
    self._requests.append((normalized, token, 'unwatch', requestid))
    if first_in_queue:
        # First queued request: wake up the processing loop
        await self.apiroutine.wait_for_send(RetrieveRequestSend())
    reply = await RetrieveReply.createMatcher(token)
    if hasattr(reply, 'exception'):
        raise reply.exception
async def transact(self, keys, updater, withtime = False, maxtime = 60):
    """
    Try to update keys in a transact, with an ``updater(keys, values)``,
    which returns ``(updated_keys, updated_values)``.

    The updater may be called more than once. If ``withtime = True``,
    the updater should take three parameters:
    ``(keys, values, timestamp)`` with timestamp as the server time

    :param keys: initial keys of the transaction. Index keys (unique/multi
                 key references and their key sets) and auto-remove keys are
                 discovered through ``_NeedMoreKeysException`` retries and
                 appended automatically.
    :param updater: the user-supplied updater function
    :param withtime: whether ``updater`` accepts a server timestamp
    :param maxtime: raise ``TransactionTimeoutException`` when retries take
                    longer than this many seconds
    """
    keys = tuple(_str2(k) for k in keys)
    # updated_ref[0] = final updated keys, updated_ref[1] = their new version
    # tuples; filled by object_updater so the notification can be published
    updated_ref = [None, None]
    # Index keys / key-set keys discovered on a failed attempt, appended to
    # the key list of the next attempt (mutated in place via [:] =)
    extra_keys = []
    extra_key_set = []
    # Keys to be removed automatically because an updated object was deleted
    auto_remove_keys = set()
    orig_len = len(keys)
    def updater_with_key(keys, values, timestamp):
        # Automatically manage extra keys
        remove_uniquekeys = []
        remove_multikeys = []
        update_uniquekeys = []
        update_multikeys = []
        # keys layout: [original keys][auto-remove keys][extra_keys][extra_key_set]
        keystart = orig_len + len(auto_remove_keys)
        for v in values[:keystart]:
            if v is not None:
                if hasattr(v, 'kvdb_uniquekeys'):
                    remove_uniquekeys.extend((k,v.create_weakreference()) for k in v.kvdb_uniquekeys())
                if hasattr(v, 'kvdb_multikeys'):
                    remove_multikeys.extend((k,v.create_weakreference()) for k in v.kvdb_multikeys())
        if self.debuggingupdater:
            # Updater may be called more than once, ensure that this updater does not crash
            # on multiple calls: first run it on deep clones, then on the real values
            kc = keys[:orig_len]
            vc = [v.clone_instance() if v is not None and hasattr(v, 'clone_instance') else deepcopy(v) for v in values[:orig_len]]
            if withtime:
                updated_keys, updated_values = updater(kc, vc, timestamp)
            else:
                updated_keys, updated_values = updater(kc, vc)
        if withtime:
            updated_keys, updated_values = updater(keys[:orig_len], values[:orig_len], timestamp)
        else:
            updated_keys, updated_values = updater(keys[:orig_len], values[:orig_len])
        for v in updated_values:
            if v is not None:
                if hasattr(v, 'kvdb_uniquekeys'):
                    update_uniquekeys.extend((k,v.create_weakreference()) for k in v.kvdb_uniquekeys())
                if hasattr(v, 'kvdb_multikeys'):
                    update_multikeys.extend((k,v.create_weakreference()) for k in v.kvdb_multikeys())
        extrakeysdict = dict(zip(keys[keystart:keystart + len(extra_keys)], values[keystart:keystart + len(extra_keys)]))
        extrakeysetdict = dict(zip(keys[keystart + len(extra_keys):keystart + len(extra_keys) + len(extra_key_set)],
                                   values[keystart + len(extra_keys):keystart + len(extra_keys) + len(extra_key_set)]))
        # Index entries emptied in this run; kept aside so a later update may
        # restore the same object instead of creating a new one
        tempdict = {}
        old_values = dict(zip(keys, values))
        updated_keyset = set(updated_keys)
        try:
            append_remove = set()
            # NOTE: autoremove_keys (local, recomputed each call) is distinct
            # from auto_remove_keys (the closure-level accumulator of the
            # previous attempt)
            autoremove_keys = set()
            # Use DFS to find auto remove keys
            def dfs(k):
                if k in autoremove_keys:
                    return
                autoremove_keys.add(k)
                if k not in old_values:
                    # key not in this transaction yet — must retry with it
                    append_remove.add(k)
                else:
                    oldv = old_values[k]
                    if oldv is not None and hasattr(oldv, 'kvdb_autoremove'):
                        for k2 in oldv.kvdb_autoremove():
                            dfs(k2)
            for k,v in zip(updated_keys, updated_values):
                if v is None:
                    dfs(k)
            if append_remove:
                raise _NeedMoreKeysException()
            for k,v in remove_uniquekeys:
                if v.getkey() not in updated_keyset and v.getkey() not in auto_remove_keys:
                    # This key is not updated, keep the indices untouched
                    continue
                if k not in extrakeysdict:
                    raise _NeedMoreKeysException()
                elif extrakeysdict[k] is not None and extrakeysdict[k].ref.getkey() == v.getkey():
                    # If the unique key does not reference to the correct object
                    # there may be an error, but we ignore this.
                    # Save in a temporary dictionary. We may restore it later.
                    tempdict[k] = extrakeysdict[k]
                    extrakeysdict[k] = None
                    setkey = UniqueKeyReference.get_keyset_from_key(k)
                    if setkey not in extrakeysetdict:
                        raise _NeedMoreKeysException()
                    else:
                        ks = extrakeysetdict[setkey]
                        if ks is None:
                            ks = UniqueKeySet.create_from_key(setkey)
                            extrakeysetdict[setkey] = ks
                        ks.set.dataset().discard(WeakReferenceObject(k))
            for k,v in remove_multikeys:
                if v.getkey() not in updated_keyset and v.getkey() not in auto_remove_keys:
                    # This key is not updated, keep the indices untouched
                    continue
                if k not in extrakeysdict:
                    raise _NeedMoreKeysException()
                else:
                    mk = extrakeysdict[k]
                    if mk is not None:
                        mk.set.dataset().discard(v)
                        if not mk.set.dataset():
                            # Multi-key reference became empty: drop it and
                            # remove it from its key set
                            tempdict[k] = extrakeysdict[k]
                            extrakeysdict[k] = None
                            setkey = MultiKeyReference.get_keyset_from_key(k)
                            if setkey not in extrakeysetdict:
                                raise _NeedMoreKeysException()
                            else:
                                ks = extrakeysetdict[setkey]
                                if ks is None:
                                    ks = MultiKeySet.create_from_key(setkey)
                                    extrakeysetdict[setkey] = ks
                                ks.set.dataset().discard(WeakReferenceObject(k))
            for k,v in update_uniquekeys:
                if k not in extrakeysdict:
                    raise _NeedMoreKeysException()
                elif extrakeysdict[k] is not None and extrakeysdict[k].ref.getkey() != v.getkey():
                    raise AlreadyExistsException('Unique key conflict for %r and %r, with key %r' % \
                                                 (extrakeysdict[k].ref.getkey(), v.getkey(), k))
                elif extrakeysdict[k] is None:
                    lv = tempdict.get(k, None)
                    if lv is not None and lv.ref.getkey() == v.getkey():
                        # Restore this value
                        nv = lv
                    else:
                        nv = UniqueKeyReference.create_from_key(k)
                        nv.ref = ReferenceObject(v.getkey())
                    extrakeysdict[k] = nv
                    setkey = UniqueKeyReference.get_keyset_from_key(k)
                    if setkey not in extrakeysetdict:
                        raise _NeedMoreKeysException()
                    else:
                        ks = extrakeysetdict[setkey]
                        if ks is None:
                            ks = UniqueKeySet.create_from_key(setkey)
                            extrakeysetdict[setkey] = ks
                        ks.set.dataset().add(nv.create_weakreference())
            for k,v in update_multikeys:
                if k not in extrakeysdict:
                    raise _NeedMoreKeysException()
                else:
                    mk = extrakeysdict[k]
                    if mk is None:
                        mk = tempdict.get(k, None)
                        if mk is None:
                            mk = MultiKeyReference.create_from_key(k)
                            mk.set = DataObjectSet()
                        setkey = MultiKeyReference.get_keyset_from_key(k)
                        if setkey not in extrakeysetdict:
                            raise _NeedMoreKeysException()
                        else:
                            ks = extrakeysetdict[setkey]
                            if ks is None:
                                ks = MultiKeySet.create_from_key(setkey)
                                extrakeysetdict[setkey] = ks
                            ks.set.dataset().add(mk.create_weakreference())
                    mk.set.dataset().add(v)
                    extrakeysdict[k] = mk
        except _NeedMoreKeysException:
            # Prepare the keys for the next attempt, then re-raise so the
            # storage layer retries the whole transaction
            extra_keys[:] = list(set(itertools.chain((k for k,v in remove_uniquekeys if v.getkey() in updated_keyset or v.getkey() in autoremove_keys),
                                                     (k for k,v in remove_multikeys if v.getkey() in updated_keyset or v.getkey() in autoremove_keys),
                                                     (k for k,_ in update_uniquekeys),
                                                     (k for k,_ in update_multikeys))))
            extra_key_set[:] = list(set(itertools.chain((UniqueKeyReference.get_keyset_from_key(k) for k,v in remove_uniquekeys if v.getkey() in updated_keyset or v.getkey() in autoremove_keys),
                                                        (MultiKeyReference.get_keyset_from_key(k) for k,v in remove_multikeys if v.getkey() in updated_keyset or v.getkey() in autoremove_keys),
                                                        (UniqueKeyReference.get_keyset_from_key(k) for k,_ in update_uniquekeys),
                                                        (MultiKeyReference.get_keyset_from_key(k) for k,_ in update_multikeys))))
            auto_remove_keys.clear()
            auto_remove_keys.update(autoremove_keys.difference(keys[:orig_len])
                                    .difference(extra_keys)
                                    .difference(extra_key_set))
            raise
        else:
            extrakeys_list = list(extrakeysdict.items())
            extrakeyset_list = list(extrakeysetdict.items())
            autoremove_list = list(autoremove_keys.difference(updated_keys)
                                   .difference(extrakeysdict.keys())
                                   .difference(extrakeysetdict.keys()))
            return (tuple(itertools.chain(updated_keys,
                                          (k for k,_ in extrakeys_list),
                                          (k for k,_ in extrakeyset_list),
                                          autoremove_list)),
                    tuple(itertools.chain(updated_values,
                                          (v for _,v in extrakeys_list),
                                          (v for _,v in extrakeyset_list),
                                          [None] * len(autoremove_list))))
    def object_updater(keys, values, timestamp):
        # Wrap updater_with_key with version bookkeeping: bind keys to
        # objects, remember old versions, and stamp new versions on results
        old_version = {}
        for k, v in zip(keys, values):
            if v is not None and hasattr(v, 'setkey'):
                v.setkey(k)
            if v is not None and hasattr(v, 'kvdb_createtime'):
                old_version[k] = (getattr(v, 'kvdb_createtime'), getattr(v, 'kvdb_updateversion', 1))
        updated_keys, updated_values = updater_with_key(keys, values, timestamp)
        updated_ref[0] = tuple(updated_keys)
        new_version = []
        for k,v in zip(updated_keys, updated_values):
            if v is None:
                # deletion marker: update version -1
                new_version.append((timestamp, -1))
            elif k in old_version:
                ov = old_version[k]
                setattr(v, 'kvdb_createtime', ov[0])
                setattr(v, 'kvdb_updateversion', ov[1] + 1)
                new_version.append((ov[0], ov[1] + 1))
            else:
                # newly created object
                setattr(v, 'kvdb_createtime', timestamp)
                setattr(v, 'kvdb_updateversion', 1)
                new_version.append((timestamp, 1))
        updated_ref[1] = new_version
        return (updated_keys, updated_values)
    start_time = self.apiroutine.scheduler.current_time
    retry_times = 1
    while True:
        try:
            await call_api(self.apiroutine, 'kvstorage', 'updateallwithtime',
                           {'keys': keys + tuple(auto_remove_keys) + \
                                    tuple(extra_keys) + tuple(extra_key_set),
                            'updater': object_updater})
        except _NeedMoreKeysException:
            # Retry with the extra keys discovered by updater_with_key
            if maxtime is not None and\
                    self.apiroutine.scheduler.current_time - start_time > maxtime:
                raise TransactionTimeoutException
            retry_times += 1
        except Exception:
            self._logger.debug("Transaction %r interrupted in %r retries", updater, retry_times)
            raise
        else:
            self._logger.debug("Transaction %r done in %r retries", updater, retry_times)
            break
    # Short cut update notification
    update_keys = self._watchedkeys.intersection(updated_ref[0])
    self._updatekeys.update(update_keys)
    for k,v in zip(updated_ref[0], updated_ref[1]):
        k = _str(k)
        if k in update_keys:
            v = tuple(v)
            oldv = self._update_version.get(k, (0, -1))
            if oldv < v:
                self._update_version[k] = v
    if not self._requests:
        # Fake notification
        await self.apiroutine.wait_for_send(RetrieveRequestSend())
    await self._notifier.publish(updated_ref[0], updated_ref[1])
async def gettimestamp(self):
    """
    Get a timestamp from database server
    """
    # Run an empty update so the server calls us back with its current time
    captured = [None]
    def _updater(keys, values, timestamp):
        captured[0] = timestamp
        return ((), ())
    await call_api(self.apiroutine, 'kvstorage', 'updateallwithtime',
                   {'keys': (),
                    'updater': _updater})
    return captured[0]
def watchlist(self, requestid = None):
    """
    Return a dictionary whose keys are database keys, and values are lists of request ids.
    Optionally filtered by request id
    """
    result = {}
    for key, requestids in self._watches.items():
        if requestid is None or requestid in requestids:
            result[key] = list(requestids)
    return result
async def walk(self, keys, walkerdict, requestid, nostale = False):
    """Recursively retrieve keys with customized walker functions.

    ``walkerdict`` maps a starting key to a function
    ``walker(key, obj, walk, save)``.
    """
    normalized = tuple(_str2(k) for k in keys)
    first_in_queue = not self._requests
    token = object()
    self._requests.append((normalized, token, 'walk', dict(walkerdict), requestid))
    if first_in_queue:
        # First queued request: wake up the processing loop
        await self.apiroutine.wait_for_send(RetrieveRequestSend())
    reply = await RetrieveReply.createMatcher(token)
    if hasattr(reply, 'exception'):
        raise reply.exception
    if nostale and reply.stale:
        raise StaleResultException(reply.result)
    return reply.result
async def asynctransact(self, asyncupdater, withtime = False,
                        maxretry = None, maxtime=60):
    """
    Read-Write transaction with asynchronous operations.

    First, the `asyncupdater` is called with `asyncupdater(last_info, container)`.
    `last_info` is the info from last `AsyncTransactionLockException`.
    When `asyncupdater` is called for the first time, last_info = None.
    The async updater should be an async function, and return
    `(updater, keys)`. The `updater` should
    be a valid updater function used in `transaction` API. `keys` will
    be the keys used in the transaction.
    The async updater can return None to terminate the transaction
    without exception.
    After the call, a transaction is automatically started with the
    return values of `asyncupdater`.
    `updater` can raise `AsyncTransactionLockException` to restart
    the transaction from `asyncupdater`.

    :param asyncupdater: An async updater `asyncupdater(last_info, container)`
                         which returns `(updater, keys)`
    :param withtime: Whether the returned updater need a timestamp
    :param maxretry: Limit the max retried times
    :param maxtime: Limit the execution time. The transaction is abandoned
                    if still not completed after `maxtime` seconds.
    """
    start_time = self.apiroutine.scheduler.current_time
    def timeleft():
        # Remaining budget in seconds, or None for unlimited; raises
        # TransactionTimeoutException once the budget is exhausted
        if maxtime is None:
            return None
        else:
            time_left = maxtime + start_time - \
                        self.apiroutine.scheduler.current_time
            if time_left <= 0:
                raise TransactionTimeoutException
            else:
                return time_left
    retry_times = 0
    last_info = None
    while True:
        # Run the async updater itself under the remaining time budget
        timeout, r = \
            await self.apiroutine.execute_with_timeout(
                timeleft(),
                asyncupdater(last_info, self.apiroutine)
            )
        if timeout:
            raise TransactionTimeoutException
        if r is None:
            # asyncupdater asked to terminate without an exception
            return
        updater, keys = r
        try:
            await self.transact(keys, updater, withtime, timeleft())
        except AsyncTransactionLockException as e:
            # Restart from asyncupdater, carrying its info to the next call
            retry_times += 1
            if maxretry is not None and retry_times > maxretry:
                raise TransactionRetryExceededException
            # Check time left
            timeleft()
            last_info = e.info
        except Exception:
            self._logger.debug("Async transaction %r interrupted in %r retries", asyncupdater, retry_times + 1)
            raise
        else:
            self._logger.debug("Async transaction %r done in %r retries", asyncupdater, retry_times + 1)
            break
async def writewalk(self, keys, walker, withtime = False, maxtime = 60):
    """
    A read-write transaction with walkers

    :param keys: initial keys used in walk. Provide keys already known to
                 be necessary to optimize the transaction.
    :param walker: A walker should be `walker(walk, write)`,
                   where `walk` is a function `walk(key)->value`
                   to get a value from the database, and
                   `write` is a function `write(key, value)`
                   to save value to the database.
                   A value can be write to a database any times.
                   A `walk` called after `write` is guaranteed
                   to retrieve the previously written value.
    :param withtime: if withtime=True, an extra timestamp parameter is given to
                     walkers, so walker should be
                     `walker(walk, write, timestamp)`
    :param maxtime: max execution time of this transaction
    """
    # Delegate to asyncwritewalk with a factory that always yields the same
    # (keys, walker) pair, regardless of retry information
    @functools.wraps(walker)
    async def _constant_factory(last_info, container):
        return (keys, walker)
    return await self.asyncwritewalk(_constant_factory, withtime, maxtime)
async def asyncwritewalk(self, asyncwalker, withtime = False, maxtime = 60):
    """
    A read-write transaction with walker factory

    :param asyncwalker: an async function called as `asyncwalker(last_info, container)`
                        and returns (keys, walker), which
                        are the same as parameters of `writewalk`

    :param keys: initial keys used in walk

    :param walker: A walker should be `walker(walk, write)`,
                   where `walk` is a function `walk(key)->value`
                   to get a value from the database, and
                   `write` is a function `write(key, value)`
                   to save value to the database.
                   A value can be write to a database any times.
                   A `walk` called after `write` is guaranteed
                   to retrieve the previously written value.
                   raise AsyncTransactionLockException in walkers
                   to restart the transaction

    :param withtime: if withtime=True, an extra timestamp parameter is given to
                     walkers, so walkers should be
                     `walker(key, value, walk, write, timestamp)`

    :param maxtime: max execution time of this transaction
    """
    @functools.wraps(asyncwalker)
    async def _asyncupdater(last_info, container):
        # last_info is a (from_walker, real_info) pair produced below:
        # from_walker=False means the last attempt only lacked keys, so we
        # can reuse the same walker with the extended key set; True means
        # the walker itself asked for a restart through asyncwalker.
        if last_info is not None:
            from_walker, real_info = last_info
            if not from_walker:
                keys, orig_keys, walker = real_info
            else:
                r = await asyncwalker(real_info, container)
                if r is None:
                    return None
                keys, walker = r
                orig_keys = keys
        else:
            r = await asyncwalker(None, container)
            if r is None:
                return None
            keys, walker = r
            orig_keys = keys
        @functools.wraps(walker)
        def _updater(keys, values, timestamp):
            # Present the retrieved key/value snapshot to the walker through
            # walk()/write(), collecting writes and missing keys
            _stored_objs = dict(zip(keys, values))
            if self.debuggingupdater:
                # Snapshot the JSON form of current values to verify later
                # that unwritten values were not mutated in place
                _stored_old_values = {k: v.jsonencode()
                                      for k,v in zip(keys, values)
                                      if hasattr(v, 'jsonencode')}
            # Keys written by walkers
            _walker_write_dict = {}
            _lost_keys = set()
            _used_keys = set()
            def _walk(key):
                if key not in _stored_objs:
                    # not in this transaction: remember it and abort the walk
                    _lost_keys.add(key)
                    raise WalkKeyNotRetrieved(key)
                else:
                    if key not in _walker_write_dict:
                        _used_keys.add(key)
                    return _stored_objs[key]
            def _write(key, value):
                _walker_write_dict[key] = value
                _stored_objs[key] = value
            try:
                if withtime:
                    walker(_walk, _write, timestamp)
                else:
                    walker(_walk, _write)
            except AsyncTransactionLockException as e:
                # Tag the exception so _asyncupdater restarts via asyncwalker
                raise AsyncTransactionLockException((True, e.info))
            if _lost_keys:
                # Retry the same walker with the union of missing + used keys
                _lost_keys.update(_used_keys)
                _lost_keys.update(orig_keys)
                raise AsyncTransactionLockException((False, (_lost_keys, orig_keys, walker)))
            if self.debuggingupdater:
                # Check if there are changes not written
                for k, v in _stored_old_values.items():
                    if k not in _walker_write_dict:
                        v2 = _stored_objs[k]
                        assert hasattr(v2, 'jsonencode') and v2.jsonencode() == v
            if _walker_write_dict:
                return tuple(zip(*_walker_write_dict.items()))
            else:
                return (), ()
        return (_updater, keys)
    return await self.asynctransact(_asyncupdater, True, maxtime=maxtime)
| |
from datetime import timedelta
import pytest
from django.urls import reverse
from django.core.files.uploadedfile import SimpleUploadedFile
from django.utils import timezone
from conference.cfp import dump_relevant_talk_information_to_dict
from conference.models import TALK_STATUS, TALK_LEVEL
try:
from pycon.settings import CONFERENCE_TIMESLOTS
except ImportError:
CONFERENCE_TIMESLOTS = None
from tests.factories import (
EventFactory,
UserFactory,
TalkFactory,
ConferenceTagFactory,
TalkSpeakerFactory,
)
from tests.common_tools import get_default_conference, redirects_to, template_used, make_user
pytestmark = [pytest.mark.django_db]
def test_talk_view_as_anonymous(client):
    """Anonymous visitors see the talk page with a rendered abstract, no edit link."""
    raw_abstract = "Hello!\nGoto http://example.com"
    # newlines should become BR, urls should be linked
    rendered_abstract = (
        'Hello!<br>Goto <a href="http://example.com" rel="nofollow">'
        'http://example.com</a>'
    )
    get_default_conference()
    talk = TalkFactory()
    talk.setAbstract(raw_abstract)
    response = client.get(reverse("talks:talk", args=[talk.slug]))
    assert response.status_code == 200
    content = response.content.decode()
    assert talk.title in content
    assert talk.sub_title in content
    assert rendered_abstract in content
    assert "update talk" not in content.lower()
def test_talk_view_as_owner(user_client):
    """The talk owner sees the talk page together with an 'update talk' link."""
    get_default_conference(conference_end=timezone.now().date() + timedelta(days=1))
    talk = TalkFactory(created_by=user_client.user, status=TALK_STATUS.accepted)
    response = user_client.get(reverse("talks:talk", args=[talk.slug]))
    assert response.status_code == 200
    content = response.content.decode()
    assert talk.title in content
    assert talk.sub_title in content
    assert talk.get_abstract() in content
    assert "update talk" in content.lower()
def test_cannot_update_talk_if_anonymous(client):
    """Anonymous users are redirected to login when opening the edit page."""
    get_default_conference()
    talk = TalkFactory(status=TALK_STATUS.accepted)
    response = client.get(reverse("talks:update_talk", args=[talk.slug]))
    assert redirects_to(response, reverse("accounts:login"))
def test_cannot_update_talk_if_not_owner(user_client):
    """A logged-in user who is not the owner gets 403 on the edit page."""
    get_default_conference()
    talk = TalkFactory(status=TALK_STATUS.accepted)
    response = user_client.get(reverse("talks:update_talk", args=[talk.slug]))
    assert response.status_code == 403
def test_cannot_update_talk_if_talk_status_not_accepted(user_client):
    """Even the owner cannot edit a talk that is still only proposed."""
    get_default_conference()
    talk = TalkFactory(created_by=user_client.user, status=TALK_STATUS.proposed)
    response = user_client.get(reverse("talks:update_talk", args=[talk.slug]))
    assert response.status_code == 403
def test_can_update_if_is_speaker_but_not_owner(user_client):
    """A speaker attached to the talk may edit it even without being its creator."""
    get_default_conference()
    talk = TalkFactory(created_by=make_user(), status=TALK_STATUS.accepted)
    # Attach the logged-in user to the talk as a speaker.
    TalkSpeakerFactory(talk=talk, speaker__user=user_client.user)
    response = user_client.get(reverse("talks:update_talk", args=[talk.slug]))
    assert response.status_code == 200
def test_cannot_update_talk_if_conference_has_finished(user_client):
    """Editing is forbidden once the conference end date has passed."""
    get_default_conference(conference_end=timezone.now().date() - timedelta(days=1))
    talk = TalkFactory(created_by=user_client.user, status=TALK_STATUS.accepted)
    response = user_client.get(reverse("talks:update_talk", args=[talk.slug]))
    assert response.status_code == 403
def test_update_talk_get(user_client):
    """The edit form is reachable for an accepted talk while the conference runs."""
    get_default_conference(conference_end=timezone.now().date() + timedelta(days=1))
    talk = TalkFactory(created_by=user_client.user, status=TALK_STATUS.accepted)
    response = user_client.get(reverse("talks:update_talk", args=[talk.slug]))
    assert response.status_code == 200
def test_update_talk_post(user_client):
    """Posting a valid form updates every editable field of the talk."""
    get_default_conference(conference_end=timezone.now().date() + timedelta(days=1))
    talk = TalkFactory(created_by=user_client.user, status=TALK_STATUS.accepted)
    tags = ConferenceTagFactory.create_batch(size=3)
    form_data = {
        "title": "new title",
        "sub_title": "new sub title",
        "abstract": "new abstract",
        "abstract_short": "new short abstract",
        "prerequisites": "new prerequisites",
        "level": TALK_LEVEL.advanced,
        "domain_level": TALK_LEVEL.advanced,
        "tags": ",".join(tag.name for tag in tags),
        "i_accept_speaker_release": True,
    }
    # Some deployments require an availability choice; send one if configured.
    if CONFERENCE_TIMESLOTS and isinstance(CONFERENCE_TIMESLOTS, (list, tuple)):
        form_data["availability"] = [CONFERENCE_TIMESLOTS[0][0]]
    response = user_client.post(
        reverse("talks:update_talk", args=[talk.slug]), data=form_data
    )
    talk.refresh_from_db()
    assert redirects_to(response, talk.get_absolute_url())
    for field in ("title", "sub_title", "abstract_short", "prerequisites",
                  "level", "domain_level"):
        assert getattr(talk, field) == form_data[field]
    assert talk.get_abstract() == form_data["abstract"]
    assert set(talk.tags.all().values_list("pk", flat=True)) == {
        tag.pk for tag in tags
    }
def test_update_talk_post_fails_if_release_not_agreed(user_client):
    """Without the speaker-release checkbox the form re-renders and nothing is saved."""
    get_default_conference(conference_end=timezone.now().date() + timedelta(days=1))
    talk = TalkFactory(created_by=user_client.user, status=TALK_STATUS.accepted)
    tags = ConferenceTagFactory.create_batch(size=3)
    form_data = {
        "title": "new title",
        "sub_title": "new sub title",
        "abstract": "new abstract",
        "abstract_short": "new short abstract",
        "prerequisites": "new prerequisites",
        "level": TALK_LEVEL.advanced,
        "domain_level": TALK_LEVEL.advanced,
        "tags": ",".join(tag.name for tag in tags),
        "i_accept_speaker_release": False,
    }
    response = user_client.post(
        reverse("talks:update_talk", args=[talk.slug]), data=form_data
    )
    # We do not advance and talk should be unchanged in DB
    assert response.status_code == 200
    before = dump_relevant_talk_information_to_dict(talk)
    talk.refresh_from_db()
    assert before == dump_relevant_talk_information_to_dict(talk)
def test_anonymous_cannot_get_submit_slides(client):
    """Anonymous users are bounced to the login page from the slides form."""
    get_default_conference()
    talk = TalkFactory(status=TALK_STATUS.accepted)
    resp = client.get(reverse("talks:submit_slides", args=[talk.slug]))
    assert redirects_to(resp, reverse('accounts:login'))
def test_not_author_cannot_get_submit_slides(user_client):
    """Only the talk author may open the slide-submission form."""
    get_default_conference()
    talk = TalkFactory(status=TALK_STATUS.accepted)
    resp = user_client.get(reverse("talks:submit_slides", args=[talk.slug]))
    assert resp.status_code == 403
def test_author_can_get_submit_slides(user_client):
    """The author gets the slide-submission form (reuses the update template)."""
    get_default_conference()
    talk = TalkFactory(created_by=user_client.user, status=TALK_STATUS.accepted)
    resp = user_client.get(reverse("talks:submit_slides", args=[talk.slug]))
    assert resp.status_code == 200
    assert template_used(resp, "conference/talks/update_talk.html")
def test_author_can_post_submit_slides(user_client):
    """Uploading a slides file stores it on the talk."""
    get_default_conference()
    talk = TalkFactory(created_by=user_client.user, status=TALK_STATUS.accepted)
    assert not talk.slides
    upload = SimpleUploadedFile('slides.pdf', b'pdf content')
    resp = user_client.post(
        reverse("talks:submit_slides", args=[talk.slug]), data={"slides": upload}
    )
    assert redirects_to(resp, talk.get_absolute_url())
    talk.refresh_from_db()
    assert talk.slides
def test_author_can_post_submit_slides_url(user_client):
    """Submitting a slides URL stores it on the talk."""
    get_default_conference()
    talk = TalkFactory(created_by=user_client.user, status=TALK_STATUS.accepted)
    resp = user_client.post(
        reverse("talks:submit_slides", args=[talk.slug]),
        data={"slides_url": "https://epstage.europython.eu"},
    )
    assert redirects_to(resp, talk.get_absolute_url())
    talk.refresh_from_db()
    assert talk.slides_url
def test_author_can_post_submit_repository_url(user_client):
    """Submitting a repository URL stores it on the talk."""
    get_default_conference()
    talk = TalkFactory(created_by=user_client.user, status=TALK_STATUS.accepted)
    resp = user_client.post(
        reverse("talks:submit_slides", args=[talk.slug]),
        data={"repository_url": "https://epstage.europython.eu"},
    )
    assert redirects_to(resp, talk.get_absolute_url())
    talk.refresh_from_db()
    assert talk.repository_url
def test_submit_slides_url_on_talk_detail_page(client):
    """
    The submit slides button only appears if the user is the author of the talk.
    """
    get_default_conference()
    talk = TalkFactory(created_by=UserFactory(), status=TALK_STATUS.accepted)
    detail_url = talk.get_absolute_url()
    submit_slides_url = reverse("talks:submit_slides", args=[talk.slug])

    def submit_link_state():
        # -> (button text present, submit URL present) for the current user.
        html = client.get(detail_url).content.decode()
        return ('submit slides' in html.lower(), submit_slides_url in html)

    # Anonymous visitor: neither the button nor the URL is rendered.
    assert submit_link_state() == (False, False)
    # A different authenticated user: still hidden.
    client.force_login(UserFactory())
    assert submit_link_state() == (False, False)
    # The talk owner: both appear.
    client.force_login(talk.created_by)
    assert submit_link_state() == (True, True)
def test_view_slides_url_on_talk_detail_page(client):
    """
    The download slides button only appears if the slides have been uploaded.
    """
    get_default_conference()
    talk = TalkFactory(status=TALK_STATUS.accepted)
    detail_url = talk.get_absolute_url()
    # No slides yet: the download link must be absent.
    html = client.get(detail_url).content.decode()
    assert not talk.slides
    assert 'download/view slides' not in html.lower()
    # After uploading slides the download link appears.
    talk.slides = SimpleUploadedFile('slides.pdf', b'pdf content')
    talk.save()
    html = client.get(detail_url).content.decode()
    assert 'download/view slides' in html.lower()
def test_talk_for_other_than_current_conference(client):
    """
    Only display talks for the current conference.
    """
    get_default_conference()
    other_conference = get_default_conference(code='ep_other')
    foreign_talk = TalkFactory(conference=other_conference.code)
    resp = client.get(reverse("talks:talk", args=[foreign_talk.slug]))
    assert resp.status_code == 404
def test_show_talk_link_in_schedule(client):
    """
    The talk url points to the schedule, with correct talk slug, and time in utc.

    Fix: removed the dead local `time_range = event.get_time_range()` --
    its value was never used by any assertion.
    """
    get_default_conference()
    talk = TalkFactory(status=TALK_STATUS.accepted)
    event = EventFactory(talk=talk)
    response = client.get(talk.get_absolute_url())
    html = response.content.decode()
    # The schedule anchor encodes the event start time in UTC, e.g. "#10:30-UTC".
    utc_start = event.get_utc_start_datetime()
    schedule_hash = utc_start.strftime('%H:%M-UTC')
    assert f"{talk.slug}#{schedule_hash}" in html
    schedule_string = event.get_schedule_string()
    assert schedule_string in html
| |
#!/usr/bin/env python
"""Deform the lithosphere with 1D or 2D flexure.
Landlab component that implements a 1 and 2D lithospheric flexure
model.
Examples
--------
Create a grid on which we will run the flexure calculations.
>>> from landlab import RasterModelGrid
>>> from landlab.components.flexure import Flexure
>>> grid = RasterModelGrid((5, 4), xy_spacing=(1.e4, 1.e4))
>>> lith_press = grid.add_zeros("lithosphere__overlying_pressure_increment", at="node")
Check the fields that are used as input to the flexure component.
>>> Flexure.input_var_names # doctest: +NORMALIZE_WHITESPACE
('lithosphere__overlying_pressure_increment',)
Check the units for the fields.
>>> Flexure.var_units('lithosphere__overlying_pressure_increment')
'Pa'
If you are not sure about one of the input or output variables, you can
get help for specific variables.
>>> Flexure.var_help('lithosphere__overlying_pressure_increment')
name: lithosphere__overlying_pressure_increment
description:
Applied pressure to the lithosphere over a time step
units: Pa
unit agnostic: True
at: node
intent: in
>>> flex = Flexure(grid)
In creating the component, a field (initialized with zeros) was added to the
grid. Reset the interior nodes for the loading.
>>> dh = grid.at_node['lithosphere__overlying_pressure_increment']
>>> dh = dh.reshape(grid.shape)
>>> dh[1:-1, 1:-1] = flex.gamma_mantle
>>> flex.update()
>>> flex.output_var_names
('lithosphere_surface__elevation_increment',)
>>> flex.grid.at_node['lithosphere_surface__elevation_increment']
... # doctest: +NORMALIZE_WHITESPACE
array([ 0., 0., 0., 0.,
0., 1., 1., 0.,
0., 1., 1., 0.,
0., 1., 1., 0.,
0., 0., 0., 0.])
"""
import numpy as np
from landlab import Component
from .funcs import get_flexure_parameter
class Flexure(Component):
    """Deform the lithosphere with 1D or 2D flexure.

    Landlab component that implements a 1 and 2D lithospheric flexure
    model.

    Examples
    --------
    >>> from landlab import RasterModelGrid
    >>> from landlab.components.flexure import Flexure
    >>> grid = RasterModelGrid((5, 4), xy_spacing=(1.e4, 1.e4))
    >>> lith_press = grid.add_zeros(
    ...     "lithosphere__overlying_pressure_increment", at="node"
    ... )
    >>> flex = Flexure(grid)
    >>> flex.name
    'Flexure'
    >>> flex.input_var_names
    ('lithosphere__overlying_pressure_increment',)
    >>> flex.output_var_names
    ('lithosphere_surface__elevation_increment',)
    >>> sorted(flex.units) # doctest: +NORMALIZE_WHITESPACE
    [('lithosphere__overlying_pressure_increment', 'Pa'),
     ('lithosphere_surface__elevation_increment', 'm')]
    >>> flex.grid.number_of_node_rows
    5
    >>> flex.grid.number_of_node_columns
    4
    >>> flex.grid is grid
    True
    >>> np.all(grid.at_node['lithosphere_surface__elevation_increment'] == 0.)
    True
    >>> np.all(grid.at_node['lithosphere__overlying_pressure_increment'] == 0.)
    True
    >>> flex.update()
    >>> np.all(grid.at_node['lithosphere_surface__elevation_increment'] == 0.)
    True
    >>> load = grid.at_node['lithosphere__overlying_pressure_increment']
    >>> load[4] = 1e9
    >>> dz = grid.at_node['lithosphere_surface__elevation_increment']
    >>> np.all(dz == 0.)
    True
    >>> flex.update()
    >>> np.all(grid.at_node['lithosphere_surface__elevation_increment'] == 0.)
    False

    References
    ----------
    **Required Software Citation(s) Specific to this Component**

    Hutton, E., Syvitski, J. (2008). Sedflux 2.0: An advanced process-response
    model that generates three-dimensional stratigraphy. Computers &
    Geosciences. 34(10), 1319-1337.
    https://dx.doi.org/10.1016/j.cageo.2008.02.013

    **Additional References**

    Lambeck, K.: Geophysical Geodesy, The Slow Deformations of the Earth,
    Clarendon Press, Oxford, UK, 718 pp., 1988.
    """

    _name = "Flexure"

    _unit_agnostic = True

    _cite_as = r"""@article{hutton2008sedflux,
    title={Sedflux 2.0: An advanced process-response model that generates three-dimensional stratigraphy},
    author={Hutton, Eric WH and Syvitski, James PM},
    journal={Computers \& Geosciences},
    volume={34},
    number={10},
    pages={1319--1337},
    year={2008},
    publisher={Pergamon}
    }"""

    _info = {
        "lithosphere__overlying_pressure_increment": {
            "dtype": float,
            "intent": "in",
            "optional": False,
            "units": "Pa",
            "mapping": "node",
            "doc": "Applied pressure to the lithosphere over a time step",
        },
        "lithosphere_surface__elevation_increment": {
            "dtype": float,
            "intent": "out",
            "optional": False,
            "units": "m",
            "mapping": "node",
            "doc": "The change in elevation of the top of the lithosphere (the land surface) in one timestep",
        },
    }

    def __init__(
        self,
        grid,
        eet=65e3,
        youngs=7e10,
        method="airy",
        rho_mantle=3300.0,
        gravity=9.80665,
        n_procs=1,
    ):
        """Initialize the flexure component.

        Parameters
        ----------
        grid : RasterModelGrid
            A grid.
        eet : float, optional
            Effective elastic thickness (m).
        youngs : float, optional
            Young's modulus.
        method : {'airy', 'flexure'}, optional
            Method to use to calculate deflections.
        rho_mantle : float, optional
            Density of the mantle (kg / m^3).
        gravity : float, optional
            Acceleration due to gravity (m / s^2).
        n_procs : int, optional
            Number of processors to use for calculations.

        Raises
        ------
        ValueError
            If *method* is not ``'airy'`` or ``'flexure'``, or if *eet*
            is not positive.
        """
        if method not in ("airy", "flexure"):
            raise ValueError("{method}: method not understood".format(method=method))

        super().__init__(grid)

        self._youngs = youngs
        self._method = method
        self._rho_mantle = rho_mantle
        self._gravity = gravity
        # The eet setter validates the value and builds the kei-function
        # grid (self._r) used by subside_loads, so no extra call is needed
        # here.  (The previous version recomputed an identical self._r
        # after initialize_output_fields -- redundant work, now removed.)
        self.eet = eet
        self._n_procs = n_procs

        self.initialize_output_fields()

    @property
    def eet(self):
        """Effective elastic thickness (m)."""
        return self._eet

    @eet.setter
    def eet(self, new_val):
        # Changing eet changes alpha, so the kei-function grid must be
        # rebuilt to stay consistent.
        if new_val <= 0:
            raise ValueError("Effective elastic thickness must be positive.")
        self._eet = new_val
        self._r = self._create_kei_func_grid(
            self._grid.shape, (self._grid.dy, self._grid.dx), self.alpha
        )

    @property
    def youngs(self):
        """Young's modulus of lithosphere (Pa)."""
        return self._youngs

    @property
    def rho_mantle(self):
        """Density of mantle (kg/m^3)."""
        return self._rho_mantle

    @property
    def gamma_mantle(self):
        """Specific density of mantle (N/m^3)."""
        return self._rho_mantle * self._gravity

    @property
    def gravity(self):
        """Acceleration due to gravity (m/s^2)."""
        return self._gravity

    @property
    def method(self):
        """Name of method used to calculate deflections."""
        return self._method

    @property
    def alpha(self):
        """Flexure parameter (m)."""
        return get_flexure_parameter(
            self._eet, self._youngs, 2, gamma_mantle=self.gamma_mantle
        )

    @staticmethod
    def _create_kei_func_grid(shape, xy_spacing, alpha):
        """Evaluate the Kelvin function kei(r / alpha) at every grid node,
        where r is the node's distance from the grid origin."""
        from scipy.special import kei

        dx, dy = np.meshgrid(
            np.arange(shape[1]) * xy_spacing[1], np.arange(shape[0]) * xy_spacing[0]
        )

        return kei(np.sqrt(dx ** 2 + dy ** 2) / alpha)

    def update(self):
        """Update fields with current loading conditions."""
        load = self._grid.at_node["lithosphere__overlying_pressure_increment"]
        deflection = self._grid.at_node["lithosphere_surface__elevation_increment"]

        # Work on a copy so the input field is never modified.
        new_load = load.copy()

        deflection.fill(0.0)

        if self.method == "airy":
            # Airy isostasy: local compensation, deflection proportional
            # to the load.
            deflection[:] = new_load / self.gamma_mantle
        else:
            self.subside_loads(new_load, out=deflection)

    def subside_loads(self, loads, out=None):
        """Subside surface due to multiple loads.

        Parameters
        ----------
        loads : ndarray of float
            Loads applied to each grid node.
        out : ndarray of float, optional
            Buffer to place resulting deflection values.

        Returns
        -------
        ndarray of float
            Deflections caused by the loading.
        """
        if out is None:
            # Fix: np.float was removed in NumPy 1.24; the builtin float
            # is (and always was) the equivalent dtype.
            out = np.zeros(self._grid.shape, dtype=float)
        dz = out.reshape(self._grid.shape)
        load = loads.reshape(self._grid.shape)

        from .cfuncs import subside_grid_in_parallel

        # Pressure * cell area -> point loads convolved with the kei grid.
        subside_grid_in_parallel(
            dz,
            load * self._grid.dx * self._grid.dy,
            self._r,
            self.alpha,
            self.gamma_mantle,
            self._n_procs,
        )

        return out
| |
#!/usr/bin/env python
"""
conference.py -- Udacity conference server-side Python App Engine API;
uses Google Cloud Endpoints
"""
__author__ = 'stevenbarnhurst@gmail.com (Steven Barnhurst)'
from datetime import datetime
import endpoints
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
from models import ConflictException
from models import Profile
from models import ProfileMiniForm
from models import ProfileForm
from models import StringMessage
from models import BooleanMessage
from models import Conference
from models import ConferenceForm
from models import ConferenceForms
from models import ConferenceQueryForm
from models import ConferenceQueryForms
from models import TeeShirtSize
from models import Session
from models import SessionInForm
from models import SessionOutForm
from models import SessionForms
from models import Speaker
from models import SpeakerForm
from models import SpeakerForms
from settings import WEB_CLIENT_ID
from settings import ANDROID_CLIENT_ID
from settings import IOS_CLIENT_ID
from settings import ANDROID_AUDIENCE
from utils import getUserId
EMAIL_SCOPE = endpoints.EMAIL_SCOPE
API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID
# Memcache key under which _cacheAnnouncement() stores the announcement text.
MEMCACHE_ANNOUNCEMENTS_KEY = "RECENT_ANNOUNCEMENTS"
# %s is filled with a comma-separated list of nearly-sold-out conference names.
ANNOUNCEMENT_TPL = ('Last chance to attend! The following conferences '
                    'are nearly sold out: %s')
# NOTE(review): consumers of these two are outside this chunk -- presumably a
# featured-speaker announcement in memcache; confirm against the task handler.
FEATURED_SPEAKER_STR = '%s is speaking at: '
MEMCACHE_FEATURED_SPEAKER_KEY = "FEATURED_SPEAKER"
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Defaults applied to a new Conference when the request omits these fields
# (see _createConferenceObject).
DEFAULTS = {
    "city": "Default City",
    "maxAttendees": 0,
    "seatsAvailable": 0,
    "topics": ["Default", "Topic"],
}
# Defaults for new Session entities; session creation is outside this chunk.
SESSION_DEFAULTS = {
    "highlights": [],
    "duration": '1',
    "typeOfSession": "Types",
}
# Accepted values for a session's typeOfSession field.
SESSION_TYPES = [
    'lecture',
    'workshop',
    'presentation',
    'roundtable',
    'panel',
    'think tank',
    'professional development',
    'other'
]
# Maps query-form operator names to Datastore comparison operators
# (see _formatFilters).
OPERATORS = {
    'EQ': '=',
    'GT': '>',
    'GTEQ': '>=',
    'LT': '<',
    'LTEQ': '<=',
    'NE': '!='
}
# Maps query-form field names to Conference model property names
# (see _formatFilters).
FIELDS = {
    'CITY': 'city',
    'TOPIC': 'topics',
    'MONTH': 'month',
    'MAX_ATTENDEES': 'maxAttendees',
}
# - - - - - - Request Containers - - - - - - - -
# Containers names do not necessarily restrict their usage to conference functions
# Addresses a single entity by its urlsafe Datastore key (path parameter).
CONF_GET_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    websafeKey=messages.StringField(1, required=True),
)
# Entity key plus an optional session-type filter.
CONF_GET_BY_TYPE_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    websafeKey=messages.StringField(1, required=True),
    type=messages.StringField(2),
)
# Filter by a time string and a repeated list of session types.
CONF_GET_BY_TIME_TYPES_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    time=messages.StringField(1),
    types=messages.StringField(2, repeated=True),
)
# Filter by a time string only.
CONF_GET_BY_TIME_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    time=messages.StringField(1),
)
# Filter by a repeated list of session types only.
CONF_GET_BY_TYPES_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    types=messages.StringField(1, repeated=True),
)
# Look up a speaker by name and/or email.
SPEAKER_GET_BY = endpoints.ResourceContainer(
    message_types.VoidMessage,
    name=messages.StringField(1),
    email=messages.StringField(2),
)
# - - - - - - - - - - Endpoints Start - - - - - - - - - - - - - - - -
@endpoints.api(name='conference', version='v1', audiences=[ANDROID_AUDIENCE],
allowed_client_ids=[WEB_CLIENT_ID, API_EXPLORER_CLIENT_ID, ANDROID_CLIENT_ID, IOS_CLIENT_ID],
scopes=[EMAIL_SCOPE])
class ConferenceApi(remote.Service):
"""Conference API v0.1"""
# - - - Helpers - - - - - - - - - - - - - - - - - - - - - -
def _validateKey(self, websafeKey):
"""Takes a websafe key and returns the entity (obj) and its key."""
# NDB accepts trail and lead whitespaces;
# this allows duplicates because Python sees it as different strings
if websafeKey:
key = websafeKey.strip()
try:
key = ndb.Key(urlsafe=key)
obj = key.get()
except:
# except is purposely left vague; multiple types of error.
raise endpoints.BadRequestException(
'The key is of an incorrect format: %s' % key)
if not obj:
raise endpoints.NotFoundException(
'No conference found with key: %s' % key)
return obj, key
else:
raise endpoints.BadRequestException(
'No websafe key was received with request.')
def _validateUser(self):
"""Verifies user authorization and returns user obj and its id"""
# preload necessary data items
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
return user, user_id
# - - - Conference objects - - - - - - - - - - - - - - - - -
def _copyConferenceToForm(self, conf, displayName):
"""Copy relevant fields from Conference to ConferenceForm."""
cf = ConferenceForm()
for field in cf.all_fields():
if hasattr(conf, field.name):
# convert Date to date string; just copy others
if field.name.endswith('Date'):
setattr(cf, field.name, str(getattr(conf, field.name)))
else:
setattr(cf, field.name, getattr(conf, field.name))
elif field.name == "websafeKey":
setattr(cf, field.name, conf.key.urlsafe())
if displayName:
setattr(cf, 'organizerDisplayName', displayName)
cf.check_initialized()
return cf
    def _createConferenceObject(self, request):
        """Create or update Conference object, returning ConferenceForm/request.

        Raises:
            endpoints.UnauthorizedException: no authenticated user.
            endpoints.BadRequestException: missing conference name.
        """
        user, user_id = self._validateUser()
        # this assures that a profile has been created before creating a conf;
        self._getProfileFromUser()
        if not request.name:
            raise endpoints.BadRequestException("Conference 'name' field required")
        # copy ConferenceForm/ProtoRPC Message into dict
        data = {field.name: getattr(request, field.name) for field in request.all_fields()}
        # These two are output-only form fields; drop them before storing.
        del data['websafeKey']
        del data['organizerDisplayName']
        # add default values for those missing (both data model & outbound Message)
        for df in DEFAULTS:
            if data[df] in (None, []):
                data[df] = DEFAULTS[df]
                setattr(request, df, DEFAULTS[df])
        # convert dates from strings to Date objects; set month based on start_date
        if data['startDate']:
            # [:10] keeps only the YYYY-MM-DD part of the incoming string.
            data['startDate'] = datetime.strptime(data['startDate'][:10], "%Y-%m-%d").date()
            data['month'] = data['startDate'].month
        else:
            data['month'] = 0
        if data['endDate']:
            data['endDate'] = datetime.strptime(data['endDate'][:10], "%Y-%m-%d").date()
        # set seatsAvailable to be same as maxAttendees on creation
        if data["maxAttendees"] > 0:
            data["seatsAvailable"] = data["maxAttendees"]
        # generate Profile Key based on user ID and Conference
        # ID based on Profile key get Conference key from ID
        p_key = ndb.Key(Profile, user_id)
        prof = p_key.get()
        c_id = Conference.allocate_ids(size=1, parent=p_key)[0]
        # Parenting the Conference under the Profile puts both in one
        # entity group, enabling the ancestor query in conferenceGetCreated.
        c_key = ndb.Key(Conference, c_id, parent=p_key)
        data['key'] = c_key
        data['organizerUserId'] = request.organizerUserId = user_id
        # create Conference, send email to organizer confirming
        # creation of Conference & return (modified) ConferenceForm
        Conference(**data).put()
        # Confirmation email is sent out-of-band via the task queue.
        taskqueue.add(params={'email': user.email(), 'conferenceInfo': repr(request)},
            url='/tasks/send_confirmation_email')
        # Re-fetch so the form reflects what was actually stored.
        conf = c_key.get()
        return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
    @ndb.transactional()
    def _updateConferenceObject(self, request):
        """Update an existing Conference from the supplied form fields.

        Runs inside an NDB transaction so concurrent updates to the same
        conference cannot interleave.  Only the owner may update.

        Raises:
            endpoints.ForbiddenException: caller is not the organizer.
        """
        user, user_id = self._validateUser()
        conf, c_key = self._validateKey(request.websafeKey)
        # check that user is owner
        if user_id != conf.organizerUserId:
            raise endpoints.ForbiddenException('Only the owner can update the conference.')
        # Not getting all the fields, so don't create a new object; just
        # copy relevant fields from ConferenceForm to Conference object
        for field in request.all_fields():
            data = getattr(request, field.name)
            # only copy fields where we get data
            if data not in (None, []):
                # special handling for dates (convert string to Date)
                if field.name in ('startDate', 'endDate'):
                    data = datetime.strptime(data, "%Y-%m-%d").date()
                    if field.name == 'startDate':
                        # keep the derived month property in sync
                        conf.month = data.month
                # write to Conference object
                setattr(conf, field.name, data)
        conf.put()
        prof = ndb.Key(Profile, user_id).get()
        return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='conference/user',
http_method='GET', name='conferenceGetCreated')
def conferenceGetCreated(self, request):
"""Return conferences created by user."""
# make sure user is authed
user, user_id = self._validateUser()
# create ancestor query for all key matches for this user
confs = Conference.query(ancestor=ndb.Key(Profile, user_id))
prof = ndb.Key(Profile, user_id).get()
# return set of ConferenceForm objects per Conference
return ConferenceForms(
items=[self._copyConferenceToForm(conf, getattr(prof, 'displayName')) for conf in confs]
)
    @endpoints.method(ConferenceForm, ConferenceForm,
            path='conference',
            http_method='PUT', name='conferenceUpdate')
    def conferenceUpdate(self, request):
        """Update conference w/provided fields & return w/updated info."""
        # Auth, ownership check and the write all happen inside the
        # transactional helper.
        return self._updateConferenceObject(request)
@endpoints.method(CONF_GET_REQUEST, ConferenceForm,
path='conference/{websafeKey}',
http_method='GET', name='conferenceGet')
def conferenceGet(self, request):
"""Return requested conference by websafeKey."""
conf, c_key = self._validateKey(request.websafeKey)
prof = conf.key.parent().get()
# return ConferenceForm
return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
    @endpoints.method(ConferenceForm, ConferenceForm, path='conference',
            http_method='POST', name='conferenceCreate')
    def conferenceCreate(self, request):
        """Create new conference."""
        # Validation, defaulting and persistence live in the helper.
        return self._createConferenceObject(request)
# - - - Announcements - - - - - - - - - - - - - - - - - - - -
@staticmethod
def _cacheAnnouncement():
"""Create Announcement & assign to memcache; used by
memcache cron job & putAnnouncement().
"""
confs = Conference.query(ndb.AND(
Conference.seatsAvailable <= 5,
Conference.seatsAvailable > 0)
).fetch(projection=[Conference.name])
if confs:
# If there are almost sold out conferences,
# format announcement and set it in memcache
announcement = ANNOUNCEMENT_TPL % (
', '.join(conf.name for conf in confs))
memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement)
else:
# If there are no sold out conferences,
# delete the memcache announcements entry
announcement = ""
memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY)
return announcement
    @endpoints.method(message_types.VoidMessage, StringMessage,
            path='conference/announcement',
            http_method='GET', name='announcementGet')
    def announcementGet(self, request):
        """Return Announcement from memcache."""
        # Empty string when nothing is cached (or the entry expired).
        return StringMessage(data=memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY) or "")
# - - - Registration - - - - - - - - - - - - - - - - - - - -
    @ndb.transactional(xg=True)
    def _registerForConference(self, request, reg=True):
        """Register or unregister user for selected conference.

        Cross-group (xg) transaction: the caller's Profile and the
        Conference live in different entity groups but must be mutated
        atomically to keep the seat count consistent.

        Returns BooleanMessage(True) on success; False when unregistering
        a conference the user was not registered for.
        """
        retval = None
        prof = self._getProfileFromUser() # get user Profile
        conf, c_key = self._validateKey(request.websafeKey)
        # register
        if reg:
            # check if user already registered otherwise add
            if c_key in prof.conferencesToAttend:
                raise ConflictException(
                    "You have already registered for this conference")
            # check if seats avail
            if conf.seatsAvailable <= 0:
                raise ConflictException(
                    "There are no seats available.")
            # register user, take away one seat
            prof.conferencesToAttend.append(c_key)
            conf.seatsAvailable -= 1
            retval = True
        # unregister
        else:
            # check if user already registered
            if c_key in prof.conferencesToAttend:
                # unregister user, add back one seat
                prof.conferencesToAttend.remove(c_key)
                conf.seatsAvailable += 1
                retval = True
            else:
                retval = False
        # write things back to the datastore & return
        prof.put()
        conf.put()
        return BooleanMessage(data=retval)
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='conference/registration',
http_method='GET', name='conferenceGetToAttend')
def conferenceGetToAttend(self, request):
"""Get a list of conferences that user has registered for."""
prof = self._getProfileFromUser() # get user Profile
# conf_keys = [ndb.Key(urlsafe=wsck) for wsck in prof.conferencesToAttend]
conferences = ndb.get_multi(prof.conferencesToAttend)
# get organizers
organisers = [ndb.Key(Profile, conf.organizerUserId) for conf in conferences]
profiles = ndb.get_multi(organisers)
# put display names in a dict for easier fetching
names = {}
for profile in profiles:
names[profile.key.id()] = profile.displayName
# return set of ConferenceForm objects per Conference
return ConferenceForms(items=[self._copyConferenceToForm(conf, names[conf.organizerUserId]) \
for conf in conferences])
    @endpoints.method(CONF_GET_REQUEST, BooleanMessage,
            path='conference/registration',
            http_method='POST', name='conferenceRegisterFor')
    def conferenceRegisterFor(self, request):
        """Register user for selected conference."""
        # Thin endpoint wrapper; the transactional work happens in
        # _registerForConference (reg defaults to True).
        return self._registerForConference(request)
    @endpoints.method(CONF_GET_REQUEST, BooleanMessage,
            path='conference/registration',
            http_method='DELETE', name='conferenceUnregisterFrom')
    def conferenceUnregisterFrom(self, request):
        """Unregister user from selected conference."""
        # Same transactional helper as registration, with reg=False.
        return self._registerForConference(request, reg=False)
# - - - - Queries for Conf - - - - - -
def _getQuery(self, request):
"""Return formatted query from the submitted filters."""
q = Conference.query()
inequality_filter, filters = self._formatFilters(request.filters)
# If exists, sort on inequality filter first
if not inequality_filter:
q = q.order(Conference.name)
else:
q = q.order(ndb.GenericProperty(inequality_filter))
q = q.order(Conference.name)
for filtr in filters:
if filtr["field"] in ["month", "maxAttendees"]:
filtr["value"] = int(filtr["value"])
formatted_query = ndb.query.FilterNode(filtr["field"], filtr["operator"], filtr["value"])
q = q.filter(formatted_query)
return q
def _formatFilters(self, filters):
"""Parse, check validity and format user supplied filters."""
formatted_filters = []
inequality_field = None
for f in filters:
filtr = {field.name: getattr(f, field.name) for field in f.all_fields()}
try:
filtr["field"] = FIELDS[filtr["field"]]
filtr["operator"] = OPERATORS[filtr["operator"]]
except KeyError:
raise endpoints.BadRequestException("Filter contains invalid field or operator.")
# Every operation except "=" is an inequality
if filtr["operator"] != "=":
# check if inequality operation has been used in previous filters
# disallow the filter if inequality was performed on a different field before
# track the field on which the inequality operation is performed
if inequality_field and inequality_field != filtr["field"]:
raise endpoints.BadRequestException("Inequality filter is allowed on only one field.")
else:
inequality_field = filtr["field"]
formatted_filters.append(filtr)
return (inequality_field, formatted_filters)
@endpoints.method(ConferenceQueryForms, ConferenceForms, path='conference',
http_method='GET', name='conferenceQuery')
def conferenceQuery(self, request):
"""Query for conferences by criteria or query all if none specified."""
conferences = self._getQuery(request)
# need to fetch organiser displayName from profiles
# get all keys and use get_multi for speed
organisers = [(ndb.Key(Profile, conf.organizerUserId)) for conf in conferences]
profiles = ndb.get_multi(organisers)
# put display names in a dict for easier fetching
names = {}
for profile in profiles:
names[profile.key.id()] = profile.displayName
# return individual ConferenceForm object per Conference
return ConferenceForms(
items=[self._copyConferenceToForm(conf, names[conf.organizerUserId])
for conf in conferences])
    # - - - Profile objects - - - - - - - - - - - - - - - - - - -
    def _copyProfileToForm(self, prof):
        """Copy relevant fields from Profile to ProfileForm.

        Args:
            prof: Profile entity to copy from.

        Returns:
            A fully initialized ProfileForm.
        """
        pf = ProfileForm()
        for field in pf.all_fields():
            if hasattr(prof, field.name):
                # teeShirtSize is stored as a string on the entity, but
                # the form field expects the TeeShirtSize enum member of
                # that name; everything else is copied verbatim.
                if field.name == 'teeShirtSize':
                    setattr(pf, field.name, getattr(TeeShirtSize, getattr(prof, field.name)))
                else:
                    setattr(pf, field.name, getattr(prof, field.name))
        pf.check_initialized()
        return pf
def _getProfileFromUser(self):
"""Return user Profile from datastore, creating new one if non-existent."""
user, user_id = self._validateUser()
p_key = ndb.Key(Profile, user_id)
profile = p_key.get()
# create new Profile if not there
if not profile:
profile = Profile(
key=p_key,
displayName=user.nickname(),
mainEmail=user.email(),
teeShirtSize=str(TeeShirtSize.NOT_SPECIFIED),
)
profile.put()
return profile # return Profile
    def _doProfile(self, save_request=None):
        """Get user Profile and return to user, possibly updating it first.

        Args:
            save_request: optional form carrying user-modifiable fields;
                when given, those fields are copied onto the Profile and
                the entity is saved.

        Returns:
            ProfileForm for the (possibly updated) Profile.
        """
        # get user Profile
        prof = self._getProfileFromUser()
        # if saveProfile(), process user-modifiable fields
        if save_request:
            for field in ('displayName', 'teeShirtSize'):
                if hasattr(save_request, field):
                    val = getattr(save_request, field)
                    # Skip falsy values so an omitted field does not wipe
                    # existing data; str() normalizes enum values.
                    if val:
                        setattr(prof, field, str(val))
            prof.put()
        # return ProfileForm
        return self._copyProfileToForm(prof)
    @endpoints.method(message_types.VoidMessage, ProfileForm,
            path='profile', http_method='GET', name='profileGet')
    def profileGet(self, request):
        """Return user profile."""
        # No save_request: _doProfile only fetches (or creates) the Profile.
        return self._doProfile()
    @endpoints.method(ProfileMiniForm, ProfileForm,
            path='profile', http_method='POST', name='profileSave')
    def profileSave(self, request):
        """Update & return user profile."""
        # Passing the request triggers the save path in _doProfile.
        return self._doProfile(request)
# - - - - - - - - - - - - Sessions - - - - - - - - - - - - - -
def _copySessionToForm(self, sess):
"""Copy relevant fields from Session to SessionOutForm."""
sf = SessionOutForm()
if sess.speakerKey:
# Has knowledge of the SessionOutModel fields
speaker = sess.speakerKey.get()
sf.speakerName = speaker.name
sf.speakerBio = speaker.bio
sf.speakerCredentials = speaker.credentials
sf.speakerTitle = speaker.title
sf.speakerEmail = speaker.email
for field in sf.all_fields():
if hasattr(sess, field.name):
# convert Date to date string; just copy others
if field.name == 'date':
setattr(sf, field.name, str(getattr(sess, field.name)))
elif field.name == 'startTime':
setattr(sf, field.name, str(getattr(sess, field.name)))
else:
setattr(sf, field.name, getattr(sess, field.name))
elif field.name == "websafeKey":
setattr(sf, field.name, sess.key.urlsafe())
sf.check_initialized()
return sf
    def _createSessionObject(self, request):
        """Create or update Session object, returning SessionInForm/request.

        Only the conference organizer may add sessions. Queues a
        featured-speaker task when a speaker is attached, and a
        confirmation-email task in all cases.

        Raises:
            endpoints.UnauthorizedException: caller is not the organizer.
        """
        # preload necessary data items
        user, user_id = self._validateUser()
        # copy ConferenceForm/ProtoRPC Message into dict
        data = {field.name: getattr(request, field.name) for field in request.all_fields()}
        # check if conf exists given websafeKey
        conf, data['conferenceKey'] = self._validateKey(request.websafeConferenceKey)
        if conf.organizerUserId != user_id:
            raise endpoints.UnauthorizedException('User is not conference organizer.')
        if request.websafeSpeakerKey:
            # will raise error if speaker key is invalid; the fetched
            # speaker entity itself is not used further.
            speaker, data['speakerKey'] = self._validateKey(request.websafeSpeakerKey)
        # add default values for those missing (both data model & outbound Message)
        for df in SESSION_DEFAULTS:
            if data[df] in (None, []):
                data[df] = SESSION_DEFAULTS[df]
                setattr(request, df, SESSION_DEFAULTS[df])
        # convert dates from strings to Date objects; time parsed as HH:MM
        if data['date']:
            data['date'] = datetime.strptime(data['date'][:10], "%Y-%m-%d").date()
        if data['startTime']:
            data['startTime'] = datetime.strptime(data['startTime'], "%H:%M").time()
        # Allocate an id under the conference so the session is a child
        # entity (this is what enables the ancestor queries elsewhere).
        sess_id = Session.allocate_ids(size=1, parent=data['conferenceKey'])[0]
        sess_key = ndb.Key(Session, sess_id, parent=data['conferenceKey'])
        data['key'] = sess_key
        # The websafe* request fields have no counterpart on the model.
        del data['websafeKey']
        del data['websafeSpeakerKey']
        del data['websafeConferenceKey']
        # create Session, send email to organizer confirming
        # creation of Session and return SessionForm
        Session(**data).put()
        sess = sess_key.get()
        if request.websafeSpeakerKey:
            taskqueue.add(params={'websafeSpeakerKey': request.websafeSpeakerKey, 'websafeConferenceKey': request.websafeConferenceKey},
                url='/tasks/set_featured_speaker')
        taskqueue.add(params={'email': user.email(), 'conferenceInfo': repr(request)},
            url='/tasks/send_confirmation_email')
        return self._copySessionToForm(sess)
@endpoints.method(CONF_GET_BY_TYPE_REQUEST, SessionForms,
path='session/conference/type',
http_method='GET', name='sessionGetByConferenceByType')
def sessionGetByConferenceByType(self, request):
"""Return sessions under conference by type."""
# create ancestor query for all key matches for this user
conf, c_key = self._validateKey(request.websafeKey)
c_sessions = Session.query(ancestor=c_key)
c_sessions = c_sessions.filter(Session.typeOfSession == request.type)
# return set of ConferenceForm objects per Conference
return SessionForms(
items=[self._copySessionToForm(sess) for sess in c_sessions]
)
@endpoints.method(CONF_GET_REQUEST, SessionForms,
path='session/conference',
http_method='GET', name='sessionGetByConference')
def sessionGetByConference(self, request):
"""Return sessions under conference."""
# create ancestor query for all key matches for this user
c_sessions = Session.query(ancestor=ndb.Key(urlsafe=request.websafeKey))
return SessionForms(
items=[self._copySessionToForm(sess) for sess in c_sessions]
)
@endpoints.method(CONF_GET_REQUEST, SessionForms,
path='session/speaker',
http_method='GET', name='sessionGetBySpeaker')
def sessionGetBySpeaker(self, request):
"""Return sessions by a speaker's websafeKey."""
# create query for all key matches for this speaker
speaker, s_key = self._validateKey(request.websafeKey)
sessions = Session.query(Session.speakerKey == s_key)
# return set of SessionOutForm objects for speaker
return SessionForms(
items=[self._copySessionToForm(sess) for sess in sessions]
)
    @endpoints.method(SessionInForm, SessionOutForm,
            path='session',
            http_method='POST', name='sessionCreate')
    def sessionCreate(self, request):
        """Create new session."""
        # All validation and creation logic lives in _createSessionObject.
        return self._createSessionObject(request)
    # - - - - - - - - - - - - Wishlist - - - - - - - - - - - - - -
    def _editWishlist(self, request, reg=True):
        """Add or remove session from user's wishlist.

        Args:
            request: message carrying `websafeKey` of the session.
            reg: True to add the session, False to remove it.

        Returns:
            BooleanMessage: True on success; False when removing a
            session that was not in the wishlist.

        Raises:
            ConflictException: session already in the wishlist.
        """
        retval = None
        prof = self._getProfileFromUser() # get user Profile
        # check if session exists given websafeKey
        sess, s_key = self._validateKey(request.websafeKey)
        # add to wishlist
        if reg:
            # adding the same session twice is an error
            if s_key in prof.sessionsWishlist:
                raise ConflictException("This session is already in your wishlist.")
            prof.sessionsWishlist.append(s_key)
            retval = True
        # remove from wishlist
        else:
            # only remove if actually present
            if s_key in prof.sessionsWishlist:
                prof.sessionsWishlist.remove(s_key)
                retval = True
            else:
                retval = False
        # write things back to the datastore & return
        prof.put()
        return BooleanMessage(data=retval)
@endpoints.method(message_types.VoidMessage, SessionForms,
path='session/wishlist',
http_method='GET', name='sessionsGetFromWishlist')
def sessionsGetFromWishlist(self, request):
"""Query for all the sessions that the user is interested in"""
prof = self._getProfileFromUser() # get user Profile
# sess_keys = [ndb.Key(urlsafe=wsk) for wsk in prof.sessionsWishlist]
sessions = [s_key.get() for s_key in prof.sessionsWishlist]
# return set of SessionOutForm objects per Session
return SessionForms(items=[self._copySessionToForm(sess) for sess in sessions])
    @endpoints.method(CONF_GET_REQUEST, BooleanMessage,
            path='session/wishlist',
            http_method='POST', name='sessionAddToWishlist')
    def sessionAddToWishlist(self, request):
        """Adds the session to the user's list of sessions they are interested in attending"""
        # Thin wrapper; _editWishlist adds when reg defaults to True.
        return self._editWishlist(request)
    @endpoints.method(CONF_GET_REQUEST, BooleanMessage,
            path='session/wishlist',
            http_method='DELETE', name='sessionDeleteFromWishlist')
    def sessionDeleteFromWishlist(self, request):
        """Delete session from user wishlist."""
        # Same helper as adding, with reg=False to remove.
        return self._editWishlist(request, reg=False)
# - - - - - - - - - - - - Additional Queries - - - - - - - - - - - - - -
@endpoints.method(CONF_GET_BY_TYPES_REQUEST, SessionForms, path='session/types',
http_method='GET', name='sessionGetOfTypes')
def sessionGetOfTypes(self, request):
"""Return sessions for given types. """
# create ancestor query for all key matches for this user
if not request.types:
raise endpoints.BadRequestException("No types were given.")
sessions = Session.query()
# can accept array
sessions = sessions.filter(Session.typeOfSession.IN(request.types))
# return set of ConferenceForm objects per Conference
return SessionForms(items=[self._copySessionToForm(sess) for sess in sessions])
@endpoints.method(CONF_GET_BY_TIME_REQUEST, SessionForms, path='session/time',
http_method='GET', name='sessionGetByTime')
def sessionGetByTime(self, request):
"""Return sessions starting at/after a certain time"""
# create ancestor query for all key matches for this user
sessionTime = datetime.strptime(request.time, "%H:%M").time()
sessions = Session.query(Session.startTime >= sessionTime)
# return set of SessionForm objects
return SessionForms(items=[self._copySessionToForm(sess) for sess in sessions])
    @endpoints.method(CONF_GET_BY_TIME_TYPES_REQUEST, SessionForms, path='session/time/types',
            http_method='GET', name='sessionGetByTimeByNotTypes')
    def sessionGetByTimeByNotTypes(self, request):
        """Return sessions starting at/after a certain time and by types not given."""
        sessionTime = datetime.strptime(request.time, "%H:%M").time()
        # Complement of the requested types within all known session
        # types -- sidesteps the datastore's lack of a NOT-IN filter.
        types = set(SESSION_TYPES) - set(request.types)
        sessions = Session.query()
        # Combine the time inequality with membership in the complement.
        # NOTE(review): a set is passed to IN here; confirm ndb accepts
        # non-list iterables on this runtime.
        sessions = sessions.filter(Session.startTime >= sessionTime)
        sessions = sessions.filter(Session.typeOfSession.IN(types))
        # return one SessionOutForm per matching Session
        return SessionForms(items=[self._copySessionToForm(sess) for sess in sessions])
    # - - - Announcements - - - - - - - - - - - - - - - - - - - -
    @staticmethod
    def _cacheFeaturedSpeaker(request):
        """Create Announcement for featured speaker & assign to memcache.

        Invoked from the /tasks/set_featured_speaker task handler;
        `request` provides the websafe keys via .get().
        """
        c_key = ndb.Key(urlsafe=request.get('websafeConferenceKey'))
        s_key = ndb.Key(urlsafe=request.get('websafeSpeakerKey'))
        sessions = Session.query(Session.conferenceKey == c_key)
        sessions = sessions.filter(Session.speakerKey == s_key)
        sessions = sessions.fetch()
        # A speaker with two or more sessions in this conference becomes
        # the featured speaker; cache the formatted announcement.
        if len(sessions) >= 2:
            speaker = s_key.get()
            announcement = FEATURED_SPEAKER_STR % speaker.name
            announcement += ', '.join(sess.name for sess in sessions)
            memcache.set(MEMCACHE_FEATURED_SPEAKER_KEY, announcement)
    @endpoints.method(message_types.VoidMessage, StringMessage, path='speaker/featured',
            http_method='GET', name='speakerGetFeatured')
    def speakerGetFeatured(self, request):
        """Return featured speaker from memcache, if existent."""
        # Empty string when no featured speaker has been cached yet.
        return StringMessage(data=memcache.get(MEMCACHE_FEATURED_SPEAKER_KEY) or "")
# - - - - - - - - - - - - Speaker - - - - - - - - - - - - - -
def _copySpeakerToForm(self, speaker):
"""Copy relevant fields from Speaker to SpeakerForm."""
sf = SpeakerForm()
for field in sf.all_fields():
if hasattr(speaker, field.name):
setattr(sf, field.name, getattr(speaker, field.name))
elif field.name == "websafeKey":
setattr(sf, field.name, speaker.key.urlsafe())
sf.check_initialized()
return sf
def _createSpeakerObject(self, request):
"""Create or update Speaker object, returning SpeakerForm/request."""
# ensure user auth
user, u_id = self._validateUser()
if not request.name:
raise endpoints.BadRequestException("Speaker 'name' field required")
if request.email:
q = Speaker.query(Speaker.email == request.email)
entity = q.get()
if entity:
raise endpoints.ForbiddenException('Email is already registered with a speaker.')
# copy SpeakerForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name) for field in request.all_fields()}
del data['websafeKey']
s_key = Speaker(**data).put()
speaker = s_key.get()
return self._copySpeakerToForm(speaker)
    @endpoints.method(CONF_GET_REQUEST, SpeakerForm, path='speaker/{websafeKey}',
            http_method='GET', name='speakerGet')
    def speakerGet(self, request):
        """Return speaker info for websafeKey."""
        # Key validation is delegated to _validateKey, as in the other
        # endpoints that accept a websafe key.
        speaker, s_key = self._validateKey(request.websafeKey)
        return self._copySpeakerToForm(speaker)
@endpoints.method(SPEAKER_GET_BY, SpeakerForms,
path='speaker',
http_method='GET', name='speakerQuery')
def speakerQuery(self, request):
"""Return speakers by email, name, or query all if no criteria specified."""
# should only return one speaker
if request.email:
speakers = Speaker.query(Speaker.email == request.email)
# can return multiple
elif request.name:
speakers = Speaker.query(Speaker.name == request.name)
# returns all if no email or name specified
else:
speakers = Speaker.query()
# return set of SpeakerForm objects per speaker matched
return SpeakerForms(
items=[self._copySpeakerToForm(speaker) for speaker in speakers]
)
    @endpoints.method(SpeakerForm, SpeakerForm,
            path='speaker',
            http_method='POST', name='speakerCreate')
    def speakerCreate(self, request):
        """Create new speaker."""
        # Validation and uniqueness checks live in _createSpeakerObject.
        return self._createSpeakerObject(request)
api = endpoints.api_server([ConferenceApi], restricted=False) # register API
| |
"""
General utilities.
MIT license.
Copyright (c) 2017 Isaac Muse <isaacmuse@gmail.com>
"""
from ..markdown.inlinepatterns import InlineProcessor
import xml.etree.ElementTree as etree
from collections import namedtuple
import sys
import copy
import re
import html.parser
from urllib.request import pathname2url, url2pathname
from urllib.parse import urlparse
# Shared parser instance used by parse_url() for entity unescaping.
# NOTE(review): HTMLParser.unescape is deprecated/removed in modern
# Python -- consider html.unescape; confirm minimum supported version.
html_parser = html.parser.HTMLParser()
# Single drive letter, e.g. "c" (appears in the URL scheme position).
RE_WIN_DRIVE_LETTER = re.compile(r"^[A-Za-z]$")
# Windows drive path, e.g. "c:\folder\file".
RE_WIN_DRIVE_PATH = re.compile(r"^[A-Za-z]:(?:\\.*)?$")
# Schemes treated as genuine URLs rather than file paths.
RE_URL = re.compile('(http|ftp)s?|data|mailto|tel|news')
# Default-protocol form that pathname2url can produce on Windows: ///c:/...
RE_WIN_DEFAULT_PROTOCOL = re.compile(r"^///[A-Za-z]:(?:/.*)?$")
# Resolve the host platform once at import time.
if sys.platform.startswith('win'):
    _PLATFORM = "windows"
elif sys.platform == "darwin": # pragma: no cover
    _PLATFORM = "osx"
else:
    _PLATFORM = "linux"
def is_win(): # pragma: no cover
    """Return True when running on Windows."""
    return _PLATFORM == "windows"
def is_linux(): # pragma: no cover
    """Return True when running on Linux (or any non-Windows, non-macOS OS)."""
    return _PLATFORM == "linux"
def is_mac(): # pragma: no cover
    """Return True when running on macOS."""
    return _PLATFORM == "osx"
def url2path(path):
    """Convert a (percent-encoded) file URL path to a local file path.

    The original docstring said "Path to URL", which is backwards:
    `url2pathname` decodes a URL path into a filesystem path.
    """
    return url2pathname(path)
def path2url(url):
    """Convert a local file path to a (percent-encoded) URL path.

    The original docstring said "URL to path", which is backwards:
    `pathname2url` encodes a filesystem path for use in a URL.
    """
    path = pathname2url(url)
    # If on windows, replace the notation to use a default protocol `///` with nothing.
    if is_win() and RE_WIN_DEFAULT_PROTOCOL.match(path):
        path = path.replace('///', '', 1)
    return path
def get_code_points(s):
    """Return the string's characters as a list (one code point each)."""
    # list(s) is the idiomatic equivalent of [c for c in s].
    return list(s)
def get_ord(c):
    """Return the Unicode code point of the character *c*."""
    return ord(c)
def get_char(value):
    """Return the character for the Unicode code point *value*."""
    return chr(value)
def escape_chars(md, echrs):
    """
    Add characters to the Markdown instance's escape list.

    The shared list must not be mutated in place (that would leak into
    every other Markdown instance), so extend a copy and assign it back.
    """
    escaped = copy.copy(md.ESCAPED_CHARS)
    for ch in echrs:
        # Keep the list duplicate-free while preserving order.
        if ch not in escaped:
            escaped.append(ch)
    md.ESCAPED_CHARS = escaped
def parse_url(url):
    """
    Parse the URL.

    Try to determine if the following is a file path or
    (as we will call anything else) a URL.

    Returns the urlparse components plus two flags:
    (scheme, netloc, path, params, query, fragment, is_url, is_absolute).
    Windows-style locations are normalized so the path parts are combined.

    We also assume if we see something like c:/ it is a Windows path.
    We don't bother checking if this **is** a Windows system, but
    'nix users really shouldn't be creating weird names like c: for their folder.
    """
    is_url = False
    is_absolute = False
    # HTML entities in the href are decoded before parsing.
    scheme, netloc, path, params, query, fragment = urlparse(html_parser.unescape(url))
    if RE_URL.match(scheme):
        # Clearly a URL
        is_url = True
    elif scheme == '' and netloc == '' and path == '':
        # Maybe just a URL fragment
        is_url = True
    elif scheme == 'file' and (RE_WIN_DRIVE_PATH.match(netloc)):
        # file://c:/path or file://c:\path
        path = '/' + (netloc + path).replace('\\', '/')
        netloc = ''
        is_absolute = True
    elif scheme == 'file' and netloc.startswith('\\'):
        # file://\c:\path or file://\\path
        path = (netloc + path).replace('\\', '/')
        netloc = ''
        is_absolute = True
    elif scheme == 'file':
        # file:///path
        is_absolute = True
    elif RE_WIN_DRIVE_LETTER.match(scheme):
        # c:/path -- the drive letter was parsed as a scheme; rebuild it
        # as a file path.
        path = '/%s:%s' % (scheme, path.replace('\\', '/'))
        scheme = 'file'
        netloc = ''
        is_absolute = True
    elif scheme == '' and netloc != '' and url.startswith('//'):
        # //file/path -- UNC-style location.
        path = '//' + netloc + path
        scheme = 'file'
        netloc = ''
        is_absolute = True
    elif scheme != '' and netloc != '':
        # A non-file path or strange URL
        is_url = True
    elif path.startswith(('/', '\\')):
        # /root path
        is_absolute = True
    return (scheme, netloc, path, params, query, fragment, is_url, is_absolute)
class PatSeqItem(namedtuple('PatSeqItem', ['pattern', 'builder', 'tags'])):
    """Pattern sequence item: compiled pattern, builder name, and tag spec."""
class PatternSequenceProcessor(InlineProcessor):
    """Processor for handling complex nested patterns such as strong and em matches."""
    PATTERNS = []  # PatSeqItem entries, evaluated in priority order
    def build_single(self, m, tag, idx):
        """Return single tag element built from group 2 of the match."""
        el1 = etree.Element(tag)
        text = m.group(2)
        self.parse_sub_patterns(text, el1, None, idx)
        return el1
    def build_double(self, m, tags, idx):
        """Return double tag: inner tag first, optional trailing text on the outer."""
        tag1, tag2 = tags.split(",")
        el1 = etree.Element(tag1)
        el2 = etree.Element(tag2)
        text = m.group(2)
        self.parse_sub_patterns(text, el2, None, idx)
        el1.append(el2)
        # A third group means extra content that belongs to the outer tag.
        if len(m.groups()) == 3:
            text = m.group(3)
            self.parse_sub_patterns(text, el1, el2, idx)
        return el1
    def build_double2(self, m, tags, idx):
        """Return double tags (variant 2): `<strong>text <em>text</em></strong>`."""
        tag1, tag2 = tags.split(",")
        el1 = etree.Element(tag1)
        el2 = etree.Element(tag2)
        text = m.group(2)
        self.parse_sub_patterns(text, el1, None, idx)
        text = m.group(3)
        el1.append(el2)
        self.parse_sub_patterns(text, el2, None, idx)
        return el1
    def parse_sub_patterns(self, data, parent, last, idx):
        """
        Parses sub patterns.

        `data` (`str`):
            text to evaluate.

        `parent` (`etree.Element`):
            Parent to attach text and sub elements to.

        `last` (`etree.Element`):
            Last appended child to parent. Can also be None if parent has no children.

        `idx` (`int`):
            Current pattern index that was used to evaluate the parent.
        """
        offset = 0  # start of text not yet attached to any node
        pos = 0
        length = len(data)
        while pos < length:
            # Find the start of potential emphasis or strong tokens
            if self.compiled_re.match(data, pos):
                matched = False
                # See if the we can match an emphasis/strong pattern
                for index, item in enumerate(self.PATTERNS):
                    # Only evaluate patterns that are after what was used on the parent
                    if index <= idx:
                        continue
                    m = item.pattern.match(data, pos)
                    if m:
                        # Append child nodes to parent
                        # Text nodes should be appended to the last
                        # child if present, and if not, it should
                        # be added as the parent's text node.
                        text = data[offset:m.start(0)]
                        if text:
                            if last is not None:
                                last.tail = text
                            else:
                                parent.text = text
                        el = self.build_element(m, item.builder, item.tags, index)
                        parent.append(el)
                        last = el
                        # Move our position past the matched hunk
                        offset = pos = m.end(0)
                        matched = True
                if not matched:
                    # We matched nothing, move on to the next character
                    pos += 1
            else:
                # Increment position as no potential emphasis start was found.
                pos += 1
        # Append any leftover text as a text node.
        text = data[offset:]
        if text:
            if last is not None:
                last.tail = text
            else:
                parent.text = text
    def build_element(self, m, builder, tags, index):
        """Element builder: dispatch on the builder name from the PatSeqItem."""
        if builder == 'double2':
            return self.build_double2(m, tags, index)
        elif builder == 'double':
            return self.build_double(m, tags, index)
        else:
            return self.build_single(m, tags, index)
    def handleMatch(self, m, data):
        """Parse patterns.

        The first PATTERNS entry matching at this position wins,
        mirroring the index-based priority in parse_sub_patterns.
        """
        el = None
        start = None
        end = None
        for index, item in enumerate(self.PATTERNS):
            m1 = item.pattern.match(data, m.start(0))
            if m1:
                start = m1.start(0)
                end = m1.end(0)
                el = self.build_element(m1, item.builder, item.tags, index)
                break
        return el, start, end
class PymdownxDeprecationWarning(UserWarning): # pragma: no cover
    """Deprecation warning for Pymdownx that is not hidden (UserWarning shows by default)."""
| |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Create the FamilleRatings and PrestataireRatings tables.

        Both tables share the same rating columns; each has a FK back to
        its rated entity with related_name 'ratings'.
        """
        # Adding model 'FamilleRatings'
        db.create_table(u'famille_familleratings', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('created_at', self.gf('django.db.models.fields.DateField')(auto_now_add=True, blank=True)),
            ('updated_at', self.gf('django.db.models.fields.DateField')(auto_now=True, blank=True)),
            ('reliability', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0, blank=True)),
            ('amability', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0, blank=True)),
            ('serious', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0, blank=True)),
            ('ponctuality', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0, blank=True)),
            ('famille', self.gf('django.db.models.fields.related.ForeignKey')(related_name='ratings', to=orm['famille.Famille'])),
        ))
        db.send_create_signal('famille', ['FamilleRatings'])
        # Adding model 'PrestataireRatings'
        db.create_table(u'famille_prestataireratings', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('created_at', self.gf('django.db.models.fields.DateField')(auto_now_add=True, blank=True)),
            ('updated_at', self.gf('django.db.models.fields.DateField')(auto_now=True, blank=True)),
            ('reliability', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0, blank=True)),
            ('amability', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0, blank=True)),
            ('serious', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0, blank=True)),
            ('ponctuality', self.gf('django.db.models.fields.PositiveSmallIntegerField')(default=0, blank=True)),
            ('prestataire', self.gf('django.db.models.fields.related.ForeignKey')(related_name='ratings', to=orm['famille.Prestataire'])),
        ))
        db.send_create_signal('famille', ['PrestataireRatings'])
def backwards(self, orm):
# Deleting model 'FamilleRatings'
db.delete_table(u'famille_familleratings')
# Deleting model 'PrestataireRatings'
db.delete_table(u'famille_prestataireratings')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'famille.enfant': {
'Meta': {'object_name': 'Enfant'},
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'e_birthday': ('django.db.models.fields.DateField', [], {'null': 'True', 'db_column': "'birthday'", 'blank': 'True'}),
'e_name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_column': "'name'"}),
'e_school': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'db_column': "'school'", 'blank': 'True'}),
'famille': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'enfants'", 'to': "orm['famille.Famille']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'})
},
'famille.famille': {
'Meta': {'object_name': 'Famille'},
'baby': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'cdt_periscolaire': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'default': "'France'", 'max_length': '20', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}),
'devoirs': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'diploma': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '100'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'geolocation': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['famille.Geolocation']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'langue': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
'menage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'non_fumeur': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'nuit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'permis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'plan': ('django.db.models.fields.CharField', [], {'default': "'basic'", 'max_length': '20', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'profile_pic': ('famille.utils.fields.ContentTypeRestrictedFileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'psc1': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'repassage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sortie_ecole': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'tarif': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'tel': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'tel_visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'type_garde': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'type_presta': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'urgence': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
'famille.famillefavorite': {
'Meta': {'object_name': 'FamilleFavorite'},
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'famille': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'favorites'", 'to': "orm['famille.Famille']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {}),
'object_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'})
},
'famille.familleplanning': {
'Meta': {'object_name': 'FamillePlanning'},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'famille': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'planning'", 'to': "orm['famille.Famille']"}),
'frequency': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'schedule': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['famille.Schedule']", 'symmetrical': 'False'}),
'start_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime.now'}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'weekday': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['famille.Weekday']", 'symmetrical': 'False'})
},
'famille.familleratings': {
'Meta': {'object_name': 'FamilleRatings'},
'amability': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'famille': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ratings'", 'to': "orm['famille.Famille']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ponctuality': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'blank': 'True'}),
'reliability': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'blank': 'True'}),
'serious': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'})
},
'famille.geolocation': {
'Meta': {'object_name': 'Geolocation'},
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {}),
'lon': ('django.db.models.fields.FloatField', [], {}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'})
},
'famille.prestataire': {
'Meta': {'object_name': 'Prestataire'},
'baby': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'cdt_periscolaire': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'default': "'France'", 'max_length': '20', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}),
'devoirs': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'diploma': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '100'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'geolocation': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['famille.Geolocation']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level_de': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'level_en': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'level_es': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'level_it': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'menage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'non_fumeur': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'nuit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'other_language': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'permis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'plan': ('django.db.models.fields.CharField', [], {'default': "'basic'", 'max_length': '20', 'blank': 'True'}),
'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'profile_pic': ('famille.utils.fields.ContentTypeRestrictedFileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'psc1': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'repassage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'resume': ('famille.utils.fields.ContentTypeRestrictedFileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'sortie_ecole': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'sub_types': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'tarif': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'tel': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'tel_visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'type_garde': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'urgence': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
'famille.prestatairefavorite': {
'Meta': {'object_name': 'PrestataireFavorite'},
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {}),
'object_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'prestataire': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'favorites'", 'to': "orm['famille.Prestataire']"}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'})
},
'famille.prestataireplanning': {
'Meta': {'object_name': 'PrestatairePlanning'},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'frequency': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'prestataire': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'planning'", 'to': "orm['famille.Prestataire']"}),
'schedule': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['famille.Schedule']", 'symmetrical': 'False'}),
'start_date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime.now'}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
'weekday': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['famille.Weekday']", 'symmetrical': 'False'})
},
'famille.prestataireratings': {
'Meta': {'object_name': 'PrestataireRatings'},
'amability': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ponctuality': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'blank': 'True'}),
'prestataire': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ratings'", 'to': "orm['famille.Prestataire']"}),
'reliability': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'blank': 'True'}),
'serious': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'})
},
'famille.reference': {
'Meta': {'object_name': 'Reference'},
'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'missions': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'prestataire': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'references'", 'to': "orm['famille.Prestataire']"}),
'referenced_user': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'reference_of'", 'unique': 'True', 'null': 'True', 'to': "orm['famille.Famille']"}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'})
},
'famille.schedule': {
'Meta': {'object_name': 'Schedule'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'famille.weekday': {
'Meta': {'object_name': 'Weekday'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '15'})
}
}
complete_apps = ['famille']
| |
""" LambdaMART
----------
Applies a LambdaMART, a pairwise learning to rank technique, for
recommending reviews.
Usage:
$ python -m algo.ltr.lambdamart [-l <learning_rate>] [-n <leaves>]
[-t <trees>] [-f <feature_set>] [-b <bias>]
where:
<learning_rate> is a float giving the update step (shrinkage) of each stage,
<leaves> is the number of leaves of each tree (2^depth - 1),
<trees> is the number of trees to build the model with,
<feature_set> is the set of features used, defined in algo/const.py,
<bias> is either 'y' or 'n'.
"""
from commands import getstatusoutput, getoutput
from sys import argv
from numpy import nan, isnan
from pickle import load
from algo.const import NUM_SETS, RANK_SIZE, REP, REVIEW_FEATS, AUTHOR_FEATS, \
VOTER_FEATS, SIM_FEATS, CONN_FEATS
from perf.metrics import calculate_rmse, calculate_avg_ndcg
from util.avg_model import compute_avg_user, compute_avg_model
from util.bias import BiasModel
from util.scaling import fit_scaler, fit_scaler_by_query, scale_features
# Hyper-parameter defaults; each can be overridden on the command line
# (see load_args below).
_ALPHA = 0.01        # learning rate (shrinkage) applied at each boosting stage
_L = 10              # number of leaves per tree
_T = 100             # number of trees in the ensemble
_FEAT_TYPE = 'cap'   # feature-set key into the *_FEATS dicts (algo/const.py)
_BIAS = False        # whether to fit/add a bias model around the ranker
# Directory layout for pickled inputs and intermediate/output artifacts.
_PKL_DIR = 'out/pkl'
_VAL_DIR = 'out/val'
_DATA_DIR = 'out/data'
_MODEL_DIR = 'out/model'
_OUTPUT_DIR = 'out/test'
# Configuration string identifying a run in file names; set by load_args.
_CONF_STR = None
def load_args():
    """ Loads command-line arguments into the module-level configuration
        globals (_ALPHA, _L, _T, _FEAT_TYPE, _BIAS) and derives _CONF_STR.

    Args:
      None. Reads sys.argv.

    Returns:
      None. Global variables are updated.
    """
    global _ALPHA, _L, _T, _FEAT_TYPE, _BIAS, _CONF_STR
    i = 1
    while i < len(argv):
        # Every flag takes a value; if the value is missing, print usage and
        # quit instead of crashing with an IndexError on argv[i+1].
        if i + 1 >= len(argv):
            print ('Usage: \n'
                '$ python -m algo.ltr.lambdamart [-l <learning_rate>] [-n <leaves>] '
                '[-t <trees>] [-f <feature_set>] [-b <bias>]')
            exit()
        if argv[i] == '-l':
            _ALPHA = float(argv[i+1])
        elif argv[i] == '-n':
            _L = int(argv[i+1])
        elif argv[i] == '-t':
            _T = int(argv[i+1])
        elif argv[i] == '-f' and argv[i+1] in REVIEW_FEATS:
            _FEAT_TYPE = argv[i+1]
        elif argv[i] == '-b' and argv[i+1] in ['y', 'n']:
            _BIAS = argv[i+1] == 'y'
        else:
            # Unknown flag or invalid value: print usage and quit.
            print ('Usage: \n'
                '$ python -m algo.ltr.lambdamart [-l <learning_rate>] [-n <leaves>] '
                '[-t <trees>] [-f <feature_set>] [-b <bias>]')
            exit()
        i = i + 2
    _CONF_STR = 'l:%f,n:%s,t:%s,f:%s,b:%s' % (_ALPHA, _L, _T, _FEAT_TYPE,
        'y' if _BIAS else 'n')
def model_str(x, y, qid):
    """ Models features to a string, in the required format for SVMRank. The
        format consists in outputting the vote id, followed by 'qid:<qid>'
        where <qid> is the query id and, then, by features in the format
        '<i>:<value>', where <i> is the 1-based index of the feature
        (dimension) and <value> is its value. Also, <qid> should be sorted
        over lines (the caller's responsibility).

    Args:
      x: feature vector of the instance — assumed to be a 1-D numpy array
        (uses x.size and integer indexing); TODO confirm against callers.
      y: truth value of the instance (formatted with %d).
      qid: query id of the instance.

    Returns:
      A string with the line ready to insert in the output file (note it
      keeps the historical trailing space).
    """
    # Build all tokens first and join once, instead of the previous
    # quadratic string concatenation with a redundant 'count' accumulator.
    parts = ['%d qid:%s' % (y, qid)]
    parts.extend('%d:%f' % (i + 1, x[i]) for i in range(x.size))
    return ' '.join(parts) + ' '
def output_model(X, y, qid, outfile):
    """ Outputs the model to a file, with one vote per line, ordered by
        query id as SVMRank requires.

    Args:
      X: list of feature vectors.
      y: list of truth values (may be None for validation/test, in which
        case 0 is written as the truth placeholder).
      qid: list of query ids associated to each instance.
      outfile: file descriptor to output.

    Returns:
      The list of original positions in query-id order, so predictions read
      back from the ranker can be mapped to the original instance order.
    """
    order = sorted(range(len(X)), key=lambda pos: qid[pos])
    for pos in order:
        truth = 0 if y is None else y[pos]
        outfile.write(model_str(X[pos], truth, qid[pos]) + '\n')
    return order
def generate_input(reviews, users, sim, conn, votes, avg_user, avg_sim, avg_conn):
    """ Generates input for the regression problem by turning all features
        associated to each entity into a vector.

    Args:
      reviews: dictionary of reviews.
      users: dictionary of users.
      sim: dictionary of author-voter similarity, indexed by the pair.
      conn: dictionary of author-voter connection strength, indexed by the
        pair.
      votes: list of votes to extract features from.
      avg_user: dictionary of an average user for mean imputation.
      avg_sim: dictionary of an average similarity relation.
      avg_conn: dictionary of an average connection strength relation.

    Returns:
      A triple with a list of features' lists, a list of true votes and a
      list with query ids (one query per voter).
    """
    X = []
    y = []
    qid = []
    for vote in votes:
        example = []
        review = reviews[vote['review']]
        # Review features are used as-is.
        for feature in REVIEW_FEATS[_FEAT_TYPE]:
            example.append(review[feature])
        # Unknown authors/voters fall back to the average user, and NaN
        # feature values are mean-imputed from the corresponding average.
        author = users.get(vote['author'], avg_user)
        for feature in AUTHOR_FEATS[_FEAT_TYPE]:
            if isnan(author[feature]):
                example.append(avg_user[feature])
            else:
                example.append(author[feature])
        voter = users.get(vote['voter'], avg_user)
        av = (author['id'], voter['id'])
        u_sim = sim.get(av, avg_sim)
        for feature in SIM_FEATS[_FEAT_TYPE]:
            if isnan(u_sim[feature]):
                example.append(avg_sim[feature])
            else:
                example.append(u_sim[feature])
        u_conn = conn.get(av, avg_conn)
        for feature in CONN_FEATS[_FEAT_TYPE]:
            if isnan(u_conn[feature]):
                example.append(avg_conn[feature])
            else:
                example.append(u_conn[feature])
        X.append(example)
        y.append(vote['vote'])
        # The voter id doubles as the ranking query id.
        qid.append(int(vote['voter']))
    return X, y, qid
def main():
    """ Predicts votes by applying LambdaMART technique.

    For each dataset split: loads the pickled data, builds SVMRank-format
    files, shells out to RankLib to train and score the model, and writes
    per-repetition prediction files for validation and test.

    Args:
      None.

    Returns:
      None. Writes files under _DATA_DIR, _MODEL_DIR, _VAL_DIR, _OUTPUT_DIR.
    """
    load_args()
    # One model (times REP repetitions) per dataset split.
    for i in xrange(NUM_SETS):
        print 'Reading data'
        reviews = load(open('%s/reviews-%d.pkl' % (_PKL_DIR, i), 'r'))
        users = load(open('%s/users-%d.pkl' % (_PKL_DIR, i), 'r'))
        train = load(open('%s/train-%d.pkl' % (_PKL_DIR, i), 'r'))
        test = load(open('%s/test-%d.pkl' % (_PKL_DIR, i), 'r'))
        val = load(open('%s/validation-%d.pkl' % (_PKL_DIR, i), 'r'))
        sim = load(open('%s/sim-%d.pkl' % (_PKL_DIR, i), 'r'))
        conn = load(open('%s/conn-%d.pkl' % (_PKL_DIR, i), 'r'))
        # Keep the untransformed truth for error reporting, since the bias
        # model (if enabled) transforms the training votes below.
        train_truth = [v['vote'] for v in train]
        if _BIAS:
            bias = BiasModel()
            train = bias.fit_transform(train, reviews)
        print 'Creating average user (for mean imputation)'
        avg_user = compute_avg_user(users)
        avg_sim = compute_avg_model(sim)
        avg_conn = compute_avg_model(conn)
        X_train, y_train, qid_train = generate_input(reviews, users, sim, conn,
            train, avg_user, avg_sim, avg_conn)
        X_val, _, qid_val = generate_input(reviews, users, sim, conn, val,
            avg_user, avg_sim, avg_conn)
        X_test, _, qid_test = generate_input(reviews, users, sim, conn,
            test, avg_user, avg_sim, avg_conn)
        # Scaler is fit on train only, then applied to all three sets.
        scaler = fit_scaler('minmax', X_train)
        X_train = scale_features(scaler, X_train)
        X_val = scale_features(scaler, X_val)
        X_test = scale_features(scaler, X_test)
        print 'Outputting model'
        # The *_index lists map the query-sorted file order back to the
        # original instance order (see output_model).
        outfile = open('%s/rank_train-%s-%d.dat' % (_DATA_DIR, _CONF_STR, i), 'w')
        train_index = output_model(X_train, y_train, qid_train, outfile)
        outfile.close()
        outfile = open('%s/rank_val-%s-%d.dat' % (_DATA_DIR, _CONF_STR, i), 'w')
        val_index = output_model(X_val, None, qid_val, outfile)
        outfile.close()
        outfile = open('%s/rank_test-%s-%d.dat' % (_DATA_DIR, _CONF_STR, i), 'w')
        test_index = output_model(X_test, None, qid_test, outfile)
        outfile.close()
        for j in xrange(REP):
            print 'Fitting model'
            # Shell out to RankLib; '-ranker 6' selects LambdaMART
            # (per RankLib's CLI — confirm against the bundled jar version).
            print getoutput(('java -jar lib/ranklib/RankLib.jar -train '
                '%s/rank_train-%s-%d.dat -save %s/lambdamart_model-%s-%d-%d.dat '
                '-gmax 5 -ranker 6 -metric2t NDCG@5 -tree %d -leaf %d -shrinkage '
                '%f') % (_DATA_DIR, _CONF_STR, i, _MODEL_DIR, _CONF_STR, i, j, _T,
                _L, _ALPHA))
            print 'Evaluating in train'
            print getoutput(('java -jar lib/ranklib/RankLib.jar -load '
                '%s/lambdamart_model-%s-%d-%d.dat -rank %s/rank_train-%s-%d.dat '
                '-score %s/rank_pred_train-%s-%d-%d.dat -gmax 5 -metric2T NDCG@5') % \
                (_MODEL_DIR, _CONF_STR, i, j, _DATA_DIR, _CONF_STR, i, _DATA_DIR,
                _CONF_STR, i, j))
            raw_pred = []
            # Score files have one line per instance; the score is the third
            # whitespace-separated column.
            predfile = open('%s/rank_pred_train-%s-%d-%d.dat' % (_DATA_DIR, _CONF_STR,
                i, j), 'r')
            raw_pred = [float(p.strip().split()[2]) for p in predfile]
            predfile.close()
            # Undo the query-sorted ordering of the rank file.
            pred = [raw_pred[k] for k in train_index]
            if _BIAS:
                bias.add_bias(train, reviews, pred)
            print '~ Training error on set %d repetition %d' % (i, j)
            print 'RMSE: %f' % calculate_rmse(pred, train_truth)
            print 'nDCG@%d: %f' % (RANK_SIZE, calculate_avg_ndcg(train, reviews,
                pred, train_truth, RANK_SIZE))
            print 'Predicting in validation'
            print getoutput(('java -jar lib/ranklib/RankLib.jar -load '
                '%s/lambdamart_model-%s-%d-%d.dat -rank %s/rank_val-%s-%d.dat '
                '-score %s/rank_pred_val-%s-%d-%d.dat -gmax 5 -metric2T NDCG@5') % \
                (_MODEL_DIR, _CONF_STR, i, j, _DATA_DIR, _CONF_STR, i, _DATA_DIR,
                _CONF_STR, i, j))
            predfile = open('%s/rank_pred_val-%s-%d-%d.dat' % (_DATA_DIR, _CONF_STR,
                i, j), 'r')
            raw_pred = [float(p.strip().split()[2]) for p in predfile]
            predfile.close()
            pred = [raw_pred[k] for k in val_index]
            if _BIAS:
                bias.add_bias(val, reviews, pred)
            output = open('%s/lambdamart-%s-%d-%d.dat' % (_VAL_DIR, _CONF_STR, i, j),
                'w')
            for p in pred:
                print >> output, p
            output.close()
            print 'Predicting in test'
            print getoutput(('java -jar lib/ranklib/RankLib.jar -load '
                '%s/lambdamart_model-%s-%d-%d.dat -rank %s/rank_test-%s-%d.dat '
                '-score %s/rank_pred_test-%s-%d-%d.dat -gmax 5 -metric2T NDCG@5') % \
                (_MODEL_DIR, _CONF_STR, i, j, _DATA_DIR, _CONF_STR, i, _DATA_DIR,
                _CONF_STR, i, j))
            predfile = open('%s/rank_pred_test-%s-%d-%d.dat' % (_DATA_DIR, _CONF_STR,
                i, j), 'r')
            raw_pred = [float(p.strip().split()[2]) for p in predfile]
            predfile.close()
            pred = [raw_pred[k] for k in test_index]
            if _BIAS:
                bias.add_bias(test, reviews, pred)
            output = open('%s/lambdamart-%s-%d-%d.dat' % (_OUTPUT_DIR, _CONF_STR, i,
                j), 'w')
            for p in pred:
                print >> output, p
            output.close()


if __name__ == '__main__':
    main()
| |
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/tools/docco/examples.py
import string
testannotations="""
def annotations(canvas):
from reportlab.lib.units import inch
canvas.drawString(inch, 2.5*inch,
"setAuthor, setTitle, setSubject have no visible effect")
canvas.drawString(inch, inch, "But if you are viewing this document dynamically")
canvas.drawString(inch, 0.5*inch, "please look at File/Document Info")
canvas.setAuthor("the ReportLab Team")
canvas.setTitle("ReportLab PDF Generation User Guide")
canvas.setSubject("How to Generate PDF files using the ReportLab modules")
"""
# magic function making module
test1 = """
def f(a,b):
print "it worked", a, b
return a+b
"""
test2 = """
def g(n):
if n==0: return 1
else: return n*g(n-1)
"""
testhello = """
def hello(c):
from reportlab.lib.units import inch
# move the origin up and to the left
c.translate(inch,inch)
# define a large font
c.setFont("Helvetica", 14)
# choose some colors
c.setStrokeColorRGB(0.2,0.5,0.3)
c.setFillColorRGB(1,0,1)
# draw some lines
c.line(0,0,0,1.7*inch)
c.line(0,0,1*inch,0)
# draw a rectangle
c.rect(0.2*inch,0.2*inch,1*inch,1.5*inch, fill=1)
# make text go straight up
c.rotate(90)
# change color
c.setFillColorRGB(0,0,0.77)
# say hello (note after rotate the y coord needs to be negative!)
c.drawString(0.3*inch, -inch, "Hello World")
"""
testcoords = """
def coords(canvas):
from reportlab.lib.units import inch
from reportlab.lib.colors import pink, black, red, blue, green
c = canvas
c.setStrokeColor(pink)
c.grid([inch, 2*inch, 3*inch, 4*inch], [0.5*inch, inch, 1.5*inch, 2*inch, 2.5*inch])
c.setStrokeColor(black)
c.setFont("Times-Roman", 20)
c.drawString(0,0, "(0,0) the Origin")
c.drawString(2.5*inch, inch, "(2.5,1) in inches")
c.drawString(4*inch, 2.5*inch, "(4, 2.5)")
c.setFillColor(red)
c.rect(0,2*inch,0.2*inch,0.3*inch, fill=1)
c.setFillColor(green)
c.circle(4.5*inch, 0.4*inch, 0.2*inch, fill=1)
"""
testtranslate = """
def translate(canvas):
from reportlab.lib.units import cm
canvas.translate(2.3*cm, 0.3*cm)
coords(canvas)
"""
testscale = """
def scale(canvas):
canvas.scale(0.75, 0.5)
coords(canvas)
"""
testscaletranslate = """
def scaletranslate(canvas):
from reportlab.lib.units import inch
canvas.setFont("Courier-BoldOblique", 12)
# save the state
canvas.saveState()
# scale then translate
canvas.scale(0.3, 0.5)
canvas.translate(2.4*inch, 1.5*inch)
canvas.drawString(0, 2.7*inch, "Scale then translate")
coords(canvas)
# forget the scale and translate...
canvas.restoreState()
# translate then scale
canvas.translate(2.4*inch, 1.5*inch)
canvas.scale(0.3, 0.5)
canvas.drawString(0, 2.7*inch, "Translate then scale")
coords(canvas)
"""
testmirror = """
def mirror(canvas):
from reportlab.lib.units import inch
canvas.translate(5.5*inch, 0)
canvas.scale(-1.0, 1.0)
coords(canvas)
"""
testRGBcolors = """
def colorsRGB(canvas):
from reportlab.lib import colors
from reportlab.lib.units import inch
black = colors.black
y = x = 0; dy=inch*3/4.0; dx=inch*5.5/5; w=h=dy/2; rdx=(dx-w)/2
rdy=h/5.0; texty=h+2*rdy
canvas.setFont("Helvetica",10)
for [namedcolor, name] in (
[colors.lavenderblush, "lavenderblush"],
[colors.lawngreen, "lawngreen"],
[colors.lemonchiffon, "lemonchiffon"],
[colors.lightblue, "lightblue"],
[colors.lightcoral, "lightcoral"]):
canvas.setFillColor(namedcolor)
canvas.rect(x+rdx, y+rdy, w, h, fill=1)
canvas.setFillColor(black)
canvas.drawCentredString(x+dx/2, y+texty, name)
x = x+dx
y = y + dy; x = 0
for rgb in [(1,0,0), (0,1,0), (0,0,1), (0.5,0.3,0.1), (0.4,0.5,0.3)]:
r,g,b = rgb
canvas.setFillColorRGB(r,g,b)
canvas.rect(x+rdx, y+rdy, w, h, fill=1)
canvas.setFillColor(black)
canvas.drawCentredString(x+dx/2, y+texty, "r%s g%s b%s"%rgb)
x = x+dx
y = y + dy; x = 0
for gray in (0.0, 0.25, 0.50, 0.75, 1.0):
canvas.setFillGray(gray)
canvas.rect(x+rdx, y+rdy, w, h, fill=1)
canvas.setFillColor(black)
canvas.drawCentredString(x+dx/2, y+texty, "gray: %s"%gray)
x = x+dx
"""
testCMYKcolors = """
def colorsCMYK(canvas):
from reportlab.lib.colors import CMYKColor, PCMYKColor
from reportlab.lib.units import inch
# creates a black CMYK ; CMYKColor use real values
black = CMYKColor(0,0,0,1)
# creates a cyan CMYK ; PCMYKColor use integer values
cyan = PCMYKColor(100,0,0,0)
y = x = 0; dy=inch*3/4.0; dx=inch*5.5/5; w=h=dy/2; rdx=(dx-w)/2
rdy=h/5.0; texty=h+2*rdy
canvas.setFont("Helvetica",10)
y = y + dy; x = 0
for cmyk in [(1,0,0,0), (0,1,0,0), (0,0,1,0), (0,0,0,1), (0,0,0,0)]:
c,m,y1,k = cmyk
canvas.setFillColorCMYK(c,m,y1,k)
canvas.rect(x+rdx, y+rdy, w, h, fill=1)
canvas.setFillColor(black)
canvas.drawCentredString(x+dx/2, y+texty, "c%s m%s y%s k%s"%cmyk)
x = x+dx
"""
testspumoni = """
def spumoni(canvas):
from reportlab.lib.units import inch
from reportlab.lib.colors import pink, green, brown, white
x = 0; dx = 0.4*inch
for i in range(4):
for color in (pink, green, brown):
canvas.setFillColor(color)
canvas.rect(x,0,dx,3*inch,stroke=0,fill=1)
x = x+dx
canvas.setFillColor(white)
canvas.setStrokeColor(white)
canvas.setFont("Helvetica-Bold", 85)
canvas.drawCentredString(2.75*inch, 1.3*inch, "SPUMONI")
"""
testspumoni2 = """
def spumoni2(canvas):
from reportlab.lib.units import inch
from reportlab.lib.colors import pink, green, brown, white, black
# draw the previous drawing
spumoni(canvas)
# now put an ice cream cone on top of it:
# first draw a triangle (ice cream cone)
p = canvas.beginPath()
xcenter = 2.75*inch
radius = 0.45*inch
p.moveTo(xcenter-radius, 1.5*inch)
p.lineTo(xcenter+radius, 1.5*inch)
p.lineTo(xcenter, 0)
canvas.setFillColor(brown)
canvas.setStrokeColor(black)
canvas.drawPath(p, fill=1)
# draw some circles (scoops)
y = 1.5*inch
for color in (pink, green, brown):
canvas.setFillColor(color)
canvas.circle(xcenter, y, radius, fill=1)
y = y+radius
"""
testalpha = """
def alpha(canvas):
from reportlab.graphics.shapes import Rect
from reportlab.lib.colors import Color, black, blue, red
red50transparent = Color( 100, 0, 0, alpha=0.5)
c = canvas
c.setFillColor(black)
c.setFont('Helvetica', 10)
c.drawString(25,180, 'solid')
c.setFillColor(blue)
c.rect(25,25,100,100, fill=True, stroke=False)
c.setFillColor(red)
c.rect(100,75,100,100, fill=True, stroke=False)
c.setFillColor(black)
c.drawString(225,180, 'transparent')
c.setFillColor(blue)
c.rect(225,25,100,100, fill=True, stroke=False)
c.setFillColor(red50transparent)
c.rect(300,75,100,100, fill=True, stroke=False)
"""
testoverprint = """
def overPrint(canvas):
from reportlab.graphics.shapes import Rect
from reportlab.lib.colors import PCMYKColor, PCMYKColorSep
black = PCMYKColorSep(0, 0, 0, 100, spotName='black',density=100)
blue = PCMYKColorSep(91.0, 43.0, 0.0, 0.0, spotName='PANTONE 285 CV',density=100)
red = PCMYKColorSep( 0.0, 100.0, 91.0, 0.0, spotName='PANTONE 485 CV',density=100)
c = canvas
c.setFillColor(black)
c.setFont('Helvetica', 10)
c.drawString(25,180, 'overprint')
c.setFillOverprint(True)
c.setFillColor(blue)
c.rect(25,25,100,100, fill=True, stroke=False)
c.setFillColor(red)
c.rect(100,75,100,100, fill=True, stroke=False)
c.setFillColor(black)
c.drawString(225,180, 'knockout')
c.setFillOverprint(False)
c.setFillColor(blue)
c.rect(225,25,100,100, fill=True, stroke=False)
c.setFillColor(red)
c.rect(300,75,100,100, fill=True, stroke=False)
"""
testbezier = """
def bezier(canvas):
from reportlab.lib.colors import yellow, green, red, black
from reportlab.lib.units import inch
i = inch
d = i/4
# define the bezier curve control points
x1,y1, x2,y2, x3,y3, x4,y4 = d,1.5*i, 1.5*i,d, 3*i,d, 5.5*i-d,3*i-d
# draw a figure enclosing the control points
canvas.setFillColor(yellow)
p = canvas.beginPath()
p.moveTo(x1,y1)
for (x,y) in [(x2,y2), (x3,y3), (x4,y4)]:
p.lineTo(x,y)
canvas.drawPath(p, fill=1, stroke=0)
# draw the tangent lines
canvas.setLineWidth(inch*0.1)
canvas.setStrokeColor(green)
canvas.line(x1,y1,x2,y2)
canvas.setStrokeColor(red)
canvas.line(x3,y3,x4,y4)
# finally draw the curve
canvas.setStrokeColor(black)
canvas.bezier(x1,y1, x2,y2, x3,y3, x4,y4)
"""
testbezier2 = """
def bezier2(canvas):
from reportlab.lib.colors import yellow, green, red, black
from reportlab.lib.units import inch
# make a sequence of control points
xd,yd = 5.5*inch/2, 3*inch/2
xc,yc = xd,yd
dxdy = [(0,0.33), (0.33,0.33), (0.75,1), (0.875,0.875),
(0.875,0.875), (1,0.75), (0.33,0.33), (0.33,0)]
pointlist = []
for xoffset in (1,-1):
yoffset = xoffset
for (dx,dy) in dxdy:
px = xc + xd*xoffset*dx
py = yc + yd*yoffset*dy
pointlist.append((px,py))
yoffset = -xoffset
for (dy,dx) in dxdy:
px = xc + xd*xoffset*dx
py = yc + yd*yoffset*dy
pointlist.append((px,py))
# draw tangent lines and curves
canvas.setLineWidth(inch*0.1)
while pointlist:
[(x1,y1),(x2,y2),(x3,y3),(x4,y4)] = pointlist[:4]
del pointlist[:4]
canvas.setLineWidth(inch*0.1)
canvas.setStrokeColor(green)
canvas.line(x1,y1,x2,y2)
canvas.setStrokeColor(red)
canvas.line(x3,y3,x4,y4)
# finally draw the curve
canvas.setStrokeColor(black)
canvas.bezier(x1,y1, x2,y2, x3,y3, x4,y4)
"""
testpencil = """
def pencil(canvas, text="No.2"):
from reportlab.lib.colors import yellow, red, black,white
from reportlab.lib.units import inch
u = inch/10.0
canvas.setStrokeColor(black)
canvas.setLineWidth(4)
# draw erasor
canvas.setFillColor(red)
canvas.circle(30*u, 5*u, 5*u, stroke=1, fill=1)
# draw all else but the tip (mainly rectangles with different fills)
canvas.setFillColor(yellow)
canvas.rect(10*u,0,20*u,10*u, stroke=1, fill=1)
canvas.setFillColor(black)
canvas.rect(23*u,0,8*u,10*u,fill=1)
canvas.roundRect(14*u, 3.5*u, 8*u, 3*u, 1.5*u, stroke=1, fill=1)
canvas.setFillColor(white)
canvas.rect(25*u,u,1.2*u,8*u, fill=1,stroke=0)
canvas.rect(27.5*u,u,1.2*u,8*u, fill=1, stroke=0)
canvas.setFont("Times-Roman", 3*u)
canvas.drawCentredString(18*u, 4*u, text)
# now draw the tip
penciltip(canvas,debug=0)
# draw broken lines across the body.
canvas.setDash([10,5,16,10],0)
canvas.line(11*u,2.5*u,22*u,2.5*u)
canvas.line(22*u,7.5*u,12*u,7.5*u)
"""
testpenciltip = """
def penciltip(canvas, debug=1):
from reportlab.lib.colors import tan, black, green
from reportlab.lib.units import inch
u = inch/10.0
canvas.setLineWidth(4)
if debug:
canvas.scale(2.8,2.8) # make it big
canvas.setLineWidth(1) # small lines
canvas.setStrokeColor(black)
canvas.setFillColor(tan)
p = canvas.beginPath()
p.moveTo(10*u,0)
p.lineTo(0,5*u)
p.lineTo(10*u,10*u)
p.curveTo(11.5*u,10*u, 11.5*u,7.5*u, 10*u,7.5*u)
p.curveTo(12*u,7.5*u, 11*u,2.5*u, 9.7*u,2.5*u)
p.curveTo(10.5*u,2.5*u, 11*u,0, 10*u,0)
canvas.drawPath(p, stroke=1, fill=1)
canvas.setFillColor(black)
p = canvas.beginPath()
p.moveTo(0,5*u)
p.lineTo(4*u,3*u)
p.lineTo(5*u,4.5*u)
p.lineTo(3*u,6.5*u)
canvas.drawPath(p, stroke=1, fill=1)
if debug:
canvas.setStrokeColor(green) # put in a frame of reference
canvas.grid([0,5*u,10*u,15*u], [0,5*u,10*u])
"""
testnoteannotation = """
from reportlab.platypus.flowables import Flowable
class NoteAnnotation(Flowable):
'''put a pencil in the margin.'''
def wrap(self, *args):
return (1,10) # I take up very little space! (?)
def draw(self):
canvas = self.canv
canvas.translate(-10,-10)
canvas.rotate(180)
canvas.scale(0.2,0.2)
pencil(canvas, text="NOTE")
"""
testhandannotation = """
from reportlab.platypus.flowables import Flowable
from reportlab.lib.colors import tan, green
class HandAnnotation(Flowable):
'''A hand flowable.'''
def __init__(self, xoffset=0, size=None, fillcolor=tan, strokecolor=green):
from reportlab.lib.units import inch
if size is None: size=4*inch
self.fillcolor, self.strokecolor = fillcolor, strokecolor
self.xoffset = xoffset
self.size = size
# normal size is 4 inches
self.scale = size/(4.0*inch)
def wrap(self, *args):
return (self.xoffset, self.size)
def draw(self):
canvas = self.canv
canvas.setLineWidth(6)
canvas.setFillColor(self.fillcolor)
canvas.setStrokeColor(self.strokecolor)
canvas.translate(self.xoffset+self.size,0)
canvas.rotate(90)
canvas.scale(self.scale, self.scale)
hand(canvas, debug=0, fill=1)
"""
# Sample song lyrics used as input text by several of the text-object
# demos below (textsize, cursormoves, charspace, wordspace, leading).
lyrics = '''\
well she hit Net Solutions
and she registered her own .com site now
and filled it up with yahoo profile pics
she snarfed in one night now
and she made 50 million when Hugh Hefner
bought up the rights now
and she'll have fun fun fun
til her Daddy takes the keyboard away'''
# Split once into a list of lines.  The str method form is the
# idiomatic equivalent of the old function call
# string.split(lyrics, "\n") and behaves identically.
lyrics = lyrics.split("\n")
testtextsize = """
def textsize(canvas):
from reportlab.lib.units import inch
from reportlab.lib.colors import magenta, red
canvas.setFont("Times-Roman", 20)
canvas.setFillColor(red)
canvas.drawCentredString(2.75*inch, 2.5*inch, "Font size examples")
canvas.setFillColor(magenta)
size = 7
y = 2.3*inch
x = 1.3*inch
for line in lyrics:
canvas.setFont("Helvetica", size)
canvas.drawRightString(x,y,"%s points: " % size)
canvas.drawString(x,y, line)
y = y-size*1.2
size = size+1.5
"""
teststar = """
def star(canvas, title="Title Here", aka="Comment here.",
xcenter=None, ycenter=None, nvertices=5):
from math import pi
from reportlab.lib.units import inch
radius=inch/3.0
if xcenter is None: xcenter=2.75*inch
if ycenter is None: ycenter=1.5*inch
canvas.drawCentredString(xcenter, ycenter+1.3*radius, title)
canvas.drawCentredString(xcenter, ycenter-1.4*radius, aka)
p = canvas.beginPath()
p.moveTo(xcenter,ycenter+radius)
from math import pi, cos, sin
angle = (2*pi)*2/5.0
startangle = pi/2.0
for vertex in range(nvertices-1):
nextangle = angle*(vertex+1)+startangle
x = xcenter + radius*cos(nextangle)
y = ycenter + radius*sin(nextangle)
p.lineTo(x,y)
if nvertices==5:
p.close()
canvas.drawPath(p)
"""
testjoins = """
def joins(canvas):
from reportlab.lib.units import inch
# make lines big
canvas.setLineWidth(5)
star(canvas, "Default: mitered join", "0: pointed", xcenter = 1*inch)
canvas.setLineJoin(1)
star(canvas, "Round join", "1: rounded")
canvas.setLineJoin(2)
star(canvas, "Bevelled join", "2: square", xcenter=4.5*inch)
"""
testcaps = """
def caps(canvas):
from reportlab.lib.units import inch
# make lines big
canvas.setLineWidth(5)
star(canvas, "Default", "no projection",xcenter = 1*inch,
nvertices=4)
canvas.setLineCap(1)
star(canvas, "Round cap", "1: ends in half circle", nvertices=4)
canvas.setLineCap(2)
star(canvas, "Square cap", "2: projects out half a width", xcenter=4.5*inch,
nvertices=4)
"""
testdashes = """
def dashes(canvas):
from reportlab.lib.units import inch
# make lines big
canvas.setDash(6,3)
star(canvas, "Simple dashes", "6 points on, 3 off", xcenter = 1*inch)
canvas.setDash(1,2)
star(canvas, "Dots", "One on, two off")
canvas.setDash([1,1,3,3,1,4,4,1], 0)
star(canvas, "Complex Pattern", "[1,1,3,3,1,4,4,1]", xcenter=4.5*inch)
"""
testcustomfont1 = """
def customfont1(canvas):
# we know some glyphs are missing, suppress warnings
import reportlab.rl_config
reportlab.rl_config.warnOnMissingFontGlyphs = 0
import rl_doc_utils
from reportlab.pdfbase import pdfmetrics
afmFile, pfbFile = rl_doc_utils.getJustFontPaths()
justFace = pdfmetrics.EmbeddedType1Face(afmFile, pfbFile)
faceName = 'DarkGardenMK' # pulled from AFM file
pdfmetrics.registerTypeFace(justFace)
justFont = pdfmetrics.Font('DarkGardenMK',
faceName,
'WinAnsiEncoding')
pdfmetrics.registerFont(justFont)
canvas.setFont('DarkGardenMK', 32)
canvas.drawString(10, 150, 'This should be in')
canvas.drawString(10, 100, 'DarkGardenMK')
"""
testttffont1 = """
def ttffont1(canvas):
# we know some glyphs are missing, suppress warnings
import reportlab.rl_config
reportlab.rl_config.warnOnMissingFontGlyphs = 0
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
pdfmetrics.registerFont(TTFont('Vera', 'Vera.ttf'))
from reportlab.pdfgen.canvas import Canvas
canvas.setFont('Vera', 32)
canvas.drawString(10, 150, "Some UTF-8 text encoded")
canvas.drawString(10, 100, "in the Vera TT Font!")
"""
testcursormoves1 = """
def cursormoves1(canvas):
from reportlab.lib.units import inch
textobject = canvas.beginText()
textobject.setTextOrigin(inch, 2.5*inch)
textobject.setFont("Helvetica-Oblique", 14)
for line in lyrics:
textobject.textLine(line)
textobject.setFillGray(0.4)
textobject.textLines('''
With many apologies to the Beach Boys
and anyone else who finds this objectionable
''')
canvas.drawText(textobject)
"""
testcursormoves2 = """
def cursormoves2(canvas):
from reportlab.lib.units import inch
textobject = canvas.beginText()
textobject.setTextOrigin(2, 2.5*inch)
textobject.setFont("Helvetica-Oblique", 14)
for line in lyrics:
textobject.textOut(line)
textobject.moveCursor(14,14) # POSITIVE Y moves down!!!
textobject.setFillColorRGB(0.4,0,1)
textobject.textLines('''
With many apologies to the Beach Boys
and anyone else who finds this objectionable
''')
canvas.drawText(textobject)
"""
testcharspace = """
def charspace(canvas):
from reportlab.lib.units import inch
textobject = canvas.beginText()
textobject.setTextOrigin(3, 2.5*inch)
textobject.setFont("Helvetica-Oblique", 10)
charspace = 0
for line in lyrics:
textobject.setCharSpace(charspace)
textobject.textLine("%s: %s" %(charspace,line))
charspace = charspace+0.5
textobject.setFillGray(0.4)
textobject.textLines('''
With many apologies to the Beach Boys
and anyone else who finds this objectionable
''')
canvas.drawText(textobject)
"""
testwordspace = """
def wordspace(canvas):
from reportlab.lib.units import inch
textobject = canvas.beginText()
textobject.setTextOrigin(3, 2.5*inch)
textobject.setFont("Helvetica-Oblique", 12)
wordspace = 0
for line in lyrics:
textobject.setWordSpace(wordspace)
textobject.textLine("%s: %s" %(wordspace,line))
wordspace = wordspace+2.5
textobject.setFillColorCMYK(0.4,0,0.4,0.2)
textobject.textLines('''
With many apologies to the Beach Boys
and anyone else who finds this objectionable
''')
canvas.drawText(textobject)
"""
testhorizontalscale = """
def horizontalscale(canvas):
from reportlab.lib.units import inch
textobject = canvas.beginText()
textobject.setTextOrigin(3, 2.5*inch)
textobject.setFont("Helvetica-Oblique", 12)
horizontalscale = 80 # 100 is default
for line in lyrics:
textobject.setHorizScale(horizontalscale)
textobject.textLine("%s: %s" %(horizontalscale,line))
horizontalscale = horizontalscale+10
textobject.setFillColorCMYK(0.0,0.4,0.4,0.2)
textobject.textLines('''
With many apologies to the Beach Boys
and anyone else who finds this objectionable
''')
canvas.drawText(textobject)
"""
testleading = """
def leading(canvas):
from reportlab.lib.units import inch
textobject = canvas.beginText()
textobject.setTextOrigin(3, 2.5*inch)
textobject.setFont("Helvetica-Oblique", 14)
leading = 8
for line in lyrics:
textobject.setLeading(leading)
textobject.textLine("%s: %s" %(leading,line))
leading = leading+2.5
textobject.setFillColorCMYK(0.8,0,0,0.3)
textobject.textLines('''
With many apologies to the Beach Boys
and anyone else who finds this objectionable
''')
canvas.drawText(textobject)
"""
testhand = """
def hand(canvas, debug=1, fill=0):
(startx, starty) = (0,0)
curves = [
( 0, 2), ( 0, 4), ( 0, 8), # back of hand
( 5, 8), ( 7,10), ( 7,14),
(10,14), (10,13), ( 7.5, 8), # thumb
(13, 8), (14, 8), (17, 8),
(19, 8), (19, 6), (17, 6),
(15, 6), (13, 6), (11, 6), # index, pointing
(12, 6), (13, 6), (14, 6),
(16, 6), (16, 4), (14, 4),
(13, 4), (12, 4), (11, 4), # middle
(11.5, 4), (12, 4), (13, 4),
(15, 4), (15, 2), (13, 2),
(12.5, 2), (11.5, 2), (11, 2), # ring
(11.5, 2), (12, 2), (12.5, 2),
(14, 2), (14, 0), (12.5, 0),
(10, 0), (8, 0), (6, 0), # pinky, then close
]
from reportlab.lib.units import inch
if debug: canvas.setLineWidth(6)
u = inch*0.2
p = canvas.beginPath()
p.moveTo(startx, starty)
ccopy = list(curves)
while ccopy:
[(x1,y1), (x2,y2), (x3,y3)] = ccopy[:3]
del ccopy[:3]
p.curveTo(x1*u,y1*u,x2*u,y2*u,x3*u,y3*u)
p.close()
canvas.drawPath(p, fill=fill)
if debug:
from reportlab.lib.colors import red, green
(lastx, lasty) = (startx, starty)
ccopy = list(curves)
while ccopy:
[(x1,y1), (x2,y2), (x3,y3)] = ccopy[:3]
del ccopy[:3]
canvas.setStrokeColor(red)
canvas.line(lastx*u,lasty*u, x1*u,y1*u)
canvas.setStrokeColor(green)
canvas.line(x2*u,y2*u, x3*u,y3*u)
(lastx,lasty) = (x3,y3)
"""
testhand2 = """
def hand2(canvas):
canvas.translate(20,10)
canvas.setLineWidth(3)
canvas.setFillColorRGB(0.1, 0.3, 0.9)
canvas.setStrokeGray(0.5)
hand(canvas, debug=0, fill=1)
"""
testfonts = """
def fonts(canvas):
from reportlab.lib.units import inch
text = "Now is the time for all good men to..."
x = 1.8*inch
y = 2.7*inch
for font in canvas.getAvailableFonts():
canvas.setFont(font, 10)
canvas.drawString(x,y,text)
canvas.setFont("Helvetica", 10)
canvas.drawRightString(x-10,y, font+":")
y = y-13
"""
testarcs = """
def arcs(canvas):
from reportlab.lib.units import inch
canvas.setLineWidth(4)
canvas.setStrokeColorRGB(0.8, 1, 0.6)
# draw rectangles enclosing the arcs
canvas.rect(inch, inch, 1.5*inch, inch)
canvas.rect(3*inch, inch, inch, 1.5*inch)
canvas.setStrokeColorRGB(0, 0.2, 0.4)
canvas.setFillColorRGB(1, 0.6, 0.8)
p = canvas.beginPath()
p.moveTo(0.2*inch, 0.2*inch)
p.arcTo(inch, inch, 2.5*inch,2*inch, startAng=-30, extent=135)
p.arc(3*inch, inch, 4*inch, 2.5*inch, startAng=-45, extent=270)
canvas.drawPath(p, fill=1, stroke=1)
"""
testvariousshapes = """
def variousshapes(canvas):
from reportlab.lib.units import inch
inch = int(inch)
canvas.setStrokeGray(0.5)
canvas.grid(range(0,11*inch/2,inch/2), range(0,7*inch/2,inch/2))
canvas.setLineWidth(4)
canvas.setStrokeColorRGB(0, 0.2, 0.7)
canvas.setFillColorRGB(1, 0.6, 0.8)
p = canvas.beginPath()
p.rect(0.5*inch, 0.5*inch, 0.5*inch, 2*inch)
p.circle(2.75*inch, 1.5*inch, 0.3*inch)
p.ellipse(3.5*inch, 0.5*inch, 1.2*inch, 2*inch)
canvas.drawPath(p, fill=1, stroke=1)
"""
testclosingfigures = """
def closingfigures(canvas):
from reportlab.lib.units import inch
h = inch/3.0; k = inch/2.0
canvas.setStrokeColorRGB(0.2,0.3,0.5)
canvas.setFillColorRGB(0.8,0.6,0.2)
canvas.setLineWidth(4)
p = canvas.beginPath()
for i in (1,2,3,4):
for j in (1,2):
xc,yc = inch*i, inch*j
p.moveTo(xc,yc)
p.arcTo(xc-h, yc-k, xc+h, yc+k, startAng=0, extent=60*i)
# close only the first one, not the second one
if j==1:
p.close()
canvas.drawPath(p, fill=1, stroke=1)
"""
testforms = """
def forms(canvas):
#first create a form...
canvas.beginForm("SpumoniForm")
#re-use some drawing functions from earlier
spumoni(canvas)
canvas.endForm()
#then draw it
canvas.doForm("SpumoniForm")
"""
def doctemplateillustration(canvas):
    """Draw the DocTemplate/PageTemplate/Frame/flowable diagram.

    Renders three page thumbnails ("two column", "chapter page",
    "title page") plus zoomed-in frame and flowable mock-ups, to
    illustrate how a DocTemplate document is assembled.
    """
    from reportlab.lib.units import inch
    canvas.setFont("Helvetica", 10)
    canvas.drawString(inch/4.0, 2.75*inch, "DocTemplate")
    # Thumbnail geometry: pages of size W x H, separated by Wd,
    # sitting on a baseline Hd above the bottom.
    W = 4/3.0*inch
    H = 2*inch
    Wd = x = inch/4.0
    Hd =y = inch/2.0
    for name in ("two column", "chapter page", "title page"):
        canvas.setFillColorRGB(0.5,1.0,1.0)
        canvas.rect(x,y,W,H, fill=1)
        canvas.setFillColorRGB(0,0,0)
        canvas.drawString(x+inch/8, y+H-Wd, "PageTemplate")
        canvas.drawCentredString(x+W/2.0, y-Wd, name)
        x = x+W+Wd
    # Zoom into the first thumbnail: draw its two Frames of flowables.
    canvas.saveState()
    d = inch/16
    dW = (W-3*d)/2.0
    hD = H -2*d-Wd
    canvas.translate(Wd+d, Hd+d)
    for name in ("left Frame", "right Frame"):
        canvas.setFillColorRGB(1.0,0.5,1.0)
        canvas.rect(0,0, dW,hD, fill=1)
        canvas.setFillGray(0.7)
        # NOTE: dd (and ddH/ddW) deliberately leak out of this loop --
        # dd is reused below when drawing the mock chapter page.
        dd= d/2.0
        ddH = (hD-6*dd)/5.0
        ddW = dW-2*dd
        yy = dd
        xx = dd
        for i in range(5):
            canvas.rect(xx,yy,ddW,ddH, fill=1, stroke=0)
            yy = yy+ddH+dd
        canvas.setFillColorRGB(0,0,0)
        canvas.saveState()
        canvas.rotate(90)
        canvas.drawString(d,-dW/2, name)
        canvas.restoreState()
        canvas.translate(dW+d,0)
    canvas.restoreState()
    # Middle "chapter page" mock-up with three labelled flowables.
    canvas.setFillColorRGB(1.0, 0.5, 1.0)
    mx = Wd+W+Wd+d
    my = Hd+d
    mW = W-2*d
    mH = H-d-Hd
    canvas.rect(mx, my, mW, mH, fill=1)
    canvas.rect(Wd+2*(W+Wd)+d, Hd+3*d, W-2*d, H/2.0, fill=1)
    canvas.setFillGray(0.7)
    canvas.rect(Wd+2*(W+Wd)+d+dd, Hd+5*d, W-2*d-2*dd, H/2.0-2*d-dd, fill=1)
    xx = mx+dd
    yy = my+mH/5.0
    ddH = (mH-6*dd-mH/5.0)/3.0
    ddW = mW - 2*dd
    for i in range(3):
        canvas.setFillGray(0.7)
        canvas.rect(xx,yy,ddW,ddH, fill=1, stroke=1)
        canvas.setFillGray(0)
        canvas.drawString(xx+dd/2.0,yy+dd/2.0, "flowable %s" %(157-i))
        yy = yy+ddH+dd
    canvas.drawCentredString(3*Wd+2*W+W/2, Hd+H/2.0, "First Flowable")
    # Captions beneath/above the mock pages.
    canvas.setFont("Times-BoldItalic", 8)
    canvas.setFillGray(0)
    canvas.drawCentredString(mx+mW/2.0, my+mH+3*dd, "Chapter 6: Lubricants")
    canvas.setFont("Times-BoldItalic", 10)
    canvas.drawCentredString(3*Wd+2*W+W/2, Hd+H-H/4, "College Life")
# D = dir()
# Materialize the demo functions: every module-level "test*" string is
# stripped and exec'd so that its "def ..." runs at module scope.
# Python 2 idioms throughout: types.StringType and string.strip.
g = globals()
Dprime = {}
# In Python 2, g.items() returns a snapshot list, so it is safe to
# exec (and thereby grow globals()) while iterating over it.
from types import StringType
from string import strip
for (a,b) in g.items():
    if a[:4]=="test" and type(b) is StringType:
        #print 'for', a
        #print b
        b = strip(b)
        exec(b+'\n')
platypussetup = """
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.rl_config import defaultPageSize
from reportlab.lib.units import inch
PAGE_HEIGHT=defaultPageSize[1]; PAGE_WIDTH=defaultPageSize[0]
styles = getSampleStyleSheet()
"""
platypusfirstpage = """
Title = "Hello world"
pageinfo = "platypus example"
def myFirstPage(canvas, doc):
canvas.saveState()
canvas.setFont('Times-Bold',16)
canvas.drawCentredString(PAGE_WIDTH/2.0, PAGE_HEIGHT-108, Title)
canvas.setFont('Times-Roman',9)
canvas.drawString(inch, 0.75 * inch, "First Page / %s" % pageinfo)
canvas.restoreState()
"""
platypusnextpage = """
def myLaterPages(canvas, doc):
canvas.saveState()
canvas.setFont('Times-Roman',9)
canvas.drawString(inch, 0.75 * inch, "Page %d %s" % (doc.page, pageinfo))
canvas.restoreState()
"""
platypusgo = """
def go():
doc = SimpleDocTemplate("phello.pdf")
Story = [Spacer(1,2*inch)]
style = styles["Normal"]
for i in range(100):
bogustext = ("This is Paragraph number %s. " % i) *20
p = Paragraph(bogustext, style)
Story.append(p)
Story.append(Spacer(1,0.2*inch))
doc.build(Story, onFirstPage=myFirstPage, onLaterPages=myLaterPages)
"""
if __name__=="__main__":
    # then do the platypus hello world
    # Exec the four platypus example strings in order (setup must come
    # first), then call the go() they defined to produce phello.pdf.
    for b in platypussetup, platypusfirstpage, platypusnextpage, platypusgo:
        b = strip(b)
        exec(b+'\n')
    go()
# ---- boundary marker between concatenated sources (was stray "| |") ----
# The core functionality of PyEphem lives in the C-language _libastro
# module, which packages the astronomy routines from XEphem as
# convenient Python types.
import ephem._libastro as _libastro
from math import pi
__version__ = '3.7.6.0'
# Angle constants, all expressed in radians.
twopi = pi * 2.
halfpi = pi / 2.
quarterpi = pi / 4.
eighthpi = pi / 8.
degree = pi / 180.
arcminute = degree / 60.
arcsecond = arcminute / 60.
half_arcsecond = arcsecond / 2.
tiny = arcsecond / 360.  # threshold the searches treat as "already there"
c = 299792458. # exact speed of light in meters/second
# Physical dimensions re-exported from _libastro.
meters_per_au = _libastro.meters_per_au
earth_radius = _libastro.earth_radius
moon_radius = _libastro.moon_radius
sun_radius = _libastro.sun_radius
# Standard epochs, converted to _libastro dates (Julian date - MJD0).
B1900 = 2415020.3135 - _libastro.MJD0
B1950 = 2433282.4235 - _libastro.MJD0
J2000 = _libastro.J2000
# We make available several basic types from _libastro.
Angle = _libastro.Angle
degrees = _libastro.degrees
hours = _libastro.hours
Date = _libastro.Date
# Time intervals expressed as fractions of a day, matching the units
# of Date arithmetic used by the search functions below.
hour = 1. / 24.
minute = hour / 60.
second = minute / 60.
default_newton_precision = second / 10.
delta_t = _libastro.delta_t
julian_date = _libastro.julian_date
# Body classes and helper functions re-exported from _libastro.
Body = _libastro.Body
Planet = _libastro.Planet
PlanetMoon = _libastro.PlanetMoon
FixedBody = _libastro.FixedBody
EllipticalBody = _libastro.EllipticalBody
ParabolicBody = _libastro.ParabolicBody
HyperbolicBody = _libastro.HyperbolicBody
EarthSatellite = _libastro.EarthSatellite
readdb = _libastro.readdb
readtle = _libastro.readtle
constellation = _libastro.constellation
separation = _libastro.separation
now = _libastro.now
millennium_atlas = _libastro.millennium_atlas
uranometria = _libastro.uranometria
uranometria2000 = _libastro.uranometria2000
# We also create a Python class ("Mercury", "Venus", etcetera) for
# each planet and moon for which _libastro offers specific algorithms.
# Each generated class subclasses the matching _libastro type and
# records its planet index in the __planet__ attribute.
for index, classname, name in _libastro.builtin_planets():
    exec('''
class %(name)s(_libastro.%(classname)s):
    "Create a Body instance representing %(name)s"
    __planet__ = %(index)r
''' % dict(name=name, classname=classname, index=index))
del index, classname, name
# We now replace some of the classes we have just created, because
# _libastro actually provides separate, richer types for these bodies.
Jupiter = _libastro.Jupiter
Saturn = _libastro.Saturn
Moon = _libastro.Moon
# Root finding via the secant method.
def newton(f, x0, x1, precision=default_newton_precision):
    """Return an x-value at which the given function reaches zero.

    Starting from the two samples `x0` and `x1`, iterates until
    successive estimates agree to within ``precision``, the function
    value reaches exactly zero, or progress stalls.
    """
    older_value, newer_value = f(x0), f(x1)
    # Stop on an exact zero, on convergence of the bracket, or when
    # two equal function values would make the step division blow up.
    while (newer_value and abs(x1 - x0) > precision
           and newer_value != older_value):
        step = (x1 - x0) / (older_value / newer_value - 1)
        x0, x1 = x1, x1 + step
        older_value, newer_value = newer_value, f(x1)
    return x1
# Find equinoxes and solstices.
_sun = Sun()  # module-private Sun used for the equinox/solstice searches
def holiday(d0, motion, offset):
    """Function that assists the finding of equinoxes and solstices.

    `d0` - starting date; `motion` - signed angle in radians the Sun
    must travel (+/-twopi for a full year, +/-pi for half); `offset` -
    phase offset selecting which equinox or solstice is sought.
    """
    # f() is zero exactly when the Sun's right ascension is a multiple
    # of a quarter circle, i.e. at an equinox or solstice.
    def f(d):
        _sun.compute(d)
        return (_sun.ra + eighthpi) % quarterpi - eighthpi
    d0 = Date(d0)
    _sun.compute(d0)
    # Remaining angle the Sun must cover, folded into one circuit; if
    # we are already at an event, search a whole circuit instead.
    angle_to_cover = motion - (_sun.ra + offset) % motion
    if abs(angle_to_cover) < tiny:
        angle_to_cover = motion
    # Rough date estimate at 365.25 days per circuit, refined by newton.
    d = d0 + 365.25 * angle_to_cover / twopi
    # NOTE(review): lower-case `date` is not defined anywhere in this
    # chunk (only `Date`); confirm a module-level alias exists
    # elsewhere, otherwise this line raises NameError.
    return date(newton(f, d, d + hour))
# Convenience wrappers around holiday(): each passes a full-circle
# motion (+/-twopi) plus the phase offset that selects the event.
def previous_vernal_equinox(date):
    """Return the date of the previous vernal equinox."""
    return holiday(date, -twopi, 0)
def next_vernal_equinox(date):
    """Return the date of the next vernal equinox."""
    return holiday(date, twopi, 0)
def previous_summer_solstice(date):
    """Return the date of the previous summer solstice."""
    return holiday(date, -twopi, pi + halfpi)
def next_summer_solstice(date):
    """Return the date of the next summer solstice."""
    return holiday(date, twopi, pi + halfpi)
def previous_autumnal_equinox(date):
    """Return the date of the previous autumnal equinox."""
    return holiday(date, -twopi, pi)
def next_autumnal_equinox(date):
    """Return the date of the next autumnal equinox."""
    return holiday(date, twopi, pi)
def previous_winter_solstice(date):
    """Return the date of the previous winter solstice."""
    return holiday(date, -twopi, halfpi)
def next_winter_solstice(date):
    """Return the date of the next winter solstice."""
    return holiday(date, twopi, halfpi)
# Common synonyms: spring/fall naming for the vernal/autumnal functions.
next_spring_equinox = next_vernal_equinox
previous_spring_equinox = previous_vernal_equinox
next_fall_equinox = next_autumn_equinox = next_autumnal_equinox
previous_fall_equinox = previous_autumn_equinox = previous_autumnal_equinox
# More-general functions that find any equinox or solstice.
# A motion of +/-pi steps between successive events of the same kind,
# which lie half a circle apart along the Sun's path.
def previous_equinox(date):
    """Return the date of the previous equinox."""
    return holiday(date, -pi, 0)
def next_equinox(date):
    """Return the date of the next equinox."""
    return holiday(date, pi, 0)
def previous_solstice(date):
    """Return the date of the previous solstice."""
    return holiday(date, -pi, halfpi)
def next_solstice(date):
    """Return the date of the next solstice."""
    return holiday(date, pi, halfpi)
# Find phases of the Moon.
_moon = Moon()  # module-private Moon used for the phase searches
def _find_moon_phase(d0, motion, target):
    """Function that assists the finding of moon phases.

    `target` is the Sun-Moon ecliptic elongation that defines the
    phase (0 new, halfpi first quarter, pi full, pi+halfpi last
    quarter); `motion` of +/-twopi searches one whole lunation.
    """
    # f() measures how far the Moon's elongation from the Sun is from
    # `target`, normalized into the interval around zero.  It reads
    # `antitarget`, which is bound below before f is first called.
    def f(d):
        _sun.compute(d)
        _moon.compute(d)
        slon = _libastro.eq_ecl(d, _sun.g_ra, _sun.g_dec)[0]
        mlon = _libastro.eq_ecl(d, _moon.g_ra, _moon.g_dec)[0]
        return (mlon - slon - antitarget) % twopi - pi
    antitarget = target + pi
    d0 = Date(d0)
    f0 = f(d0)
    # Elongation still to cover, folded into one circuit in the
    # requested direction; a full circuit if we are already there.
    angle_to_cover = (- f0) % motion
    if abs(angle_to_cover) < tiny:
        angle_to_cover = motion
    # 29.53 days is the mean synodic month, giving the first guess.
    d = d0 + 29.53 * angle_to_cover / twopi
    # NOTE(review): see holiday() -- lower-case `date` must be defined
    # elsewhere in the module or this raises NameError.
    return date(newton(f, d, d + hour))
# Phase wrappers: each searches one whole lunation (+/-twopi) for the
# elongation target that defines the phase.
def previous_new_moon(date):
    """Return the date of the previous New Moon."""
    return _find_moon_phase(date, -twopi, 0)
def next_new_moon(date):
    """Return the date of the next New Moon."""
    return _find_moon_phase(date, twopi, 0)
def previous_first_quarter_moon(date):
    """Return the date of the previous First Quarter Moon."""
    return _find_moon_phase(date, -twopi, halfpi)
def next_first_quarter_moon(date):
    """Return the date of the next First Quarter Moon."""
    return _find_moon_phase(date, twopi, halfpi)
def previous_full_moon(date):
    """Return the date of the previous Full Moon."""
    return _find_moon_phase(date, -twopi, pi)
def next_full_moon(date):
    """Return the date of the next Full Moon."""
    return _find_moon_phase(date, twopi, pi)
def previous_last_quarter_moon(date):
    """Return the date of the previous Last Quarter Moon."""
    return _find_moon_phase(date, -twopi, pi + halfpi)
def next_last_quarter_moon(date):
    """Return the date of the next Last Quarter Moon."""
    return _find_moon_phase(date, twopi, pi + halfpi)
# We provide a Python extension to our _libastro "Observer" class that
# can search for circumstances like transits.
class CircumpolarError(ValueError):
    """Base error for declinations that never cross the horizon."""

class NeverUpError(CircumpolarError):
    """Raised when the body in question never rises."""

class AlwaysUpError(CircumpolarError):
    """Raised when the body in question never sets."""
# Decorator used by the rising/setting search methods below: appends
# the shared contract text to the method's docstring and returns the
# very same function object (it mutates, it does not wrap).
def describe_riset_search(method):
    method.__doc__ += """, returning its date.
    The search starts at the `date` of this `Observer` and is limited to
    the single circuit of the sky, from antitransit to antitransit, that
    the `body` was in the middle of describing at that date and time.
    If the body did not, in fact, cross the horizon in the direction you
    are asking about during that particular circuit, then the search
    must raise a `CircumpolarError` exception like `NeverUpError` or
    `AlwaysUpError` instead of returning a date.
    """
    return method
class Observer(_libastro.Observer):
"""A location on earth for which positions are to be computed.
An `Observer` instance allows you to compute the positions of
celestial bodies as seen from a particular latitude and longitude on
the Earth's surface. The constructor takes no parameters; instead,
set its attributes once you have created it. Defaults:
`date` - the moment the `Observer` is created
`lat` - zero degrees latitude
`lon` - zero degrees longitude
`elevation` - 0 meters above sea level
`horizon` - 0 degrees
`epoch` - J2000
`temp` - 15 degrees Celsius
`pressure` - 1010 mBar
"""
__slots__ = [ 'name' ]
elev = _libastro.Observer.elevation
def copy(self):
o = self.__class__()
o.date = self.date
o.lat = self.lat
o.lon = self.lon
o.elev = self.elev
o.horizon = self.horizon
o.epoch = self.epoch
o.temp = self.temp
o.pressure = self.pressure
return o
def __repr__(self):
"""Return a useful textual representation of this Observer."""
return ('<ephem.Observer date=%r epoch=%r'
" lon='%s' lat='%s' elevation=%sm"
' horizon=%s temp=%sC pressure=%smBar>'
% (str(self.date), str(self.epoch),
self.lon, self.lat, self.elevation,
self.horizon, self.temp, self.pressure))
def compute_pressure(self):
"""Set the atmospheric pressure for the current elevation."""
# Formula from the ISA Standard Atmosphere
self.pressure = (1013.25 * (1 - 0.0065 * self.elevation / 288.15)
** 5.2558761132785179)
    def _compute_transit(self, body, start, sign, offset):
        """Internal function used to compute transits.

        `sign` (+1./-1.) selects the search direction in time;
        `offset` is the hour angle of the event sought (0. for a
        meridian transit, pi for an antitransit).  Advances
        `self.date` as a side effect; the public wrappers below save
        and restore it.
        """
        if isinstance(body, EarthSatellite):
            raise TypeError(
                'the next and previous transit methods do not'
                ' support earth satellites because of their speed;'
                ' please use the higher-resolution next_pass() method'
                )
        # f() returns the hour-angle error at date d (via the Angle's
        # znorm attribute).  It closes over `sidereal_time`, which is
        # bound below before newton() ever calls f().
        def f(d):
            self.date = d
            body.compute(self)
            return degrees(offset - sidereal_time() + body.g_ra).znorm
        if start is not None:
            self.date = start
        sidereal_time = self.sidereal_time
        body.compute(self)
        ha = sidereal_time() - body.g_ra
        # Hour angle still to rotate through, folded into one circuit
        # in the requested direction; a full circuit if already there.
        ha_to_move = (offset - ha) % (sign * twopi)
        if abs(ha_to_move) < tiny:
            ha_to_move = sign * twopi
        d = self.date + ha_to_move / twopi
        result = Date(newton(f, d, d + minute))
        return result
    # The underscore helpers below move `self.date` while searching;
    # callers wanting an untouched Observer should use the public
    # previous_/next_ wrappers instead.
    def _previous_transit(self, body, start=None):
        """Find the previous passage of a body across the meridian."""
        return self._compute_transit(body, start, -1., 0.)
    def _next_transit(self, body, start=None):
        """Find the next passage of a body across the meridian."""
        return self._compute_transit(body, start, +1., 0.)
    def _previous_antitransit(self, body, start=None):
        """Find the previous passage of a body across the anti-meridian."""
        return self._compute_transit(body, start, -1., pi)
    def _next_antitransit(self, body, start=None):
        """Find the next passage of a body across the anti-meridian."""
        return self._compute_transit(body, start, +1., pi)
def previous_transit(self, body, start=None):
"""Find the previous passage of a body across the meridian."""
original_date = self.date
d = self._previous_transit(body, start)
self.date = original_date
return d
def next_transit(self, body, start=None):
"""Find the next passage of a body across the meridian."""
original_date = self.date
d = self._next_transit(body, start)
self.date = original_date
return d
def previous_antitransit(self, body, start=None):
"""Find the previous passage of a body across the anti-meridian."""
original_date = self.date
d = self._previous_antitransit(body, start)
self.date = original_date
return d
def next_antitransit(self, body, start=None):
"""Find the next passage of a body across the anti-meridian."""
original_date = self.date
d = self._next_antitransit(body, start)
self.date = original_date
return d
def disallow_circumpolar(self, declination):
"""Raise an exception if the given declination is circumpolar.
Raises NeverUpError if an object at the given declination is
always below this Observer's horizon, or AlwaysUpError if such
an object would always be above the horizon.
"""
if abs(self.lat - declination) >= halfpi:
raise NeverUpError('The declination %s never rises'
' above the horizon at latitude %s'
% (declination, self.lat))
if abs(self.lat + declination) >= halfpi:
raise AlwaysUpError('The declination %s is always'
' above the horizon at latitude %s'
% (declination, self.lat))
    def _riset_helper(self, body, start, use_center, rising, previous):
        """Internal function for finding risings and settings.

        `rising` selects a rising versus a setting, `previous` selects
        the search direction, and `use_center` makes the horizon
        crossing refer to the body's center instead of its upper limb.
        The observer's `date` is restored before returning.
        """
        if isinstance(body, EarthSatellite):
            raise TypeError(
                'the rising and settings methods do not'
                ' support earth satellites because of their speed;'
                ' please use the higher-resolution next_pass() method'
                )

        def visit_transit():
            # Jump to the nearest transit in the search direction; a body
            # that is still below the horizon at transit never rises.
            d = (previous and self._previous_transit(body)
                   or self._next_transit(body)) # if-then
            if body.alt + body.radius * use_radius - self.horizon <= 0:
                raise NeverUpError('%r transits below the horizon at %s'
                                   % (body.name, d))
            return d

        def visit_antitransit():
            # Jump to the nearest antitransit; a body that is still above
            # the horizon there never sets.
            d = (previous and self._previous_antitransit(body)
                   or self._next_antitransit(body)) # if-then
            if body.alt + body.radius * use_radius - self.horizon >= 0:
                raise AlwaysUpError('%r is still above the horizon at %s'
                                    % (body.name, d))
            return d

        # Determine whether we should offset the result for the radius
        # of the object being measured, or instead pretend that rising
        # and setting happens when its center crosses the horizon.
        if use_center:
            use_radius = 0.0
        else:
            use_radius = 1.0

        # Save self.date so that we can restore it before returning.
        original_date = self.date

        # Start slightly to one side of the start date, to prevent
        # repeated calls from returning the same solution over and over.
        if start is not None:
            self.date = start
        if previous:
            self.date -= default_newton_precision
        else:
            self.date += default_newton_precision

        # Take a big leap towards the solution, then Newton takes us home.
        body.compute(self)
        heading_downward = (rising == previous) # "==" is inverted "xor"
        if heading_downward:
            on_lower_cusp = (body.alt + body.radius * use_radius
                             - self.horizon > tiny)
        else:
            on_lower_cusp = (body.alt + body.radius * use_radius
                             - self.horizon < - tiny)

        az = body.az
        # "Right side" means the half of the sky (east for risings, west
        # for settings) where the event can occur; the tolerance terms
        # accept azimuths sitting exactly on the meridian.
        on_right_side_of_sky = ((rising == (az < pi)) # inverted "xor"
                                or (az < tiny
                                    or pi - tiny < az < pi + tiny
                                    or twopi - tiny < az))

        def f(d):
            # Altitude of the relevant limb above the horizon at date d;
            # Newton's method searches for this function's zero.
            self.date = d
            body.compute(self)
            return body.alt + body.radius * use_radius - self.horizon

        try:
            # Bracket the event between a transit and an antitransit
            # (or the current date, when already on the lower cusp and
            # the correct side of the sky), then start Newton midway.
            if on_lower_cusp and on_right_side_of_sky:
                d0 = self.date
            elif heading_downward:
                d0 = visit_transit()
            else:
                d0 = visit_antitransit()
            if heading_downward:
                d1 = visit_antitransit()
            else:
                d1 = visit_transit()
            d = (d0 + d1) / 2.
            result = Date(newton(f, d, d + minute))
            return result
        finally:
            self.date = original_date
@describe_riset_search
def previous_rising(self, body, start=None, use_center=False):
"""Search for the given body's previous rising"""
return self._riset_helper(body, start, use_center, True, True)
@describe_riset_search
def previous_setting(self, body, start=None, use_center=False):
"""Search for the given body's previous setting"""
return self._riset_helper(body, start, use_center, False, True)
@describe_riset_search
def next_rising(self, body, start=None, use_center=False):
"""Search for the given body's next rising"""
return self._riset_helper(body, start, use_center, True, False)
@describe_riset_search
def next_setting(self, body, start=None, use_center=False):
"""Search for the given body's next setting"""
return self._riset_helper(body, start, use_center, False, False)
def next_pass(self, body):
"""Return the next rising, culmination, and setting of a satellite."""
if not isinstance(body, EarthSatellite):
raise TypeError(
'the next_pass() method is only for use with'
' EarthSatellite objects because of their high speed'
)
return _libastro._next_pass(self, body)
del describe_riset_search
# Time conversion.
def localtime(date):
    """Convert a PyEphem date into local time, returning a Python datetime.

    The result is a naive datetime (no tzinfo): the UTC moment `date`
    re-expressed in the interpreter's local time zone.
    """
    import calendar, time, datetime
    # date.tuple() yields a UTC (y, m, d, h, min, s) tuple; timegm()
    # turns it into a POSIX timestamp and localtime() shifts it into
    # the local zone.
    timetuple = time.localtime(calendar.timegm(date.tuple()))
    # Use only the first six struct_time fields.  Slicing to seven, as
    # this function previously did, passed tm_wday (the weekday, 0-6)
    # as the datetime's microsecond argument.
    return datetime.datetime(*timetuple[:6])
# Coordinate transformations.
class Coordinate(object):
    """Base class for sky positions (Equatorial, Ecliptic, Galactic).

    Instances can be built from a Body, from another coordinate
    (converting between systems and precessing between epochs as
    needed), or from a raw pair of angle values, with an optional
    ``epoch`` keyword argument.
    """
    def __init__(self, *args, **kw):
        # Accept an optional "epoch" keyword argument.
        epoch = kw.pop('epoch', None)
        if epoch is not None:
            self.epoch = epoch = Date(epoch)
        if kw:
            raise TypeError('"epoch" is the only keyword argument'
                            ' you can use during %s instantiation'
                            % (type(self).__name__))

        # Interpret a single-argument initialization.
        if len(args) == 1:
            a = args[0]

            # A Body stands in for its astrometric RA/dec at its epoch.
            if isinstance(a, Body):
                a = Equatorial(a.a_ra, a.a_dec, epoch = a.a_epoch)

            for cls in (Equatorial, Ecliptic, Galactic):
                if isinstance(a, cls):
                    # If the user omitted an "epoch" keyword, then
                    # use the epoch of the other object.
                    if epoch is None:
                        self.epoch = epoch = a.epoch
                    # If we are initialized from another of the same
                    # kind of coordinate and epoch, simply copy the
                    # coordinates and epoch into this new object.
                    if isinstance(self, cls) and epoch == a.epoch:
                        self.set(*a.get())
                        return
                    # Otherwise, convert through equatorial RA/dec,
                    # precessing if the epochs differ.
                    ra, dec = a.to_radec()
                    if epoch != a.epoch:
                        ra, dec = _libastro.precess(
                            a.epoch, epoch, ra, dec
                            )
                    self.from_radec(ra, dec)
                    return

            # BUG FIX: the message has two format specifiers (%s, %r)
            # but the old code supplied only one value, so formatting
            # itself raised a TypeError before the intended message
            # could be built.  %s is the class being constructed and
            # %r the offending argument.
            raise TypeError(
                'a single argument used to initialize %s() must be either'
                ' a coordinate or a Body, not an %r' % (type(self).__name__, a)
                )

        # Two arguments are interpreted as (ra, dec) or (lon, lat).
        elif len(args) == 2:
            self.set(*args)
            if epoch is None:
                self.epoch = epoch = Date(J2000)

        else:
            raise TypeError(
                'to initialize %s you must pass either a Body,'
                ' another coordinate, or two coordinate values,'
                ' but not: %r' % (type(self).__name__, args,)
                )
class Equatorial(Coordinate):
    """An equatorial sky coordinate in right ascension and declination."""

    def get(self):
        """Return the stored (ra, dec) pair."""
        return self.ra, self.dec

    def set(self, ra, dec):
        """Store *ra* normalized as hours and *dec* as degrees."""
        self.ra = hours(ra)
        self.dec = degrees(dec)

    # Equatorial coordinates already are RA/dec, so conversion to and
    # from RA/dec is just get/set.
    to_radec = get
    from_radec = set
class LonLatCoordinate(Coordinate):
    """A coordinate that is measured with a longitude and latitude."""

    def set(self, lon, lat):
        """Store the pair, normalizing each value through degrees()."""
        self.lon = degrees(lon)
        self.lat = degrees(lat)

    def get(self):
        """Return the stored (lon, lat) pair."""
        return self.lon, self.lat

    @property
    def long(self):
        """Backward-compatible alias for ``lon``."""
        return self.lon

    @long.setter
    def long(self, value):
        self.lon = value
class Ecliptic(LonLatCoordinate):
    """An ecliptic latitude and longitude."""

    def to_radec(self):
        """Return the equivalent (ra, dec) at this coordinate's epoch."""
        return _libastro.ecl_eq(self.epoch, self.lon, self.lat)

    def from_radec(self, ra, dec):
        """Set this coordinate from an equatorial (ra, dec) position."""
        lon, lat = _libastro.eq_ecl(self.epoch, ra, dec)
        self.lon = lon
        self.lat = lat
class Galactic(LonLatCoordinate):
    """A galactic latitude and longitude."""

    def to_radec(self):
        """Return the equivalent (ra, dec) at this coordinate's epoch."""
        return _libastro.gal_eq(self.epoch, self.lon, self.lat)

    def from_radec(self, ra, dec):
        """Set this coordinate from an equatorial (ra, dec) position."""
        lon, lat = _libastro.eq_gal(self.epoch, ra, dec)
        self.lon = lon
        self.lat = lat
# For backwards compatibility, provide lower-case names for our Date
# and Angle classes, and also allow "Lon" to be spelled "Long".
date = Date    # legacy lower-case alias for the Date class
angle = Angle  # legacy lower-case alias for the Angle class
LongLatCoordinate = LonLatCoordinate  # historical "Long" spelling
# Catalog boostraps. Each of these functions imports a catalog
# module, then replaces itself with the function of the same name that
# lives inside of the catalog.
def star(name, *args, **kwargs):
    """Load the stars database and return a star.

    Bootstrap stub: the first call imports the ephem.stars catalog,
    rebinds this module-level name to the catalog's own star()
    function, and delegates to it, so later calls go straight to the
    catalog without re-importing.
    """
    global star
    import ephem.stars
    star = ephem.stars.star
    return star(name, *args, **kwargs)
def city(name):
    """Load the cities database and return a city.

    Bootstrap stub: the first call imports the ephem.cities catalog,
    rebinds this module-level name to the catalog's own city()
    function, and delegates to it, so later calls go straight to the
    catalog without re-importing.
    """
    global city
    import ephem.cities
    city = ephem.cities.city
    return city(name)
| |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class snmpcommunity(base_resource) :
	""" Configuration for community resource.

	Auto-generated NITRO resource wrapper: the REST payload formatter
	serializes/deserializes instances by this class's name, so the
	class, property, and backing-field names must stay in sync with
	the NetScaler "snmpcommunity" API object.
	"""
	def __init__(self) :
		# Backing fields exposed through the properties below.
		self._communityname = ""
		self._permissions = ""
		# NOTE(review): inside the class body this attribute name is
		# mangled to _snmpcommunity___count; count()/count_filtered()
		# read the *unmangled* '___count' __dict__ key, which the NITRO
		# payload formatter sets on responses at runtime -- confirm.
		self.___count = 0

	@property
	def communityname(self) :
		"""The SNMP community string. Can consist of 1 to 31 characters that include uppercase and lowercase letters,numbers and special characters.
		The following requirement applies only to the NetScaler CLI:
		If the string includes one or more spaces, enclose the name in double or single quotation marks (for example, "my string" or 'my string').<br/>Minimum length = 1.
		"""
		try :
			return self._communityname
		except Exception as e:
			raise e

	@communityname.setter
	def communityname(self, communityname) :
		"""The SNMP community string. Can consist of 1 to 31 characters that include uppercase and lowercase letters,numbers and special characters.
		The following requirement applies only to the NetScaler CLI:
		If the string includes one or more spaces, enclose the name in double or single quotation marks (for example, "my string" or 'my string').<br/>Minimum length = 1
		"""
		try :
			self._communityname = communityname
		except Exception as e:
			raise e

	@property
	def permissions(self) :
		"""The SNMP V1 or V2 query-type privilege that you want to associate with this SNMP community.<br/>Possible values = GET, GET_NEXT, GET_BULK, SET, ALL.
		"""
		try :
			return self._permissions
		except Exception as e:
			raise e

	@permissions.setter
	def permissions(self, permissions) :
		"""The SNMP V1 or V2 query-type privilege that you want to associate with this SNMP community.<br/>Possible values = GET, GET_NEXT, GET_BULK, SET, ALL
		"""
		try :
			self._permissions = permissions
		except Exception as e:
			raise e

	def _get_nitro_response(self, service, response) :
		""" converts nitro response into object and returns the object array in case of get request.
		"""
		try :
			# Deserialize using this class's name as the payload key.
			result = service.payload_formatter.string_to_resource(snmpcommunity_response, response, self.__class__.__name__)
			if(result.errorcode != 0) :
				# Error code 444 means the session expired; clear it
				# before deciding whether to raise.
				if (result.errorcode == 444) :
					service.clear_session(self)
				if result.severity :
					if (result.severity == "ERROR") :
						raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
				else :
					raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
			return result.snmpcommunity
		except Exception as e :
			raise e

	def _get_object_name(self) :
		""" Returns the value of object identifier argument
		"""
		try :
			if (self.communityname) :
				return str(self.communityname)
			return None
		except Exception as e :
			raise e

	@classmethod
	def add(cls, client, resource) :
		""" Use this API to add snmpcommunity.

		`resource` may be a single snmpcommunity object or a list of
		them; lists are sent as one bulk request.
		"""
		try :
			if type(resource) is not list :
				addresource = snmpcommunity()
				addresource.communityname = resource.communityname
				addresource.permissions = resource.permissions
				return addresource.add_resource(client)
			else :
				if (resource and len(resource) > 0) :
					addresources = [ snmpcommunity() for _ in range(len(resource))]
					for i in range(len(resource)) :
						addresources[i].communityname = resource[i].communityname
						addresources[i].permissions = resource[i].permissions
				result = cls.add_bulk_request(client, addresources)
			return result
		except Exception as e :
			raise e

	@classmethod
	def delete(cls, client, resource) :
		""" Use this API to delete snmpcommunity.

		`resource` may be a community name (string), an snmpcommunity
		object, or a list of either; the branches below distinguish
		plain names from resource objects.
		"""
		try :
			if type(resource) is not list :
				deleteresource = snmpcommunity()
				if type(resource) != type(deleteresource):
					deleteresource.communityname = resource
				else :
					deleteresource.communityname = resource.communityname
				return deleteresource.delete_resource(client)
			else :
				if type(resource[0]) != cls :
					if (resource and len(resource) > 0) :
						deleteresources = [ snmpcommunity() for _ in range(len(resource))]
						for i in range(len(resource)) :
							deleteresources[i].communityname = resource[i]
				else :
					if (resource and len(resource) > 0) :
						deleteresources = [ snmpcommunity() for _ in range(len(resource))]
						for i in range(len(resource)) :
							deleteresources[i].communityname = resource[i].communityname
				result = cls.delete_bulk_request(client, deleteresources)
			return result
		except Exception as e :
			raise e

	@classmethod
	def get(cls, client, name="", option_="") :
		""" Use this API to fetch all the snmpcommunity resources that are configured on netscaler.

		With no `name`, fetches every configured community; `name` may
		also be a single community name or a list of names.
		NOTE(review): if `name` is itself an snmpcommunity instance
		(type(name) == cls), no branch assigns `response`, so the final
		return raises UnboundLocalError -- confirm intended usage.
		"""
		try :
			if not name :
				obj = snmpcommunity()
				response = obj.get_resources(client, option_)
			else :
				if type(name) != cls :
					if type(name) is not list :
						obj = snmpcommunity()
						obj.communityname = name
						response = obj.get_resource(client, option_)
					else :
						if name and len(name) > 0 :
							response = [snmpcommunity() for _ in range(len(name))]
							obj = [snmpcommunity() for _ in range(len(name))]
							for i in range(len(name)) :
								obj[i] = snmpcommunity()
								obj[i].communityname = name[i]
								response[i] = obj[i].get_resource(client, option_)
			return response
		except Exception as e :
			raise e

	@classmethod
	def get_filtered(cls, client, filter_) :
		""" Use this API to fetch filtered set of snmpcommunity resources.
		filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
		"""
		try :
			obj = snmpcommunity()
			option_ = options()
			option_.filter = filter_
			response = obj.getfiltered(client, option_)
			return response
		except Exception as e :
			raise e

	@classmethod
	def count(cls, client) :
		""" Use this API to count the snmpcommunity resources configured on NetScaler.
		"""
		try :
			obj = snmpcommunity()
			option_ = options()
			option_.count = True
			response = obj.get_resources(client, option_)
			if response :
				# '___count' is set by the payload formatter on the
				# response object (unmangled; see __init__ note).
				return response[0].__dict__['___count']
			return 0
		except Exception as e :
			raise e

	@classmethod
	def count_filtered(cls, client, filter_) :
		""" Use this API to count filtered the set of snmpcommunity resources.
		Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
		"""
		try :
			obj = snmpcommunity()
			option_ = options()
			option_.count = True
			option_.filter = filter_
			response = obj.getfiltered(client, option_)
			if response :
				return response[0].__dict__['___count']
			return 0
		except Exception as e :
			raise e

	class Permissions:
		# Allowed values for the `permissions` field.
		GET = "GET"
		GET_NEXT = "GET_NEXT"
		GET_BULK = "GET_BULK"
		SET = "SET"
		ALL = "ALL"
class snmpcommunity_response(base_response) :
	""" Response container that the NITRO payload formatter fills in
	for snmpcommunity requests.

	`length` pre-sizes the result list with empty snmpcommunity
	objects for the formatter to populate.
	"""
	def __init__(self, length=1) :
		self.errorcode = 0
		self.message = ""
		self.severity = ""
		self.sessionid = ""
		# Pre-size the result list.  (The previous version first bound
		# self.snmpcommunity to [] and then immediately overwrote it
		# here; that dead assignment has been removed.)
		self.snmpcommunity = [snmpcommunity() for _ in range(length)]
| |
from __future__ import absolute_import
import time
import json
import datetime
from datetime import date
from django.db import transaction
from django.contrib.gis.geos import Point
from celery import shared_task
from onadata.apps.fieldsight.models import Organization, Project, Site, Region, SiteType
from onadata.apps.userrole.models import UserRole
from onadata.apps.eventlog.models import FieldSightLog, CeleryTaskProgress
from channels import Group as ChannelGroup
from django.contrib.auth.models import User, Group
from onadata.apps.fieldsight.fs_exports.formParserForExcelReport import parse_form_response
from io import BytesIO
from django.shortcuts import get_object_or_404
from onadata.apps.fsforms.models import FieldSightXF, FInstance, Stage
from django.core.files.storage import default_storage
from django.core.files.base import ContentFile
from django.db.models import Prefetch
from .generatereport import PDFReport
import os, tempfile, zipfile
from django.conf import settings
from django.http import HttpResponse
from django.core.servers.basehttp import FileWrapper
from openpyxl import Workbook
from openpyxl.styles import Font
from django.core.files.storage import get_storage_class
from onadata.libs.utils.viewer_tools import get_path
from PIL import Image
import tempfile, zipfile
from onadata.libs.utils.viewer_tools import get_path
import pyexcel as p
from .metaAttribsGenerator import get_form_answer, get_form_sub_status, get_form_submission_count, get_form_ques_ans_status
from django.conf import settings
from django.db.models import Sum, Case, When, IntegerField
from django.core.exceptions import MultipleObjectsReturned
from onadata.apps.fsforms.reports_util import get_images_for_site_all
@shared_task()
def site_download_zipfile(task_prog_obj_id, size):
    """Zip every submission image for a site and store the archive.

    `task_prog_obj_id` is the pk of the CeleryTaskProgress row whose
    object_id names the site; `size` selects the image-size variant
    handed to get_path().  Task status: 1 running, 2 done, 3 failed.
    A notification log entry is created on both success and failure.
    """
    # NOTE(review): presumably lets the enqueuing request's transaction
    # commit before we look the task row up -- confirm.
    time.sleep(5)
    task = CeleryTaskProgress.objects.get(pk=task_prog_obj_id)
    task.status = 1
    task.save()
    try:
        # Shadows the module-level default_storage import with the
        # configured storage backend instance.
        default_storage = get_storage_class()()
        buffer = BytesIO()
        datas = get_images_for_site_all(str(task.object_id))
        urls = list(datas["result"])
        archive = zipfile.ZipFile(buffer, 'w', zipfile.ZIP_DEFLATED)
        index=0
        # NOTE(review): username is computed but never used below.
        username=urls[0]['_attachments']['download_url'].split('/')[2]
        for url in urls:
            index+=1
            if default_storage.exists(get_path(url['_attachments']['filename'], size)):
                # Round-trip each image through a temp file so PIL can
                # re-encode it before it is added to the archive.
                with tempfile.NamedTemporaryFile(mode="wb") as temp:
                    file = default_storage.open(get_path(url['_attachments']['filename'], size))
                    img=Image.open(file)
                    img.save(temp, img.format)
                    # filename = '/srv/fieldsight/fieldsight-kobocat'+url['_attachments']['filename'] # Select your files here.
                    archive.write(temp.name, url['_attachments']['filename'].split('/')[2])
        archive.close()
        buffer.seek(0)
        zipFile = buffer.getvalue()
        # Replace any previous archive for this site before saving.
        if default_storage.exists(task.content_object.identifier + '/files/'+task.content_object.name+'.zip'):
            default_storage.delete(task.content_object.identifier + '/files/'+task.content_object.name+'.zip')
        zipFile_url = default_storage.save(task.content_object.identifier + '/files/'+task.content_object.name+'.zip', ContentFile(zipFile))
        task.file.name = zipFile_url
        task.status = 2
        task.save()
        buffer.close()
        noti = task.logs.create(source=task.user, type=32, title="Image Zip generation in site",
                                recipient=task.user, content_object=task, extra_object=task.content_object,
                                extra_message=" <a href='"+ task.file.url +"'>Image Zip file </a> generation in site")
    except Exception as e:
        task.description = "ERROR: " + str(e.message)
        task.status = 3
        task.save()
        print 'Report Gen Unsuccesfull. %s' % e
        print e.__dict__
        noti = task.logs.create(source=task.user, type=432, title="Image Zip generation in site",
                                content_object=task.content_object, recipient=task.user,
                                extra_message="@error " + u'{}'.format(e.message))
        # NOTE(review): if BytesIO() itself failed above, `buffer` is
        # unbound here and this close raises NameError.
        buffer.close()
@shared_task(time_limit=7200, soft_time_limit=7200)
def generate_stage_status_report(task_prog_obj_id, project_id):
    """Build the per-site stage-progress spreadsheet for a project.

    One row per site with identity/location columns, one submission
    count per sub-stage form, then visit and flagged/rejected totals.
    The sheet is written to media/stage-report/<project>_stage_data.xls
    and attached to the CeleryTaskProgress row.
    """
    time.sleep(5)
    task = CeleryTaskProgress.objects.get(pk=task_prog_obj_id)
    project = Project.objects.get(pk=project_id)
    task.status = 1
    task.save()
    try:
        data=[]
        ss_index = []
        form_ids = []
        stages_rows = []
        head_row = ["Site ID", "Name", "Region ID", "Address", "Latitude", "longitude", "Status"]
        query={}
        # Top-level stages only; their children supply one form/column
        # per sub-stage.
        stages = project.stages.filter(stage__isnull=True)
        for stage in stages:
            sub_stages = stage.parent.all()
            if len(sub_stages):
                head_row.append("Stage :"+stage.name)
                stages_rows.append("Stage :"+stage.name)
                # Empty placeholder keeps the stage header column blank
                # in every data row.
                ss_index.append(str(""))
                for ss in sub_stages:
                    head_row.append("Sub Stage :"+ss.name)
                    ss_index.append(str(ss.stage_forms.id))
                    form_ids.append(str(ss.stage_forms.id))
                    # Conditional count of this form's submissions.
                    query[str(ss.stage_forms.id)] = Sum(
                        Case(
                            When(site_instances__project_fxf_id=ss.stage_forms.id, then=1),
                            default=0, output_field=IntegerField()
                        ))
        # Aggregate totals over all stage forms for each site.
        query['flagged'] = Sum(
            Case(
                When(site_instances__form_status=2, site_instances__project_fxf_id__in=form_ids, then=1),
                default=0, output_field=IntegerField()
            ))
        query['rejected'] = Sum(
            Case(
                When(site_instances__form_status=1, site_instances__project_fxf_id__in=form_ids, then=1),
                default=0, output_field=IntegerField()
            ))
        query['submission'] = Sum(
            Case(
                When(site_instances__project_fxf_id__in=form_ids, then=1),
                default=0, output_field=IntegerField()
            ))
        head_row.extend(["Site Visits", "Submission Count", "Flagged Submission", "Rejected Submission"])
        data.append(head_row)
        sites = Site.objects.filter(project_id=project.id).values('id','identifier', 'name', 'region__identifier', 'address').annotate(**query)
        # Mongo pipeline: distinct (site, day) pairs, then grouped per
        # site so len(visits) is the number of distinct visit days.
        site_visits = settings.MONGO_DB.instances.aggregate([{"$match":{"fs_project": project.id, "fs_project_uuid": {"$in":form_ids}}}, { "$group" : {
            "_id" : {
                "fs_site": "$fs_site",
                "date": { "$substr": [ "$start", 0, 10 ] }
            },
          }
        }, { "$group": { "_id": "$_id.fs_site", "visits": {
            "$push": {
                "date":"$_id.date"
            }
        }
        }}])['result']
        site_dict = {}
        site_objs = Site.objects.filter(project_id=project_id)
        for site_obj in site_objs:
            site_dict[site_obj.id] = {'visits':0,'site_status':site_obj.site_status, 'latitude':site_obj.latitude,'longitude':site_obj.longitude}
        for site_visit in site_visits:
            try:
                site_dict[int(site_visit['_id'])]['visits'] = len(site_visit['visits'])
            except:
                # Ignore visits whose site id is missing or non-numeric.
                pass
        try:
            for site in sites:
                # import pdb; pdb.set_trace();
                site_row = [site['identifier'], site['name'], site['region__identifier'], site['address'], site_dict[site.get('id')]['latitude'], site_dict[site.get('id')]['longitude'], site_dict[site.get('id')]['site_status']]
                for stage in ss_index:
                    # Empty key (the stage placeholder) yields "".
                    site_row.append(site.get(stage, ""))
                site_row.extend([site_dict[site.get('id')]['visits'], site['submission'], site['flagged'], site['rejected']])
                data.append(site_row)
        except:
            # NOTE(review): a failure mid-loop silently truncates the
            # report rather than failing the task -- confirm intended.
            pass
        p.save_as(array=data, dest_file_name="media/stage-report/{}_stage_data.xls".format(project.id))
        xl_data = open("media/stage-report/{}_stage_data.xls".format(project.id), "rb")
        #Its only quick fix for now, save it in aws bucket whenever possible.
        task.file.name = xl_data.name
        task.status = 2
        task.save()
        noti = task.logs.create(source=task.user, type=32, title="Site Stage Progress report generation in Project",
                                recipient=task.user, content_object=project, extra_object=project,
                                extra_message=" <a href='/"+ "media/stage-report/{}_stage_data.xls".format(project.id) +"'>Site Stage Progress report </a> generation in project")
    except Exception as e:
        task.description = "ERROR: " + str(e.message)
        task.status = 3
        task.save()
        print 'Report Gen Unsuccesfull. %s' % e
        print e.__dict__
        noti = task.logs.create(source=task.user, type=432, title="Site Stage Progress report generation in Project",
                                content_object=project, recipient=task.user,
                                extra_message="@error " + u'{}'.format(e.message))
@shared_task()
def UnassignUser(task_prog_obj_id, user_id, sites, regions, projects, group_id):
time.sleep(5)
user = User.objects.get(pk=user_id)
task = CeleryTaskProgress.objects.get(pk=task_prog_obj_id)
task.status=1
task.save()
try:
count = 0
with transaction.atomic():
if sites:
for site_id in sites:
roles=UserRole.objects.filter(user_id=user_id, site_id = site_id, group_id = group_id, ended_at=None)
for role in roles:
role.ended_at = datetime.datetime.now()
role.save()
count = count + 1
if regions:
for region_id in regions:
sites = Site.objects.filter(region_id=region_id[1:])
for site_id in sites:
roles=UserRole.objects.filter(user_id=user_id, site_id = site_id, group_id = group_id, ended_at=None)
for role in roles:
role.ended_at = datetime.datetime.now()
role.save()
count = count + 1
if projects:
for project_id in projects:
sites = Site.objects.filter(project_id = project_id[1:])
for site_id in sites:
roles=UserRole.objects.filter(user_id=user_id, site_id = site_id, group_id = group_id, ended_at=None)
for role in roles:
role.ended_at = datetime.datetime.now()
role.save()
count = count + 1
task.status = 2
task.save()
if group_id == "3":
extra_message= "removed " + str(count) + "Reviewer Roles"
else:
extra_message= "removed " + str(count) + " Supervisor Roles"
noti = task.logs.create(source=task.user, type=35, title="Remove Roles",
content_object=user.user_profile, recipient=task.user,
extra_message=extra_message)
except Exception as e:
task.description = "ERROR: " + str(e.message)
task.status = 3
task.save()
print 'Role Remove Unsuccesfull. %s' % e
print e.__dict__
noti = task.logs.create(source=task.user, type=432, title="Role Remove for ",
content_object=user.user_profile, recipient=task.user,
extra_message="@error " + u'{}'.format(e.message))
@shared_task()
def UnassignAllProjectRolesAndSites(task_prog_obj_id, project_id):
    """End every role in a project and deactivate all of its sites.

    Roles are soft-closed by stamping ended_at; sites are flagged
    is_active=False rather than deleted.  Counts of both appear in the
    success notification.  Task status: 1 running, 2 done, 3 failed.
    """
    time.sleep(5)
    task = CeleryTaskProgress.objects.get(pk=task_prog_obj_id)
    task.status=1
    task.save()
    # all_objects so an already-deactivated project can still be found.
    project = Project.all_objects.get(pk=project_id)
    try:
        sites_count = 0
        roles_count = 0
        with transaction.atomic():
            roles=UserRole.objects.filter(project_id = project_id, ended_at=None)
            for role in roles:
                role.ended_at = datetime.datetime.now()
                role.save()
                roles_count = roles_count + 1
            sites=Site.objects.filter(project_id = project_id)
            for site in sites:
                site.is_active = False
                site.save()
                sites_count = sites_count + 1
        task.status = 2
        task.save()
        extra_message= "removed " + str(roles_count) + " User Roles and " + str(sites_count) + " sites "
        noti = task.logs.create(source=task.user, type=35, title="Remove Roles",
                                content_object=project, recipient=task.user,
                                extra_message=extra_message)
    except Exception as e:
        task.description = "ERROR: " + str(e.message)
        task.status = 3
        task.save()
        print 'Role Remove Unsuccesfull. %s' % e
        print e.__dict__
        noti = task.logs.create(source=task.user, type=432, title="Role Remove for ",
                                content_object=project, recipient=task.user,
                                extra_message="@error " + u'{}'.format(e.message))
@shared_task()
def UnassignAllSiteRoles(task_prog_obj_id, site_id):
    """End every open role on one site.

    Roles are soft-closed by stamping ended_at; the count of closed
    roles appears in the success notification.  Task status: 1
    running, 2 done, 3 failed.
    """
    time.sleep(5)
    # all_objects so an already-deactivated site can still be found.
    site = Site.all_objects.get(pk=site_id)
    task = CeleryTaskProgress.objects.get(pk=task_prog_obj_id)
    task.status=1
    task.save()
    try:
        count = 0
        with transaction.atomic():
            roles=UserRole.objects.filter(site_id = site_id, ended_at=None)
            for role in roles:
                role.ended_at = datetime.datetime.now()
                role.save()
                count = count + 1
        task.status = 2
        task.save()
        extra_message= "removed " + str(count) + " User Roles "
        noti = task.logs.create(source=task.user, type=35, title="Remove Roles",
                                content_object=site, recipient=task.user,
                                extra_message=extra_message)
    except Exception as e:
        task.description = "ERROR: " + str(e.message)
        task.status = 3
        task.save()
        print 'Role Remove Unsuccesfull. %s' % e
        print e.__dict__
        noti = task.logs.create(source=task.user, type=432, title="Role Remove for ",
                                content_object=site, recipient=task.user,
                                extra_message="@error " + u'{}'.format(e.message))
def get_site_type(value):
    """Coerce a raw spreadsheet cell to an integer site-type identifier.

    Returns 0 for anything that cannot be converted to an integer
    (empty strings, None, words), meaning "no type selected".
    """
    try:
        return int(value)
    except (TypeError, ValueError):
        # Narrowed from a bare except: only conversion failures mean
        # "no type"; anything else should propagate.
        return 0
@shared_task()
def bulkuploadsites(task_prog_obj_id, source_user, sites, pk):
time.sleep(2)
project = Project.objects.get(pk=pk)
# task_id = bulkuploadsites.request.id
task = CeleryTaskProgress.objects.get(pk=task_prog_obj_id)
task.content_object = project
task.status=1
task.save()
count = ""
try:
sites
count = len(sites)
task.description = "Bulk Upload of "+str(count)+" Sites."
task.save()
new_sites = 0
updated_sites = 0
with transaction.atomic():
i=0
interval = count/20
for site in sites:
# time.sleep(0.7)
print(i)
site = dict((k, v) for k, v in site.iteritems() if v is not '')
lat = site.get("longitude", 85.3240)
long = site.get("latitude", 27.7172)
if lat == "":
lat = 85.3240
if long == "":
long = 27.7172
location = Point(round(float(lat), 6), round(float(long), 6), srid=4326)
region_idf = site.get("region_id", None)
type_identifier = get_site_type(site.get("type", "0"))
_site, created = Site.objects.get_or_create(identifier=str(site.get("identifier")),
project=project)
if created:
new_sites += 1
else:
updated_sites += 1
if type_identifier > 0:
site_type = SiteType.objects.get(identifier=type_identifier, project=project)
_site.type = site_type
region = None
if region_idf is not None:
region = Region.objects.get(identifier=str(region_idf), project = project)
_site.region = region
_site.name = site.get("name")
_site.phone = site.get("phone")
_site.address = site.get("address")
_site.public_desc = site.get("public_desc")
_site.additional_desc = site.get("additional_desc")
_site.location = location
# _site.logo = "logo/default_site_image.png"
meta_ques = project.site_meta_attributes
myanswers = {}
for question in meta_ques:
if question['question_type'] not in ['Form','FormSubStat','FormSubCountQuestion','FormQuestionAnswerStatus']:
myanswers[question['question_name']]=site.get(question['question_name'], "")
_site.site_meta_attributes_ans = myanswers
_site.save()
i += 1
if i > interval:
interval = i+interval
bulkuploadsites.update_state('PROGRESS', meta={'current': i, 'total': count})
task.status = 2
task.save()
extra_message= ""
if new_sites > 0 and updated_sites > 0:
extra_message = " updated " + str(updated_sites) + " Sites and" + " created " + str(new_sites) + " Sites"
elif new_sites > 0 and updated_sites == 0:
extra_message = " created " + str(new_sites) + " Sites"
elif new_sites == 0 and updated_sites > 0:
extra_message = " updated " + str(updated_sites) + " Sites"
noti = project.logs.create(source=source_user, type=12, title="Bulk Sites",
organization=project.organization,
project=project, content_object=project, extra_object=project,
extra_message=extra_message)
except Exception as e:
task.description = "ERROR: " + str(e.message)
task.status = 3
task.save()
print 'Site Upload Unsuccesfull. %s' % e
print e.__dict__
noti = project.logs.create(source=source_user, type=412, title="Bulk Sites",
content_object=project, recipient=source_user,
extra_message=str(count) + " Sites @error " + u'{}'.format(e.message))
@shared_task()
def generateCustomReportPdf(task_prog_obj_id, source_user, site_id, base_url, fs_ids, start_date, end_date, removeNullField):
    """Render a site's submissions to a PDF and attach it to the task.

    Builds the report in memory, saves it under <site>/pdf/ via the
    default storage backend, and logs a success or failure
    notification for `source_user`.  Task status: 1 running, 2 done,
    3 failed.
    """
    time.sleep(5)
    task = CeleryTaskProgress.objects.get(pk=task_prog_obj_id)
    task.status = 1
    site=get_object_or_404(Site, pk=site_id)
    task.content_object = site
    task.save()
    try:
        buffer = BytesIO()
        report = PDFReport(buffer, 'Letter')
        pdf = report.generateCustomSiteReport(site_id, base_url, fs_ids, start_date, end_date, removeNullField)
        buffer.seek(0)
        # The generator wrote into `buffer`; its return value is
        # discarded and replaced by the raw bytes here.
        pdf = buffer.getvalue()
        pdf_url = default_storage.save(site.name + '/pdf/'+site.name+'-submissions.pdf', ContentFile(pdf))
        buffer.close()
        task.file.name = pdf_url
        task.status = 2
        task.save()
        noti = task.logs.create(source=source_user, type=32, title="Pdf Report generation in site",
                                recipient=source_user, content_object=task, extra_object=site,
                                extra_message=" <a href='"+ task.file.url +"'>Pdf report</a> generation in site")
    except Exception as e:
        task.description = "ERROR: " + str(e.message)
        task.status = 3
        task.save()
        print 'Report Gen Unsuccesfull. %s' % e
        print e.__dict__
        noti = task.logs.create(source=source_user, type=432, title="Pdf Report generation in site",
                                content_object=site, recipient=source_user,
                                extra_message="@error " + u'{}'.format(e.message))
        # NOTE(review): raises NameError if BytesIO() itself failed
        # above; otherwise this is a harmless double-close.
        buffer.close()
def siteDetailsGenerator(project, sites, ws):
    """Append a header row plus one row per site to worksheet ``ws``.

    Columns are the fixed site fields, the optional region identifier and
    one column per project site-meta attribute.  Special meta types are
    resolved in bulk before the per-site loop: submission counts via one
    annotated ORM query, last-submission status per form, mongo-backed form
    answers / answer-status, and 'Link' metas that recursively pull answers
    from sites of linked projects.

    Parameters:
        project -- Project whose ``site_meta_attributes`` drive the columns.
        sites   -- queryset of Site objects to export (also used for counts).
        ws      -- openpyxl worksheet to append rows to.

    Returns:
        (True, 'success') on success, (False, error_message) on any
        exception; the entire body is guarded by a single broad try/except.
    """
    try:
        # Fixed columns present for every export.
        header_columns = [ {'id': 'identifier' ,'name':'identifier'},
                            {'id': 'name','name':'name'},
                            {'id': 'site_type_identifier','name':'type'},
                            {'id': 'phone','name':'phone'},
                            {'id': 'address','name':'address'},
                            {'id': 'public_desc','name':'public_desc'},
                            {'id': 'additional_desc','name':'additional_desc'},
                            {'id': 'latitude','name':'latitude'},
                            {'id': 'longitude','name':'longitude'}, ]
        if project.cluster_sites:
            header_columns += [{'id':'region_identifier', 'name':'region_id'}, ]
        meta_ques = project.site_meta_attributes
        # One column per project-level meta attribute.
        for question in meta_ques:
            header_columns += [{'id': question['question_name'], 'name':question['question_name']}]
        get_answer_questions = []
        get_sub_count_questions = []
        get_sub_status_questions = []
        get_answer_status_questions = []
        site_list = {}
        meta_ref_sites = {}
        site_submission_count = {}
        site_sub_status = {}
        # Bucket meta questions by type so each kind can be resolved in bulk.
        for meta in meta_ques:
            if meta['question_type'] == 'FormSubStat':
                get_sub_status_questions.append(meta)
            elif meta['question_type'] == 'FormSubCountQuestion':
                get_sub_count_questions.append(meta)
        # Per-site submission counts: a single aggregate query covering all
        # count questions (one conditional Sum per question).
        if get_sub_count_questions:
            query = {}
            for meta in get_sub_count_questions:
                query[meta['question_name']] = Sum(
                    Case(
                        When(site_instances__project_fxf_id=meta['form_id'], then=1),
                        default=0, output_field=IntegerField()
                    ))
            results = sites.values('id',).annotate(**query)
            for submission_count in results:
                site_submission_count[submission_count['id']] = submission_count
        # Latest submission date per (form_id, site_id); the try/except only
        # handles first-time key creation for a form_id.
        if get_sub_status_questions:
            query = {}
            for meta in get_sub_status_questions:
                for submission in FInstance.objects.filter(project_id=project.id, project_fxf_id=meta['form_id']).values('site_id', 'date').distinct('site_id').order_by('site_id', '-instance_id'):
                    try:
                        site_sub_status[meta['form_id']][submission['site_id']] = "Last submitted on " + submission['date'].strftime("%d %b %Y %I:%M %P")
                    except:
                        site_sub_status[meta['form_id']] = {submission['site_id']:"Last submitted on " + submission['date'].strftime("%d %b %Y %I:%M %P")}
        #Optimized query, only one query per link type meta attribute which covers all site's answers.
        def generate(project_id, site_map, meta, identifiers, selected_metas):
            # Recursively resolve 'Link' meta answers: fetch the linked
            # project's sites by identifier and copy their meta answers into
            # site_list (columns are namespaced "<project_id>-<question>").
            project_id = str(project_id)
            sub_meta_ref_sites = {}
            sub_site_map = {}
            sitenew = Site.objects.filter(identifier__in = identifiers, project_id = project_id)
            for site in sitenew:
                # Guard against a project linking back to itself.
                if project_id == str(project.id):
                    continue
                identifier = site_map.get(site.identifier)
                if not site.site_meta_attributes_ans:
                    meta_ans = {}
                else:
                    meta_ans = site.site_meta_attributes_ans
                for meta in selected_metas.get(project_id, []):
                    if meta.get('question_type') == "Link":
                        link_answer=str(meta_ans.get(meta.get('question_name'), ""))
                        if link_answer != "":
                            if meta['question_name'] in sub_site_map:
                                # NOTE(review): membership is tested with
                                # site.identifier but the append uses
                                # link_answer as the key — confirm these
                                # should match (looks like a latent KeyError).
                                if site.identifier in sub_site_map[meta['question_name']]:
                                    sub_site_map[meta['question_name']][link_answer].append(identifier)
                                else:
                                    sub_site_map[meta['question_name']][link_answer] = [identifier]
                            else:
                                sub_site_map[meta['question_name']] = {}
                                sub_site_map[meta['question_name']][link_answer] = [identifier]
                            for idf in identifier:
                                if meta['question_name'] in sub_meta_ref_sites:
                                    sub_meta_ref_sites[meta['question_name']].append(meta_ans.get(meta['question_name']))
                                else:
                                    sub_meta_ref_sites[meta['question_name']] = [meta_ans.get(meta['question_name'])]
                    else:
                        for idf in identifier:
                            site_list[idf][project_id+"-"+meta.get('question_name')] = meta_ans.get(meta.get('question_name'), "")
            for meta in selected_metas.get(project_id, []):
                # NOTE(review): `head` aliases header_columns, so += below
                # also extends the outer header_columns (relied upon when the
                # rows are written at the end).
                head = header_columns
                head += [{'id':project_id+"-"+meta.get('question_name'), 'name':meta.get('question_text')}]
                if meta.get('question_type') == "Link":
                    generate(meta['project_id'], sub_site_map.get(meta['question_name'], []), meta, sub_meta_ref_sites.get(meta['question_name'], []), selected_metas)
        # Build one row dict per exported site.
        for site in sites:
            columns = {'identifier':site.identifier, 'name':site.name, 'site_type_identifier':site.type.identifier if site.type else "", 'phone':site.phone, 'address':site.address, 'public_desc':site.public_desc, 'additional_desc':site.additional_desc, 'latitude':site.latitude,
                'longitude':site.longitude, }
            if project.cluster_sites:
                columns['region_identifier'] = site.region.identifier if site.region else ""
            meta_ques = project.site_meta_attributes
            meta_ans = site.site_meta_attributes_ans
            for question in meta_ques:
                if question['question_type'] == 'FormSubCountQuestion':
                    columns[question['question_name']] = site_submission_count[site.id][question['question_name']]
                elif question['question_type'] == 'FormSubStat':
                    columns[question['question_name']] = site_sub_status[question['form_id']].get(site.id, '') if question['form_id'] in site_sub_status else ''
                elif question['question_type'] in ['Form','FormQuestionAnswerStatus']:
                    # Filled later from mongo query results.
                    columns[question['question_name']] = ""
                else:
                    if question['question_name'] in meta_ans:
                        columns[question['question_name']] = meta_ans[question['question_name']]
                        # Collect linked identifiers for the recursive pass.
                        if question['question_type'] == "Link" and meta_ans[question['question_name']] != "":
                            if question.get('question_name') in meta_ref_sites:
                                meta_ref_sites[question.get('question_name')].append(meta_ans[question['question_name']])
                            else:
                                meta_ref_sites[question.get('question_name')] = [meta_ans[question['question_name']]]
                    else:
                        columns[question['question_name']] = '-'
            site_list[site.id] = columns
        # Dispatch the bulk resolvers per meta type.
        for meta in meta_ques:
            if meta['question_type'] == "Link":
                # site_map: linked-site identifier -> list of local site ids.
                site_map = {}
                for key, value in site_list.items():
                    if value[meta['question_name']] != "":
                        identifier = str(value.get(meta['question_name']))
                        if identifier in site_map:
                            site_map[identifier].append(key)
                        else:
                            site_map[identifier] = [key]
                generate(meta['project_id'], site_map, meta, meta_ref_sites.get(meta['question_name'], []), meta.get('metas'))
            elif meta['question_type'] == 'Form':
                get_answer_questions.append(meta)
            elif meta['question_type'] == 'FormQuestionAnswerStatus':
                get_answer_status_questions.append(meta)
        # Last answer per site for 'Form' metas, straight from mongo.
        for meta in get_answer_questions:
            form_owner = None
            query = settings.MONGO_DB.instances.aggregate([{"$match":{"fs_project": project.id, "fs_project_uuid": str(meta['form_id'])}}, { "$group" : {
                "_id" : "$fs_site",
                "answer": { '$last': "$"+meta['question']['name'] }
                }
            }])
            print project.id, meta['form_id'], meta['question']['name']
            for submission in query['result']:
                try:
                    # NOTE(review): `is not ""` is an identity test, not
                    # equality — should almost certainly be `!= ""`.
                    if meta['question']['type'] in ['photo', 'video', 'audio'] and submission['answer'] is not "":
                        if not form_owner:
                            form_owner = FieldSightXF.objects.select_related('xf__user').get(pk=meta['form_id']).xf.user.username
                        # NOTE(review): the stray "+ +" makes this a unary
                        # plus on a str -> TypeError at runtime (swallowed by
                        # the bare except); form_owner was probably meant to
                        # be concatenated here.
                        site_list[int(submission['_id'])][meta['question_name']] = 'http://app.fieldsight.org/attachment/medium?media_file='+ +'/attachments/'+submission['answer']
                    # NOTE(review): this branch also runs for media types, so
                    # even a successful URL would be overwritten — confirm.
                    if not meta['question']['type'] == "repeat":
                        site_list[int(submission['_id'])][meta['question_name']] = submission['answer']
                except:
                    pass
        # Answered / Not Answered flags for 'FormQuestionAnswerStatus' metas.
        for meta in get_answer_status_questions:
            query = settings.MONGO_DB.instances.aggregate([{"$match":{"fs_project": project.id, "fs_project_uuid": str(meta['form_id'])}}, { "$group" : {
                "_id" : "$fs_site",
                "answer": { '$last': "$"+meta['question']['name'] }
                }
            }])
            for submission in query['result']:
                try:
                    if submission['answer'] and submission['answer'] != "":
                        site_list[int(submission['_id'])][meta['question_name']] = "Answered"
                    else:
                        site_list[int(submission['_id'])][meta['question_name']] = "Not Answered"
                except:
                    pass
        # Emit the header row, then one row per site.
        row_num = 0
        header_row=[]
        for col_num in range(len(header_columns)):
            # header_cell=WriteOnlyCell(ws, value=header_columns[col_num]['name'])
            # header_cell=Font(name='Courier', size=16)
            header_row.append(header_columns[col_num]['name'])
        ws.append(header_row)
        for key,site in site_list.iteritems():
            # ws.append([site.get(header_columns[col_num]['id']) for col_num in range(len(header_columns))])
            row=[]
            for col_num in range(len(header_columns)):
                row.append(site.get(header_columns[col_num]['id'], ""))
            ws.append(row)
        return True, 'success'
    except Exception as e:
        return False, e.message
# project = Project.objects.get(pk=137)
# sites = project.sites.all()
# siteDetailsGenerator(project, sites, None)
@shared_task(time_limit=600, soft_time_limit=600)
def generateSiteDetailsXls(task_prog_obj_id, source_user, project_id, region_id):
    """Celery task: build a 'Sites Detail' xlsx for a project and attach it
    to the given CeleryTaskProgress record.

    region_id may be falsy (all active sites), the string "0" (sites with no
    region), a single region id, or a list of region ids.  On success the
    file is stored via default_storage and the task is marked status 2; on
    failure status 3 with the error recorded on the task and a type-432 log.
    """
    time.sleep(5)
    task = CeleryTaskProgress.objects.get(pk=task_prog_obj_id)
    task.status = 1
    project = get_object_or_404(Project, pk=project_id)
    task.content_object = project
    task.save()
    try:
        buffer = BytesIO()
        wb = Workbook()
        ws = wb.active
        ws.title='Sites Detail'
        # NOTE(review): this first assignment is dead — it is unconditionally
        # replaced by one of the filtered querysets below.
        sites = project.sites.all().order_by('identifier')
        if region_id:
            if isinstance(region_id, list):
                sites = project.sites.filter(is_active=True, region_id__in=region_id).order_by('identifier')
            else:
                # "0" is the sentinel for "sites without a region".
                if region_id == "0":
                    sites = project.sites.filter(is_active=True, region_id=None).order_by('identifier')
                else:
                    sites = project.sites.filter(is_active=True, region_id=region_id).order_by('identifier')
        else:
            sites = project.sites.filter(is_active=True).order_by('identifier')
        status, message = siteDetailsGenerator(project, sites, ws)
        if not status:
            raise ValueError(message)
        wb.save(buffer)
        buffer.seek(0)
        xls = buffer.getvalue()
        xls_url = default_storage.save(project.name + '/sites/'+project.name+'-details.xlsx', ContentFile(xls))
        buffer.close()
        task.file.name = xls_url
        task.status = 2
        task.save()
        task.logs.create(source=source_user, type=32, title="Site details xls generation in project",
                              recipient=source_user, content_object=task, extra_object=project,
                              extra_message=" <a href='" + task.file.url +"'>Xls sites detail report</a> generation in project")
    except Exception as e:
        # e.message is Python-2-only; this module is py2 throughout.
        task.description = "ERROR: " + str(e.message)
        task.status = 3
        print e.__dict__
        task.save()
        task.logs.create(source=source_user, type=432, title="Xls Report generation in project",
                              content_object=project, recipient=source_user,
                              extra_message="@error " + u'{}'.format(e.message))
        buffer.close()
@shared_task(time_limit=7200, soft_time_limit=7200)
def exportProjectSiteResponses(task_prog_obj_id, source_user, project_id, base_url, fs_ids, start_date, end_date, filterRegion, filterSiteTypes):
    """Celery task: export form submissions for a project to a workbook.

    One sheet per form (plus extra sheets for repeat groups, prefixed
    "rep-"), a leading "Site Details" sheet produced by
    siteDetailsGenerator, filtered by site region/type and by submission
    date range [start_date, end_date] inclusive (end is extended by one
    day).  start_date/end_date are 'YYYY-MM-DD' strings.
    """
    time.sleep(5)
    task = CeleryTaskProgress.objects.get(pk=task_prog_obj_id)
    task.status = 1
    project=get_object_or_404(Project, pk=project_id)
    task.content_object = project
    task.save()
    try:
        buffer = BytesIO()
        sites = project.sites.filter(is_active=True)
        if filterRegion:
            sites = sites.filter(region_id__in=filterRegion)
        # fs_ids = FieldSightXF.objects.filter(project_id = project.id).values('id')
        # startdate="2016-05-01"
        # enddate= "2018-06-05"
        if filterSiteTypes:
            sites = sites.filter(type_id__in=filterSiteTypes)
        sites = sites.values('id')
        response_sites=[]
        # Inclusive date range: push the end date one day forward.
        split_startdate = start_date.split('-')
        split_enddate = end_date.split('-')
        new_startdate = date(int(split_startdate[0]), int(split_startdate[1]), int(split_startdate[2]))
        end = date(int(split_enddate[0]), int(split_enddate[1]), int(split_enddate[2]))
        new_enddate = end + datetime.timedelta(days=1)
        # Prefetch each form's matching submissions in one go.
        forms = FieldSightXF.objects.select_related('xf').filter(pk__in=fs_ids, is_survey=False, is_deleted=False).prefetch_related(Prefetch('project_form_instances', queryset=FInstance.objects.select_related('instance').filter(site_id__in=sites, date__range=[new_startdate, new_enddate]))).order_by('-is_staged', 'is_scheduled')
        wb = Workbook()
        ws_site_details = wb.active
        ws_site_details.title = "Site Details"
        form_id = 0
        form_names=[]
        def generate_sheet_name(form_name):
            # Excel sheet names are limited to 31 chars and may not contain
            # []*?:/ — truncate, de-duplicate with an occurrence counter,
            # and replace the forbidden characters.
            form_names.append(form_name)
            occurance = form_names.count(form_name)
            if occurance > 1 and len(form_name) > 25:
                sheet_name = form_name[:25] + ".." + "(" +str(occurance)+ ")"
            elif occurance > 1 and len(form_name) < 25:
                sheet_name = form_name + "(" +str(occurance)+ ")"
            elif len(form_name) > 29:
                sheet_name = form_name[:29] + ".."
            else:
                sheet_name = form_name
            for ch in ["[", "]", "*", "?", ":", "/"]:
                if ch in sheet_name:
                    sheet_name=sheet_name.replace(ch,"_")
            return sheet_name
        for form in forms:
            form_id += 1
            sheet_name = generate_sheet_name(form.xf.title)
            ws=wb.create_sheet(title=sheet_name)
            # Placeholder header until the first response defines columns.
            head_columns = [{'question_name':'No Submission','question_label':'No Submission'}]
            repeat_questions = []
            repeat_answers = []
            ws.append(['Header'])
            for formresponse in form.project_form_instances.all():
                if formresponse.site:
                    if not formresponse.site_id in response_sites:
                        response_sites.append(formresponse.site_id)
                    questions, answers, r_question_answers = parse_form_response(json.loads(form.xf.json)['children'], formresponse.instance.json, base_url, form.xf.user.username)
                    answers['identifier'] = formresponse.site.identifier
                    answers['name'] = formresponse.site.name
                    answers['status'] = formresponse.get_form_status_display()
                    if r_question_answers:
                        repeat_answers.append({'name': formresponse.site.name, 'identifier': formresponse.site.identifier, 'repeated': r_question_answers })
                    # Keep the widest column set seen so far.
                    # NOTE(review): the comparison builds 2 base columns but
                    # the assignment uses 3 (adds 'status') — confirm the
                    # off-by-one is intentional.
                    if len([{'question_name':'identifier','question_label':'identifier'}, {'question_name':'name','question_label':'name'}] + questions) > len(head_columns):
                        head_columns = [{'question_name':'identifier','question_label':'identifier'}, {'question_name':'name','question_label':'name'}, {'question_name':'status','question_label':'status'}] + questions
                    row=[]
                    for col_num in range(len(head_columns)):
                        row.append(answers.get(head_columns[col_num]['question_name'], ""))
                    ws.append(row)
            # Backfill row 1 with the final header labels.
            for col_num in range(len(head_columns)):
                ws.cell(row=1, column=col_num+1).value = head_columns[col_num].get('question_label', "")
            if repeat_answers:
                # NOTE(review): group iteration is driven by the FIRST
                # response's repeat groups — assumes every response shares
                # the same groups; confirm.
                for group_id, group in repeat_answers[0]['repeated'].items():
                    sheet_name = generate_sheet_name("rep-"+group_id)
                    print sheet_name
                    wr=wb.create_sheet(title=sheet_name)
                    wr.append(['Header'])
                    for repeat in repeat_answers:
                        for answer in repeat['repeated'][group_id]['answers']:
                            row = [repeat['identifier'], repeat['name']]
                            for question in group['questions']:
                                row.append(answer.get(question['question_name'], ""))
                            wr.append(row)
                    wr.cell(row=1, column=1).value = 'Identifier'
                    wr.cell(row=1, column=2).value = 'Name'
                    #for loop needed.
                    for col_num in range(len(group['questions'])):
                        wr.cell(row=1, column=col_num+3).value = group['questions'][col_num]['question_label']
        if not forms:
            ws = wb.create_sheet(title='No Forms')
        # Site Details sheet covers only sites that actually responded.
        sites = Site.objects.filter(pk__in=response_sites)
        status, message = siteDetailsGenerator(project, sites, ws_site_details)
        if not status:
            raise ValueError(message)
        wb.save(buffer)
        buffer.seek(0)
        xls = buffer.getvalue()
        xls_url = default_storage.save(project.name + '/xls/'+project.name+'-submissions.xls', ContentFile(xls))
        buffer.close()
        task.status = 2
        task.file.name = xls_url
        task.save()
        noti = task.logs.create(source=source_user, type=32, title="Xls Report generation in project",
                              recipient=source_user, content_object=task, extra_object=project,
                              extra_message=" <a href='"+ task.file.url +"'>Xls report</a> generation in project")
    except Exception as e:
        task.description = "ERROR: " + str(e.message)
        task.status = 3
        task.save()
        print 'Report Gen Unsuccesfull. %s' % e
        print e.__dict__
        noti = task.logs.create(source=source_user, type=432, title="Xls Report generation in project",
                              content_object=project, recipient=source_user,
                              extra_message="@error " + u'{}'.format(e.message))
        buffer.close()
@shared_task()
def importSites(task_prog_obj_id, source_user, f_project, t_project, meta_attributes, regions, ignore_region):
    """Celery task: copy sites (and optionally regions/meta attributes) from
    f_project into t_project.

    meta_attributes, when non-empty, whitelists which source meta questions
    may be considered duplicates; regions selects which source regions to
    migrate (0 meaning region-less sites); ignore_region drops region
    assignments entirely.  Copied rows are cloned by nulling pk and
    re-saving; identifier collisions get an "IFP<source project id>" suffix.
    """
    time.sleep(2)
    task = CeleryTaskProgress.objects.get(pk=task_prog_obj_id)
    task.content_object = t_project
    task.status=1
    task.save()
    try:
        def filterbyquestion_name(seq, value):
            # True when `value` already exists in `seq`, honouring the
            # meta_attributes whitelist (empty whitelist = consider all).
            for el in seq:
                if (not meta_attributes) or (meta_attributes and el.get('question_name') in meta_attributes):
                    if el.get('question_name')==value:
                        return True
            return False
        # migrate metas
        if t_project.site_meta_attributes:
            # t_metas aliases t_project.site_meta_attributes, so appends
            # mutate the project field in place (persisted by save() below).
            t_metas = t_project.site_meta_attributes
            f_metas = f_project.site_meta_attributes
            for f_meta in f_metas:
                # print t_metas
                # print ""
                check = filterbyquestion_name(t_metas, f_meta.get('question_name'))
                if not check:
                    t_metas.append(f_meta)
        region_map = {}
        t_project_sites = t_project.sites.filter(is_active=True).values_list('identifier', flat=True)
        # migrate regions
        if f_project.cluster_sites and not ignore_region:
            t_project_regions = t_project.project_region.filter(is_active=True).values_list('identifier', flat=True)
            t_project.cluster_sites=True
            # To handle whole project or a single region migrate
            region_objs = f_project.project_region.filter(id__in=regions, is_active=True)
            for region in region_objs:
                f_region_id = region.id
                if region.identifier in t_project_regions:
                    t_region_id = t_project.project_region.get(identifier=region.identifier, is_active=True).id
                else:
                    # Clone the region into the target project.
                    region.id=None
                    region.project_id=t_project.id
                    region.save()
                    t_region_id = region.id
                region_map[f_region_id]=t_region_id
            t_project.save()
            # getting Sites
            sites = f_project.sites.filter(is_active=True, region_id__in=regions)
            if 0 in regions:
                # 0 is the sentinel for "sites without a region".
                unassigned_sites = f_project.sites.filter(is_active=True, region_id=None)
                sites = sites | unassigned_sites
        else:
            sites = f_project.sites.filter(is_active=True)
        def get_t_region_id(f_region_id):
            # To get new region id without a query
            if f_region_id is not None and f_region_id in region_map:
                return region_map[f_region_id]
            else:
                return None
        # migrate sites
        for site in sites:
            site.id = None
            site.project_id = t_project.id
            if site.identifier in t_project_sites:
                site.identifier = str(site.identifier) + "IFP" + str(f_project.id)
            if f_project.cluster_sites and not ignore_region:
                site.region_id = get_t_region_id(site.region_id)
            else:
                site.region_id = None
            site.save()
        task.status = 2
        task.save()
        if f_project.cluster_sites and not ignore_region:
            noti = FieldSightLog.objects.create(source=source_user, type=30, title="Bulk Project import sites",
                                       content_object=t_project, recipient=source_user,
                                       extra_object=f_project, extra_message="Project Sites import from " + str(len(regions))+" Regions of ")
        else:
            noti = FieldSightLog.objects.create(source=source_user, type=29, title="Bulk Project import sites",
                                       content_object=t_project, recipient=source_user,
                                       extra_object=f_project)
    except Exception as e:
        task.description = "ERROR: " + str(e.message)
        task.status = 3
        task.save()
        print e.__dict__
        if f_project.cluster_sites and not ignore_region:
            noti = FieldSightLog.objects.create(source=source_user, type=430, title="Bulk Project import sites",
                                                content_object=t_project, recipient=source_user,
                                                extra_object=f_project, extra_message="Project Sites import from "+str(len(regions))+" Regions of ")
        else:
            noti = FieldSightLog.objects.create(source=source_user, type=429, title="Bulk Project import sites",
                                                content_object=t_project, recipient=source_user,
                                                extra_object=f_project)
@shared_task()
def multiuserassignproject(task_prog_obj_id, source_user, org_id, projects, users, group_id):
    """Celery task: assign every user in `users` the role `group_id` on
    every project in `projects`, inside one transaction.

    Duplicate active roles (MultipleObjectsReturned) are cleaned up by
    ending all but the oldest.  Logs type 23 when nothing new was created,
    type 21 otherwise, type 421 on failure.
    """
    time.sleep(2)
    org = Organization.objects.get(pk=org_id)
    projects_count = len(projects)
    users_count = len(users)
    task_id = multiuserassignproject.request.id
    task = CeleryTaskProgress.objects.get(pk=task_prog_obj_id)
    task.content_object = org
    task.description = "Assign "+str(users_count)+" people in "+str(projects_count)+" projects."
    task.status=1
    task.save()
    try:
        with transaction.atomic():
            roles_created = 0
            for project_id in projects:
                project = Project.objects.get(pk=project_id)
                for user in users:
                    try:
                        role, created = UserRole.objects.get_or_create(user_id=user, project_id=project_id,
                                                                       organization_id=org.id,
                                                                       group_id=group_id, ended_at=None)
                        if created:
                            roles_created += 1
                    except MultipleObjectsReturned:
                        # Keep the oldest active role; end all the rest.
                        redundant_ids = UserRole.objects.filter(user_id=user, project_id=project_id,
                                              organization_id=org.id, group_id=group_id, ended_at=None).order_by('id').values('id')[1:]
                        UserRole.objects.filter(pk__in=redundant_ids).update(ended_at=datetime.datetime.now())
                    # description = "{0} was assigned as Project Manager in {1}".format(
                    #     role.user.get_full_name(), role.project)
                    # noti = role.logs.create(source=role.user, type=6, title=description, description=description,
                    #  content_object=role.project, extra_object=self.request.user)
                    # result = {}
                    # result['description'] = description
                    # result['url'] = noti.get_absolute_url()
                    # ChannelGroup("notify-{}".format(role.organization.id)).send({"text": json.dumps(result)})
                    # ChannelGroup("project-{}".format(role.project.id)).send({"text": json.dumps(result)})
                    # ChannelGroup("notify-0").send({"text": json.dumps(result)})
        task.status = 2
        task.save()
        if roles_created == 0:
            noti = FieldSightLog.objects.create(source=source_user, type=23, title="Task Completed.",
                                                content_object=org, recipient=source_user,
                                                extra_message=str(roles_created) + " new Project Manager Roles in " + str(projects_count) + " projects ")
        else:
            noti = FieldSightLog.objects.create(source=source_user, type=21, title="Bulk Project User Assign",
                                                content_object=org, organization=org,
                                                extra_message=str(roles_created) + " new Project Manager Roles in " + str(projects_count) + " projects ")
    except Exception as e:
        task.description = "ERROR: " + str(e.message)
        task.status = 3
        task.save()
        print e.__dict__
        noti = FieldSightLog.objects.create(source=source_user, type=421, title="Bulk Project User Assign",
                                            content_object=org, recipient=source_user,
                                            extra_message=str(users_count)+" people in "+str(projects_count)+" projects ")
@shared_task()
def multiuserassignsite(task_prog_obj_id, source_user, project_id, sites, users, group_id):
    """Celery task: assign every user in `users` the role `group_id` on
    every site in `sites`, inside one transaction.

    Duplicate active roles are cleaned up by ending all but the oldest.
    Logs type 23 when nothing new was created, type 22 otherwise, type 422
    on failure.
    """
    time.sleep(2)
    project = Project.objects.get(pk=project_id)
    group_name = Group.objects.get(pk=group_id).name
    sites_count = len(sites)
    users_count = len(users)
    task_id = multiuserassignsite.request.id
    task = CeleryTaskProgress.objects.get(pk=task_prog_obj_id)
    task.content_object = project
    task.description = "Assign "+str(users_count)+" people in "+str(sites_count)+" sites."
    task.status=1
    task.save()
    try:
        with transaction.atomic():
            roles_created = 0
            for site_id in sites:
                site = Site.objects.get(pk=site_id)
                for user in users:
                    try:
                        role, created = UserRole.objects.get_or_create(user_id=user, site_id=site.id,
                                           project__id=project.id, organization__id=site.project.organization_id, group_id=group_id, ended_at=None)
                        if created:
                            roles_created += 1
                    except MultipleObjectsReturned:
                        # Keep the oldest active role; end all the rest.
                        redundant_ids = UserRole.objects.filter(user_id=user, site_id=site.id, project__id=project.id, organization__id=site.project.organization_id, group_id=group_id, ended_at=None).order_by('id').values('id')[1:]
                        UserRole.objects.filter(pk__in=redundant_ids).update(ended_at=datetime.datetime.now())
                    # description = "{0} was assigned as {1} in {2}".format(
                    #     role.user.get_full_name(), role.lgroup.name, role.project)
                    # noti_type = 8
                    # if data.get('group') == "Reviewer":
                    #     noti_type =7
                    # noti = role.logs.create(source=role.user, type=noti_type, title=description,
                    #                        description=description, content_type=site, extra_object=self.request.user,
                    #                        site=role.site)
                    # result = {}
                    # result['description'] = description
                    # result['url'] = noti.get_absolute_url()
                    # ChannelGroup("notify-{}".format(role.organization.id)).send({"text": json.dumps(result)})
                    # ChannelGroup("project-{}".format(role.project.id)).send({"text": json.dumps(result)})
                    # ChannelGroup("site-{}".format(role.site.id)).send({"text": json.dumps(result)})
                    # ChannelGroup("notify-0").send({"text": json.dumps(result)})
                    # Device = get_device_model()
                    # if Device.objects.filter(name=role.user.email).exists():
                    #     message = {'notify_type':'Assign Site', 'site':{'name': site.name, 'id': site.id}}
                    #     Device.objects.filter(name=role.user.email).send_message(message)
        task.status = 2
        task.save()
        if roles_created == 0:
            noti = FieldSightLog.objects.create(source=source_user, type=23, title="Task Completed.",
                                                content_object=project, recipient=source_user,
                                                extra_message="All "+str(users_count) +" users were already assigned as "+ group_name +" in " + str(sites_count) + " selected sites ")
        else:
            noti = FieldSightLog.objects.create(source=source_user, type=22, title="Bulk site User Assign",
                                                content_object=project, organization=project.organization, project=project,
                                                extra_message=str(roles_created) + " new "+ group_name +" Roles in " + str(sites_count) + " sites ")
    except Exception as e:
        task.status = 3
        task.description = "ERROR: " + str(e.message)
        print e.__dict__
        task.save()
        noti = FieldSightLog.objects.create(source=source_user, type=422, title="Bulk Sites User Assign",
                                            content_object=project, recipient=source_user,
                                            extra_message=group_name +" for "+str(users_count)+" people in "+str(sites_count)+" sites ")
@shared_task()
def multiuserassignregion(task_prog_obj_id, source_user, project_id, regions, users, group_id):
    """Celery task: assign every user in `users` the role `group_id` on
    every site of every region in `regions`, inside one transaction.

    Region id "0" is the sentinel for sites with no region.  Duplicate
    active roles are cleaned up by ending all but the oldest.  Logs type 23
    when nothing new was created, type 22 otherwise, type 422 on failure.
    """
    time.sleep(2)
    project = Project.objects.get(pk=project_id)
    group_name = Group.objects.get(pk=group_id).name
    sites_count = len(regions)
    users_count = len(users)
    task_id = multiuserassignregion.request.id
    task = CeleryTaskProgress.objects.get(pk=task_prog_obj_id)
    task.content_object = project
    task.description = "Assign "+str(users_count)+" people in "+str(sites_count)+" regions."
    task.status=1
    task.save()
    try:
        with transaction.atomic():
            roles_created = 0
            for region_id in regions:
                if region_id == "0":
                    sites = Site.objects.filter(region__isnull=True, project_id=project_id).values('id')
                else:
                    sites = Site.objects.filter(region_id = region_id, project_id=project_id).values('id')
                for site_id in sites:
                    for user in users:
                        # NOTE(review): this lookup is per-user but depends
                        # only on site_id — it could be hoisted one loop up.
                        site = Site.objects.filter(pk=site_id['id']).first()
                        if site and site.project_id == project.id:
                            try:
                                role, created = UserRole.objects.get_or_create(user_id=user, site_id=site_id['id'],
                                                   project__id=project.id, organization__id=project.organization_id, group_id=group_id, ended_at=None)
                                if created:
                                    roles_created += 1
                            except MultipleObjectsReturned:
                                # Keep the oldest active role; end the rest.
                                redundant_ids = UserRole.objects.filter(user_id=user, site_id=site_id['id'],
                                                      project__id=project.id, organization__id=project.organization_id, group_id=group_id, ended_at=None).order_by('id').values('id')[1:]
                                UserRole.objects.filter(pk__in=redundant_ids).update(ended_at=datetime.datetime.now())
                        # description = "{0} was assigned as {1} in {2}".format(
                        #     role.user.get_full_name(), role.lgroup.name, role.project)
                        # noti_type = 8
                        # if data.get('group') == "Reviewer":
                        #     noti_type =7
                        # noti = role.logs.create(source=role.user, type=noti_type, title=description,
                        #                        description=description, content_type=site, extra_object=self.request.user,
                        #                        site=role.site)
                        # result = {}
                        # result['description'] = description
                        # result['url'] = noti.get_absolute_url()
                        # ChannelGroup("notify-{}".format(role.organization.id)).send({"text": json.dumps(result)})
                        # ChannelGroup("project-{}".format(role.project.id)).send({"text": json.dumps(result)})
                        # ChannelGroup("site-{}".format(role.site.id)).send({"text": json.dumps(result)})
                        # ChannelGroup("notify-0").send({"text": json.dumps(result)})
                        # Device = get_device_model()
                        # if Device.objects.filter(name=role.user.email).exists():
                        #     message = {'notify_type':'Assign Site', 'site':{'name': site.name, 'id': site.id}}
                        #     Device.objects.filter(name=role.user.email).send_message(message)
        task.status = 2
        task.save()
        if roles_created == 0:
            noti = FieldSightLog.objects.create(source=source_user, type=23, title="Task Completed.",
                                                content_object=project, recipient=source_user,
                                                extra_message="All "+str(users_count) +" users were already assigned as "+ group_name +" in " + str(sites_count) + " selected regions ")
        else:
            noti = FieldSightLog.objects.create(source=source_user, type=22, title="Bulk site User Assign",
                                                content_object=project, organization=project.organization, project=project,
                                                extra_message=str(roles_created) + " new "+ group_name +" Roles in " + str(sites_count) + " regions ")
    except Exception as e:
        print 'Bulk role assign Unsuccesfull. ------------------------------------------%s' % e
        task.description = "Assign "+str(users_count)+" people in "+str(sites_count)+" regions. ERROR: " + str(e)
        task.status = 3
        task.save()
        print e.__dict__
        noti = FieldSightLog.objects.create(source=source_user, type=422, title="Bulk Region User Assign",
                                            content_object=project, recipient=source_user,
                                            extra_message=group_name +" for "+str(users_count)+" people in "+str(sites_count)+" regions ")
@shared_task(time_limit=18000, soft_time_limit=18000)
def auto_generate_stage_status_report():
    """Celery task: pre-generate the stage-status xls for every large
    project (2000+ sites) and store its path on project.progress_report.

    For each site one row is written: fixed site columns, one column per
    sub-stage (submission count), then visit/submission/flagged/rejected
    totals.  Failures are logged per project and do not stop the loop.
    """
    projects = Project.objects.filter(active=True)
    for project in projects:
        # Only pre-generate for big projects; small ones are built on demand.
        if Site.objects.filter(project_id=project.id).count() < 2000:
            continue
        else:
            try:
                data = []
                ss_index = {}
                # NOTE(review): stages_rows is populated but never read.
                stages_rows = []
                head_row = ["Site ID", "Name", "Region ID", "Latitude", "longitude", "Status"]
                stages = project.stages.filter(stage__isnull=True)
                for stage in stages:
                    sub_stages = stage.parent.all()
                    if len(sub_stages):
                        head_row.append("Stage :"+stage.name)
                        stages_rows.append("Stage :"+stage.name)
                        for ss in sub_stages:
                            head_row.append("Sub Stage :"+ss.name)
                            # Map header column index -> sub-stage id.
                            ss_index.update({head_row.index("Sub Stage :"+ss.name): ss.id})
                head_row.extend(["Site Visits", "Submission Count", "Flagged Submission", "Rejected Submission"])
                data.append(head_row)
                total_cols = len(head_row) - 6 # for non stages
                for site in project.sites.filter(is_active=True, is_survey=False):
                    flagged_count = 0
                    rejected_count = 0
                    submission_count = 0
                    if site.region:
                        site_row = [site.identifier, site.name, site.region.identifier, site.latitude, site.longitude, site.site_status]
                    else:
                        site_row = [site.identifier, site.name, site.region_id, site.latitude, site.longitude, site.site_status]
                    # Pad stage + summary columns, filled by index below.
                    site_row.extend([None]*total_cols)
                    for k, v in ss_index.items():
                        if Stage.objects.filter(id=v).count() == 1:
                            site_sub_stage = Stage.objects.get(id=v)
                            site_row[k] = site_sub_stage.site_submission_count(v, site.id)
                            submission_count += site_row[k]
                            flagged_count += site_sub_stage.flagged_submission_count(v, site.id)
                            rejected_count += site_sub_stage.rejected_submission_count(v, site.id)
                        else:
                            site_row[k] = 0
                    # Distinct submission days for this site = "site visits".
                    site_visits = settings.MONGO_DB.instances.aggregate([{"$match":{"fs_site": str(site.id)}}, { "$group" : {
                        "_id" :
                            { "$substr": [ "$start", 0, 10 ] }
                        }
                    }])['result']
                    site_row[-1] = rejected_count
                    site_row[-2] = flagged_count
                    site_row[-3] = submission_count
                    site_row[-4] = len(site_visits)
                    data.append(site_row)
                # `p` is presumably pyexcel (module-level import) — TODO confirm.
                p.save_as(array=data, dest_file_name="media/stage-report/{}_stage_data.xls".format(project.id))
                xl_data = open("media/stage-report/{}_stage_data.xls".format(project.id), "rb")
                #Its only quick fix for now, save it in aws bucket whenever possible.
                project.progress_report = xl_data.name
                project.save()
            except Exception as e:
                print 'Report Gen Unsuccesfull. %s' % e
                print e.__dict__
def sendNotification(notification, recipient):
    """Push a notification payload to the recipient's websocket channel.

    NOTE(review): this function looks broken/dead as written — confirm
    before relying on it:
      - every ``result[...] = value,`` line ends with a trailing comma, so
        each value is stored as a 1-tuple, not a scalar;
      - it reads ``noti``, ``source_user``, ``project``, ``count`` and ``e``,
        none of which are defined in this scope, while the ``notification``
        parameter is never used (``noti`` was presumably intended to be
        ``notification``).
    """
    result={}
    result['id']= noti.id,
    result['source_uid']= source_user.id,
    result['source_name']= source_user.username,
    result['source_img']= source_user.user_profile.profile_picture.url,
    result['get_source_url']= noti.get_source_url(),
    result['get_event_name']= project.name,
    result['get_event_url']= noti.get_event_url(),
    result['get_extraobj_name']= None,
    result['get_extraobj_url']= None,
    result['get_absolute_url']= noti.get_absolute_url(),
    result['type']= 412,
    result['date']= str(noti.date),
    result['extra_message']= str(count) + " Sites @error " + u'{}'.format(e.message),
    result['seen_by']= [],
    ChannelGroup("notif-user-{}".format(recipient.id)).send({"text": json.dumps(result)})
| |
#!/usr/bin/env python
import sys
import json
import shlex
import subprocess
from collections import MutableMapping, Mapping
def check_output(*popenargs, **kwargs):
    """Run command with arguments and return its output as a byte string.
    Backported from Python 2.7 as it's implemented as pure python on stdlib.
    >>> check_output(['/usr/bin/python', '--version'])
    Python 2.6.2
    https://gist.github.com/edufelipe/1027906
    """
    # Extra kwarg (not part of the 2.7 API): silence stderr echoing on failure.
    quiet = kwargs.pop('suppress_err_output', False)
    proc = subprocess.Popen(stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            *popenargs, **kwargs)
    out, err = proc.communicate()
    rc = proc.poll()
    if not rc:
        # Success path: hand back captured stdout as bytes.
        return out
    failed_cmd = kwargs.get("args")
    if failed_cmd is None:
        failed_cmd = popenargs[0]
    exc = subprocess.CalledProcessError(rc, failed_cmd)
    if not quiet:
        sys.stderr.write(err)
    # Attach captured streams so callers can inspect them.
    exc.stderr = err
    exc.output = out
    raise exc
def unicode_to_string(text):
    """Recursively encode unicode values to ascii ``str`` (Python 2).

    Lists and dicts are walked recursively; non-ascii characters are
    dropped ('ignore'); any other type is returned unchanged.
    """
    if isinstance(text, unicode):
        return text.encode('ascii', 'ignore')
    if isinstance(text, list):
        return [unicode_to_string(item) for item in text]
    if isinstance(text, dict):
        return {unicode_to_string(key): unicode_to_string(val)
                for key, val in text.iteritems()}
    return text
class CtxLogger(object):
    """Logger proxy that forwards messages to the ``ctx logger`` CLI."""

    def _logger(self, message, level):
        # Every log call shells out to the ctx proxy with the given level.
        return check_output(['ctx', 'logger', level, message])

    def debug(self, message):
        return self._logger(message=message, level='debug')

    def info(self, message):
        return self._logger(message=message, level='info')

    def warn(self, message):
        return self._logger(message=message, level='warn')

    def warning(self, message):
        # Alias so the interface also matches stdlib logging's method name.
        return self._logger(message=message, level='warn')

    def error(self, message):
        return self._logger(message=message, level='error')
# TODO: set immutable properties here.
class CtxNodeProperties(Mapping):
def __init__(self, relationship=None):
self.relationship = relationship
def __getitem__(self, property_name):
cmd = ['ctx', '-j', 'node', 'properties', property_name]
if self.relationship:
cmd.insert(2, self.relationship)
try:
# suppressing key error output that is displayed even if
# the error is not raised
result = json.loads(check_output(cmd, suppress_err_output=True))
except subprocess.CalledProcessError as ex:
if 'illegal path:' in ex.stderr:
raise KeyError(property_name)
else:
raise
return unicode_to_string(result)
def get_all(self):
cmd = ['ctx', '-j', 'node', 'properties']
result = json.loads(check_output(cmd))
return unicode_to_string(result)
def __len__(self):
return len(self.get_all())
def __iter__(self):
return iter(self.get_all())
def __contains__(self, element):
return element in self.get_all()
class CtxNode(object):
    """Read-only view of node data (id/name/type/properties) via the ``ctx``
    CLI, optionally scoped to a relationship endpoint."""

    def __init__(self, relationship=None):
        # relationship: 'source' or 'target' when used from a relationship ctx.
        self.relationship = relationship

    def _node(self, prop):
        """Fetch one node attribute via ``ctx -j [rel] node <prop>``."""
        cmd = ['ctx', '-j', 'node', prop]
        if self.relationship:
            # Fix: mirror CtxNodeInstance._instance. Without this insert,
            # ctx.source.node / ctx.target.node queried the *current* node's
            # id/name/type instead of the relationship endpoint's, even
            # though `properties` below was already relationship-scoped.
            cmd.insert(2, self.relationship)
        result = json.loads(check_output(cmd))
        return unicode_to_string(result)

    @property
    def properties(self):
        # Lazily-built mapping scoped to the same relationship endpoint.
        return CtxNodeProperties(self.relationship)

    @property
    def id(self):
        return self._node('id')

    @property
    def name(self):
        return self._node('name')

    @property
    def type(self):
        return self._node('type')
class CtxInstanceRuntimeProperties(MutableMapping):
    """Mutable mapping over node-instance runtime properties, backed by the
    ``ctx`` CLI (reads use ``-j`` JSON mode, writes pass an @-JSON value)."""
    def __init__(self, relationship=None):
        # relationship: 'source'/'target' when reached via a relationship ctx.
        self.relationship = relationship
    def __getitem__(self, property_name):
        """Fetch one runtime property; KeyError when the CLI reports
        'illegal path:' (missing key)."""
        cmd = ['ctx', '-j', 'instance', 'runtime_properties', property_name]
        if self.relationship:
            # After '-j': ctx -j <rel> instance runtime_properties <name>.
            cmd.insert(2, self.relationship)
        try:
            result = json.loads(check_output(cmd, suppress_err_output=True))
        except subprocess.CalledProcessError as e:
            if 'illegal path:' in e.stderr:
                raise KeyError(property_name)
            else:
                raise
        return unicode_to_string(result)
    def __setitem__(self, property_name, value):
        """Store ``value`` (JSON-serialized, '@'-prefixed for the CLI)."""
        cmd = ['ctx', 'instance', 'runtime_properties', property_name,
               '@{0}'.format(json.dumps(value))]
        if self.relationship:
            # No '-j' flag here, so the endpoint goes at index 1.
            cmd.insert(1, self.relationship)
        return check_output(cmd)
    def __delitem__(self, property_name):
        # The CLI has no delete; storing None is the closest equivalent.
        self[property_name] = None
    def get_all(self):
        # NOTE(review): ignores self.relationship, unlike get/set — confirm
        # whether len/iter/contains should also be scoped.
        cmd = ['ctx', '-j', 'instance', 'runtime_properties']
        result = json.loads(check_output(cmd))
        return unicode_to_string(result)
    def __len__(self):
        return len(self.get_all())
    def __iter__(self):
        return iter(self.get_all())
    def __contains__(self, element):
        return element in self.get_all()
class CtxNodeInstance(object):
    """Accessor for node-instance attributes via the ``ctx`` CLI."""

    def __init__(self, relationship=None):
        self.relationship = relationship

    def _instance(self, prop):
        cmd = ['ctx', '-j', 'instance', prop]
        if self.relationship:
            cmd.insert(2, self.relationship)
        return unicode_to_string(json.loads(check_output(cmd)))

    @property
    def runtime_properties(self):
        return CtxInstanceRuntimeProperties(self.relationship)

    @property
    def host_ip(self):
        return self._instance('host_ip')

    @property
    def id(self):
        return self._instance('id')

    @property
    def relationships(self):
        return self._instance('relationships')
class CtxRelationshipInstance(object):
    """One endpoint ('source' or 'target') of a relationship operation."""

    def __init__(self, relationship):
        self.relationship = relationship

    @property
    def instance(self):
        return CtxNodeInstance(self.relationship)

    @property
    def node(self):
        return CtxNode(self.relationship)
class Ctx(object):
    """Top-level facade mirroring the ``ctx`` proxy object's API."""

    def __init__(self):
        self.logger = CtxLogger()
        self.node = CtxNode()
        self.instance = CtxNodeInstance()
        self.target = CtxRelationshipInstance('target')
        self.source = CtxRelationshipInstance('source')

    def __call__(self, command_ref):
        # Escape hatch: run an arbitrary ctx sub-command given as a string.
        args = shlex.split(command_ref)
        args.insert(0, 'ctx')
        return check_output(args)

    def returns(self, data):
        return json.loads(check_output(['ctx', '-j', 'returns', str(data)]))

    def abort_operation(self, message=''):
        extra = [message] if message else []
        subprocess.check_call(['ctx', 'abort_operation'] + extra)

    def retry_operation(self, message=''):
        extra = [message] if message else []
        subprocess.check_call(['ctx', 'retry_operation'] + extra)

    # TODO: support kwargs for both download_resource and ..render
    def download_resource(self, source, destination=''):
        cmd = ['ctx', 'download-resource', source]
        if destination:
            cmd.append(destination)
        return check_output(cmd)

    def download_resource_and_render(self, source, destination='',
                                     params=None):
        cmd = ['ctx', 'download-resource-and-render', source]
        if destination:
            cmd.append(destination)
        if params:
            kwargs = {'template_variables': params}
            if not isinstance(params, dict):
                self.abort_operation('Expecting params to be in the form of '
                                     'dict.')
            # '@' prefix makes ctx parse the argument as JSON.
            cmd.append('@{0}'.format(json.dumps(kwargs)))
        return check_output(cmd)
ctx = Ctx()
| |
"""
Run a Job
"""
import yaml
import json
import decimal
import re
import sys
import time
import commands
import subprocess
from log import log
from error import Error
import platform
class InputNotCollectable(Exception):
    """Raised when required input cannot be gathered from the available
    collection methods and input sources."""
def Run(run_spec, command_options, command_args, input_data=None):
    """Run a job.

    Looks up the job spec named by the first command arg in run_spec['jobs'],
    loads it from YAML, collects and validates input, executes every run item
    for the current platform (stopping at the first failed item), tests each
    run result, and returns a result_data dict with timing, per-item results
    and an overall 'success' flag.

    Args:
      run_spec: dict, loaded run specification (must contain 'jobs')
      command_options: dict, global options ('platform', 'input_path', ...)
      command_args: list of strings; first element is the job spec key
      input_data: optional dict of pre-collected input; when empty/None it is
          retrieved via RetrieveInputData()

    Returns:
      dict, result data for the whole job run
    """
    # Get the job spec name
    if len(command_args) < 1:
        Error('Missing job spec name to run', command_options)
    job_spec_key = command_args[0]
    # Get the job spec path, from the run spec job key
    if job_spec_key in run_spec['jobs']:
        job_spec_path = run_spec['jobs'][job_spec_key]
    else:
        Error('Missing job spec key in run spec: %s' % job_spec_key, command_options)
    # Load the job spec
    try:
        job_spec = yaml.load(open(job_spec_path))
    except Exception, e:
        Error('Failed to load job spec: %s: %s' % (job_spec_path, e), command_options)
    # Fail if the platform is not in this job spec
    if command_options['platform'] not in job_spec['run']:
        Error('This platform is not supported by this job, no run commands available: %s' % command_options['platform'], command_options)
    # Initiate run procedures
    if not input_data:
        log('Retrieving input data manually')
        input_data = RetrieveInputData(run_spec, job_spec, job_spec_path, command_options, command_args)
    log('Input Data: %s' % input_data)
    # Run it...
    log('Running job: %s' % job_spec['data']['name'])
    # Get the run items for our current platform
    result_data = {'started':time.time(), 'run_results':[], 'success':None}
    platform_run_items = job_spec['run'][command_options['platform']]
    # Run each platform run item
    for run_item in platform_run_items:
        run_result = RunItem(run_spec, job_spec, job_spec_path, run_item, input_data, command_options, command_args)
        # Test this data. If this fails we will abort and exit the program, not continuing on below.
        run_test_results = TestRunResult(run_spec, job_spec, job_spec_path, run_item, input_data, run_result, result_data, command_options, command_args)
        run_result['test_results'] = run_test_results
        # Test overall success of this run item: every test case must pass
        run_result['success'] = True
        for test_result in run_test_results:
            if not test_result['success']:
                run_result['success'] = False
                break
        # Track all the run results for our overall job
        result_data['run_results'].append(run_result)
        # Stop running any more run items if we failed the last one. We need all of them to be successful to keep going.
        if run_result['success'] == False:
            log('Failed run test, aborting any more run items...')
            break
    # Wrap everything up
    result_data['finished'] = time.time()
    result_data['duration'] = result_data['finished'] - result_data['started']
    # Ensure we know who ran this (local platform module, not the stdlib one)
    result_data['hostname'] = platform.GetHostname()
    result_data['platform'] = platform.GetPlatform()
    # If every one of our run results was a success, then we are successful
    all_success = True
    for run_result in result_data['run_results']:
        if not run_result['success']:
            all_success = False
            break
    # If we were not always successful, then we are not successful
    if not all_success:
        result_data['success'] = False
    else:
        result_data['success'] = True
    log('Run Result Data: %s' % result_data)
    # Report the results
    #TODO: ReportResult() is not implemented yet; results are only returned.
    #ReportResult()...
    pass
    # Return result data
    return result_data
def RetrieveInputData(runspec, job_spec, job_spec_path, command_options, command_args):
    """Returns the input_data, with all required data validated.

    Input is merged from sources in this order (later sources win):
      1. command_options['input_data'] (Websource/API invocation)
      2. the JSON or YAML file at command_options['input_path']
      3. interactive collection for any still-missing fields
    Every field is run through ValidateInput() before being returned; a
    validation failure aborts the run via Error().  Raises a plain Exception
    if fields remain uncollected after all sources were consulted (code
    logic error).
    """
    log('Retrieving Input Data')
    # Start with no input fields
    input_data = {}
    # If data was passed in directly with our command options (Websource/API invocation method, not available from CLI), update dict
    if 'input_data' in command_options:
        input_data.update(command_options['input_data'])
    # Load any input from file, if specified
    if command_options['input_path']:
        # JSON
        if command_options['input_path'].endswith('.json'):
            # Attempt to load the specified input path
            try:
                input_data_loaded = json.load(open(command_options['input_path']))
                input_data.update(input_data_loaded)
            except Exception, e:
                Error('Failed to load input path: %s: %s' % (command_options['input_path'], e), command_options)
        # YAML
        elif command_options['input_path'].endswith('.yaml'):
            # Attempt to load the specified input path
            try:
                input_data_loaded = yaml.load(open(command_options['input_path']))
                input_data.update(input_data_loaded)
            except Exception, e:
                Error('Failed to load input path: %s: %s' % (command_options['input_path'], e), command_options)
        # Unknown input file type, fail
        else:
            Error('Unknown input file data type (suffic unknown, acceptable: .yaml, .json): %s' % command_options['input_path'], command_options)
    # Collect and validate input fields
    validated_input = {}
    missing_input = []
    # Validate Input from input path and determine input we still do not have, which needs to be collected
    #NOTE(g): Note this is done before collection (which needs to be validated as well) to reduce wasted time/effort if it's going
    # to fail on this input, better to do it before making the user input the collected data interactively.
    for (key, value) in job_spec['input'].items():
        # If we have this key in input data, validate
        if key in input_data:
            # Get the validated and processed input.
            #NOTE(g): Any errors abort the run, so no error checking is necessary.
            validated_input[key] = ValidateInput(job_spec, job_spec_path, key, input_data[key], command_options)
        # Else, add to our missing input to collect interactively
        else:
            missing_input.append(key)
    # If we have validated input, log about it
    if validated_input:
        log('Validated input file data: %s item(s). Collecting %s item(s) interactively.' % (len(validated_input), len(missing_input)))
    else:
        log('No fields validated by input file. Collecting %s item(s) interactively.' % len(missing_input))
    # If we have missing input, but specified non-interactive in our options, fail
    if missing_input and command_options['noninteractive']:
        Error('Non-interactive run mode was missing input fields from input data: %s' % ', '.join(missing_input), command_options)
    # Else, if we have missing input, collect it via our collection specification
    elif missing_input:
        # Collect all our missing input (whether in Collect block or not, Collect block just gives more detail for prompting for data)
        collected_input = CollectInput(job_spec, job_spec_path, missing_input, command_options)
        # Validate collected input
        for (collected_key, collected_value) in collected_input.items():
            # Get the validated and processed input.
            #NOTE(g): Any errors abort the run, so no error checking is necessary.
            validated_input[collected_key] = ValidateInput(job_spec, job_spec_path, collected_key, collected_value, command_options)
            # Remove the collected key from missing input, not missing any more
            missing_input.remove(collected_key)
    # Check for code logic failure, still missing input keys after all of them should have been collected one way or another
    if missing_input:
        raise Exception('Code Logic Error: Failed to collect all the data between input data files and interactive collection: Missing input keys: %s' % missing_input)
    # Return our validated input fields
    return validated_input
def ValidateInput(job_spec, job_spec_path, key, value, command_options):
    """Validate and coerce a single input value against the job spec.

    If successful, returns the processed value that passes validation.  On
    any validation failure the run is terminated with Error() and this
    function does not return normally.

    Args:
      job_spec: dict, loaded job specification (must contain 'input')
      job_spec_path: string, path of the job spec file (for error messages)
      key: string, name of the input field being validated
      value: raw input value (typically a string from file or prompt)
      command_options: dict, global command options passed through to Error()
    """
    # Get validation information
    input_validation = job_spec['input'][key]
    # There must be a type to validate on, any error aborts
    if 'type' not in input_validation:
        Error('Invalid input validation spec in job_spec: %s: Input "%s": No type was specified for validation' % (job_spec_path, key), command_options)
    # Text validation
    if input_validation['type'] == 'text':
        validated_value = str(value)
        # Minimum length: optional validation
        #NOTE: fixed to read the 'min length' key (was 'min'), matching the
        # key tested for above.
        if 'min length' in input_validation and len(validated_value) < input_validation['min length']:
            Error('Input Validation: Value is less than minimum (job_spec_path="%s", input_key="%s"): %s (min size = %s)' % (job_spec_path, key, len(validated_value), input_validation['min length']), command_options)
        # Maximum length: optional validation
        #NOTE: fixed operator (was '<') and key (was 'max') so over-long
        # values are the ones rejected.
        if 'max length' in input_validation and len(validated_value) > input_validation['max length']:
            Error('Input Validation: Value is more than maximum (job_spec_path="%s", input_key="%s"): %s (max size = %s)' % (job_spec_path, key, len(validated_value), input_validation['max length']), command_options)
        # Regex Match Validation: optional validation
        if 'regex validate' in input_validation and not re.findall(input_validation['regex validate'], validated_value):
            Error('Input Validation: Regex match not found (job_spec_path="%s", input_key="%s"): %s (regex = "%s")' % (job_spec_path, key, validated_value, input_validation['regex validate']), command_options)
    # Integer
    elif input_validation['type'] in ['integer', 'int']:
        # Coerce to integer, or fail
        try:
            validated_value = int(value)
        except Exception:
            Error('Input Validation: Value is not an Integer (job_spec_path="%s", input_key="%s"): %s' % (job_spec_path, key, value), command_options)
        # Minimum value: optional validation
        if 'min' in input_validation and validated_value < input_validation['min']:
            Error('Input Validation: Value is less than minimum (job_spec_path="%s", input_key="%s"): %s < %s (min)' % (job_spec_path, key, validated_value, input_validation['min']), command_options)
        # Maximum value: optional validation (fixed operator, was '<')
        if 'max' in input_validation and validated_value > input_validation['max']:
            Error('Input Validation: Value is more than maximum (job_spec_path="%s", input_key="%s"): %s > %s (max)' % (job_spec_path, key, validated_value, input_validation['max']), command_options)
    # Decimal
    elif input_validation['type'] == 'decimal':
        # Coerce to Decimal, or fail (mirrors the integer branch)
        try:
            validated_value = decimal.Decimal(value)
        except Exception:
            Error('Input Validation: Value is not a Decimal (job_spec_path="%s", input_key="%s"): %s' % (job_spec_path, key, value), command_options)
        # Minimum value: optional validation
        if 'min' in input_validation and validated_value < input_validation['min']:
            Error('Input Validation: Value is less than minimum (job_spec_path="%s", input_key="%s"): %s < %s (min)' % (job_spec_path, key, value, input_validation['min']), command_options)
        # Maximum value: optional validation (fixed operator, was '<')
        if 'max' in input_validation and validated_value > input_validation['max']:
            Error('Input Validation: Value is more than maximum (job_spec_path="%s", input_key="%s"): %s > %s (max)' % (job_spec_path, key, value, input_validation['max']), command_options)
    # Unknown - fail
    else:
        Error('Unknown input validation type (job_spec_path="%s", input_key="%s"): %s' % (job_spec_path, key, input_validation['type']), command_options)
    return validated_value
def CollectInput(job_spec, job_spec_path, missing_input, command_options):
    """Returns a dict of input fields (key/value) after prompting the user for them interactively.

    First it prompts for fields that are not in the Collect block (ones that should be sent non-interactively if invoked
    in a distributed manner (not CLI based)). Second it prompts for fields that are specified in a Collect block, and have
    actual prompting information. In a distributed invocation, Collect block fields would be collected in a web form or something,
    CLI invocation will get this readline method.
    """
    log('Collecting Input')
    collected_data = {}
    # Find what fields are in the Collect block
    collect_block_keys = []
    for item in job_spec['collect']:
        for (item_set_key, item_set_value) in item['set'].items():
            collect_block_keys.append(item_set_key)
    # First pass - Collect input fields that are not in the Collect block (no prompt information)
    missing_input_keys = list(missing_input)
    missing_input_keys.sort()
    for key in missing_input_keys:
        # Skip collect block keys, we will do them after we do the keys not in the Collect block
        #TODO(g): Maybe this is an unnecessary idea, but I like the idea of separating them for a few reasons, so doing it this way
        if key in collect_block_keys:
            continue
        # Prompt (trailing comma keeps the cursor on the prompt line)
        print '\nEnter input data field: %s: ' % key,
        sys.stdout.flush()
        # Read input
        #NOTE(review): unlike the second pass below, this value keeps its
        # trailing newline -- confirm whether that is intentional.
        collected_data[key] = sys.stdin.readline()
    # Second pass - Collect input fields that are in the Collect block
    for collect_item in job_spec['collect']:
        print '\nCollect data for: %(label)s: %(info)s' % collect_item
        for item_set_field in collect_item['set']:
            # If this item is already specified in input data (not in missing_input list), skip it
            if item_set_field not in missing_input:
                print '\nField value specified in input data: %s' % item_set_field
                continue
            # Prompt (trailing comma keeps the cursor on the prompt line)
            print '\nEnter field: %s: ' % item_set_field,
            sys.stdout.flush()
            # Read input
            collected_data[item_set_field] = sys.stdin.readline()[:-1]  # Strip ending new-line character
    return collected_data
def RunItem(run_spec, job_spec, job_spec_path, run_item, input_data, command_options, command_args):
    """Execute a single platform run item and capture timing and output.

    Returns a dict with keys: started, finished, duration, command,
    exit_code, stdout, stderr.
    """
    log('Run Item: %s' % run_item)
    # Start
    start_time = time.time()
    command = run_item['execute']
    # Interpolate input data only when the command contains %(...)s tokens,
    # so commands without placeholders never hit a formatting error.
    if re.findall('%\(.*?\)s', command):
        command = command % input_data
    log('Run Command: %s' % command)
    #TODO(g): Get a distinct data stream for STDOUT and STDERR, also need to
    # poll these so I can report on long running jobs.
    (status, output, output_error) = RunShell(command)
    # Finish
    end_time = time.time()
    return {
        'started': start_time,
        'finished': end_time,
        'duration': end_time - start_time,
        'command': command,
        'exit_code': status,
        'stdout': output,
        'stderr': output_error,
    }
def TestRunResult(run_spec, job_spec, job_spec_path, run_item, input_data, run_result, result_data, command_options, command_args):
    """Test the run_result for the run_item.

    Each test case in run_item['tests'] whose 'when' phase matches the run
    state ('finished' vs 'during') is evaluated against run_result.  Returns
    a list of dicts, one per executed test case, each carrying a 'success'
    bool and optionally 'log', 'critical' and 'warning' keys.  A failed
    critical test stops further testing.  Abort, report and exit if the run
    result is missing a tested key or a test function is unknown.
    """
    log('Test Run Result: %s' % run_result)
    # List of dicts, with test result information (critical, warning, success)
    test_results = []
    for test_case in run_item['tests']:
        # Skip test case if this when doesnt match
        if 'finished' in run_result and test_case['when'] != 'finished':
            continue
        elif 'finished' not in run_result and test_case['when'] != 'during':
            continue
        # Ensure we have the test case field key we want to test, or fail
        if test_case['key'] not in run_result:
            Error('Missing test case key in run result: %s: %s' % (test_case['key'], run_result), command_options)
        # Get the value we are to operate on
        value = run_result[test_case['key']]
        # Create our test_result dict, and just append it to our list of test results now
        test_result = {}
        test_results.append(test_result)
        # Equals
        if test_case['function'] in ['==', 'equals']:
            test_result['success'] = (value == test_case['value'])
        # Not Equals
        elif test_case['function'] in ['!=', 'not equals']:
            test_result['success'] = (value != test_case['value'])
        # RegEx
        elif test_case['function'] in ['regex']:
            test_result['success'] = bool(re.findall(str(test_case['value']), str(value)))
        # Unknown test function: abort explicitly instead of crashing with a
        # KeyError on the missing 'success' key below (previous behavior).
        else:
            Error('Unknown test case function (job_spec_path="%s"): %s' % (job_spec_path, test_case['function']), command_options)
        # ---- Test Cases are Finished ----
        # If we had a failure
        if not test_result['success']:
            if test_case.get('log failure', None):
                log_message = test_case['log failure'] % run_result
                test_result['log'] = log_message
                log('Result Test Failure: %s' % log_message)
            # 'critical'/'warning' are optional in the job spec, so use .get()
            # instead of indexing (which raised KeyError when absent).
            if test_case.get('critical'):
                test_result['critical'] = True
                break
            if test_case.get('warning'):
                test_result['warning'] = True
        # Else, we had a success
        else:
            if test_case.get('log success', None):
                log_message = test_case['log success'] % run_result
                test_result['log'] = log_message
                # Fixed log prefix: this is the success path (was "Failure")
                log('Result Test Success: %s' % log_message)
    return test_results
def RunShell(command):
    """Run the command on the local machine.  Blocks until complete.

    Args:
      command: string, shell command to execute

    Returns:
      (exit_code, stdout, stderr) tuple.
    """
    # communicate() drains both pipes concurrently and then waits on the
    # process.  The previous wait()-then-read() ordering could deadlock:
    # a child that filled either pipe's OS buffer would block on write
    # while we blocked on wait().
    pipe = subprocess.Popen(command, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, shell=True)
    (output, output_error) = pipe.communicate()
    status = pipe.returncode
    return (status, output, output_error)
| |
# Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 OpenStack Foundation
# Copyright 2012 Grid Dynamics
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Base utilities to build API operation managers and objects on top of.
"""
# E1102: %s is not callable
# pylint: disable=E1102
import abc
import copy
from oslo_utils import reflection
from oslo_utils import strutils
from urllib import parse
from troveclient.apiclient import exceptions
def getid(obj):
    """Return the identifier of *obj* if it is a Resource, else obj itself.

    Abstracts the common pattern of allowing both an object or an object's
    ID (UUID) as a parameter when dealing with relationships.  A truthy
    ``uuid`` attribute wins over ``id``; a bare value passes through.
    """
    uuid = getattr(obj, 'uuid', None)
    if uuid:
        return uuid
    try:
        return obj.id
    except AttributeError:
        return obj
# TODO(aababilov): call run_hooks() in HookableMixin's child classes
class HookableMixin(object):
    """Mixin so classes can register and run hooks."""

    # Shared registry: hook type name -> list of callables.
    _hooks_map = {}

    @classmethod
    def add_hook(cls, hook_type, hook_func):
        """Register *hook_func* under *hook_type*.

        :param cls: class that registers hooks
        :param hook_type: hook type, e.g., '__pre_parse_args__'
        :param hook_func: hook function
        """
        cls._hooks_map.setdefault(hook_type, []).append(hook_func)

    @classmethod
    def run_hooks(cls, hook_type, *args, **kwargs):
        """Invoke every hook registered under *hook_type*.

        :param cls: class that registers hooks
        :param hook_type: hook type, e.g., '__pre_parse_args__'
        :param args: args to be passed to every hook function
        :param kwargs: kwargs to be passed to every hook function
        """
        for func in cls._hooks_map.get(hook_type) or []:
            func(*args, **kwargs)
class BaseManager(HookableMixin):
    """Basic manager type providing common operations.

    Managers interact with a particular type of API (servers, flavors,
    images, etc.) and provide CRUD operations for them.
    """

    # Subclasses set this to the Resource subclass they construct.
    resource_class = None

    def __init__(self, client):
        """Initializes BaseManager with `client`.

        :param client: instance of BaseClient descendant for HTTP requests
        """
        super(BaseManager, self).__init__()
        self.client = client

    def _list(self, url, response_key, obj_class=None, json=None):
        """List the collection.

        :param url: a partial URL, e.g., '/servers'
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'servers'
        :param obj_class: class for constructing the returned objects
            (self.resource_class will be used by default)
        :param json: data that will be encoded as JSON and passed in POST
            request (GET will be sent by default)
        """
        response = (self.client.post(url, json=json) if json
                    else self.client.get(url))
        body = response.json()
        if obj_class is None:
            obj_class = self.resource_class
        data = body[response_key]
        # NOTE(ja): keystone returns values as list as {'values': [ ... ]}
        # unlike other services which just return the list...
        try:
            data = data['values']
        except (KeyError, TypeError):
            pass
        return [obj_class(self, item, loaded=True) for item in data if item]

    def _get(self, url, response_key):
        """Get an object from collection.

        :param url: a partial URL, e.g., '/servers'
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'server'
        """
        body = self.client.get(url).json()
        return self.resource_class(self, body[response_key], loaded=True)

    def _head(self, url):
        """Retrieve request headers for an object.

        :param url: a partial URL, e.g., '/servers'
        """
        return self.client.head(url).status_code == 204

    def _post(self, url, json, response_key, return_raw=False):
        """Create an object.

        :param url: a partial URL, e.g., '/servers'
        :param json: data that will be encoded as JSON and passed in POST
            request (GET will be sent by default)
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'servers'
        :param return_raw: flag to force returning raw JSON instead of
            Python object of self.resource_class
        """
        body = self.client.post(url, json=json).json()
        data = body[response_key]
        return data if return_raw else self.resource_class(self, data)

    def _put(self, url, json=None, response_key=None):
        """Update an object with PUT method.

        :param url: a partial URL, e.g., '/servers'
        :param json: data that will be encoded as JSON and passed in the
            request body
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'servers'
        """
        resp = self.client.put(url, json=json)
        # PUT requests may not return a body
        if not resp.content:
            return None
        body = resp.json()
        if response_key is None:
            return self.resource_class(self, body)
        return self.resource_class(self, body[response_key])

    def _patch(self, url, json=None, response_key=None):
        """Update an object with PATCH method.

        :param url: a partial URL, e.g., '/servers'
        :param json: data that will be encoded as JSON and passed in the
            request body
        :param response_key: the key to be looked up in response dictionary,
            e.g., 'servers'
        """
        body = self.client.patch(url, json=json).json()
        payload = body if response_key is None else body[response_key]
        return self.resource_class(self, payload)

    def _delete(self, url):
        """Delete an object.

        :param url: a partial URL, e.g., '/servers/my-server'
        """
        return self.client.delete(url)
class ManagerWithFind(BaseManager, metaclass=abc.ABCMeta):
    """Manager with additional `find()`/`findall()` methods."""

    @abc.abstractmethod
    def list(self):
        pass

    def find(self, **kwargs):
        """Find the single item whose attributes match ``**kwargs``.

        This isn't very efficient: it loads the entire list then filters on
        the Python side.  Raises NotFound when nothing matches and
        NoUniqueMatch when more than one item does.
        """
        matches = self.findall(**kwargs)
        if not matches:
            raise exceptions.NotFound(
                "No %s matching %s." % (self.resource_class.__name__, kwargs))
        if len(matches) > 1:
            raise exceptions.NoUniqueMatch()
        return matches[0]

    def findall(self, **kwargs):
        """Find all items with attributes matching ``**kwargs``.

        This isn't very efficient: it loads the entire list then filters on
        the Python side.
        """
        wanted = kwargs.items()
        matched = []
        for candidate in self.list():
            try:
                if all(getattr(candidate, attr) == value
                       for (attr, value) in wanted):
                    matched.append(candidate)
            except AttributeError:
                # Candidates lacking a searched attribute never match.
                continue
        return matched
class CrudManager(BaseManager):
    """Base manager class for manipulating entities.

    Children of this class are expected to define a `collection_key` and
    `key`.

    - `collection_key`: Usually a plural noun by convention (e.g.
      `entities`); used to refer collections in both URL's (e.g.
      `/v3/entities`) and JSON objects containing a list of member resources
      (e.g. `{'entities': [{}, {}, {}]}`).
    - `key`: Usually a singular noun by convention (e.g. `entity`); used to
      refer to an individual member of the collection.
    """
    collection_key = None
    key = None

    def build_url(self, base_url=None, **kwargs):
        """Build a resource URL for the given kwargs.

        Given an example collection where `collection_key = 'entities'` and
        `key = 'entity'`, the following URL's could be generated.

        By default, the URL will represent a collection of entities, e.g.
        ``/entities``.  If kwargs contains an `entity_id`, then the URL will
        represent a specific member, e.g. ``/entities/{entity_id}``.

        :param base_url: if provided, the generated URL will be appended to it
        """
        url = '%s/%s' % (base_url or '', self.collection_key)
        # do we have a specific entity?
        entity_id = kwargs.get('%s_id' % self.key)
        if entity_id is not None:
            url = '%s/%s' % (url, entity_id)
        return url

    def _filter_kwargs(self, kwargs):
        """Drop null values and translate Resource objects into *_id keys."""
        for name, ref in list(kwargs.items()):
            if ref is None:
                del kwargs[name]
            elif isinstance(ref, Resource):
                del kwargs[name]
                kwargs['%s_id' % name] = getid(ref)
        return kwargs

    def create(self, **kwargs):
        kwargs = self._filter_kwargs(kwargs)
        return self._post(self.build_url(**kwargs),
                          {self.key: kwargs},
                          self.key)

    def get(self, **kwargs):
        kwargs = self._filter_kwargs(kwargs)
        return self._get(self.build_url(**kwargs), self.key)

    def head(self, **kwargs):
        kwargs = self._filter_kwargs(kwargs)
        return self._head(self.build_url(**kwargs))

    def list(self, base_url=None, **kwargs):
        """List the collection.

        :param base_url: if provided, the generated URL will be appended to it
        """
        kwargs = self._filter_kwargs(kwargs)
        query = '?%s' % parse.urlencode(kwargs) if kwargs else ''
        return self._list(
            self.build_url(base_url=base_url, **kwargs) + query,
            self.collection_key)

    def put(self, base_url=None, **kwargs):
        """Update an element.

        :param base_url: if provided, the generated URL will be appended to it
        """
        kwargs = self._filter_kwargs(kwargs)
        return self._put(self.build_url(base_url=base_url, **kwargs))

    def update(self, **kwargs):
        kwargs = self._filter_kwargs(kwargs)
        # The id belongs in the URL, not in the PATCH payload.
        params = dict(kwargs)
        params.pop('%s_id' % self.key)
        return self._patch(self.build_url(**kwargs),
                           {self.key: params},
                           self.key)

    def delete(self, **kwargs):
        kwargs = self._filter_kwargs(kwargs)
        return self._delete(self.build_url(**kwargs))

    def find(self, base_url=None, **kwargs):
        """Find a single item with attributes matching ``**kwargs``.

        :param base_url: if provided, the generated URL will be appended to it
        """
        kwargs = self._filter_kwargs(kwargs)
        query = '?%s' % parse.urlencode(kwargs) if kwargs else ''
        matches = self._list(
            self.build_url(base_url=base_url, **kwargs) + query,
            self.collection_key)
        if not matches:
            raise exceptions.NotFound(
                404,
                "No %s matching %s." % (self.resource_class.__name__, kwargs))
        if len(matches) > 1:
            raise exceptions.NoUniqueMatch
        return matches[0]
class Extension(HookableMixin):
    """Extension descriptor."""

    SUPPORTED_HOOKS = ('__pre_parse_args__', '__post_parse_args__')
    manager_class = None

    def __init__(self, name, module):
        super(Extension, self).__init__()
        self.name = name
        self.module = module
        self._parse_extension_module()

    def _parse_extension_module(self):
        """Scan the module for supported hooks and a BaseManager subclass."""
        self.manager_class = None
        for attr_name, attr_value in self.module.__dict__.items():
            if attr_name in self.SUPPORTED_HOOKS:
                self.add_hook(attr_name, attr_value)
                continue
            try:
                is_manager = issubclass(attr_value, BaseManager)
            except TypeError:
                # Not a class at all; ignore.
                continue
            if is_manager:
                self.manager_class = attr_value

    def __repr__(self):
        return "<Extension '%s'>" % self.name
class Resource(object):
    """Base class for OpenStack resources (tenant, user, etc.).

    This is pretty much just a bag for attributes.
    """

    HUMAN_ID = False
    NAME_ATTR = 'name'

    def __init__(self, manager, info, loaded=False):
        """Populate and bind to a manager.

        :param manager: BaseManager object
        :param info: dictionary representing resource attributes
        :param loaded: prevent lazy-loading if set to True
        """
        self.manager = manager
        self._info = info
        self._add_details(info)
        self._loaded = loaded

    def __repr__(self):
        public = sorted(attr for attr in self.__dict__.keys()
                        if attr[0] != '_' and attr != 'manager')
        details = ", ".join("%s=%s" % (attr, getattr(self, attr))
                            for attr in public)
        cls_name = reflection.get_class_name(self, fully_qualified=False)
        return "<%s %s>" % (cls_name, details)

    @property
    def human_id(self):
        """Human-readable ID which can be used for bash completion."""
        if self.HUMAN_ID and self.NAME_ATTR in self.__dict__:
            return strutils.to_slug(getattr(self, self.NAME_ATTR))
        return None

    def _add_details(self, info):
        for (attr, val) in info.items():
            try:
                setattr(self, attr, val)
                self._info[attr] = val
            except AttributeError:
                # In this case we already defined the attribute on the class
                pass

    def __getattr__(self, k):
        if k == "__setstate__":
            raise AttributeError(k)
        if k in self.__dict__:
            return self.__dict__[k]
        # NOTE(bcwaldon): disallow lazy-loading if already loaded once
        if self.is_loaded:
            raise AttributeError(k)
        self._get()
        return self.__getattr__(k)

    def _get(self):
        # set _loaded first ... so if we have to bail, we know we tried.
        self._loaded = True
        if not hasattr(self.manager, 'get'):
            return
        fresh = self.manager.get(self.id)
        if fresh:
            self._add_details(fresh._info)

    def __eq__(self, other):
        if not isinstance(other, Resource):
            return NotImplemented
        # two resources of different types are not equal
        if not isinstance(other, self.__class__):
            return False
        if hasattr(self, 'id') and hasattr(other, 'id'):
            return self.id == other.id
        return self._info == other._info

    @property
    def is_loaded(self):
        return self._loaded

    def to_dict(self):
        return copy.deepcopy(self._info)
| |
# Copyright 2017, David Wilson
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Helper functions intended to be executed on the target. These are entrypoints
for file transfer, module execution and sundry bits like changing file modes.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import errno
import functools
import grp
import json
import logging
import operator
import os
import pwd
import re
import resource
import signal
import stat
import subprocess
import sys
import tempfile
import traceback
import types
import mitogen.core
import mitogen.fork
import mitogen.parent
import mitogen.service
# Ansible since PR #41749 inserts "import __main__" into
# ansible.module_utils.basic. Mitogen's importer will refuse such an import, so
# we must setup a fake "__main__" before that module is ever imported. The
# str() is to cast Unicode to bytes on Python 2.6.
if not sys.modules.get(str('__main__')):
sys.modules[str('__main__')] = types.ModuleType(str('__main__'))
import ansible.module_utils.json_utils
import ansible_mitogen.runner
LOG = logging.getLogger(__name__)

#: Message for the IOError raised by find_good_temp_dir() when no candidate
#: directory is usable. The %(paths)s key must match the dict supplied by
#: find_good_temp_dir() (the original said %(namelist)s, which raised
#: KeyError instead of producing this message).
MAKE_TEMP_FAILED_MSG = (
    "Unable to find a useable temporary directory. This likely means no\n"
    "system-supplied TMP directory can be written to, or all directories\n"
    "were mounted on 'noexec' filesystems.\n"
    "\n"
    "The following paths were tried:\n"
    "    %(paths)s\n"
    "\n"
    "Please check '-vvv' output for a log of individual path errors."
)
#: Initialized to an econtext.parent.Context pointing at a pristine fork of
#: the target Python interpreter before it executes any code or imports.
_fork_parent = None

#: Set by :func:`init_child` to the name of a writeable and executable
#: temporary directory accessible by the active user account.
good_temp_dir = None

# issue #362: subprocess.Popen(close_fds=True) aka. AnsibleModule.run_command()
# loops the entire SC_OPEN_MAX space. CentOS>5 ships with 1,048,576 FDs by
# default, resulting in huge (>500ms) runtime waste running many commands.
# Therefore if we are a child, cap the range to something reasonable.
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
if (rlimit[0] > 512 or rlimit[1] > 512) and not mitogen.is_master:
    resource.setrlimit(resource.RLIMIT_NOFILE, (512, 512))
    subprocess.MAXFD = 512  # Python <3.x consults this when closing FDs.
del rlimit
def get_small_file(context, path):
    """
    Fetch a small file through the in-memory PushFileService cache. Every
    previously unseen file costs one round-trip, so this is only a temporary
    solution.

    :param context:
        Context to direct FileService requests to. For now (and probably
        forever) this is just the top-level Mitogen connection manager
        process.
    :param path:
        Path to fetch from FileService, must previously have been registered
        by a privileged context using the `register` command.
    :returns:
        Bytestring file data.
    """
    pool = mitogen.service.get_or_create_pool(router=context.router)
    file_service = pool.get_service('mitogen.service.PushFileService')
    return file_service.get(path)
def transfer_file(context, in_path, out_path, sync=False, set_owner=False):
    """
    Streamily download a file from the connection multiplexer process in the
    controller, writing to a temporary file that is atomically renamed over
    `out_path` on success.

    :param mitogen.core.Context context:
        Reference to the context hosting the FileService that will be used to
        fetch the file.
    :param bytes in_path:
        FileService registered name of the input file.
    :param bytes out_path:
        Name of the output path on the local disk.
    :param bool sync:
        If :data:`True`, ensure the file content and metadata are fully on
        disk before renaming the temporary file over the existing file. This
        should ensure in the case of system crash, either the entire old or
        new file are visible post-reboot.
    :param bool set_owner:
        If :data:`True`, look up the metadata username and group on the local
        system and set the file owner using :func:`os.fchown`.
    """
    out_path = os.path.abspath(out_path)
    fd, tmp_path = tempfile.mkstemp(suffix='.tmp',
                                    prefix='.ansible_mitogen_transfer-',
                                    dir=os.path.dirname(out_path))
    fp = os.fdopen(fd, 'wb', mitogen.core.CHUNK_SIZE)
    LOG.debug('transfer_file(%r) temporary file: %s', out_path, tmp_path)

    try:
        try:
            ok, metadata = mitogen.service.FileService.get(
                context=context,
                path=in_path,
                out_fp=fp,
            )
            if not ok:
                raise IOError('transfer of %r was interrupted.' % (in_path,))

            os.fchmod(fp.fileno(), metadata['mode'])
            if set_owner:
                set_fd_owner(fp.fileno(), metadata['owner'], metadata['group'])
            if sync:
                # fsync() must run while the descriptor is still open; the
                # original called it after fp.close(), which raises on a
                # closed file. Flush Python buffers first so the kernel sees
                # all the data.
                fp.flush()
                os.fsync(fp.fileno())
        finally:
            fp.close()

        os.rename(tmp_path, out_path)
    except BaseException:
        os.unlink(tmp_path)
        raise

    os.utime(out_path, (metadata['atime'], metadata['mtime']))
def prune_tree(path):
    """
    Recursively delete `path`, like shutil.rmtree(), but log errors rather
    than discarding them, and avoid wasting os.stat() calls discovering
    whether each object is deletable -- simply attempt the deletion and react
    to failure.
    """
    try:
        os.unlink(path)
        return
    except OSError as e:
        # Anything other than "it's a directory" is a genuine failure.
        if not (os.path.isdir(path) and
                e.args[0] in (errno.EPERM, errno.EISDIR)):
            LOG.error('prune_tree(%r): %s', path, e)
            return

    try:
        # Ensure write access for readonly directories. Ignore error in case
        # path is on a weird filesystem (e.g. vfat).
        os.chmod(path, int('0700', 8))
    except OSError as e:
        LOG.warning('prune_tree(%r): %s', path, e)

    try:
        for entry in os.listdir(path):
            if entry not in ('.', '..'):
                prune_tree(os.path.join(path, entry))
        os.rmdir(path)
    except OSError as e:
        LOG.error('prune_tree(%r): %s', path, e)
def _on_broker_shutdown():
    """
    Respond to broker shutdown (graceful termination by parent, or loss of
    connection to parent) by deleting our sole temporary directory.
    """
    # NOTE(review): `temp_dir` is not defined anywhere in this module as
    # visible here -- only `good_temp_dir` is. Confirm the global still
    # exists elsewhere, otherwise this handler raises NameError at shutdown.
    prune_tree(temp_dir)
def is_good_temp_dir(path):
    """
    Return :data:`True` if `path` can be used as a temporary directory, logging
    any failures that may cause it to be unsuitable. If the directory doesn't
    exist, we attempt to create it using :func:`os.makedirs`.
    """
    if not os.path.exists(path):
        try:
            os.makedirs(path, mode=int('0700', 8))
        except OSError as e:
            LOG.debug('temp dir %r unusable: did not exist and attempting '
                      'to create it failed: %s', path, e)
            return False

    try:
        tmp = tempfile.NamedTemporaryFile(
            prefix='ansible_mitogen_is_good_temp_dir',
            dir=path,
        )
    except (OSError, IOError) as e:
        LOG.debug('temp dir %r unusable: %s', path, e)
        return False

    try:
        try:
            os.chmod(tmp.name, int('0700', 8))
        except OSError as e:
            # Exactly two conversions for two arguments; the original format
            # strings here had three conversions, so logging raised a
            # formatting error instead of emitting the debug message.
            LOG.debug('temp dir %r unusable: chmod failed: %s', path, e)
            return False

        try:
            # access(.., X_OK) is sufficient to detect noexec.
            if not os.access(tmp.name, os.X_OK):
                raise OSError('filesystem appears to be mounted noexec')
        except OSError as e:
            LOG.debug('temp dir %r unusable: %s', path, e)
            return False
    finally:
        tmp.close()

    return True
def find_good_temp_dir(candidate_temp_dirs):
    """
    Combine the candidate temp directories extracted from ``ansible.cfg``
    with Python's built-in candidate list from :mod:`tempfile`, then try each
    in turn until one proves both writeable and executable.

    :param list candidate_temp_dirs:
        List of candidate $variable-expanded and tilde-expanded directory
        paths that may be usable as a temporary directory.
    """
    candidates = [os.path.expandvars(os.path.expanduser(d))
                  for d in candidate_temp_dirs]
    candidates.extend(tempfile._candidate_tempdir_list())

    for candidate in candidates:
        if is_good_temp_dir(candidate):
            LOG.debug('Selected temp directory: %r (from %r)',
                      candidate, candidates)
            return candidate

    raise IOError(MAKE_TEMP_FAILED_MSG % {
        'paths': '\n '.join(candidates),
    })
@mitogen.core.takes_econtext
def init_child(econtext, log_level, candidate_temp_dirs):
    """
    Called by ContextService immediately after connection; arranges for the
    (presently) spotless Python interpreter to be forked, where the newly
    forked interpreter becomes the parent of any newly forked future
    interpreters.

    This is necessary to prevent modules that are executed in-process from
    polluting the global interpreter state in a way that affects explicitly
    isolated modules.

    :param int log_level:
        Logging package level active in the master.
    :param list[str] candidate_temp_dirs:
        List of $variable-expanded and tilde-expanded directory names to add
        to the candidate list of temporary directories.
    :returns:
        Dict like::

            {
                'fork_context': mitogen.core.Context.
                'home_dir': str.
            }

        Where `fork_context` refers to the newly forked 'fork parent' context
        the controller will use to start forked jobs, and `home_dir` is the
        home directory for the active user account.
    """
    # Copying the master's log level causes log messages to be filtered before
    # they reach LogForwarder, thus reducing an influx of tiny messages waking
    # the connection multiplexer process in the master.
    LOG.setLevel(log_level)
    logging.getLogger('ansible_mitogen').setLevel(log_level)

    # Fork the still-pristine interpreter before any module code can run.
    global _fork_parent
    mitogen.parent.upgrade_router(econtext)
    _fork_parent = econtext.router.fork()

    # Locate a writeable and executable temp directory once per connection.
    global good_temp_dir
    good_temp_dir = find_good_temp_dir(candidate_temp_dirs)

    return {
        'fork_context': _fork_parent,
        'home_dir': mitogen.core.to_text(os.path.expanduser('~')),
        'good_temp_dir': good_temp_dir,
    }
@mitogen.core.takes_econtext
def create_fork_child(econtext):
    """
    For helper functions executed in the fork parent context: upgrade the
    context's router if necessary, fork a new child of this context, and
    return it.
    """
    mitogen.parent.upgrade_router(econtext)
    child = econtext.router.fork()
    LOG.debug('create_fork_child() -> %r', child)
    return child
def run_module(kwargs):
    """
    Set up the process environment and execute an Ansible module via the
    runner class named by ``kwargs['runner_name']``. The runner
    monkey-patches the Ansible libraries in various places to prevent it from
    trying to kill the process on completion, and to prevent it from reading
    sys.stdin.
    """
    runner_cls = getattr(ansible_mitogen.runner, kwargs.pop('runner_name'))
    return runner_cls(**kwargs).run()
def _get_async_dir():
    """
    Return the directory holding async job status files, honouring the
    ANSIBLE_ASYNC_DIR environment variable with ~/.ansible_async as default.
    """
    configured = os.environ.get('ANSIBLE_ASYNC_DIR', '~/.ansible_async')
    return os.path.expanduser(configured)
class AsyncRunner(object):
    """
    Execute one Ansible module as an asynchronous job: maintain the status
    file consumed by Ansible's async_status machinery, and enforce an
    optional wall-clock time limit via SIGALRM.
    """
    def __init__(self, job_id, timeout_secs, econtext, kwargs):
        """
        :param str job_id: Job ID; also names the status file.
        :param int timeout_secs: If >0, maximum runtime before SIGALRM fires.
        :param econtext: External context whose broker is shut down when the
            job completes or times out.
        :param dict kwargs: Keyword arguments forwarded to run_module().
        """
        self.job_id = job_id
        self.timeout_secs = timeout_secs
        self.econtext = econtext
        self.kwargs = kwargs
        # Set by _on_sigalrm() so _run() does not overwrite the timeout
        # status after the alarm has already recorded it.
        self._timed_out = False
        self._init_path()

    def _init_path(self):
        # Compute self.path, creating the async status directory on demand.
        async_dir = _get_async_dir()
        if not os.path.exists(async_dir):
            os.makedirs(async_dir)
        self.path = os.path.join(async_dir, self.job_id)

    def _update(self, dct):
        """
        Update an async job status file.
        """
        LOG.info('%r._update(%r, %r)', self, self.job_id, dct)
        dct.setdefault('ansible_job_id', self.job_id)
        dct.setdefault('data', '')
        # Write to a sibling .tmp file then rename, so readers never see a
        # partially written status file.
        with open(self.path + '.tmp', 'w') as fp:
            fp.write(json.dumps(dct))
        os.rename(self.path + '.tmp', self.path)

    def _on_sigalrm(self, signum, frame):
        """
        Respond to SIGALRM (job timeout) by updating the job file and killing
        the process.
        """
        msg = "Job reached maximum time limit of %d seconds." % (
            self.timeout_secs,
        )
        self._update({
            "failed": 1,
            "finished": 1,
            "msg": msg,
        })
        # Record the timeout before triggering shutdown; _run() checks this
        # flag to avoid clobbering the status file.
        self._timed_out = True
        self.econtext.broker.shutdown()

    def _install_alarm(self):
        # Arm a one-shot process alarm that fires _on_sigalrm().
        signal.signal(signal.SIGALRM, self._on_sigalrm)
        signal.alarm(self.timeout_secs)

    def _run_module(self):
        # Run detached so the module cannot take down this process directly.
        kwargs = dict(self.kwargs, **{
            'detach': True,
            'econtext': self.econtext,
            'emulate_tty': False,
        })
        return run_module(kwargs)

    def _parse_result(self, dct):
        # Strip any non-JSON noise the module printed around its result,
        # then persist the parsed result plus captured stderr.
        filtered, warnings = (
            ansible.module_utils.json_utils.
            _filter_non_json_lines(dct['stdout'])
        )
        result = json.loads(filtered)
        result.setdefault('warnings', []).extend(warnings)
        result['stderr'] = dct['stderr']
        self._update(result)

    def _run(self):
        """
        1. Immediately updates the status file to mark the job as started.
        2. Installs a timer/signal handler to implement the time limit.
        3. Runs as with run_module(), writing the result to the status file.
        """
        self._update({
            'started': 1,
            'finished': 0,
            'pid': os.getpid()
        })
        if self.timeout_secs > 0:
            self._install_alarm()

        dct = self._run_module()
        if not self._timed_out:
            # After SIGALRM fires, there is a window between broker responding
            # to shutdown() by killing the process, and work continuing on the
            # main thread. If main thread was asleep in at least
            # basic.py/select.select(), an EINTR will be raised. We want to
            # discard that exception.
            try:
                self._parse_result(dct)
            except Exception:
                self._update({
                    "failed": 1,
                    "msg": traceback.format_exc(),
                    "data": dct['stdout'],  # temporary notice only
                    "stderr": dct['stderr']
                })

    def run(self):
        # Entry point: record any unexpected failure in the status file, and
        # always shut the broker down so the forked process exits.
        try:
            try:
                self._run()
            except Exception:
                self._update({
                    "failed": 1,
                    "msg": traceback.format_exc(),
                })
        finally:
            self.econtext.broker.shutdown()
@mitogen.core.takes_econtext
def run_module_async(kwargs, job_id, timeout_secs, econtext):
    """
    Execute a module with its run status and result written to a file,
    terminating the process on completion. This function must run in a child
    forked using :func:`create_fork_child`.
    """
    AsyncRunner(job_id, timeout_secs, econtext, kwargs).run()
def get_user_shell():
    """
    Return the active user's login shell, falling back to /bin/sh when the
    password database entry is missing or its shell field is empty -- the
    same policy SSH applies for directly executed command lines.
    """
    try:
        shell = pwd.getpwuid(os.geteuid()).pw_shell
    except KeyError:
        shell = None
    if shell:
        return shell
    return '/bin/sh'
def exec_args(args, in_data='', chdir=None, shell=None, emulate_tty=False):
    """
    Run an argument vector in a subprocess, emulating the argument handling
    behaviour of SSH.

    :param list[str]:
        Argument vector.
    :param bytes in_data:
        Optional standard input for the command.
    :param bool emulate_tty:
        If :data:`True`, arrange for stdout and stderr to be merged into the
        stdout pipe and for LF to be translated into CRLF, emulating the
        behaviour of a TTY.
    :return:
        (return code, stdout bytes, stderr bytes)
    """
    LOG.debug('exec_args(%r, ..., chdir=%r)', args, chdir)
    assert isinstance(args, list)

    stderr_dest = subprocess.STDOUT if emulate_tty else subprocess.PIPE
    proc = subprocess.Popen(
        args=args,
        stdout=subprocess.PIPE,
        stderr=stderr_dest,
        stdin=subprocess.PIPE,
        cwd=chdir,
    )
    stdout, stderr = proc.communicate(in_data)

    if emulate_tty:
        stdout = stdout.replace(b'\n', b'\r\n')
    return proc.returncode, stdout, stderr or ''
def exec_command(cmd, in_data='', chdir=None, shell=None, emulate_tty=False):
    """
    Run a shell command line in a subprocess via the user's login shell,
    emulating the argument handling behaviour of SSH.

    :param bytes cmd:
        String command line, passed to user's shell.
    :param bytes in_data:
        Optional standard input for the command.
    :return:
        (return code, stdout bytes, stderr bytes)
    """
    assert isinstance(cmd, mitogen.core.UnicodeType)
    argv = [get_user_shell(), '-c', cmd]
    return exec_args(
        args=argv,
        in_data=in_data,
        chdir=chdir,
        shell=shell,
        emulate_tty=emulate_tty,
    )
def read_path(path):
    """
    Fetch the contents of a filesystem `path` as bytes.

    :param path: Path of the file to read.
    :returns: File content as a bytestring.
    """
    # Use a context manager so the descriptor is closed deterministically
    # rather than leaking until garbage collection.
    with open(path, 'rb') as fp:
        return fp.read()
def set_fd_owner(fd, owner, group=None):
    """
    Change the owner and group of the file referenced by descriptor `fd`.

    :param int fd: Open file descriptor.
    :param str owner: Username to own the file; falsy means the effective
        UID of the current process.
    :param str group: Group name for the file; falsy means the effective
        GID of the current process.
    """
    if owner:
        uid = pwd.getpwnam(owner).pw_uid
    else:
        uid = os.geteuid()

    if group:
        gid = grp.getgrnam(group).gr_gid
    else:
        gid = os.getegid()

    # os.fchown() takes uid and gid as separate positional arguments; the
    # original passed a (uid, gid) tuple, raising TypeError on every call.
    os.fchown(fd, uid, gid)
def write_path(path, s, owner=None, group=None, mode=None,
               utimes=None, sync=False):
    """
    Write bytes `s` to filesystem `path` atomically, via a temporary file in
    the same directory renamed over any existing file.

    :param path: Destination path.
    :param bytes s: Data to write.
    :param str owner: Optional owner username, applied via set_fd_owner().
    :param str group: Optional group name, applied via set_fd_owner().
    :param int mode: Optional numeric permission bits for the new file.
    :param tuple utimes: Optional (atime, mtime) pair applied after rename.
    :param bool sync: If :data:`True`, fsync() the content before renaming,
        so a crash leaves either the whole old or whole new file visible.
    """
    path = os.path.abspath(path)
    fd, tmp_path = tempfile.mkstemp(suffix='.tmp',
                                    prefix='.ansible_mitogen_transfer-',
                                    dir=os.path.dirname(path))
    fp = os.fdopen(fd, 'wb', mitogen.core.CHUNK_SIZE)
    LOG.debug('write_path(path=%r) temporary file: %s', path, tmp_path)

    try:
        try:
            if mode:
                os.fchmod(fp.fileno(), mode)
            if owner or group:
                set_fd_owner(fp.fileno(), owner, group)
            fp.write(s)
            if sync:
                # fsync() must run while the descriptor is still open; the
                # original called it after fp.close(), which raises on a
                # closed file. Flush Python buffers first.
                fp.flush()
                os.fsync(fp.fileno())
        finally:
            fp.close()

        os.rename(tmp_path, path)
    except BaseException:
        os.unlink(tmp_path)
        raise

    if utimes:
        os.utime(path, utimes)
# Matches one clause of a chmod(1)-style symbolic mode specification:
# optional "who" classes, one operator, then permission letters.
CHMOD_CLAUSE_PAT = re.compile(r'([uoga]*)([+\-=])([ugo]|[rwx]*)')

# Complete permission-bit mask for each "who" class.
CHMOD_MASKS = {
    'u': stat.S_IRWXU,
    'g': stat.S_IRWXG,
    'o': stat.S_IRWXO,
    'a': (stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO),
}

# Individual r/w/x permission bits per "who" class; 'a' combines all three.
CHMOD_BITS = {
    'u': {'r': stat.S_IRUSR, 'w': stat.S_IWUSR, 'x': stat.S_IXUSR},
    'g': {'r': stat.S_IRGRP, 'w': stat.S_IWGRP, 'x': stat.S_IXGRP},
    'o': {'r': stat.S_IROTH, 'w': stat.S_IWOTH, 'x': stat.S_IXOTH},
    'a': {
        'r': (stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH),
        'w': (stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH),
        'x': (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    }
}
def apply_mode_spec(spec, mode):
    """
    Given a symbolic file mode change specification in the style of chmod(1)
    `spec`, apply changes in the specification to the numeric file mode `mode`
    and return the result.
    """
    # Clauses are comma-separated, e.g. "u+rwx,go-w".
    for clause in spec.split(','):
        match = CHMOD_CLAUSE_PAT.match(clause)
        who, op, perms = match.groups()
        # An empty "who" applies the clause to all classes, as in "chmod +x".
        for ch in who or 'a':
            mask = CHMOD_MASKS[ch]
            bits = CHMOD_BITS[ch]
            # Permission bits currently set for this class.
            cur_perm_bits = mode & mask
            # Permission bits requested for this class.
            # NOTE(review): copy-style clauses such as "g=u" would KeyError
            # here, since CHMOD_BITS maps only r/w/x even though the regex
            # accepts [ugo] -- confirm whether those specs must be supported.
            new_perm_bits = functools.reduce(operator.or_, (bits[p] for p in perms), 0)
            # Clear this class's bits, then re-add according to the operator:
            # '=' sets exactly the new bits, '+' keeps old and adds new,
            # '-' keeps the old bits minus the new ones.
            mode &= ~mask
            if op == '=':
                mode |= new_perm_bits
            elif op == '+':
                mode |= new_perm_bits | cur_perm_bits
            else:
                mode |= cur_perm_bits & ~new_perm_bits
    return mode
def set_file_mode(path, spec):
    """
    Update the permissions of a file using the same syntax as chmod(1):
    either an octal digit string or a symbolic specification applied to the
    file's current mode.
    """
    current = os.stat(path).st_mode
    if spec.isdigit():
        os.chmod(path, int(spec, 8))
    else:
        os.chmod(path, apply_mode_spec(spec, current))
| |
# -*- coding: utf-8 -*-
"""
Test pylti/test_flask.py module
"""
from __future__ import absolute_import
import unittest
import httpretty
import mock
import oauthlib.oauth1
from six.moves.urllib.parse import urlencode
from pylti.common import LTIException
from pylti.flask import LTI
from pylti.tests.test_flask_app import app_exception, app
class TestFlask(unittest.TestCase):
"""
Consumers.
"""
# pylint: disable=too-many-public-methods
consumers = {
"__consumer_key__": {"secret": "__lti_secret__"}
}
# Valid XML response from LTI 1.0 consumer
expected_response = """<?xml version="1.0" encoding="UTF-8"?>
<imsx_POXEnvelopeResponse xmlns = "http://www.imsglobal.org/services/ltiv1p1\
/xsd/imsoms_v1p0">
<imsx_POXHeader>
<imsx_POXResponseHeaderInfo>
<imsx_version>V1.0</imsx_version>
<imsx_messageIdentifier>edX_fix</imsx_messageIdentifier>
<imsx_statusInfo>
<imsx_codeMajor>success</imsx_codeMajor>
<imsx_severity>status</imsx_severity>
<imsx_description>Score for StarX/StarX_DEMO/201X_StarX:\
edge.edx.org-i4x-StarX-StarX_DEMO-lti-40559041895b4065b2818c23b9cd9da8\
:18b71d3c46cb4dbe66a7c950d88e78ec is now 0.0</imsx_description>
<imsx_messageRefIdentifier>
</imsx_messageRefIdentifier>
</imsx_statusInfo>
</imsx_POXResponseHeaderInfo>
</imsx_POXHeader>
<imsx_POXBody><replaceResultResponse/></imsx_POXBody>
</imsx_POXEnvelopeResponse>
"""
def setUp(self):
"""
Setting up app config.
"""
app.config['TESTING'] = True
app.config['SERVER_NAME'] = 'localhost'
app.config['WTF_CSRF_ENABLED'] = False
app.config['SECRET_KEY'] = 'you-will-never-guess'
app.config['PYLTI_CONFIG'] = {'consumers': self.consumers}
app.config['PYLTI_URL_FIX'] = {
"https://localhost:8000/": {
"https://localhost:8000/": "http://localhost:8000/"
}
}
self.app = app.test_client()
app_exception.reset()
@staticmethod
def get_exception():
"""
Returns exception raised by PyLTI.
:return: exception
"""
return app_exception.get()
@staticmethod
def has_exception():
"""
Check if PyLTI raised exception.
:return: is exception raised
"""
return app_exception.get() is not None
@staticmethod
def get_exception_as_string():
"""
Return text of the exception raised by LTI.
:return: text
"""
return "{}".format(TestFlask.get_exception())
def test_access_to_oauth_resource_unknown_protection(self):
"""
Invalid LTI request scope.
"""
self.app.get('/unknown_protection')
self.assertTrue(self.has_exception())
self.assertIsInstance(self.get_exception(), LTIException)
self.assertEqual(self.get_exception_as_string(),
'Unknown request type')
def test_access_to_oauth_resource_without_authorization_any(self):
"""
Accessing LTI without establishing session.
"""
self.app.get('/any')
self.assertTrue(self.has_exception())
self.assertIsInstance(self.get_exception(), LTIException)
self.assertEqual(self.get_exception_as_string(),
'Session expired or unavailable')
def test_access_to_oauth_resource_without_authorization_session(self):
"""
Accessing LTI session scope before session established.
"""
self.app.get('/session')
self.assertTrue(self.has_exception())
self.assertIsInstance(self.get_exception(), LTIException)
self.assertEqual(self.get_exception_as_string(),
'Session expired or unavailable')
def test_access_to_oauth_resource_without_authorization_initial_get(self):
"""
Accessing LTI without basic-lti-launch-request parameters as GET.
"""
self.app.get('/initial')
self.assertTrue(self.has_exception())
self.assertIsInstance(self.get_exception(), LTIException)
self.assertEqual(self.get_exception_as_string(),
'This page requires a valid oauth session or request')
def test_access_to_oauth_resource_without_authorization_initial_post(self):
"""
Accessing LTI without basic-lti-launch-request parameters as POST.
"""
self.app.post('/initial')
self.assertTrue(self.has_exception())
self.assertIsInstance(self.get_exception(), LTIException)
self.assertEqual(self.get_exception_as_string(),
'This page requires a valid oauth session or request')
def test_access_to_oauth_resource_in_session(self):
"""
Accessing LTI after session established.
"""
self.app.get('/setup_session')
self.app.get('/session')
self.assertFalse(self.has_exception())
def test_access_to_oauth_resource_in_session_with_close(self):
"""
Accessing LTI after session closed.
"""
self.app.get('/setup_session')
self.app.get('/session')
self.assertFalse(self.has_exception())
self.app.get('/close_session')
self.app.get('/session')
self.assertTrue(self.has_exception())
def test_access_to_oauth_resource(self):
"""
Accessing oauth_resource.
"""
consumers = self.consumers
url = 'http://localhost/initial?'
new_url = self.generate_launch_request(consumers, url)
self.app.get(new_url)
self.assertFalse(self.has_exception())
def test_access_to_oauth_resource_name_passed(self):
"""
Check that name is returned if passed via initial request.
"""
# pylint: disable=maybe-no-member
consumers = self.consumers
url = 'http://localhost/name?'
add_params = {u'lis_person_sourcedid': u'person'}
new_url = self.generate_launch_request(
consumers, url, add_params=add_params
)
ret = self.app.get(new_url)
self.assertFalse(self.has_exception())
self.assertEqual(ret.data.decode('utf-8'), u'person')
def test_access_to_oauth_resource_email_passed(self):
"""
Check that email is returned if passed via initial request.
"""
# pylint: disable=maybe-no-member
consumers = self.consumers
url = 'http://localhost/name?'
add_params = {u'lis_person_contact_email_primary': u'email@email.com'}
new_url = self.generate_launch_request(
consumers, url, add_params=add_params
)
ret = self.app.get(new_url)
self.assertFalse(self.has_exception())
self.assertEqual(ret.data.decode('utf-8'), u'email@email.com')
def test_access_to_oauth_resource_name_and_email_passed(self):
"""
Check that name is returned if both email and name passed.
"""
# pylint: disable=maybe-no-member
consumers = self.consumers
url = 'http://localhost/name?'
add_params = {u'lis_person_sourcedid': u'person',
u'lis_person_contact_email_primary': u'email@email.com'}
new_url = self.generate_launch_request(
consumers, url, add_params=add_params
)
ret = self.app.get(new_url)
self.assertFalse(self.has_exception())
self.assertEqual(ret.data.decode('utf-8'), u'person')
def test_access_to_oauth_resource_staff_only_as_student(self):
"""
Deny access if user not in role.
"""
consumers = self.consumers
url = 'http://localhost/initial_staff?'
student_url = self.generate_launch_request(
consumers, url, roles='Student'
)
self.app.get(student_url)
self.assertTrue(self.has_exception())
learner_url = self.generate_launch_request(
consumers, url, roles='Learner'
)
self.app.get(learner_url)
self.assertTrue(self.has_exception())
def test_access_to_oauth_resource_staff_only_as_administrator(self):
"""
Allow access if user in role.
"""
consumers = self.consumers
url = 'http://localhost/initial_staff?'
new_url = self.generate_launch_request(
consumers, url, roles='Administrator'
)
self.app.get(new_url)
self.assertFalse(self.has_exception())
def test_access_to_oauth_resource_staff_only_as_unknown_role(self):
"""
Deny access if role not defined.
"""
consumers = self.consumers
url = 'http://localhost/initial_staff?'
admin_url = self.generate_launch_request(
consumers, url, roles='Foo'
)
self.app.get(admin_url)
self.assertTrue(self.has_exception())
def test_access_to_oauth_resource_student_as_student(self):
"""
Verify that the various roles we consider as students are students.
"""
consumers = self.consumers
url = 'http://localhost/initial_student?'
# Learner Role
learner_url = self.generate_launch_request(
consumers, url, roles='Learner'
)
self.app.get(learner_url)
self.assertFalse(self.has_exception())
student_url = self.generate_launch_request(
consumers, url, roles='Student'
)
self.app.get(student_url)
self.assertFalse(self.has_exception())
def test_access_to_oauth_resource_student_as_staff(self):
"""Verify staff doesn't have access to student only."""
consumers = self.consumers
url = 'http://localhost/initial_student?'
staff_url = self.generate_launch_request(
consumers, url, roles='Instructor'
)
self.app.get(staff_url)
self.assertTrue(self.has_exception())
def test_access_to_oauth_resource_student_as_unknown(self):
"""Verify staff doesn't have access to student only."""
consumers = self.consumers
url = 'http://localhost/initial_student?'
unknown_url = self.generate_launch_request(
consumers, url, roles='FooBar'
)
self.app.get(unknown_url)
self.assertTrue(self.has_exception())
    @staticmethod
    def generate_launch_request(consumers, url,
                                lit_outcome_service_url=None,
                                roles=u'Instructor',
                                add_params=None,
                                include_lti_message_type=False):
        """
        Generate a valid basic-lti-launch-request with options, signed using
        the module-level test consumer key/secret via OAuth1 query-string
        signing.

        :param consumers: consumer map (unused here; signing uses the fixed
            test key/secret)
        :param url: URL to sign
        :param lit_outcome_service_url: LTI callback override
            (NOTE(review): parameter name looks like a typo for
            ``lis_outcome_service_url``, but it is public API -- confirm
            before renaming)
        :param roles: LTI role; None omits the ``roles`` parameter entirely
        :param add_params: extra launch parameters merged into the request
        :param include_lti_message_type: if True, include the
            ``lti_message_type`` launch parameter
        :return: signed request URL, relative to http://localhost
        """
        # pylint: disable=unused-argument, too-many-arguments
        params = {'resource_link_id': u'edge.edx.org-i4x-MITx-ODL_ENG-lti-'
                                      u'94173d3e79d145fd8ec2e83f15836ac8',
                  'user_id': u'008437924c9852377e8994829aaac7a1',
                  'lis_result_sourcedid': u'MITx/ODL_ENG/2014_T1:'
                                          u'edge.edx.org-i4x-MITx-ODL_ENG-lti-'
                                          u'94173d3e79d145fd8ec2e83f15836ac8:'
                                          u'008437924c9852377e8994829aaac7a1',
                  'context_id': u'MITx/ODL_ENG/2014_T1',
                  'lti_version': u'LTI-1p0',
                  'launch_presentation_return_url': u'',
                  'lis_outcome_service_url': (lit_outcome_service_url or
                                              u'https://example.edu/'
                                              u'courses/MITx/ODL_ENG/'
                                              u'2014_T1/xblock/i4x:;_;'
                                              u'_MITx;_ODL_ENG;_lti;'
                                              u'_94173d3e79d145fd8ec2e'
                                              u'83f15836ac8'
                                              u'/handler_noauth/'
                                              u'grade_handler')}

        if include_lti_message_type:
            params['lti_message_type'] = u'basic-lti-launch-request'

        if roles is not None:
            params['roles'] = roles

        if add_params is not None:
            params.update(add_params)

        urlparams = urlencode(params)

        # Sign with HMAC-SHA1 and place the OAuth parameters in the query
        # string, matching what an LTI consumer sends on launch.
        client = oauthlib.oauth1.Client('__consumer_key__',
                                        client_secret='__lti_secret__',
                                        signature_method=oauthlib.oauth1.
                                        SIGNATURE_HMAC,
                                        signature_type=oauthlib.oauth1.
                                        SIGNATURE_TYPE_QUERY)
        signature = client.sign("{}{}".format(url, urlparams))
        signed_url = signature[0]
        # Strip the scheme+host so the test client can request the path.
        new_url = signed_url[len('http://localhost'):]
        return new_url
def test_access_to_oauth_resource_any(self):
"""
Test access to LTI protected resources.
"""
url = 'http://localhost/any?'
new_url = self.generate_launch_request(self.consumers, url)
self.app.post(new_url)
self.assertFalse(self.has_exception())
def test_access_to_oauth_resource_any_norole(self):
"""
Test access to LTI protected resources.
"""
url = 'http://localhost/any?'
new_url = self.generate_launch_request(self.consumers, url, roles=None)
self.app.post(new_url)
self.assertFalse(self.has_exception())
def test_access_to_oauth_resource_any_nonstandard_role(self):
"""
Test access to LTI protected resources.
"""
url = 'http://localhost/any?'
new_url = self.generate_launch_request(self.consumers, url,
roles=u'ThisIsNotAStandardRole')
self.app.post(new_url)
self.assertFalse(self.has_exception())
def test_access_to_oauth_resource_invalid(self):
"""
Deny access to LTI protected resources
on man in the middle attack.
"""
url = 'http://localhost/initial?'
new_url = self.generate_launch_request(self.consumers, url)
self.app.get("{}&FAIL=TRUE".format(new_url))
self.assertTrue(self.has_exception())
self.assertIsInstance(self.get_exception(), LTIException)
self.assertEqual(self.get_exception_as_string(),
'OAuth error: Please check your key and secret')
def test_access_to_oauth_resource_invalid_after_session_setup(self):
"""
Remove browser session on man in the middle attach.
"""
self.app.get('/setup_session')
self.app.get('/session')
self.assertFalse(self.has_exception())
url = 'http://localhost/initial?'
new_url = self.generate_launch_request(self.consumers, url)
self.app.get("{}&FAIL=TRUE".format(new_url))
self.assertTrue(self.has_exception())
self.assertIsInstance(self.get_exception(), LTIException)
self.assertEqual(self.get_exception_as_string(),
'OAuth error: Please check your key and secret')
@httpretty.activate
def test_access_to_oauth_resource_post_grade(self):
"""
Check post_grade functionality.
"""
# pylint: disable=maybe-no-member
uri = (u'https://example.edu/courses/MITx/ODL_ENG/2014_T1/xblock/'
u'i4x:;_;_MITx;_ODL_ENG;_lti;'
u'_94173d3e79d145fd8ec2e83f15836ac8/handler_noauth'
u'/grade_handler')
httpretty.register_uri(httpretty.POST, uri, body=self.request_callback)
consumers = self.consumers
url = 'http://localhost/initial?'
new_url = self.generate_launch_request(consumers, url)
ret = self.app.get(new_url)
self.assertFalse(self.has_exception())
ret = self.app.get("/post_grade/1.0")
self.assertFalse(self.has_exception())
self.assertEqual(ret.data.decode('utf-8'), "grade=True")
ret = self.app.get("/post_grade/2.0")
self.assertFalse(self.has_exception())
self.assertEqual(ret.data.decode('utf-8'), "grade=False")
@httpretty.activate
def test_access_to_oauth_resource_post_grade_fail(self):
    """
    Check post_grade functionality fails on invalid response.
    """
    # pylint: disable=maybe-no-member
    uri = (u'https://example.edu/courses/MITx/ODL_ENG/2014_T1/xblock/'
           u'i4x:;_;_MITx;_ODL_ENG;_lti;'
           u'_94173d3e79d145fd8ec2e83f15836ac8/handler_noauth'
           u'/grade_handler')

    def request_callback(request, cburi, headers):
        # pylint: disable=unused-argument
        """
        Mock error response callback.
        """
        # A body the grade handler cannot parse, so post_grade fails.
        return 200, headers, "wrong_response"

    httpretty.register_uri(httpretty.POST, uri, body=request_callback)
    consumers = self.consumers
    url = 'http://localhost/initial?'
    new_url = self.generate_launch_request(consumers, url)
    ret = self.app.get(new_url)
    # (A duplicated has_exception() assertion was removed here.)
    self.assertFalse(self.has_exception())
    ret = self.app.get("/post_grade/1.0")
    self.assertTrue(self.has_exception())
    self.assertEqual(ret.data.decode('utf-8'), "error")
@httpretty.activate
def test_access_to_oauth_resource_post_grade_fix_url(self):
    """
    Make sure URL remap works for edX vagrant stack.
    """
    # pylint: disable=maybe-no-member
    # Grade callback URL pointing at the local dev stack; post_grade is
    # expected to remap it before POSTing.
    uri = 'https://localhost:8000/dev_stack'
    httpretty.register_uri(httpretty.POST, uri, body=self.request_callback)
    url = 'http://localhost/initial?'
    # NOTE(review): 'lit_outcome_service_url' looks like a typo for
    # 'lti_outcome_service_url' -- confirm against
    # generate_launch_request's signature before renaming.
    new_url = self.generate_launch_request(
        self.consumers, url, lit_outcome_service_url=uri
    )
    ret = self.app.get(new_url)
    self.assertFalse(self.has_exception())
    ret = self.app.get("/post_grade/1.0")
    self.assertFalse(self.has_exception())
    self.assertEqual(ret.data.decode('utf-8'), "grade=True")
    ret = self.app.get("/post_grade/2.0")
    self.assertFalse(self.has_exception())
    self.assertEqual(ret.data.decode('utf-8'), "grade=False")
@httpretty.activate
def test_access_to_oauth_resource_post_grade2(self):
    """
    Check post_grade edX LTI2 functionality.
    """
    uri = (u'https://example.edu/courses/MITx/ODL_ENG/2014_T1/xblock/'
           u'i4x:;_;_MITx;_ODL_ENG;_lti;'
           u'_94173d3e79d145fd8ec2e83f15836ac8/handler_noauth'
           u'/lti_2_0_result_rest_handler/user/'
           u'008437924c9852377e8994829aaac7a1')
    httpretty.register_uri(httpretty.PUT, uri, body=self.request_callback)

    launch_url = self.generate_launch_request(
        self.consumers, 'http://localhost/initial?')
    self.app.get(launch_url)
    self.assertFalse(self.has_exception())

    # Exercise the LTI 2.0 REST grade endpoint for both grade values.
    for grade, expected in (("1.0", "grade=True"), ("2.0", "grade=False")):
        response = self.app.get("/post_grade2/{}".format(grade))
        self.assertFalse(self.has_exception())
        self.assertEqual(expected, response.data.decode('utf-8'))
def request_callback(self, request, cburi, headers):
    # pylint: disable=unused-argument
    """
    Mock expected response.

    httpretty body callback: returns HTTP 200 with this instance's
    canned ``expected_response`` payload.
    """
    return 200, headers, self.expected_response
@httpretty.activate
def test_access_to_oauth_resource_post_grade2_fail(self):
    """
    Check post_grade edX LTI2 functionality
    """
    uri = (u'https://example.edu/courses/MITx/ODL_ENG/2014_T1/xblock/'
           u'i4x:;_;_MITx;_ODL_ENG;_lti;'
           u'_94173d3e79d145fd8ec2e83f15836ac8/handler_noauth'
           u'/lti_2_0_result_rest_handler/user/'
           u'008437924c9852377e8994829aaac7a1')

    def failing_callback(request, cburi, headers):
        # pylint: disable=unused-argument
        """
        Mock expected response.
        """
        # HTTP 400 makes the LTI2 grade post fail.
        return 400, headers, self.expected_response

    httpretty.register_uri(httpretty.PUT, uri, body=failing_callback)

    launch_url = self.generate_launch_request(
        self.consumers, 'http://localhost/initial?')
    self.app.get(launch_url)
    self.assertFalse(self.has_exception())

    response = self.app.get("/post_grade2/1.0")
    self.assertTrue(self.has_exception())
    self.assertEqual(response.data.decode('utf-8'), "error")
@mock.patch.object(LTI, '_check_role')
@mock.patch.object(LTI, 'verify')
def test_decorator_no_app(self, mock_verify, _):
    """Verify the decorator doesn't require the app object."""
    # pylint: disable=maybe-no-member
    # With verification stubbed to succeed, the route decorated without
    # an app object should respond normally.
    mock_verify.return_value = True
    response = self.app.get('/no_app')
    self.assertEqual(200, response.status_code)
    self.assertEqual('hi', response.data.decode('utf-8'))
def test_default_decorator(self):
    """
    Verify default decorator works.
    """
    # A properly signed launch against the default-decorated route
    # should not raise.
    launch_url = self.generate_launch_request(
        self.consumers, 'http://localhost/default_lti?')
    self.app.get(launch_url)
    self.assertFalse(self.has_exception())
def test_default_decorator_bad(self):
    """
    Verify error handling works.
    """
    # pylint: disable=maybe-no-member
    # Hitting the route without a signed LTI launch must render the
    # error page instead of raising.
    response = self.app.get('/default_lti')
    self.assertEqual(500, response.status_code)
    self.assertEqual(response.data.decode('utf-8'),
                     "There was an LTI communication error")
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Stubouts, mocks and fixtures for the test suite"""
import json
import random
from eventlet import tpool
from nova.virt import xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi import volume_utils
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import vmops
from nova import utils
def stubout_firewall_driver(stubs, conn):
    """Disable the firewall driver on ``conn``'s vmops.

    Replaces prepare_instance_filter/instance_filter_exists with no-ops so
    tests do not touch real firewall state.
    """
    def fake_none(self, *args):
        return

    # Bind to a differently-named local to avoid shadowing the
    # module-level `vmops` import.
    _vmops = conn._vmops
    stubs.Set(_vmops.firewall_driver, 'prepare_instance_filter', fake_none)
    stubs.Set(_vmops.firewall_driver, 'instance_filter_exists', fake_none)
def stubout_instance_snapshot(stubs):
    """Stub out image fetching and VHD coalescing for snapshot tests."""
    # NOTE: the parameter list (including the builtin-shadowing `type`)
    # must match the real fetch_image signature for keyword callers.
    @classmethod
    def fake_fetch_image(cls, context, session, instance, image, user,
                         project, type):
        # Return a single fake OS VDI instead of hitting the image service.
        return [dict(vdi_type='os', vdi_uuid=_make_fake_vdi())]

    stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image)

    def fake_wait_for_vhd_coalesce(*args):
        #TODO(sirp): Should we actually fake out the data here
        return "fakeparent", "fakebase"

    stubs.Set(vm_utils, '_wait_for_vhd_coalesce', fake_wait_for_vhd_coalesce)
def stubout_session(stubs, cls, product_version=None, **opt_args):
    """Stubs out three methods from XenAPISession"""
    def fake_import(self):
        """Stubs out get_imported_xenapi of XenAPISession"""
        fake_module = 'nova.virt.xenapi.fake'
        from_list = ['fake']
        # level=-1: Python 2 implicit relative-import semantics.
        return __import__(fake_module, globals(), locals(), from_list, -1)

    # Replace the real XenAPI session with the supplied fake class.
    stubs.Set(xenapi_conn.XenAPISession, '_create_session',
              lambda s, url: cls(url, **opt_args))
    stubs.Set(xenapi_conn.XenAPISession, 'get_imported_xenapi',
              fake_import)
    if product_version is None:
        # Default to XenServer 5.6.2 unless the test says otherwise.
        product_version = (5, 6, 2)
    stubs.Set(xenapi_conn.XenAPISession, 'get_product_version',
              lambda s: product_version)
    # NOTE(johannes): logging can't be used reliably from a thread
    # since it can deadlock with eventlet. It's safe for our faked
    # sessions to be called synchronously in the unit tests. (see
    # bug 946687)
    stubs.Set(tpool, 'execute', lambda m, *a, **kw: m(*a, **kw))
def stub_out_get_target(stubs):
    """Stubs out _get_target in volume_utils"""
    def _fake_get_target(volume_id):
        # No real iSCSI target in the test environment.
        return None, None

    stubs.Set(volume_utils, '_get_target', _fake_get_target)
def stubout_get_this_vm_uuid(stubs):
    """Report the fake control domain's uuid as "this" VM's uuid."""
    def f():
        # Pick the first VM record flagged as the control domain (dom0).
        vms = [rec['uuid'] for ref, rec
               in fake.get_all_records('VM').iteritems()
               if rec['is_control_domain']]
        return vms[0]
    stubs.Set(vm_utils, 'get_this_vm_uuid', f)
def stubout_stream_disk(stubs):
    """Make vm_utils._stream_disk a no-op so no disk data is written."""
    def _noop(_1, _2, _3, _4):
        pass

    stubs.Set(vm_utils, '_stream_disk', _noop)
def stubout_is_vdi_pv(stubs):
    """Force vm_utils._is_vdi_pv to always report non-PV."""
    def _always_false(_1):
        return False

    stubs.Set(vm_utils, '_is_vdi_pv', _always_false)
def stubout_determine_is_pv_objectstore(stubs):
    """Stub _determine_is_pv_objectstore to always report non-PV."""
    @classmethod
    def f(cls, *args):
        return False
    stubs.Set(vm_utils.VMHelper, '_determine_is_pv_objectstore', f)
def stubout_is_snapshot(stubs):
    """Always returns true.

    The xenapi fake driver does not create vmrefs for snapshots, so
    every VM is reported as a snapshot.
    """
    @classmethod
    def f(cls, *args):
        return True
    stubs.Set(vm_utils.VMHelper, 'is_snapshot', f)
def stubout_lookup_image(stubs):
    """Simulates a failure in lookup image."""
    def _raise(_1, _2, _3, _4):
        # Message text is relied upon by tests; keep it verbatim.
        raise Exception("Test Exception raised by fake lookup_image")

    stubs.Set(vm_utils, 'lookup_image', _raise)
def stubout_fetch_image_glance_disk(stubs, raise_failure=False):
    """Simulates a failure in fetch image_glance_disk.

    When raise_failure is False, returns a fake file-backed VDI
    descriptor instead of failing.
    """
    @classmethod
    def _fake_fetch_image_glance_disk(cls, context, session, instance, image,
                                      image_type):
        if raise_failure:
            raise fake.Failure("Test Exception raised by "
                               "fake fetch_image_glance_disk")
        elif image_type == vm_utils.ImageType.KERNEL:
            filename = "kernel"
        elif image_type == vm_utils.ImageType.RAMDISK:
            filename = "ramdisk"
        else:
            filename = "unknown"

        # A file-backed descriptor (no VDI uuid), mirroring the shape the
        # real kernel/ramdisk fetch path returns.
        return [dict(vdi_type=vm_utils.ImageType.to_string(image_type),
                     vdi_uuid=None,
                     file=filename)]

    stubs.Set(vm_utils.VMHelper, '_fetch_image_glance_disk',
              _fake_fetch_image_glance_disk)
def stubout_create_vm(stubs):
    """Simulates a failure in create_vm."""
    @classmethod
    def _fail(cls, *args):
        raise fake.Failure("Test Exception raised by fake create_vm")

    stubs.Set(vm_utils.VMHelper, 'create_vm', _fail)
def stubout_loopingcall_start(stubs):
    """Run LoopingCall callbacks once, synchronously, instead of looping."""
    def fake_start(self, interval):
        # interval is ignored; invoke the wrapped function immediately.
        self.f(*self.args, **self.kw)
    stubs.Set(utils.LoopingCall, 'start', fake_start)
def _make_fake_vdi():
    """Create a VDI on the first fake SR and return its uuid."""
    first_sr = fake.get_all('SR')[0]
    new_vdi = fake.create_vdi('', False, first_sr, False)
    return fake.get_record('VDI', new_vdi)['uuid']
class FakeSessionForVMTests(fake.SessionBase):
    """ Stubs out a XenAPISession for VM tests """

    # Canned `iptables-save` output returned by the xenhost plugin stub.
    _fake_iptables_save_output = ("# Generated by iptables-save v1.4.10 on "
                                  "Sun Nov 6 22:49:02 2011\n"
                                  "*filter\n"
                                  ":INPUT ACCEPT [0:0]\n"
                                  ":FORWARD ACCEPT [0:0]\n"
                                  ":OUTPUT ACCEPT [0:0]\n"
                                  "COMMIT\n"
                                  "# Completed on Sun Nov 6 22:49:02 2011\n")

    def __init__(self, uri):
        super(FakeSessionForVMTests, self).__init__(uri)

    def host_call_plugin(self, _1, _2, plugin, method, _5):
        """Intercept glance downloads and iptables queries; delegate the rest."""
        if (plugin, method) == ('glance', 'download_vhd'):
            # Pretend glance delivered a single OS VDI.
            return fake.as_json(dict(vdi_type='os',
                                     vdi_uuid=_make_fake_vdi()))
        elif (plugin, method) == ("xenhost", "iptables_config"):
            return fake.as_json(out=self._fake_iptables_save_output,
                                err='')
        else:
            return (super(FakeSessionForVMTests, self).
                    host_call_plugin(_1, _2, plugin, method, _5))

    def host_call_plugin_swap(self, _1, _2, plugin, method, _5):
        """Variant of host_call_plugin that also returns a swap VDI."""
        if (plugin, method) == ('glance', 'download_vhd'):
            return fake.as_json(dict(vdi_type='os',
                                     vdi_uuid=_make_fake_vdi()),
                                dict(vdi_type='swap',
                                     vdi_uuid=_make_fake_vdi()))
        else:
            return (super(FakeSessionForVMTests, self).
                    host_call_plugin(_1, _2, plugin, method, _5))

    def VM_start(self, _1, ref, _2, _3):
        """Transition a halted fake VM to Running, like the real API."""
        vm = fake.get_record('VM', ref)
        if vm['power_state'] != 'Halted':
            # Mirror the real XenAPI error for a bad power-state transition.
            raise fake.Failure(['VM_BAD_POWER_STATE', ref, 'Halted',
                                vm['power_state']])
        vm['power_state'] = 'Running'
        vm['is_a_template'] = False
        vm['is_control_domain'] = False
        # Any non-dom0 domain id will do.
        vm['domid'] = random.randrange(1, 1 << 16)
        return vm

    def VM_start_on(self, _1, vm_ref, host_ref, _2, _3):
        """Start the VM and record the host it is resident on."""
        vm_rec = self.VM_start(_1, vm_ref, _2, _3)
        vm_rec['resident_on'] = host_ref

    def VM_snapshot(self, session_ref, vm_ref, label):
        """Create a template VM + VDI + VBD mimicking a snapshot."""
        status = "Running"
        template_vm_ref = fake.create_vm(label, status, is_a_template=True,
                                         is_control_domain=False)
        sr_ref = "fakesr"
        template_vdi_ref = fake.create_vdi(label, read_only=True,
                                           sr_ref=sr_ref, sharable=False)
        # The VBD only needs to exist in the fake DB; its ref is unused.
        template_vbd_ref = fake.create_vbd(template_vm_ref, template_vdi_ref)
        return template_vm_ref

    def VDI_destroy(self, session_ref, vdi_ref):
        fake.destroy_vdi(vdi_ref)

    def VM_destroy(self, session_ref, vm_ref):
        fake.destroy_vm(vm_ref)

    def SR_scan(self, session_ref, sr_ref):
        # Scanning is a no-op for the fake storage repository.
        pass

    def VDI_set_name_label(self, session_ref, vdi_ref, name_label):
        pass
class FakeSessionForFirewallTests(FakeSessionForVMTests):
    """ Stubs out a XenApi Session for doing IPTable Firewall tests """

    def __init__(self, uri, test_case=None):
        super(FakeSessionForFirewallTests, self).__init__(uri)
        # Seed the fake rule tables from attributes on the test case, if set.
        if hasattr(test_case, '_in_filter_rules'):
            self._in_filter_rules = test_case._in_filter_rules
        if hasattr(test_case, '_in6_filter_rules'):
            self._in6_filter_rules = test_case._in6_filter_rules
        if hasattr(test_case, '_in_nat_rules'):
            self._in_nat_rules = test_case._in_nat_rules
        self._test_case = test_case

    def host_call_plugin(self, _1, _2, plugin, method, args):
        """Mock method for host_call_plugin to be used in unit tests
        for the dom0 iptables Firewall drivers for XenAPI
        """
        # NOTE(review): calls that are not xenhost/iptables_config fall
        # through and return None instead of delegating to the parent
        # class -- confirm this is intentional.
        if plugin == "xenhost" and method == "iptables_config":
            # The command to execute is a json-encoded list
            cmd_args = args.get('cmd_args', None)
            cmd = json.loads(cmd_args)
            if not cmd:
                ret_str = ''
            else:
                output = ''
                process_input = args.get('process_input', None)
                if cmd == ['ip6tables-save', '-t', 'filter']:
                    output = '\n'.join(self._in6_filter_rules)
                if cmd == ['iptables-save', '-t', 'filter']:
                    output = '\n'.join(self._in_filter_rules)
                if cmd == ['iptables-save', '-t', 'nat']:
                    output = '\n'.join(self._in_nat_rules)
                if cmd == ['iptables-restore', ]:
                    lines = process_input.split('\n')
                    if '*filter' in lines:
                        if self._test_case is not None:
                            # Expose the rules written back so the test
                            # case can assert on them.
                            self._test_case._out_rules = lines
                        output = '\n'.join(lines)
                if cmd == ['ip6tables-restore', ]:
                    lines = process_input.split('\n')
                    if '*filter' in lines:
                        output = '\n'.join(lines)
                ret_str = fake.as_json(out=output, err='')
            return ret_str
def stub_out_vm_methods(stubs):
    """No-op the VM lifecycle helpers used by rescue/shutdown tests."""
    def fake_shutdown(self, inst, vm, method="clean"):
        pass

    def fake_acquire_bootlock(self, vm):
        pass

    def fake_release_bootlock(self, vm):
        pass

    def fake_spawn_rescue(self, context, inst, network_info, image_meta):
        # Only record that the instance left rescue mode.
        inst._rescue = False

    @classmethod
    def fake_generate_ephemeral(cls, *args):
        pass

    stubs.Set(vmops.VMOps, "_shutdown", fake_shutdown)
    stubs.Set(vmops.VMOps, "_acquire_bootlock", fake_acquire_bootlock)
    stubs.Set(vmops.VMOps, "_release_bootlock", fake_release_bootlock)
    stubs.Set(vmops.VMOps, "spawn_rescue", fake_spawn_rescue)
    stubs.Set(vm_utils.VMHelper, 'generate_ephemeral', fake_generate_ephemeral)
class FakeSessionForVolumeTests(fake.SessionBase):
    """Stubs out a XenAPISession for Volume tests."""

    def __init__(self, uri):
        super(FakeSessionForVolumeTests, self).__init__(uri)

    def VDI_introduce(self, _1, uuid, _2, _3, _4, _5,
                      _6, _7, _8, _9, _10, _11):
        # Succeed only when a VDI with the given uuid already exists in
        # the fake database; otherwise raise like the real API would.
        for ref in fake.get_all('VDI'):
            if fake.get_record('VDI', ref)['uuid'] == uuid:
                return
        raise fake.Failure([['INVALID_VDI', 'session', self._session]])
class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests):
    """ Stubs out a XenAPISession for Volume tests: it injects failures """

    def __init__(self, uri):
        super(FakeSessionForVolumeFailedTests, self).__init__(uri)

    def VDI_introduce(self, _1, uuid, _2, _3, _4, _5,
                      _6, _7, _8, _9, _10, _11):
        # This is for testing failure
        raise fake.Failure([['INVALID_VDI', 'session', self._session]])

    def PBD_unplug(self, _1, ref):
        # Mark the PBD detached in the fake database.
        rec = fake.get_record('PBD', ref)
        rec['currently-attached'] = False

    def SR_forget(self, _1, ref):
        pass
class FakeSessionForMigrationTests(FakeSessionForVMTests):
    """Stubs out a XenAPISession for Migration tests"""

    def __init__(self, uri):
        super(FakeSessionForMigrationTests, self).__init__(uri)

    def VDI_get_by_uuid(self, *args):
        # Any opaque ref will do for the migration tests.
        return 'hurr'

    def VM_set_name_label(self, *args):
        pass

    def VDI_set_name_label(self, session_ref, vdi_ref, name_label):
        pass
def stub_out_migration_methods(stubs):
    """Stub the VM/VDI helpers exercised by resize/migration tests."""
    def fake_create_snapshot(self, instance):
        return 'vm_ref', dict(image='foo', snap='bar')

    @classmethod
    def fake_get_vdi(cls, session, vm_ref):
        # Create a throwaway VDI so callers get a valid ref + uuid.
        vdi_ref = fake.create_vdi(name_label='derp', read_only=False,
                                  sr_ref='herp', sharable=False)
        vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
        return vdi_ref, {'uuid': vdi_rec['uuid'], }

    def fake_shutdown(self, inst, vm, hard=True):
        pass

    @classmethod
    def fake_sr(cls, session, *args):
        pass

    @classmethod
    def fake_get_sr_path(cls, *args):
        return "fake"

    def fake_destroy(*args, **kwargs):
        pass

    def fake_reset_network(*args, **kwargs):
        pass

    @classmethod
    def fake_generate_ephemeral(cls, *args):
        pass

    stubs.Set(vmops.VMOps, '_destroy', fake_destroy)
    stubs.Set(vm_utils.VMHelper, 'scan_default_sr', fake_sr)
    stubs.Set(vm_utils.VMHelper, 'scan_sr', fake_sr)
    stubs.Set(vmops.VMOps, '_create_snapshot', fake_create_snapshot)
    stubs.Set(vm_utils.VMHelper, 'get_vdi_for_vm_safely', fake_get_vdi)
    stubs.Set(vm_utils.VMHelper, 'get_sr_path', fake_get_sr_path)
    stubs.Set(vmops.VMOps, 'reset_network', fake_reset_network)
    stubs.Set(vmops.VMOps, '_shutdown', fake_shutdown)
    stubs.Set(vm_utils.VMHelper, 'generate_ephemeral', fake_generate_ephemeral)
| |
#!/usr/bin/env python
"""VFS-related test classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
import time
from absl.testing import absltest
from future.builtins import str
from future.utils import iteritems
import mock
from typing import Iterable, Tuple
from grr_response_client import client_utils
from grr_response_client import vfs
from grr_response_core import config
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import paths as rdf_paths
from grr_response_core.lib.util import precondition
from grr_response_server import client_fixture
from grr_response_server import data_store
from grr_response_server import file_store
from grr_response_server.databases import db
from grr_response_server.rdfvalues import objects as rdf_objects
# pylint: mode=test
class VFSOverrider(object):
    """A context to temporarily change VFS handlers."""

    def __init__(self, vfs_type, temp_handler):
        # vfs_type: the PathSpec.PathType whose handler is overridden.
        # temp_handler: the handler class to install for that type.
        self._vfs_type = vfs_type
        self._temp_handler = temp_handler

    def __enter__(self):
        self.Start()

    def Start(self):
        """Install the temporary handler, remembering any previous one."""
        if not vfs.VFS_HANDLERS:
            # Initialize VFS if not yet done, otherwise VFS will not initialize
            # correctly when it is used for the first time in testing code.
            vfs.Init()
        self._old_handler = vfs.VFS_HANDLERS.get(self._vfs_type)
        vfs.VFS_HANDLERS[self._vfs_type] = self._temp_handler

    def __exit__(self, unused_type, unused_value, unused_traceback):
        self.Stop()

    def Stop(self):
        """Restore whatever handler was active before Start()."""
        if self._old_handler:
            vfs.VFS_HANDLERS[self._vfs_type] = self._old_handler
        else:
            # No previous handler: remove ours to restore the original state.
            del vfs.VFS_HANDLERS[self._vfs_type]
class FakeTestDataVFSOverrider(VFSOverrider):
    """A context to temporarily change VFS handler to `FakeTestDataVFSHandler`."""

    def __init__(self):
        super(FakeTestDataVFSOverrider,
              self).__init__(rdf_paths.PathSpec.PathType.OS, FakeTestDataVFSHandler)

    def __enter__(self):
        super(FakeTestDataVFSOverrider, self).__enter__()

        def Open(path, *args, **kwagrs):
            # Redirect raw os.open calls into the fake test-data root.
            path = FakeTestDataVFSHandler.FakeRootPath(path)
            return self._os_open(path, *args, **kwagrs)

        # Monkey-patch os.open for the duration of the context.
        self._os_open = os.open
        os.open = Open

    def __exit__(self, exc_type, exc_value, trace):
        super(FakeTestDataVFSOverrider, self).__exit__(exc_type, exc_value, trace)
        # Undo the os.open monkey-patch.
        os.open = self._os_open
class ClientVFSHandlerFixtureBase(vfs.VFSHandler):
    """A base class for VFSHandlerFixtures."""

    def ListNames(self):
        # Names are the basenames of the children's stat pathspecs.
        for stat in self.ListFiles():
            yield os.path.basename(stat.pathspec.path)

    def IsDirectory(self):
        # Anything with at least one child is treated as a directory.
        return bool(self.ListFiles())

    def _FakeDirStat(self, vfs_name=None):
        """Build a plausible directory StatEntry for the current path."""
        # Normalize the casing of every pathspec component so it matches
        # the fixture cache's keys.
        for path in self.pathspec:
            path.path = self._NormalizeCaseForPath(self.path, vfs_name=vfs_name)

        return rdf_client_fs.StatEntry(
            pathspec=self.pathspec,
            st_mode=16877,  # 0o40755: drwxr-xr-x
            st_size=12288,
            st_atime=1319796280,
            st_dev=1)
class ClientVFSHandlerFixture(ClientVFSHandlerFixtureBase):
    """A client side VFS handler for the OS type - returns the fixture."""

    # A class wide cache for fixtures. Key is the prefix, and value is the
    # compiled fixture.
    cache = {}

    # Mapping of normalized path -> (vfs_name, StatEntry); filled lazily.
    paths = None
    supported_pathtype = rdf_paths.PathSpec.PathType.OS

    # Do not auto-register.
    auto_register = False

    # Everything below this prefix is emulated
    prefix = "/fs/os"

    def __init__(self,
                 base_fd=None,
                 prefix=None,
                 handlers=None,
                 pathspec=None,
                 progress_callback=None):
        super(ClientVFSHandlerFixture, self).__init__(
            base_fd,
            handlers=handlers,
            pathspec=pathspec,
            progress_callback=progress_callback)

        # NOTE(review): `self.prefix or prefix` prefers the class default
        # over the constructor argument -- confirm the argument should not
        # win instead.
        self.prefix = self.prefix or prefix
        self.pathspec.Append(pathspec)
        self.path = self.pathspec.CollapsePath()
        self.paths = self.cache.get(self.prefix)
        self.PopulateCache()

    def PopulateCache(self):
        """Parse the paths from the fixture."""
        if self.paths:
            return

        # The cache is attached to the class so it can be shared by all instance.
        self.paths = self.__class__.cache[self.prefix] = {}
        for path, (vfs_name, attributes) in client_fixture.VFS:
            if not path.startswith(self.prefix):
                continue

            path = utils.NormalizePath(path[len(self.prefix):])
            if path == "/":
                continue

            stat = rdf_client_fs.StatEntry()
            args = {"client_id": "C.1234"}
            attrs = attributes.get("stat")

            if attrs:
                attrs %= args  # Remove any %% and interpolate client_id.
                stat = rdf_client_fs.StatEntry.FromTextFormat(attrs)

            stat.pathspec = rdf_paths.PathSpec(
                pathtype=self.supported_pathtype, path=path)

            # TODO(user): Once we add tests around not crossing device
            # boundaries, we need to be smarter here, especially for the root
            # entry.
            stat.st_dev = 1
            path = self._NormalizeCaseForPath(path, vfs_name)
            self.paths[path] = (vfs_name, stat)

        self.BuildIntermediateDirectories()

    def _NormalizeCaseForPath(self, path, vfs_name):
        """Handle casing differences for different filesystems."""
        # Special handling for case sensitivity of registry keys.
        # This mimicks the behavior of the operating system.
        if self.supported_pathtype == rdf_paths.PathSpec.PathType.REGISTRY:
            self.path = self.path.replace("\\", "/")
            parts = path.split("/")
            if vfs_name == "File":
                # If its a file, the last component is a value which is case
                # sensitive.
                lower_parts = [x.lower() for x in parts[0:-1]]
                lower_parts.append(parts[-1])
                path = utils.Join(*lower_parts)
            else:
                path = utils.Join(*[x.lower() for x in parts])

        return path

    def BuildIntermediateDirectories(self):
        """Interpolate intermediate directories based on their children.

        This avoids us having to put in useless intermediate directories to the
        client fixture.
        """
        for dirname, (_, stat) in list(iteritems(self.paths)):
            pathspec = stat.pathspec
            # Walk up the tree, synthesizing a directory entry for every
            # ancestor that the fixture does not mention explicitly.
            while 1:
                dirname = os.path.dirname(dirname)

                new_pathspec = pathspec.Copy()
                new_pathspec.path = os.path.dirname(pathspec.path)
                pathspec = new_pathspec

                if dirname == "/" or dirname in self.paths:
                    break

                self.paths[dirname] = ("Directory",
                                       rdf_client_fs.StatEntry(
                                           st_mode=16877,
                                           st_size=1,
                                           st_dev=1,
                                           pathspec=new_pathspec))

    def ListFiles(self, ext_attrs=None):
        del ext_attrs  # Unused.

        # First return exact matches
        for k, (_, stat) in iteritems(self.paths):
            dirname = os.path.dirname(k)
            if dirname == self._NormalizeCaseForPath(self.path, None):
                yield stat

    def Read(self, length):
        """Read `length` bytes of a file's fixture content from self.offset."""
        result = self.paths.get(self._NormalizeCaseForPath(self.path, "File"))
        if not result:
            raise IOError("File not found")

        result = result[1]  # We just want the stat.
        data = b""
        if result.HasField("resident"):
            data = result.resident
        elif result.HasField("registry_type"):
            data = str(result.registry_data.GetValue()).encode("utf-8")

        data = data[self.offset:self.offset + length]

        self.offset += len(data)
        return data

    def Stat(
        self,
        ext_attrs=False,
        follow_symlink=True,
    ):
        """Get Stat for self.path."""
        del ext_attrs, follow_symlink  # Unused.
        stat_data = self.paths.get(self._NormalizeCaseForPath(self.path, None))
        if (not stat_data and
                self.supported_pathtype == rdf_paths.PathSpec.PathType.REGISTRY):
            # Check in case it is a registry value. Unfortunately our API
            # doesn't let the user specify if they are after a value or a key,
            # so we have to try both.
            stat_data = self.paths.get(self._NormalizeCaseForPath(self.path, "File"))
        if stat_data:
            return stat_data[1]  # Strip the vfs_name.
        else:
            return self._FakeDirStat("File")
class FakeRegistryVFSHandler(ClientVFSHandlerFixture):
    """Special client VFS mock that will emulate the registry."""
    # Fixture entries under /registry are served as REGISTRY pathtype.
    prefix = "/registry"
    supported_pathtype = rdf_paths.PathSpec.PathType.REGISTRY
class FakeFullVFSHandler(ClientVFSHandlerFixture):
    """Full client VFS mock."""
    # Serve the entire fixture tree ("/") as the OS pathtype.
    prefix = "/"
    supported_pathtype = rdf_paths.PathSpec.PathType.OS
class FakeTestDataVFSHandler(ClientVFSHandlerFixtureBase):
    """Client VFS mock that looks for files in the test_data directory."""

    prefix = "/fs/os"
    supported_pathtype = rdf_paths.PathSpec.PathType.OS

    def __init__(self,
                 base_fd=None,
                 handlers=None,
                 prefix=None,
                 pathspec=None,
                 progress_callback=None):
        super(FakeTestDataVFSHandler, self).__init__(
            base_fd,
            handlers=handlers,
            pathspec=pathspec,
            progress_callback=progress_callback)
        # This should not really be done since there might be more information
        # in the pathspec than the path but here in the test is ok.
        if not base_fd:
            self.pathspec = pathspec
        else:
            self.pathspec.last.path = os.path.join(
                self.pathspec.last.path,
                pathspec.CollapsePath().lstrip("/"))
        self.path = self.pathspec.CollapsePath()

    @classmethod
    def FakeRootPath(cls, path):
        """Map a VFS path into the on-disk test_data/VFSFixture tree."""
        test_data_dir = config.CONFIG["Test.data_dir"]
        return os.path.join(test_data_dir, "VFSFixture", path.lstrip("/"))

    def _AbsPath(self, filename=None):
        # On-disk path for self.path, optionally joined with filename.
        path = self.path
        if filename:
            path = os.path.join(path, filename)
        return self.FakeRootPath(path)

    def Read(self, length):
        """Read `length` bytes from the backing test-data file at self.offset."""
        test_data_path = self._AbsPath()

        if not os.path.exists(test_data_path):
            raise IOError("Could not find %s" % test_data_path)

        data = open(test_data_path, "rb").read()[self.offset:self.offset + length]

        self.offset += len(data)
        return data

    def Stat(
        self,
        ext_attrs=False,
        follow_symlink=True,
    ):
        """Get Stat for self.path."""
        del follow_symlink  # Unused.
        return client_utils.StatEntryFromPath(
            self._AbsPath(), self.pathspec, ext_attrs=ext_attrs)

    def ListFiles(self, ext_attrs=None):
        # Yield a StatEntry per entry of the backing directory.
        for f in os.listdir(self._AbsPath()):
            ps = self.pathspec.Copy()
            ps.last.path = os.path.join(ps.last.path, f)
            # NOTE(review): `ps` is built but `self.pathspec` is what gets
            # passed below -- confirm whether `ps` was intended here.
            yield client_utils.StatEntryFromPath(
                self._AbsPath(f), self.pathspec, ext_attrs=ext_attrs)
class RegistryFake(FakeRegistryVFSHandler):
    """Implementation of fake registry VFS handler."""

    class FakeKeyHandle(object):
        """Stand-in for a winreg key handle; stores a normalized key path."""

        def __init__(self, value):
            self.value = value.replace("\\", "/")

        def __enter__(self):
            return self

        def __exit__(self, exc_type=None, exc_val=None, exc_tb=None):
            return False

    def OpenKey(self, key, sub_key):
        """Fake winreg.OpenKey: resolve sub_key relative to key."""
        res = "%s/%s" % (key.value, sub_key.replace("\\", "/"))
        res = res.rstrip("/")
        parts = res.split("/")
        # Try "all but last component lowered" first (values are case
        # sensitive), then the fully lowered form.
        for cache_key in [
            utils.Join(*[p.lower() for p in parts[:-1]] + parts[-1:]),
            res.lower()
        ]:
            if not cache_key.startswith("/"):
                cache_key = "/" + cache_key
            if cache_key in self.cache[self.prefix]:
                return self.__class__.FakeKeyHandle(cache_key)
        raise OSError()

    def QueryValueEx(self, key, value_name):
        """Fake winreg.QueryValueEx: return (data, type) for a named value."""
        full_key = os.path.join(key.value.lower(), value_name).rstrip("/")
        try:
            stat_entry = self.cache[self.prefix][full_key][1]
            data = stat_entry.registry_data.GetValue()
            if data:
                # NOTE(review): the `str` type object stands in for the
                # winreg value-type constant here -- confirm callers only
                # pass it through.
                return data, str
        except KeyError:
            pass
        raise OSError()

    def QueryInfoKey(self, key):
        """Fake winreg.QueryInfoKey: (num subkeys, num values, mtime)."""
        num_keys = len(self._GetKeys(key))
        num_vals = len(self._GetValues(key))
        for path in self.cache[self.prefix]:
            if path == key.value:
                _, stat_entry = self.cache[self.prefix][path]
                modification_time = stat_entry.st_mtime
                if modification_time:
                    return num_keys, num_vals, modification_time

        # The fixture has no mtime for this key; fall back to "now".
        modification_time = time.time()
        return num_keys, num_vals, modification_time

    def EnumKey(self, key, index):
        """Fake winreg.EnumKey: name of the index-th subkey."""
        try:
            return self._GetKeys(key)[index]
        except IndexError:
            raise OSError()

    def _GetKeys(self, key):
        # Sorted basenames of the "Directory" (subkey) children of `key`.
        res = []
        for path in self.cache[self.prefix]:
            if os.path.dirname(path) == key.value:
                sub_type, stat_entry = self.cache[self.prefix][path]
                if sub_type == "Directory":
                    res.append(os.path.basename(stat_entry.pathspec.path))
        return sorted(res)

    def EnumValue(self, key, index):
        """Fake winreg.EnumValue: (name, data, type) of the index-th value."""
        try:
            subkey = self._GetValues(key)[index]
            value, value_type = self.QueryValueEx(key, subkey)
            return subkey, value, value_type
        except IndexError:
            raise OSError()

    def _GetValues(self, key):
        # Sorted basenames of the "File" (value) children of `key`.
        res = []
        for path in self.cache[self.prefix]:
            if os.path.dirname(path) == key.value:
                sub_type, stat_entry = self.cache[self.prefix][path]
                if sub_type == "File":
                    res.append(os.path.basename(stat_entry.pathspec.path))
        return sorted(res)
class FakeWinreg(object):
    """A class to replace the winreg module.

    winreg is only available on Windows so we use this class in tests instead.
    """
    # Registry value-type constants, mirroring the winreg module.
    REG_NONE = 0
    REG_SZ = 1
    REG_EXPAND_SZ = 2
    REG_BINARY = 3
    REG_DWORD = 4
    REG_DWORD_LITTLE_ENDIAN = 4
    REG_DWORD_BIG_ENDIAN = 5
    REG_LINK = 6
    REG_MULTI_SZ = 7

    # Hive handles are represented by their names.
    HKEY_USERS = "HKEY_USERS"
    HKEY_LOCAL_MACHINE = "HKEY_LOCAL_MACHINE"
class RegistryVFSStubber(object):
    """Stubber helper for tests that have to emulate registry VFS handler."""

    def __enter__(self):
        self.Start()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.Stop()

    def Start(self):
        """Install the stubs."""
        # Replace Windows-only modules so the registry handler can import
        # on any platform.
        modules = {
            "_winreg": FakeWinreg(),
            "winreg": FakeWinreg(),
            "ctypes": mock.MagicMock(),
            "ctypes.wintypes": mock.MagicMock(),
        }

        self.module_patcher = mock.patch.dict("sys.modules", modules)
        self.module_patcher.start()

        # Import must happen after sys.modules is patched.
        # pylint: disable= g-import-not-at-top
        from grr_response_client.vfs_handlers import registry
        # pylint: enable=g-import-not-at-top

        # Route the registry module's winreg calls into the fixture.
        fixture = RegistryFake()

        self.stubber = utils.MultiStubber(
            (registry, "KeyHandle", RegistryFake.FakeKeyHandle),
            (registry, "OpenKey", fixture.OpenKey),
            (registry, "QueryValueEx", fixture.QueryValueEx),
            (registry, "QueryInfoKey", fixture.QueryInfoKey),
            (registry, "EnumValue", fixture.EnumValue),
            (registry, "EnumKey", fixture.EnumKey))
        self.stubber.Start()

        # Add the Registry handler to the vfs.
        vfs.VFS_HANDLERS[
            registry.RegistryFile.supported_pathtype] = registry.RegistryFile

    def Stop(self):
        """Uninstall the stubs."""
        self.module_patcher.stop()
        self.stubber.Stop()
def CreateFile(client_path, content=b""):
    """Creates a file in datastore-agnostic way.

    Args:
      client_path: A `ClientPath` instance specifying location of the file.
      content: A content to write to the file.
    """
    precondition.AssertType(client_path, db.ClientPath)
    precondition.AssertType(content, bytes)

    blob_id = rdf_objects.BlobID.FromBlobData(content)

    # Stat entry describing a regular file (33206 == 0o100666).
    stat_entry = rdf_client_fs.StatEntry(
        pathspec=rdf_paths.PathSpec(
            pathtype=client_path.path_type,
            path="/".join(client_path.components)),
        st_mode=33206,
        st_size=len(content))

    # Store the content blob and register it with the file store.
    data_store.BLOBS.WriteBlobs({blob_id: content})
    blob_ref = rdf_objects.BlobReference(
        size=len(content), offset=0, blob_id=blob_id)
    hash_id = file_store.AddFileWithUnknownHash(client_path, [blob_ref])

    # Record path metadata (hash + stat) in the relational database.
    path_info = rdf_objects.PathInfo()
    path_info.path_type = client_path.path_type
    path_info.components = client_path.components
    path_info.hash_entry.num_bytes = len(content)
    path_info.hash_entry.sha256 = hash_id.AsBytes()
    path_info.stat_entry = stat_entry
    data_store.REL_DB.WritePathInfos(client_path.client_id, [path_info])
def CreateDirectory(client_path):
    """Creates a directory in datastore-agnostic way.

    Args:
      client_path: A `ClientPath` instance specifying location of the file.
    """
    precondition.AssertType(client_path, db.ClientPath)

    # Stat entry describing a directory (16895 == 0o40777).
    stat_entry = rdf_client_fs.StatEntry(
        pathspec=rdf_paths.PathSpec(
            pathtype=client_path.path_type,
            path="/".join(client_path.components)),
        st_mode=16895)

    path_info = rdf_objects.PathInfo()
    path_info.path_type = client_path.path_type
    path_info.components = client_path.components
    path_info.stat_entry = stat_entry
    path_info.directory = True
    data_store.REL_DB.WritePathInfos(client_path.client_id, [path_info])
def GenerateBlobRefs(
    blob_size, contents
):
    """Generates a series of blob data and references.

    Args:
      blob_size: size of each blob.
      contents: each blob will be generated by repeating a byte from "contents"
        blob_size times.

    Returns:
      A pair of blob data sequence and blob refs sequence. For each byte
      in contents there's an element at the corresponding index in blob data
      sequence with blob's data and in blob refs sequence with a
      corresponding blob reference.
    """
    blob_data = [(c * blob_size).encode("ascii") for c in contents]
    blob_refs = []
    offset = 0
    for data in blob_data:
        blob_id = rdf_objects.BlobID.FromBlobData(data)
        blob_refs.append(
            rdf_objects.BlobReference(
                offset=offset, size=len(data), blob_id=blob_id))
        # Consecutive blobs are laid out back to back in the file.
        offset += len(data)
    return blob_data, blob_refs
def CreateFileWithBlobRefsAndData(
    client_path, blob_refs,
    blob_data):
    """Writes a file with given data and blob refs to the data/blob store.

    Args:
      client_path: Client path of the file to write.
      blob_refs: Blob references corresponding to a file.
      blob_data: Blob data to be written to the blob store.
    """
    # Store the raw blobs keyed by their content hash.
    data_store.BLOBS.WriteBlobs(
        {rdf_objects.BlobID.FromBlobData(bdata): bdata for bdata in blob_data})

    # Register the file-hash -> blob-references mapping.
    hash_id = rdf_objects.SHA256HashID.FromData(b"".join(blob_data))
    data_store.REL_DB.WriteHashBlobReferences({hash_id: blob_refs})

    # (A redundant PathInfo.OS(...) assignment that was immediately
    # overwritten has been removed.)
    path_info = rdf_objects.PathInfo(
        path_type=client_path.path_type, components=client_path.components)
    path_info.hash_entry.sha256 = hash_id.AsBytes()
    data_store.REL_DB.WritePathInfos(client_path.client_id, [path_info])
class VfsTestCase(absltest.TestCase):
  """Mixin that resets VFS caches after tests."""

  def tearDown(self):
    super(VfsTestCase, self).tearDown()
    # Flush the module-level VFS caches so cached file handles / sleuthkit
    # devices from one test case cannot leak into the next.
    vfs.files.FlushHandleCache()
    vfs.sleuthkit.DEVICE_CACHE.Flush()
| |
# -*- coding: ascii -*-
r"""
:Copyright:
Copyright 2006 - 2015
Andr\xe9 Malo or his licensors, as applicable
:License:
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
========================
Template Builder Logic
========================
This module provides the logic to build a nodetree out of parser
events.
"""
if __doc__:
# pylint: disable = redefined-builtin
__doc__ = __doc__.encode('ascii').decode('unicode_escape')
__author__ = r"Andr\xe9 Malo".encode('ascii').decode('unicode_escape')
__docformat__ = "restructuredtext en"
import re as _re
from .._exceptions import TemplateAttributeError, TemplateAttributeEmptyError
from .. import interfaces as _interfaces
class AttributeAnalyzer(object):
    """
    Attribute analyzer

    :IVariables:
      `attribute` : ``str``
        The attribute name

      `scope` : ``str``
        The scope attribute name

      `_overlay` : ``str``
        The overlay attribute name

      `_removeattr` : ``bool``
        Should `attribute` be removed from the starttag?
    """
    __implements__ = [_interfaces.AttributeAnalyzerInterface]

    # NOTE(review): the ``ur'...'`` literals below are Python 2 only syntax;
    # this module cannot be imported under Python 3 as-is.

    #: Regex matcher for valid tdi attributes
    #:
    #: :Type: ``callable``
    _IDMATCH = _re.compile(ur'''
        -$ |
        (?P<flags>(?:
            :|[+-]|\*|
            :[+-]|:\*|[+-]:|[+-]\*|\*:|\*[+-]|
            :[+-]\*|:\*[+-]|[+-]:\*|[+-]\*:|\*:[+-]|\*[+-]:
        )?)
        (?P<name>[A-Za-z][A-Za-z\d_]*)$
    ''', _re.X).match

    #: Regex matcher for valid tdi:overlay attributes
    #:
    #: :Type: ``callable``
    _OVMATCH = _re.compile(ur'''
        (?P<flags>(?:[-+][<>]?|[<>][+-]?)?)
        (?P<name>[A-Za-z][A-Za-z\d_]*)$
    ''', _re.X).match

    #: Regex matcher for valid tdi:scope attributes
    #:
    #: :Type: ``callable``
    _SCMATCH = _re.compile(ur'''
        (?P<flags>(?:[+-]=?|=[+-]?)?)
        (?P<name>(?:[A-Za-z][A-Za-z\d_]*(?:\.[A-Za-z][A-Za-z\d_]*)*)?)$
    ''', _re.X).match

    #: Default tdi attribute name
    #:
    #: :Type: ``str``
    _DEFAULT_ATTRIBUTE = 'tdi'

    #: Default overlay attribute name
    #:
    #: :Type: ``str``
    _DEFAULT_OVERLAY = 'tdi:overlay'

    #: Default scope attribute name
    #:
    #: :Type: ``str``
    _DEFAULT_SCOPE = 'tdi:scope'

    #: Default value for removing the tdi attribute
    #:
    #: :Type: ``bool``
    _DEFAULT_REMOVEATTR = True

    def __init__(self, decoder, attribute=None, overlay=None, scope=None,
                 removeattribute=None, hidden=None):
        """
        Initialization

        :Parameters:
          `attribute` : ``str``
            The special tdi attribute name

          `overlay` : ``str``
            The overlay attribute name

          `scope` : ``str``
            The scope attribute name

          `removeattribute` : ``bool``
            Should `attribute` be removed from the starttag?

          `hidden` : ``bool``
            The default +- flag value. True: Tags are hidden, False:
            Tags are kept. If omitted or ``None``, it's false.
        """
        # All attribute names are normalized through the decoder so lookups
        # in __call__ can compare normalized keys directly.
        if attribute is None:
            attribute = self._DEFAULT_ATTRIBUTE
        self.attribute = decoder.normalize(attribute)
        if overlay is None:
            overlay = self._DEFAULT_OVERLAY
        self._overlay = decoder.normalize(overlay)
        if scope is None:
            scope = self._DEFAULT_SCOPE
        self.scope = decoder.normalize(scope)
        if removeattribute is None:
            removeattribute = self._DEFAULT_REMOVEATTR
        self._removeattr = bool(removeattribute)
        self._hidden = bool(hidden)
        self._decoder = decoder
        # Bound-method shortcuts, looked up once here.
        self._decode_attr = decoder.attribute
        self._normalize = decoder.normalize

    def _parse_attr(self, name, value, matcher):
        """
        Parse attribute value

        :Parameters:
          `name` : ``str``
            Name of the attribute (used for error messages)

          `value` : ``str``
            Raw attribute value (maybe ``None``, but it raises an error,
            because there's some information expected here!)

          `matcher` : ``callable``
            Matcher, expected to return a match object or ``None``.

        :Return: flags and name
        :Rtype: ``tuple`` (``(str, str)``)

        :Exceptions:
          - `TemplateAttributeError` : ``value`` was ``None`` (short form)
          - `TemplateAttributeEmptyError` : decoded value was empty
        """
        if value is None:
            raise TemplateAttributeError(
                "Invalid short %s attribute" % (name,)
            )
        # Decode the raw attribute value (strips quotes, resolves entities)
        # before matching it.
        value = self._decode_attr(value).strip()
        if not value:
            raise TemplateAttributeEmptyError("Empty %s attribute" % (name,))
        return self._parse(name, value, matcher)

    def _parse(self, name, value, matcher):
        """
        Parse value

        :Parameters:
          `name` : ``str``
            Name of the attribute (used for error messages)

          `value` : ``str``
            Raw attribute value (maybe ``None``, but it raises an error,
            because there's some information expected here!)

          `matcher` : ``callable``
            Matcher, expected to return a match object or ``None``.

        :Return: flags and name
        :Rtype: ``tuple`` (``(str, str)``)
        """
        match = matcher(value)
        if match is None:
            raise TemplateAttributeError(
                "Invalid %s attribute %r" % (name, value)
            )
        def uni2str(value):
            """ Simple None-aware encoder """
            if value is None:
                return None
            return value.encode(self._decoder.encoding)
        flags, name = map(uni2str, match.group('flags', 'name'))
        if name is not None:
            # An explicit '+' flag is stripped (it requests the non-hidden
            # behavior); if the analyzer-wide `hidden` default is set, a '-'
            # flag is appended unless one is already there.
            if '+' in flags:
                flags = flags.replace('+', '')
            elif self._hidden and '-' not in flags:
                flags += '-'
        return flags, name

    def __call__(self, attr, name=''):
        """
        Analyze attributes

        :Parameters:
          `attr` : sequence
            (key, value) list of attributes. value may be ``None``

          `name` : ``str``
            Name of the tag. If set and containing a value, it's additionally
            considered being equal to a TDI attribute.

        :Return: Either ``None`` if there's nothing special or a tuple of:
                 tdi name, tdi flags, (possibly) reduced attributes, overlay
                 info, scope info
        :Rtype: ``tuple``
        """
        normalize, reduced, special = self._normalize, [], {}
        attribute, overlay, scope = wanted = (
            self.attribute, self._overlay, self.scope
        )
        remove = self._removeattr
        # Partition the attribute list: tdi-special attributes go into
        # `special`; everything else (plus the specials when removal is
        # disabled) is kept in `reduced` in original order.
        for key, value in attr:
            nkey = normalize(key)
            if nkey in wanted:
                special[nkey] = value
                if remove:
                    continue
            reduced.append((key, value))

        result = {}

        # Scope
        if scope in special:
            result['scope'] = self._parse_attr(
                scope, special[scope], self._SCMATCH,
            )

        # Overlay
        if overlay in special:
            result['overlay'] = self._parse_attr(
                overlay, special[overlay], self._OVMATCH,
            )

        # TDI
        # The tag name itself (when given) is parsed as a tdi id as well; if
        # both the name and the tdi attribute are present they must agree.
        if name:
            nflags, ntdi = self._parse(
                attribute, self._decoder.decode(name), self._IDMATCH
            )
            if not ntdi:
                # bare '-' id: hide the tag, no node name
                nflags, ntdi = '-', None
        if attribute in special:
            flags, tdi = self._parse_attr(
                attribute, special[attribute], self._IDMATCH,
            )
            if not tdi:
                flags, tdi = '-', None
            if name and (nflags != flags or ntdi != tdi):
                raise TemplateAttributeError(
                    "%s attribute value %r must equal name" % (
                        attribute, name
                    )
                )
            result['attribute'] = flags, tdi
        elif name:
            result['attribute'] = nflags, ntdi

        return reduced, result
# Prefer the optional C re-implementation of the analyzer when it loads;
# otherwise fall back to the pure-python class defined above.
from .. import c
c = c.load('impl')
if c is not None:
    DEFAULT_ANALYZER = c.AttributeAnalyzer
else:
    DEFAULT_ANALYZER = AttributeAnalyzer  # pylint: disable = invalid-name
del c
| |
import unittest
from pywin32_testutil import str2bytes, TestSkipped, testmain
import win32api, win32file, win32pipe, pywintypes, winerror, win32event
import win32con, ntsecuritycon
import sys
import os
import tempfile
import threading
import time
import shutil
import socket
import datetime
import random
import win32timezone
try:
set
except NameError:
from sets import Set as set
class TestReadBuffer(unittest.TestCase):
    """Tests for win32file.AllocateReadBuffer objects (len/index/slice)."""
    # NOTE: deprecated unittest aliases (failUnlessEqual) replaced with the
    # modern assertEqual (aliases were removed in Python 3.12); local
    # 'buffer' renamed so it no longer shadows the builtin.

    def testLen(self):
        buf = win32file.AllocateReadBuffer(1)
        self.assertEqual(len(buf), 1)

    def testSimpleIndex(self):
        val = str2bytes('\xFF')
        buf = win32file.AllocateReadBuffer(1)
        buf[0] = val
        self.assertEqual(buf[0], val)

    def testSimpleSlice(self):
        buf = win32file.AllocateReadBuffer(2)
        val = str2bytes('\0\0')
        buf[:2] = val
        self.assertEqual(buf[0:2], val)
class TestSimpleOps(unittest.TestCase):
    """Basic create/read/write/metadata/time tests for win32file.

    Deprecated unittest aliases (assertEquals, failUnless, failUnlessEqual,
    failIf) replaced with their modern equivalents; behavior is unchanged.
    """

    def testSimpleFiles(self):
        fd, filename = tempfile.mkstemp()
        os.close(fd)
        os.unlink(filename)
        handle = win32file.CreateFile(filename, win32file.GENERIC_WRITE, 0, None, win32con.CREATE_NEW, 0, None)
        test_data = str2bytes("Hello\0there")
        try:
            win32file.WriteFile(handle, test_data)
            handle.Close()
            # Try and open for read
            handle = win32file.CreateFile(filename, win32file.GENERIC_READ, 0, None, win32con.OPEN_EXISTING, 0, None)
            rc, data = win32file.ReadFile(handle, 1024)
            self.assertEqual(data, test_data)
        finally:
            handle.Close()
            try:
                os.unlink(filename)
            except os.error:
                pass

    # A simple test using normal read/write operations.
    def testMoreFiles(self):
        # Create a file in the %TEMP% directory.
        testName = os.path.join(win32api.GetTempPath(), "win32filetest.dat")
        desiredAccess = win32file.GENERIC_READ | win32file.GENERIC_WRITE
        # Set a flag to delete the file automatically when it is closed.
        fileFlags = win32file.FILE_FLAG_DELETE_ON_CLOSE
        h = win32file.CreateFile(testName, desiredAccess, win32file.FILE_SHARE_READ, None, win32file.CREATE_ALWAYS, fileFlags, 0)
        # Write a known number of bytes to the file.
        data = str2bytes("z") * 1025
        win32file.WriteFile(h, data)
        self.assertTrue(win32file.GetFileSize(h) == len(data), "WARNING: Written file does not have the same size as the length of the data in it!")
        # Ensure we can read the data back.
        win32file.SetFilePointer(h, 0, win32file.FILE_BEGIN)
        hr, read_data = win32file.ReadFile(h, len(data) + 10)  # + 10 to get anything extra
        self.assertTrue(hr == 0, "Readfile returned %d" % hr)
        self.assertTrue(read_data == data, "Read data is not what we wrote!")
        # Now truncate the file at 1/2 its existing size.
        newSize = len(data) // 2
        win32file.SetFilePointer(h, newSize, win32file.FILE_BEGIN)
        win32file.SetEndOfFile(h)
        self.assertEqual(win32file.GetFileSize(h), newSize)
        # GetFileAttributesEx/GetFileAttributesExW tests.
        self.assertEqual(win32file.GetFileAttributesEx(testName), win32file.GetFileAttributesExW(testName))
        attr, ct, at, wt, size = win32file.GetFileAttributesEx(testName)
        self.assertTrue(size == newSize,
                        "Expected GetFileAttributesEx to return the same size as GetFileSize()")
        self.assertTrue(attr == win32file.GetFileAttributes(testName),
                        "Expected GetFileAttributesEx to return the same attributes as GetFileAttributes")
        h = None  # Close the file by removing the last reference to the handle!
        self.assertFalse(os.path.isfile(testName), "After closing the file, it still exists!")

    def testFilePointer(self):
        # via [ 979270 ] SetFilePointer fails with negative offset
        # Create a file in the %TEMP% directory.
        filename = os.path.join(win32api.GetTempPath(), "win32filetest.dat")
        f = win32file.CreateFile(filename,
                                 win32file.GENERIC_READ | win32file.GENERIC_WRITE,
                                 0,
                                 None,
                                 win32file.CREATE_ALWAYS,
                                 win32file.FILE_ATTRIBUTE_NORMAL,
                                 0)
        try:
            # Write some data
            data = str2bytes('Some data')
            (res, written) = win32file.WriteFile(f, data)
            self.assertFalse(res)
            self.assertEqual(written, len(data))
            # Move at the beginning and read the data
            win32file.SetFilePointer(f, 0, win32file.FILE_BEGIN)
            (res, s) = win32file.ReadFile(f, len(data))
            self.assertFalse(res)
            self.assertEqual(s, data)
            # Move at the end and read the data
            win32file.SetFilePointer(f, -len(data), win32file.FILE_END)
            (res, s) = win32file.ReadFile(f, len(data))
            self.assertFalse(res)
            self.assertEqual(s, data)
        finally:
            f.Close()
            os.unlink(filename)

    def testFileTimesTimezones(self):
        if not issubclass(pywintypes.TimeType, datetime.datetime):
            # maybe should report 'skipped', but that's not quite right as
            # there is nothing you can do to avoid it being skipped!
            return
        filename = tempfile.mktemp("-testFileTimes")
        now_utc = win32timezone.utcnow()
        now_local = now_utc.astimezone(win32timezone.TimeZoneInfo.local())
        h = win32file.CreateFile(filename,
                                 win32file.GENERIC_READ | win32file.GENERIC_WRITE,
                                 0, None, win32file.CREATE_ALWAYS, 0, 0)
        try:
            win32file.SetFileTime(h, now_utc, now_utc, now_utc)
            ct, at, wt = win32file.GetFileTime(h)
            self.assertEqual(now_local, ct)
            self.assertEqual(now_local, at)
            self.assertEqual(now_local, wt)
            # and the reverse - set local, check against utc
            win32file.SetFileTime(h, now_local, now_local, now_local)
            ct, at, wt = win32file.GetFileTime(h)
            self.assertEqual(now_utc, ct)
            self.assertEqual(now_utc, at)
            self.assertEqual(now_utc, wt)
        finally:
            h.close()
            os.unlink(filename)

    def testFileTimes(self):
        if issubclass(pywintypes.TimeType, datetime.datetime):
            from win32timezone import TimeZoneInfo
            now = datetime.datetime.now(tz=TimeZoneInfo.local())
            nowish = now + datetime.timedelta(seconds=1)
            later = now + datetime.timedelta(seconds=120)
        else:
            rc, tzi = win32api.GetTimeZoneInformation()
            bias = tzi[0]
            if rc == 2:  # daylight-savings is in effect.
                bias += tzi[-1]
            bias *= 60  # minutes to seconds...
            tick = int(time.time())
            now = pywintypes.Time(tick + bias)
            nowish = pywintypes.Time(tick + bias + 1)
            later = pywintypes.Time(tick + bias + 120)

        filename = tempfile.mktemp("-testFileTimes")
        # Windows docs the 'last time' isn't valid until the last write
        # handle is closed - so create the file, then re-open it to check.
        open(filename, "w").close()
        f = win32file.CreateFile(filename, win32file.GENERIC_READ | win32file.GENERIC_WRITE,
                                 0, None,
                                 win32con.OPEN_EXISTING, 0, None)
        try:
            ct, at, wt = win32file.GetFileTime(f)
            self.assertTrue(ct >= now, "File was created in the past - now=%s, created=%s" % (now, ct))
            self.assertTrue(now <= ct <= nowish, (now, ct))
            self.assertTrue(wt >= now, "File was written-to in the past now=%s, written=%s" % (now, wt))
            self.assertTrue(now <= wt <= nowish, (now, wt))
            # Now set the times.
            win32file.SetFileTime(f, later, later, later)
            # Get them back.
            ct, at, wt = win32file.GetFileTime(f)
            # XXX - the builtin PyTime type appears to be out by a dst offset.
            # just ignore that type here...
            if issubclass(pywintypes.TimeType, datetime.datetime):
                self.assertEqual(ct, later)
                self.assertEqual(at, later)
                self.assertEqual(wt, later)
        finally:
            f.Close()
            os.unlink(filename)
class TestGetFileInfoByHandleEx(unittest.TestCase):
    """Checks GetFileInformationByHandleEx(FileBasicInfo) against GetFileTime."""

    __handle = __filename = None

    def setUp(self):
        # Create a real (empty) temp file to query, closed immediately so the
        # test can re-open it with win32file.
        fd, self.__filename = tempfile.mkstemp()
        os.close(fd)

    def tearDown(self):
        # Close the win32 handle first, then remove the file; tolerate the
        # file already being gone.
        if self.__handle is not None:
            self.__handle.Close()
        if self.__filename is not None:
            try:
                os.unlink(self.__filename)
            except OSError:
                pass
        self.__handle = self.__filename = None

    def testFileBasicInfo(self):
        attr = win32file.GetFileAttributes(self.__filename)
        hfile = win32file.CreateFile(self.__filename, win32file.GENERIC_READ,
                                     0, None, win32con.OPEN_EXISTING, 0, None)
        self.__handle = hfile
        ct, at, wt = win32file.GetFileTime(hfile)
        # bug #752: this throws ERROR_BAD_LENGTH (24) in x86 binaries of build 221
        info = win32file.GetFileInformationByHandleEx(hfile, win32file.FileBasicInfo)
        self.assertEqual(ct, info['CreationTime'])
        self.assertEqual(at, info['LastAccessTime'])
        self.assertEqual(wt, info['LastWriteTime'])
        self.assertEqual(attr, info['FileAttributes'])
class TestOverlapped(unittest.TestCase):
    """Overlapped (async) I/O, IOCP association and OVERLAPPED semantics.

    Modernized: deprecated unittest aliases replaced (removed in Python
    3.12), Thread.setDaemon/isAlive replaced with daemon/is_alive (removed
    in Python 3.9), bare asserts replaced with assertIs, local names no
    longer shadow builtins, and dead/redundant locals removed.
    """

    def testSimpleOverlapped(self):
        # Create a file in the %TEMP% directory.
        testName = os.path.join(win32api.GetTempPath(), "win32filetest.dat")
        desiredAccess = win32file.GENERIC_WRITE
        overlapped = pywintypes.OVERLAPPED()
        evt = win32event.CreateEvent(None, 0, 0, None)
        overlapped.hEvent = evt
        # Create the file and write shit-loads of data to it.
        h = win32file.CreateFile(testName, desiredAccess, 0, None, win32file.CREATE_ALWAYS, 0, 0)
        chunk_data = str2bytes("z") * 0x8000
        num_loops = 512
        for i in range(num_loops):
            win32file.WriteFile(h, chunk_data, overlapped)
            win32event.WaitForSingleObject(overlapped.hEvent, win32event.INFINITE)
            overlapped.Offset = overlapped.Offset + len(chunk_data)
        h.Close()
        # Now read the data back overlapped
        overlapped = pywintypes.OVERLAPPED()
        evt = win32event.CreateEvent(None, 0, 0, None)
        overlapped.hEvent = evt
        desiredAccess = win32file.GENERIC_READ
        h = win32file.CreateFile(testName, desiredAccess, 0, None, win32file.OPEN_EXISTING, 0, 0)
        buf = win32file.AllocateReadBuffer(0xFFFF)
        while 1:
            try:
                hr, data = win32file.ReadFile(h, buf, overlapped)
                win32event.WaitForSingleObject(overlapped.hEvent, win32event.INFINITE)
                overlapped.Offset = overlapped.Offset + len(data)
                if data is not buf:
                    self.fail("Unexpected result from ReadFile - should be the same buffer we passed it")
            except win32api.error:
                # EOF of the overlapped read terminates the loop.
                break
        h.Close()

    def testCompletionPortsMultiple(self):
        # Mainly checking that we can "associate" an existing handle.  This
        # failed in build 203.
        ioport = win32file.CreateIoCompletionPort(win32file.INVALID_HANDLE_VALUE,
                                                  0, 0, 0)
        socks = []
        for PORT in range(9123, 9125):
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.bind(('', PORT))
            sock.listen(1)
            socks.append(sock)
            new = win32file.CreateIoCompletionPort(sock.fileno(), ioport, PORT, 0)
            self.assertIs(new, ioport)
        for s in socks:
            s.close()
        hv = int(ioport)
        ioport = new = None
        # The handle itself should be closed now (unless we leak references!)
        # Check that.
        try:
            win32file.CloseHandle(hv)
            raise RuntimeError("Expected close to fail!")
        except win32file.error as details:
            self.assertEqual(details.winerror, winerror.ERROR_INVALID_HANDLE)

    def testCompletionPortsQueued(self):
        class Foo: pass
        io_req_port = win32file.CreateIoCompletionPort(-1, None, 0, 0)
        overlapped = pywintypes.OVERLAPPED()
        overlapped.object = Foo()
        win32file.PostQueuedCompletionStatus(io_req_port, 0, 99, overlapped)
        errCode, nbytes, key, overlapped = \
            win32file.GetQueuedCompletionStatus(io_req_port, win32event.INFINITE)
        self.assertEqual(errCode, 0)
        self.assertIsInstance(overlapped.object, Foo)

    def _IOCPServerThread(self, handle, port, drop_overlapped_reference):
        overlapped = pywintypes.OVERLAPPED()
        win32pipe.ConnectNamedPipe(handle, overlapped)
        if drop_overlapped_reference:
            # Be naughty - the overlapped object is now dead, but
            # GetQueuedCompletionStatus will still find it.  Our check of
            # reference counting should catch that error.
            overlapped = None
            # even if we fail, be sure to close the handle; prevents hangs
            # on Vista 64...
            try:
                self.assertRaises(RuntimeError,
                                  win32file.GetQueuedCompletionStatus, port, -1)
            finally:
                handle.Close()
            return

        result = win32file.GetQueuedCompletionStatus(port, -1)
        ol2 = result[-1]
        self.assertIs(ol2, overlapped)
        data = win32file.ReadFile(handle, 512)[1]
        win32file.WriteFile(handle, data)

    def testCompletionPortsNonQueued(self, test_overlapped_death=0):
        # In 204 we had a reference count bug when OVERLAPPED objects were
        # associated with a completion port other than via
        # PostQueuedCompletionStatus.  This test is based on the reproduction
        # reported with that bug.
        # Create the pipe.
        BUFSIZE = 512
        pipe_name = r"\\.\pipe\pywin32_test_pipe"
        handle = win32pipe.CreateNamedPipe(pipe_name,
                                           win32pipe.PIPE_ACCESS_DUPLEX |
                                           win32file.FILE_FLAG_OVERLAPPED,
                                           win32pipe.PIPE_TYPE_MESSAGE |
                                           win32pipe.PIPE_READMODE_MESSAGE |
                                           win32pipe.PIPE_WAIT,
                                           1, BUFSIZE, BUFSIZE,
                                           win32pipe.NMPWAIT_WAIT_FOREVER,
                                           None)
        # Create an IOCP and associate it with the handle.
        port = win32file.CreateIoCompletionPort(-1, 0, 0, 0)
        win32file.CreateIoCompletionPort(handle, port, 1, 0)

        t = threading.Thread(target=self._IOCPServerThread,
                             args=(handle, port, test_overlapped_death))
        t.daemon = True  # avoid hanging entire test suite on failure.
        t.start()
        try:
            time.sleep(0.1)  # let thread do its thing.
            try:
                win32pipe.CallNamedPipe(pipe_name, str2bytes("Hello there"), BUFSIZE, 0)
            except win32pipe.error:
                # Testing for overlapped death causes this
                if not test_overlapped_death:
                    raise
        finally:
            if not test_overlapped_death:
                handle.Close()
            t.join(3)
            self.assertFalse(t.is_alive(), "thread didn't finish")

    def testCompletionPortsNonQueuedBadReference(self):
        self.testCompletionPortsNonQueued(True)

    def testHashable(self):
        overlapped = pywintypes.OVERLAPPED()
        d = {}
        d[overlapped] = "hello"
        self.assertEqual(d[overlapped], "hello")

    def testComparable(self):
        overlapped = pywintypes.OVERLAPPED()
        self.assertEqual(overlapped, overlapped)
        # ensure we explicitly test the operators.
        self.assertTrue(overlapped == overlapped)
        self.assertFalse(overlapped != overlapped)

    def testComparable2(self):
        # 2 overlapped objects compare equal if their contents are the same.
        overlapped1 = pywintypes.OVERLAPPED()
        overlapped2 = pywintypes.OVERLAPPED()
        self.assertEqual(overlapped1, overlapped2)
        # ensure we explicitly test the operators.
        self.assertTrue(overlapped1 == overlapped2)
        self.assertFalse(overlapped1 != overlapped2)
        # now change something in one of them - should no longer be equal.
        overlapped1.hEvent = 1
        self.assertNotEqual(overlapped1, overlapped2)
        # ensure we explicitly test the operators.
        self.assertFalse(overlapped1 == overlapped2)
        self.assertTrue(overlapped1 != overlapped2)
class TestSocketExtensions(unittest.TestCase):
    """Tests for the winsock extension functions (AcceptEx/WSASend/WSARecv).

    Modernized: failUnlessEqual -> assertEqual, Event.isSet -> is_set
    (deprecated aliases removed in Python 3.12/3.9); 'buffer' locals
    renamed so they no longer shadow the Python 2 builtin.
    """

    def acceptWorker(self, port, running_event, stopped_event):
        listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        listener.bind(('', port))
        listener.listen(200)
        # create accept socket
        accepter = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # An overlapped
        overlapped = pywintypes.OVERLAPPED()
        overlapped.hEvent = win32event.CreateEvent(None, 0, 0, None)
        # accept the connection.
        # We used to allow strings etc to be passed here, and they would be
        # modified!  Obviously this is evil :)
        buf = " " * 1024  # EVIL - SHOULD NOT BE ALLOWED.
        self.assertRaises(TypeError, win32file.AcceptEx, listener, accepter, buf, overlapped)

        # This is the correct way to allocate the buffer...
        buf = win32file.AllocateReadBuffer(1024)
        rc = win32file.AcceptEx(listener, accepter, buf, overlapped)
        self.assertEqual(rc, winerror.ERROR_IO_PENDING)
        # Set the event to say we are all ready
        running_event.set()
        # and wait for the connection.
        rc = win32event.WaitForSingleObject(overlapped.hEvent, 2000)
        if rc == win32event.WAIT_TIMEOUT:
            self.fail("timed out waiting for a connection")
        nbytes = win32file.GetOverlappedResult(listener.fileno(), overlapped, False)
        # fam, loc, rem = win32file.GetAcceptExSockaddrs(accepter, buf)
        accepter.send(buf[:nbytes])
        # NOT set in a finally - this means *successfully* stopped!
        stopped_event.set()

    def testAcceptEx(self):
        port = 4680
        running = threading.Event()
        stopped = threading.Event()
        t = threading.Thread(target=self.acceptWorker, args=(port, running, stopped))
        t.start()
        running.wait(2)
        if not running.is_set():
            self.fail("AcceptEx Worker thread failed to start")
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect(('127.0.0.1', port))
        win32file.WSASend(s, str2bytes("hello"), None)
        overlapped = pywintypes.OVERLAPPED()
        overlapped.hEvent = win32event.CreateEvent(None, 0, 0, None)
        # Like above - WSARecv used to allow strings as the receive buffer!!
        buf = " " * 10
        self.assertRaises(TypeError, win32file.WSARecv, s, buf, overlapped)
        # This one should work :)
        buf = win32file.AllocateReadBuffer(10)
        win32file.WSARecv(s, buf, overlapped)
        nbytes = win32file.GetOverlappedResult(s.fileno(), overlapped, True)
        got = buf[:nbytes]
        self.assertEqual(got, str2bytes("hello"))
        # thread should have stopped
        stopped.wait(2)
        if not stopped.is_set():
            self.fail("AcceptEx Worker thread failed to successfully stop")
class TestFindFiles(unittest.TestCase):
    """Tests for FindFilesW/FindFilesIterator.

    Modernized: locals no longer shadow the builtins 'dir' and 'file';
    bare assert (stripped under -O) replaced with assertGreater;
    failUnlessEqual -> assertEqual.
    """

    def testIter(self):
        spec = os.path.join(os.getcwd(), "*")
        files = win32file.FindFilesW(spec)
        set1 = set()
        set1.update(files)
        set2 = set()
        for info in win32file.FindFilesIterator(spec):
            set2.add(info)
        self.assertGreater(len(set2), 5, "This directory has less than 5 files!?")
        self.assertEqual(set1, set2)

    def testBadDir(self):
        spec = os.path.join(os.getcwd(), "a dir that doesnt exist", "*")
        self.assertRaises(win32file.error, win32file.FindFilesIterator, spec)

    def testEmptySpec(self):
        spec = os.path.join(os.getcwd(), "*.foo_bar")
        num = 0
        for i in win32file.FindFilesIterator(spec):
            num += 1
        self.assertEqual(0, num)

    def testEmptyDir(self):
        test_path = os.path.join(win32api.GetTempPath(), "win32file_test_directory")
        try:
            # Note: previously used shutil.rmtree, but when looking for
            # reference count leaks, that function showed leaks!  os.rmdir
            # doesn't have that problem.
            os.rmdir(test_path)
        except os.error:
            pass
        os.mkdir(test_path)
        try:
            num = 0
            for i in win32file.FindFilesIterator(os.path.join(test_path, "*")):
                num += 1
            # Expecting "." and ".." only
            self.assertEqual(2, num)
        finally:
            os.rmdir(test_path)
class TestDirectoryChanges(unittest.TestCase):
    """Tests ReadDirectoryChangesW via an overlapped watcher thread per dir."""

    num_test_dirs = 1

    def setUp(self):
        self.watcher_threads = []
        self.watcher_thread_changes = []
        self.dir_names = []
        self.dir_handles = []
        for i in range(self.num_test_dirs):
            td = tempfile.mktemp("-test-directory-changes-%d" % i)
            os.mkdir(td)
            self.dir_names.append(td)
            hdir = win32file.CreateFile(td,
                                        ntsecuritycon.FILE_LIST_DIRECTORY,
                                        win32con.FILE_SHARE_READ,
                                        None,  # security desc
                                        win32con.OPEN_EXISTING,
                                        win32con.FILE_FLAG_BACKUP_SEMANTICS |
                                        win32con.FILE_FLAG_OVERLAPPED,
                                        None)
            self.dir_handles.append(hdir)

            changes = []
            t = threading.Thread(target=self._watcherThreadOverlapped,
                                 args=(td, hdir, changes))
            t.start()
            self.watcher_threads.append(t)
            self.watcher_thread_changes.append(changes)

    def _watcherThread(self, dn, dh, changes):
        # A synchronous version:
        # XXX - not used - I was having a whole lot of problems trying to
        # get this to work.  Specifically:
        # * ReadDirectoryChangesW without an OVERLAPPED blocks infinitely.
        # * If another thread attempts to close the handle while
        #   ReadDirectoryChangesW is waiting on it, the ::CloseHandle() method
        #   blocks (which has nothing to do with the GIL - it is correctly
        #   managed)
        # Which ends up with no way to kill the thread!
        flags = win32con.FILE_NOTIFY_CHANGE_FILE_NAME
        while 1:
            print("waiting", dh)
            # BUG FIX: the result used to be assigned to 'changes', clobbering
            # the shared results list passed in by the caller, and then the
            # list was extended with itself.  Use a separate local instead.
            # (The redundant 'try: ... except: raise' wrapper was a no-op and
            # has been removed.)
            results = win32file.ReadDirectoryChangesW(dh,
                                                      8192,
                                                      False,  # sub-tree
                                                      flags)
            print("got", results)
            changes.extend(results)

    def _watcherThreadOverlapped(self, dn, dh, changes):
        flags = win32con.FILE_NOTIFY_CHANGE_FILE_NAME
        buf = win32file.AllocateReadBuffer(8192)
        overlapped = pywintypes.OVERLAPPED()
        overlapped.hEvent = win32event.CreateEvent(None, 0, 0, None)
        while 1:
            win32file.ReadDirectoryChangesW(dh,
                                            buf,
                                            False,  # sub-tree
                                            flags,
                                            overlapped)
            # Wait for our event, or for 5 seconds.
            rc = win32event.WaitForSingleObject(overlapped.hEvent, 5000)
            if rc == win32event.WAIT_OBJECT_0:
                # got some data!  Must use GetOverlappedResult to find out
                # how much is valid!  0 generally means the handle has
                # been closed.  Blocking is OK here, as the event has
                # already been set.
                nbytes = win32file.GetOverlappedResult(dh, overlapped, True)
                if nbytes:
                    bits = win32file.FILE_NOTIFY_INFORMATION(buf, nbytes)
                    changes.extend(bits)
                else:
                    # This is "normal" exit - our 'tearDown' closes the
                    # handle.
                    # print "looks like dir handle was closed!"
                    return
            else:
                print("ERROR: Watcher thread timed-out!")
                return  # kill the thread!

    def tearDown(self):
        # be careful about raising errors at teardown!
        for h in self.dir_handles:
            # See comments in _watcherThread above - this appears to
            # deadlock if a synchronous ReadDirectoryChangesW is waiting...
            # (No such problems with an asynch ReadDirectoryChangesW)
            h.Close()
        for dn in self.dir_names:
            try:
                shutil.rmtree(dn)
            except OSError:
                print("FAILED to remove directory", dn)

        for t in self.watcher_threads:
            # closing dir handle should have killed threads!
            t.join(5)
            if t.is_alive():
                print("FAILED to wait for thread termination")

    def stablize(self):
        # (sic) - give the watcher threads time to observe pending changes.
        time.sleep(0.5)

    def testSimple(self):
        self.stablize()
        for dn in self.dir_names:
            fn = os.path.join(dn, "test_file")
            open(fn, "w").close()

        self.stablize()
        changes = self.watcher_thread_changes[0]
        self.assertEqual(changes, [(1, "test_file")])

    def testSmall(self):
        self.stablize()
        for dn in self.dir_names:
            fn = os.path.join(dn, "x")
            open(fn, "w").close()

        self.stablize()
        changes = self.watcher_thread_changes[0]
        self.assertEqual(changes, [(1, "x")])
class TestEncrypt(unittest.TestCase):
    """Tests the EncryptFile/DecryptFile round-trip (NTFS only)."""

    def testEncrypt(self):
        fname = tempfile.mktemp("win32file_test")
        f = open(fname, "wb")
        f.write(str2bytes("hello"))
        f.close()
        f = None
        try:
            try:
                win32file.EncryptFile(fname)
            except win32file.error as details:
                if details.winerror != winerror.ERROR_ACCESS_DENIED:
                    raise
                print("It appears this is not NTFS - cant encrypt/decrypt")
            else:
                # BUG FIX: DecryptFile used to run unconditionally, so after
                # detecting a non-NTFS volume above the test would still fail
                # on the decrypt call.  Only decrypt when encrypt succeeded.
                win32file.DecryptFile(fname)
        finally:
            if f is not None:
                f.close()
            os.unlink(fname)
class TestConnect(unittest.TestCase):
    """Tests ConnectEx (with and without payload) against an AcceptEx worker.

    Modernized: failIf -> assertFalse, Thread.isAlive -> is_alive
    (aliases removed in Python 3.12/3.9).
    """

    def connect_thread_runner(self, expect_payload, giveup_event):
        # As Windows 2000 doesn't do ConnectEx, we need to use a non-blocking
        # accept, as our test connection may never come.  May as well use
        # AcceptEx for this...
        listener = socket.socket()
        self.addr = ('localhost', random.randint(10000, 64000))
        listener.bind(self.addr)
        listener.listen(1)

        # create accept socket
        accepter = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # An overlapped
        overlapped = pywintypes.OVERLAPPED()
        overlapped.hEvent = win32event.CreateEvent(None, 0, 0, None)
        # accept the connection.
        if expect_payload:
            buf_size = 1024
        else:
            # when we don't expect data we must be careful to only pass the
            # exact number of bytes for the endpoint data...
            buf_size = win32file.CalculateSocketEndPointSize(listener)

        buf = win32file.AllocateReadBuffer(buf_size)
        win32file.AcceptEx(listener, accepter, buf, overlapped)
        # wait for the connection or our test to fail.
        events = giveup_event, overlapped.hEvent
        rc = win32event.WaitForMultipleObjects(events, False, 2000)
        if rc == win32event.WAIT_TIMEOUT:
            self.fail("timed out waiting for a connection")
        if rc == win32event.WAIT_OBJECT_0:
            # Our main thread running the test failed and will never connect.
            return
        # must be a connection.
        nbytes = win32file.GetOverlappedResult(listener.fileno(), overlapped, False)
        if expect_payload:
            self.request = buf[:nbytes]
        accepter.send(str2bytes('some expected response'))

    def test_connect_with_payload(self):
        giveup_event = win32event.CreateEvent(None, 0, 0, None)
        t = threading.Thread(target=self.connect_thread_runner,
                             args=(True, giveup_event))
        t.start()
        time.sleep(0.1)
        s2 = socket.socket()
        ol = pywintypes.OVERLAPPED()
        s2.bind(('0.0.0.0', 0))  # connectex requires the socket be bound beforehand
        try:
            win32file.ConnectEx(s2, self.addr, ol, str2bytes("some expected request"))
        except win32file.error as exc:
            win32event.SetEvent(giveup_event)
            if exc.winerror == 10022:  # WSAEINVAL
                raise TestSkipped("ConnectEx is not available on this platform")
            raise  # some error we don't expect.
        win32file.GetOverlappedResult(s2.fileno(), ol, 1)
        ol = pywintypes.OVERLAPPED()
        buff = win32file.AllocateReadBuffer(1024)
        win32file.WSARecv(s2, buff, ol, 0)
        length = win32file.GetOverlappedResult(s2.fileno(), ol, 1)
        self.response = buff[:length]
        self.assertEqual(self.response, str2bytes('some expected response'))
        self.assertEqual(self.request, str2bytes('some expected request'))
        t.join(5)
        self.assertFalse(t.is_alive(), "worker thread didn't terminate")

    def test_connect_without_payload(self):
        giveup_event = win32event.CreateEvent(None, 0, 0, None)
        t = threading.Thread(target=self.connect_thread_runner,
                             args=(False, giveup_event))
        t.start()
        time.sleep(0.1)
        s2 = socket.socket()
        ol = pywintypes.OVERLAPPED()
        s2.bind(('0.0.0.0', 0))  # connectex requires the socket be bound beforehand
        try:
            win32file.ConnectEx(s2, self.addr, ol)
        except win32file.error as exc:
            win32event.SetEvent(giveup_event)
            if exc.winerror == 10022:  # WSAEINVAL
                raise TestSkipped("ConnectEx is not available on this platform")
            raise  # some error we don't expect.
        win32file.GetOverlappedResult(s2.fileno(), ol, 1)
        ol = pywintypes.OVERLAPPED()
        buff = win32file.AllocateReadBuffer(1024)
        win32file.WSARecv(s2, buff, ol, 0)
        length = win32file.GetOverlappedResult(s2.fileno(), ol, 1)
        self.response = buff[:length]
        self.assertEqual(self.response, str2bytes('some expected response'))
        t.join(5)
        self.assertFalse(t.is_alive(), "worker thread didn't terminate")
class TestTransmit(unittest.TestCase):
    """Tests win32file.TransmitFile, with head/tail buffer combinations."""

    def test_transmit(self):
        import binascii
        # 'raw' was previously named 'bytes', shadowing the builtin.
        raw = os.urandom(1024 * 1024)
        val = binascii.hexlify(raw)
        val_length = len(val)
        f = tempfile.TemporaryFile()
        f.write(val)

        def runner():
            s1 = socket.socket()
            self.addr = ('localhost', random.randint(10000, 64000))
            s1.bind(self.addr)
            s1.listen(1)
            cli, addr = s1.accept()
            buf = 1
            self.request = []
            while buf:
                buf = cli.recv(1024 * 100)
                self.request.append(buf)

        th = threading.Thread(target=runner)
        th.start()
        time.sleep(0.5)
        s2 = socket.socket()
        s2.connect(self.addr)

        aaa = str2bytes("[AAA]")
        bbb = str2bytes("[BBB]")
        ccc = str2bytes("[CCC]")
        ddd = str2bytes("[DDD]")
        empty = str2bytes("")

        def transmit(*extra):
            # Transmit the whole temp file plus optional head/tail buffers,
            # returning the number of bytes actually sent.
            ol = pywintypes.OVERLAPPED()
            f.seek(0)
            win32file.TransmitFile(s2, win32file._get_osfhandle(f.fileno()),
                                   val_length, 0, ol, 0, *extra)
            return win32file.GetOverlappedResult(s2.fileno(), ol, 1)

        length = 0
        length += transmit()
        length += transmit(aaa, bbb)
        length += transmit(empty, empty)
        length += transmit(None, ccc)
        length += transmit(ddd)

        s2.close()
        th.join()
        f.close()
        buf = str2bytes('').join(self.request)
        self.assertEqual(length, len(buf))
        expected = val + aaa + val + bbb + val + val + ccc + ddd + val
        self.assertEqual(type(expected), type(buf))
        self.assertTrue(expected == buf)
class TestWSAEnumNetworkEvents(unittest.TestCase):
    """Tests for win32file.WSAEnumNetworkEvents / WSAEventSelect."""
    def test_basics(self):
        # Argument validation: sockets (optionally with an event) are
        # accepted; anything else must raise TypeError or a winsock error.
        s = socket.socket()
        e = win32event.CreateEvent(None, 1, 0, None)
        win32file.WSAEventSelect(s, e, 0)
        self.assertEquals(win32file.WSAEnumNetworkEvents(s), {})
        self.assertEquals(win32file.WSAEnumNetworkEvents(s, e), {})
        self.assertRaises(TypeError, win32file.WSAEnumNetworkEvents, s, e, 3)
        self.assertRaises(TypeError, win32file.WSAEnumNetworkEvents, s, "spam")
        self.assertRaises(TypeError, win32file.WSAEnumNetworkEvents, "spam", e)
        self.assertRaises(TypeError, win32file.WSAEnumNetworkEvents, "spam")
        # A genuine OS handle that is not a socket must be rejected with
        # WSAENOTSOCK rather than TypeError.
        # NOTE(review): f is never closed; consider 'with open("NUL") as f:'.
        f = open("NUL")
        h = win32file._get_osfhandle(f.fileno())
        self.assertRaises(win32file.error, win32file.WSAEnumNetworkEvents, h)
        self.assertRaises(win32file.error, win32file.WSAEnumNetworkEvents, s, h)
        try:
            win32file.WSAEnumNetworkEvents(h)
        except win32file.error as e:
            self.assertEquals(e.winerror, win32file.WSAENOTSOCK)
        try:
            win32file.WSAEnumNetworkEvents(s, h)
        except win32file.error as e:
            # According to the docs it would seem reasonable that
            # this would fail with WSAEINVAL, but it doesn't.
            self.assertEquals(e.winerror, win32file.WSAENOTSOCK)
    def test_functional(self):
        # This is not really a unit test, but it does exercise the code
        # quite well and can serve as an example of WSAEventSelect and
        # WSAEnumNetworkEvents usage.
        # Listening socket: signal its event on accept/close.
        port = socket.socket()
        port.setblocking(0)
        port_event = win32event.CreateEvent(None, 0, 0, None)
        win32file.WSAEventSelect(port, port_event,
                                 win32file.FD_ACCEPT |
                                 win32file.FD_CLOSE)
        port.bind(("127.0.0.1", 0))
        port.listen(10)
        # Client socket: non-blocking connect, watched for all events.
        client = socket.socket()
        client.setblocking(0)
        client_event = win32event.CreateEvent(None, 0, 0, None)
        win32file.WSAEventSelect(client, client_event,
                                 win32file.FD_CONNECT |
                                 win32file.FD_READ |
                                 win32file.FD_WRITE |
                                 win32file.FD_CLOSE)
        # Non-blocking connect is expected to report WSAEWOULDBLOCK.
        err = client.connect_ex(port.getsockname())
        self.assertEquals(err, win32file.WSAEWOULDBLOCK)
        res = win32event.WaitForSingleObject(port_event, 1000)
        self.assertEquals(res, win32event.WAIT_OBJECT_0)
        events = win32file.WSAEnumNetworkEvents(port, port_event)
        self.assertEquals(events, {win32file.FD_ACCEPT: 0})
        server, addr = port.accept()
        server.setblocking(0)
        server_event = win32event.CreateEvent(None, 1, 0, None)
        win32file.WSAEventSelect(server, server_event,
                                 win32file.FD_READ |
                                 win32file.FD_WRITE |
                                 win32file.FD_CLOSE)
        res = win32event.WaitForSingleObject(server_event, 1000)
        self.assertEquals(res, win32event.WAIT_OBJECT_0)
        events = win32file.WSAEnumNetworkEvents(server, server_event)
        self.assertEquals(events, {win32file.FD_WRITE: 0})
        res = win32event.WaitForSingleObject(client_event, 1000)
        self.assertEquals(res, win32event.WAIT_OBJECT_0)
        events = win32file.WSAEnumNetworkEvents(client, client_event)
        self.assertEquals(events, {win32file.FD_CONNECT: 0,
                                   win32file.FD_WRITE: 0})
        # Fill the socket buffers (up to 16MB) until send would block.
        sent = 0
        data = str2bytes("x") * 16 * 1024
        while sent < 16 * 1024 * 1024:
            try:
                sent += client.send(data)
            except socket.error as e:
                if e.args[0] == win32file.WSAEINTR:
                    continue
                elif e.args[0] in (win32file.WSAEWOULDBLOCK, win32file.WSAENOBUFS):
                    break
                else:
                    raise
        else:
            self.fail("could not find socket buffer limit")
        # Without passing the event, no events are reported (and none reset).
        events = win32file.WSAEnumNetworkEvents(client)
        self.assertEquals(events, {})
        res = win32event.WaitForSingleObject(server_event, 1000)
        self.assertEquals(res, win32event.WAIT_OBJECT_0)
        events = win32file.WSAEnumNetworkEvents(server, server_event)
        self.assertEquals(events, {win32file.FD_READ: 0})
        # Drain everything the client managed to send.
        received = 0
        while received < sent:
            try:
                received += len(server.recv(16 * 1024))
            except socket.error as e:
                if e.args[0] in [win32file.WSAEINTR, win32file.WSAEWOULDBLOCK]:
                    continue
                else:
                    raise
        self.assertEquals(received, sent)
        events = win32file.WSAEnumNetworkEvents(server)
        self.assertEquals(events, {})
        res = win32event.WaitForSingleObject(client_event, 1000)
        self.assertEquals(res, win32event.WAIT_OBJECT_0)
        events = win32file.WSAEnumNetworkEvents(client, client_event)
        self.assertEquals(events, {win32file.FD_WRITE: 0})
        # Half-close from the client; the server should see FD_CLOSE.
        client.shutdown(socket.SHUT_WR)
        res = win32event.WaitForSingleObject(server_event, 1000)
        self.assertEquals(res, win32event.WAIT_OBJECT_0)
        # strange timing issues...
        for i in range(5):
            events = win32file.WSAEnumNetworkEvents(server, server_event)
            if events: break
            win32api.Sleep(100)
        else:
            raise AssertionError("failed to get events")
        self.assertEquals(events, {win32file.FD_CLOSE: 0})
        events = win32file.WSAEnumNetworkEvents(client)
        self.assertEquals(events, {})
        # Full close from the server; the client should see FD_CLOSE.
        server.close()
        res = win32event.WaitForSingleObject(client_event, 1000)
        self.assertEquals(res, win32event.WAIT_OBJECT_0)
        events = win32file.WSAEnumNetworkEvents(client, client_event)
        self.assertEquals(events, {win32file.FD_CLOSE: 0})
        client.close()
        events = win32file.WSAEnumNetworkEvents(port)
        self.assertEquals(events, {})
if __name__ == '__main__':
    # Run the whole module's test suite through the shared harness.
    testmain()
| |
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id: $
"""Text formatting, layout and display.
This module provides classes for loading styled documents from text files,
HTML files and a pyglet-specific markup format. Documents can be styled with
multiple fonts, colours, styles, text sizes, margins, paragraph alignments,
and so on.
Using the layout classes, documents can be laid out on a single line or
word-wrapped to fit a rectangle. A layout can then be efficiently drawn in
a window or updated incrementally (for example, to support interactive text
editing).
The label classes provide a simple interface for the common case where an
application simply needs to display some text in a window.
A plain text label can be created with::
label = pyglet.text.Label('Hello, world',
font_name='Times New Roman',
font_size=36,
x=10, y=10)
Alternatively, a styled text label using HTML can be created with::
label = pyglet.text.HTMLLabel('<b>Hello</b>, <i>world</i>',
x=10, y=10)
Either label can then be drawn at any time with::
label.draw()
For details on the subset of HTML supported, see `pyglet.text.formats.html`.
Refer to the Programming Guide for advanced usage of the document and layout
classes, including interactive editing, embedding objects within documents and
creating scrollable layouts.
.. versionadded:: 1.1
"""
from builtins import object
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import os.path
import pyglet
from pyglet.text import layout, document, caret
class DocumentDecodeException(Exception):
    """Raised when document text cannot be decoded."""
class DocumentDecoder(object):
    """Interface for document decoders.

    Concrete decoders override `decode` to turn encoded text into a
    document instance.
    """

    def decode(self, text, location=None):
        """Decode document text into a document.

        :Parameters:
            `text` : str
                Text to decode
            `location` : `Location`
                Base path used to resolve additional resources referenced
                within the document (for example, HTML images).

        :rtype: `AbstractDocument`
        """
        raise NotImplementedError('abstract')
def get_decoder(filename, mimetype=None):
    """Return a document decoder for the given filename and MIME type.

    When `mimetype` is omitted it is guessed from the filename extension
    (``.htm``/``.html``/``.xhtml`` map to HTML, everything else to plain
    text).  Supported MIME types:

    ``text/plain``
        Plain text
    ``text/html``
        HTML 4 Transitional
    ``text/vnd.pyglet-attributed``
        Attributed text; see `pyglet.text.formats.attributed`

    `DocumentDecodeException` is raised for any other MIME type.

    :Parameters:
        `filename` : str
            Filename to guess the MIME type from; ignored when a MIME type
            is given explicitly.
        `mimetype` : str
            MIME type to look up, or ``None`` to guess from the filename.

    :rtype: `DocumentDecoder`
    """
    if mimetype is None:
        ext = os.path.splitext(filename)[1].lower()
        mimetype = 'text/html' if ext in ('.htm', '.html', '.xhtml') \
            else 'text/plain'
    # Import the format module lazily so only the needed decoder loads.
    if mimetype == 'text/plain':
        from pyglet.text.formats import plaintext
        return plaintext.PlainTextDecoder()
    if mimetype == 'text/html':
        from pyglet.text.formats import html
        return html.HTMLDecoder()
    if mimetype == 'text/vnd.pyglet-attributed':
        from pyglet.text.formats import attributed
        return attributed.AttributedTextDecoder()
    raise DocumentDecodeException('Unknown format "%s"' % mimetype)
def load(filename, file=None, mimetype=None):
    """Load a document from a file.

    :Parameters:
        `filename` : str
            Filename of the document to load.
        `file` : file-like object
            Already-open file containing the encoded data.  When omitted,
            `filename` is opened and read from disk.
        `mimetype` : str
            MIME type of the document.  When omitted, it is guessed from
            the filename extension; see `get_decoder` for supported types.

    :rtype: `AbstractDocument`
    """
    decoder = get_decoder(filename, mimetype)
    if file:
        content = file.read()
        file.close()
    else:
        with open(filename) as f:
            content = f.read()
    # Bytes input is decoded to str before being handed to the decoder.
    if hasattr(content, "decode"):
        content = content.decode()
    base_dir = os.path.dirname(filename)
    location = pyglet.resource.FileLocation(base_dir)
    return decoder.decode(content, location)
def decode_html(text, location=None):
    """Create a document directly from HTML-formatted text.

    :Parameters:
        `text` : str
            HTML data to decode.
        `location` : str
            Base path used to resolve additional resources referenced from
            the document (e.g., images).

    :rtype: `FormattedDocument`
    """
    return get_decoder(None, 'text/html').decode(text, location)
def decode_attributed(text):
    """Create a document directly from attributed text.

    See `pyglet.text.formats.attributed` for the attributed text format.

    :Parameters:
        `text` : str
            Attributed text to decode.

    :rtype: `FormattedDocument`
    """
    return get_decoder(None, 'text/vnd.pyglet-attributed').decode(text)
def decode_text(text):
    """Create a document directly from plain text.

    :Parameters:
        `text` : str
            Plain text to initialise the document with.

    :rtype: `UnformattedDocument`
    """
    return get_decoder(None, 'text/plain').decode(text)
class DocumentLabel(layout.TextLayout):
    """Base label class.

    A label is a layout that exposes convenience methods for manipulating the
    associated document.  All style accessors below apply to the WHOLE
    document (range 0..len(text)).
    """
    def __init__(self, document=None,
                 x=0, y=0, width=None, height=None,
                 anchor_x='left', anchor_y='baseline',
                 multiline=False, dpi=None, batch=None, group=None):
        """Create a label for a given document.

        :Parameters:
            `document` : `AbstractDocument`
                Document to attach to the layout.
            `x` : int
                X coordinate of the label.
            `y` : int
                Y coordinate of the label.
            `width` : int
                Width of the label in pixels, or None
            `height` : int
                Height of the label in pixels, or None
            `anchor_x` : str
                Anchor point of the X coordinate: one of ``"left"``,
                ``"center"`` or ``"right"``.
            `anchor_y` : str
                Anchor point of the Y coordinate: one of ``"bottom"``,
                ``"baseline"``, ``"center"`` or ``"top"``.
            `multiline` : bool
                If True, the label will be word-wrapped and accept newline
                characters. You must also set the width of the label.
            `dpi` : float
                Resolution of the fonts in this layout. Defaults to 96.
            `batch` : `~pyglet.graphics.Batch`
                Optional graphics batch to add the label to.
            `group` : `~pyglet.graphics.Group`
                Optional graphics group to use.
        """
        super(DocumentLabel, self).__init__(document,
                                            width=width, height=height,
                                            multiline=multiline,
                                            dpi=dpi, batch=batch, group=group)
        # Position/anchor are written to the base class's private fields
        # directly, then a single _update() lays the text out; this avoids
        # one re-layout per property assignment.
        self._x = x
        self._y = y
        self._anchor_x = anchor_x
        self._anchor_y = anchor_y
        self._update()
    @property
    def text(self):
        """The text of the label.

        :type: str
        """
        return self.document.text
    @text.setter
    def text(self, text):
        self.document.text = text
    @property
    def color(self):
        """Text color.

        Color is a 4-tuple of RGBA components, each in range [0, 255].

        :type: (int, int, int, int)
        """
        return self.document.get_style('color')
    @color.setter
    def color(self, color):
        self.document.set_style(0, len(self.document.text),
                                {'color': color})
    @property
    def font_name(self):
        """Font family name.

        The font name, as passed to :py:func:`pyglet.font.load`. A list of names can
        optionally be given: the first matching font will be used.

        :type: str or list
        """
        return self.document.get_style('font_name')
    @font_name.setter
    def font_name(self, font_name):
        self.document.set_style(0, len(self.document.text),
                                {'font_name': font_name})
    @property
    def font_size(self):
        """Font size, in points.

        :type: float
        """
        return self.document.get_style('font_size')
    @font_size.setter
    def font_size(self, font_size):
        self.document.set_style(0, len(self.document.text),
                                {'font_size': font_size})
    @property
    def bold(self):
        """Bold font style.

        :type: bool
        """
        return self.document.get_style('bold')
    @bold.setter
    def bold(self, bold):
        self.document.set_style(0, len(self.document.text),
                                {'bold': bold})
    @property
    def italic(self):
        """Italic font style.

        :type: bool
        """
        return self.document.get_style('italic')
    @italic.setter
    def italic(self, italic):
        self.document.set_style(0, len(self.document.text),
                                {'italic': italic})
    def get_style(self, name):
        """Get a document style value by name.

        If the document has more than one value of the named style,
        `pyglet.text.document.STYLE_INDETERMINATE` is returned.

        :Parameters:
            `name` : str
                Style name to query. See documentation for
                `pyglet.text.layout` for known style names.

        :rtype: object
        """
        return self.document.get_style_range(name, 0, len(self.document.text))
    def set_style(self, name, value):
        """Set a document style value by name over the whole document.

        :Parameters:
            `name` : str
                Name of the style to set. See documentation for
                `pyglet.text.layout` for known style names.
            `value` : object
                Value of the style.
        """
        self.document.set_style(0, len(self.document.text), {name: value})
class Label(DocumentLabel):
    """Plain text label."""

    def __init__(self, text='',
                 font_name=None, font_size=None, bold=False, italic=False,
                 color=(255, 255, 255, 255),
                 x=0, y=0, width=None, height=None,
                 anchor_x='left', anchor_y='baseline',
                 align='left',
                 multiline=False, dpi=None, batch=None, group=None):
        """Create a plain text label.

        :Parameters:
            `text` : str
                Text to display.
            `font_name` : str or list
                Font family name(s); the first matching name is used.
            `font_size` : float
                Font size, in points.
            `bold` : bool
                Bold font style.
            `italic` : bool
                Italic font style.
            `color` : (int, int, int, int)
                Font colour, as RGBA components in range [0, 255].
            `x` : int
                X coordinate of the label.
            `y` : int
                Y coordinate of the label.
            `width` : int
                Width of the label in pixels, or None
            `height` : int
                Height of the label in pixels, or None
            `anchor_x` : str
                Anchor point of the X coordinate: one of ``"left"``,
                ``"center"`` or ``"right"``.
            `anchor_y` : str
                Anchor point of the Y coordinate: one of ``"bottom"``,
                ``"baseline"``, ``"center"`` or ``"top"``.
            `align` : str
                Horizontal alignment of text on a line; applies only when
                a width is supplied.  One of ``"left"``, ``"center"`` or
                ``"right"``.
            `multiline` : bool
                If True, the label will be word-wrapped and accept newline
                characters.  You must also set the width of the label.
            `dpi` : float
                Resolution of the fonts in this layout.  Defaults to 96.
            `batch` : `~pyglet.graphics.Batch`
                Optional graphics batch to add the label to.
            `group` : `~pyglet.graphics.Group`
                Optional graphics group to use.
        """
        super(Label, self).__init__(decode_text(text), x, y, width, height,
                                    anchor_x, anchor_y,
                                    multiline, dpi, batch, group)
        # Apply the font/colour/alignment styles over the whole document.
        style = {
            'font_name': font_name,
            'font_size': font_size,
            'bold': bold,
            'italic': italic,
            'color': color,
            'align': align,
        }
        self.document.set_style(0, len(self.document.text), style)
class HTMLLabel(DocumentLabel):
    """HTML formatted text label.

    A subset of HTML 4.01 is supported.  See `pyglet.text.formats.html`
    for details.
    """

    def __init__(self, text='', location=None,
                 x=0, y=0, width=None, height=None,
                 anchor_x='left', anchor_y='baseline',
                 multiline=False, dpi=None, batch=None, group=None):
        """Create a label from an HTML string.

        :Parameters:
            `text` : str
                HTML formatted text to display.
            `location` : `Location`
                Location object for loading images referred to in the
                document.  By default, the working directory is used.
            `x` : int
                X coordinate of the label.
            `y` : int
                Y coordinate of the label.
            `width` : int
                Width of the label in pixels, or None
            `height` : int
                Height of the label in pixels, or None
            `anchor_x` : str
                Anchor point of the X coordinate: one of ``"left"``,
                ``"center"`` or ``"right"``.
            `anchor_y` : str
                Anchor point of the Y coordinate: one of ``"bottom"``,
                ``"baseline"``, ``"center"`` or ``"top"``.
            `multiline` : bool
                If True, the label will be word-wrapped and render
                paragraph and line breaks.  You must also set the width.
            `dpi` : float
                Resolution of the fonts in this layout.  Defaults to 96.
            `batch` : `~pyglet.graphics.Batch`
                Optional graphics batch to add the label to.
            `group` : `~pyglet.graphics.Group`
                Optional graphics group to use.
        """
        # Keep the raw HTML and location so the text setter can re-decode.
        self._text = text
        self._location = location
        super(HTMLLabel, self).__init__(decode_html(text, location),
                                        x, y, width, height,
                                        anchor_x, anchor_y,
                                        multiline, dpi, batch, group)

    @property
    def text(self):
        """HTML formatted text of the label.

        :type: str
        """
        return self._text

    @text.setter
    def text(self, text):
        self._text = text
        self.document = decode_html(text, self._location)
| |
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 24 17:35:00 2017
@author: eyt21
This module contains the live graph classes to the acquisition window.
Attributes
----------
CHANLVL_FACTOR: float
    Width factor for the channel-level error-bar beams
TRACE_DECAY: float
The decay factor of the peak plots
TRACE_DURATION: float
Duration before the peak plots decay
"""
from PyQt5.QtGui import QColor
from PyQt5.QtCore import Qt, pyqtSignal
from cued_datalogger.api.pyqtgraph_extensions import CustomPlotWidget
import pyqtgraph as pg
import numpy as np
import math
CHANLVL_FACTOR = 0.1   # Beam width factor for the channel-level error bars
TRACE_DECAY = 0.005    # Decay increment applied to held peak traces
TRACE_DURATION = 2.0   # Seconds a peak trace is held before it starts decaying
class LiveGraph(pg.PlotWidget):
    """
    A base PlotWidget reimplemented to store extra plot information, such as
    offsets, colours, and visibility.

    Attributes
    ----------
    plotColourChanged: pyqtSignal
        Emitted when the colour of a plot changes; sends the new QColor
    plotlines: list of PlotDataItem
        The individual plot lines
    plot_xoffset: list of float
        X offset of each plot
    plot_yoffset: list of float
        Y offset of each plot
    plot_colours: list of QColor
        Current colour of each plot
    plot_visible: list of bool
        Visibility of each plot
    """
    plotColourChanged = pyqtSignal(object)

    def __init__(self, *args, **kwargs):
        """
        Reimplemented PlotWidget: black background, white axes.
        All parameters are passed through to PlotWidget.
        """
        super().__init__(*args, background='k', **kwargs)
        self.plotlines = []
        self.plot_xoffset = []
        self.plot_yoffset = []
        self.plot_colours = []
        self.plot_visible = []
        self.plotItem = self.getPlotItem()
        self.gen_default_colour()
        self.plotItem.getAxis('bottom').setPen('w')
        self.plotItem.getAxis('left').setPen('w')

    def plot(self, *arg, **kwargs):
        """
        Plot the data and make the resulting curve clickable.

        Returns
        ----------
        PlotDataItem
            The plot line, effectively
        """
        line = self.plotItem.plot(*arg, **kwargs)
        line.curve.setClickable(True, width=4)
        self.plotlines.append(line)
        return line

    def check_line(self, line):
        """
        Check whether a plot line belongs to this graph.

        Returns
        ----------
        int or None
            Index of the plot line if it exists, otherwise None
        """
        if line in self.plotlines:
            return self.plotlines.index(line)
        else:
            return None

    def toggle_plotline(self, num, visible):
        """
        Set the visibility of the specified line.

        Parameters
        ----------
        num: int
            Index of the line to be set
        visible: bool
            Visibility of the line to be set
        """
        self.plot_visible[num] = visible
        # Visibility is implemented via the pen: a None pen hides the line.
        if visible:
            self.plotlines[num].setPen(self.plot_colours[num])
        else:
            self.plotlines[num].setPen(None)

    def set_plot_colour(self, num, col):
        """
        Set the colour of the specified line.

        Parameters
        ----------
        num: int
            Index of the line to be set
        col: QColor
            Colour of the line to be set
        """
        self.plot_colours[num] = col
        # Only repaint if the line is currently shown; hidden lines keep a
        # None pen and pick up the colour when re-enabled.
        if self.plot_visible[num]:
            self.plotlines[num].setPen(col)

    def set_offset(self, num, x_off=None, y_off=None):
        """
        Set the offsets of the specified line.

        Parameters
        ----------
        num : int
            Index of the line to be set
        x_off : float, optional
            X offset of the line, applied if given
        y_off : float, optional
            Y offset of the line, applied if given
        """
        if x_off is not None:
            self.plot_xoffset[num] = x_off
        if y_off is not None:
            self.plot_yoffset[num] = y_off

    def update_line(self, num, x=None, y=None, *arg, **kwargs):
        """
        Update an existing line with new data, applying the stored offsets.

        Parameters
        ----------
        num : int
            Index of the line to update
        x : array-like
            X data of the line
        y : array-like
            Y data of the line
        Remaining arguments are passed to PlotDataItem.setData
        """
        self.plotlines[num].setData(*arg, x=x + self.plot_xoffset[num],
                                    y=y + self.plot_yoffset[num], **kwargs)

    def reset_plotlines(self):
        """
        Remove and clear all of the lines.
        """
        while self.plotlines:
            line = self.plotlines.pop()
            line.clear()
            del line

    def reset_offsets(self):
        """
        Reset X offsets to zero and stagger Y offsets as 0..n-1.
        """
        n = len(self.plotlines)
        # np.float was removed in NumPy 1.24; the builtin float dtype is
        # equivalent (float64).
        self.plot_xoffset = np.zeros(shape=(n,), dtype=float)
        self.plot_yoffset = np.arange(n, dtype=float)

    def reset_plot_visible(self):
        """
        Reset the visibilities of the plots (all visible).
        """
        self.plot_visible = [True] * len(self.plotlines)

    def reset_colour(self):
        """
        Clear the colours of the plots and regenerate the defaults.
        """
        self.plot_colours = [None] * len(self.plotlines)
        self.gen_default_colour()

    def reset_default_colour(self, num):
        """
        Restore the default colour of the specified plot and notify
        listeners via plotColourChanged.

        Parameters
        ----------
        num: int
            Index of the line to be reset
        """
        col = self.def_colours[num]
        self.set_plot_colour(num, col)
        self.plotColourChanged.emit(col)

    def gen_default_colour(self):
        """
        Generate and apply the default colours of the plots, spread over a
        red -> green -> blue colour map.
        """
        val = [0.0, 0.5, 1.0]
        colour = np.array([[255, 0, 0, 255], [0, 255, 0, 255], [0, 0, 255, 255]],
                          dtype=np.ubyte)
        self.plot_colourmap = pg.ColorMap(val, colour)
        c_list = self.plot_colourmap.getLookupTable(nPts=len(self.plotlines))
        self.def_colours = []
        for i in range(len(self.plotlines)):
            r, g, b = c_list[i]
            self.set_plot_colour(i, QColor(r, g, b))
            self.def_colours.append(QColor(r, g, b))
class TimeLiveGraph(LiveGraph):
    """
    Reimplemented LiveGraph. Displays the time domain plot

    Attributes
    ----------
    sig_hold: list of bool
        Contains whether the signal is being held
        NOTE(review): reset_sig_hold fills this with Qt.Unchecked check-state
        constants rather than bools -- presumably used as truthy flags;
        confirm against callers.
    """
    def __init__(self, *args,**kwargs):
        """
        Reimplemented from LiveGraph: sets the title and X axis label.
        """
        super().__init__(*args,**kwargs)
        self.sig_hold = []
        self.plotItem.setTitle(title="Time Plot", color = 'FFFFFF')
        self.plotItem.setLabel('bottom','Time(s)')
    def set_sig_hold(self, num, state):
        """
        Set the hold status of the specified line.

        Parameters
        ----------
        num: int
            Index of the line to be set
        state: bool
            Hold status of the line to be set
        """
        self.sig_hold[num] = state
    def reset_sig_hold(self):
        # Reset every line to the "not held" state (Qt.Unchecked == 0).
        self.sig_hold = [Qt.Unchecked] * len(self.plotlines)
class FreqLiveGraph(LiveGraph):
    """
    Reimplemented LiveGraph. Displays the frequency domain plot
    """
    def __init__(self,*args,**kwargs):
        """
        Reimplemented from LiveGraph: sets the title and X axis label, and
        disables auto-ranging so the view stays fixed during live updates.
        """
        super().__init__(*args,**kwargs)
        self.plotItem.setTitle(title="FFT Plot", color = 'FFFFFF')
        self.plotItem.setLabel('bottom','Freq(Hz)')
        self.plotItem.disableAutoRange(axis=None)
class LevelsLiveGraph(LiveGraph):
    """
    Reimplemented LiveGraph. Displays the channel levels

    Attributes
    ----------
    thresholdChanged: pyqtSignal
        Emitted when the threshold line is moved; sends the threshold value
        formatted as a string ('%.2f')
    peak_plots: list of PlotDataItem
        The lines which indicate the channels' peaks
    peak_trace: list of float
        The values of the channels' peaks
    trace_counter: list of int
        Counter for the peak plots before they decay
    chanlvl_pts: PlotDataItem
        RMS plots
    chanlvl_bars: ErrorBarItem
        Instantaneous channels' peaks plots
    threshold_line: InfiniteLine
        The line indicating the trigger threshold
    level_colourmap: ColorMap
        The colour for the peak levels (green below 0.5, red near 0.8)
    """
    thresholdChanged = pyqtSignal(str)

    def __init__(self, rec, *args, **kwargs):
        """
        Reimplemented from LiveGraph.

        Parameters
        ----------
        rec: Recorder
            The reference of the Recorder
        The rest are passed into LiveGraph
        """
        # These attributes must exist BEFORE super().__init__(): the base
        # constructor calls gen_default_colour(), which this class
        # overrides to read self.peak_plots.
        self.peak_plots = []
        self.peak_trace = []
        self.trace_counter = []
        self.trace_countlimit = 30
        self.level_colourmap = None
        super().__init__(*args, **kwargs)
        self.plotItem.setTitle(title="Channel Levels", color='FFFFFF')
        self.plotItem.setLabel('bottom', 'Amplitude')
        self.plotItem.hideAxis('left')
        # Scatter of per-channel RMS values.
        self.chanlvl_pts = self.plotItem.plot()
        # Error bars spanning RMS..instantaneous-maximum per channel.
        self.chanlvl_bars = pg.ErrorBarItem(x=np.arange(rec.channels),
                                            y=np.arange(rec.channels)*0.1,
                                            beam=CHANLVL_FACTOR/2,
                                            pen=pg.mkPen(width=3))
        self.plotItem.addItem(self.chanlvl_bars)
        baseline = pg.InfiniteLine(pos=0.0, movable=False)
        self.plotItem.addItem(baseline)
        # Draggable trigger-threshold line; bounds widened to the
        # recorder's max value in reset_channel_peaks().
        self.threshold_line = pg.InfiniteLine(pos=0.0, movable=True, bounds=[0, 1])
        self.threshold_line.sigPositionChanged.connect(self.change_threshold)
        self.plotItem.addItem(self.threshold_line)
        self.reset_channel_peaks(rec)
        val = [0.0, 0.5, 0.8]
        colour = np.array([[0, 255, 0, 255], [0, 255, 0, 255], [255, 0, 0, 255]],
                          dtype=np.ubyte)
        self.level_colourmap = pg.ColorMap(val, colour)

    def set_plot_colour(self, num, col):
        """
        Set the colour of the specified channel's RMS point.

        Parameters
        ----------
        num: int
            Index of the point to be set
        col: QColor
            Colour of the point to be set
        """
        self.plot_colours[num] = col
        self.chanlvl_pts.scatter.setBrush(col)

    def set_peaks(self, num, maximum):
        """
        Update the held peak of one channel, decaying it after the hold
        period has elapsed.

        Parameters
        ----------
        num: int
            Index of the peak to be set
        maximum: float
            Instantaneous maximum value of the peak
        """
        # Once the hold period is over, decay the held peak exponentially.
        if self.trace_counter[num] > self.trace_countlimit:
            self.peak_trace[num] = max(self.peak_trace[num]*math.exp(-self.peak_decays[num]), 0)
            self.peak_decays[num] += TRACE_DECAY
        self.trace_counter[num] += 1
        # A new maximum resets the hold and decay.
        if self.peak_trace[num] < maximum:
            self.peak_trace[num] = maximum
            self.peak_decays[num] = 0
            self.trace_counter[num] = 0
        self.peak_plots[num].setData(x=[self.peak_trace[num], self.peak_trace[num]],
                                     y=[(num-0.3), (num+0.3)])
        self.peak_plots[num].setPen(self.level_colourmap.map(self.peak_trace[num]))

    def set_channel_levels(self, value, maximum):
        """
        Update the RMS points and the RMS-to-maximum error bars.

        Parameters
        ----------
        value: array-like of float
            RMS values
        maximum: array-like of float
            Instantaneous maximum value of each channel
        """
        self.chanlvl_bars.setData(x=value, y=np.arange(len(self.peak_plots)),
                                  right=maximum-value, left=value)
        self.chanlvl_pts.setData(x=value, y=np.arange(len(self.peak_plots)))

    def change_threshold(self, arg):
        """
        Set the trigger threshold.

        If *arg* is a string, the threshold line is moved to its value;
        otherwise *arg* is the moved InfiniteLine and its value is emitted
        through thresholdChanged.

        Parameters
        ----------
        arg: str or InfiniteLine
        """
        # isinstance instead of type() == str: idiomatic, and also accepts
        # str subclasses.
        if isinstance(arg, str):
            self.threshold_line.setValue(float(arg))
        else:
            self.thresholdChanged.emit('%.2f' % arg.value())

    def reset_colour(self):
        """
        Reimplemented from LiveGraph: sized by peak_plots, not plotlines.
        """
        self.plot_colours = [None] * len(self.peak_plots)
        self.gen_default_colour()

    def reset_default_colour(self, chan):
        """
        Reimplemented from LiveGraph: returns the colour instead of
        emitting plotColourChanged.
        """
        col = self.def_colours[chan]
        self.set_plot_colour(chan, col)
        return col

    def gen_default_colour(self):
        """
        Reimplemented from LiveGraph: colours are stored only, not applied
        to plot lines (the scatter brush is set via set_plot_colour).
        """
        val = [0.0, 0.5, 1.0]
        colour = np.array([[255, 0, 0, 255], [0, 255, 0, 255], [0, 0, 255, 255]],
                          dtype=np.ubyte)
        plot_colourmap = pg.ColorMap(val, colour)
        c_list = plot_colourmap.getLookupTable(nPts=len(self.peak_plots))
        self.def_colours = []
        for i in range(len(self.peak_plots)):
            r, g, b = c_list[i]
            self.plot_colours[i] = QColor(r, g, b)
            self.def_colours.append(QColor(r, g, b))

    def reset_channel_levels(self):
        """
        Reset the channel levels plot.
        """
        self.chanlvl_pts.clear()
        self.chanlvl_pts = self.plotItem.plot(pen=None, symbol='o',
                                              symbolBrush=self.plot_colours,
                                              symbolPen=None)

    def reset_channel_peaks(self, rec):
        """
        Reset the channel peaks plot for rec.channels channels and rescale
        the view/threshold bounds to the recorder's value range.
        """
        while self.peak_plots:
            line = self.peak_plots.pop()
            line.clear()
            del line
        self.peak_trace = np.zeros(rec.channels)
        self.peak_decays = np.zeros(rec.channels)
        self.trace_counter = np.zeros(rec.channels)
        # Number of chunks the peak is held before it starts decaying.
        self.trace_countlimit = TRACE_DURATION * rec.rate // rec.chunk_size
        self.threshold_line.setBounds((0, rec.max_value))
        for i in range(rec.channels):
            self.peak_plots.append(
                self.plotItem.plot(x=[self.peak_trace[i], self.peak_trace[i]],
                                   y=[(i-0.3), (i+0.3)]))
        self.plotItem.setRange(xRange=(0, rec.max_value+0.1),
                               yRange=(-0.5, (rec.channels+5-0.5)))
        self.plotItem.setLimits(xMin=-0.1, xMax=rec.max_value+0.1,
                                yMin=-0.5, yMax=(rec.channels+5-0.5))
| |
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.compat import json
from boto.exception import JSONResponseError
from boto.connection import AWSAuthConnection
from boto.regioninfo import RegionInfo
from boto.cloudsearchdomain import exceptions
class CloudSearchDomainConnection(AWSAuthConnection):
    """
    You use the AmazonCloudSearch2013 API to upload documents to a
    search domain and search those documents.

    The endpoints for submitting `UploadDocuments`, `Search`, and
    `Suggest` requests are domain-specific. To get the endpoints for
    your domain, use the Amazon CloudSearch configuration service
    `DescribeDomains` action. The domain endpoints are also displayed
    on the domain dashboard in the Amazon CloudSearch console. You
    submit suggest requests to the search endpoint.

    For more information, see the `Amazon CloudSearch Developer
    Guide`_.
    """
    APIVersion = "2013-01-01"
    AuthServiceName = 'cloudsearch'
    DefaultRegionName = "us-east-1"
    DefaultRegionEndpoint = "cloudsearch.us-east-1.amazonaws.com"
    ResponseError = JSONResponseError

    # Maps service fault names in error responses to exception classes.
    _faults = {
        "SearchException": exceptions.SearchException,
        "DocumentServiceException": exceptions.DocumentServiceException,
    }

    def __init__(self, **kwargs):
        region = kwargs.get('region')
        if not region:
            region = RegionInfo(self, self.DefaultRegionName,
                                self.DefaultRegionEndpoint)
        else:
            del kwargs['region']
        # A domain-specific endpoint is mandatory: unlike most AWS services,
        # CloudSearch domain requests cannot be sent to a region-wide host.
        if kwargs.get('host', None) is None:
            raise ValueError(
                'The argument, host, must be provided when creating a '
                'CloudSearchDomainConnection because its methods require the '
                'specific domain\'s endpoint in order to successfully make '
                'requests to that CloudSearch Domain.'
            )
        super(CloudSearchDomainConnection, self).__init__(**kwargs)
        self.region = region

    def _required_auth_capability(self):
        # CloudSearch domain endpoints require SigV4 request signing.
        return ['hmac-v4']

    def search(self, query, cursor=None, expr=None, facet=None,
               filter_query=None, highlight=None, partial=None,
               query_options=None, query_parser=None, ret=None, size=None,
               sort=None, start=None):
        """
        Retrieves a list of documents that match the specified search
        criteria. How you specify the search criteria depends on which
        query parser you use. Amazon CloudSearch supports four query
        parsers:

        + `simple`: search all `text` and `text-array` fields for the
          specified string. Search for phrases, individual terms, and
          prefixes.
        + `structured`: search specific fields, construct compound
          queries using Boolean operators, and use advanced features
          such as term boosting and proximity searching.
        + `lucene`: specify search criteria using the Apache Lucene
          query parser syntax.
        + `dismax`: specify search criteria using the simplified
          subset of the Apache Lucene query parser syntax defined by the
          DisMax query parser.

        For more information, see `Searching Your Data`_ in the Amazon
        CloudSearch Developer Guide .

        The endpoint for submitting `Search` requests is domain-
        specific. You submit search requests to a domain's search
        endpoint. To get the search endpoint for your domain, use the
        Amazon CloudSearch configuration service `DescribeDomains`
        action. A domain's endpoints are also displayed on the domain
        dashboard in the Amazon CloudSearch console.

        :type cursor: string
        :param cursor: Retrieves a cursor value you can use to page through
            large result sets. Use the `size` parameter to control the number
            of hits to include in each response. You can specify either the
            `cursor` or `start` parameter in a request; they are mutually
            exclusive. To get the first cursor, set the cursor value to
            `initial`. In subsequent requests, specify the cursor value
            returned in the hits section of the response.
            For more information, see `Paginating Results`_ in the Amazon
            CloudSearch Developer Guide .

        :type expr: dict
        :param expr: Defines one or more numeric expressions that can be used
            to sort results or specify search or filter criteria. You can also
            specify expressions as return fields. This implementation expects
            a dict mapping expression names to expression strings; each entry
            is sent as an `expr.NAME` query parameter.
            For more information about defining and using expressions, see
            `Configuring Expressions`_ in the Amazon CloudSearch Developer
            Guide .

        :type facet: dict
        :param facet: Specifies one or more fields for which to get facet
            information, and options that control how the facet information is
            returned. Each specified field must be facet-enabled in the domain
            configuration. This implementation expects a dict mapping facet
            parameter names to their option values. The options are specified
            in JSON using the form `{"FIELD":{"OPTION":VALUE,"OPTION:"STRING"}
            ,"FIELD":{"OPTION":VALUE,"OPTION":"STRING"}}`.
            You can specify the following faceting options:

            + `buckets` specifies an array of the facet values or ranges to
              count. Ranges are specified using the same syntax that you use
              to search for a range of values. For more information, see `
              Searching for a Range of Values`_ in the Amazon CloudSearch
              Developer Guide . Buckets are returned in the order they are
              specified in the request. The `sort` and `size` options are not
              valid if you specify `buckets`.
            + `size` specifies the maximum number of facets to include in the
              results. By default, Amazon CloudSearch returns counts for the
              top 10. The `size` parameter is only valid when you specify the
              `sort` option; it cannot be used in conjunction with `buckets`.
            + `sort` specifies how you want to sort the facets in the results:
              `bucket` or `count`. Specify `bucket` to sort alphabetically or
              numerically by facet value (in ascending order). Specify `count`
              to sort by the facet counts computed for each facet value (in
              descending order). To retrieve facet counts for particular
              values or ranges of values, use the `buckets` option instead of
              `sort`.

            If no facet options are specified, facet counts are computed for
            all field values, the facets are sorted by facet count, and the
            top 10 facets are returned in the results.
            For more information, see `Getting and Using Facet Information`_
            in the Amazon CloudSearch Developer Guide .

        :type filter_query: string
        :param filter_query: Specifies a structured query that filters the
            results of a search without affecting how the results are scored
            and sorted. You use `filterQuery` in conjunction with the `query`
            parameter to filter the documents that match the constraints
            specified in the `query` parameter. Specifying a filter controls
            only which matching documents are included in the results, it has
            no effect on how they are scored and sorted. The `filterQuery`
            parameter supports the full structured query syntax.
            For more information about using filters, see `Filtering Matching
            Documents`_ in the Amazon CloudSearch Developer Guide .

        :type highlight: string
        :param highlight: Retrieves highlights for matches in the specified
            `text` or `text-array` fields. Each specified field must be
            highlight enabled in the domain configuration. The fields and
            options are specified in JSON using the form `{"FIELD":{"OPTION":
            VALUE,"OPTION:"STRING"},"FIELD":{"OPTION":VALUE,"OPTION":"STRING"}
            }`.
            You can specify the following highlight options:

            + `format`: specifies the format of the data in the text field:
              `text` or `html`. When data is returned as HTML, all
              non-alphanumeric characters are encoded. The default is `html`.
            + `max_phrases`: specifies the maximum number of occurrences of
              the search term(s) you want to highlight. By default, the first
              occurrence is highlighted.
            + `pre_tag`: specifies the string to prepend to an occurrence of a
              search term. The default for HTML highlights is `<em>`. The
              default for text highlights is `*`.
            + `post_tag`: specifies the string to append to an occurrence of a
              search term. The default for HTML highlights is `</em>`. The
              default for text highlights is `*`.

            If no highlight options are specified for a field, the returned
            field text is treated as HTML and the first match is highlighted
            with emphasis tags: `<em>search-term</em>`.

        :type partial: boolean
        :param partial: Enables partial results to be returned if one or more
            index partitions are unavailable. When your search index is
            partitioned across multiple search instances, by default Amazon
            CloudSearch only returns results if every partition can be
            queried. This means that the failure of a single search instance
            can result in 5xx (internal server) errors. When you enable
            partial results, Amazon CloudSearch returns whatever results are
            available and includes the percentage of documents searched in the
            search results (percent-searched). This enables you to more
            gracefully degrade your users' search experience. For example,
            rather than displaying no results, you could display the partial
            results and a message indicating that the results might be
            incomplete due to a temporary system outage.

        :type query: string
        :param query: Specifies the search criteria for the request. How you
            specify the search criteria depends on the query parser used for
            the request and the parser options specified in the `queryOptions`
            parameter. By default, the `simple` query parser is used to
            process requests. To use the `structured`, `lucene`, or `dismax`
            query parser, you must also specify the `queryParser` parameter.
            For more information about specifying search criteria, see
            `Searching Your Data`_ in the Amazon CloudSearch Developer Guide .

        :type query_options: string
        :param query_options:
            Configures options for the query parser specified in the
            `queryParser` parameter.
            The options you can configure vary according to which parser you
            use:

            + `defaultOperator`: The default operator used to combine
              individual terms in the search string. For example:
              `defaultOperator: 'or'`. For the `dismax` parser, you specify a
              percentage that represents the percentage of terms in the search
              string (rounded down) that must match, rather than a default
              operator. A value of `0%` is the equivalent to OR, and a value
              of `100%` is equivalent to AND. The percentage must be specified
              as a value in the range 0-100 followed by the percent (%)
              symbol. For example, `defaultOperator: 50%`. Valid values:
              `and`, `or`, a percentage in the range 0%-100% ( `dismax`).
              Default: `and` ( `simple`, `structured`, `lucene`) or `100` (
              `dismax`). Valid for: `simple`, `structured`, `lucene`, and
              `dismax`.
            + `fields`: An array of the fields to search when no fields are
              specified in a search. If no fields are specified in a search
              and this option is not specified, all text and text-array fields
              are searched. You can specify a weight for each field to control
              the relative importance of each field when Amazon CloudSearch
              calculates relevance scores. To specify a field weight, append a
              caret ( `^`) symbol and the weight to the field name. For
              example, to boost the importance of the `title` field over the
              `description` field you could specify:
              `"fields":["title^5","description"]`. Valid values: The name of
              any configured field and an optional numeric value greater than
              zero. Default: All `text` and `text-array` fields. Valid for:
              `simple`, `structured`, `lucene`, and `dismax`.
            + `operators`: An array of the operators or special characters you
              want to disable for the simple query parser. If you disable the
              `and`, `or`, or `not` operators, the corresponding operators (
              `+`, `|`, `-`) have no special meaning and are dropped from the
              search string. Similarly, disabling `prefix` disables the
              wildcard operator ( `*`) and disabling `phrase` disables the
              ability to search for phrases by enclosing phrases in double
              quotes. Disabling precedence disables the ability to control
              order of precedence using parentheses. Disabling `near` disables
              the ability to use the ~ operator to perform a sloppy phrase
              search. Disabling the `fuzzy` operator disables the ability to
              use the ~ operator to perform a fuzzy search. `escape` disables
              the ability to use a backslash ( `\`) to escape special
              characters within the search string. Disabling whitespace is an
              advanced option that prevents the parser from tokenizing on
              whitespace, which can be useful for Vietnamese. (It prevents
              Vietnamese words from being split incorrectly.) For example, you
              could disable all operators other than the phrase operator to
              support just simple term and phrase queries:
              `"operators":["and","not","or", "prefix"]`. Valid values: `and`,
              `escape`, `fuzzy`, `near`, `not`, `or`, `phrase`, `precedence`,
              `prefix`, `whitespace`. Default: All operators and special
              characters are enabled. Valid for: `simple`.
            + `phraseFields`: An array of the `text` or `text-array` fields
              you want to use for phrase searches. When the terms in the
              search string appear in close proximity within a field, the
              field scores higher. You can specify a weight for each field to
              boost that score. The `phraseSlop` option controls how much the
              matches can deviate from the search string and still be boosted.
              To specify a field weight, append a caret ( `^`) symbol and the
              weight to the field name. For example, to boost phrase matches
              in the `title` field over the `abstract` field, you could
              specify: `"phraseFields":["title^3", "plot"]` Valid values: The
              name of any `text` or `text-array` field and an optional numeric
              value greater than zero. Default: No fields. If you don't
              specify any fields with `phraseFields`, proximity scoring is
              disabled even if `phraseSlop` is specified. Valid for: `dismax`.
            + `phraseSlop`: An integer value that specifies how much matches
              can deviate from the search phrase and still be boosted
              according to the weights specified in the `phraseFields` option;
              for example, `phraseSlop: 2`. You must also specify
              `phraseFields` to enable proximity scoring. Valid values:
              positive integers. Default: 0. Valid for: `dismax`.
            + `explicitPhraseSlop`: An integer value that specifies how much a
              match can deviate from the search phrase when the phrase is
              enclosed in double quotes in the search string. (Phrases that
              exceed this proximity distance are not considered a match.) For
              example, to specify a slop of three for dismax phrase queries,
              you would specify `"explicitPhraseSlop":3`. Valid values:
              positive integers. Default: 0. Valid for: `dismax`.
            + `tieBreaker`: When a term in the search string is found in a
              document's field, a score is calculated for that field based on
              how common the word is in that field compared to other
              documents. If the term occurs in multiple fields within a
              document, by default only the highest scoring field contributes
              to the document's overall score. You can specify a `tieBreaker`
              value to enable the matches in lower-scoring fields to
              contribute to the document's score. That way, if two documents
              have the same max field score for a particular term, the score
              for the document that has matches in more fields will be higher.
              The formula for calculating the score with a tieBreaker is `(max
              field score) + (tieBreaker) * (sum of the scores for the rest of
              the matching fields)`. Set `tieBreaker` to 0 to disregard all
              but the highest scoring field (pure max): `"tieBreaker":0`. Set
              to 1 to sum the scores from all fields (pure sum):
              `"tieBreaker":1`. Valid values: 0.0 to 1.0. Default: 0.0. Valid
              for: `dismax`.

        :type query_parser: string
        :param query_parser:
            Specifies which query parser to use to process the request. If
            `queryParser` is not specified, Amazon CloudSearch uses the
            `simple` query parser.
            Amazon CloudSearch supports four query parsers:

            + `simple`: perform simple searches of `text` and `text-array`
              fields. By default, the `simple` query parser searches all
              `text` and `text-array` fields. You can specify which fields to
              search by with the `queryOptions` parameter. If you prefix a
              search term with a plus sign (+) documents must contain the term
              to be considered a match. (This is the default, unless you
              configure the default operator with the `queryOptions`
              parameter.) You can use the `-` (NOT), `|` (OR), and `*`
              (wildcard) operators to exclude particular terms, find results
              that match any of the specified terms, or search for a prefix.
              To search for a phrase rather than individual terms, enclose the
              phrase in double quotes. For more information, see `Searching
              for Text`_ in the Amazon CloudSearch Developer Guide .
            + `structured`: perform advanced searches by combining multiple
              expressions to define the search criteria. You can also search
              within particular fields, search for values and ranges of
              values, and use advanced options such as term boosting,
              `matchall`, and `near`. For more information, see `Constructing
              Compound Queries`_ in the Amazon CloudSearch Developer Guide .
            + `lucene`: search using the Apache Lucene query parser syntax.
              For more information, see `Apache Lucene Query Parser Syntax`_.
            + `dismax`: search using the simplified subset of the Apache
              Lucene query parser syntax defined by the DisMax query parser.
              For more information, see `DisMax Query Parser Syntax`_.

        :type ret: string
        :param ret: Specifies the field and expression values to include in
            the response. Multiple fields or expressions are specified as a
            comma-separated list. By default, a search response includes all
            return enabled fields ( `_all_fields`). To return only the
            document IDs for the matching documents, specify `_no_fields`. To
            retrieve the relevance score calculated for each document, specify
            `_score`.

        :type size: long
        :param size: Specifies the maximum number of search hits to include in
            the response.

        :type sort: string
        :param sort: Specifies the fields or custom expressions to use to sort
            the search results. Multiple fields or expressions are specified
            as a comma-separated list. You must specify the sort direction (
            `asc` or `desc`) for each field; for example, `year desc,title
            asc`. To use a field to sort results, the field must be
            sort-enabled in the domain configuration. Array type fields cannot
            be used for sorting. If no `sort` parameter is specified, results
            are sorted by their default relevance scores in descending order:
            `_score desc`. You can also sort by document ID ( `_id asc`) and
            version ( `_version desc`).
            For more information, see `Sorting Results`_ in the Amazon
            CloudSearch Developer Guide .

        :type start: long
        :param start: Specifies the offset of the first search hit you want to
            return. Note that the result set is zero-based; the first result
            is at index 0. You can specify either the `start` or `cursor`
            parameter in a request, they are mutually exclusive.
            For more information, see `Paginating Results`_ in the Amazon
            CloudSearch Developer Guide .
        """
        uri = '/2013-01-01/search'
        params = {}
        headers = {}
        query_params = {}
        if cursor is not None:
            query_params['cursor'] = cursor
        if expr is not None:
            # Each expression becomes its own ``expr.NAME`` query parameter.
            # Note: ``.items()`` rather than the Python-2-only ``.iteritems()``
            # so this works on both Python 2 and 3.
            for k, v in expr.items():
                query_params['expr.' + k] = v
        if facet is not None:
            for facet_field, facet_options in facet.items():
                query_params[facet_field] = facet_options
        if filter_query is not None:
            query_params['fq'] = filter_query
        if highlight is not None:
            query_params['highlight'] = highlight
        if partial is not None:
            query_params['partial'] = partial
        if query is not None:
            query_params['q'] = query
        if query_options is not None:
            query_params['q.options'] = query_options
        if query_parser is not None:
            query_params['q.parser'] = query_parser
        if ret is not None:
            # ``ret`` avoids shadowing the ``return`` keyword; the wire name
            # is still ``return``.
            query_params['return'] = ret
        if size is not None:
            query_params['size'] = size
        if sort is not None:
            query_params['sort'] = sort
        if start is not None:
            query_params['start'] = start
        return self.make_request('POST', uri, expected_status=200,
                                 data=json.dumps(params), headers=headers,
                                 params=query_params)

    def suggest(self, query, suggester, size=None):
        """
        Retrieves autocomplete suggestions for a partial query string.
        You can use suggestions enable you to display likely matches
        before users finish typing. In Amazon CloudSearch, suggestions
        are based on the contents of a particular text field. When you
        request suggestions, Amazon CloudSearch finds all of the
        documents whose values in the suggester field start with the
        specified query string. The beginning of the field must match
        the query string to be considered a match.

        For more information about configuring suggesters and
        retrieving suggestions, see `Getting Suggestions`_ in the
        Amazon CloudSearch Developer Guide .

        The endpoint for submitting `Suggest` requests is domain-
        specific. You submit suggest requests to a domain's search
        endpoint. To get the search endpoint for your domain, use the
        Amazon CloudSearch configuration service `DescribeDomains`
        action. A domain's endpoints are also displayed on the domain
        dashboard in the Amazon CloudSearch console.

        :type query: string
        :param query: Specifies the string for which you want to get
            suggestions.

        :type suggester: string
        :param suggester: Specifies the name of the suggester to use to find
            suggested matches.

        :type size: long
        :param size: Specifies the maximum number of suggestions to return.
        """
        uri = '/2013-01-01/suggest'
        params = {}
        headers = {}
        query_params = {}
        if query is not None:
            query_params['q'] = query
        if suggester is not None:
            query_params['suggester'] = suggester
        if size is not None:
            query_params['size'] = size
        return self.make_request('GET', uri, expected_status=200,
                                 data=json.dumps(params), headers=headers,
                                 params=query_params)

    def upload_documents(self, documents, content_type):
        """
        Posts a batch of documents to a search domain for indexing. A
        document batch is a collection of add and delete operations
        that represent the documents you want to add, update, or
        delete from your domain. Batches can be described in either
        JSON or XML. Each item that you want Amazon CloudSearch to
        return as a search result (such as a product) is represented
        as a document. Every document has a unique ID and one or more
        fields that contain the data that you want to search and
        return in results. Individual documents cannot contain more
        than 1 MB of data. The entire batch cannot exceed 5 MB. To get
        the best possible upload performance, group add and delete
        operations in batches that are close the 5 MB limit.
        Submitting a large volume of single-document batches can
        overload a domain's document service.

        The endpoint for submitting `UploadDocuments` requests is
        domain-specific. To get the document endpoint for your domain,
        use the Amazon CloudSearch configuration service
        `DescribeDomains` action. A domain's endpoints are also
        displayed on the domain dashboard in the Amazon CloudSearch
        console.

        For more information about formatting your data for Amazon
        CloudSearch, see `Preparing Your Data`_ in the Amazon
        CloudSearch Developer Guide . For more information about
        uploading data for indexing, see `Uploading Data`_ in the
        Amazon CloudSearch Developer Guide .

        :type documents: blob
        :param documents: A batch of documents formatted in JSON or HTML.

        :type content_type: string
        :param content_type:
            The format of the batch you are uploading. Amazon CloudSearch
            supports two document batch formats:

            + application/json
            + application/xml
        """
        uri = '/2013-01-01/documents/batch'
        headers = {}
        query_params = {}
        if content_type is not None:
            headers['Content-Type'] = content_type
        # The batch is sent verbatim as the request body; the service parses
        # it according to the supplied Content-Type.
        return self.make_request('POST', uri, expected_status=200,
                                 data=documents, headers=headers,
                                 params=query_params)

    def make_request(self, verb, resource, headers=None, data='',
                     expected_status=None, params=None):
        """Make a signed request and decode the JSON response body.

        Overrides the base connection's method so the JSON body is parsed
        for the caller and a :class:`JSONResponseError` is raised whenever
        the HTTP status differs from ``expected_status``.

        :param verb: HTTP method, e.g. ``'GET'`` or ``'POST'``.
        :param resource: request URI path.
        :param headers: optional dict of extra request headers.
        :param data: request body.
        :param expected_status: HTTP status code treated as success.
        :param params: dict of query-string parameters.
        """
        if headers is None:
            headers = {}
        response = AWSAuthConnection.make_request(
            self, verb, resource, headers=headers, data=data, params=params)
        body = json.loads(response.read().decode('utf-8'))
        if response.status == expected_status:
            return body
        else:
            raise JSONResponseError(response.status, response.reason, body)
| |
# sql/types_api.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Base types API.
"""
from .. import exc, util
from . import operators
from .visitors import Visitable, VisitableType
from .base import SchemaEventTarget
# these are back-assigned by sqltypes.
# Placeholders only: the ``sqltypes`` module assigns the real singleton
# instances / callable at import time (presumably to break a circular
# import between the two modules -- confirm in sqltypes).
BOOLEANTYPE = None
INTEGERTYPE = None
NULLTYPE = None
STRINGTYPE = None
MATCHTYPE = None
INDEXABLE = None
_resolve_value_to_type = None
class TypeEngine(Visitable):
    """The ultimate base class for all SQL datatypes.
    Common subclasses of :class:`.TypeEngine` include
    :class:`.String`, :class:`.Integer`, and :class:`.Boolean`.
    For an overview of the SQLAlchemy typing system, see
    :ref:`types_toplevel`.
    .. seealso::
    :ref:`types_toplevel`
    """
    # Marker flag identifying instances as SQLAlchemy type objects.
    _sqla_type = True
    # NOTE(review): presumably overridden to True by the NULL/absent type
    # defined in sqltypes -- confirm there.
    _isnull = False
    class Comparator(operators.ColumnOperators):
        """Base class for custom comparison operations defined at the
        type level. See :attr:`.TypeEngine.comparator_factory`.
        """
        # Fixed per-instance layout: the expression being compared and
        # its type.
        __slots__ = 'expr', 'type'
        default_comparator = None
        def __init__(self, expr):
            # ``expr`` is the column expression this comparator wraps.
            self.expr = expr
            self.type = expr.type
        # The decorator injects the named module as the first argument at
        # call time (lazy import of sqlalchemy.sql.default_comparator).
        @util.dependencies('sqlalchemy.sql.default_comparator')
        def operate(self, default_comparator, op, *other, **kwargs):
            # operator_lookup maps the operator function's __name__ to a
            # tuple whose first element is the handler; the remaining
            # elements are extra arguments appended after the caller's.
            o = default_comparator.operator_lookup[op.__name__]
            return o[0](self.expr, op, *(other + o[1:]), **kwargs)
        @util.dependencies('sqlalchemy.sql.default_comparator')
        def reverse_operate(self, default_comparator, op, other, **kwargs):
            # Same dispatch as operate(), but with reverse=True so the
            # handler swaps operand order.
            o = default_comparator.operator_lookup[op.__name__]
            return o[0](self.expr, op, other,
                        reverse=True, *o[1:], **kwargs)
        def _adapt_expression(self, op, other_comparator):
            """evaluate the return type of <self> <op> <othertype>,
            and apply any adaptations to the given operator.
            This method determines the type of a resulting binary expression
            given two source types and an operator. For example, two
            :class:`.Column` objects, both of the type :class:`.Integer`, will
            produce a :class:`.BinaryExpression` that also has the type
            :class:`.Integer` when compared via the addition (``+``) operator.
            However, using the addition operator with an :class:`.Integer`
            and a :class:`.Date` object will produce a :class:`.Date`, assuming
            "days delta" behavior by the database (in reality, most databases
            other than Postgresql don't accept this particular operation).
            The method returns a tuple of the form <operator>, <type>.
            The resulting operator and type will be those applied to the
            resulting :class:`.BinaryExpression` as the final operator and the
            right-hand side of the expression.
            Note that only a subset of operators make usage of
            :meth:`._adapt_expression`,
            including math operators and user-defined operators, but not
            boolean comparison or special SQL keywords like MATCH or BETWEEN.
            """
            # Default: the operator is unchanged and the result takes this
            # comparator's own type.
            return op, self.type
        def __reduce__(self):
            # Pickle support: rebuild via _reconstitute_comparator(expr),
            # defined elsewhere in this module.
            return _reconstitute_comparator, (self.expr, )
hashable = True
"""Flag, if False, means values from this type aren't hashable.
Used by the ORM when uniquing result lists.
"""
comparator_factory = Comparator
"""A :class:`.TypeEngine.Comparator` class which will apply
to operations performed by owning :class:`.ColumnElement` objects.
The :attr:`.comparator_factory` attribute is a hook consulted by
the core expression system when column and SQL expression operations
are performed. When a :class:`.TypeEngine.Comparator` class is
associated with this attribute, it allows custom re-definition of
all existing operators, as well as definition of new operators.
Existing operators include those provided by Python operator overloading
such as :meth:`.operators.ColumnOperators.__add__` and
:meth:`.operators.ColumnOperators.__eq__`,
those provided as standard
attributes of :class:`.operators.ColumnOperators` such as
:meth:`.operators.ColumnOperators.like`
and :meth:`.operators.ColumnOperators.in_`.
Rudimentary usage of this hook is allowed through simple subclassing
of existing types, or alternatively by using :class:`.TypeDecorator`.
See the documentation section :ref:`types_operators` for examples.
.. versionadded:: 0.8 The expression system was enhanced to support
customization of operators on a per-type level.
"""
evaluates_none = False
"""If True, the Python constant ``None`` is considered to be handled
explicitly by this type.
The ORM will use this flag to ensure that a positive value of ``None``
is definitely passed to the backend, ignoring whether or not there
are Python or server side defaults on this column.
.. versionadded:: 1.1
"""
    def compare_against_backend(self, dialect, conn_type):
        """Compare this type against the given backend type.
        This function is currently not implemented for SQLAlchemy
        types, and for all built in types will return ``None``. However,
        it can be implemented by a user-defined type
        where it can be consumed by schema comparison tools such as
        Alembic autogenerate.
        A future release of SQLAlchemy will potentially implement this method
        for builtin types as well.
        The function should return True if this type is equivalent to the
        given type; the type is typically reflected from the database
        so should be database specific. The dialect in use is also
        passed. It can also return False to assert that the type is
        not equivalent.
        :param dialect: a :class:`.Dialect` that is involved in the comparison.
        :param conn_type: the type object reflected from the backend.
        .. versionadded:: 1.0.3
        """
        # Base implementation expresses "no opinion"; comparison tools are
        # expected to treat None as "not implemented".
        return None
    def copy_value(self, value):
        """Return a copy of *value*; the base implementation is a plain
        pass-through (no copy is made)."""
        return value
    def literal_processor(self, dialect):
        """Return a conversion function for processing literal values that are
        to be rendered directly without using binds.
        This function is used when the compiler makes use of the
        "literal_binds" flag, typically used in DDL generation as well
        as in certain scenarios where backends don't accept bound parameters.
        .. versionadded:: 0.9.0
        """
        return None  # no literal-rendering conversion by default
    def bind_processor(self, dialect):
        """Return a conversion function for processing bind values.
        Returns a callable which will receive a bind parameter value
        as the sole positional argument and will return a value to
        send to the DB-API.
        If processing is not necessary, the method should return ``None``.
        :param dialect: Dialect instance in use.
        """
        return None  # no bind-value conversion by default
    def result_processor(self, dialect, coltype):
        """Return a conversion function for processing result row values.
        Returns a callable which will receive a result row column
        value as the sole positional argument and will return a value
        to return to the user.
        If processing is not necessary, the method should return ``None``.
        :param dialect: Dialect instance in use.
        :param coltype: DBAPI coltype argument received in cursor.description.
        """
        return None  # no result-value conversion by default
    def column_expression(self, colexpr):
        """Given a SELECT column expression, return a wrapping SQL expression.
        This is typically a SQL function that wraps a column expression
        as rendered in the columns clause of a SELECT statement.
        It is used for special data types that require
        columns to be wrapped in some special database function in order
        to coerce the value before being sent back to the application.
        It is the SQL analogue of the :meth:`.TypeEngine.result_processor`
        method.
        The method is evaluated at statement compile time, as opposed
        to statement construction time.
        See also:
        :ref:`types_sql_value_processing`
        """
        return None  # no column wrapping by default
@util.memoized_property
def _has_column_expression(self):
"""memoized boolean, check if column_expression is implemented.
Allows the method to be skipped for the vast majority of expression
types that don't use this feature.
"""
return self.__class__.column_expression.__code__ \
is not TypeEngine.column_expression.__code__
    def bind_expression(self, bindvalue):
        """Given a bind value (i.e. a :class:`.BindParameter` instance),
        return a SQL expression in its place.
        This is typically a SQL function that wraps the existing bound
        parameter within the statement. It is used for special data types
        that require literals being wrapped in some special database function
        in order to coerce an application-level value into a database-specific
        format. It is the SQL analogue of the
        :meth:`.TypeEngine.bind_processor` method.
        The method is evaluated at statement compile time, as opposed
        to statement construction time.
        Note that this method, when implemented, should always return
        the exact same structure, without any conditional logic, as it
        may be used in an executemany() call against an arbitrary number
        of bound parameter sets.
        See also:
        :ref:`types_sql_value_processing`
        """
        return None  # no bind-parameter wrapping by default
@util.memoized_property
def _has_bind_expression(self):
"""memoized boolean, check if bind_expression is implemented.
Allows the method to be skipped for the vast majority of expression
types that don't use this feature.
"""
return self.__class__.bind_expression.__code__ \
is not TypeEngine.bind_expression.__code__
    def compare_values(self, x, y):
        """Compare two values for equality."""
        # Plain ``==`` by default; types with special equality semantics
        # override this.
        return x == y
def get_dbapi_type(self, dbapi):
    """Return the corresponding type object from the underlying DB-API,
    if any.

    Useful, for example, when calling ``setinputsizes()``.  The base
    implementation reports no DB-API type.
    """
    return None
@property
def python_type(self):
    """Return the Python type object expected to be returned
    by instances of this type, if known.

    Types which enforce a return type, or are known across the board
    to do so for all common DBAPIs (like ``int``), return that type
    here.  When no return type is defined, ``NotImplementedError``
    is raised.

    Note that any type also accommodates NULL in SQL, which means
    you can also get back ``None`` from any type in practice.
    """
    raise NotImplementedError()
def with_variant(self, type_, dialect_name):
    """Produce a new type object that will utilize the given
    type when applied to the dialect of the given name.

    e.g.::

        from sqlalchemy.types import String
        from sqlalchemy.dialects import mysql

        s = String()
        s = s.with_variant(mysql.VARCHAR(collation='foo'), 'mysql')

    The construction of :meth:`.TypeEngine.with_variant` is always
    from the "fallback" type to that which is dialect specific.  The
    returned :class:`.Variant` instance itself provides a
    :meth:`.Variant.with_variant` that can be called repeatedly.

    :param type_: a :class:`.TypeEngine` that will be selected
     as a variant from the originating type, when a dialect
     of the given name is in use.

    :param dialect_name: base name of the dialect which uses
     this type. (i.e. ``'postgresql'``, ``'mysql'``, etc.)

    .. versionadded:: 0.7.2

    """
    dialect_mapping = {dialect_name: to_instance(type_)}
    return Variant(self, dialect_mapping)
@util.memoized_property
def _type_affinity(self):
    """Return a rudimental 'affinity' value expressing the general class
    of type."""

    typ = None
    # walk the MRO from most specific to least; the last TypeEngine
    # subclass seen before reaching TypeEngine/UserDefinedType itself
    # is the affinity (e.g. Integer, String, ...)
    for t in self.__class__.__mro__:
        if t in (TypeEngine, UserDefinedType):
            return typ
        elif issubclass(t, (TypeEngine, UserDefinedType)):
            typ = t
    else:
        # loop exhausted without hitting a TypeEngine base -- fall back
        # to the class itself (not expected for normal TypeEngine MROs)
        return self.__class__
def dialect_impl(self, dialect):
    """Return a dialect-specific implementation for this
    :class:`.TypeEngine`.

    The per-dialect memo dictionary is consulted first; on a miss the
    implementation is generated and cached via ``_dialect_info()``.
    """
    try:
        memo = dialect._type_memos[self]
    except KeyError:
        memo = self._dialect_info(dialect)
    return memo['impl']
def _cached_literal_processor(self, dialect):
    """Return a dialect-specific literal processor for this type."""
    try:
        return dialect._type_memos[self]['literal']
    except KeyError:
        info = self._dialect_info(dialect)
        processor = info['impl'].literal_processor(dialect)
        info['literal'] = processor
        return processor
def _cached_bind_processor(self, dialect):
    """Return a dialect-specific bind processor for this type."""
    try:
        return dialect._type_memos[self]['bind']
    except KeyError:
        info = self._dialect_info(dialect)
        processor = info['impl'].bind_processor(dialect)
        info['bind'] = processor
        return processor
def _cached_result_processor(self, dialect, coltype):
    """Return a dialect-specific result processor for this type."""
    try:
        return dialect._type_memos[self][coltype]
    except KeyError:
        info = self._dialect_info(dialect)
        # key assumption: DBAPI type codes are constants.  Else this
        # dictionary would grow unbounded.
        processor = info['impl'].result_processor(dialect, coltype)
        info[coltype] = processor
        return processor
def _dialect_info(self, dialect):
    """Return a dialect-specific registry which
    caches a dialect-specific implementation, bind processing
    function, and one or more result processing functions."""
    memos = dialect._type_memos
    if self not in memos:
        impl = self._gen_dialect_impl(dialect)
        if impl is self:
            impl = self.adapt(type(self))
        # this can't be self, else we create a cycle
        assert impl is not self
        memos[self] = {'impl': impl}
    return memos[self]
def _gen_dialect_impl(self, dialect):
    # ask the dialect for its implementation of this type; subclasses
    # (e.g. TypeDecorator) override this with more elaborate logic
    return dialect.type_descriptor(self)
def adapt(self, cls, **kw):
    """Produce an "adapted" form of this type, given an "impl" class
    to work with.

    This method is used internally to associate generic
    types with "implementation" types that are specific to a particular
    dialect.

    :param cls: the implementation class to construct.
    :param kw: additional constructor arguments merged over this
     type's own constructor arguments.
    """
    return util.constructor_copy(self, cls, **kw)
def coerce_compared_value(self, op, value):
    """Suggest a type for a 'coerced' Python value in an expression.

    Given an operator and value, gives the type a chance
    to return a type which the value should be coerced into.

    The default behavior here is conservative; if the right-hand
    side is already coerced into a SQL type based on its
    Python type, it is usually left alone.

    End-user functionality extension here should generally be via
    :class:`.TypeDecorator`, which provides more liberal behavior in that
    it defaults to coercing the other side of the expression into this
    type, thus applying special Python conversions above and beyond those
    needed by the DBAPI to both sides. It also provides the public method
    :meth:`.TypeDecorator.coerce_compared_value` which is intended for
    end-user customization of this behavior.

    """
    _coerced_type = _resolve_value_to_type(value)
    # keep self when the value has no SQL type (NULLTYPE) or shares this
    # type's affinity; otherwise prefer the value's own resolved type
    if _coerced_type is NULLTYPE or _coerced_type._type_affinity \
            is self._type_affinity:
        return self
    else:
        return _coerced_type
def _compare_type_affinity(self, other):
    """True when both types resolve to the same affinity class."""
    return other._type_affinity is self._type_affinity
def compile(self, dialect=None):
    """Produce a string-compiled form of this :class:`.TypeEngine`.

    When called with no arguments, uses a "default" dialect
    to produce a string result.

    :param dialect: a :class:`.Dialect` instance.

    """
    # arg, return value is inconsistent with
    # ClauseElement.compile()....this is a mistake.
    dialect = dialect or self._default_dialect()
    return dialect.type_compiler.process(self)
@util.dependencies("sqlalchemy.engine.default")
def _default_dialect(self, default):
if self.__class__.__module__.startswith("sqlalchemy.dialects"):
tokens = self.__class__.__module__.split(".")[0:3]
mod = ".".join(tokens)
return getattr(__import__(mod).dialects, tokens[-1]).dialect()
else:
return default.DefaultDialect()
def __str__(self):
    # Python 2: __str__ must return a byte string, so the (possibly
    # unicode) compiled form is encoded with an ascii-safe fallback;
    # Python 3 returns the text directly
    if util.py2k:
        return unicode(self.compile()).\
            encode('ascii', 'backslashreplace')
    else:
        return str(self.compile())
def __repr__(self):
    # generic repr reconstructed from constructor arguments
    return util.generic_repr(self)
class VisitableCheckKWArg(util.EnsureKWArgType, VisitableType):
    """Metaclass combining ``EnsureKWArgType`` (rewrites designated
    methods to accept ``**kw``) with the visitor-dispatch metaclass."""
    pass
class UserDefinedType(util.with_metaclass(VisitableCheckKWArg, TypeEngine)):
    """Base for user defined types.

    This should be the base of new types.  Note that
    for most cases, :class:`.TypeDecorator` is probably
    more appropriate::

        import sqlalchemy.types as types

        class MyType(types.UserDefinedType):
            def __init__(self, precision = 8):
                self.precision = precision

            def get_col_spec(self, **kw):
                return "MYTYPE(%s)" % self.precision

            def bind_processor(self, dialect):
                def process(value):
                    return value
                return process

            def result_processor(self, dialect, coltype):
                def process(value):
                    return value
                return process

    Once the type is made, it's immediately usable::

        table = Table('foo', meta,
            Column('id', Integer, primary_key=True),
            Column('data', MyType(16))
            )

    The ``get_col_spec()`` method will in most cases receive a keyword
    argument ``type_expression`` which refers to the owning expression
    of the type as being compiled, such as a :class:`.Column` or
    :func:`.cast` construct.  This keyword is only sent if the method
    accepts keyword arguments (e.g. ``**kw``) in its argument signature;
    introspection is used to check for this in order to support legacy
    forms of this function.

    .. versionadded:: 1.0.0 the owning expression is passed to
       the ``get_col_spec()`` method via the keyword argument
       ``type_expression``, if it receives ``**kw`` in its signature.

    """
    __visit_name__ = "user_defined"

    # the VisitableCheckKWArg metaclass rewrites this named method to
    # accept **kw when the user's version does not (legacy support)
    ensure_kwarg = 'get_col_spec'

    class Comparator(TypeEngine.Comparator):
        __slots__ = ()

        def _adapt_expression(self, op, other_comparator):
            # legacy hook: ``adapt_operator`` predates the Comparator
            # system; warn and honor it if present
            if hasattr(self.type, 'adapt_operator'):
                util.warn_deprecated(
                    "UserDefinedType.adapt_operator is deprecated. Create "
                    "a UserDefinedType.Comparator subclass instead which "
                    "generates the desired expression constructs, given a "
                    "particular operator."
                )
                return self.type.adapt_operator(op), self.type
            else:
                return op, self.type

    comparator_factory = Comparator

    def coerce_compared_value(self, op, value):
        """Suggest a type for a 'coerced' Python value in an expression.

        Default behavior for :class:`.UserDefinedType` is the
        same as that of :class:`.TypeDecorator`; by default it returns
        ``self``, assuming the compared value should be coerced into
        the same type as this one.  See
        :meth:`.TypeDecorator.coerce_compared_value` for more detail.

        .. versionchanged:: 0.8 :meth:`.UserDefinedType.coerce_compared_value`
           now returns ``self`` by default, rather than falling onto the
           more fundamental behavior of
           :meth:`.TypeEngine.coerce_compared_value`.

        """
        return self
class TypeDecorator(SchemaEventTarget, TypeEngine):
    """Allows the creation of types which add additional functionality
    to an existing type.

    This method is preferred to direct subclassing of SQLAlchemy's
    built-in types as it ensures that all required functionality of
    the underlying type is kept in place.

    Typical usage::

        import sqlalchemy.types as types

        class MyType(types.TypeDecorator):
            '''Prefixes Unicode values with "PREFIX:" on the way in and
            strips it off on the way out.
            '''

            impl = types.Unicode

            def process_bind_param(self, value, dialect):
                return "PREFIX:" + value

            def process_result_value(self, value, dialect):
                return value[7:]

            def copy(self):
                return MyType(self.impl.length)

    The class-level "impl" attribute is required, and can reference any
    TypeEngine class.  Alternatively, the load_dialect_impl() method
    can be used to provide different type classes based on the dialect
    given; in this case, the "impl" variable can reference
    ``TypeEngine`` as a placeholder.

    Types that receive a Python type that isn't similar to the ultimate type
    used may want to define the :meth:`TypeDecorator.coerce_compared_value`
    method. This is used to give the expression system a hint when coercing
    Python objects into bind parameters within expressions. Consider this
    expression::

        mytable.c.somecol + datetime.date(2009, 5, 15)

    Above, if "somecol" is an ``Integer`` variant, it makes sense that
    we're doing date arithmetic, where above is usually interpreted
    by databases as adding a number of days to the given date.
    The expression system does the right thing by not attempting to
    coerce the "date()" value into an integer-oriented bind parameter.

    However, in the case of ``TypeDecorator``, we are usually changing an
    incoming Python type to something new - ``TypeDecorator`` by default will
    "coerce" the non-typed side to be the same type as itself. Such as below,
    we define an "epoch" type that stores a date value as an integer::

        class MyEpochType(types.TypeDecorator):
            impl = types.Integer

            epoch = datetime.date(1970, 1, 1)

            def process_bind_param(self, value, dialect):
                return (value - self.epoch).days

            def process_result_value(self, value, dialect):
                return self.epoch + timedelta(days=value)

    Our expression of ``somecol + date`` with the above type will coerce the
    "date" on the right side to also be treated as ``MyEpochType``.

    This behavior can be overridden via the
    :meth:`~TypeDecorator.coerce_compared_value` method, which returns a type
    that should be used for the value of the expression. Below we set it such
    that an integer value will be treated as an ``Integer``, and any other
    value is assumed to be a date and will be treated as a ``MyEpochType``::

        def coerce_compared_value(self, op, value):
            if isinstance(value, int):
                return Integer()
            else:
                return self

    """

    __visit_name__ = "type_decorator"

    def __init__(self, *args, **kwargs):
        """Construct a :class:`.TypeDecorator`.

        Arguments sent here are passed to the constructor
        of the class assigned to the ``impl`` class level attribute,
        assuming the ``impl`` is a callable, and the resulting
        object is assigned to the ``self.impl`` instance attribute
        (thus overriding the class attribute of the same name).

        If the class level ``impl`` is not a callable (the unusual case),
        it will be assigned to the same instance attribute 'as-is',
        ignoring those arguments passed to the constructor.

        Subclasses can override this to customize the generation
        of ``self.impl`` entirely.

        """
        if not hasattr(self.__class__, 'impl'):
            raise AssertionError("TypeDecorator implementations "
                                 "require a class-level variable "
                                 "'impl' which refers to the class of "
                                 "type being decorated")
        self.impl = to_instance(self.__class__.impl, *args, **kwargs)

    coerce_to_is_types = (util.NoneType, )
    """Specify those Python types which should be coerced at the expression
    level to "IS <constant>" when compared using ``==`` (and same for
    ``IS NOT`` in conjunction with ``!=``).

    For most SQLAlchemy types, this includes ``NoneType``, as well as
    ``bool``.

    :class:`.TypeDecorator` modifies this list to only include ``NoneType``,
    as typedecorator implementations that deal with boolean types are common.

    Custom :class:`.TypeDecorator` classes can override this attribute to
    return an empty tuple, in which case no values will be coerced to
    constants.

    .. versionadded:: 0.8.2
        Added :attr:`.TypeDecorator.coerce_to_is_types` to allow for easier
        control of ``__eq__()`` ``__ne__()`` operations.

    """

    class Comparator(TypeEngine.Comparator):
        __slots__ = ()

        def operate(self, op, *other, **kwargs):
            # thread the owning type's coerce_to_is_types through to
            # the operator system
            kwargs['_python_is_types'] = self.expr.type.coerce_to_is_types
            return super(TypeDecorator.Comparator, self).operate(
                op, *other, **kwargs)

        def reverse_operate(self, op, other, **kwargs):
            kwargs['_python_is_types'] = self.expr.type.coerce_to_is_types
            return super(TypeDecorator.Comparator, self).reverse_operate(
                op, other, **kwargs)

    @property
    def comparator_factory(self):
        # reuse the impl's factory when it already mixes in
        # TypeDecorator.Comparator; otherwise synthesize a combined class
        if TypeDecorator.Comparator in self.impl.comparator_factory.__mro__:
            return self.impl.comparator_factory
        else:
            return type("TDComparator",
                        (TypeDecorator.Comparator,
                         self.impl.comparator_factory),
                        {})

    def _gen_dialect_impl(self, dialect):
        """Return a dialect-specific form of this :class:`.TypeDecorator`:
        either the dialect's own replacement, or a copy of this decorator
        linked to the dialect-adapted impl type."""
        adapted = dialect.type_descriptor(self)
        if adapted is not self:
            return adapted

        # otherwise adapt the impl type, link
        # to a copy of this TypeDecorator and return
        # that.
        typedesc = self.load_dialect_impl(dialect).dialect_impl(dialect)
        tt = self.copy()
        if not isinstance(tt, self.__class__):
            raise AssertionError('Type object %s does not properly '
                                 'implement the copy() method, it must '
                                 'return an object of type %s' %
                                 (self, self.__class__))
        tt.impl = typedesc
        return tt

    @property
    def _type_affinity(self):
        """Delegate the affinity determination to the wrapped type."""
        return self.impl._type_affinity

    def _set_parent(self, column):
        """Support SchemaEventTarget"""
        if isinstance(self.impl, SchemaEventTarget):
            self.impl._set_parent(column)

    def _set_parent_with_dispatch(self, parent):
        """Support SchemaEventTarget"""
        if isinstance(self.impl, SchemaEventTarget):
            self.impl._set_parent_with_dispatch(parent)

    def type_engine(self, dialect):
        """Return a dialect-specific :class:`.TypeEngine` instance
        for this :class:`.TypeDecorator`.

        In most cases this returns a dialect-adapted form of
        the :class:`.TypeEngine` type represented by ``self.impl``.
        Makes usage of :meth:`dialect_impl` but also traverses
        into wrapped :class:`.TypeDecorator` instances.
        Behavior can be customized here by overriding
        :meth:`load_dialect_impl`.

        """
        adapted = dialect.type_descriptor(self)
        if not isinstance(adapted, type(self)):
            return adapted
        elif isinstance(self.impl, TypeDecorator):
            return self.impl.type_engine(dialect)
        else:
            return self.load_dialect_impl(dialect)

    def load_dialect_impl(self, dialect):
        """Return a :class:`.TypeEngine` object corresponding to a dialect.

        This is an end-user override hook that can be used to provide
        differing types depending on the given dialect.  It is used
        by the :class:`.TypeDecorator` implementation of :meth:`type_engine`
        to help determine what type should ultimately be returned
        for a given :class:`.TypeDecorator`.

        By default returns ``self.impl``.

        """
        return self.impl

    def __getattr__(self, key):
        """Proxy all other undefined accessors to the underlying
        implementation."""
        return getattr(self.impl, key)

    def process_literal_param(self, value, dialect):
        """Receive a literal parameter value to be rendered inline within
        a statement.

        This method is used when the compiler renders a
        literal value without using binds, typically within DDL
        such as in the "server default" of a column or an expression
        within a CHECK constraint.

        The returned string will be rendered into the output string.

        .. versionadded:: 0.9.0

        """
        raise NotImplementedError()

    def process_bind_param(self, value, dialect):
        """Receive a bound parameter value to be converted.

        Subclasses override this method to return the
        value that should be passed along to the underlying
        :class:`.TypeEngine` object, and from there to the
        DBAPI ``execute()`` method.

        The operation could be anything desired to perform custom
        behavior, such as transforming or serializing data.
        This could also be used as a hook for validating logic.

        This operation should be designed with the reverse operation
        in mind, which would be the process_result_value method of
        this class.

        :param value: Data to operate upon, of any type expected by
         this method in the subclass.  Can be ``None``.
        :param dialect: the :class:`.Dialect` in use.

        """
        raise NotImplementedError()

    def process_result_value(self, value, dialect):
        """Receive a result-row column value to be converted.

        Subclasses should implement this method to operate on data
        fetched from the database.

        Subclasses override this method to return the
        value that should be passed back to the application,
        given a value that is already processed by
        the underlying :class:`.TypeEngine` object, originally
        from the DBAPI cursor method ``fetchone()`` or similar.

        The operation could be anything desired to perform custom
        behavior, such as transforming or serializing data.
        This could also be used as a hook for validating logic.

        :param value: Data to operate upon, of any type expected by
         this method in the subclass.  Can be ``None``.
        :param dialect: the :class:`.Dialect` in use.

        This operation should be designed to be reversible by
        the "process_bind_param" method of this class.

        """
        raise NotImplementedError()

    @util.memoized_property
    def _has_bind_processor(self):
        """memoized boolean, check if process_bind_param is implemented.

        Allows the base process_bind_param to raise
        NotImplementedError without needing to test an expensive
        exception throw.

        """
        return self.__class__.process_bind_param.__code__ \
            is not TypeDecorator.process_bind_param.__code__

    @util.memoized_property
    def _has_literal_processor(self):
        """memoized boolean, check if process_literal_param is implemented.

        """
        return self.__class__.process_literal_param.__code__ \
            is not TypeDecorator.process_literal_param.__code__

    def literal_processor(self, dialect):
        """Provide a literal processing function for the given
        :class:`.Dialect`.

        Subclasses here will typically override
        :meth:`.TypeDecorator.process_literal_param` instead of this method
        directly.

        By default, this method makes use of
        :meth:`.TypeDecorator.process_bind_param` if that method is
        implemented, where :meth:`.TypeDecorator.process_literal_param` is
        not.  The rationale here is that :class:`.TypeDecorator` typically
        deals with Python conversions of data that are above the layer of
        database presentation.  With the value converted by
        :meth:`.TypeDecorator.process_bind_param`, the underlying type will
        then handle whether it needs to be presented to the DBAPI as a bound
        parameter or to the database as an inline SQL value.

        .. versionadded:: 0.9.0

        """
        if self._has_literal_processor:
            process_param = self.process_literal_param
        elif self._has_bind_processor:
            # the bind processor should normally be OK
            # for TypeDecorator since it isn't doing DB-level
            # handling, the handling here won't be different for bound vs.
            # literals.
            process_param = self.process_bind_param
        else:
            process_param = None

        if process_param:
            impl_processor = self.impl.literal_processor(dialect)
            if impl_processor:
                def process(value):
                    return impl_processor(process_param(value, dialect))
            else:
                def process(value):
                    return process_param(value, dialect)
            return process
        else:
            return self.impl.literal_processor(dialect)

    def bind_processor(self, dialect):
        """Provide a bound value processing function for the
        given :class:`.Dialect`.

        This is the method that fulfills the :class:`.TypeEngine`
        contract for bound value conversion.   :class:`.TypeDecorator`
        will wrap a user-defined implementation of
        :meth:`process_bind_param` here.

        User-defined code can override this method directly,
        though it's likely best to use :meth:`process_bind_param` so that
        the processing provided by ``self.impl`` is maintained.

        :param dialect: Dialect instance in use.

        This method is the reverse counterpart to the
        :meth:`result_processor` method of this class.

        """
        if self._has_bind_processor:
            process_param = self.process_bind_param
            impl_processor = self.impl.bind_processor(dialect)
            if impl_processor:
                # user conversion first, then impl's DBAPI conversion
                def process(value):
                    return impl_processor(process_param(value, dialect))
            else:
                def process(value):
                    return process_param(value, dialect)
            return process
        else:
            return self.impl.bind_processor(dialect)

    @util.memoized_property
    def _has_result_processor(self):
        """memoized boolean, check if process_result_value is implemented.

        Allows the base process_result_value to raise
        NotImplementedError without needing to test an expensive
        exception throw.

        """
        return self.__class__.process_result_value.__code__ \
            is not TypeDecorator.process_result_value.__code__

    def result_processor(self, dialect, coltype):
        """Provide a result value processing function for the given
        :class:`.Dialect`.

        This is the method that fulfills the :class:`.TypeEngine`
        contract for result value conversion.   :class:`.TypeDecorator`
        will wrap a user-defined implementation of
        :meth:`process_result_value` here.

        User-defined code can override this method directly,
        though it's likely best to use :meth:`process_result_value` so that
        the processing provided by ``self.impl`` is maintained.

        :param dialect: Dialect instance in use.
        :param coltype: An SQLAlchemy data type

        This method is the reverse counterpart to the
        :meth:`bind_processor` method of this class.

        """
        if self._has_result_processor:
            process_value = self.process_result_value
            impl_processor = self.impl.result_processor(dialect,
                                                        coltype)
            if impl_processor:
                # impl's DBAPI conversion first, then user conversion
                def process(value):
                    return process_value(impl_processor(value), dialect)
            else:
                def process(value):
                    return process_value(value, dialect)
            return process
        else:
            return self.impl.result_processor(dialect, coltype)

    def coerce_compared_value(self, op, value):
        """Suggest a type for a 'coerced' Python value in an expression.

        By default, returns self.   This method is called by
        the expression system when an object using this type is
        on the left or right side of an expression against a plain Python
        object which does not yet have a SQLAlchemy type assigned::

            expr = table.c.somecolumn + 35

        Where above, if ``somecolumn`` uses this type, this method will
        be called with the value ``operator.add``
        and ``35``.  The return value is whatever SQLAlchemy type should
        be used for ``35`` for this particular operation.

        """
        return self

    def copy(self):
        """Produce a copy of this :class:`.TypeDecorator` instance.

        This is a shallow copy and is provided to fulfill part of
        the :class:`.TypeEngine` contract.  It usually does not
        need to be overridden unless the user-defined :class:`.TypeDecorator`
        has local state that should be deep-copied.

        """
        instance = self.__class__.__new__(self.__class__)
        instance.__dict__.update(self.__dict__)
        return instance

    def get_dbapi_type(self, dbapi):
        """Return the DBAPI type object represented by this
        :class:`.TypeDecorator`.

        By default this calls upon :meth:`.TypeEngine.get_dbapi_type` of the
        underlying "impl".
        """
        return self.impl.get_dbapi_type(dbapi)

    def compare_values(self, x, y):
        """Given two values, compare them for equality.

        By default this calls upon :meth:`.TypeEngine.compare_values`
        of the underlying "impl", which in turn usually
        uses the Python equals operator ``==``.

        This function is used by the ORM to compare
        an original-loaded value with an intercepted
        "changed" value, to determine if a net change
        has occurred.

        """
        return self.impl.compare_values(x, y)

    def __repr__(self):
        return util.generic_repr(self, to_inspect=self.impl)
class Variant(TypeDecorator):
    """A wrapping type that selects among a variety of
    implementations based on dialect in use.

    The :class:`.Variant` type is typically constructed
    using the :meth:`.TypeEngine.with_variant` method.

    .. versionadded:: 0.7.2

    .. seealso:: :meth:`.TypeEngine.with_variant` for an example of use.

    """

    def __init__(self, base, mapping):
        """Construct a new :class:`.Variant`.

        :param base: the base 'fallback' type
        :param mapping: dictionary of string dialect names to
          :class:`.TypeEngine` instances.

        """
        self.impl = base
        self.mapping = mapping

    def load_dialect_impl(self, dialect):
        """Pick the registered type for the active dialect, falling back
        to the base type when none was registered."""
        return self.mapping.get(dialect.name, self.impl)

    def with_variant(self, type_, dialect_name):
        """Return a new :class:`.Variant` which adds the given
        type + dialect name to the mapping, in addition to the
        mapping present in this :class:`.Variant`.

        :param type_: a :class:`.TypeEngine` that will be selected
         as a variant from the originating type, when a dialect
         of the given name is in use.

        :param dialect_name: base name of the dialect which uses
         this type. (i.e. ``'postgresql'``, ``'mysql'``, etc.)

        """
        if dialect_name in self.mapping:
            raise exc.ArgumentError(
                "Dialect '%s' is already present in "
                "the mapping for this Variant" % dialect_name)
        extended = dict(self.mapping)
        extended[dialect_name] = type_
        return Variant(self.impl, extended)

    @property
    def comparator_factory(self):
        """express comparison behavior in terms of the base type"""
        return self.impl.comparator_factory
def _reconstitute_comparator(expression):
    # pickling/copy helper: a comparator is rebuilt from its expression
    return expression.comparator
def to_instance(typeobj, *arg, **kw):
    """Coerce ``typeobj`` into a type instance.

    ``None`` becomes ``NULLTYPE``; callables (i.e. type classes) are
    invoked with the given arguments; instances pass through unchanged.
    """
    if typeobj is None:
        return NULLTYPE
    if util.callable(typeobj):
        return typeobj(*arg, **kw)
    return typeobj
def adapt_type(typeobj, colspecs):
    """Adapt a (possibly generic) type to the implementation type
    registered for it in ``colspecs``, instantiating classes first."""
    if isinstance(typeobj, type):
        typeobj = typeobj()

    # search the MRO (excluding ``object``) for a registered impl type
    for t in typeobj.__class__.__mro__[0:-1]:
        if t in colspecs:
            impltype = colspecs[t]
            break
    else:
        # couldn't adapt - so just return the type itself
        # (it may be a user-defined type)
        return typeobj

    # if we adapted the given generic type to a database-specific type,
    # but it turns out the originally given "generic" type
    # is actually a subclass of our resulting type, then we were already
    # given a more specific type than that required; so use that.
    if issubclass(typeobj.__class__, impltype):
        return typeobj
    return typeobj.adapt(impltype)
| |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""build query for doclistview and return results"""
import frappe, json
import frappe.defaults
import frappe.permissions
from frappe.utils import flt
from frappe import _
class DatabaseQuery(object):
def __init__(self, doctype):
    """Prepare a query builder for the given doctype."""
    self.doctype = doctype
    self.user = None
    self.ignore_permissions = False
    self.tables = []
    self.conditions = []
    # the record name of the main table is always selected
    self.fields = ["`tab%s`.`name`" % doctype]
def execute(self, query=None, filters=None, fields=None, or_filters=None,
        docstatus=None, group_by=None, order_by=None, limit_start=0,
        limit_page_length=20, as_list=False, with_childnames=False, debug=False,
        ignore_permissions=False, user=None):
    """Run ``query`` as-is, or build and run a query from the given
    filter/field arguments.

    :param query: raw SQL to execute via :meth:`run_custom_query`;
        when given, the builder arguments only set state.
    :raises frappe.PermissionError: if the user lacks read permission
        on the doctype and ``ignore_permissions`` is not set.
    """
    if not ignore_permissions and not frappe.has_permission(self.doctype, "read", user=user):
        # call form instead of the Python-2-only ``raise X, y`` syntax,
        # keeping the module importable on Python 3 as well
        raise frappe.PermissionError(self.doctype)

    if fields:
        self.fields = fields
    self.filters = filters or []
    self.or_filters = or_filters or []
    self.docstatus = docstatus or []
    self.group_by = group_by
    self.order_by = order_by
    self.limit_start = limit_start
    self.limit_page_length = limit_page_length
    self.with_childnames = with_childnames
    self.debug = debug
    self.as_list = as_list
    self.ignore_permissions = ignore_permissions
    self.user = user or frappe.session.user

    if query:
        return self.run_custom_query(query)
    else:
        return self.build_and_run()
def build_and_run(self):
    """Assemble the final SELECT from the prepared pieces and execute it.

    Rows come back as dicts unless ``as_list`` was requested.
    """
    args = self.prepare_args()
    args.limit = self.add_limit()

    if args.conditions:
        args.conditions = "where " + args.conditions

    query = """select %(fields)s from %(tables)s %(conditions)s
        %(group_by)s %(order_by)s %(limit)s""" % args

    return frappe.db.sql(query, as_dict=not self.as_list, debug=self.debug)
def prepare_args(self):
    """Parse filters/fields, resolve tables and build the components of
    the SQL statement, returned as a ``frappe._dict``."""
    self.parse_args()
    self.extract_tables()
    self.remove_user_tags()
    self.build_conditions()

    args = frappe._dict()

    if self.with_childnames:
        # expose each child table's name column as '<doctype>:name'
        for t in self.tables:
            if t != "`tab" + self.doctype + "`":
                self.fields.append(t + ".name as '%s:name'" % t[4:-1])

    # query dict
    args.tables = ', '.join(self.tables)

    # OR filters are folded into one parenthesized AND-term
    if self.or_conditions:
        self.conditions.append("({0})".format(" or ".join(self.or_conditions)))

    args.conditions = ' and '.join(self.conditions)
    args.fields = ', '.join(self.fields)

    self.set_order_by(args)
    self.check_sort_by_table(args.order_by)
    args.order_by = args.order_by and (" order by " + args.order_by) or ""
    args.group_by = self.group_by and (" group by " + self.group_by) or ""

    return args
def parse_args(self):
    """Normalize ``filters`` and ``fields``: decode JSON strings and
    convert a filters dict into a list of filter tuples."""
    if isinstance(self.filters, basestring):
        self.filters = json.loads(self.filters)
    if isinstance(self.fields, basestring):
        self.fields = json.loads(self.fields)
    if isinstance(self.filters, dict):
        fdict = self.filters
        self.filters = []
        for key, value in fdict.iteritems():
            self.filters.append(self.make_filter_tuple(key, value))
def make_filter_tuple(self, key, value):
    """Normalize a (fieldname, value) pair into a
    ``[doctype, fieldname, operator, operand]`` filter list.

    A list/tuple value carries its own (operator, operand) pair;
    a plain value implies equality.
    """
    if isinstance(value, (list, tuple)):
        operator, operand = value[0], value[1]
    else:
        operator, operand = "=", value
    return [self.doctype, key, operator, operand]
def extract_tables(self):
    """extract tables from fields"""
    self.tables = ['`tab' + self.doctype + '`']

    # add tables from fields
    if self.fields:
        for f in self.fields:
            # skip plain fieldnames and locate() expressions
            if ( not ("tab" in f and "." in f) ) or ("locate(" in f): continue

            table_name = f.split('.')[0]
            # unwrap sql functions to reach the table name
            if table_name.lower().startswith('group_concat('):
                table_name = table_name[13:]
            if table_name.lower().startswith('ifnull('):
                table_name = table_name[7:]
            if not table_name[0]=='`':
                table_name = '`' + table_name + '`'
            if not table_name in self.tables:
                self.append_table(table_name)
def append_table(self, table_name):
    """Register an additional table for the query, enforcing read
    permission on its doctype unless ``ignore_permissions`` is set.

    :param table_name: backquoted table name, e.g. ``"`tabNote`"``.
    :raises frappe.PermissionError: if read permission is missing.
    """
    self.tables.append(table_name)
    doctype = table_name[4:-1]
    if (not self.ignore_permissions) and (not frappe.has_permission(doctype)):
        # call form instead of the Python-2-only ``raise X, y`` syntax
        raise frappe.PermissionError(doctype)
def remove_user_tags(self):
    """remove column _user_tags if not in table"""
    columns = frappe.db.get_table_columns(self.doctype)

    # remove from fields
    to_remove = []
    for fld in self.fields:
        for f in ("_user_tags", "_comments", "_assign"):
            if f in fld and not f in columns:
                to_remove.append(fld)

    for fld in to_remove:
        del self.fields[self.fields.index(fld)]

    # remove from filters
    to_remove = []
    for each in self.filters:
        if isinstance(each, basestring):
            each = [each]

        for element in each:
            if element in ("_user_tags", "_comments", "_assign") and element not in columns:
                to_remove.append(each)

    for each in to_remove:
        if isinstance(self.filters, dict):
            # NOTE(review): ``each`` is a filter entry, not a dict key --
            # after parse_args, filters is always a list, so this branch
            # looks unreachable; confirm before relying on it
            del self.filters[each]
        else:
            self.filters.remove(each)
def build_conditions(self):
    """Populate ``self.conditions`` and ``self.or_conditions`` from the
    parsed filters, child-table joins and permission rules."""
    self.conditions = []
    self.or_conditions = []

    self.build_filter_conditions(self.filters, self.conditions)
    self.build_filter_conditions(self.or_filters, self.or_conditions)

    # join parent, child tables
    parent_table = self.tables[0]
    for child_table in self.tables[1:]:
        self.conditions.append(child_table + '.parent = ' + parent_table + '.name')

    # match conditions
    if not self.ignore_permissions:
        match = self.build_match_conditions()
        if match:
            self.conditions.append("(" + match + ")")
def build_filter_conditions(self, filters, conditions):
    """build conditions from user filters"""
    if isinstance(filters, dict):
        filters = [filters]

    for f in filters:
        if isinstance(f, basestring):
            # raw SQL condition, passed through as-is
            conditions.append(f)
        else:
            f = self.get_filter_tuple(f)

            tname = ('`tab' + f[0] + '`')
            if not tname in self.tables:
                self.append_table(tname)

            # prepare in condition
            if f[2] in ['in', 'not in']:
                opts = f[3]
                if not isinstance(opts, (list, tuple)):
                    opts = f[3].split(",")
                # escape each option and render as ("a", "b", ...)
                opts = [frappe.db.escape(t.strip()) for t in opts]
                f[3] = '("{0}")'.format('", "'.join(opts))
                conditions.append('ifnull({tname}.{fname}, "") {operator} {value}'.format(
                    tname=tname, fname=f[1], operator=f[2], value=f[3]))
            else:
                df = frappe.get_meta(f[0]).get("fields", {"fieldname": f[1]})

                # treat as a string comparison for "like", or when the
                # value is a string and the field isn't numeric
                if f[2] == "like" or (isinstance(f[3], basestring) and
                        (not df or df[0].fieldtype not in ["Float", "Int", "Currency", "Percent"])):
                    if f[2] == "like":
                        # because "like" uses backslash (\) for escaping
                        f[3] = f[3].replace("\\", "\\\\")

                    value, default_val = '"{0}"'.format(frappe.db.escape(f[3])), '""'
                else:
                    value, default_val = flt(f[3]), 0

                conditions.append('ifnull({tname}.{fname}, {default_val}) {operator} {value}'.format(
                    tname=tname, fname=f[1], default_val=default_val, operator=f[2],
                    value=value))
def get_filter_tuple(self, f):
    """Coerce *f* into a (doctype, fieldname, condition, value) filter.

    A single-entry dict shorthand ``{fieldname: value}`` is expanded
    via ``make_filter_tuple``; anything that is not a 4-element
    list/tuple afterwards raises via ``frappe.throw``.
    """
    if isinstance(f, dict):
        # dict shorthand: exactly one key/value pair is expected
        fieldname, val = f.items()[0]
        f = self.make_filter_tuple(fieldname, val)

    if not isinstance(f, (list, tuple)):
        frappe.throw("Filter must be a tuple or list (in a list)")

    if len(f) != 4:
        frappe.throw("Filter must have 4 values (doctype, fieldname, condition, value): " + str(f))

    return f
def build_match_conditions(self, as_condition=True):
    """add match conditions if applicable

    Verifies read permission on the doctype (raising if absent) and,
    when user permissions apply to reads, collects per-user match
    conditions and filters.

    :param as_condition: when True, return a combined SQL condition
        string (possibly ""); when False, return the list of match
        filter dicts collected instead.
    """
    self.match_filters = []
    self.match_conditions = []

    if not self.tables: self.extract_tables()

    meta = frappe.get_meta(self.doctype)
    role_permissions = frappe.permissions.get_role_permissions(meta, user=self.user)

    # child tables (istable) inherit permissions from their parent, so
    # only block reads on top-level doctypes
    if not meta.istable and not role_permissions.get("read") and not getattr(self, "ignore_permissions", False):
        frappe.throw(_("No permission to read {0}").format(self.doctype))

    # apply user permissions?
    if role_permissions.get("apply_user_permissions", {}).get("read"):
        # get user permissions
        user_permissions = frappe.defaults.get_user_permissions(self.user)
        self.add_user_permissions(user_permissions,
            user_permission_doctypes=role_permissions.get("user_permission_doctypes"))

    if as_condition:
        conditions = ""
        if self.match_conditions:
            # will turn out like ((blog_post in (..) and blogger in (...)) or (blog_category in (...)))
            conditions = "((" + ") or (".join(self.match_conditions) + "))"

        # hook-provided conditions are AND-ed with the match conditions
        doctype_conditions = self.get_permission_query_conditions()
        if doctype_conditions:
            conditions += (' and ' + doctype_conditions) if conditions else doctype_conditions

        return conditions
    else:
        return self.match_filters
def add_user_permissions(self, user_permissions, user_permission_doctypes=None):
    """Collect match conditions/filters restricting reads to documents
    linked to the user's permitted records.

    For each applicable group of doctypes, every link field that must be
    checked contributes a condition of the form
    ``(ifnull(field, "")="" or field in (<allowed values>))``; the
    conditions of one group are AND-ed and appended to
    ``self.match_conditions`` / ``self.match_filters``.

    :param user_permissions: dict mapping doctype -> list of allowed
        document names for the current user.
    :param user_permission_doctypes: optional list (of lists) of
        doctypes the user permissions apply to; resolved through
        frappe.permissions when None.
    """
    user_permission_doctypes = frappe.permissions.get_user_permission_doctypes(user_permission_doctypes,
        user_permissions)
    meta = frappe.get_meta(self.doctype)

    for doctypes in user_permission_doctypes:
        match_filters = {}
        match_conditions = []

        # check in links
        for df in meta.get_fields_to_check_permissions(doctypes):
            # Bug fix: the original used v.replace('"', '\"'), but '\"'
            # is the same string as '"', so the replace was a no-op and
            # embedded double quotes broke the generated SQL.  Escape
            # them with a real backslash instead.
            match_conditions.append("""(ifnull(`tab{doctype}`.`{fieldname}`, "")=""
                or `tab{doctype}`.`{fieldname}` in ({values}))""".format(
                doctype=self.doctype,
                fieldname=df.fieldname,
                values=", ".join([('"'+v.replace('"', '\\"')+'"') for v in user_permissions[df.options]])
            ))
            match_filters[df.options] = user_permissions[df.options]

        if match_conditions:
            self.match_conditions.append(" and ".join(match_conditions))

        if match_filters:
            self.match_filters.append(match_filters)
def get_permission_query_conditions(self):
    """Collect extra WHERE fragments from ``permission_query_conditions``
    hooks registered for this doctype; returns None when nothing applies."""
    condition_methods = frappe.get_hooks("permission_query_conditions", {}).get(self.doctype, [])
    if not condition_methods:
        return

    # call every hook with the current user and keep non-empty results
    fragments = [frappe.call(frappe.get_attr(method), self.user) for method in condition_methods]
    fragments = [fragment for fragment in fragments if fragment]
    return " and ".join(fragments) if fragments else None
def run_custom_query(self, query):
    """Execute a caller-supplied SQL query.

    The legacy ``%(key)s`` placeholder is substituted with ``name``
    before execution; results come back as dicts unless the query
    object asked for lists (``self.as_list``).
    """
    sql = query.replace('%(key)s', 'name') if '%(key)s' in query else query
    return frappe.db.sql(sql, as_dict = (not self.as_list))
def set_order_by(self, args):
    """Fill in ``args.order_by``.

    An explicit ``self.order_by`` wins; otherwise the doctype meta's
    sort field/order are used, except when the query is a single group
    function (count/min/max) without GROUP BY, where ordering would be
    meaningless.  Submittable doctypes additionally sort drafts
    (docstatus asc) to the top.
    """
    meta = frappe.get_meta(self.doctype)
    if self.order_by:
        args.order_by = self.order_by
    else:
        args.order_by = ""

        # don't add order by from meta if a mysql group function is used without group by clause
        group_function_without_group_by = (len(self.fields)==1 and
            ( self.fields[0].lower().startswith("count(")
                or self.fields[0].lower().startswith("min(")
                or self.fields[0].lower().startswith("max(")
            ) and not self.group_by)

        if not group_function_without_group_by:
            args.order_by = "`tab{0}`.`{1}` {2}".format(self.doctype,
                meta.sort_field or "modified", meta.sort_order or "desc")

            # draft docs always on top
            if meta.is_submittable:
                args.order_by = "`tab{0}`.docstatus asc, ".format(self.doctype) + args.order_by
def check_sort_by_table(self, order_by):
    """Raise if *order_by* references a table not selected in this query."""
    if "." not in order_by:
        return

    table_name = order_by.split('.')[0]
    if table_name in self.tables:
        return

    if table_name.startswith('`'):
        # strip the `tab...` wrapping for the user-facing message
        table_name = table_name[4:-1]
    frappe.throw(_("Please select atleast 1 column from {0} to sort").format(table_name))
def add_limit(self):
    """Return the SQL LIMIT clause, or '' when no page length is set."""
    if not self.limit_page_length:
        return ''
    return 'limit %s, %s' % (self.limit_start, self.limit_page_length)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_operation import AzureOperationPoller
import uuid
from .. import models
class VirtualNetworksOperations(object):
    """VirtualNetworksOperations operations.

    AutoRest-generated operations group; changes here will be lost when
    the client is regenerated.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: Client API version. Constant value: "2016-09-01".
    """

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # API version is pinned by the code generator for this service version
        self.api_version = "2016-09-01"

        self.config = config

    def delete(
            self, resource_group_name, virtual_network_name, custom_headers=None, raw=False, **operation_config):
        """Deletes the specified virtual network.

        Long-running operation: returns a poller unless raw=True, in
        which case the initial response is processed synchronously.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_name: The name of the virtual network.
        :type virtual_network_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        def long_running_send():
            # initial DELETE that starts the long-running operation
            request = self._client.delete(url, query_parameters)
            return self._client.send(request, header_parameters, **operation_config)

        def get_long_running_status(status_link, headers=None):
            # poll the status URL the service handed back
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            if response.status_code not in [202, 204, 200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response

        if raw:
            # raw mode: send once and return the processed first response
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)

    def get(
            self, resource_group_name, virtual_network_name, expand=None, custom_headers=None, raw=False, **operation_config):
        """Gets the specified virtual network by resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_name: The name of the virtual network.
        :type virtual_network_name: str
        :param expand: Expands referenced resources.
        :type expand: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`VirtualNetwork
         <azure.mgmt.network.v2016_09_01.models.VirtualNetwork>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('VirtualNetwork', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    def create_or_update(
            self, resource_group_name, virtual_network_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Creates or updates a virtual network in the specified resource group.

        Long-running operation: returns a poller unless raw=True.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_name: The name of the virtual network.
        :type virtual_network_name: str
        :param parameters: Parameters supplied to the create or update virtual
         network operation
        :type parameters: :class:`VirtualNetwork
         <azure.mgmt.network.v2016_09_01.models.VirtualNetwork>`
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :rtype:
         :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
         instance that returns :class:`VirtualNetwork
         <azure.mgmt.network.v2016_09_01.models.VirtualNetwork>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(parameters, 'VirtualNetwork')

        # Construct and send request
        def long_running_send():
            # initial PUT carrying the serialized VirtualNetwork body
            request = self._client.put(url, query_parameters)
            return self._client.send(
                request, header_parameters, body_content, **operation_config)

        def get_long_running_status(status_link, headers=None):
            request = self._client.get(status_link)
            if headers:
                request.headers.update(headers)
            return self._client.send(
                request, header_parameters, **operation_config)

        def get_long_running_output(response):
            # 200 = updated, 201 = created; both carry a VirtualNetwork body
            if response.status_code not in [200, 201]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            deserialized = None

            if response.status_code == 200:
                deserialized = self._deserialize('VirtualNetwork', response)
            if response.status_code == 201:
                deserialized = self._deserialize('VirtualNetwork', response)

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response

            return deserialized

        if raw:
            # raw mode: send once and return the processed first response
            response = long_running_send()
            return get_long_running_output(response)

        long_running_operation_timeout = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        return AzureOperationPoller(
            long_running_send, get_long_running_output,
            get_long_running_status, long_running_operation_timeout)

    def list_all(
            self, custom_headers=None, raw=False, **operation_config):
        """Gets all virtual networks in a subscription.

        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`VirtualNetworkPaged
         <azure.mgmt.network.v2016_09_01.models.VirtualNetworkPaged>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # first page uses the list URL; later pages follow next_link verbatim
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualNetworks'
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        deserialized = models.VirtualNetworkPaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.VirtualNetworkPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized

    def list(
            self, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """Gets all virtual networks in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`VirtualNetworkPaged
         <azure.mgmt.network.v2016_09_01.models.VirtualNetworkPaged>`
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            # first page uses the list URL; later pages follow next_link verbatim
            if not next_link:
                # Construct URL
                url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks'
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        deserialized = models.VirtualNetworkPaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.VirtualNetworkPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized

    def check_ip_address_availability(
            self, resource_group_name, virtual_network_name, ip_address=None, custom_headers=None, raw=False, **operation_config):
        """Checks whether a private IP address is available for use.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_network_name: The name of the virtual network.
        :type virtual_network_name: str
        :param ip_address: The private IP address to be verified.
        :type ip_address: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: :class:`IPAddressAvailabilityResult
         <azure.mgmt.network.v2016_09_01.models.IPAddressAvailabilityResult>`
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/CheckIPAddressAvailability'
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        if ip_address is not None:
            query_parameters['ipAddress'] = self._serialize.query("ip_address", ip_address, 'str')
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('IPAddressAvailabilityResult', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
| |
import slackclient
import markovify
import os
import time
import argparse
import re
import sys
import json
STATE_SIZE = 2  # Markov chain state size (words of context)
USER_ID_REGEX = r"\<\@(.+?)\>"  # matches Slack user mentions like <@U12345>

global username  # no-op at module level; username is assigned inside main()
projectname = "andrey3000"

# Resolve a per-user settings directory: %APPDATA% on Windows, ~/.config elsewhere.
if os.name != "posix":
    import win32com
    from win32com.shell import shellcon, shell
    homedir = shell.SHGetFolderPath(0, shellcon.CSIDL_APPDATA, 0, 0)
else:
    homedir = os.path.join(os.path.expanduser("~"), ".config")

settingsdirectory = os.path.join(homedir, projectname)
if not os.path.isdir(settingsdirectory):
    os.makedirs(settingsdirectory)

# directory for saved per-Slack-user Markov chains
chains_dir = os.path.join(settingsdirectory, "chains")
if not os.path.isdir(chains_dir):
    os.makedirs(chains_dir)

# directory for saved named-text Markov chains (the "write" command)
texts_dir = os.path.join(settingsdirectory, "texts")
if not os.path.isdir(texts_dir):
    os.makedirs(texts_dir)
class ArgumentParserError(Exception):
    """Raised by ThrowingArgumentParser in place of exiting the process."""
class ThrowingArgumentParser(argparse.ArgumentParser):
    """ArgumentParser that raises ArgumentParserError instead of calling
    sys.exit(), so parse failures can be reported back over chat."""

    # set to True by DontErrorAction when a --help flag is seen,
    # suppressing the error so help text can be returned instead
    not_error = False

    def error(self, message):
        if self.not_error:
            return
        raise ArgumentParserError(message)
def get_markov(user_id, path=chains_dir):
    """Load the saved Markov chain for *user_id* from *path*.

    Falls back to an empty AndreyText when the chain file is missing or
    unreadable, so callers always get a usable model.
    """
    try:
        with open(os.path.join(path, user_id), "r") as infile:
            return AndreyText.from_json(infile.read())
    except Exception:
        # Was a bare ``except:``, which would also swallow SystemExit and
        # KeyboardInterrupt; only recover from ordinary errors here
        # (missing file, corrupt/partial JSON).
        return AndreyText("", state_size=STATE_SIZE)
def save_markov(user_id, markov, path=chains_dir):
    """Serialise *markov* to JSON and store it as *path*/*user_id*."""
    destination = os.path.join(path, user_id)
    with open(destination, "w") as outfile:
        outfile.write(markov.to_json())
class DontErrorAction(argparse.Action):
    """argparse action used for --help flags: records that help was
    requested and tells the parser not to raise on parse errors."""

    def __call__(self, parser, namespace, values, option_string=None):
        # flag the namespace attribute, then disarm the parser's error()
        setattr(namespace, self.dest, True)
        parser.not_error = True
class AndreyText(markovify.Text):
    """markovify.Text subclass tuned for chat input: accepts every
    sentence and serialises with non-ASCII characters preserved."""

    def test_sentence_input(self, sentence):
        """Accept all input; the default filter rejects messy chat text."""
        return True

    def to_json(self):
        """Serialise the chain; ensure_ascii=False keeps emoji/accents readable."""
        return json.dumps(self.to_dict(), ensure_ascii=False)
def parse_command(command, message):
    """Parse an @-mention command and return the bot's reply string.

    :param command: the tokenised message text after the leading mention.
    :param message: the raw Slack message dict ("user" key is used for
        the "impersonate me" form).
    :returns: help text, a generated sentence, or an error message.
    """
    parser = ThrowingArgumentParser(add_help=False, prog="@{}".format(username))
    parser.add_argument("--help", "-h", default=False, action=DontErrorAction, nargs=0, help=argparse.SUPPRESS)
    parser.set_defaults(function=None)

    # shared parent parser for the impersonate/do/spoof aliases
    impersonate_parent_parser = argparse.ArgumentParser(add_help=False)
    impersonate_parent_parser.add_argument("user", default=None, help="Highlight of the user you want me to impersonate, or everyone to impersonate a mix of all users.")
    impersonate_parent_parser.add_argument("--help", "-h", default=False, nargs=0, action=DontErrorAction, help=argparse.SUPPRESS)
    impersonate_parent_parser.set_defaults(function="impersonate")

    subparsers = parser.add_subparsers(metavar="{impersonate,write}")
    impersonate_subparser = subparsers.add_parser("impersonate", parents=[impersonate_parent_parser], description="Impersonate the given user", add_help=False, help="Impersonate a user")
    do_subparser = subparsers.add_parser("do", parents=[impersonate_parent_parser], description="Impersonate the given user", add_help=False)
    spoof_subparser = subparsers.add_parser("spoof", parents=[impersonate_parent_parser], description="Impersonate the given user", add_help=False)

    write_subparser = subparsers.add_parser("write", description="Write from a saved text", add_help=False, help="Write in the style of a given text")
    write_subparser.set_defaults(function="write")
    write_subparser.add_argument("name", default=[], nargs="+", help="Name of the text you want me to write like.")
    write_subparser.add_argument("--help", "-h", default=False, action=DontErrorAction, nargs=0, help=argparse.SUPPRESS)

    try:
        args, other_args = parser.parse_known_args(command)
        if args.function is None and args.help:
            return parser.format_help()
        elif args.function == "impersonate":
            if args.help:
                return impersonate_subparser.format_help()
            if args.user is None:
                return "Who should I impersonate?"
            # resolve the target: "me", everyone, or a <@UXXXX> mention
            if args.user.lower() == "me":
                uid = message.get("user")
            elif args.user in ("<!everyone>", "everyone"):
                uid = "everyone"
            else:
                m = re.match(USER_ID_REGEX, args.user)
                if not m:
                    return "I don't recognize user {}".format(args.user)
                uid = m.group(1)
            markov = get_markov(uid)
            try:
                sentence = markov.make_sentence()
            except:
                sentence = "Could not impersonate <@{}>, not enough data".format(uid)
            # make_sentence may also return a falsy value on a sparse chain
            if not sentence:
                sentence = "Could not impersonate <@{}>, not enough data".format(uid)
            return sentence
        elif args.function == "write":
            if args.help:
                return write_subparser.format_help()
            # saved texts are keyed by a lowercased, underscore-joined name
            name = "_".join(args.name).lower()
            markov = get_markov(name, path=texts_dir)
            try:
                sentence = markov.make_sentence()
            except:
                sentence = "I do not know how to write {}".format(" ".join(args.name))
            if not sentence:
                sentence = "I do not know how to write {}".format(" ".join(args.name))
            return sentence
        else:
            raise ArgumentParserError("Unkown command")
    except ArgumentParserError as e:
        print e
        return "Unknown command"
def main(argv=None):
    """CLI entry point.

    Subcommands: "run" (default) connects to Slack and serves the bot;
    "add_text" imports a text file as a named Markov chain.
    Returns -1 when no Slack token can be found.
    """
    if argv is None:
        argv = sys.argv[1:]
    global username
    parser = argparse.ArgumentParser()
    parser.set_defaults(function=None)
    subparsers = parser.add_subparsers()
    add_text_subparser = subparsers.add_parser("add_text", description="Add a text for andrey to write excerpts for")
    add_text_subparser.set_defaults(function="add_text")
    add_text_subparser.add_argument("text_file", help="File to read text from")
    add_text_subparser.add_argument("name", help="Name to save text as")
    run_subparser = subparsers.add_parser("run", description="Add a text for andrey to write excerpts for")
    run_subparser.set_defaults(function="run")
    run_subparser.add_argument("--key", "-k", default=None, help="Key to use to connect to slack")
    # default to the "run" subcommand when none was given
    if not argv or argv[0] not in [command for command in subparsers.choices]:
        argv = ["run"] + argv
    args = parser.parse_args(argv)
    if args.function == "run":
        # token resolution order: andrey.key file, then env var, then --key
        slack_token = None
        try:
            with open(os.path.join(os.path.dirname(__file__), "andrey.key")) as infile:
                slack_token = infile.read().strip()
        except:
            pass
        try:
            slack_token = os.environ["andrey_key"]
        except:
            pass
        if args.key is not None:
            slack_token = args.key
        if not slack_token:
            print "No slack token found!"
            return -1
        client = slackclient.SlackClient(slack_token)
        channels = client.api_call("channels.list")
        if not client.rtm_connect():
            raise Exception("Failed to connect to slack")
        username = client.server.username
        # find our own user id so we can recognise mentions and skip our own posts
        user_id = None
        for uid, user_data in client.server.users.items():
            if user_data.name == username:
                user_id = uid
                break
        assert user_id is not None, "Unable to find own user id"
        while True:
            for slack_message in client.rtm_read():
                if slack_message.get("type") == "message":
                    text = slack_message.get("text")
                    if text is None:
                        continue
                    if not text.startswith("<@{}>".format(user_id)):
                        # ordinary message: fold it into the author's chain and "everyone"
                        if slack_message.get("user") == user_id:
                            continue
                        try:
                            chain = AndreyText(str(text), state_size=STATE_SIZE)
                            old_chain = get_markov(slack_message.get("user"))
                            new_chain = markovify.combine([old_chain, chain])
                            save_markov(slack_message.get("user"), new_chain)
                            old_chain = get_markov("everyone")
                            new_chain = markovify.combine([old_chain, chain])
                            save_markov("everyone", new_chain)
                        except Exception as e:
                            print e
                            print text
                    else:
                        # direct mention: treat the remainder as a command
                        message = parse_command(text.split()[1:], slack_message)
                        client.rtm_send_message(slack_message.get("channel"), message)
            time.sleep(.25)
    elif args.function == "add_text":
        with open(args.text_file, "r") as infile:
            data = infile.read()
        m = AndreyText(data)
        name = "_".join(args.name.split(" ")).lower()
        save_markov(name, m, path=texts_dir)
# script entry point: propagate main()'s return value as the exit status
if __name__ == "__main__":
    exit(main())
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import unittest
from contextlib import redirect_stdout
from parameterized import parameterized
from airflow.bin import cli
from airflow.cli.commands import connection_command
from airflow.models import Connection
from airflow.utils.db import create_session, merge_conn, provide_session
from tests.test_utils.db import clear_db_connections
# connection ids "new1".."new6" used by the add/delete connection tests
# (presumably consumed by test classes beyond this chunk — confirm usage)
TEST_CONN_IDS = [f"new{index}" for index in range(1, 7)]
class TestCliListConnections(unittest.TestCase):
    """Tests for ``airflow connections list`` in default and tsv output modes."""

    # (conn_id, conn_type) pairs expected among the default connections
    EXPECTED_CONS = [
        ('airflow_db', 'mysql', ),
        ('local_mysql', 'mysql', ),
        ('presto_default', 'presto', ),
        ('google_cloud_default', 'google_cloud_platform', ),
        ('mongo_default', 'mongo', ),
        ('mysql_default', 'mysql', ),
        ('postgres_default', 'postgres', ),
        ('sqlite_default', 'sqlite', ),
        ('http_default', 'http', ),
        ('mssql_default', 'mssql', ),
        ('vertica_default', 'vertica', ),
    ]

    def setUp(self):
        # fresh parser and a clean connections table for every test
        self.parser = cli.CLIFactory.get_parser()
        clear_db_connections()

    def tearDown(self):
        clear_db_connections()

    def test_cli_connections_list(self):
        """Default output should mention every expected conn_id/conn_type pair."""
        with redirect_stdout(io.StringIO()) as stdout:
            connection_command.connections_list(self.parser.parse_args(["connections", "list"]))
            stdout = stdout.getvalue()
        lines = stdout.split("\n")
        for conn_id, conn_type in self.EXPECTED_CONS:
            self.assertTrue(any(conn_id in line and conn_type in line for line in lines))

    def test_cli_connections_list_as_tsv(self):
        """TSV output should mention every expected conn_id/conn_type pair."""
        args = self.parser.parse_args(["connections", "list", "--output", "tsv"])
        with redirect_stdout(io.StringIO()) as stdout:
            connection_command.connections_list(args)
            stdout = stdout.getvalue()
        lines = stdout.split("\n")
        for conn_id, conn_type in self.EXPECTED_CONS:
            self.assertTrue(any(conn_id in line and conn_type in line for line in lines))
# connection URI used by the add-connection test cases below
TEST_URL = "postgresql://airflow:airflow@host:5432/airflow"
class TestCliAddConnections(unittest.TestCase):
    """Tests for the ``connections add`` CLI command."""
    @classmethod
    def setUpClass(cls):
        cls.parser = cli.CLIFactory.get_parser()
        clear_db_connections()
    @classmethod
    def tearDownClass(cls):
        clear_db_connections()
    # Each case: (CLI argv, expected stdout message, expected DB row attrs).
    @parameterized.expand(
        [
            # new1 / new2: create a connection from a full URI.
            (
                ["connections", "add", "new1", "--conn_uri=%s" % TEST_URL],
                "\tSuccessfully added `conn_id`=new1 : postgresql://airflow:airflow@host:5432/airflow",
                {
                    "conn_id": "new1",
                    "conn_type": "postgres",
                    "host": "host",
                    "is_encrypted": True,
                    "is_extra_encrypted": False,
                    "login": "airflow",
                    "port": 5432,
                    "schema": "airflow",
                },
            ),
            (
                ["connections", "add", "new2", "--conn_uri=%s" % TEST_URL],
                "\tSuccessfully added `conn_id`=new2 : postgresql://airflow:airflow@host:5432/airflow",
                {
                    "conn_id": "new2",
                    "conn_type": "postgres",
                    "host": "host",
                    "is_encrypted": True,
                    "is_extra_encrypted": False,
                    "login": "airflow",
                    "port": 5432,
                    "schema": "airflow",
                },
            ),
            # new3 / new4: URI plus --conn_extra (extra gets encrypted).
            (
                [
                    "connections",
                    "add",
                    "new3",
                    "--conn_uri=%s" % TEST_URL,
                    "--conn_extra",
                    "{'extra': 'yes'}",
                ],
                "\tSuccessfully added `conn_id`=new3 : postgresql://airflow:airflow@host:5432/airflow",
                {
                    "conn_id": "new3",
                    "conn_type": "postgres",
                    "host": "host",
                    "is_encrypted": True,
                    "is_extra_encrypted": True,
                    "login": "airflow",
                    "port": 5432,
                    "schema": "airflow",
                },
            ),
            (
                [
                    "connections",
                    "add",
                    "new4",
                    "--conn_uri=%s" % TEST_URL,
                    "--conn_extra",
                    "{'extra': 'yes'}",
                ],
                "\tSuccessfully added `conn_id`=new4 : postgresql://airflow:airflow@host:5432/airflow",
                {
                    "conn_id": "new4",
                    "conn_type": "postgres",
                    "host": "host",
                    "is_encrypted": True,
                    "is_extra_encrypted": True,
                    "login": "airflow",
                    "port": 5432,
                    "schema": "airflow",
                },
            ),
            # new5: create from individual components instead of a URI.
            (
                [
                    "connections",
                    "add",
                    "new5",
                    "--conn_type=hive_metastore",
                    "--conn_login=airflow",
                    "--conn_password=airflow",
                    "--conn_host=host",
                    "--conn_port=9083",
                    "--conn_schema=airflow",
                ],
                "\tSuccessfully added `conn_id`=new5 : hive_metastore://airflow:airflow@host:9083/airflow",
                {
                    "conn_id": "new5",
                    "conn_type": "hive_metastore",
                    "host": "host",
                    "is_encrypted": True,
                    "is_extra_encrypted": False,
                    "login": "airflow",
                    "port": 9083,
                    "schema": "airflow",
                },
            ),
            # new6: empty URI with an explicit type and extras only.
            (
                [
                    "connections",
                    "add",
                    "new6",
                    "--conn_uri",
                    "",
                    "--conn_type=google_cloud_platform",
                    "--conn_extra",
                    "{'extra': 'yes'}",
                ],
                "\tSuccessfully added `conn_id`=new6 : google_cloud_platform://:@:",
                {
                    "conn_id": "new6",
                    "conn_type": "google_cloud_platform",
                    "host": None,
                    "is_encrypted": False,
                    "is_extra_encrypted": True,
                    "login": None,
                    "port": None,
                    "schema": None,
                },
            ),
        ]
    )
    def test_cli_connection_add(self, cmd, expected_output, expected_conn):
        # Run the CLI command, capturing what it prints.
        with redirect_stdout(io.StringIO()) as stdout:
            connection_command.connections_add(self.parser.parse_args(cmd))
            stdout = stdout.getvalue()
        self.assertIn(expected_output, stdout)
        # cmd[2] is always the conn_id positional argument.
        conn_id = cmd[2]
        with create_session() as session:
            comparable_attrs = [
                "conn_id",
                "conn_type",
                "host",
                "is_encrypted",
                "is_extra_encrypted",
                "login",
                "port",
                "schema",
            ]
            # The stored row must match the expected attribute dict exactly.
            current_conn = session.query(Connection).filter(Connection.conn_id == conn_id).first()
            self.assertEqual(expected_conn, {attr: getattr(current_conn, attr) for attr in comparable_attrs})
    def test_cli_connections_add_duplicate(self):
        # Attempt to add duplicate
        connection_command.connections_add(
            self.parser.parse_args(["connections", "add", "new1", "--conn_uri=%s" % TEST_URL])
        )
        with redirect_stdout(io.StringIO()) as stdout:
            connection_command.connections_add(
                self.parser.parse_args(["connections", "add", "new1", "--conn_uri=%s" % TEST_URL])
            )
            stdout = stdout.getvalue()
        # Check stdout for addition attempt
        self.assertIn("\tA connection with `conn_id`=new1 already exists", stdout)
    def test_cli_connections_add_delete_with_missing_parameters(self):
        # Attempt to add without providing conn_uri
        with self.assertRaisesRegex(
            SystemExit, r"The following args are required to add a connection: \['conn_uri or conn_type'\]"
        ):
            connection_command.connections_add(self.parser.parse_args(["connections", "add", "new1"]))
class TestCliDeleteConnections(unittest.TestCase):
    """Tests for the ``connections delete`` CLI command."""

    @classmethod
    def setUpClass(cls):
        cls.parser = cli.CLIFactory.get_parser()
        clear_db_connections()

    @classmethod
    def tearDownClass(cls):
        clear_db_connections()

    @provide_session
    def test_cli_delete_connections(self, session=None):
        merge_conn(
            Connection(
                conn_id="new1", conn_type="mysql", host="mysql", login="root", password="", schema="airflow"
            ),
            session=session
        )
        # Delete the connection just created, capturing CLI output.
        captured = io.StringIO()
        with redirect_stdout(captured):
            connection_command.connections_delete(
                self.parser.parse_args(["connections", "delete", "new1"]))
        self.assertIn("\tSuccessfully deleted `conn_id`=new1", captured.getvalue())
        # The row must be gone from the database.
        remaining = session.query(Connection).filter(Connection.conn_id == "new1").first()
        self.assertTrue(remaining is None)

    def test_cli_delete_invalid_connection(self):
        # Deleting an unknown conn_id reports the failure on stdout.
        captured = io.StringIO()
        with redirect_stdout(captured):
            connection_command.connections_delete(
                self.parser.parse_args(["connections", "delete", "fake"]))
        self.assertIn("\tDid not find a connection with `conn_id`=fake", captured.getvalue())
| |
import code
import sys
import threading
# Bug fix: ``partial`` was previously imported from kivy.properties, which
# does not provide it — it lives in functools (keyboard_on_key_down below
# relies on it for the autocomplete callback).
from functools import partial

from kivy.uix.textinput import TextInput
from kivy.uix.boxlayout import BoxLayout
from kivy.base import runTouchApp
from kivy.clock import Clock
from kivy.properties import (ObjectProperty, ListProperty,
                             StringProperty, NumericProperty)
from kivy.lang import Builder
try:
    # rlcompleter may be unavailable on some platforms; tab-completion is
    # then disabled in InteractiveShellInput.
    from rlcompleter import Completer
except ImportError:
    Completer = None
Builder.load_string('''
<PythonConsole>:
text_input: text_input2
scroll_view: scroll_view
ScrollView:
id: scroll_view
InteractiveShellInput:
id: text_input2
size_hint: (1, None)
font_name: root.font_name
font_size: root.font_size
foreground_color: root.foreground_color
background_color: root.background_color
height: max(self.parent.height, self.minimum_height)
on_ready_to_input: root.ready_to_input()
''')
class PseudoFile(object):
    """File-like adapter that forwards write operations from the embedded
    Python shell to an InteractiveShellInput widget.
    """

    def __init__(self, sh):
        # Target shell; everything written here is passed to ``sh.write``.
        self.sh = sh

    def write(self, s):
        """Forward a single string to the shell."""
        self.sh.write(s)

    def writelines(self, lines):
        """Forward each string in *lines* to the shell, in order."""
        for chunk in lines:
            self.write(chunk)

    def flush(self):
        """No-op: writes are forwarded immediately, nothing is buffered."""
        pass

    def isatty(self):
        """Report as interactive so the shell behaves like a terminal."""
        return True
class Shell(code.InteractiveConsole):
    "Wrapper around Python that can filter input/output to the shell"
    def __init__(self, root):
        # root: the PythonConsole widget that displays this shell's I/O.
        code.InteractiveConsole.__init__(self)
        # Worker thread hosting interact(); set by InteractiveThread.__init__.
        self.thread = None
        self.root = root
        # Flag polled by interact()'s main loop; set via exit().
        self._exit = False
    def write(self, data):
        '''write data to show as output on the screen.
        '''
        import functools
        # Hop to the main thread: Kivy widgets must only be touched from
        # the UI loop, while interact() runs on a worker thread.
        Clock.schedule_once(functools.partial(self.root.show_output, data), 0)
    def raw_input(self, prompt=""):
        '''To show prompt and get required data from user.
        '''
        # Blocks (inside PythonConsole.get_input) until the user hits enter.
        return self.root.get_input(prompt)
    def runcode(self, _code):
        """Execute a code object.
        When an exception occurs, self.showtraceback() is called to
        display a traceback. All exceptions are caught except
        SystemExit, which is reraised.
        A note about KeyboardInterrupt: this exception may occur
        elsewhere in this code, and may not always be caught. The
        caller should be prepared to deal with it.
        """
        # Temporarily route print()/sys.stdout into the console widget.
        org_stdout = sys.stdout
        sys.stdout = PseudoFile(self)
        try:
            exec(_code, self.locals)
        except SystemExit:
            # NOTE(review): contrary to the inherited docstring above,
            # SystemExit is swallowed here so exit() cannot kill the app.
            print('It\'s not possible to exit from Kivy Designer'
                  ' Python console')
        except:
            # NOTE(review): bare except mirrors the stdlib
            # InteractiveConsole behaviour — it reports any user-code
            # error via showtraceback() instead of crashing the console.
            self.showtraceback()
        sys.stdout = org_stdout
    def exit(self):
        '''To exit PythonConsole.
        '''
        self._exit = True
    def interact(self, banner=None):
        """Closely emulate the interactive Python console.
        The optional banner argument specify the banner to print
        before the first interaction; by default it prints a banner
        similar to the one printed by the real Python interpreter,
        followed by the current class name in parentheses (so as not
        to confuse this with the real interpreter -- since it's so
        close!).
        """
        # Ensure the standard prompts exist (sys.ps1/ps2 are only set when
        # Python itself runs interactively).
        try:
            sys.ps1
        except AttributeError:
            sys.ps1 = ">>> "
        try:
            sys.ps2
        except AttributeError:
            sys.ps2 = "... "
        cprt = 'Type "help", "copyright", "credits" or "license"'\
            ' for more information.'
        if banner is None:
            self.write("Python %s on %s\n%s\n(%s)\n" %
                       (sys.version, sys.platform, cprt,
                        self.__class__.__name__))
        else:
            self.write("%s\n" % str(banner))
        # ``more`` is truthy while a multi-line statement is incomplete.
        more = 0
        while not self._exit:
            try:
                if more:
                    prompt = sys.ps2
                else:
                    prompt = sys.ps1
                try:
                    line = self.raw_input(prompt)
                    if line is None:
                        continue
                    # Can be None if sys.stdin was redefined
                    encoding = getattr(sys.stdin, "encoding", None)
                    if encoding and isinstance(line, bytes):
                        line = line.decode(encoding)
                except EOFError:
                    self.write("\n")
                    break
                else:
                    more = self.push(line)
            except KeyboardInterrupt:
                self.write("\nKeyboardInterrupt\n")
                self.resetbuffer()
                more = 0
class InteractiveThread(threading.Thread):
    """Worker thread hosting the Shell's read-eval-print loop, so that
    blocking input waits never freeze the Kivy UI thread.
    """

    def __init__(self, sh):
        super(InteractiveThread, self).__init__()
        # Keep a reference to the shell and let it know its host thread.
        self._sh = sh
        sh.thread = self

    def run(self):
        """Enter the shell's interactive main loop in this thread."""
        self._sh.interact()
class InteractiveShellInput(TextInput):
    '''Displays Output and sends input to Shell. Emits 'on_ready_to_input'
    when it is ready to get input from user.
    '''
    __events__ = ('on_ready_to_input',)
    def __init__(self, **kwargs):
        super(InteractiveShellInput, self).__init__(**kwargs)
        # Text of the most recent line the user submitted with enter.
        self.last_line = None
    def keyboard_on_key_down(self, window, keycode, text, modifiers):
        '''Override of _keyboard_on_key_down.
        '''
        # NOTE(review): ``self.sh`` and ``self._cursor_pos`` are attached
        # externally (by PythonConsole / _set_cursor_val); this assumes the
        # first keystroke arrives after the first output was shown — verify.
        if keycode[0] == 9 and Completer:
            # tab, add autocomplete suggestion
            # Only the text typed after the last prompt is completed.
            txt = self.text[self._cursor_pos:]
            if txt.strip():
                suggestion = Completer(self.sh.locals).complete(txt, 0)
                if suggestion:
                    # Replace the partial word with the full suggestion.
                    self.select_text(self._cursor_pos,
                                     self._cursor_pos + len(txt))
                    self.delete_selection()
                    Clock.schedule_once(
                        partial(self.insert_text, suggestion))
            return False
        elif keycode[0] == 13:
            # For enter
            self.last_line = self.text[self._cursor_pos:]
            self.dispatch('on_ready_to_input')
        return super(InteractiveShellInput, self).keyboard_on_key_down(
            window, keycode, text, modifiers)
    def insert_text(self, substring, from_undo=False):
        '''Override of insert_text
        '''
        # Disallow edits before the last prompt: earlier text is history.
        if self.cursor_index() < self._cursor_pos:
            return
        return super(InteractiveShellInput, self).insert_text(substring,
                                                              from_undo)
    def on_ready_to_input(self, *args):
        '''Default handler of 'on_ready_to_input'
        '''
        pass
    def show_output(self, output):
        '''Show output to the user.
        '''
        self.text += output
        # Record the new end-of-output position shortly after the text
        # (and therefore the cursor) has updated.
        Clock.schedule_once(self._set_cursor_val, 0.1)
    def _set_cursor_val(self, *args):
        '''Get last position of cursor where output was added.
        '''
        self._cursor_pos = self.cursor_index()
        from kivy.animation import Animation
        # Smoothly scroll the enclosing ScrollView to the bottom.
        anim = Animation(scroll_y=0, d=0.5)
        anim.cancel_all(self.parent)
        anim.start(self.parent)
class PythonConsole(BoxLayout):
    '''Embeddable interactive Python console widget: wires a Shell running
    on a worker thread to an InteractiveShellInput for display and input.
    '''
    text_input = ObjectProperty(None)
    '''Instance of :class:`~designer.uix.py_console.InteractiveShellInput`
    :data:`text_input` is an :class:`~kivy.properties.ObjectProperty`
    '''
    sh = ObjectProperty(None)
    '''Instance of :class:`~designer.uix.py_console.Shell`
    :data:`sh` is an :class:`~kivy.properties.ObjectProperty`
    '''
    scroll_view = ObjectProperty(None)
    '''Instance of :class:`~kivy.uix.scrollview.ScrollView`
    :data:`scroll_view` is an :class:`~kivy.properties.ObjectProperty`
    '''
    foreground_color = ListProperty((.5, .5, .5, .93))
    '''This defines the color of the text in the console
    :data:`foreground_color` is an :class:`~kivy.properties.ListProperty`,
    Default to '(.5, .5, .5, .93)'
    '''
    background_color = ListProperty((0, 0, 0, 1))
    '''This defines the background color of the console
    :data:`background_color` is an :class:`~kivy.properties.ListProperty`,
    Default to '(0, 0, 0, 1)'
    '''
    font_name = StringProperty('data/fonts/DroidSansMono.ttf')
    '''Indicates the font Style used in the console
    :data:`font_name` is a :class:`~kivy.properties.StringProperty`,
    Default to 'DroidSansMono'
    '''
    font_size = NumericProperty(14)
    '''Indicates the size of the font used for the console
    :data:`font_size` is a :class:`~kivy.properties.NumericProperty`,
    Default to '14'
    '''
    def __init__(self, **kwargs):
        # NOTE(review): **kwargs is accepted but not forwarded to super();
        # presumably intentional, but verify against callers.
        super(PythonConsole, self).__init__()
        self.sh = Shell(self)
        # Daemon worker thread: the interpreter loop blocks on user input,
        # so it must not run on (or outlive) the UI thread.
        self._thread = InteractiveThread(self.sh)
        self._thread.setDaemon(True)
        Clock.schedule_once(self.run_sh)
        self.text_input.sh = self.sh
        self._ready_to_input = False
        self._exit = False
    def ready_to_input(self, *args):
        '''Specifies that PythonConsole is ready to take input from user.
        '''
        self._ready_to_input = True
    def run_sh(self, *args):
        '''Start Python Shell.
        '''
        self._thread.start()
    def show_output(self, data, dt):
        '''Show output to user.
        '''
        self.text_input.show_output(data)
    def _show_prompt(self, *args):
        '''Show prompt to user and asks for input.
        '''
        self.text_input.show_output(self.prompt)
    def get_input(self, prompt):
        '''Get input from user.
        '''
        import time
        self.prompt = prompt
        Clock.schedule_once(self._show_prompt, 0.1)
        # Called from the shell's worker thread: poll until the user hits
        # enter (ready_to_input) or the console is asked to exit.
        while not self._ready_to_input and not self._exit:
            time.sleep(0.05)
        self._ready_to_input = False
        return self.text_input.last_line
    def exit(self):
        '''Exit PythonConsole
        '''
        self._exit = True
        self.sh.exit()
# Manual test entry point: run the console as a standalone Kivy app.
if __name__ == '__main__':
    runTouchApp(PythonConsole())
| |
from __future__ import print_function
import caffe
from caffe.model_libs import *
from google.protobuf import text_format
import math
import os
import shutil
import stat
import subprocess
import sys
# Add extra layers on top of a "base" network (e.g. VGGNet or Inception).
def AddExtraLayers(net, use_batchnorm=True, lr_mult=1):
    """Append the SSD extra convolutional feature layers to ``net``.

    Starting from the last layer currently in ``net`` (the 19x19 map for
    the VGG base), adds the conv6..conv9 pairs that progressively shrink
    the feature map: 19x19 -> 10x10 -> 5x5 -> 3x3 -> 1x1.

    :param net: ``caffe.NetSpec`` being built; modified in place.
    :param use_batchnorm: forwarded to ConvBNLayer (insert batch norm).
    :param lr_mult: learning-rate multiplier forwarded to ConvBNLayer.
    :return: the same ``net``, for chaining.
    """
    use_relu = True
    # Wrap in list() so this also works on Python 3, where dict views do
    # not support indexing (the file already imports print_function).
    from_layer = list(net.keys())[-1]
    # TODO(weiliu89): Construct the name using the last layer to avoid duplication.
    # Each pair is a 1x1 bottleneck followed by a 3x3 conv; strided or
    # unpadded 3x3 convs shrink the map as annotated.
    # (layer name, num_output, kernel_size, pad, stride)
    conv_specs = [
        ("conv6_1", 256, 1, 0, 1),
        ("conv6_2", 512, 3, 1, 2),  # -> 10 x 10
        ("conv7_1", 128, 1, 0, 1),
        ("conv7_2", 256, 3, 1, 2),  # -> 5 x 5
        ("conv8_1", 128, 1, 0, 1),
        ("conv8_2", 256, 3, 0, 1),  # -> 3 x 3
        ("conv9_1", 128, 1, 0, 1),
        ("conv9_2", 256, 3, 0, 1),  # -> 1 x 1
    ]
    for out_layer, num_output, kernel_size, pad, stride in conv_specs:
        ConvBNLayer(net, from_layer, out_layer, use_batchnorm, use_relu,
                    num_output, kernel_size, pad, stride, lr_mult=lr_mult)
        from_layer = out_layer
    return net
### Modify the following parameters accordingly ###
# The directory which contains the caffe code.
# We assume you are running the script at the CAFFE_ROOT.
caffe_root = os.getcwd()
# Set true if you want to start training right after generating all files.
run_soon = True
# The video file path
video_file = "examples/videos/ILSVRC2015_train_00755001.mp4"
# The parameters for the video demo
# Key parameters used in training
# If true, use batch norm for all newly added layers.
# Currently only the non batch norm version has been tested.
use_batchnorm = False
# presumably 20 VOC object classes + background — verify against label map.
num_classes = 21
share_location = True
background_label_id=0
conf_loss_type = P.MultiBoxLoss.SOFTMAX
code_type = P.PriorBox.CENTER_SIZE
lr_mult = 1.
# Stores LabelMapItem.
label_map_file = "data/VOC0712/labelmap_voc.prototxt"
# The resized image size
resize_width = 300
resize_height = 300
# Parameters needed for test.
# Set the number of test iterations to the maximum integer number.
test_iter = int(math.pow(2, 29) - 1)
# Use GPU or CPU
solver_mode = P.Solver.GPU
# Defining which GPUs to use.
gpus = "0"
# Number of frames to be processed per batch.
test_batch_size = 1
# Only display high quality detections whose scores are higher than a threshold.
visualize_threshold = 0.3
# Size of video image.
video_width = 1280
video_height = 720
# Scale the image size for display.
scale = 0.8
### Hopefully you don't need to change the following ###
resize = "{}x{}".format(resize_width, resize_height)
# Input source description for the VideoData layer.
video_data_param = {
    'video_type': P.VideoData.VIDEO,
    'video_file': video_file,
}
# Preprocessing applied to frames fed into the network.
test_transform_param = {
    'mean_value': [104, 117, 123],
    'resize_param': {
        'prob': 1,
        'resize_mode': P.Resize.WARP,
        'height': resize_height,
        'width': resize_width,
        'interp_mode': [P.Resize.LINEAR],
    },
}
# Transform used when rendering detections back onto display-sized frames.
output_transform_param = {
    'mean_value': [104, 117, 123],
    'resize_param': {
        'prob': 1,
        'resize_mode': P.Resize.WARP,
        'height': int(video_height * scale),
        'width': int(video_width * scale),
        'interp_mode': [P.Resize.LINEAR],
    },
}
# parameters for generating detection output.
det_out_param = {
    'num_classes': num_classes,
    'share_location': share_location,
    'background_label_id': background_label_id,
    'nms_param': {'nms_threshold': 0.45, 'top_k': 400},
    'save_output_param': {
        'label_map_file': label_map_file,
    },
    'keep_top_k': 200,
    'confidence_threshold': 0.01,
    'code_type': code_type,
    'visualize': True,
    'visualize_threshold': visualize_threshold,
}
# The job name should be same as the name used in examples/ssd/ssd_pascal.py.
job_name = "SSD_{}".format(resize)
# The name of the model. Modify it if you want.
model_name = "VGG_VOC0712_{}".format(job_name)
# Directory which stores the model .prototxt file.
save_dir = "models/VGGNet/VOC0712/{}_video".format(job_name)
# Directory which stores the snapshot of trained models.
snapshot_dir = "models/VGGNet/VOC0712/{}".format(job_name)
# Directory which stores the job script and log file.
job_dir = "jobs/VGGNet/VOC0712/{}_video".format(job_name)
# model definition files.
test_net_file = "{}/test.prototxt".format(save_dir)
# snapshot prefix.
snapshot_prefix = "{}/{}".format(snapshot_dir, model_name)
# job script path.
job_file = "{}/{}.sh".format(job_dir, model_name)
# Find the most recent snapshot by scanning "<model_name>_iter_<N>.caffemodel"
# files.  (Loop variables renamed: the originals shadowed the builtins
# ``file`` and ``iter``.)
max_iter = 0
for snapshot_file in os.listdir(snapshot_dir):
    if snapshot_file.endswith(".caffemodel"):
        basename = os.path.splitext(snapshot_file)[0]
        iter_num = int(basename.split("{}_iter_".format(model_name))[1])
        if iter_num > max_iter:
            max_iter = iter_num
if max_iter == 0:
    print("Cannot find snapshot in {}".format(snapshot_dir))
    sys.exit()
# The resume model.
pretrain_model = "{}_iter_{}.caffemodel".format(snapshot_prefix, max_iter)
# parameters for generating priors.
# minimum dimension of input image
min_dim = 300
# conv4_3 ==> 38 x 38
# fc7 ==> 19 x 19
# conv6_2 ==> 10 x 10
# conv7_2 ==> 5 x 5
# conv8_2 ==> 3 x 3
# conv9_2 ==> 1 x 1
mbox_source_layers = ['conv4_3', 'fc7', 'conv6_2', 'conv7_2', 'conv8_2', 'conv9_2']
# in percent %
min_ratio = 20
max_ratio = 90
step = int(math.floor((max_ratio - min_ratio) / (len(mbox_source_layers) - 2)))
min_sizes = []
max_sizes = []
# ``range`` instead of the Python-2-only ``xrange``: the file already
# imports print_function for 3.x compatibility and the sequence is tiny.
for ratio in range(min_ratio, max_ratio + 1, step):
    min_sizes.append(min_dim * ratio / 100.)
    max_sizes.append(min_dim * (ratio + step) / 100.)
# Prepend the prior sizes for the first (conv4_3) source layer.
min_sizes = [min_dim * 10 / 100.] + min_sizes
max_sizes = [min_dim * 20 / 100.] + max_sizes
steps = [8, 16, 32, 64, 100, 300]
aspect_ratios = [[2], [2, 3], [2, 3], [2, 3], [2], [2]]
# L2 normalize conv4_3.
normalizations = [20, -1, -1, -1, -1, -1]
# variance used to encode/decode prior bboxes.
if code_type == P.PriorBox.CENTER_SIZE:
    prior_variance = [0.1, 0.1, 0.2, 0.2]
else:
    prior_variance = [0.1]
flip = True
clip = False
# Check file.
check_if_exist(label_map_file)
check_if_exist(pretrain_model)
make_if_not_exist(save_dir)
make_if_not_exist(job_dir)
make_if_not_exist(snapshot_dir)
# Create test net.
net = caffe.NetSpec()
net.data = L.VideoData(video_data_param=video_data_param,
        data_param=dict(batch_size=test_batch_size),
        transform_param=test_transform_param)
# VGG base network, then the SSD extra feature layers on top.
VGGNetBody(net, from_layer='data', fully_conv=True, reduced=True, dilated=True,
    dropout=False)
AddExtraLayers(net, use_batchnorm, lr_mult=lr_mult)
mbox_layers = CreateMultiBoxHead(net, data_layer='data', from_layers=mbox_source_layers,
        use_batchnorm=use_batchnorm, min_sizes=min_sizes, max_sizes=max_sizes,
        aspect_ratios=aspect_ratios, steps=steps, normalizations=normalizations,
        num_classes=num_classes, share_location=share_location, flip=flip, clip=clip,
        prior_variance=prior_variance, kernel_size=3, pad=1, lr_mult=lr_mult)
# Turn the raw confidence predictions into per-class probabilities,
# matching the configured loss type.
conf_name = "mbox_conf"
if conf_loss_type == P.MultiBoxLoss.SOFTMAX:
    reshape_name = "{}_reshape".format(conf_name)
    net[reshape_name] = L.Reshape(net[conf_name], shape=dict(dim=[0, -1, num_classes]))
    softmax_name = "{}_softmax".format(conf_name)
    net[softmax_name] = L.Softmax(net[reshape_name], axis=2)
    flatten_name = "{}_flatten".format(conf_name)
    net[flatten_name] = L.Flatten(net[softmax_name], axis=1)
    mbox_layers[1] = net[flatten_name]
elif conf_loss_type == P.MultiBoxLoss.LOGISTIC:
    sigmoid_name = "{}_sigmoid".format(conf_name)
    net[sigmoid_name] = L.Sigmoid(net[conf_name])
    mbox_layers[1] = net[sigmoid_name]
mbox_layers.append(net.data)
net.detection_out = L.DetectionOutput(*mbox_layers,
    detection_output_param=det_out_param,
    transform_param=output_transform_param,
    include=dict(phase=caffe_pb2.Phase.Value('TEST')))
# NOTE(review): "slience" looks like a typo for "silence", but it becomes
# the generated layer name — renaming would change the prototxt; confirm
# before touching.
net.slience = L.Silence(net.detection_out, ntop=0,
    include=dict(phase=caffe_pb2.Phase.Value('TEST')))
with open(test_net_file, 'w') as f:
    print('name: "{}_test"'.format(model_name), file=f)
    print(net.to_proto(), file=f)
shutil.copy(test_net_file, job_dir)
# Create job file.
with open(job_file, 'w') as f:
    f.write('cd {}\n'.format(caffe_root))
    f.write('./build/tools/caffe test \\\n')
    f.write('--model="{}" \\\n'.format(test_net_file))
    f.write('--weights="{}" \\\n'.format(pretrain_model))
    f.write('--iterations="{}" \\\n'.format(test_iter))
    if solver_mode == P.Solver.GPU:
        f.write('--gpu {}\n'.format(gpus))
# Copy the python script to job_dir.
py_file = os.path.abspath(__file__)
shutil.copy(py_file, job_dir)
# Run the job.
os.chmod(job_file, stat.S_IRWXU)
if run_soon:
    subprocess.call(job_file, shell=True)
| |
# SPDX-License-Identifier: Apache-2.0
# Copyright Contributors to the Rez Project
from __future__ import print_function
from rez.resolved_context import ResolvedContext
from rez.utils.colorize import heading, local, critical, Printer
from rez.utils.data_utils import cached_property
from rez.utils.formatting import columnise
from rez.vendor import yaml
from rez.vendor.yaml.error import YAMLError
from rez.exceptions import RezSystemError, SuiteError
from rez.config import config
import os.path
import sys
class Wrapper(object):
    """A Wrapper.
    A wrapper is a tool created by a `Suite`. Wrappers reside in the ./bin
    directory of a suite. They are executable yaml files that are run with the
    internal '_rez-forward' tool.
    When a wrapper is executed, it runs the associated tool within the matching
    context in the suite.
    """
    def __init__(self, filepath):
        """Create a wrapper given its executable file."""
        # Imported locally to avoid a circular import with rez.suite.
        from rez.suite import Suite
        def _err(msg):
            raise RezSystemError("Invalid executable file %s: %s"
                                 % (filepath, msg))
        with open(filepath) as f:
            content = f.read()
        try:
            # The wrapper file is yaml of the form {"kwargs": {...}}.
            doc = yaml.load(content, Loader=yaml.FullLoader)
            doc = doc["kwargs"]
            context_name = doc["context_name"]
            tool_name = doc["tool_name"]
            prefix_char = doc.get("prefix_char")
        except YAMLError as e:
            _err(str(e))
        # check that the suite is there - a wrapper may have been moved out of
        # a suite's ./bin path, which renders it useless.
        suite_path = os.path.dirname(os.path.dirname(filepath))
        try:
            Suite.load(suite_path)
        except SuiteError as e:
            _err(str(e))
        # Load the resolved context this tool belongs to.
        path = os.path.join(suite_path, "contexts", "%s.rxt" % context_name)
        context = ResolvedContext.load(path)
        self._init(suite_path, context_name, context, tool_name, prefix_char)
    def _init(self, suite_path, context_name, context, tool_name, prefix_char=None):
        # Plain attribute assignment, shared by __init__ and any other
        # construction path.
        self.suite_path = suite_path
        self.context_name = context_name
        self.context = context
        self.tool_name = tool_name
        self.prefix_char = prefix_char
    @cached_property
    def suite(self):
        # Lazily load the owning suite; cached after the first access.
        from rez.suite import Suite
        return Suite.load(self.suite_path)
    def run(self, *args):
        """Invoke the wrapped script.
        Returns:
            Return code of the command, or 0 if the command is not run.
        """
        # Per-wrapper prefix char wins over the configured default.
        if self.prefix_char is None:
            prefix_char = config.suite_alias_prefix_char
        else:
            prefix_char = self.prefix_char
        if prefix_char == '':
            # empty prefix char means we don't support the '+' args
            return self._run_no_args(args)
        else:
            return self._run(prefix_char, args)
    def _run_no_args(self, args):
        # All args are forwarded verbatim to the tool; no rez options.
        cmd = [self.tool_name] + list(args)
        retcode, _, _ = self.context.execute_shell(command=cmd, block=True)
        return retcode
    def _run(self, prefix_char, args):
        import argparse
        parser = argparse.ArgumentParser(prog=self.tool_name,
                                         prefix_chars=prefix_char)
        def _add_argument(*nargs, **kwargs):
            # Option strings below use '=' as a placeholder which is
            # substituted with the suite's prefix char (e.g. "==patch"
            # becomes "++patch" when prefix_char is '+').
            nargs_ = []
            for narg in nargs:
                nargs_.append(narg.replace('=', prefix_char))
            parser.add_argument(*nargs_, **kwargs)
        _add_argument(
            "=a", "==about", action="store_true",
            help="print information about the tool")
        _add_argument(
            "=i", "==interactive", action="store_true",
            help="launch an interactive shell within the tool's configured "
                 "environment")
        _add_argument(
            "=p", "==patch", type=str, nargs='*', metavar="PKG",
            help="run the tool in a patched environment")
        _add_argument(
            "==versions", action="store_true",
            help="list versions of package providing this tool")
        _add_argument(
            "==command", type=str, nargs='+', metavar=("COMMAND", "ARG"),
            help="read commands from string, rather than executing the tool")
        _add_argument(
            "==stdin", action="store_true",
            help="read commands from standard input, rather than executing the tool")
        _add_argument(
            "==strict", action="store_true",
            help="strict patching. Ignored if ++patch is not present")
        _add_argument(
            "==nl", "==no-local", dest="no_local", action="store_true",
            help="don't load local packages when patching")
        _add_argument(
            "==peek", action="store_true",
            help="diff against the tool's context and a re-resolved copy - "
                 "this shows how 'stale' the context is")
        _add_argument(
            "==verbose", action="count", default=0,
            help="verbose mode, repeat for more verbosity")
        _add_argument(
            "==quiet", action="store_true",
            help="hide welcome message when entering interactive mode")
        _add_argument(
            "==no-rez-args", dest="no_rez_args", action="store_true",
            help="pass all args to the tool, even if they start with '%s'" % prefix_char)
        opts, tool_args = parser.parse_known_args(args)
        if opts.no_rez_args:
            # Forward everything (minus the no-rez-args flag itself) to the
            # tool, and re-parse nothing so all rez options fall back to
            # their defaults.
            args = list(args)
            args.remove("==no-rez-args".replace('=', prefix_char))
            tool_args = args
            opts = parser.parse_args([])
        # print info
        if opts.about:
            return self.print_about()
        elif opts.versions:
            return self.print_package_versions()
        elif opts.peek:
            return self.peek()
        # patching
        context = self.context
        if opts.patch is not None:
            # Re-resolve the context with the patched request.
            new_request = opts.patch
            request = context.get_patched_request(new_request, strict=opts.strict)
            config.remove_override("quiet")
            pkg_paths = (config.nonlocal_packages_path
                         if opts.no_local else None)
            context = ResolvedContext(request,
                                      package_paths=pkg_paths,
                                      verbosity=opts.verbose)
            # reapply quiet mode (see cli.forward)
            if "REZ_QUIET" not in os.environ:
                config.override("quiet", True)
        if opts.stdin:
            # generally shells will behave as though the '-s' flag was not present
            # when no stdin is available. So here we replicate this behaviour.
            import select
            try:
                if not select.select([sys.stdin], [], [], 0.0)[0]:
                    opts.stdin = False
            except select.error:
                pass  # because windows
        # construct command
        cmd = None
        if opts.command:
            cmd = opts.command
        elif opts.interactive:
            # Interactive shell: no command; mark patched contexts with '*'.
            label = self.context_name
            if opts.patch:
                label += '*'
            config.override("prompt", "%s>" % label)
            cmd = None
        else:
            cmd = [self.tool_name] + tool_args
        retcode, _, _ = context.execute_shell(command=cmd,
                                              stdin=opts.stdin,
                                              quiet=opts.quiet,
                                              block=True)
        return retcode
    def print_about(self):
        """Print an info message about the tool."""
        filepath = os.path.join(self.suite_path, "bin", self.tool_name)
        print("Tool: %s" % self.tool_name)
        print("Path: %s" % filepath)
        print("Suite: %s" % self.suite_path)
        msg = "%s (%r)" % (self.context.load_path, self.context_name)
        print("Context: %s" % msg)
        variants = self.context.get_tool_variants(self.tool_name)
        if variants:
            if len(variants) > 1:
                # More than one package provides this tool name.
                self._print_conflicting(variants)
            else:
                variant = next(iter(variants))
                print("Package: %s" % variant.qualified_package_name)
        return 0
    def print_package_versions(self):
        """Print a list of versions of the package this tool comes from, and
        indicate which version this tool is from."""
        variants = self.context.get_tool_variants(self.tool_name)
        if variants:
            if len(variants) > 1:
                self._print_conflicting(variants)
                return 1
            else:
                from rez.packages import iter_packages
                variant = next(iter(variants))
                it = iter_packages(name=variant.name)
                rows = []
                colors = []
                # Newest first; '*' marks the version this tool comes from.
                for pkg in sorted(it, key=lambda x: x.version, reverse=True):
                    if pkg.version == variant.version:
                        name = "* %s" % pkg.qualified_name
                        col = heading
                    else:
                        name = "  %s" % pkg.qualified_name
                        col = local if pkg.is_local else None
                    label = "(local)" if pkg.is_local else ""
                    rows.append((name, pkg.path, label))
                    colors.append(col)
                _pr = Printer()
                for col, line in zip(colors, columnise(rows)):
                    _pr(line, col)
        return 0
    def peek(self):
        # Re-resolve the same request from scratch and show the diff, to
        # reveal how 'stale' the stored context is.
        config.remove_override("quiet")
        new_context = ResolvedContext(self.context.requested_packages(),
                                      package_paths=self.context.package_paths)
        # reapply quiet mode (see cli.forward)
        if "REZ_QUIET" not in os.environ:
            config.override("quiet", True)
        self.context.print_resolve_diff(new_context)
        return 0
    @classmethod
    def _print_conflicting(cls, variants):
        # Report every package variant claiming this tool name.
        vars_str = " ".join(x.qualified_package_name for x in variants)
        msg = "Packages (in conflict): %s" % vars_str
        Printer()(msg, critical)
| |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import re
import traceback
from collections import defaultdict
import six
from pathspec import PathSpec
from pathspec.gitignore import GitIgnorePattern
from twitter.common.collections import OrderedSet
from pants.base.build_environment import get_buildroot
from pants.base.build_file import BuildFile
from pants.base.specs import DescendantAddresses, SiblingAddresses, SingleAddress
from pants.build_graph.address import Address, parse_spec
from pants.build_graph.address_lookup_error import AddressLookupError
from pants.build_graph.build_file_parser import BuildFileParser
from pants.util.dirutil import fast_relpath
# Module-level logger for address-mapping diagnostics.
logger = logging.getLogger(__name__)
# Note: Significant effort has been made to keep the types BuildFile, BuildGraph, Address, and
# Target separated appropriately. The BuildFileAddressMapper is intended to have knowledge
# of just BuildFile, BuildFileParser and Address.
#
# Here are some guidelines to help maintain this abstraction:
# - Use the terminology 'address' instead of 'target' in symbols and user messages
# - Wrap exceptions from BuildFile and BuildFileParser with a subclass of AddressLookupError
# so that callers do not have to reference those modules
#
# Note: 'spec' should not be a user visible term, substitute 'address' instead.
class BuildFileAddressMapper(object):
    """Maps addresses in the pants virtual address space to corresponding BUILD file declarations."""

    # Exception hierarchy: every lookup failure subclasses AddressLookupError so
    # callers can catch a single base type without importing BuildFile modules.
    class AddressNotInBuildFile(AddressLookupError):
        """Indicates an address cannot be found in an existing BUILD file."""

    class EmptyBuildFileError(AddressLookupError):
        """Indicates no addresses are defined in a BUILD file."""

    class InvalidBuildFileReference(AddressLookupError):
        """Indicates no BUILD file exists at the address referenced."""

    class InvalidAddressError(AddressLookupError):
        """Indicates an address cannot be parsed."""

    class BuildFileScanError(AddressLookupError):
        """Indicates a problem was encountered scanning a tree of BUILD files."""

    class InvalidRootError(BuildFileScanError):
        """Indicates an invalid scan root was supplied."""

    # Target specs are mapped to the patterns which match them, if any. This variable is a key for
    # specs which don't match any exclusion regexps. We know it won't already be in the list of
    # patterns, because the asterisks in its name make it an invalid regexp.
    _UNMATCHED_KEY = '** unmatched **'

    def __init__(self, build_file_parser, project_tree, build_ignore_patterns=None, exclude_target_regexps=None):
        """Create a BuildFileAddressMapper.

        :param build_file_parser: An instance of BuildFileParser
        :param build_file_type: A subclass of BuildFile used to construct and cache BuildFile objects
        :param project_tree: Project tree the BUILD files live in.
        :param build_ignore_patterns: Gitignore-style patterns of BUILD files to skip while scanning.
        :param exclude_target_regexps: Regexps of target specs to exclude from scan results.
        """
        self._build_file_parser = build_file_parser
        self._spec_path_to_address_map_map = {}  # {spec_path: {address: addressable}} mapping
        self._project_tree = project_tree
        self._build_ignore_patterns = PathSpec.from_lines(GitIgnorePattern, build_ignore_patterns or [])
        self._exclude_target_regexps = exclude_target_regexps or []
        # Compile exclusion regexps once; they are applied to every scanned spec.
        self._exclude_patterns = [re.compile(pattern) for pattern in self._exclude_target_regexps]

    @property
    def root_dir(self):
        # Root directory of the build, as known by the underlying parser.
        return self._build_file_parser.root_dir

    def _raise_incorrect_address_error(self, spec_path, wrong_target_name, targets):
        """Search through the list of targets and return those which originate from the same folder
        which wrong_target_name resides in.

        :raises: A helpful error message listing possible correct target addresses.
        """
        was_not_found_message = '{target_name} was not found in BUILD files from {spec_path}'.format(
            target_name=wrong_target_name, spec_path=os.path.join(self._project_tree.build_root, spec_path))
        if not targets:
            raise self.EmptyBuildFileError(
                '{was_not_found_message}, because that directory contains no BUILD files defining addressable entities.'
                .format(was_not_found_message=was_not_found_message))
        # Print BUILD file extensions if there's more than one BUILD file with targets only.
        if len(set([target.build_file for target in targets])) == 1:
            specs = [':{}'.format(target.target_name) for target in targets]
        else:
            specs = [':{} (from {})'.format(target.target_name, os.path.basename(target.build_file.relpath))
                     for target in targets]
        # Might be neat to sort by edit distance or something, but for now alphabetical is fine.
        specs = [''.join(pair) for pair in sorted(specs)]
        # Give different error messages depending on whether BUILD file was empty.
        one_of = ' one of' if len(specs) > 1 else ''  # Handle plurality, just for UX.
        raise self.AddressNotInBuildFile(
            '{was_not_found_message}. Perhaps you '
            'meant{one_of}: \n {specs}'.format(was_not_found_message=was_not_found_message,
                                               one_of=one_of,
                                               specs='\n '.join(specs)))

    def resolve(self, address):
        """Maps an address in the virtual address space to an object.

        :param Address address: the address to lookup in a BUILD file
        :raises AddressLookupError: if the path to the address is not found.
        :returns: A tuple of the natively mapped BuildFileAddress and the Addressable it points to.
        """
        address_map = self._address_map_from_spec_path(address.spec_path)
        if address not in address_map:
            self._raise_incorrect_address_error(address.spec_path, address.target_name, address_map)
        else:
            return address_map[address]

    def resolve_spec(self, spec):
        """Converts a spec to an address and maps it using `resolve`"""
        try:
            address = Address.parse(spec)
        except ValueError as e:
            raise self.InvalidAddressError(e)
        _, addressable = self.resolve(address)
        return addressable

    def _address_map_from_spec_path(self, spec_path):
        """Returns a resolution map of all addresses in a "directory" in the virtual address space.

        :returns {Address: (Address, <resolved Object>)}:
        """
        # Results are memoized per spec_path for the lifetime of this mapper.
        if spec_path not in self._spec_path_to_address_map_map:
            try:
                build_files = list(BuildFile.get_build_files_family(self._project_tree, spec_path,
                                                                    self._build_ignore_patterns))
                if not build_files:
                    raise self.BuildFileScanError("{spec_path} does not contain any BUILD files."
                                                  .format(spec_path=os.path.join(self.root_dir, spec_path)))
                mapping = self._build_file_parser.address_map_from_build_files(build_files)
            except BuildFileParser.BuildFileParserError as e:
                # Wrap parser errors so callers only need to handle AddressLookupError.
                raise AddressLookupError("{message}\n Loading addresses from '{spec_path}' failed."
                                         .format(message=e, spec_path=spec_path))
            address_map = {address: (address, addressed) for address, addressed in mapping.items()}
            self._spec_path_to_address_map_map[spec_path] = address_map
        return self._spec_path_to_address_map_map[spec_path]

    def addresses_in_spec_path(self, spec_path):
        """Returns only the addresses gathered by `address_map_from_spec_path`, with no values."""
        return self._address_map_from_spec_path(spec_path).keys()

    def spec_to_address(self, spec, relative_to=''):
        """A helper method for mapping a spec to the correct build file address.

        :param string spec: A spec to lookup in the map.
        :param string relative_to: Path the spec might be relative to
        :raises :class:`pants.build_graph.address_lookup_error.AddressLookupError`
           If the BUILD file cannot be found in the path specified by the spec.
        :returns: A new Address instance.
        :rtype: :class:`pants.build_graph.address.BuildFileAddress`
        """
        spec_path, name = parse_spec(spec, relative_to=relative_to)
        address = Address(spec_path, name)
        try:
            build_file_address, _ = self.resolve(address)
            return build_file_address
        except AddressLookupError as e:
            raise self.InvalidBuildFileReference('{message}\n when translating spec {spec}'
                                                 .format(message=e, spec=spec))

    def scan_build_files(self, base_path):
        # Delegate to BuildFile, honoring this mapper's ignore patterns.
        return BuildFile.scan_build_files(self._project_tree, base_path,
                                          build_ignore_patterns=self._build_ignore_patterns)

    def specs_to_addresses(self, specs, relative_to=''):
        """The equivalent of `spec_to_address` for a group of specs all relative to the same path.

        :param spec: iterable of Addresses.
        :raises AddressLookupError: if the BUILD file cannot be found in the path specified by the spec
        """
        for spec in specs:
            yield self.spec_to_address(spec, relative_to=relative_to)

    def scan_addresses(self, root=None):
        """Recursively gathers all addresses visible under `root` of the virtual address space.

        :param string root: The absolute path of the root to scan; defaults to the root directory of the
          pants project.
        :rtype: set of :class:`pants.build_graph.address.Address`
        :raises AddressLookupError: if there is a problem parsing a BUILD file
        """
        root_dir = get_buildroot()
        base_path = None
        if root:
            try:
                # `root` must live under the build root; fast_relpath raises ValueError otherwise.
                base_path = fast_relpath(root, root_dir)
            except ValueError as e:
                raise self.InvalidRootError(e)
        addresses = set()
        try:
            for build_file in BuildFile.scan_build_files(self._project_tree,
                                                         base_relpath=base_path,
                                                         build_ignore_patterns=self._build_ignore_patterns):
                for address in self.addresses_in_spec_path(build_file.spec_path):
                    addresses.add(address)
        except BuildFile.BuildFileError as e:
            # Handle exception from BuildFile out of paranoia. Currently, there is no way to trigger it.
            raise self.BuildFileScanError("{message}\n while scanning BUILD files in '{root}'."
                                          .format(message=e, root=root))
        return addresses

    def scan_specs(self, specs, fail_fast=True):
        """Execute a collection of `specs.Spec` objects and return an ordered set of Addresses."""
        excluded_target_map = defaultdict(set)  # pattern -> targets (for debugging)

        def exclude_spec(spec):
            # Record which exclusion pattern (if any) matched; used for the debug log below.
            for pattern in self._exclude_patterns:
                if pattern.search(spec) is not None:
                    excluded_target_map[pattern.pattern].add(spec)
                    return True
            excluded_target_map[self._UNMATCHED_KEY].add(spec)
            return False

        def exclude_address(address):
            return exclude_spec(address.spec)

        addresses = OrderedSet()
        for spec in specs:
            for address in self._scan_spec(spec, fail_fast, exclude_spec):
                if not exclude_address(address):
                    addresses.add(address)
        # Print debug information about the excluded targets
        if logger.getEffectiveLevel() <= logging.DEBUG and excluded_target_map:
            logger.debug('excludes:\n {excludes}'
                         .format(excludes='\n '.join(self._exclude_target_regexps)))
            targets = ', '.join(excluded_target_map[self._UNMATCHED_KEY])
            logger.debug('Targets after excludes: %s', targets)
            excluded_count = 0
            for pattern, targets in six.iteritems(excluded_target_map):
                if pattern != self._UNMATCHED_KEY:
                    logger.debug('Targets excluded by pattern {pattern}\n {targets}'
                                 .format(pattern=pattern,
                                         targets='\n '.join(targets)))
                    excluded_count += len(targets)
            logger.debug('Excluded {count} target{plural}.'
                         .format(count=excluded_count,
                                 plural=('s' if excluded_count != 1 else '')))
        return addresses

    def _scan_spec(self, spec, fail_fast, exclude_spec):
        """Scans the given address spec."""
        # Note: `exclude_spec` is accepted but not applied here; filtering is
        # done by the caller (scan_specs) on the returned addresses.
        errored_out = []
        if type(spec) is DescendantAddresses:
            # 'path::' - recursively collect addresses from every BUILD file under the directory.
            addresses = set()
            try:
                build_files = self.scan_build_files(base_path=spec.directory)
            except BuildFile.BuildFileError as e:
                raise AddressLookupError(e)
            for build_file in build_files:
                try:
                    addresses.update(self.addresses_in_spec_path(build_file.spec_path))
                except (BuildFile.BuildFileError, AddressLookupError) as e:
                    if fail_fast:
                        raise AddressLookupError(e)
                    # Collect the failure and keep scanning the remaining BUILD files.
                    errored_out.append('--------------------')
                    errored_out.append(traceback.format_exc())
                    errored_out.append('Exception message: {0}'.format(e))
            if errored_out:
                error_msg = '\n'.join(errored_out + ["Invalid BUILD files for [{0}]".format(spec.to_spec_string())])
                raise AddressLookupError(error_msg)
            return addresses
        elif type(spec) is SiblingAddresses:
            # 'path:' - only the addresses defined directly in the directory.
            return set(self.addresses_in_spec_path(spec.directory))
        elif type(spec) is SingleAddress:
            # 'path:name' - exactly one address.
            return {self.spec_to_address(spec.to_spec_string())}
        else:
            raise ValueError('Unsupported Spec type: {}'.format(spec))
| |
"""
Support for interacting with Spotify Connect.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.spotify/
"""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.media_player import (
MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MUSIC, MEDIA_TYPE_PLAYLIST, SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE, SUPPORT_PLAY, SUPPORT_PLAY_MEDIA, SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE, SUPPORT_SHUFFLE_SET, SUPPORT_VOLUME_SET)
from homeassistant.const import (
CONF_NAME, STATE_IDLE, STATE_PAUSED, STATE_PLAYING)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['spotipy-homeassistant==2.4.4.dev1']

_LOGGER = logging.getLogger(__name__)

# HTTP view registered to receive the OAuth redirect from Spotify.
AUTH_CALLBACK_NAME = 'api:spotify'
AUTH_CALLBACK_PATH = '/api/spotify'

# Configuration keys.
CONF_ALIASES = 'aliases'
CONF_CACHE_PATH = 'cache_path'
CONF_CLIENT_ID = 'client_id'
CONF_CLIENT_SECRET = 'client_secret'

# Text shown on the configurator card during account linking.
CONFIGURATOR_DESCRIPTION = 'To link your Spotify account, ' \
                           'click the link, login, and authorize:'
CONFIGURATOR_LINK_NAME = 'Link Spotify account'
CONFIGURATOR_SUBMIT_CAPTION = 'I authorized successfully'

# Token cache file (relative to the HA config dir) used between restarts.
DEFAULT_CACHE_PATH = '.spotify-token-cache'
DEFAULT_NAME = 'Spotify'
DEPENDENCIES = ['http']
DOMAIN = 'spotify'

ICON = 'mdi:spotify'

# Poll the Spotify API every 30 seconds.
SCAN_INTERVAL = timedelta(seconds=30)

# OAuth scopes: read playback state, control playback, read account type.
SCOPE = 'user-read-playback-state user-modify-playback-state user-read-private'

SUPPORT_SPOTIFY = SUPPORT_VOLUME_SET | SUPPORT_PAUSE | SUPPORT_PLAY |\
    SUPPORT_NEXT_TRACK | SUPPORT_PREVIOUS_TRACK | SUPPORT_SELECT_SOURCE |\
    SUPPORT_PLAY_MEDIA | SUPPORT_SHUFFLE_SET

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_CLIENT_ID): cv.string,
    vol.Required(CONF_CLIENT_SECRET): cv.string,
    vol.Optional(CONF_NAME): cv.string,
    vol.Optional(CONF_CACHE_PATH): cv.string,
    vol.Optional(CONF_ALIASES, default={}): {cv.string: cv.string}
})
def request_configuration(hass, config, add_entities, oauth):
    """Request Spotify authorization."""
    # Store the configurator handle in hass.data[DOMAIN] so that
    # setup_platform can dismiss it once a token has been obtained.
    def noop_callback(_):
        return None

    request = hass.components.configurator.request_config(
        DEFAULT_NAME,
        noop_callback,
        link_name=CONFIGURATOR_LINK_NAME,
        link_url=oauth.get_authorize_url(),
        description=CONFIGURATOR_DESCRIPTION,
        submit_caption=CONFIGURATOR_SUBMIT_CAPTION)
    hass.data[DOMAIN] = request
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Spotify platform."""
    import spotipy.oauth2

    # Build the OAuth helper; tokens are cached on disk between restarts.
    redirect_uri = '{}{}'.format(hass.config.api.base_url, AUTH_CALLBACK_PATH)
    token_cache = config.get(CONF_CACHE_PATH, hass.config.path(DEFAULT_CACHE_PATH))
    oauth = spotipy.oauth2.SpotifyOAuth(
        config.get(CONF_CLIENT_ID), config.get(CONF_CLIENT_SECRET),
        redirect_uri, scope=SCOPE,
        cache_path=token_cache)

    if not oauth.get_cached_token():
        # No token yet: register the redirect endpoint and ask the user to
        # authorize; setup is re-run from the callback view afterwards.
        _LOGGER.info("no token; requesting authorization")
        hass.http.register_view(SpotifyAuthCallbackView(
            config, add_entities, oauth))
        request_configuration(hass, config, add_entities, oauth)
        return

    # A pending configurator request means authorization just completed;
    # dismiss the card and drop the handle.
    pending_request = hass.data.get(DOMAIN)
    if pending_request:
        hass.components.configurator.request_done(pending_request)
        del hass.data[DOMAIN]

    entity = SpotifyMediaPlayer(
        oauth, config.get(CONF_NAME, DEFAULT_NAME), config[CONF_ALIASES])
    add_entities([entity], True)
class SpotifyAuthCallbackView(HomeAssistantView):
    """Spotify Authorization Callback View."""

    # Auth is disabled because the request arrives via Spotify's browser
    # redirect, not from an authenticated Home Assistant client.
    requires_auth = False
    url = AUTH_CALLBACK_PATH
    name = AUTH_CALLBACK_NAME

    def __init__(self, config, add_entities, oauth):
        """Initialize.

        :param config: Platform config, forwarded to setup_platform on re-run.
        :param add_entities: Entity-registration callback, forwarded likewise.
        :param oauth: spotipy SpotifyOAuth helper used to redeem the auth code.
        """
        self.config = config
        self.add_entities = add_entities
        self.oauth = oauth

    @callback
    def get(self, request):
        """Receive authorization token."""
        hass = request.app['hass']
        # Exchange the ?code=... query parameter for an access token (cached
        # by the oauth helper), then re-run platform setup, which will now
        # find the cached token and create the media player entity.
        self.oauth.get_access_token(request.query['code'])
        hass.async_add_job(
            setup_platform, hass, self.config, self.add_entities)
class SpotifyMediaPlayer(MediaPlayerDevice):
    """Representation of a Spotify controller."""

    def __init__(self, oauth, name, aliases):
        """Initialize.

        :param oauth: spotipy SpotifyOAuth helper for obtaining/refreshing tokens.
        :param name: Friendly entity name.
        :param aliases: mapping used to rename playback devices (presumably
            {device_id: alias} — keys are looked up by device id in update()).
        """
        self._name = name
        self._oauth = oauth
        # Current-track metadata, refreshed by update().
        self._album = None
        self._title = None
        self._artist = None
        self._uri = None
        self._image_url = None
        self._state = None
        # Playback devices: display name -> Spotify device id.
        self._current_device = None
        self._devices = {}
        self._volume = None
        self._shuffle = False
        # spotipy client and the Spotify user profile; created lazily in
        # refresh_spotify_instance().
        self._player = None
        self._user = None
        self._aliases = aliases
        self._token_info = self._oauth.get_cached_token()

    def refresh_spotify_instance(self):
        """Fetch a new spotify instance."""
        import spotipy
        token_refreshed = False
        # Refresh the access token when missing or expired.
        need_token = (self._token_info is None or
                      self._oauth.is_token_expired(self._token_info))
        if need_token:
            # NOTE(review): if _token_info is None this subscript raises
            # TypeError — confirm a cached token always exists by this point.
            new_token = \
                self._oauth.refresh_access_token(
                    self._token_info['refresh_token'])
            # skip when refresh failed
            if new_token is None:
                return
            self._token_info = new_token
            token_refreshed = True
        # (Re)create the spotipy client whenever the token changed.
        if self._player is None or token_refreshed:
            self._player = \
                spotipy.Spotify(auth=self._token_info.get('access_token'))
            self._user = self._player.me()

    def update(self):
        """Update state and attributes."""
        self.refresh_spotify_instance()
        # Don't true update when token is expired
        if self._oauth.is_token_expired(self._token_info):
            _LOGGER.warning("Spotify failed to update, token expired.")
            return
        # Available devices
        player_devices = self._player.devices()
        if player_devices is not None:
            devices = player_devices.get('devices')
            if devices is not None:
                old_devices = self._devices
                # Map each device to its configured alias (by id) or its
                # reported name, keeping the id as the value for API calls.
                self._devices = {self._aliases.get(device.get('id'),
                                                   device.get('name')):
                                 device.get('id')
                                 for device in devices}
                device_diff = {name: id for name, id in self._devices.items()
                               if old_devices.get(name, None) is None}
                if device_diff:
                    _LOGGER.info("New Devices: %s", str(device_diff))
        # Current playback state
        current = self._player.current_playback()
        if current is None:
            self._state = STATE_IDLE
            return
        # Track metadata
        item = current.get('item')
        if item:
            self._album = item.get('album').get('name')
            self._title = item.get('name')
            self._artist = ', '.join([artist.get('name')
                                      for artist in item.get('artists')])
            self._uri = item.get('uri')
            images = item.get('album').get('images')
            self._image_url = images[0].get('url') if images else None
        # Playing state
        self._state = STATE_PAUSED
        if current.get('is_playing'):
            self._state = STATE_PLAYING
        self._shuffle = current.get('shuffle_state')
        # Active device; no device reported means playback is idle.
        device = current.get('device')
        if device is None:
            self._state = STATE_IDLE
        else:
            if device.get('volume_percent'):
                self._volume = device.get('volume_percent') / 100
            if device.get('name'):
                self._current_device = device.get('name')

    def set_volume_level(self, volume):
        """Set the volume level."""
        # Spotify expects an integer percentage 0-100; HA supplies 0.0-1.0.
        self._player.volume(int(volume * 100))

    def set_shuffle(self, shuffle):
        """Enable/Disable shuffle mode."""
        self._player.shuffle(shuffle)

    def media_next_track(self):
        """Skip to next track."""
        self._player.next_track()

    def media_previous_track(self):
        """Skip to previous track."""
        self._player.previous_track()

    def media_play(self):
        """Start or resume playback."""
        self._player.start_playback()

    def media_pause(self):
        """Pause playback."""
        self._player.pause_playback()

    def select_source(self, source):
        """Select playback device."""
        if self._devices:
            # Transfer playback to the chosen device, resuming only if
            # something is currently playing.
            self._player.transfer_playback(self._devices[source],
                                           self._state == STATE_PLAYING)

    def play_media(self, media_type, media_id, **kwargs):
        """Play media."""
        # NOTE(review): caller-supplied **kwargs are discarded here — confirm intended.
        kwargs = {}
        if media_type == MEDIA_TYPE_MUSIC:
            kwargs['uris'] = [media_id]
        elif media_type == MEDIA_TYPE_PLAYLIST:
            kwargs['context_uri'] = media_id
        else:
            _LOGGER.error("media type %s is not supported", media_type)
            return
        if not media_id.startswith('spotify:'):
            _LOGGER.error("media id must be spotify uri")
            return
        self._player.start_playback(**kwargs)

    @property
    def name(self):
        """Return the name."""
        return self._name

    @property
    def icon(self):
        """Return the icon."""
        return ICON

    @property
    def state(self):
        """Return the playback state."""
        return self._state

    @property
    def volume_level(self):
        """Return the device volume."""
        return self._volume

    @property
    def shuffle(self):
        """Shuffling state."""
        return self._shuffle

    @property
    def source_list(self):
        """Return a list of source devices."""
        if self._devices:
            return list(self._devices.keys())

    @property
    def source(self):
        """Return the current playback device."""
        return self._current_device

    @property
    def media_content_id(self):
        """Return the media URL."""
        return self._uri

    @property
    def media_image_url(self):
        """Return the media image URL."""
        return self._image_url

    @property
    def media_artist(self):
        """Return the media artist."""
        return self._artist

    @property
    def media_album_name(self):
        """Return the media album."""
        return self._album

    @property
    def media_title(self):
        """Return the media title."""
        return self._title

    @property
    def supported_features(self):
        """Return the media player features that are supported."""
        # Playback control is only offered for premium accounts.
        if self._user is not None and self._user['product'] == 'premium':
            return SUPPORT_SPOTIFY
        return None

    @property
    def media_content_type(self):
        """Return the media type."""
        return MEDIA_TYPE_MUSIC
| |
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from cloudferry.actions.prechecks import check_networks
from cloudferry.lib.base import exception
from tests import test
class CheckNetworksTestCase(test.TestCase):
    """Tests for the CheckNetworks pre-migration check action."""

    @staticmethod
    def get_action(src_net_info, dst_net_info=None, src_compute_info=None):
        """Build a CheckNetworks action wired to mocked src/dst clouds.

        :param src_net_info: dict returned by the source network resource's read_info().
        :param dst_net_info: same for the destination; defaults to an empty topology.
        :param src_compute_info: list of mock instances on the source; defaults to two.
        """
        if not dst_net_info:
            dst_net_info = {'networks': [],
                            'subnets': [],
                            'floating_ips': []}
        if not src_compute_info:
            src_compute_info = [mock.Mock(id='fake_id_1'),
                                mock.Mock(id='fake_id_2')]
        fake_src_compute = mock.Mock()
        fake_src_compute.get_instances_list.return_value = src_compute_info
        fake_src_net = mock.Mock()
        fake_src_net.read_info.return_value = src_net_info
        # One source port attaching 'fake_instance_id' to 'fake_network_id';
        # used by the instance-in-external-network tests below.
        fake_src_net.get_ports_list.return_value = [
            {'id': 'fake_port_id',
             'network_id': 'fake_network_id',
             'device_id': 'fake_instance_id'}]
        fake_dst_net = mock.Mock()
        fake_dst_net.read_info.return_value = dst_net_info
        fake_src_cloud = mock.Mock()
        fake_dst_cloud = mock.Mock()
        fake_config = mock.MagicMock()
        fake_config.migrate.ext_net_map = None
        fake_src_cloud.resources = {'network': fake_src_net,
                                    'compute': fake_src_compute}
        fake_dst_cloud.resources = {'network': fake_dst_net}
        fake_init = {
            'src_cloud': fake_src_cloud,
            'dst_cloud': fake_dst_cloud,
            'cfg': fake_config
        }
        return check_networks.CheckNetworks(fake_init)

    def test_all_empty(self):
        # Empty topologies on both sides must pass the check.
        src_net_info = {'networks': [],
                        'subnets': [],
                        'floating_ips': []}
        dst_net_info = {'networks': [],
                        'subnets': [],
                        'floating_ips': []}
        self.get_action(src_net_info, dst_net_info).run()

    def test_empty_dst(self):
        # A populated source with an empty destination must pass.
        src_net_info = {'networks': [{'id': 'id1',
                                      'res_hash': 1,
                                      'subnets_hash': {2},
                                      "provider:physical_network": None,
                                      'provider:network_type': 'local',
                                      'provider:segmentation_id': None,
                                      'router:external': False}],
                        'subnets': [{'cidr': '10.0.0.0/24',
                                     'res_hash': 2,
                                     'network_id': 'id1',
                                     'id': 'sub1',
                                     'external': False}],
                        'floating_ips': []}
        dst_net_info = {'networks': [],
                        'subnets': [],
                        'floating_ips': []}
        self.get_action(src_net_info, dst_net_info).run()

    def test_equals_networks(self):
        # Identical network/subnet hashes on both sides must pass.
        src_net_info = {'networks': [{'id': 'id1',
                                      'res_hash': 1,
                                      'subnets_hash': {2},
                                      "provider:physical_network": None,
                                      'provider:network_type': 'local',
                                      'provider:segmentation_id': None,
                                      'router:external': False}],
                        'subnets': [{'cidr': '10.0.0.0/24',
                                     'res_hash': 2,
                                     'network_id': 'id1',
                                     'id': 'sub1',
                                     'external': False}],
                        'floating_ips': []}
        dst_net_info = {'networks': [{'id': 'id2',
                                      'res_hash': 1,
                                      'subnets_hash': {2},
                                      "provider:physical_network": None,
                                      'provider:network_type': 'local',
                                      'provider:segmentation_id': None,
                                      'router:external': False}],
                        'subnets': [{'cidr': '10.0.0.0/24',
                                     'res_hash': 2,
                                     'network_id': 'id2',
                                     'id': 'sub2',
                                     'external': False}],
                        'floating_ips': []}
        self.get_action(src_net_info, dst_net_info).run()

    def test_equals_and_new_networks(self):
        # Source has one extra subnet not present on the destination; must pass.
        src_net_info = {'networks': [{'id': 'id1',
                                      'res_hash': 1,
                                      'subnets_hash': {2, 5},
                                      "provider:physical_network": None,
                                      'provider:network_type': 'local',
                                      'provider:segmentation_id': None,
                                      'router:external': False}],
                        'subnets': [{'cidr': '10.0.0.0/24',
                                     'res_hash': 2,
                                     'network_id': 'id1',
                                     'id': 'sub1',
                                     'external': False},
                                    {'cidr': '11.0.0.0/24',
                                     'res_hash': 5,
                                     'network_id': 'id1',
                                     'id': 'sub2',
                                     'external': False}],
                        'floating_ips': []}
        dst_net_info = {'networks': [{'id': 'id2',
                                      'res_hash': 1,
                                      'subnets_hash': {2},
                                      "provider:physical_network": None,
                                      'provider:network_type': 'local',
                                      'provider:segmentation_id': None,
                                      'router:external': False}],
                        'subnets': [{'cidr': '10.0.0.0/24',
                                     'res_hash': 2,
                                     'network_id': 'id2',
                                     'id': 'sub2',
                                     'external': False}],
                        'floating_ips': []}
        self.get_action(src_net_info, dst_net_info).run()

    def test_check_segmentation_id_overlapping_no_dst_networks(self):
        # A segmentation id on the source cannot clash with an empty destination.
        src_net_info = {'networks': [{'id': 'id1',
                                      'res_hash': 1,
                                      'subnets_hash': {},
                                      "provider:physical_network": None,
                                      'provider:network_type': 'gre',
                                      'provider:segmentation_id': 200,
                                      'router:external': False}],
                        'subnets': [],
                        'floating_ips': []}
        dst_net_info = {'networks': [],
                        'subnets': [],
                        'floating_ips': []}
        self.get_action(src_net_info, dst_net_info).run()

    def test_check_segmentation_id_overlapping_same_network(self):
        # Same segmentation id is fine when both networks hash identically.
        src_net_info = {'networks': [{'id': 'id1',
                                      'res_hash': 1,
                                      'subnets_hash': {},
                                      "provider:physical_network": None,
                                      'provider:network_type': 'gre',
                                      'provider:segmentation_id': 200,
                                      'router:external': False}],
                        'subnets': [],
                        'floating_ips': []}
        dst_net_info = {'networks': [{'id': 'id2',
                                      'res_hash': 1,
                                      'subnets_hash': {},
                                      "provider:physical_network": None,
                                      'provider:network_type': 'gre',
                                      'provider:segmentation_id': 200,
                                      'router:external': False}],
                        'subnets': [],
                        'floating_ips': []}
        self.get_action(src_net_info, dst_net_info).run()

    def test_check_segmentation_id_overlapping_different_network(self):
        src_net_info = {'networks': [{'id': 'id1',
                                      'res_hash': 1,
                                      "provider:physical_network": None,
                                      'provider:network_type': 'gre',
                                      'provider:segmentation_id': 200}],
                        'subnets': [],
                        'floating_ips': []}
        dst_net_info = {'networks': [{'id': 'id2',
                                      'res_hash': 2,
                                      "provider:physical_network": None,
                                      'provider:network_type': 'gre',
                                      'provider:segmentation_id': 200}],
                        'subnets': [],
                        'floating_ips': []}
        # NOTE(review): unlike the sibling tests this never calls .run() on the
        # returned action, so nothing is actually verified here — confirm
        # whether an assertRaises(...) around run() was intended.
        self.get_action(src_net_info, dst_net_info)

    def test_floating_ip_overlap_clean_dst(self):
        # Source floating IP with no counterpart on the destination; must pass.
        src_net_info = {'networks': [],
                        'subnets': [],
                        'floating_ips': [{'floating_ip_address': '1.1.1.1',
                                          'tenant_name': 'test_tenant',
                                          'network_name': 'test_net',
                                          'ext_net_tenant_name': 'test_tenant',
                                          'port_id': 'test_port',
                                          'floating_network_id': 'net_id'}]}
        dst_net_info = {'networks': [],
                        'subnets': [],
                        'floating_ips': []}
        self.get_action(src_net_info, dst_net_info).run()

    def test_floating_ip_overlap_same_floating_ip(self):
        # The exact same (unassociated) floating IP on both sides; must pass.
        src_net_info = {'networks': [],
                        'subnets': [],
                        'floating_ips': [{'floating_ip_address': '1.1.1.1',
                                          'tenant_name': 'test_tenant',
                                          'network_name': 'test_net',
                                          'ext_net_tenant_name': 'test_tenant',
                                          'port_id': None,
                                          'floating_network_id': 'net_id'}]}
        dst_net_info = {'networks': [],
                        'subnets': [],
                        'floating_ips': [{'floating_ip_address': '1.1.1.1',
                                          'tenant_name': 'test_tenant',
                                          'network_name': 'test_net',
                                          'ext_net_tenant_name': 'test_tenant',
                                          'port_id': None,
                                          'floating_network_id': 'net_id'}]}
        self.get_action(src_net_info, dst_net_info).run()

    def test_floating_ip_overlap_same_floating_ip_diff_parameter(self):
        # Same floating IP but different tenant on the destination; must abort.
        src_net_info = {'networks': [],
                        'subnets': [],
                        'floating_ips': [{'floating_ip_address': '1.1.1.1',
                                          'tenant_name': 'test_tenant',
                                          'network_name': 'test_net',
                                          'ext_net_tenant_name': 'test_tenant',
                                          'port_id': None,
                                          'floating_network_id': 'net_id'}]}
        dst_net_info = {'networks': [],
                        'subnets': [],
                        'floating_ips': [{'floating_ip_address': '1.1.1.1',
                                          'tenant_name': 'new_tenant',
                                          'network_name': 'test_net',
                                          'ext_net_tenant_name': 'test_tenant',
                                          'port_id': None,
                                          'floating_network_id': 'net_id'}]}
        action = self.get_action(src_net_info, dst_net_info)
        self.assertRaises(exception.AbortMigrationError, action.run)

    def test_floating_ip_overlap_same_floating_ip_associated_to_both_vms(self):
        # Floating IP bound to a port on both sides; must abort.
        src_net_info = {'networks': [],
                        'subnets': [],
                        'floating_ips': [{'floating_ip_address': '1.1.1.1',
                                          'tenant_name': 'test_tenant',
                                          'network_name': 'test_net',
                                          'ext_net_tenant_name': 'test_tenant',
                                          'port_id': 'test_port',
                                          'floating_network_id': 'net_id'}]}
        dst_net_info = {'networks': [],
                        'subnets': [],
                        'floating_ips': [{'floating_ip_address': '1.1.1.1',
                                          'tenant_name': 'test_tenant',
                                          'network_name': 'test_net',
                                          'ext_net_tenant_name': 'test_tenant',
                                          'port_id': 'test_port',
                                          'floating_network_id': 'net_id'}]}
        action = self.get_action(src_net_info, dst_net_info)
        self.assertRaises(exception.AbortMigrationError, action.run)

    def test_floating_ip_overlap_same_floating_ip_associated_to_one_vm(self):
        # Floating IP bound only on the destination; must pass.
        src_net_info = {'networks': [],
                        'subnets': [],
                        'floating_ips': [{'floating_ip_address': '1.1.1.1',
                                          'tenant_name': 'test_tenant',
                                          'network_name': 'test_net',
                                          'ext_net_tenant_name': 'test_tenant',
                                          'port_id': None,
                                          'floating_network_id': 'net_id'}]}
        dst_net_info = {'networks': [],
                        'subnets': [],
                        'floating_ips': [{'floating_ip_address': '1.1.1.1',
                                          'tenant_name': 'test_tenant',
                                          'network_name': 'test_net',
                                          'ext_net_tenant_name': 'test_tenant',
                                          'port_id': 'test_port',
                                          'floating_network_id': 'net_id'}]}
        self.get_action(src_net_info, dst_net_info).run()

    def test_no_instance_in_external_network(self):
        # The instance's port (see get_action) belongs to another device id,
        # so no instance lives in the external network; must pass quietly.
        src_net_info = {'networks': [{'id': 'fake_network_id',
                                      'res_hash': 1,
                                      'subnets_hash': {2},
                                      "provider:physical_network": None,
                                      'provider:network_type': 'local',
                                      'provider:segmentation_id': None,
                                      'router:external': True}],
                        'subnets': [{'cidr': '10.0.0.0/24',
                                     'res_hash': 2,
                                     'network_id': 'fake_network_id',
                                     'id': 'sub1',
                                     'external': True}],
                        'floating_ips': []}
        src_cmp_info = [mock.Mock(id='fake_instance_id_not_in_external')]
        self.get_action(src_net_info, src_compute_info=src_cmp_info).run()

    def test_instance_in_external_network(self):
        # 'fake_instance_id' matches the mocked port's device_id, so the check
        # should warn about instances attached to the external subnet.
        src_net_info = {'networks': [{'id': 'fake_network_id',
                                      'res_hash': 1,
                                      'subnets_hash': {2},
                                      "provider:physical_network": None,
                                      'provider:network_type': 'local',
                                      'provider:segmentation_id': None,
                                      'router:external': False}],
                        'subnets': [{'cidr': '10.0.0.0/24',
                                     'res_hash': 2,
                                     'network_id': 'fake_network_id',
                                     'id': 'sub1',
                                     'external': True}],
                        'floating_ips': []}
        src_cmp_info = [mock.Mock(id='fake_instance_id')]
        action = self.get_action(src_net_info, src_compute_info=src_cmp_info)
        with mock.patch.object(check_networks.LOG, 'warning') as warning:
            action.run()
            warning.assert_called_once_with(mock.ANY, ['fake_instance_id'])

    def test_allocation_pools_overlap(self):
        # Overlapping external allocation pools (1.1.1.5-10 overlaps both
        # source ranges); must abort.
        src_net_info = {'networks': [{'id': 'id1',
                                      'res_hash': 1,
                                      'subnets_hash': {2},
                                      "provider:physical_network": None,
                                      'provider:network_type': 'local',
                                      'provider:segmentation_id': None,
                                      'router:external': True}],
                        'subnets': [{'cidr': '1.1.1.1/24',
                                     'name': 'snet1',
                                     'res_hash': 2,
                                     'network_id': 'id1',
                                     'id': 'sub1',
                                     'external': True,
                                     'allocation_pools': [
                                         {'start': '1.1.1.2',
                                          'end': '1.1.1.10'},
                                         {'start': '1.1.1.20',
                                          'end': '1.1.1.30'}]
                                     }],
                        'floating_ips': []}
        dst_net_info = {'networks': [{'id': 'id2',
                                      'res_hash': 1,
                                      'subnets_hash': {3},
                                      "provider:physical_network": None,
                                      'provider:network_type': 'local',
                                      'provider:segmentation_id': None,
                                      'router:external': True}],
                        'subnets': [{'cidr': '1.1.1.1/25',
                                     'name': 'snet1',
                                     'res_hash': 3,
                                     'network_id': 'id2',
                                     'id': 'sub2',
                                     'external': True,
                                     'allocation_pools': [
                                         {'start': '1.1.1.5',
                                          'end': '1.1.1.15'}]
                                     }],
                        'floating_ips': []}
        action = self.get_action(src_net_info, dst_net_info)
        self.assertRaises(exception.AbortMigrationError, action.run)

    def test_allocation_pools_no_overlap(self):
        # Disjoint allocation pools (src 100-200 vs dst 2-10/20-30); must pass.
        src_net_info = {'networks': [{'id': 'id1',
                                      'res_hash': 1,
                                      'subnets_hash': {2},
                                      "provider:physical_network": None,
                                      'provider:network_type': 'local',
                                      'provider:segmentation_id': None,
                                      'router:external': True}],
                        'subnets': [{'cidr': '1.1.1.1/24',
                                     'res_hash': 2,
                                     'network_id': 'id1',
                                     'id': 'sub1',
                                     'external': True,
                                     'allocation_pools': [
                                         {'start': '1.1.1.100',
                                          'end': '1.1.1.200'}]
                                     }],
                        'floating_ips': []}
        dst_net_info = {'networks': [{'id': 'id2',
                                      'res_hash': 1,
                                      'subnets_hash': {3},
                                      "provider:physical_network": None,
                                      'provider:network_type': 'local',
                                      'provider:segmentation_id': None,
                                      'router:external': True}],
                        'subnets': [{'cidr': '1.1.1.1/25',
                                     'res_hash': 3,
                                     'network_id': 'id2',
                                     'id': 'sub2',
                                     'external': True,
                                     'allocation_pools': [
                                         {'start': '1.1.1.2',
                                          'end': '1.1.1.10'},
                                         {'start': '1.1.1.20',
                                          'end': '1.1.1.30'}]
                                     }],
                        'floating_ips': []}
        self.get_action(src_net_info, dst_net_info).run()

    def test_allocation_pools_same_network_and_subnet(self):
        # Identical pools on an identically-hashed network/subnet; must pass.
        src_net_info = {'networks': [{'id': 'id1',
                                      'res_hash': 1,
                                      'subnets_hash': {3},
                                      "provider:physical_network": None,
                                      'provider:network_type': 'local',
                                      'provider:segmentation_id': None,
                                      'router:external': True}],
                        'subnets': [{'cidr': '1.1.1.1/25',
                                     'res_hash': 3,
                                     'network_id': 'id1',
                                     'id': 'sub1',
                                     'external': True,
                                     'allocation_pools': [
                                         {'start': '1.1.1.2',
                                          'end': '1.1.1.10'},
                                         {'start': '1.1.1.20',
                                          'end': '1.1.1.30'}]
                                     }],
                        'floating_ips': []}
        dst_net_info = {'networks': [{'id': 'id2',
                                      'res_hash': 1,
                                      'subnets_hash': {3},
                                      "provider:physical_network": None,
                                      'provider:network_type': 'local',
                                      'provider:segmentation_id': None,
                                      'router:external': True}],
                        'subnets': [{'cidr': '1.1.1.1/25',
                                     'res_hash': 3,
                                     'network_id': 'id2',
                                     'id': 'sub2',
                                     'external': True,
                                     'allocation_pools': [
                                         {'start': '1.1.1.2',
                                          'end': '1.1.1.10'},
                                         {'start': '1.1.1.20',
                                          'end': '1.1.1.30'}]
                                     }],
                        'floating_ips': []}
        self.get_action(src_net_info, dst_net_info).run()
| |
'''Lightweight declarative YAML and XML data binding for Python.'''
import yaml
# Prefer the LibYAML C implementations for speed; fall back to the pure-Python
# loader/dumper when PyYAML was built without LibYAML. A bare `except:` here
# would also swallow KeyboardInterrupt/SystemExit, so catch ImportError only.
try:
    from yaml import CSafeLoader as SafeLoader, CSafeDumper as SafeDumper
except ImportError:
    from yaml import SafeLoader, SafeDumper
import datetime
import calendar
import re
import sys
import time
import math
import types
import platform
from cStringIO import StringIO
# Optional accelerated extension module; None means the pure-Python paths are used.
util_ext = None
# Monotonic counter used to order declared properties.
g_iprop = 0
# Registries filled in while class definitions are being processed.
g_deferred = {}
g_deferred_content = {}
# Lookup tables from (XML) tag names to the classes they instantiate.
g_tagname_to_class = {}
g_xmltagname_to_class = {}
# Names of all built-in guts type classes provided by this module.
guts_types = ['Object', 'SObject', 'String', 'Unicode', 'Int', 'Float',
              'Complex', 'Bool', 'Timestamp', 'DateTimestamp', 'StringPattern',
              'UnicodePattern', 'StringChoice', 'List', 'Dict', 'Tuple',
              'Union', 'Choice', 'Any']
us_to_cc_regex = re.compile(r'([a-z])_([a-z])')


def us_to_cc(s):
    '''Convert an underscore_name to its camelCase form.'''
    def _camel(match):
        return match.group(1) + match.group(2).upper()
    return us_to_cc_regex.sub(_camel, s)
cc_to_us_regex1 = re.compile(r'([a-z])([A-Z]+)([a-z]|$)')
cc_to_us_regex2 = re.compile(r'([A-Z])([A-Z][a-z])')


def cc_to_us(s):
    '''Convert a camelCase/CamelCase name to its underscore_name form.'''
    step1 = cc_to_us_regex1.sub('\\1_\\2\\3', s)
    step2 = cc_to_us_regex2.sub('\\1_\\2', step1)
    return step2.lower()
# Matches a fractional-seconds directive like '.3FRAC' in time format strings.
re_frac = re.compile(r'\.[1-9]FRAC')
# Maps each '.NFRAC' directive to the printf format producing N fractional digits.
frac_formats = dict([('.%sFRAC' % x, '%.'+x+'f') for x in '123456789'])
def make_xmltagname_from_name(name):
    # XML tag names use camel case for underscore-separated property names.
    return us_to_cc(name)
def make_name_from_xmltagname(xmltagname):
    # Inverse of make_xmltagname_from_name().
    return cc_to_us(xmltagname)
def make_content_name(name):
    '''Derive the singular element name for a multivalued property.

    ``'station_list'`` becomes ``'station'``, a plural like ``'networks'``
    becomes ``'network'``; anything else is returned unchanged.
    '''
    if name.endswith('_list'):
        return name[:-len('_list')]
    if name.endswith('s'):
        return name[:-1]
    return name
def expand_stream_args(mode):
    # Decorator factory; mode is 'r' for loaders and 'w' for dumpers.
    def wrap(f):
        '''Decorator to enhance functions taking stream objects.
        Wraps a function f(..., stream, ...) so that it can also be called as
        f(..., filename='myfilename', ...) or as f(..., string='mydata', ...).
        '''
        def g(*args, **kwargs):
            stream = kwargs.pop('stream', None)
            filename = kwargs.pop('filename', None)
            string = kwargs.pop('string', None)
            # At most one of the three input/output styles may be given.
            assert sum(x is not None for x in (stream, filename, string)) <= 1
            if stream is not None:
                kwargs['stream'] = stream
                return f(*args, **kwargs)
            elif filename is not None:
                stream = open(filename, mode)
                kwargs['stream'] = stream
                retval = f(*args, **kwargs)
                if isinstance(retval, types.GeneratorType):
                    # For generator-returning functions, keep the file open
                    # until the generator is exhausted or closed, then close.
                    def wrap_generator(gen):
                        try:
                            for x in gen:
                                yield x
                        except GeneratorExit:
                            pass
                        stream.close()
                    return wrap_generator(retval)
                else:
                    stream.close()
                    return retval
            elif string is not None:
                assert mode == 'r', \
                    'Keyword argument string=... cannot be used in dumper ' \
                    'function.'
                kwargs['stream'] = StringIO(string)
                return f(*args, **kwargs)
            else:
                # No target given: dumpers render to a string and return it.
                assert mode == 'w', \
                    'Use keyword argument stream=... or filename=... in ' \
                    'loader function.'
                sout = StringIO()
                f(stream=sout, *args, **kwargs)
                return sout.getvalue()
        return g
    return wrap
# Exception hierarchy for time-string parsing (str_to_time / time_to_str).
class TimeStrError(Exception):
    pass
class FractionalSecondsMissing(TimeStrError):
    '''
    Exception raised by :py:func:`str_to_time` when the given string lacks
    fractional seconds.
    '''
    pass
class FractionalSecondsWrongNumberOfDigits(TimeStrError):
    '''
    Exception raised by :py:func:`str_to_time` when the given string has an
    incorrect number of digits in the fractional seconds part.
    '''
    pass
def _endswith_n(s, endings):
for ix, x in enumerate(endings):
if s.endswith(x):
return ix
return -1
def str_to_time(s, format='%Y-%m-%d %H:%M:%S.OPTFRAC'):
    '''
    Convert string representing UTC time to floating point system time.
    :param s: string representing UTC time
    :param format: time string format
    :returns: system time stamp as floating point value
    Uses the semantics of :py:func:`time.strptime` but allows for fractional
    seconds. If the format ends with ``'.FRAC'``, anything after a dot is
    interpreted as fractional seconds. If the format ends with ``'.OPTFRAC'``,
    the fractional part, including the dot is made optional. The latter has the
    consequence, that the time strings and the format may not contain any other
    dots. If the format ends with ``'.xFRAC'`` where x is 1, 2, or 3, it is
    ensured, that exactly that number of digits are present in the fractional
    seconds.
    '''
    # Fast path: delegate to the optional C extension when present.
    if util_ext is not None:
        try:
            t, tfrac = util_ext.stt(s, format)
        except util_ext.UtilExtError, e:
            raise TimeStrError(
                '%s, string=%s, format=%s' % (str(e), s, format))
        return t+tfrac
    fracsec = 0.
    fixed_endings = '.FRAC', '.1FRAC', '.2FRAC', '.3FRAC'
    iend = _endswith_n(format, fixed_endings)
    if iend != -1:
        # Mandatory fractional part: split at the last dot and check digits.
        dotpos = s.rfind('.')
        if dotpos == -1:
            raise FractionalSecondsMissing(
                'string=%s, format=%s' % (s, format))
        # iend > 0 means '.xFRAC': exactly x fractional digits required.
        if iend > 0 and iend != (len(s)-dotpos-1):
            raise FractionalSecondsWrongNumberOfDigits(
                'string=%s, format=%s' % (s, format))
        format = format[:-len(fixed_endings[iend])]
        fracsec = float(s[dotpos:])
        s = s[:dotpos]
    elif format.endswith('.OPTFRAC'):
        # Optional fractional part; string may contain no other dots.
        dotpos = s.rfind('.')
        format = format[:-8]
        if dotpos != -1 and len(s[dotpos:]) > 1:
            fracsec = float(s[dotpos:])
        if dotpos != -1:
            s = s[:dotpos]
    try:
        # timegm interprets the struct_time as UTC (no local-time offset).
        return calendar.timegm(time.strptime(s, format)) + fracsec
    except ValueError, e:
        raise TimeStrError('%s, string=%s, format=%s' % (str(e), s, format))
# Short alias.
stt = str_to_time
def time_to_str(t, format='%Y-%m-%d %H:%M:%S.3FRAC'):
    '''
    Get string representation for floating point system time.
    :param t: floating point system time
    :param format: time string format
    :returns: string representing UTC time
    Uses the semantics of :py:func:`time.strftime` but additionally allows for
    fractional seconds. If ``format`` contains ``'.xFRAC'``, where ``x`` is a
    digit between 1 and 9, this is replaced with the fractional part of ``t``
    with ``x`` digits precision.
    '''
    # Convenience: an integer format selects that many fractional digits.
    if isinstance(format, int):
        format = '%Y-%m-%d %H:%M:%S.'+str(format)+'FRAC'
    # Fast path via the optional C extension.
    if util_ext is not None:
        t0 = math.floor(t)
        try:
            return util_ext.tts(int(t0), t - t0, format)
        except util_ext.UtilExtError, e:
            raise TimeStrError(
                '%s, timestamp=%f, format=%s' % (str(e), t, format))
    ts = float(math.floor(t))
    tfrac = t-ts
    m = re_frac.search(format)
    if m:
        sfrac = (frac_formats[m.group(0)] % tfrac)
        # Rounding may carry over to the next full second (e.g. 0.9996 with
        # 3 digits renders as '1.000'); bump the integer part accordingly.
        if sfrac[0] == '1':
            ts += 1.
        format, nsub = re_frac.subn(sfrac[1:], format, 1)
    return time.strftime(format, time.gmtime(ts))
# Short alias.
tts = time_to_str
class Defer:
    # Placeholder for a property whose type class is not yet defined at
    # declaration time (forward reference, e.g. recursive structures).
    # ObjectMetaClass resolves these once the named class is created.
    def __init__(self, classname, *args, **kwargs):
        global g_iprop
        # Record declaration order so resolved properties sort correctly.
        if kwargs.get('position', None) is None:
            kwargs['position'] = g_iprop
        g_iprop += 1
        self.classname = classname
        self.args = args
        self.kwargs = kwargs
class TBase(object):
    # Base class of all type descriptors ("T" classes). One TBase instance
    # describes one typed attribute of an Object subclass and knows how to
    # validate, regularize and serialize its values (YAML and XML).
    strict = False            # require exact type match in validate()
    multivalued = False       # True for container types (List, Dict, Tuple)
    force_regularize = False  # always run regularize_extra() when validating
    propnames = []
    @classmethod
    def init_propertystuff(cls):
        # Reset per-class registries; filled by add_property().
        cls.properties = []
        cls.xmltagname_to_name = {}
        cls.xmltagname_to_name_multivalued = {}
        cls.xmltagname_to_class = {}
        cls.content_property = None
    def __init__(
            self,
            default=None,
            optional=False,
            xmlstyle='element',
            xmltagname=None,
            help=None,
            position=None):
        global g_iprop
        # Position preserves the order in which properties were declared.
        if position is not None:
            self.position = position
        else:
            self.position = g_iprop
        g_iprop += 1
        self._default = default
        self.optional = optional
        self.name = None
        self._xmltagname = xmltagname
        self.parent = None
        self.xmlstyle = xmlstyle
        self.help = help
    def default(self):
        # DefaultMaker defaults produce a fresh object on every call.
        if isinstance(self._default, DefaultMaker):
            return self._default.make()
        else:
            return self._default
    def has_default(self):
        return self._default is not None
    def xname(self):
        # Human-readable name of this property for error messages.
        if self.name is not None:
            return self.name
        elif self.parent is not None:
            return 'element of %s' % self.parent.xname()
        else:
            return '?'
    def get_xmltagname(self):
        if self._xmltagname is not None:
            return self._xmltagname
        elif self.name:
            return make_xmltagname_from_name(self.name)
        elif self.xmltagname:
            return self.xmltagname
        else:
            assert False
    @classmethod
    def get_property(cls, name):
        # Linear scan; property lists are short.
        for prop in cls.properties:
            if prop.name == name:
                return prop
        raise ValueError()
    @classmethod
    def remove_property(cls, name):
        # Undo everything add_property() registered for this name.
        prop = cls.get_property(name)
        if not prop.multivalued:
            del cls.xmltagname_to_class[prop.effective_xmltagname]
            del cls.xmltagname_to_name[prop.effective_xmltagname]
        else:
            del cls.xmltagname_to_class[prop.content_t.effective_xmltagname]
            del cls.xmltagname_to_name_multivalued[
                prop.content_t.effective_xmltagname]
        if cls.content_property == prop:
            cls.content_property = None
        cls.properties.remove(prop)
        return prop
    @classmethod
    def add_property(cls, name, prop):
        # Register a property on this T class and index its XML tag names.
        prop.instance = prop
        prop.name = name
        if isinstance(prop, Choice.T):
            # A Choice maps several tag names onto the same property name.
            for tc in prop.choices:
                tc.effective_xmltagname = tc.get_xmltagname()
                cls.xmltagname_to_class[tc.effective_xmltagname] = tc.cls
                cls.xmltagname_to_name[tc.effective_xmltagname] = prop.name
        elif not prop.multivalued:
            prop.effective_xmltagname = prop.get_xmltagname()
            cls.xmltagname_to_class[prop.effective_xmltagname] = prop.cls
            cls.xmltagname_to_name[prop.effective_xmltagname] = prop.name
        else:
            # Containers are keyed on the singular content element name.
            prop.content_t.name = make_content_name(prop.name)
            prop.content_t.effective_xmltagname = \
                prop.content_t.get_xmltagname()
            cls.xmltagname_to_class[
                prop.content_t.effective_xmltagname] = prop.content_t.cls
            cls.xmltagname_to_name_multivalued[
                prop.content_t.effective_xmltagname] = prop.name
        cls.properties.append(prop)
        # Keep declaration order (position counter set in __init__).
        cls.properties.sort(key=lambda x: x.position)
        cls.propnames = [p.name for p in cls.properties]
        if prop.xmlstyle == 'content':
            cls.content_property = prop
    @classmethod
    def ivals(cls, val):
        # Iterate property values of instance val.
        for prop in cls.properties:
            yield getattr(val, prop.name)
    @classmethod
    def ipropvals(cls, val):
        # Iterate (property, value) pairs of instance val.
        for prop in cls.properties:
            yield prop, getattr(val, prop.name)
    @classmethod
    def inamevals(cls, val):
        for prop in cls.properties:
            yield prop.name, getattr(val, prop.name)
    @classmethod
    def ipropvals_to_save(cls, val, xmlmode=False):
        # Like ipropvals but skips values that need not be serialized
        # (None, optional defaults, empty containers) and converts the
        # rest to their save representation.
        for prop in cls.properties:
            v = getattr(val, prop.name)
            if v is not None and (
                    not (prop.optional or (prop.multivalued and not v))
                    or prop.default() != v):
                if xmlmode:
                    yield prop, prop.to_save_xml(v)
                else:
                    yield prop, prop.to_save(v)
    @classmethod
    def inamevals_to_save(cls, val, xmlmode=False):
        for prop, v in cls.ipropvals_to_save(val, xmlmode):
            yield prop.name, v
    @classmethod
    def translate_from_xml(cls, list_of_pairs, strict):
        # Convert (xmltagname, value) pairs from the XML parser into a
        # keyword-argument dict for the class constructor.
        d = {}
        for k, v in list_of_pairs:
            if k in cls.xmltagname_to_name_multivalued:
                # Repeated child elements accumulate into a list.
                k2 = cls.xmltagname_to_name_multivalued[k]
                if k2 not in d:
                    d[k2] = []
                d[k2].append(v)
            elif k in cls.xmltagname_to_name:
                k2 = cls.xmltagname_to_name[k]
                if k2 in d:
                    raise ArgumentError(
                        'Unexpectedly found more than one child element "%s" '
                        'within "%s".' % (k, cls.tagname))
                d[k2] = v
            elif k is None:
                # Text content of the element itself.
                if cls.content_property:
                    k2 = cls.content_property.name
                    d[k2] = v
            else:
                if strict:
                    raise ArgumentError(
                        'Unexpected child element "%s" found within "%s".' % (
                            k, cls.tagname))
        return d
    def validate(self, val, regularize=False, depth=-1):
        # Validate val against this type; with regularize=True, attempt to
        # coerce mismatched values via regularize_extra(). depth limits
        # recursion into children (-1 means unlimited).
        if self.optional and val is None:
            return val
        is_derived = isinstance(val, self.cls)
        is_exact = type(val) == self.cls
        not_ok = not self.strict and not is_derived or \
            self.strict and not is_exact
        if not_ok or self.force_regularize:
            if regularize:
                try:
                    val = self.regularize_extra(val)
                except (RegularizationError, ValueError):
                    raise ValidationError(
                        '%s: could not convert "%s" to type %s' % (
                            self.xname(), val, self.cls.__name__))
            else:
                raise ValidationError(
                    '%s: "%s" (type: %s) is not of type %s' % (
                        self.xname(), val, type(val), self.cls.__name__))
        validator = self
        # Subclass instances validate against their own (more specific) T.
        if type(val) != self.cls and isinstance(val, self.cls):
            validator = val.T.instance
        validator.validate_extra(val)
        if depth != 0:
            val = validator.validate_children(val, regularize, depth)
        return val
    def regularize_extra(self, val):
        # Default coercion: construct the target type from the value.
        return self.cls(val)
    def validate_extra(self, val):
        # Hook for additional per-type checks (e.g. pattern, choices).
        pass
    def validate_children(self, val, regularize, depth):
        for prop, propval in self.ipropvals(val):
            newpropval = prop.validate(propval, regularize, depth-1)
            if regularize and (newpropval is not propval):
                setattr(val, prop.name, newpropval)
        return val
    def to_save(self, val):
        return val
    def to_save_xml(self, val):
        return self.to_save(val)
    def extend_xmlelements(self, elems, v):
        # Append (tagname, value) pairs for XML serialization.
        if self.multivalued:
            for x in v:
                elems.append((self.content_t.effective_xmltagname, x))
        else:
            elems.append((self.effective_xmltagname, v))
    def deferred(self):
        # Overridden by container types that may hold Defer placeholders.
        return []
    def classname_for_help(self, strip_module=''):
        # reST cross-reference markup for the documentation generator.
        if self.dummy_cls in guts_plain_dummy_types:
            return '``%s``' % self.cls.__name__
        else:
            mod = self.cls.__module__
            cls = self.cls.__name__
            if self.dummy_cls is not self.cls:
                if self.dummy_cls.__module__ == strip_module:
                    sadd = ' (:py:class:`%s`)' % (
                        self.dummy_cls.__name__)
                else:
                    sadd = ' (:py:class:`%s.%s`)' % (
                        self.dummy_cls.__module__, self.dummy_cls.__name__)
            else:
                sadd = ''
            if mod == '__builtin__':
                return '``%s``%s' % (cls, sadd)
            elif self.cls.__module__ == strip_module:
                return ':py:class:`%s`%s' % (cls, sadd)
            else:
                return ':py:class:`%s.%s`%s' % (mod, cls, sadd)
    @classmethod
    def props_help_string(cls):
        # Build the reST attribute documentation appended to class __doc__.
        baseprops = []
        for base in cls.dummy_cls.__bases__:
            if hasattr(base, 'T'):
                baseprops.extend(base.T.properties)
        l = []
        l.append('')
        for prop in cls.properties:
            # Skip inherited properties; they are documented on the base.
            if prop in baseprops:
                continue
            descr = [
                prop.classname_for_help(strip_module=cls.dummy_cls.__module__)]
            if prop.optional:
                descr.append('*optional*')
            d = prop.default()
            if d is not None:
                descr.append('*default:* ``%s``' % repr(d))
            l.append('    .. py:attribute:: %s' % prop.name)
            l.append('')
            l.append('      %s' % ', '.join(descr))
            l.append('')
            if prop.help is not None:
                l.append('      %s' % prop.help)
            l.append('')
        return '\n'.join(l)
    @classmethod
    def class_help_string(cls):
        return cls.dummy_cls.__doc_template__
    @classmethod
    def class_signature(cls):
        # Human-readable constructor signature for documentation.
        l = []
        for prop in cls.properties:
            d = prop.default()
            if d is not None:
                arg = repr(d)
            elif prop.optional:
                arg = 'None'
            else:
                arg = '...'
            l.append('%s=%s' % (prop.name, arg))
        return '(%s)' % ', '.join(l)
    @classmethod
    def help(cls):
        return cls.props_help_string()
class ObjectMetaClass(type):
    # Metaclass driving the guts machinery: for every Object subclass it
    # creates/attaches the associated T descriptor class, collects declared
    # properties, resolves Defer placeholders, registers tag names and
    # rewrites __doc__ with generated property documentation.
    def __new__(meta, classname, bases, class_dict):
        cls = type.__new__(meta, classname, bases, class_dict)
        if classname != 'Object':
            # Name-mangled attribute under which a nested 'class __T' of
            # this class appears (e.g. '_Int__T').
            t_class_attr_name = '_%s__T' % classname
            if not hasattr(cls, t_class_attr_name):
                # No explicit __T given: derive one from the parent's T,
                # or from TBase for direct Object subclasses.
                if hasattr(cls, 'T'):
                    class T(cls.T):
                        pass
                else:
                    class T(TBase):
                        pass
                setattr(cls, t_class_attr_name, T)
            T = getattr(cls, t_class_attr_name)
            # dummy_for lets a guts class stand in for a builtin
            # (e.g. Int.T.cls is int, Int.T.dummy_cls is Int).
            if cls.dummy_for is not None:
                T.cls = cls.dummy_for
            else:
                T.cls = cls
            T.dummy_cls = cls
            if hasattr(cls, 'xmltagname'):
                T.xmltagname = cls.xmltagname
            else:
                T.xmltagname = classname
            # YAML tagname: module-defined guts_prefix wins, then module
            # path, bare classname for __main__.
            mod = sys.modules[cls.__module__]
            if hasattr(mod, 'guts_prefix'):
                if mod.guts_prefix:
                    T.tagname = mod.guts_prefix + '.' + classname
                else:
                    T.tagname = classname
            else:
                if cls.__module__ != '__main__':
                    T.tagname = cls.__module__ + '.' + classname
                else:
                    T.tagname = classname
            T.classname = classname
            T.init_propertystuff()
            # Collect property declarations from the class namespace.
            for k in dir(cls):
                prop = getattr(cls, k)
                # A trailing '__' lets a property shadow a reserved name.
                if k.endswith('__'):
                    k = k[:-2]
                if isinstance(prop, TBase):
                    if prop.deferred():
                        # Container referencing a not-yet-defined class:
                        # park it until that class is created.
                        for defer in prop.deferred():
                            g_deferred_content.setdefault(
                                defer.classname[:-2], []).append((prop, defer))
                        g_deferred.setdefault(
                            defer.classname[:-2], []).append((T, k, prop))
                    else:
                        T.add_property(k, prop)
                elif isinstance(prop, Defer):
                    g_deferred.setdefault(prop.classname[:-2], []).append(
                        (T, k, prop))
            # This class may be the target of earlier Defer placeholders;
            # resolve them now that T exists.
            if classname in g_deferred_content:
                for prop, defer in g_deferred_content[classname]:
                    prop.process_deferred(
                        defer, T(*defer.args, **defer.kwargs))
                del g_deferred_content[classname]
            if classname in g_deferred:
                for (T_, k_, prop_) in g_deferred.get(classname, []):
                    if isinstance(prop_, Defer):
                        prop_ = T(*prop_.args, **prop_.kwargs)
                    if not prop_.deferred():
                        T_.add_property(k_, prop_)
                del g_deferred[classname]
            g_tagname_to_class[T.tagname] = cls
            if hasattr(cls, 'xmltagname'):
                g_xmltagname_to_class[T.xmltagname] = cls
            cls.T = T
            T.instance = T()
            # Preserve the raw docstring (used as a template by some types)
            # and replace __doc__ with the generated documentation.
            cls.__doc_template__ = cls.__doc__
            cls.__doc__ = T.class_help_string()
            if cls.__doc__ is None:
                cls.__doc__ = 'Undocumented.'
            cls.__doc__ += '\n' + T.props_help_string()
        return cls
# Raised when a value does not conform to its declared type.
class ValidationError(Exception):
    pass
# Raised by regularize_extra() hooks when coercion is impossible.
class RegularizationError(Exception):
    pass
# Raised for missing, duplicate or unexpected constructor arguments.
class ArgumentError(Exception):
    pass
class DefaultMaker(object):
    # Deferred factory for default values: records a class together with
    # constructor arguments so that every call to make() yields a fresh
    # instance instead of one shared mutable default.
    def __init__(self, cls, args, kwargs):
        self.cls = cls
        self.args = args
        self.kwargs = kwargs

    def make(self):
        # Build and return a brand-new default instance.
        factory = self.cls
        return factory(*self.args, **self.kwargs)
class Object(object):
    # Root class of all guts data types. The metaclass attaches the T
    # descriptor; here only generic construction and (de)serialization
    # convenience methods live. No docstring on purpose: class docstrings
    # are consumed as templates by the metaclass.
    __metaclass__ = ObjectMetaClass
    dummy_for = None
    def __init__(self, **kwargs):
        # init_props=False skips property initialization entirely.
        if not kwargs.get('init_props', True):
            return
        for prop in self.T.properties:
            k = prop.name
            if k in kwargs:
                setattr(self, k, kwargs.pop(k))
            else:
                if not prop.optional and not prop.has_default():
                    raise ArgumentError('Missing argument to %s: %s' % (
                        self.T.tagname, prop.name))
                else:
                    setattr(self, k, prop.default())
        # Anything left over was not a declared property.
        if kwargs:
            raise ArgumentError('Invalid argument to %s: %s' % (
                self.T.tagname, ', '.join(kwargs.keys())))
    @classmethod
    def D(cls, *args, **kwargs):
        # Shorthand to declare a fresh-instance default for a property.
        return DefaultMaker(cls, args, kwargs)
    def validate(self, regularize=False, depth=-1):
        self.T.instance.validate(self, regularize, depth)
    def regularize(self, depth=-1):
        self.validate(regularize=True, depth=depth)
    def dump(self, stream=None, filename=None, header=False):
        return dump(self, stream=stream, filename=filename, header=header)
    def dump_xml(self, stream=None, filename=None, header=False):
        return dump_xml(self, stream=stream, filename=filename, header=header)
    @classmethod
    def load(cls, stream=None, filename=None, string=None):
        return load(stream=stream, filename=filename, string=string)
    @classmethod
    def load_xml(cls, stream=None, filename=None, string=None):
        return load_xml(stream=stream, filename=filename, string=string)
    def __str__(self):
        return self.dump()
class SObject(Object):
    # Base for objects with a canonical string representation: serialized
    # via str() and reconstructed from a string.
    class __T(TBase):
        def regularize_extra(self, val):
            if isinstance(val, basestring):
                return self.cls(val)
            return val
        def to_save(self, val):
            return str(val)
        def to_save_xml(self, val):
            return str(val)
class Any(Object):
    # Accepts any value; only nested Objects are validated recursively.
    class __T(TBase):
        def validate(self, val, regularize=False, depth=-1):
            if isinstance(val, Object):
                val.validate(regularize, depth)
            return val
# Dummy wrappers around builtin numeric types; strict=True requires an
# exact type match, repr() gives a lossless XML representation.
class Int(Object):
    dummy_for = int
    class __T(TBase):
        strict = True
        def to_save_xml(self, value):
            return repr(value)
class Float(Object):
    dummy_for = float
    class __T(TBase):
        strict = True
        def to_save_xml(self, value):
            return repr(value)
class Complex(Object):
    dummy_for = complex
    class __T(TBase):
        strict = True
        def regularize_extra(self, val):
            # Accept a (real, imag) pair or anything complex() handles.
            if isinstance(val, list) or isinstance(val, tuple):
                assert len(val) == 2
                val = complex(*val)
            elif not isinstance(val, complex):
                val = complex(val)
            return val
        def to_save(self, value):
            return repr(value)
        def to_save_xml(self, value):
            return repr(value)
class Bool(Object):
    dummy_for = bool
    class __T(TBase):
        strict = True
        def regularize_extra(self, val):
            # The strings '0' and 'false' (any case) map to False; any
            # other non-empty string is truthy.
            if isinstance(val, basestring):
                if val.lower().strip() in ('0', 'false'):
                    return False
            return bool(val)
        def to_save_xml(self, value):
            # XML schema booleans are lowercase 'true'/'false'.
            return repr(bool(value)).lower()
# Dummy wrappers around the builtin string types.
class String(Object):
    dummy_for = str
class Unicode(Object):
    dummy_for = unicode
# Types rendered as plain ``builtin`` in generated documentation.
guts_plain_dummy_types = (String, Unicode, Int, Float, Complex, Bool)
class Dict(Object):
    # Typed dict container: keys validated by key_t, values by content_t.
    dummy_for = dict
    class __T(TBase):
        multivalued = True
        def __init__(self, key_t=Any.T(), content_t=Any.T(), *args, **kwargs):
            TBase.__init__(self, *args, **kwargs)
            assert isinstance(key_t, TBase)
            assert isinstance(content_t, TBase)
            self.key_t = key_t
            self.content_t = content_t
            self.content_t.parent = self
        def default(self):
            if self._default is not None:
                return self._default
            if self.optional:
                return None
            else:
                return {}
        def has_default(self):
            return True
        def validate(self, val, regularize, depth):
            # depth+1 compensates for the extra container level.
            return TBase.validate(self, val, regularize, depth+1)
        def validate_children(self, val, regularize, depth):
            for key, ele in val.items():
                newkey = self.key_t.validate(key, regularize, depth-1)
                newele = self.content_t.validate(ele, regularize, depth-1)
                if regularize:
                    if newkey is not key or newele is not ele:
                        del val[key]
                        val[newkey] = newele
            return val
        def to_save(self, val):
            return dict((self.key_t.to_save(k), self.content_t.to_save(v))
                        for (k, v) in val.iteritems())
        def to_save_xml(self, val):
            # Dicts have no natural XML mapping.
            raise NotImplementedError()
        def classname_for_help(self, strip_module=""):
            return '``dict`` of %s objects' % \
                self.content_t.classname_for_help(strip_module=strip_module)
class List(Object):
    # Typed list container; content_t may be a Defer for forward refs.
    dummy_for = list
    class __T(TBase):
        multivalued = True
        def __init__(self, content_t=Any.T(), *args, **kwargs):
            TBase.__init__(self, *args, **kwargs)
            assert isinstance(content_t, TBase) or isinstance(content_t, Defer)
            self.content_t = content_t
            self.content_t.parent = self
        def default(self):
            if self._default is not None:
                return self._default
            if self.optional:
                return None
            else:
                return []
        def has_default(self):
            return True
        def validate(self, val, regularize, depth):
            # depth+1 compensates for the extra container level.
            return TBase.validate(self, val, regularize, depth+1)
        def validate_children(self, val, regularize, depth):
            for i, ele in enumerate(val):
                newele = self.content_t.validate(ele, regularize, depth-1)
                if regularize and newele is not ele:
                    val[i] = newele
            return val
        def to_save(self, val):
            return [self.content_t.to_save(v) for v in val]
        def to_save_xml(self, val):
            return [self.content_t.to_save_xml(v) for v in val]
        def deferred(self):
            if isinstance(self.content_t, Defer):
                return [self.content_t]
            return []
        def process_deferred(self, defer, t_inst):
            # Called by the metaclass once the deferred class exists.
            if defer is self.content_t:
                self.content_t = t_inst
        def classname_for_help(self, strip_module=''):
            return '``list`` of %s objects' % \
                self.content_t.classname_for_help(strip_module=strip_module)
def make_typed_list_class(t):
    # Create a List subclass whose content type is fixed to t.
    class O(List):
        class __T(List.T):
            def __init__(self, *args, **kwargs):
                List.T.__init__(self, content_t=t.T(), *args, **kwargs)
    return O
class Tuple(Object):
    # Typed tuple container; n, if given, fixes the required length.
    dummy_for = tuple
    class __T(TBase):
        multivalued = True
        def __init__(self, n=None, content_t=Any.T(), *args, **kwargs):
            TBase.__init__(self, *args, **kwargs)
            assert isinstance(content_t, TBase)
            self.content_t = content_t
            self.content_t.parent = self
            self.n = n
        def default(self):
            if self._default is not None:
                return self._default
            elif self.optional:
                return None
            else:
                # Fixed-length tuples default to n content defaults.
                if self.n is not None:
                    return tuple(
                        self.content_t.default() for x in xrange(self.n))
                else:
                    return tuple()
        def has_default(self):
            return True
        def validate(self, val, regularize, depth):
            # depth+1 compensates for the extra container level.
            return TBase.validate(self, val, regularize, depth+1)
        def validate_extra(self, val):
            if self.n is not None and len(val) != self.n:
                raise ValidationError(
                    '%s should have length %i' % (self.xname(), self.n))
            return val
        def validate_children(self, val, regularize, depth):
            if not regularize:
                for ele in val:
                    self.content_t.validate(ele, regularize, depth-1)
                return val
            else:
                # Tuples are immutable: rebuild only if anything changed.
                newval = []
                isnew = False
                for ele in val:
                    newele = self.content_t.validate(ele, regularize, depth-1)
                    newval.append(newele)
                    if newele is not ele:
                        isnew = True
                if isnew:
                    return tuple(newval)
                else:
                    return val
        def to_save(self, val):
            return tuple(self.content_t.to_save(v) for v in val)
        def to_save_xml(self, val):
            return [self.content_t.to_save_xml(v) for v in val]
        def classname_for_help(self, strip_module=''):
            if self.n is not None:
                return '``tuple`` of %i %s objects' % (
                    self.n, self.content_t.classname_for_help(
                        strip_module=strip_module))
            else:
                return '``tuple`` of %s objects' % (
                    self.content_t.classname_for_help(
                        strip_module=strip_module))
class Timestamp(Object):
    # UTC timestamp stored as float seconds since the epoch.
    dummy_for = float
    class __T(TBase):
        def regularize_extra(self, val):
            if isinstance(val, datetime.datetime):
                tt = val.utctimetuple()
                val = calendar.timegm(tt) + val.microsecond * 1e-6
            elif isinstance(val, datetime.date):
                tt = val.timetuple()
                val = float(calendar.timegm(tt))
            elif isinstance(val, str) or isinstance(val, unicode):
                val = val.strip()
                # Strip UTC suffixes ('Z', '+00', '+0000', '+00:00') and
                # accept ISO 8601 'T' as date/time separator.
                val = re.sub(r'(Z|\+00(:?00)?)$', '', val)
                if val[10] == 'T':
                    val = val.replace('T', ' ', 1)
                val = str_to_time(val)
            elif isinstance(val, int):
                val = float(val)
            else:
                raise ValidationError('%s: cannot convert "%s" to float' % (
                    self.xname(), val))
            return val
        def to_save(self, val):
            return datetime.datetime.utcfromtimestamp(val)
        def to_save_xml(self, val):
            return datetime.datetime.utcfromtimestamp(val).isoformat() + 'Z'
class DateTimestamp(Object):
    # Date-only timestamp stored as float seconds, serialized 'YYYY-MM-DD'.
    dummy_for = float
    class __T(TBase):
        def regularize_extra(self, val):
            if isinstance(val, datetime.datetime):
                tt = val.utctimetuple()
                val = calendar.timegm(tt) + val.microsecond * 1e-6
            elif isinstance(val, str) or isinstance(val, unicode):
                val = str_to_time(val, format='%Y-%m-%d')
            if not isinstance(val, float):
                val = float(val)
            return val
        def to_save(self, val):
            return time_to_str(val, format='%Y-%m-%d')
        def to_save_xml(self, val):
            return time_to_str(val, format='%Y-%m-%d')
# NOTE: the class docstring below is a runtime template, %-formatted by
# class_help_string() — do not edit it casually.
class StringPattern(String):
    '''Any ``str`` matching pattern ``%(pattern)s``.'''
    dummy_for = str
    pattern = '.*'
    class __T(TBase):
        def __init__(self, pattern=None, *args, **kwargs):
            TBase.__init__(self, *args, **kwargs)
            if pattern is not None:
                self.pattern = pattern
            else:
                self.pattern = self.dummy_cls.pattern
        def validate_extra(self, val):
            pat = self.pattern
            # re.search: pattern may match anywhere unless anchored.
            if not re.search(pat, val):
                raise ValidationError('%s: "%s" does not match pattern %s' % (
                    self.xname(), val, repr(pat)))
        @classmethod
        def class_help_string(cls):
            dcls = cls.dummy_cls
            doc = dcls.__doc_template__ or StringPattern.__doc_template__
            return doc % {'pattern': repr(dcls.pattern)}
# NOTE: the class docstring below is a runtime template, %-formatted by
# class_help_string() — do not edit it casually.
class UnicodePattern(Unicode):
    '''Any ``unicode`` matching pattern ``%(pattern)s``.'''
    dummy_for = unicode
    pattern = '.*'
    class __T(TBase):
        def __init__(self, pattern=None, *args, **kwargs):
            TBase.__init__(self, *args, **kwargs)
            if pattern is not None:
                self.pattern = pattern
            else:
                self.pattern = self.dummy_cls.pattern
        def validate_extra(self, val):
            pat = self.pattern
            if not re.search(pat, val, flags=re.UNICODE):
                raise ValidationError('%s: "%s" does not match pattern %s' % (
                    self.xname(), val, repr(pat)))
        @classmethod
        def class_help_string(cls):
            dcls = cls.dummy_cls
            doc = dcls.__doc_template__ or UnicodePattern.__doc_template__
            return doc % {'pattern': repr(dcls.pattern)}
# NOTE: the class docstring below is a runtime template, %-formatted by
# class_help_string() — do not edit it casually.
class StringChoice(String):
    '''Any ``str`` out of ``%(choices)s``.'''
    dummy_for = str
    choices = []
    ignore_case = False
    class __T(TBase):
        def __init__(self, choices=None, ignore_case=None, *args, **kwargs):
            TBase.__init__(self, *args, **kwargs)
            if choices is not None:
                self.choices = choices
            else:
                self.choices = self.dummy_cls.choices
            if ignore_case is not None:
                self.ignore_case = ignore_case
            else:
                self.ignore_case = self.dummy_cls.ignore_case
            # Case-insensitive matching compares uppercased on both sides.
            if self.ignore_case:
                self.choices = [x.upper() for x in self.choices]
        def validate_extra(self, val):
            if self.ignore_case:
                val = val.upper()
            if val not in self.choices:
                raise ValidationError(
                    '%s: "%s" is not a valid choice out of %s' % (
                        self.xname(), val, repr(self.choices)))
        @classmethod
        def class_help_string(cls):
            dcls = cls.dummy_cls
            doc = dcls.__doc_template__ or StringChoice.__doc_template__
            return doc % {'choices': repr(dcls.choices)}
# CAUTION: validation tries each member in order and re-raises only the
# LAST member's error on total failure, so the reported reason can be
# misleading; this approach will not always work for overlapping types.
class Union(Object):
    members = []
    dummy_for = str
    class __T(TBase):
        def __init__(self, members=None, *args, **kwargs):
            TBase.__init__(self, *args, **kwargs)
            if members is not None:
                self.members = members
            else:
                self.members = self.dummy_cls.members
        def validate(self, val, regularize=False, depth=-1):
            assert self.members
            # First member that validates wins.
            for member in self.members:
                try:
                    return member.validate(val, regularize, depth=depth)
                except ValidationError, e:
                    pass
            raise e
class Choice(Object):
    # Property accepting values of one of several alternative T types;
    # unlike Union, each alternative keeps its own XML tag name.
    choices = []
    class __T(TBase):
        def __init__(self, choices=None, *args, **kwargs):
            TBase.__init__(self, *args, **kwargs)
            if choices is not None:
                self.choices = choices
            else:
                self.choices = self.dummy_cls.choices
            self.cls_to_xmltagname = dict(
                (t.cls, t.get_xmltagname()) for t in self.choices)
        def validate(self, val, regularize=False, depth=-1):
            if self.optional and val is None:
                return val
            # Find the first choice whose type already matches val
            # (respecting each choice's own strictness).
            t = None
            for tc in self.choices:
                is_derived = isinstance(val, tc.cls)
                is_exact = type(val) == tc.cls
                if not (not tc.strict and not is_derived or
                        tc.strict and not is_exact):
                    t = tc
                    break
            if t is None:
                if regularize:
                    # No direct match: try to coerce with each choice.
                    ok = False
                    for tc in self.choices:
                        try:
                            val = tc.regularize_extra(val)
                            ok = True
                            t = tc
                            break
                        except (RegularizationError, ValueError):
                            pass
                    if not ok:
                        raise ValidationError(
                            '%s: could not convert "%s" to any type out of '
                            '(%s)' % (self.xname(), val, ','.join(
                                x.cls.__name__ for x in self.choices)))
                else:
                    raise ValidationError(
                        '%s: "%s" (type: %s) is not of any type out of '
                        '(%s)' % (self.xname(), val, type(val), ','.join(
                            x.cls.__name__ for x in self.choices)))
            validator = t
            # Subclass instances validate against their own T.
            if type(val) != t.cls and isinstance(val, t.cls):
                validator = val.T.instance
            validator.validate_extra(val)
            if depth != 0:
                val = validator.validate_children(val, regularize, depth)
            return val
        def extend_xmlelements(self, elems, v):
            elems.append((self.cls_to_xmltagname[type(v)], v))
def _dump(object, stream, header=False, _dump_function=yaml.dump):
    # Serialize one object as YAML to stream. A truthy header emits the
    # '%YAML 1.1' directive; a string header additionally becomes a
    # '# '-prefixed banner comment.
    if header:
        stream.write('%YAML 1.1\n')
        if isinstance(header, basestring):
            banner = '\n'.join('# ' + x for x in header.splitlines())
            stream.write(banner)
            stream.write('\n')
    _dump_function(
        object, stream=stream, explicit_start=True, Dumper=SafeDumper)
def _dump_all(object, stream, header=True):
    # Serialize an iterable of objects as a multi-document YAML stream.
    _dump(object, stream=stream, header=header, _dump_function=yaml.dump_all)
def _load(stream):
    # Load a single YAML document using the safe loader only.
    return yaml.load(stream=stream, Loader=SafeLoader)
def _load_all(stream):
    # Load all YAML documents eagerly into a list.
    return list(yaml.load_all(stream=stream, Loader=SafeLoader))
def _iload_all(stream):
    # Lazily iterate over all YAML documents.
    return yaml.load_all(stream=stream, Loader=SafeLoader)
def multi_representer(dumper, data):
    # YAML representer for all Object subclasses: emit a block-style
    # mapping tagged with '!<tagname>'.
    node = dumper.represent_mapping(
        '!'+data.T.tagname, data.T.inamevals_to_save(data), flow_style=False)
    return node
def multi_constructor(loader, tag_suffix, node):
    # YAML constructor counterpart: look the tag up in the global registry,
    # build the object from mapping items and regularize one level deep.
    tagname = str(tag_suffix)
    cls = g_tagname_to_class[tagname]
    kwargs = dict(loader.construct_mapping(node, deep=True).iteritems())
    o = cls(**kwargs)
    o.validate(regularize=True, depth=1)
    return o
def dict_noflow_representer(dumper, data):
    # Force plain dicts into block style for readability.
    return dumper.represent_mapping(
        'tag:yaml.org,2002:map', data, flow_style=False)
# Register the guts (de)serialization hooks with PyYAML's safe variants.
yaml.add_multi_representer(Object, multi_representer, Dumper=SafeDumper)
yaml.add_multi_constructor('!', multi_constructor, Loader=SafeLoader)
yaml.add_representer(dict, dict_noflow_representer, Dumper=SafeDumper)
class Constructor(object):
    # SAX-style handler building guts Objects from expat events. Each stack
    # entry is (name, cls, attrs, child_pairs, text_chunks); completed
    # top-level objects accumulate in self.queue.
    def __init__(self, add_namespace_maps=False, strict=False):
        self.stack = []
        self.queue = []
        self.namespaces = {}
        self.namespaces_rev = {}
        self.add_namespace_maps = add_namespace_maps
        self.strict = strict
    def start_element(self, name, attrs):
        # expat delivers 'uri localname'; keep only the local part.
        name = name.split()[-1]
        if self.stack and self.stack[-1][1] is not None:
            # Resolve the tag relative to the enclosing element's type.
            cls = self.stack[-1][1].T.xmltagname_to_class.get(name, None)
            # Plain values and SObjects are collected as text, not built here.
            if cls is not None and (
                    not issubclass(cls, Object) or issubclass(cls, SObject)):
                cls = None
        else:
            cls = g_xmltagname_to_class.get(name, None)
        self.stack.append((name, cls, attrs, [], []))
    def end_element(self, name):
        name = name.split()[-1]
        name, cls, attrs, content2, content1 = self.stack.pop()
        if cls is not None:
            # Attributes and gathered text join the child-element pairs.
            content2.extend(x for x in attrs.iteritems())
            content2.append((None, ''.join(content1)))
            o = cls(**cls.T.translate_from_xml(content2, self.strict))
            o.validate(regularize=True, depth=1)
            if self.add_namespace_maps:
                o.namespace_map = dict(self.namespaces)
            # Attach to the parent if one is being built, else emit.
            if self.stack and not all(x[1] is None for x in self.stack):
                self.stack[-1][-2].append((name, o))
            else:
                self.queue.append(o)
        else:
            # Untyped element: pass its text content up to the parent.
            content = [''.join(content1)]
            if self.stack:
                for c in content:
                    self.stack[-1][-2].append((name, c))
    def characters(self, char_content):
        if self.stack:
            self.stack[-1][-1].append(char_content)
    def start_namespace(self, ns, uri):
        assert ns not in self.namespaces
        assert uri not in self.namespaces_rev
        self.namespaces[ns] = uri
        self.namespaces_rev[uri] = ns
    def end_namespace(self, ns):
        del self.namespaces_rev[self.namespaces[ns]]
        del self.namespaces[ns]
    def get_queued_elements(self):
        # Hand over completed objects and reset the queue.
        queue = self.queue
        self.queue = []
        return queue
def _iload_all_xml(
        stream,
        bufsize=100000, add_namespace_maps=False, strict=False):
    # Incrementally parse XML from stream, yielding completed top-level
    # guts objects as soon as they are available.
    from xml.parsers.expat import ParserCreate
    # namespace_separator=' ' makes expat report 'uri localname' names.
    parser = ParserCreate(namespace_separator=' ')
    handler = Constructor(add_namespace_maps=add_namespace_maps, strict=strict)
    parser.StartElementHandler = handler.start_element
    parser.EndElementHandler = handler.end_element
    parser.CharacterDataHandler = handler.characters
    parser.StartNamespaceDeclHandler = handler.start_namespace
    parser.EndNamespaceDeclHandler = handler.end_namespace
    while True:
        data = stream.read(bufsize)
        # An empty read signals EOF; tell expat this is the final chunk.
        parser.Parse(data, bool(not data))
        for element in handler.get_queued_elements():
            yield element
        if not data:
            break
def _load_all_xml(*args, **kwargs):
    # Eager variant of _iload_all_xml().
    return list(_iload_all_xml(*args, **kwargs))
def _load_xml(*args, **kwargs):
    # Return only the first top-level object from the XML stream.
    g = _iload_all_xml(*args, **kwargs)
    return g.next()
def _dump_all_xml(objects, stream, root_element_name='root', header=True):
    # Serialize several objects under a single synthetic root element.
    _dump_xml_header(stream, header)
    beg = '<%s>\n' % root_element_name
    end = '</%s>\n' % root_element_name
    stream.write(beg)
    for object in objects:
        _dump_xml(object, stream=stream)
    stream.write(end)
def _dump_xml_header(stream, banner=None):
    # Write the XML declaration; a string banner becomes an XML comment.
    stream.write('<?xml version="1.0" encoding="UTF-8" ?>\n')
    if isinstance(banner, basestring):
        stream.write('<!-- ')
        stream.write(banner)
        stream.write(' -->\n')
def _dump_xml(obj, stream, depth=0, xmltagname=None, header=False):
    # Recursively serialize obj as indented XML (2 spaces per level).
    from xml.sax.saxutils import escape, quoteattr
    if depth == 0 and header:
        _dump_xml_header(stream, header)
    indent = ' '*depth*2
    if xmltagname is None:
        xmltagname = obj.T.xmltagname
    if isinstance(obj, Object):
        obj.validate(depth=1)
        attrs = []
        elems = []
        # Dispatch each property by its declared xmlstyle.
        for prop, v in obj.T.ipropvals_to_save(obj, xmlmode=True):
            if prop.xmlstyle == 'attribute':
                assert not prop.multivalued
                assert not isinstance(v, Object)
                attrs.append((prop.effective_xmltagname, v))
            elif prop.xmlstyle == 'content':
                # Element text content, marked with a None tag.
                assert not prop.multivalued
                assert not isinstance(v, Object)
                elems.append((None, v))
            else:
                prop.extend_xmlelements(elems, v)
        attr_str = ''
        if attrs:
            attr_str = ' ' + ' '.join(
                '%s=%s' % (k, quoteattr(str(v))) for (k, v) in attrs)
        if not elems:
            # Self-closing element when there are no children.
            stream.write('%s<%s%s />\n' % (indent, xmltagname, attr_str))
        else:
            # Pure-content elements are written on a single line.
            oneline = len(elems) == 1 and elems[0][0] is None
            stream.write(u'%s<%s%s>%s' % (
                indent, xmltagname, attr_str, ('\n', '')[oneline]))
            for (k, v) in elems:
                if k is None:
                    stream.write(
                        '%s' % escape(unicode(v), {'\0': '�'})
                        .encode('utf8'))
                else:
                    _dump_xml(v, stream=stream, depth=depth+1, xmltagname=k)
            stream.write('%s</%s>\n' % ((indent, '')[oneline], xmltagname))
    else:
        # Leaf value: a simple text element.
        stream.write('%s<%s>%s</%s>\n' % (
            indent,
            xmltagname,
            escape(unicode(obj), {'\0': '�'}).encode('utf8'),
            xmltagname))
def walk(x, typ=None, path=()):
    """Depth-first generator over *x* and all nested guts Objects.

    Yields (path, node) for every node matching *typ* (every node when
    *typ* is None).  Multivalued properties contribute (name, index) path
    components, scalar properties plain names.
    """
    if typ is None or isinstance(x, typ):
        yield path, x

    if isinstance(x, Object):
        for prop, val in x.T.ipropvals(x):
            if prop.multivalued:
                if val is None:
                    continue
                for i, element in enumerate(val):
                    for hit in walk(element, typ,
                                    path=path + ((prop.name, i),)):
                        yield hit
            else:
                for hit in walk(val, typ, path=path + (prop.name,)):
                    yield hit
def zip_walk(x, typ=None, path=(), stack=()):
    """Like walk(), but yields (path, ancestors + (node,)) tuples.

    The second element is the chain of enclosing Objects ending with the
    matching node itself.
    """
    if typ is None or isinstance(x, typ):
        yield path, stack + (x,)

    if isinstance(x, Object):
        deeper = stack + (x,)
        for prop, val in x.T.ipropvals(x):
            if prop.multivalued:
                if val is None:
                    continue
                for i, element in enumerate(val):
                    for hit in zip_walk(element, typ,
                                        path=path + ((prop.name, i),),
                                        stack=deeper):
                        yield hit
            else:
                for hit in zip_walk(val, typ,
                                    path=path + (prop.name,),
                                    stack=deeper):
                    yield hit
def path_element(x):
    """Render one walk()-path component as text ('name[i]' for tuples)."""
    return '%s[%i]' % x if isinstance(x, tuple) else x
def path_to_str(path):
    """Join walk()-path components into a dotted string like 'a.b[0].c'."""
    return '.'.join(map(path_element, path))
@expand_stream_args('w')
def dump(*args, **kwargs):
    """Serialize one object; expand_stream_args('w') prepares the output."""
    return _dump(*args, **kwargs)
@expand_stream_args('r')
def load(*args, **kwargs):
    """Deserialize one object; expand_stream_args('r') prepares the input."""
    return _load(*args, **kwargs)
def load_string(s, *args, **kwargs):
    """Deserialize one object from the string *s*."""
    return load(string=s, *args, **kwargs)
@expand_stream_args('w')
def dump_all(*args, **kwargs):
    """Serialize a sequence of objects; see _dump_all."""
    return _dump_all(*args, **kwargs)
@expand_stream_args('r')
def load_all(*args, **kwargs):
    """Deserialize all objects into a list; see _load_all."""
    return _load_all(*args, **kwargs)
@expand_stream_args('r')
def iload_all(*args, **kwargs):
    """Lazily deserialize objects one at a time; see _iload_all."""
    return _iload_all(*args, **kwargs)
@expand_stream_args('w')
def dump_xml(*args, **kwargs):
    """Serialize one object as XML; see _dump_xml."""
    return _dump_xml(*args, **kwargs)
@expand_stream_args('r')
def load_xml(*args, **kwargs):
    """Deserialize the first object of an XML document; see _load_xml."""
    return _load_xml(*args, **kwargs)
def load_xml_string(s, *args, **kwargs):
    """Deserialize the first object from the XML string *s*."""
    return load_xml(string=s, *args, **kwargs)
@expand_stream_args('w')
def dump_all_xml(*args, **kwargs):
    """Serialize objects as a single XML document; see _dump_all_xml."""
    return _dump_all_xml(*args, **kwargs)
@expand_stream_args('r')
def load_all_xml(*args, **kwargs):
    """Deserialize all objects of an XML document into a list."""
    return _load_all_xml(*args, **kwargs)
@expand_stream_args('r')
def iload_all_xml(*args, **kwargs):
    """Lazily deserialize objects from an XML document."""
    return _iload_all_xml(*args, **kwargs)
# Explicit public API: the generated guts type names plus the top-level
# (de)serialization helpers defined above.
__all__ = guts_types + [
    'guts_types', 'TBase', 'ValidationError',
    'ArgumentError', 'Defer',
    'dump', 'load',
    'dump_all', 'load_all', 'iload_all',
    'dump_xml', 'load_xml',
    'dump_all_xml', 'load_all_xml', 'iload_all_xml',
    'load_string',
    'load_xml_string',
    'make_typed_list_class', 'walk', 'zip_walk', 'path_to_str'
]
| |
# Generated by Haxe 3.3.0
import math as python_lib_Math
import math as Math
import pandas as pandas_Pandas_Module
import functools as python_lib_Functools
import inspect as python_lib_Inspect
import random as python_lib_Random
class _hx_AnonObject:
def __init__(self, fields):
self.__dict__ = fields
class Enum:
    """Runtime representation of a Haxe enum value (generated code)."""
    _hx_class_name = "Enum"
    __slots__ = ("tag", "index", "params")
    _hx_fields = ["tag", "index", "params"]
    _hx_methods = ["__str__"]
    def __init__(self,tag,index,params):
        # tag: constructor name; index: constructor ordinal;
        # params: argument list, or None for parameterless constructors.
        self.tag = tag
        self.index = index
        self.params = params
    def __str__(self):
        # Parameterless values print as "Tag", others as "Tag(p1,p2,...)".
        if (self.params is None):
            return self.tag
        else:
            return (((HxOverrides.stringOrNull(self.tag) + "(") + HxOverrides.stringOrNull(",".join([python_Boot.toString1(x1,'') for x1 in self.params]))) + ")")
class Reflect:
    """Haxe Reflect API shim; delegates to python_Boot (generated code)."""
    _hx_class_name = "Reflect"
    __slots__ = ()
    _hx_statics = ["field"]
    @staticmethod
    def field(o,field):
        # Reflective field read with keyword unmangling plus String/Array
        # pseudo-field support (length, charAt, push, ...).
        return python_Boot.field(o,field)
class Script:
    """Entry point generated from the Haxe main class."""
    _hx_class_name = "Script"
    __slots__ = ()
    _hx_statics = ["main"]
    @staticmethod
    def main():
        # Reads the 2013 HUD housing survey CSV from the parent directory;
        # no error handling -- a missing file raises at import time.
        housing_2013 = pandas_Pandas_Module.read_csv("../Hud_2013.csv")
        cols = ["FMR", "AGE1", "TOTSAL"]
        # Prints the column list in Haxe array format, e.g. "[FMR,AGE1,TOTSAL]".
        print(str((("[" + HxOverrides.stringOrNull(",".join([python_Boot.toString1(x1,'') for x1 in cols]))) + "]")))
        filtered_housing_2013 = housing_2013[cols]
        print(str(filtered_housing_2013.head(5)))
class python_Boot:
    """Haxe->Python runtime helpers: reflection and string conversion.

    Machine-generated by the Haxe compiler (see file header); prefer fixing
    the Haxe sources over hand-editing, except for clear generation defects.
    """
    _hx_class_name = "python.Boot"
    __slots__ = ()
    _hx_statics = ["keywords", "toString1", "fields", "simpleField", "field", "getInstanceFields", "getSuperClass", "getClassFields", "prefixLength", "unhandleKeywords"]
@staticmethod
def toString1(o,s):
if (o is None):
return "null"
if isinstance(o,str):
return o
if (s is None):
s = ""
if (len(s) >= 5):
return "<...>"
if isinstance(o,bool):
if o:
return "true"
else:
return "false"
if isinstance(o,int):
return str(o)
if isinstance(o,float):
try:
if (o == int(o)):
return str(Math.floor((o + 0.5)))
else:
return str(o)
except Exception as _hx_e:
_hx_e1 = _hx_e
e = _hx_e1
return str(o)
if isinstance(o,list):
o1 = o
l = len(o1)
st = "["
s = (("null" if s is None else s) + "\t")
_g1 = 0
_g = l
while (_g1 < _g):
i = _g1
_g1 = (_g1 + 1)
prefix = ""
if (i > 0):
prefix = ","
st = (("null" if st is None else st) + HxOverrides.stringOrNull(((("null" if prefix is None else prefix) + HxOverrides.stringOrNull(python_Boot.toString1((o1[i] if i >= 0 and i < len(o1) else None),s))))))
st = (("null" if st is None else st) + "]")
return st
try:
if hasattr(o,"toString"):
return o.toString()
except Exception as _hx_e:
_hx_e1 = _hx_e
pass
if (python_lib_Inspect.isfunction(o) or python_lib_Inspect.ismethod(o)):
return "<function>"
if hasattr(o,"__class__"):
if isinstance(o,_hx_AnonObject):
toStr = None
try:
fields = python_Boot.fields(o)
_g2 = []
_g11 = 0
while (_g11 < len(fields)):
f = (fields[_g11] if _g11 >= 0 and _g11 < len(fields) else None)
_g11 = (_g11 + 1)
tmp = (("" + ("null" if f is None else f)) + " : ")
tmp1 = python_Boot.toString1(python_Boot.simpleField(o,f),(("null" if s is None else s) + "\t"))
_g2.append((("null" if tmp is None else tmp) + ("null" if tmp1 is None else tmp1)))
fieldsStr = _g2
toStr = (("{ " + HxOverrides.stringOrNull(", ".join([x1 for x1 in fieldsStr]))) + " }")
except Exception as _hx_e:
_hx_e1 = _hx_e
e2 = _hx_e1
return "{ ... }"
if (toStr is None):
return "{ ... }"
else:
return toStr
if isinstance(o,Enum):
o2 = o
l1 = len(o2.params)
hasParams = (l1 > 0)
if hasParams:
paramsStr = ""
_g12 = 0
_g3 = l1
while (_g12 < _g3):
i1 = _g12
_g12 = (_g12 + 1)
prefix1 = ""
if (i1 > 0):
prefix1 = ","
paramsStr = (("null" if paramsStr is None else paramsStr) + HxOverrides.stringOrNull(((("null" if prefix1 is None else prefix1) + HxOverrides.stringOrNull(python_Boot.toString1((o2.params[i1] if i1 >= 0 and i1 < len(o2.params) else None),s))))))
return (((HxOverrides.stringOrNull(o2.tag) + "(") + ("null" if paramsStr is None else paramsStr)) + ")")
else:
return o2.tag
if hasattr(o,"_hx_class_name"):
if (o.__class__.__name__ != "type"):
fields1 = python_Boot.getInstanceFields(o)
_g4 = []
_g13 = 0
while (_g13 < len(fields1)):
f1 = (fields1[_g13] if _g13 >= 0 and _g13 < len(fields1) else None)
_g13 = (_g13 + 1)
tmp2 = (("" + ("null" if f1 is None else f1)) + " : ")
tmp3 = python_Boot.toString1(python_Boot.simpleField(o,f1),(("null" if s is None else s) + "\t"))
_g4.append((("null" if tmp2 is None else tmp2) + ("null" if tmp3 is None else tmp3)))
fieldsStr1 = _g4
toStr1 = (((HxOverrides.stringOrNull(o._hx_class_name) + "( ") + HxOverrides.stringOrNull(", ".join([x1 for x1 in fieldsStr1]))) + " )")
return toStr1
else:
fields2 = python_Boot.getClassFields(o)
_g5 = []
_g14 = 0
while (_g14 < len(fields2)):
f2 = (fields2[_g14] if _g14 >= 0 and _g14 < len(fields2) else None)
_g14 = (_g14 + 1)
tmp4 = (("" + ("null" if f2 is None else f2)) + " : ")
tmp5 = python_Boot.toString1(python_Boot.simpleField(o,f2),(("null" if s is None else s) + "\t"))
_g5.append((("null" if tmp4 is None else tmp4) + ("null" if tmp5 is None else tmp5)))
fieldsStr2 = _g5
toStr2 = (((("#" + HxOverrides.stringOrNull(o._hx_class_name)) + "( ") + HxOverrides.stringOrNull(", ".join([x1 for x1 in fieldsStr2]))) + " )")
return toStr2
if (o == str):
return "#String"
if (o == list):
return "#Array"
if callable(o):
return "function"
try:
if hasattr(o,"__repr__"):
return o.__repr__()
except Exception as _hx_e:
_hx_e1 = _hx_e
pass
if hasattr(o,"__str__"):
return o.__str__([])
if hasattr(o,"__name__"):
return o.__name__
return "???"
else:
return str(o)
@staticmethod
def fields(o):
a = []
if (o is not None):
if hasattr(o,"_hx_fields"):
fields = o._hx_fields
return list(fields)
if isinstance(o,_hx_AnonObject):
d = o.__dict__
keys = d.keys()
handler = python_Boot.unhandleKeywords
for k in keys:
a.append(handler(k))
elif hasattr(o,"__dict__"):
d1 = o.__dict__
keys1 = d1.keys()
for k in keys1:
a.append(k)
return a
@staticmethod
def simpleField(o,field):
if (field is None):
return None
field1 = (("_hx_" + field) if ((field in python_Boot.keywords)) else (("_hx_" + field) if (((((len(field) > 2) and ((ord(field[0]) == 95))) and ((ord(field[1]) == 95))) and ((ord(field[(len(field) - 1)]) != 95)))) else field))
if hasattr(o,field1):
return getattr(o,field1)
else:
return None
@staticmethod
def field(o,field):
if (field is None):
return None
_hx_local_0 = len(field)
if (_hx_local_0 == 10):
if (field == "charCodeAt"):
if isinstance(o,str):
s1 = o
def _hx_local_1(a11):
return HxString.charCodeAt(s1,a11)
return _hx_local_1
elif (_hx_local_0 == 11):
if (field == "lastIndexOf"):
if isinstance(o,str):
s3 = o
def _hx_local_2(a15):
return HxString.lastIndexOf(s3,a15)
return _hx_local_2
elif isinstance(o,list):
a4 = o
def _hx_local_3(x4):
return python_internal_ArrayImpl.lastIndexOf(a4,x4)
return _hx_local_3
elif (field == "toLowerCase"):
if isinstance(o,str):
s7 = o
def _hx_local_4():
return HxString.toLowerCase(s7)
return _hx_local_4
elif (field == "toUpperCase"):
if isinstance(o,str):
s9 = o
def _hx_local_5():
return HxString.toUpperCase(s9)
return _hx_local_5
elif (_hx_local_0 == 9):
if (field == "substring"):
if isinstance(o,str):
s6 = o
def _hx_local_6(a19):
return HxString.substring(s6,a19)
return _hx_local_6
elif (_hx_local_0 == 4):
if (field == "copy"):
if isinstance(o,list):
def _hx_local_7():
return list(o)
return _hx_local_7
elif (field == "join"):
if isinstance(o,list):
def _hx_local_8(sep):
return sep.join([python_Boot.toString1(x1,'') for x1 in o])
return _hx_local_8
elif (field == "push"):
if isinstance(o,list):
x7 = o
def _hx_local_9(e):
return python_internal_ArrayImpl.push(x7,e)
return _hx_local_9
elif (field == "sort"):
if isinstance(o,list):
x11 = o
def _hx_local_10(f2):
python_internal_ArrayImpl.sort(x11,f2)
return _hx_local_10
elif (_hx_local_0 == 5):
if (field == "shift"):
if isinstance(o,list):
x9 = o
def _hx_local_11():
return python_internal_ArrayImpl.shift(x9)
return _hx_local_11
elif (field == "slice"):
if isinstance(o,list):
x10 = o
def _hx_local_12(a16):
return python_internal_ArrayImpl.slice(x10,a16)
return _hx_local_12
elif (field == "split"):
if isinstance(o,str):
s4 = o
def _hx_local_13(d):
return HxString.split(s4,d)
return _hx_local_13
elif (_hx_local_0 == 7):
if (field == "indexOf"):
if isinstance(o,str):
s2 = o
def _hx_local_14(a13):
return HxString.indexOf(s2,a13)
return _hx_local_14
elif isinstance(o,list):
a = o
def _hx_local_15(x1):
return python_internal_ArrayImpl.indexOf(a,x1)
return _hx_local_15
elif (field == "reverse"):
if isinstance(o,list):
a5 = o
def _hx_local_16():
python_internal_ArrayImpl.reverse(a5)
return _hx_local_16
elif (field == "unshift"):
if isinstance(o,list):
x14 = o
def _hx_local_17(e2):
python_internal_ArrayImpl.unshift(x14,e2)
return _hx_local_17
elif (_hx_local_0 == 3):
if (field == "map"):
if isinstance(o,list):
x5 = o
def _hx_local_18(f1):
return python_internal_ArrayImpl.map(x5,f1)
return _hx_local_18
elif (field == "pop"):
if isinstance(o,list):
x6 = o
def _hx_local_19():
return python_internal_ArrayImpl.pop(x6)
return _hx_local_19
elif (_hx_local_0 == 8):
if (field == "iterator"):
if isinstance(o,list):
x3 = o
def _hx_local_20():
return python_internal_ArrayImpl.iterator(x3)
return _hx_local_20
elif (field == "toString"):
if isinstance(o,str):
s8 = o
def _hx_local_21():
return HxString.toString(s8)
return _hx_local_21
elif isinstance(o,list):
x13 = o
def _hx_local_22():
return python_internal_ArrayImpl.toString(x13)
return _hx_local_22
elif (_hx_local_0 == 6):
if (field == "charAt"):
if isinstance(o,str):
s = o
def _hx_local_23(a1):
return HxString.charAt(s,a1)
return _hx_local_23
elif (field == "concat"):
if isinstance(o,list):
a12 = o
def _hx_local_24(a2):
return python_internal_ArrayImpl.concat(a12,a2)
return _hx_local_24
elif (field == "filter"):
if isinstance(o,list):
x = o
def _hx_local_25(f):
return python_internal_ArrayImpl.filter(x,f)
return _hx_local_25
elif (field == "insert"):
if isinstance(o,list):
a3 = o
def _hx_local_26(a14,x2):
python_internal_ArrayImpl.insert(a3,a14,x2)
return _hx_local_26
elif (field == "length"):
if isinstance(o,str):
return len(o)
elif isinstance(o,list):
return len(o)
elif (field == "remove"):
if isinstance(o,list):
x8 = o
def _hx_local_27(e1):
return python_internal_ArrayImpl.remove(x8,e1)
return _hx_local_27
elif (field == "splice"):
if isinstance(o,list):
x12 = o
def _hx_local_28(a17,a21):
return python_internal_ArrayImpl.splice(x12,a17,a21)
return _hx_local_28
elif (field == "substr"):
if isinstance(o,str):
s5 = o
def _hx_local_29(a18):
return HxString.substr(s5,a18)
return _hx_local_29
else:
pass
field1 = (("_hx_" + field) if ((field in python_Boot.keywords)) else (("_hx_" + field) if (((((len(field) > 2) and ((ord(field[0]) == 95))) and ((ord(field[1]) == 95))) and ((ord(field[(len(field) - 1)]) != 95)))) else field))
if hasattr(o,field1):
return getattr(o,field1)
else:
return None
@staticmethod
def getInstanceFields(c):
f = (c._hx_fields if (hasattr(c,"_hx_fields")) else [])
if hasattr(c,"_hx_methods"):
f = (f + c._hx_methods)
sc = python_Boot.getSuperClass(c)
if (sc is None):
return f
else:
scArr = python_Boot.getInstanceFields(sc)
scMap = set(scArr)
_g = 0
while (_g < len(f)):
f1 = (f[_g] if _g >= 0 and _g < len(f) else None)
_g = (_g + 1)
if (not (f1 in scMap)):
scArr.append(f1)
return scArr
@staticmethod
def getSuperClass(c):
if (c is None):
return None
try:
if hasattr(c,"_hx_super"):
return c._hx_super
return None
except Exception as _hx_e:
_hx_e1 = _hx_e
pass
return None
@staticmethod
def getClassFields(c):
if hasattr(c,"_hx_statics"):
x = c._hx_statics
return list(x)
else:
return []
@staticmethod
def unhandleKeywords(name):
if (HxString.substr(name,0,python_Boot.prefixLength) == "_hx_"):
real = HxString.substr(name,python_Boot.prefixLength,None)
if (real in python_Boot.keywords):
return real
return name
class python_HaxeIterator:
    """Adapt a native Python iterator to Haxe's hasNext()/next() protocol."""
    _hx_class_name = "python.HaxeIterator"
    __slots__ = ("it", "x", "has", "checked")
    _hx_fields = ["it", "x", "has", "checked"]
    _hx_methods = ["next", "hasNext"]

    def __init__(self, it):
        self.checked = False   # has hasNext() already probed?
        self.has = False       # result of the last probe
        self.x = None          # element fetched by the last probe
        self.it = it

    def next(self):
        """Return the element fetched by the last hasNext() probe
        (probing implicitly when necessary); None when exhausted."""
        if not self.checked:
            self.hasNext()
        self.checked = False
        return self.x

    def hasNext(self):
        """Probe the underlying iterator once and cache the element."""
        if not self.checked:
            try:
                self.x = self.it.__next__()
            except StopIteration:
                self.has = False
                self.x = None
            else:
                self.has = True
            self.checked = True
        return self.has
class python_internal_ArrayImpl:
    """Static helpers implementing Haxe Array semantics on Python lists."""
    _hx_class_name = "python.internal.ArrayImpl"
    __slots__ = ()
    _hx_statics = ["concat", "iterator", "indexOf", "lastIndexOf", "toString", "pop", "push", "unshift", "remove", "shift", "slice", "sort", "splice", "map", "filter", "insert", "reverse", "_get"]

    @staticmethod
    def concat(a1,a2):
        """New list with the elements of a1 followed by a2."""
        return a1 + a2

    @staticmethod
    def iterator(x):
        """Wrap the list in a Haxe-protocol iterator."""
        return python_HaxeIterator(iter(x))

    @staticmethod
    def indexOf(a,x,fromIndex = None):
        """First index of x at or after fromIndex (negative counts from
        the end); -1 when absent."""
        size = len(a)
        if fromIndex is None:
            start = 0
        elif fromIndex < 0:
            start = size + fromIndex
        else:
            start = fromIndex
        if start < 0:
            start = 0
        for i in range(start, size):
            if a[i] == x:
                return i
        return -1

    @staticmethod
    def lastIndexOf(a,x,fromIndex = None):
        """Last index of x at or before fromIndex; -1 when absent."""
        size = len(a)
        if fromIndex is None:
            stop = size
        elif fromIndex < 0:
            stop = size + fromIndex + 1
        else:
            stop = fromIndex + 1
        if stop > size:
            stop = size
        for i in range(stop - 1, -1, -1):
            if a[i] == x:
                return i
        return -1

    @staticmethod
    def toString(x):
        """Haxe array rendering: '[e1,e2,...]'."""
        body = ",".join([python_Boot.toString1(v, '') for v in x])
        return "[" + body + "]"

    @staticmethod
    def pop(x):
        """Remove and return the last element, or None when empty."""
        return x.pop() if x else None

    @staticmethod
    def push(x,e):
        """Append e; return the new length."""
        x.append(e)
        return len(x)

    @staticmethod
    def unshift(x,e):
        """Prepend e in place."""
        x.insert(0, e)

    @staticmethod
    def remove(x,e):
        """Remove the first occurrence of e; True if something was removed."""
        try:
            x.remove(e)
            return True
        except Exception:
            return False

    @staticmethod
    def shift(x):
        """Remove and return the first element, or None when empty."""
        return x.pop(0) if x else None

    @staticmethod
    def slice(x,pos,end = None):
        """Copy of x[pos:end] (Python slice semantics)."""
        return x[pos:end]

    @staticmethod
    def sort(x,f):
        """In-place sort using the Haxe comparator f(a, b) -> int."""
        x.sort(key=python_lib_Functools.cmp_to_key(f))

    @staticmethod
    def splice(x,pos,_hx_len):
        """Remove _hx_len elements starting at pos (negative pos counts
        from the end, clamped to 0); return the removed elements."""
        if pos < 0:
            pos = len(x) + pos
        if pos < 0:
            pos = 0
        removed = x[pos:(pos + _hx_len)]
        del x[pos:(pos + _hx_len)]
        return removed

    @staticmethod
    def map(x,f):
        """New list of f(e) for each element."""
        return [f(v) for v in x]

    @staticmethod
    def filter(x,f):
        """New list of the elements for which f(e) is truthy."""
        return [v for v in x if f(v)]

    @staticmethod
    def insert(a,pos,x):
        """Insert x at index pos in place."""
        a.insert(pos, x)

    @staticmethod
    def reverse(a):
        """Reverse in place."""
        a.reverse()

    @staticmethod
    def _get(x,idx):
        """Bounds-checked read: x[idx] or None when out of range."""
        return x[idx] if 0 <= idx < len(x) else None
class HxOverrides:
    """Haxe operator overrides that differ from plain Python semantics."""
    _hx_class_name = "HxOverrides"
    __slots__ = ()
    _hx_statics = ["eq", "stringOrNull"]

    @staticmethod
    def eq(a,b):
        """Haxe '==': arrays compare by identity, everything else by value."""
        if isinstance(a, list) or isinstance(b, list):
            return a is b
        return a == b

    @staticmethod
    def stringOrNull(s):
        """Render None as the literal 'null' (Haxe string coercion)."""
        return "null" if s is None else s
class HxString:
    """Static helpers implementing Haxe String semantics on Python str."""
    _hx_class_name = "HxString"
    __slots__ = ()
    _hx_statics = ["split", "charCodeAt", "charAt", "lastIndexOf", "toUpperCase", "toLowerCase", "indexOf", "toString", "substring", "substr"]

    @staticmethod
    def split(s,d):
        """Haxe String.split: an empty delimiter explodes into characters."""
        return list(s) if d == "" else s.split(d)

    @staticmethod
    def charCodeAt(s,index):
        """Code point at *index*, or None when out of range / s is None."""
        if s is None or index < 0 or index >= len(s):
            return None
        return ord(s[index])

    @staticmethod
    def charAt(s,index):
        """Character at *index*, or '' when out of range."""
        return "" if index < 0 or index >= len(s) else s[index]

    @staticmethod
    def lastIndexOf(s,_hx_str,startIndex = None):
        """Rightmost match whose start is at or before startIndex; -1 if none."""
        if startIndex is None:
            return s.rfind(_hx_str, 0, len(s))
        # rfind's exclusive bound can miss matches that merely *start* at
        # or before startIndex; probe forward once to catch those.
        found = s.rfind(_hx_str, 0, startIndex + 1)
        if found == -1:
            probe_from = max(0, (startIndex + 1) - len(_hx_str))
        else:
            probe_from = found + 1
        ahead = s.find(_hx_str, probe_from, len(s))
        if ahead > found and ahead <= startIndex:
            return ahead
        return found

    @staticmethod
    def toUpperCase(s):
        """Uppercase copy of s."""
        return s.upper()

    @staticmethod
    def toLowerCase(s):
        """Lowercase copy of s."""
        return s.lower()

    @staticmethod
    def indexOf(s,_hx_str,startIndex = None):
        """Leftmost match at or after startIndex; -1 if none."""
        if startIndex is None:
            return s.find(_hx_str)
        return s.find(_hx_str, startIndex)

    @staticmethod
    def toString(s):
        """Identity: a Haxe String already is a Python str."""
        return s

    @staticmethod
    def substring(s,startIndex,endIndex = None):
        """Haxe String.substring: negatives clamp to 0, bounds swap if reversed."""
        if startIndex < 0:
            startIndex = 0
        if endIndex is None:
            return s[startIndex:]
        if endIndex < 0:
            endIndex = 0
        if endIndex < startIndex:
            startIndex, endIndex = endIndex, startIndex
        return s[startIndex:endIndex]

    @staticmethod
    def substr(s,startIndex,_hx_len = None):
        """Haxe String.substr: up to _hx_len characters from startIndex."""
        if _hx_len is None:
            return s[startIndex:]
        if _hx_len == 0:
            return ""
        return s[startIndex:(startIndex + _hx_len)]
# Haxe's Math class is mapped onto Python's math module; the constants it
# expects are monkey-patched onto that module here.
Math.NEGATIVE_INFINITY = float("-inf")
Math.POSITIVE_INFINITY = float("inf")
Math.NaN = float("nan")
Math.PI = python_lib_Math.pi
# Python keywords whose field names were stored with a "_hx_" prefix
# (see python_Boot.simpleField / unhandleKeywords).
python_Boot.keywords = set(["and", "del", "from", "not", "with", "as", "elif", "global", "or", "yield", "assert", "else", "if", "pass", "None", "break", "except", "import", "raise", "True", "class", "exec", "in", "return", "False", "continue", "finally", "is", "try", "def", "for", "lambda", "while"])
python_Boot.prefixLength = len("_hx_")
# NOTE: the generated entry point runs at import time.
Script.main()
| |
"""Tests for media.views."""
# pylint: disable=protected-access,maybe-no-member,invalid-name
import json
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import Client, TestCase, RequestFactory
from django.test.utils import override_settings
from mock import patch
from open_connect.media.models import Image, ShortenedURL
from open_connect.media.tests import (
get_in_memory_image_file, get_in_memory_image_instance
)
from open_connect.media.views import (
image_view, URLPopularityView, AdminGalleryView
)
from open_connect.connectmessages.models import MESSAGE_STATUSES
from open_connect.connectmessages.tests import ConnectMessageTestCase
from open_connect.connect_core.utils.basetests import ConnectTestMixin
class ImageViewsTest(ConnectTestMixin, TestCase):
    """Tests for image_view."""
    def setUp(self):
        """Setup for image views tests"""
        super(ImageViewsTest, self).setUp()
        self.user = self.create_superuser()
        # 'moo' is presumably the factory default password -- confirm in
        # ConnectTestMixin.create_superuser.
        self.client.login(username=self.user.email, password='moo')
        self.factory = RequestFactory()
        self.request = self.factory.get('/')
    def test_image_view_increments_view_count(self):
        """Viewing an image should increase the view count."""
        image = get_in_memory_image_instance(self.user)
        image_view(self.request, image.uuid)
        # re-fetch: the view updates the row, not this in-memory instance
        small_image = Image.objects.get(pk=image.pk)
        self.assertEqual(small_image.view_count, 1)
    def test_non_display_image_view_does_not_increment_view_count(self):
        """Viewing an alternate size (thumbnail) should not increment count."""
        image = get_in_memory_image_instance(self.user)
        response = image_view(self.request, image.uuid, image_type='thumbnail')
        self.assertEqual(response.url, image.get_thumbnail.url)
        small_image = Image.objects.get(pk=image.pk)
        self.assertEqual(small_image.view_count, 0)
    @patch('open_connect.media.views.cache')
    def test_image_view_uses_correct_hash_key(self, mock):
        """Viewing an alternate size (thumbnail) should not increment count."""
        mock.get.return_value = None
        image = get_in_memory_image_instance(self.user)
        image.uuid = 'uuid-here'
        image.save()
        response = image_view(
            self.request, image.uuid, image_type='display_image')
        self.assertEqual(response.url, image.get_display_image.url)
        # expected cache key format: imageurlcache_<image_type>_<uuid>
        mock.get.assert_called_once_with(
            'imageurlcache_display_image_uuid-here')
        mock.set.assert_called_once_with(
            'imageurlcache_display_image_uuid-here',
            image.get_display_image.url,
            2700
        )
        display_image = Image.objects.get(pk=image.pk)
        self.assertEqual(display_image.view_count, 1)
    @patch('open_connect.media.views.cache')
    def test_image_view_returns_cache_if_possible(self, mock):
        """If possible, return what's in the cache"""
        mock.get.return_value = 'http://razzmatazz.local/great.gif'
        image = get_in_memory_image_instance(self.user)
        image.uuid = 'uuid-here'
        image.save()
        response = image_view(
            self.request, image.uuid, image_type='display_image')
        self.assertEqual(response.url, 'http://razzmatazz.local/great.gif')
        mock.get.assert_called_once_with(
            'imageurlcache_display_image_uuid-here')
        # a cache hit still counts as a view
        display_image = Image.objects.get(pk=image.pk)
        self.assertEqual(display_image.view_count, 1)
    def test_image_view_redirects_to_image_url(self):
        """image_view should redirect to the actual image url."""
        image = get_in_memory_image_instance(self.user)
        response = self.client.get(
            reverse('image', kwargs={'image_uuid': image.uuid}))
        # assertRedirects doesn't work here because there's no staticfiles
        # in the test client
        # NOTE(review): response._headers is private and was removed in
        # Django 3.2; response['Location'] is the public equivalent.
        self.assertEqual(
            response._headers['location'][1],
            'http://testserver%s' % image.image.url
        )
    def test_upload_photos(self):
        """create_image should return JSON response with filelink and id."""
        response = self.client.post(
            reverse('create_image'),
            {'file': get_in_memory_image_file()}
        )
        json_response = json.loads(response.content)
        self.assertTrue('filelink' in json_response[0].keys())
        self.assertTrue(
            json_response[0]['filelink'].startswith(settings.ORIGIN))
        self.assertTrue('id' in json_response[0].keys())
    def test_upload_photos_invalid(self):
        """uploading an invalid value for image should return an empty list."""
        response = self.client.post(
            reverse('create_image'),
            {'file': 'cookies!'}
        )
        self.assertEqual(json.loads(response.content), [])
    def test_upload_photos_get(self):
        """create_image view should return 405 when request method is GET."""
        response = self.client.get(
            reverse('create_image'),
            {'file': 'cookies!'}
        )
        self.assertEqual(response.status_code, 405)
    def test_my_images(self):
        """my_images should return list of images a user has uploaded."""
        image1 = get_in_memory_image_instance(self.user)
        image2 = get_in_memory_image_instance(self.user)
        response = self.client.get(reverse('my_images'))
        content = json.loads(response.content)
        expected = [
            {u'thumb': image1.get_thumbnail.url,
             u'image': image1.get_absolute_url(),
             u'id': image1.pk},
            {u'thumb': image2.get_thumbnail.url,
             u'image': image2.get_absolute_url(),
             u'id': image2.pk}
        ]
        # order-insensitive comparison: response ordering is not asserted
        self.assertEqual(len(content), len(expected))
        for image in content:
            self.assertIn(image, expected)
    def test_promote_image_view(self):
        """promote image view should return json with status and uuid."""
        image = get_in_memory_image_instance(self.user)
        self.assertFalse(image.promoted)
        response = self.client.post(
            reverse('promote_image'), {'uuid': image.uuid})
        result = json.loads(response.content)
        image = Image.objects.get(pk=image.pk)
        self.assertTrue(image.promoted)
        self.assertEqual(result['status'], 'success')
        self.assertEqual(result['uuid'], image.uuid)
    def test_demote_image_view(self):
        """demote image view should return json with status and uuid."""
        image = get_in_memory_image_instance(self.user)
        image.promoted = True
        image.save()
        response = self.client.post(
            reverse('demote_image'), {'uuid': image.uuid})
        result = json.loads(response.content)
        image = Image.objects.get(pk=image.pk)
        self.assertFalse(image.promoted)
        self.assertEqual(result['status'], 'success')
        self.assertEqual(result['uuid'], image.uuid)
    @override_settings(LOGIN_URL=reverse('login'))
    def test_promote_image_view_no_permission(self):
        """should redirect to login if user doesn't have permission."""
        user = self.create_user()
        client = Client()
        client.login(username=user.email, password='moo')
        response = client.post(reverse('promote_image'))
        self.assertRedirects(
            response, '%s?next=/media/image/promote/' % reverse('login'))
    @override_settings(LOGIN_URL=reverse('login'))
    def test_demote_image_view_no_permission(self):
        """should redirect to login if user doesn't have permission."""
        user = self.create_user()
        client = Client()
        client.login(username=user.email, password='moo')
        response = client.post(reverse('demote_image'))
        self.assertRedirects(
            response, '%s?next=/media/image/demote/' % reverse('login'))
    @override_settings(LOGIN_URL=reverse('login'))
    def test_promote_image_view_requires_post(self):
        """promote image view should return 405 if http method is get."""
        response = self.client.get(reverse('promote_image'))
        self.assertEqual(response.status_code, 405)
    @override_settings(LOGIN_URL=reverse('login'))
    def test_demote_image_view_requires_post(self):
        """demote image view should return 405 if http method is get."""
        response = self.client.get(reverse('demote_image'))
        self.assertEqual(response.status_code, 405)
    @override_settings(LOGIN_URL=reverse('login'))
    def test_admin_gallery_requires_permission(self):
        """admin gallery should return 403 if user doesn't have permission."""
        user = self.create_user()
        client = Client()
        client.login(username=user.email, password='moo')
        response = client.get(reverse('admin_gallery'))
        self.assertEqual(response.status_code, 403)
    def test_admin_gallery_does_not_have_direct_message_images(self):
        """admin gallery should not include images from direct messages."""
        image = get_in_memory_image_instance(self.user)
        direct_message = self.create_thread(direct=True)
        direct_message.first_message.images.add(image)
        response = self.client.get(reverse('admin_gallery'))
        self.assertNotContains(response, image.uuid)
    def test_admin_gallery_only_has_images_from_approved_messages(self):
        """Test that admin gallery only has images from approved messages"""
        # pylint: disable=unused-variable
        thread = self.create_thread()
        image = get_in_memory_image_instance(self.user)
        thread.first_message.images.add(image)
        # cycle the message through every status; only 'approved' may
        # expose the image in the gallery queryset
        for code, name in MESSAGE_STATUSES:
            thread.first_message.status = code
            thread.first_message.save()
            view = AdminGalleryView()
            view.request = self.factory.get('/')
            view.request.user = self.user
            queryset = view.get_queryset()
            if code == 'approved':
                self.assertIn(image, queryset)
            else:
                self.assertNotIn(image, queryset, msg="Status was %s." % code)
class ShortenedURLRedirectTest(ConnectMessageTestCase):
    """Tests for shortened_url_redirect view."""
    def test_redirects(self):
        """view should redirect user to the correct url."""
        url = ShortenedURL.objects.create(url='http://www.google.com')
        response = self.client.get(
            reverse(
                'shortened_url_redirect',
                kwargs={'code': url.short_code}
            )
        )
        self.assertEqual(response['Location'], 'http://www.google.com')
    def test_increases_click_count(self):
        """view should increment click_count by one."""
        url = ShortenedURL.objects.create(url='http://www.google.com')
        click_count = url.click_count
        self.client.get(
            reverse(
                'shortened_url_redirect',
                kwargs={'code': url.short_code}
            )
        )
        # re-fetch: the view updates the row, not this in-memory instance
        url = ShortenedURL.objects.get(pk=url.pk)
        self.assertEqual(url.click_count, click_count + 1)
class URLPopularityViewTest(ConnectMessageTestCase):
    """Tests for URLPopularityView."""
    def setUp(self):
        """Setup the URLPopularityViewTest TestCase"""
        # NOTE(review): super().setUp() is not called; these tests rely on
        # ConnectMessageTestCase providing request_factory at class level --
        # confirm that is intentional.
        self.view = URLPopularityView()
    def test_order_by_accepts_valid_values(self):
        """order_by in query params should sort by valid values."""
        for value in ['message_count', 'url', 'short_code', 'created_at']:
            request = self.request_factory.get('/?order_by=%s' % value)
            self.view.request = request
            result = self.view.get_queryset()
            # default sort direction is descending
            self.assertEqual(result.query.order_by, ['-%s' % value])
    def test_order_by_defaults_to_click_count(self):
        """if there is no order_by in query, default to click_count."""
        request = self.request_factory.get('/')
        self.view.request = request
        result = self.view.get_queryset()
        self.assertEqual(result.query.order_by, ['-click_count'])
    def test_order_by_is_click_count_if_value_is_invalid(self):
        """if order_by isn't a valid value, order by click_count."""
        request = self.request_factory.get('/?order_by=invalid')
        self.view.request = request
        result = self.view.get_queryset()
        self.assertEqual(result.query.order_by, ['-click_count'])
    def test_order_is_asc(self):
        """if order is asc, queryset should be sorted ascending."""
        request = self.request_factory.get('/?sort=asc')
        self.view.request = request
        result = self.view.get_queryset()
        self.assertEqual(result.query.order_by, ['click_count'])
    def test_order_is_desc(self):
        """if order is desc, queryset should be sorted descending."""
        request = self.request_factory.get('/?sort=desc')
        self.view.request = request
        result = self.view.get_queryset()
        self.assertEqual(result.query.order_by, ['-click_count'])
    def test_order_is_other(self):
        """if order is invalid, default to descending."""
        request = self.request_factory.get('/?sort=cows!')
        self.view.request = request
        result = self.view.get_queryset()
        self.assertEqual(result.query.order_by, ['-click_count'])
    def test_get_context_data_query_string_removes_order_and_order_by(self):
        """order and order_by keys should be removed from query string."""
        request = self.request_factory.get('/?sort=desc&order_by=url')
        self.view.request = request
        self.view.kwargs = {}
        result = self.view.get_context_data(
            object_list=ShortenedURL.objects.all())
        self.assertEqual(result['query_string'], '')
    def test_get_context_data_query_string_has_other_values(self):
        """
        querystring in context should have any values that aren't filtered
        """
        request = self.request_factory.get('/?sort=desc&order_by=url&cow=moo')
        self.view.request = request
        self.view.kwargs = {}
        result = self.view.get_context_data(
            object_list=ShortenedURL.objects.all())
        self.assertEqual(result['query_string'], 'cow=moo')
    def test_get_context_data_query_string_is_empty(self):
        """
        query_string in context should be empty if there's no query string
        """
        request = self.request_factory.get('/')
        self.view.request = request
        self.view.kwargs = {}
        result = self.view.get_context_data(
            object_list=ShortenedURL.objects.all())
        self.assertEqual(result['query_string'], '')
| |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from page_sets import press_story
from telemetry import story
class WebrtcPage(press_story.PressStory):
  """Base story for WebRTC benchmark pages bundled under webrtc_cases/."""
  def __init__(self, url, page_set, name, tags, extra_browser_args=None):
    # only locally bundled test pages are allowed
    assert url.startswith('file://webrtc_cases/')
    # NOTE(review): URL and NAME are set on the instance before chaining to
    # the base constructor -- presumably PressStory reads them there; confirm.
    self.URL = url
    self.NAME = name
    super(WebrtcPage, self).__init__(page_set,
                                     tags=tags,
                                     extra_browser_args=extra_browser_args)
class GetUserMedia(WebrtcPage):
    """Why: Acquires a high definition (720p) local stream."""

    def __init__(self, page_set, tags):
        super(GetUserMedia, self).__init__(
            url='file://webrtc_cases/resolution.html',
            name='hd_local_stream_10s',
            page_set=page_set, tags=tags)

    def ExecuteTest(self, action_runner):
        # Click the "hd" resolution button, then let the stream run 10s.
        action_runner.ClickElement('button[id="hd"]')
        action_runner.Wait(10)
class DataChannel(WebrtcPage):
    """Why: Transfer as much data as possible through a data channel in 10s."""

    def __init__(self, page_set, tags):
        super(DataChannel, self).__init__(
            url='file://webrtc_cases/datatransfer.html',
            name='10s_datachannel_transfer',
            page_set=page_set, tags=tags)

    def ExecuteTest(self, action_runner):
        # Ask the page to send 100 MB, then give the transfer 10 seconds.
        action_runner.ExecuteJavaScript('megsToSend.value = 100;')
        action_runner.ClickElement('button[id="sendTheData"]')
        action_runner.Wait(10)

    def ParseTestResults(self, action_runner):
        # Read the page's own progress/throughput counters as measurements.
        self.AddJavaScriptMeasurement(
            'data_transferred',
            'sizeInBytes_biggerIsBetter',
            'receiveProgress.value',
            description='Amount of data transferred by data channel in 10 seconds')
        self.AddJavaScriptMeasurement(
            'data_throughput',
            'bytesPerSecond_biggerIsBetter',
            'currentThroughput',
            description='Throughput of the data transfer.')
class CanvasCapturePeerConnection(WebrtcPage):
    """Why: Sets up a canvas capture stream connection to a peer connection."""

    def __init__(self, page_set, tags):
        super(CanvasCapturePeerConnection, self).__init__(
            url='file://webrtc_cases/canvas-capture.html',
            name='canvas_capture_peer_connection',
            page_set=page_set, tags=tags)

    def ExecuteTest(self, action_runner):
        # Wrap in an interaction record so the 10s run is attributed to it.
        with action_runner.CreateInteraction('Action_Canvas_PeerConnection',
                                             repeatable=False):
            action_runner.ClickElement('button[id="startButton"]')
            action_runner.Wait(10)
class VideoCodecConstraints(WebrtcPage):
    """Why: Sets up a video codec to a peer connection."""

    def __init__(self, page_set, video_codec, tags):
        # video_codec: codec name (e.g. 'H264', 'VP8', 'VP9'); it doubles as
        # the id of the radio input on the page.
        super(VideoCodecConstraints, self).__init__(
            url='file://webrtc_cases/codec_constraints.html',
            name='codec_constraints_%s' % video_codec.lower(),
            page_set=page_set, tags=tags)
        self.video_codec = video_codec

    def ExecuteTest(self, action_runner):
        with action_runner.CreateInteraction('Action_Codec_Constraints',
                                             repeatable=False):
            # Select the codec, start, call for 20s, then hang up.
            action_runner.ClickElement('input[id="%s"]' % self.video_codec)
            action_runner.ClickElement('button[id="startButton"]')
            action_runner.WaitForElement('button[id="callButton"]:enabled')
            action_runner.ClickElement('button[id="callButton"]')
            action_runner.Wait(20)
            action_runner.ClickElement('button[id="hangupButton"]')
class MultiplePeerConnections(WebrtcPage):
    """Why: Sets up several peer connections in the same page."""

    def __init__(self, page_set, tags):
        super(MultiplePeerConnections, self).__init__(
            url='file://webrtc_cases/multiple-peerconnections.html',
            name='multiple_peerconnections',
            page_set=page_set, tags=tags)

    def ExecuteTest(self, action_runner):
        with action_runner.CreateInteraction('Action_Create_PeerConnection',
                                             repeatable=False):
            # Set the number of peer connections to create to 10.
            action_runner.ExecuteJavaScript(
                'document.getElementById("num-peerconnections").value=10')
            # Disable CPU overuse detection so load is not throttled.
            action_runner.ExecuteJavaScript(
                'document.getElementById("cpuoveruse-detection").checked=false')
            action_runner.ClickElement('button[id="start-test"]')
            action_runner.Wait(20)
class PausePlayPeerConnections(WebrtcPage):
    """Why: Ensures frequent pause and plays of peer connection streams work."""

    def __init__(self, page_set, tags):
        super(PausePlayPeerConnections, self).__init__(
            url='file://webrtc_cases/pause-play.html',
            name='pause_play_peerconnections',
            page_set=page_set, tags=tags)

    def ExecuteTest(self, action_runner):
        # startTest(runtime_s, num_connections, iteration_delay_ms, kind):
        # 10 connections toggled every 20 ms for 20 seconds.
        action_runner.ExecuteJavaScript(
            'startTest({test_runtime_s}, {num_peerconnections},'
            '{iteration_delay_ms}, "video");'.format(
                test_runtime_s=20, num_peerconnections=10, iteration_delay_ms=20))
        action_runner.Wait(20)
class InsertableStreamsAudioProcessing(WebrtcPage):
    """Why: processes/transforms audio using insertable streams."""

    def __init__(self, page_set, tags):
        super(InsertableStreamsAudioProcessing, self).__init__(
            url='file://webrtc_cases/audio-processing.html',
            name='insertable_streams_audio_processing',
            page_set=page_set,
            tags=tags,
            extra_browser_args=(
                '--enable-blink-features=WebCodecs,MediaStreamInsertableStreams'))
        # Set during RunNavigateSteps; None until feature detection has run.
        self.supported = None

    def RunNavigateSteps(self, action_runner):
        # Feature-detect MediaStreamTrackGenerator; skip navigation entirely
        # on browsers without insertable-streams support.
        self.supported = action_runner.EvaluateJavaScript('''(function () {
      try {
        new MediaStreamTrackGenerator('audio');
        return true;
      } catch (e) {
        return false;
      }
    })()''')
        if self.supported:
            super(InsertableStreamsAudioProcessing,
                  self).RunNavigateSteps(action_runner)

    def ExecuteTest(self, action_runner):
        # Always report supportedness so unsupported runs are visible.
        self.AddMeasurement(
            'supported', 'count_biggerIsBetter', 1 if self.supported else 0,
            'Boolean flag indicating if this benchmark is supported by the browser.'
        )
        if not self.supported:
            return
        action_runner.WaitForJavaScriptCondition('!!audio')
        action_runner.ExecuteJavaScript('start()')
        action_runner.Wait(10)
class InsertableStreamsVideoProcessing(WebrtcPage):
    """Why: processes/transforms video in various ways."""

    def __init__(self, page_set, source, transform, sink, tags):
        # source/transform/sink: option values of the page's three
        # selector drop-downs (e.g. 'camera'/'webgl'/'video').
        super(InsertableStreamsVideoProcessing, self).__init__(
            url='file://webrtc_cases/video-processing.html',
            name=('insertable_streams_video_processing_%s_%s_%s' %
                  (source, transform, sink)),
            page_set=page_set,
            tags=tags,
            extra_browser_args=(
                '--enable-blink-features=WebCodecs,MediaStreamInsertableStreams'))
        self.source = source
        self.transform = transform
        self.sink = sink
        # Set during RunNavigateSteps; None until feature detection has run.
        self.supported = None

    def RunNavigateSteps(self, action_runner):
        # Skip navigation entirely if insertable streams are unavailable.
        self.supported = action_runner.EvaluateJavaScript(
            "typeof MediaStreamTrackProcessor !== 'undefined' &&"
            "typeof MediaStreamTrackGenerator !== 'undefined'")
        if self.supported:
            super(InsertableStreamsVideoProcessing,
                  self).RunNavigateSteps(action_runner)

    def ExecuteTest(self, action_runner):
        # Always report supportedness so unsupported runs are visible.
        self.AddMeasurement(
            'supported', 'count_biggerIsBetter', 1 if self.supported else 0,
            'Boolean flag indicating if this benchmark is supported by the browser.'
        )
        if not self.supported:
            return
        with action_runner.CreateInteraction('Start_Pipeline', repeatable=True):
            # Configure the three selectors, then fire an input event on the
            # source selector to kick off the pipeline.
            action_runner.WaitForElement('select[id="sourceSelector"]:enabled')
            action_runner.ExecuteJavaScript(
                'document.getElementById("sourceSelector").value="%s";' % self.source)
            action_runner.WaitForElement('select[id="transformSelector"]:enabled')
            action_runner.ExecuteJavaScript(
                'document.getElementById("transformSelector").value="%s";' %
                self.transform)
            action_runner.WaitForElement('select[id="sinkSelector"]:enabled')
            action_runner.ExecuteJavaScript(
                'document.getElementById("sinkSelector").value="%s";' % self.sink)
            action_runner.ExecuteJavaScript(
                'document.getElementById("sourceSelector").dispatchEvent('
                '  new InputEvent("input", {}));')
            action_runner.WaitForElement('.sinkVideo')
            action_runner.Wait(10)
        self.AddJavaScriptMeasurement(
            'sink_decoded_frames',
            'count_biggerIsBetter',
            'document.querySelector(".sinkVideo").webkitDecodedFrameCount',
            description='Number of frames received at the sink video.')
class NegotiateTiming(WebrtcPage):
    """Why: Measure how long renegotiation takes with large SDP blobs."""

    def __init__(self, page_set, tags):
        super(NegotiateTiming,
              self).__init__(url='file://webrtc_cases/negotiate-timing.html',
                             name='negotiate-timing',
                             page_set=page_set,
                             tags=tags)

    def ExecuteTest(self, action_runner):
        """Runs a 50 -> 1 -> 50 transceiver renegotiation cycle.

        The page exposes start/call/renegotiate controls and a `result`
        object with per-step timings; the final 1 -> 50 renegotiation is
        the one that gets measured.
        """
        action_runner.ExecuteJavaScript('start()')
        action_runner.WaitForJavaScriptCondition('!callButton.disabled')
        action_runner.ExecuteJavaScript('call()')
        action_runner.WaitForJavaScriptCondition('!renegotiateButton.disabled')
        # Due to suspicion of renegotiate activating too early:
        action_runner.Wait(1)
        # Negotiate 50 transceivers, then negotiate back to 1, simulating Meet "pin"
        action_runner.ExecuteJavaScript('videoSectionsField.value = 50')
        action_runner.ExecuteJavaScript('renegotiate()')
        action_runner.WaitForJavaScriptCondition('!renegotiateButton.disabled')
        action_runner.ExecuteJavaScript('videoSectionsField.value = 1')
        action_runner.ExecuteJavaScript('renegotiate()')
        action_runner.WaitForJavaScriptCondition('!renegotiateButton.disabled')
        # Negotiate back up to 50, simulating Meet "unpin". This is what gets measured.
        action_runner.ExecuteJavaScript('videoSectionsField.value = 50')
        action_runner.ExecuteJavaScript('renegotiate()')
        action_runner.WaitForJavaScriptCondition('!renegotiateButton.disabled')
        result = action_runner.EvaluateJavaScript('result')
        self.AddMeasurement('callerSetLocalDescription',
                            'ms',
                            result['callerSetLocalDescription'],
                            description='Time for caller SetLocalDescription')
        self.AddMeasurement('calleeSetLocalDescription',
                            'ms',
                            result['calleeSetLocalDescription'],
                            description='Time for callee SetLocalDescription')
        self.AddMeasurement('callerSetRemoteDescription',
                            'ms',
                            result['callerSetRemoteDescription'],
                            description='Time for caller SetRemoteDescription')
        self.AddMeasurement('calleeSetRemoteDescription',
                            'ms',
                            result['calleeSetRemoteDescription'],
                            description='Time for callee SetRemoteDescription')
        # Fix: these two descriptions were copy-pasted from elapsedTime
        # ('Time for overall offer/answer handshake'); they describe the
        # individual CreateOffer / CreateAnswer steps.
        self.AddMeasurement('callerCreateOffer',
                            'ms',
                            result['callerCreateOffer'],
                            description='Time for caller CreateOffer')
        self.AddMeasurement('calleeCreateAnswer',
                            'ms',
                            result['calleeCreateAnswer'],
                            description='Time for callee CreateAnswer')
        self.AddMeasurement('elapsedTime',
                            'ms',
                            result['elapsedTime'],
                            description='Time for overall offer/answer handshake')
        self.AddMeasurement(
            'audioImpairment',
            'count',
            result['audioImpairment'],
            description='Number of late audio samples concealed during negotiation')
class WebrtcPageSet(story.StorySet):
    """Story set registering every WebRTC benchmark page defined above."""

    def __init__(self):
        super(WebrtcPageSet, self).__init__(
            cloud_storage_bucket=story.PUBLIC_BUCKET)
        self.AddStory(PausePlayPeerConnections(self, tags=['pauseplay']))
        self.AddStory(MultiplePeerConnections(self, tags=['stress']))
        self.AddStory(DataChannel(self, tags=['datachannel']))
        self.AddStory(GetUserMedia(self, tags=['getusermedia']))
        self.AddStory(CanvasCapturePeerConnection(self, tags=['smoothness']))
        # One codec-constraints story per codec under test.
        self.AddStory(VideoCodecConstraints(self, 'H264', tags=['videoConstraints']))
        self.AddStory(VideoCodecConstraints(self, 'VP8', tags=['videoConstraints']))
        self.AddStory(VideoCodecConstraints(self, 'VP9', tags=['videoConstraints']))
        self.AddStory(
            InsertableStreamsAudioProcessing(self, tags=['insertableStreams']))
        # Video-processing stories cover source/transform/sink combinations.
        self.AddStory(
            InsertableStreamsVideoProcessing(self,
                                             'camera',
                                             'webgl',
                                             'video',
                                             tags=['insertableStreams']))
        self.AddStory(
            InsertableStreamsVideoProcessing(self,
                                             'video',
                                             'webgl',
                                             'video',
                                             tags=['insertableStreams']))
        self.AddStory(
            InsertableStreamsVideoProcessing(self,
                                             'pc',
                                             'webgl',
                                             'video',
                                             tags=['insertableStreams']))
        self.AddStory(
            InsertableStreamsVideoProcessing(self,
                                             'camera',
                                             'canvas2d',
                                             'video',
                                             tags=['insertableStreams']))
        self.AddStory(
            InsertableStreamsVideoProcessing(self,
                                             'camera',
                                             'noop',
                                             'video',
                                             tags=['insertableStreams']))
        self.AddStory(
            InsertableStreamsVideoProcessing(self,
                                             'camera',
                                             'webgl',
                                             'pc',
                                             tags=['insertableStreams']))
        self.AddStory(NegotiateTiming(self, tags=['sdp']))
| |
from itertools import tee, chain, combinations
from collections import defaultdict
from pgmpy.factors import Factor
from pgmpy.factors.Factor import factor_product
from pgmpy.inference import Inference, BeliefPropagation
class DBNInference(Inference):
    def __init__(self, model):
        """
        Class for performing inference using Belief Propagation method
        for the input Dynamic Bayesian Network.
        For the exact inference implementation, the interface algorithm
        is used which is adapted from [1].
        Parameters:
        ----------
        model: Dynamic Bayesian Network
            Model for which inference is to performed
        Examples:
        --------
        >>> from pgmpy.factors import TabularCPD
        >>> from pgmpy.models import DynamicBayesianNetwork as DBN
        >>> from pgmpy.inference import DBNInference
        >>> dbnet = DBN()
        >>> dbnet.add_edges_from([(('Z', 0), ('X', 0)), (('X', 0), ('Y', 0)),
        ...                       (('Z', 0), ('Z', 1))])
        >>> z_start_cpd = TabularCPD(('Z', 0), 2, [[0.5, 0.5]])
        >>> x_i_cpd = TabularCPD(('X', 0), 2, [[0.6, 0.9],
        ...                                    [0.4, 0.1]],
        ...                      evidence=[('Z', 0)],
        ...                      evidence_card=2)
        >>> y_i_cpd = TabularCPD(('Y', 0), 2, [[0.2, 0.3],
        ...                                    [0.8, 0.7]],
        ...                      evidence=[('X', 0)],
        ...                      evidence_card=2)
        >>> z_trans_cpd = TabularCPD(('Z', 1), 2, [[0.4, 0.7],
        ...                                        [0.6, 0.3]],
        ...                          evidence=[('Z', 0)],
        ...                          evidence_card=2)
        >>> dbnet.add_cpds(z_start_cpd, z_trans_cpd, x_i_cpd, y_i_cpd)
        >>> dbnet.initialize_initial_state()
        >>> dbn_inf = DBNInference(dbnet)
        >>> dbn_inf.start_junction_tree.nodes()
        [(('X', 0), ('Z', 0)), (('X', 0), ('Y', 0))]
        >>> dbn_inf.one_and_half_junction_tree.nodes()
        [(('Z', 1), ('Z', 0)),
        (('Y', 1), ('X', 1)),
        (('Z', 1), ('X', 1))]
        References:
        ----------
        [1] Dynamic Bayesian Networks: Representation, Inference and Learning
            by Kevin Patrick Murphy
            http://www.cs.ubc.ca/~murphyk/Thesis/thesis.pdf
        Public Methods:
        --------------
        forward_inference
        backward_inference
        query
        """
        super(DBNInference, self).__init__(model)
        # Interface nodes carry information across consecutive time slices.
        self.interface_nodes_0 = model.get_interface_nodes(time_slice=0)
        self.interface_nodes_1 = model.get_interface_nodes(time_slice=1)
        start_markov_model = self.start_bayesian_model.to_markov_model()
        one_and_half_markov_model = self.one_and_half_model.to_markov_model()
        # Fully connect the interface nodes of each slice so that every
        # junction tree is guaranteed to contain a clique covering them.
        # tee() is needed because the same edge iterator is consumed twice.
        combinations_slice_0 = tee(combinations(self.interface_nodes_0, 2), 2)
        combinations_slice_1 = combinations(self.interface_nodes_1, 2)
        start_markov_model.add_edges_from(combinations_slice_0[0])
        one_and_half_markov_model.add_edges_from(chain(combinations_slice_0[1], combinations_slice_1))
        self.one_and_half_junction_tree = one_and_half_markov_model.to_junction_tree()
        self.start_junction_tree = start_markov_model.to_junction_tree()
        # Cliques used as entry/exit points when passing messages between
        # time slices (the "in" and "out" cliques of the 1.5-slice tree).
        self.start_interface_clique = self._get_clique(self.start_junction_tree, self.interface_nodes_0)
        self.in_clique = self._get_clique(self.one_and_half_junction_tree, self.interface_nodes_0)
        self.out_clique = self._get_clique(self.one_and_half_junction_tree, self.interface_nodes_1)

    def _shift_nodes(self, nodes, time_slice):
        """
        Shifting the nodes to a certain required timeslice.
        Parameters:
        ----------
        nodes: list, array-like
            List of node names.
            nodes that are to be shifted to some other time slice.
        time_slice: int
            time slice where to shift the nodes.
        """
        # Node names are (variable, time_slice) tuples; only the slice changes.
        return [(node[0], time_slice) for node in nodes]

    def _get_clique(self, junction_tree, nodes):
        """
        Extracting the cliques from the junction tree which are a subset of
        the given nodes.
        Parameters:
        ----------
        junction_tree: Junction tree
            from which the nodes are to be extracted.
        nodes: iterable container
            A container of nodes (list, dict, set, etc.).
        """
        # Returns the first clique that contains all of `nodes`; __init__
        # guarantees one exists because the interface nodes were moralized.
        return [clique for clique in junction_tree.nodes() if set(nodes).issubset(clique)][0]

    def _get_evidence(self, evidence_dict, time_slice, shift):
        """
        Getting the evidence belonging to a particular timeslice.
        Parameters:
        ----------
        evidence: dict
            a dict key, value pair as {var: state_of_var_observed}
            None if no evidence
        time: int
            the evidence corresponding to the time slice
        shift: int
            shifting the evidence corresponding to the given time slice.
        """
        # NOTE: returns None (implicitly) when evidence_dict is empty/None;
        # callers rely on that falsy result.
        if evidence_dict:
            return {(node[0], shift): evidence_dict[node] for node in evidence_dict if node[1] == time_slice}

    def _marginalize_factor(self, nodes, factor):
        """
        Marginalizing the factor selectively for a set of variables.
        Parameters:
        ----------
        nodes: list, array-like
            A container of nodes (list, dict, set, etc.).
        factor: factor
            factor which is to be marginalized.
        """
        # Sum out everything in the factor's scope EXCEPT `nodes`.
        marginalizing_nodes = list(set(factor.scope()).difference(nodes))
        return factor.marginalize(marginalizing_nodes, inplace=False)

    def _update_belief(self, belief_prop, clique, clique_potential, message=None):
        """
        Method for updating the belief.
        Parameters:
        ----------
        belief_prop: Belief Propagation
            Belief Propagation which needs to be updated.
        in_clique: clique
            The factor which needs to be updated corresponding to the input clique.
        out_clique_potential: factor
            Multiplying factor which will be multiplied to the factor corresponding to the clique.
        """
        old_factor = belief_prop.junction_tree.get_factors(clique)
        belief_prop.junction_tree.remove_factors(old_factor)
        if message:
            # Multiply in the incoming message and divide out the old
            # potential to avoid double-counting it.
            if message.scope() and clique_potential.scope():
                new_factor = old_factor * message
                new_factor = new_factor / clique_potential
            else:
                new_factor = old_factor
        else:
            new_factor = old_factor * clique_potential
        belief_prop.junction_tree.add_factors(new_factor)
        # Re-calibrate so subsequent queries see the updated belief.
        belief_prop.calibrate()

    def _get_factor(self, belief_prop, evidence):
        """
        Extracts the required factor from the junction tree.
        Parameters:
        ----------
        belief_prop: Belief Propagation
            Belief Propagation which needs to be updated.
        evidence: dict
            a dict key, value pair as {var: state_of_var_observed}
        """
        # Product of all clique factors, reduced by any observed variables.
        final_factor = factor_product(*belief_prop.junction_tree.get_factors())
        if evidence:
            for var in evidence:
                if var in final_factor.scope():
                    final_factor.reduce([(var, evidence[var])])
        return final_factor

    def _shift_factor(self, factor, shift):
        """
        Shifting the factor to a certain required time slice.
        Parameters:
        ----------
        factor: Factor
            The factor which needs to be shifted.
        shift: int
            The new timeslice to which the factor should belong to.
        """
        new_scope = self._shift_nodes(factor.scope(), shift)
        return Factor(new_scope, factor.cardinality, factor.values)

    def forward_inference(self, variables, evidence=None, args=None):
        """
        Forward inference method using belief propagation.
        Parameters:
        ----------
        variables: list
            list of variables for which you want to compute the probability
        evidence: dict
            a dict key, value pair as {var: state_of_var_observed}
            None if no evidence
        Examples:
        --------
        >>> from pgmpy.factors import TabularCPD
        >>> from pgmpy.models import DynamicBayesianNetwork as DBN
        >>> from pgmpy.inference import DBNInference
        >>> dbnet = DBN()
        >>> dbnet.add_edges_from([(('Z', 0), ('X', 0)), (('X', 0), ('Y', 0)),
        ...                       (('Z', 0), ('Z', 1))])
        >>> z_start_cpd = TabularCPD(('Z', 0), 2, [[0.5, 0.5]])
        >>> x_i_cpd = TabularCPD(('X', 0), 2, [[0.6, 0.9],
        ...                                    [0.4, 0.1]],
        ...                      evidence=[('Z', 0)],
        ...                      evidence_card=2)
        >>> y_i_cpd = TabularCPD(('Y', 0), 2, [[0.2, 0.3],
        ...                                    [0.8, 0.7]],
        ...                      evidence=[('X', 0)],
        ...                      evidence_card=2)
        >>> z_trans_cpd = TabularCPD(('Z', 1), 2, [[0.4, 0.7],
        ...                                        [0.6, 0.3]],
        ...                          evidence=[('Z', 0)],
        ...                          evidence_card=2)
        >>> dbnet.add_cpds(z_start_cpd, z_trans_cpd, x_i_cpd, y_i_cpd)
        >>> dbnet.initialize_initial_state()
        >>> dbn_inf = DBNInference(dbnet)
        >>> dbn_inf.forward_inference([('X', 2)], {('Y', 0):1, ('Y', 1):0, ('Y', 2):1})[('X', 2)].values
        array([ 0.76738736,  0.23261264])
        """
        # Group query variables by their time slice.
        variable_dict = defaultdict(list)
        for var in variables:
            variable_dict[var[1]].append(var)
        time_range = max(variable_dict)
        if evidence:
            # Extend the horizon if evidence reaches further than the query.
            evid_time_range = max([time_slice for var, time_slice in evidence.keys()])
            time_range = max(time_range, evid_time_range)
        start_bp = BeliefPropagation(self.start_junction_tree)
        mid_bp = BeliefPropagation(self.one_and_half_junction_tree)
        evidence_0 = self._get_evidence(evidence, 0, 0)
        interface_nodes_dict = {}
        potential_dict = {}
        if evidence:
            interface_nodes_dict = {k: v for k, v in evidence_0.items() if k in self.interface_nodes_0}
        # Slice 0: compute the interface potential and push it into the
        # 1.5-slice tree as the incoming message.
        initial_factor = self._get_factor(start_bp, evidence_0)
        marginalized_factor = self._marginalize_factor(self.interface_nodes_0, initial_factor)
        potential_dict[0] = marginalized_factor
        self._update_belief(mid_bp, self.in_clique, marginalized_factor)
        if variable_dict[0]:
            factor_values = start_bp.query(variable_dict[0], evidence=evidence_0)
        else:
            factor_values = {}
        # Slices 1..T: advance the interface potential one slice at a time.
        for time_slice in range(1, time_range + 1):
            evidence_time = self._get_evidence(evidence, time_slice, 1)
            if interface_nodes_dict:
                evidence_time.update(interface_nodes_dict)
            if variable_dict[time_slice]:
                variable_time = self._shift_nodes(variable_dict[time_slice], 1)
                new_values = mid_bp.query(variable_time, evidence=evidence_time)
                # Re-label results back to the absolute time slice.
                changed_values = {}
                for key in new_values.keys():
                    new_key = (key[0], time_slice)
                    new_factor = Factor([new_key], new_values[key].cardinality, new_values[key].values)
                    changed_values[new_key] = new_factor
                factor_values.update(changed_values)
            clique_phi = self._get_factor(mid_bp, evidence_time)
            out_clique_phi = self._marginalize_factor(self.interface_nodes_1, clique_phi)
            new_factor = self._shift_factor(out_clique_phi, 0)
            potential_dict[time_slice] = new_factor
            # Fresh BP instance per slice so old messages do not accumulate.
            mid_bp = BeliefPropagation(self.one_and_half_junction_tree)
            self._update_belief(mid_bp, self.in_clique, new_factor)
            if evidence_time:
                interface_nodes_dict = {(k[0], 0): v for k, v in evidence_time.items() if k in self.interface_nodes_1}
            else:
                interface_nodes_dict = {}
        # backward_inference reuses the per-slice potentials via this flag.
        if args == 'potential':
            return potential_dict
        return factor_values

    def backward_inference(self, variables, evidence=None):
        """
        Backward inference method using belief propagation.
        Parameters:
        ----------
        variables: list
            list of variables for which you want to compute the probability
        evidence: dict
            a dict key, value pair as {var: state_of_var_observed}
            None if no evidence
        Examples:
        --------
        >>> from pgmpy.factors import TabularCPD
        >>> from pgmpy.models import DynamicBayesianNetwork as DBN
        >>> from pgmpy.inference import DBNInference
        >>> dbnet = DBN()
        >>> dbnet.add_edges_from([(('Z', 0), ('X', 0)), (('X', 0), ('Y', 0)),
        ...                       (('Z', 0), ('Z', 1))])
        >>> z_start_cpd = TabularCPD(('Z', 0), 2, [[0.5, 0.5]])
        >>> x_i_cpd = TabularCPD(('X', 0), 2, [[0.6, 0.9],
        ...                                    [0.4, 0.1]],
        ...                      evidence=[('Z', 0)],
        ...                      evidence_card=2)
        >>> y_i_cpd = TabularCPD(('Y', 0), 2, [[0.2, 0.3],
        ...                                    [0.8, 0.7]],
        ...                      evidence=[('X', 0)],
        ...                      evidence_card=2)
        >>> z_trans_cpd = TabularCPD(('Z', 1), 2, [[0.4, 0.7],
        ...                                        [0.6, 0.3]],
        ...                          evidence=[('Z', 0)],
        ...                          evidence_card=2)
        >>> dbnet.add_cpds(z_start_cpd, z_trans_cpd, x_i_cpd, y_i_cpd)
        >>> dbnet.initialize_initial_state()
        >>> dbn_inf = DBNInference(dbnet)
        >>> dbn_inf.backward_inference([('X', 0)], {('Y', 0):0, ('Y', 1):1, ('Y', 2):1})[('X', 0)].values
        array([ 0.66594382,  0.33405618])
        """
        variable_dict = defaultdict(list)
        for var in variables:
            variable_dict[var[1]].append(var)
        time_range = max(variable_dict)
        interface_nodes_dict = {}
        if evidence:
            evid_time_range = max([time_slice for var, time_slice in evidence.keys()])
            time_range = max(time_range, evid_time_range)
        end_bp = BeliefPropagation(self.start_junction_tree)
        # Forward pass first: collect the per-slice interface potentials.
        potential_dict = self.forward_inference(variables, evidence, 'potential')
        update_factor = self._shift_factor(potential_dict[time_range], 1)
        factor_values = {}
        # Walk backwards from the last slice, combining forward potentials
        # with the backward message of each step.
        for time_slice in range(time_range, 0, -1):
            evidence_time = self._get_evidence(evidence, time_slice, 1)
            evidence_prev_time = self._get_evidence(evidence, time_slice - 1, 0)
            if evidence_prev_time:
                interface_nodes_dict = {k: v for k, v in evidence_prev_time.items() if k in self.interface_nodes_0}
            if evidence_time:
                evidence_time.update(interface_nodes_dict)
            mid_bp = BeliefPropagation(self.one_and_half_junction_tree)
            self._update_belief(mid_bp, self.in_clique, potential_dict[time_slice - 1])
            forward_factor = self._shift_factor(potential_dict[time_slice], 1)
            self._update_belief(mid_bp, self.out_clique, forward_factor, update_factor)
            if variable_dict[time_slice]:
                variable_time = self._shift_nodes(variable_dict[time_slice], 1)
                new_values = mid_bp.query(variable_time, evidence=evidence_time)
                # Re-label results back to the absolute time slice.
                changed_values = {}
                for key in new_values.keys():
                    new_key = (key[0], time_slice)
                    new_factor = Factor([new_key], new_values[key].cardinality, new_values[key].values)
                    changed_values[new_key] = new_factor
                factor_values.update(changed_values)
            clique_phi = self._get_factor(mid_bp, evidence_time)
            in_clique_phi = self._marginalize_factor(self.interface_nodes_0, clique_phi)
            update_factor = self._shift_factor(in_clique_phi, 1)
        # Finally fold the last backward message into the slice-0 tree.
        out_clique_phi = self._shift_factor(update_factor, 0)
        self._update_belief(end_bp, self.start_interface_clique, potential_dict[0], out_clique_phi)
        evidence_0 = self._get_evidence(evidence, 0, 0)
        if variable_dict[0]:
            factor_values.update(end_bp.query(variable_dict[0], evidence_0))
        return factor_values

    def query(self, variables, evidence=None, args='exact'):
        """
        Query method for Dynamic Bayesian Network using Interface Algorithm.
        Parameters:
        ----------
        variables: list
            list of variables for which you want to compute the probability
        evidence: dict
            a dict key, value pair as {var: state_of_var_observed}
            None if no evidence
        Examples:
        --------
        >>> from pgmpy.factors import TabularCPD
        >>> from pgmpy.models import DynamicBayesianNetwork as DBN
        >>> from pgmpy.inference import DBNInference
        >>> dbnet = DBN()
        >>> dbnet.add_edges_from([(('Z', 0), ('X', 0)), (('X', 0), ('Y', 0)),
        ...                       (('Z', 0), ('Z', 1))])
        >>> z_start_cpd = TabularCPD(('Z', 0), 2, [[0.5, 0.5]])
        >>> x_i_cpd = TabularCPD(('X', 0), 2, [[0.6, 0.9],
        ...                                    [0.4, 0.1]],
        ...                      evidence=[('Z', 0)],
        ...                      evidence_card=2)
        >>> y_i_cpd = TabularCPD(('Y', 0), 2, [[0.2, 0.3],
        ...                                    [0.8, 0.7]],
        ...                      evidence=[('X', 0)],
        ...                      evidence_card=2)
        >>> z_trans_cpd = TabularCPD(('Z', 1), 2, [[0.4, 0.7],
        ...                                        [0.6, 0.3]],
        ...                          evidence=[('Z', 0)],
        ...                          evidence_card=2)
        >>> dbnet.add_cpds(z_start_cpd, z_trans_cpd, x_i_cpd, y_i_cpd)
        >>> dbnet.initialize_initial_state()
        >>> dbn_inf = DBNInference(dbnet)
        >>> dbn_inf.query([('X', 0)], {('Y', 0):0, ('Y', 1):1, ('Y', 2):1})[('X', 0)].values
        array([ 0.66594382,  0.33405618])
        """
        # NOTE: any args value other than 'exact' returns None implicitly —
        # no approximate method is implemented here.
        if args == 'exact':
            return self.backward_inference(variables, evidence)
| |
#coding: utf-8
from __future__ import print_function, division
from argparse import ArgumentParser
import math
import sys
import time
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import utils
import numpy as np
import cPickle
import chainer
from chainer import cuda
import chainer.functions as F
from chainer import optimizers
import os
import socket
# Command-line interface for the RNNLM text-generation server (Python 2).
p = ArgumentParser()
p.add_argument('--gpu', '-G', default=-1, type=int,
               help='GPU ID (negative value indicates CPU)')
p.add_argument('-M', '--model', type=str, help='model_file')
p.add_argument('-O', '--output_file', type=str, default=sys.stderr,
               help='output file (default: stderr)')
p.add_argument('-N', '--n_lines', type=int, default=1000,
               help='num of lines')
p.add_argument('-H', '--highquality', default=False, action="store_true")
args = p.parse_args()

# If a path was supplied, replace the stderr default with a real file handle.
if args.output_file != sys.stderr:
    args.output_file = open(args.output_file, "w")

# Prepare RNNLM model
# The model and both vocab mappings are unpickled from disk; vocab maps
# word -> id and inv_vocab maps id -> word.
model = cPickle.load(open(args.model, 'rb'))
vocab = cPickle.load(open("vocab.pkl", "rb"))
inv_vocab = cPickle.load(open("inv_vocab.pkl", "rb"))
n_units = model.embed.W.shape[1]

# Pick the array module once: cupy on GPU, numpy on CPU.
xp = cuda.cupy if args.gpu >= 0 else np
if args.gpu >= 0:
    cuda.check_cuda_available()
    cuda.get_device(args.gpu).use()
    model.to_gpu()
if args.gpu < 0:
    model.to_cpu()

# load mild yankee dict
mild_yankee_dict = utils.load_csv_to_dict("mild_dict.csv")
def force_mild(state, line, word_id):
    """Advance the LM one step on ``word_id``; force-feed any mapped phrase.

    If the word has an entry in ``mild_yankee_dict``, each word of the
    replacement phrase is appended to ``line`` (mutated in place) and fed
    through the model as well.  Returns the softmax prediction after the
    last consumed word.

    NOTE(review): the updated ``state`` is local and never returned, so the
    caller keeps stepping from its old state — confirm this is intended.
    """
    cur_word = xp.array([word_id], dtype=np.int32)
    state, predict = forward_one_step(cur_word, state, train=False)
    key = unicode(inv_vocab[word_id])
    # `in` instead of dict.has_key(), which was removed in Python 3 and is
    # the non-idiomatic spelling even on Python 2.
    if key in mild_yankee_dict:
        for next_word in mild_yankee_dict[key].split():
            line.append(next_word)
            next_word_id = xp.array([vocab[next_word]], dtype=np.int32)
            state, predict = forward_one_step(next_word_id, state, train=False)
    return predict
def forward_one_step(x_data, state, train=True):
    """Run one step of the two-layer LSTM LM.

    x_data: int32 array of word ids (batch of 1 in this script).
    state: dict with keys 'c1', 'h1', 'c2', 'h2' (LSTM cells/hiddens).
    Returns the new state dict and the softmax over the vocabulary.
    """
    if args.gpu >= 0:
        x_data = cuda.to_gpu(x_data)
    # volatile=True at inference time disables graph construction.
    x = chainer.Variable(x_data, volatile=not train)
    h0 = model.embed(x)
    h1_in = model.l1_x(F.dropout(h0, train=train)) + model.l1_h(state['h1'])
    c1, h1 = F.lstm(state['c1'], h1_in)
    h2_in = model.l2_x(F.dropout(h1, train=train)) + model.l2_h(state['h2'])
    c2, h2 = F.lstm(state['c2'], h2_in)
    y = model.l3(F.dropout(h2, train=train))
    state = {'c1': c1, 'h1': h1, 'c2': c2, 'h2': h2}
    return state, F.softmax(y)
def make_initial_state(batchsize=1, train=True):
    """Build zero-filled LSTM cell/hidden variables for both layers."""
    state = {}
    for name in ('c1', 'h1', 'c2', 'h2'):
        zeros = xp.zeros((batchsize, n_units), dtype=np.float32)
        state[name] = chainer.Variable(zeros, volatile=not train)
    return state
def generate_line(seed=None):
    # start with <s> :1 is assigned to <s>
    # Generates one line of text by sampling from the LM, optionally primed
    # with `seed` words.  Returns the space-joined line ending in "\n".
    line = []
    state = make_initial_state(batchsize=1, train=False)
    if args.gpu >= 0:
        for key, value in state.items():
            value.data = cuda.to_gpu(value.data)
    index = 1
    cur_word = xp.array([index], dtype=xp.int32)
    if seed is not None:
        # Feed the seed words through the model so sampling is conditioned
        # on them; seed words (other than <s>) are echoed into the line.
        for word in seed:
            state, predict = forward_one_step(cur_word, state, train=False)
            index = vocab[word]
            cur_word = xp.array([index], dtype=xp.int32)
            if index != 1:
                line.append(inv_vocab[index])
    if index == 1:
        # No seed beyond <s>: draw the first word from the model.
        state, predict = forward_one_step(cur_word, state, train=False)
        probability = cuda.to_cpu(predict.data)[0].astype(np.float64)
        probability /= np.sum(probability)  # renormalize after float64 cast
        index = np.random.choice(range(len(probability)), p=probability)
        line.append(inv_vocab[index])
    seq_len = 1
    while(1):
        # NOTE(review): force_mild advances the LSTM state internally but
        # does not return it, so `state` here never moves past the prefix —
        # confirm whether that is the intended sampling behavior.
        predict = force_mild(state, line, index)
        probability = cuda.to_cpu(predict.data)[0].astype(np.float64)
        if args.highquality:
            # Restrict sampling to the 10 most probable words.
            sorted_prob = sorted(probability, reverse=True)
            max_10_list = sorted_prob[:10]
            for i in xrange(len(probability)):
                if probability[i] not in max_10_list:
                    probability[i] = 0
        probability /= np.sum(probability)
        index = np.random.choice(range(len(probability)), p=probability)
        # index = cuda.to_cpu(predict.data)[0].astype(np.float64).argmax()
        if seq_len < 10 and index == 2:
            # Reject an early </s>: force lines of at least 10 tokens.
            seq_len += 1
            continue
        if index == 2:  # 2 is assigned to </s>
            break
        line.append(inv_vocab[index])
        seq_len += 1
        if seq_len > 30:
            # Hard cap on line length.
            break
    return " ".join(line)+"\n"
if __name__ == '__main__':
requestMax = 50
PORT = 50100
HOST = '0.0.0.0'
sys.stderr.write("waiting...")
for res in socket.getaddrinfo(HOST, PORT, socket.AF_UNSPEC,
socket.SOCK_STREAM, 0, socket.AI_PASSIVE):
af, socktype, proto, canonname, sa = res
try:
s = socket.socket(af, socktype, proto)
except socket.error, msg:
s = None
continue
try:
s.bind(sa)
s.listen(requestMax)
except socket.error, msg:
s.close()
s = None
continue
break
if s is None:
print('could not open socket')
sys.exit(1)
conn, addr = s.accept()
print('Connected by', addr)
# pid = os.fork()
# if pid == 0:
# print("child process")
# while 1:
# msg = raw_input("> ")
# conn.send('%s' % msg )
# if msg == ".":
# break;
# sys.exit()
while 1:
data = conn.recv(1024)
seed = data.split()
if not data:
print('End')
break
elif data == "close":
print("Client is closed")
os.kill(pid, 9)
break
else:
try:
if "<unk>" in [inv_vocab[vocab[word]] for word in seed]:
conn.send("sorry, out of vocabulary ... \n")
else:
for i in xrange(args.n_lines):
line = generate_line(seed)
conn.send(line)
finally:
conn.send("finish")
conn.close()
sys.exit()
| |
from sqlalchemy import Column
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import util
from sqlalchemy.ext.hybrid import hybrid_method
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import aliased
from sqlalchemy.orm import create_session
from sqlalchemy.orm import mapper
from sqlalchemy.orm import Session
from sqlalchemy.orm import synonym
from sqlalchemy.orm import util as orm_util
from sqlalchemy.orm import with_polymorphic
from sqlalchemy.orm.path_registry import PathRegistry
from sqlalchemy.orm.path_registry import RootRegistry
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_warnings
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.util import compat
from test.orm import _fixtures
from .inheritance import _poly_fixtures
class AliasedClassTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = "default"
def _fixture(self, cls, properties=None):
    """Map ``cls`` against a fresh three-column ``point`` table.

    ``properties`` is forwarded to ``mapper()``; the default means "no
    extra properties".  Fix: the original used a mutable default ``{}``,
    which is shared across all calls — ``None`` with a local fallback is
    the safe, backward-compatible form.
    """
    if properties is None:
        properties = {}
    table = Table(
        "point",
        MetaData(),
        Column("id", Integer(), primary_key=True),
        Column("x", Integer),
        Column("y", Integer),
    )
    mapper(cls, table, properties=properties)
    return table
def test_simple(self):
    """Aliased attributes exist and point at the aliased table."""
    class Point(object):
        pass

    table = self._fixture(Point)
    pt_alias = aliased(Point)
    # The alias exposes every mapped column attribute.
    assert pt_alias.id
    assert pt_alias.x
    assert pt_alias.y
    # The mapped class resolves to the real table; the alias does not.
    assert Point.id.__clause_element__().table is table
    assert pt_alias.id.__clause_element__().table is not table
def test_not_instantiatable(self):
    """Calling an alias object itself is an error, not a constructor."""
    class Point(object):
        pass

    self._fixture(Point)
    pt_alias = aliased(Point)
    assert_raises(TypeError, pt_alias)
def test_instancemethod(self):
    """Plain instance methods are reachable through the alias."""
    class Point(object):
        def zero(self):
            self.x, self.y = 0, 0

    self._fixture(Point)
    pt_alias = aliased(Point)
    assert Point.zero
    assert getattr(pt_alias, "zero")
def test_classmethod(self):
    """classmethods behave identically on the class and its alias."""
    class Point(object):
        @classmethod
        def max_x(cls):
            return 100

    self._fixture(Point)
    pt_alias = aliased(Point)
    assert Point.max_x
    assert pt_alias.max_x
    assert Point.max_x() == pt_alias.max_x() == 100
def test_simple_property(self):
class Point(object):
@property
def max_x(self):
return 100
self._fixture(Point)
alias = aliased(Point)
assert Point.max_x
assert Point.max_x != 100
assert alias.max_x
assert Point.max_x is alias.max_x
def test_descriptors(self):
class descriptor(object):
def __init__(self, fn):
self.fn = fn
def __get__(self, obj, owner):
if obj is not None:
return self.fn(obj, obj)
else:
return self
def method(self):
return "method"
class Point(object):
center = (0, 0)
@descriptor
def thing(self, arg):
return arg.center
self._fixture(Point)
alias = aliased(Point)
assert Point.thing != (0, 0)
assert Point().thing == (0, 0)
assert Point.thing.method() == "method"
assert alias.thing != (0, 0)
assert alias.thing.method() == "method"
def _assert_has_table(self, expr, table):
from sqlalchemy import Column # override testlib's override
for child in expr.get_children():
if isinstance(child, Column):
assert child.table is table
def test_hybrid_descriptor_one(self):
class Point(object):
def __init__(self, x, y):
self.x, self.y = x, y
@hybrid_method
def left_of(self, other):
return self.x < other.x
self._fixture(Point)
alias = aliased(Point)
sess = Session()
self.assert_compile(
sess.query(alias).filter(alias.left_of(Point)),
"SELECT point_1.id AS point_1_id, point_1.x AS point_1_x, "
"point_1.y AS point_1_y FROM point AS point_1, point "
"WHERE point_1.x < point.x",
)
def test_hybrid_descriptor_two(self):
class Point(object):
def __init__(self, x, y):
self.x, self.y = x, y
@hybrid_property
def double_x(self):
return self.x * 2
self._fixture(Point)
alias = aliased(Point)
eq_(str(Point.double_x), "Point.double_x")
eq_(str(alias.double_x), "AliasedClass_Point.double_x")
eq_(str(Point.double_x.__clause_element__()), "point.x * :x_1")
eq_(str(alias.double_x.__clause_element__()), "point_1.x * :x_1")
sess = Session()
self.assert_compile(
sess.query(alias).filter(alias.double_x > Point.x),
"SELECT point_1.id AS point_1_id, point_1.x AS point_1_x, "
"point_1.y AS point_1_y FROM point AS point_1, point "
"WHERE point_1.x * :x_1 > point.x",
)
def test_hybrid_descriptor_three(self):
class Point(object):
def __init__(self, x, y):
self.x, self.y = x, y
@hybrid_property
def x_alone(self):
return self.x
self._fixture(Point)
alias = aliased(Point)
eq_(str(Point.x_alone), "Point.x_alone")
eq_(str(alias.x_alone), "AliasedClass_Point.x_alone")
# from __clause_element__() perspective, Point.x_alone
# and Point.x return the same thing, so that's good
eq_(str(Point.x.__clause_element__()), "point.x")
eq_(str(Point.x_alone.__clause_element__()), "point.x")
# same for the alias
eq_(str(alias.x + 1), "point_1.x + :x_1")
eq_(str(alias.x_alone + 1), "point_1.x + :x_1")
point_mapper = inspect(Point)
eq_(
Point.x_alone._annotations,
{
"parententity": point_mapper,
"parentmapper": point_mapper,
"orm_key": "x_alone",
},
)
eq_(
Point.x._annotations,
{
"parententity": point_mapper,
"parentmapper": point_mapper,
"orm_key": "x",
},
)
eq_(str(alias.x_alone == alias.x), "point_1.x = point_1.x")
a2 = aliased(Point)
eq_(str(a2.x_alone == alias.x), "point_1.x = point_2.x")
sess = Session()
self.assert_compile(
sess.query(alias).filter(alias.x_alone > Point.x),
"SELECT point_1.id AS point_1_id, point_1.x AS point_1_x, "
"point_1.y AS point_1_y FROM point AS point_1, point "
"WHERE point_1.x > point.x",
)
def test_proxy_descriptor_one(self):
class Point(object):
def __init__(self, x, y):
self.x, self.y = x, y
self._fixture(Point, properties={"x_syn": synonym("x")})
alias = aliased(Point)
eq_(str(Point.x_syn), "Point.x_syn")
eq_(str(alias.x_syn), "AliasedClass_Point.x_syn")
sess = Session()
self.assert_compile(
sess.query(alias.x_syn).filter(alias.x_syn > Point.x_syn),
"SELECT point_1.x AS point_1_x FROM point AS point_1, point "
"WHERE point_1.x > point.x",
)
def test_meta_getattr_one(self):
class MetaPoint(type):
def __getattr__(cls, key):
if key == "x_syn":
return cls.x
raise AttributeError(key)
class Point(compat.with_metaclass(MetaPoint)):
pass
self._fixture(Point)
alias = aliased(Point)
eq_(str(Point.x_syn), "Point.x")
eq_(str(alias.x_syn), "AliasedClass_Point.x")
# from __clause_element__() perspective, Point.x_syn
# and Point.x return the same thing, so that's good
eq_(str(Point.x.__clause_element__()), "point.x")
eq_(str(Point.x_syn.__clause_element__()), "point.x")
# same for the alias
eq_(str(alias.x + 1), "point_1.x + :x_1")
eq_(str(alias.x_syn + 1), "point_1.x + :x_1")
is_(Point.x_syn.__clause_element__(), Point.x.__clause_element__())
eq_(str(alias.x_syn == alias.x), "point_1.x = point_1.x")
a2 = aliased(Point)
eq_(str(a2.x_syn == alias.x), "point_1.x = point_2.x")
sess = Session()
self.assert_compile(
sess.query(alias).filter(alias.x_syn > Point.x),
"SELECT point_1.id AS point_1_id, point_1.x AS point_1_x, "
"point_1.y AS point_1_y FROM point AS point_1, point "
"WHERE point_1.x > point.x",
)
def test_meta_getattr_two(self):
class MetaPoint(type):
def __getattr__(cls, key):
if key == "double_x":
return cls._impl_double_x
raise AttributeError(key)
class Point(compat.with_metaclass(MetaPoint)):
@hybrid_property
def _impl_double_x(self):
return self.x * 2
self._fixture(Point)
alias = aliased(Point)
eq_(str(Point.double_x), "Point._impl_double_x")
eq_(str(alias.double_x), "AliasedClass_Point._impl_double_x")
eq_(str(Point.double_x.__clause_element__()), "point.x * :x_1")
eq_(str(alias.double_x.__clause_element__()), "point_1.x * :x_1")
sess = Session()
self.assert_compile(
sess.query(alias).filter(alias.double_x > Point.x),
"SELECT point_1.id AS point_1_id, point_1.x AS point_1_x, "
"point_1.y AS point_1_y FROM point AS point_1, point "
"WHERE point_1.x * :x_1 > point.x",
)
def test_meta_getattr_three(self):
class MetaPoint(type):
def __getattr__(cls, key):
@hybrid_property
def double_x(me):
return me.x * 2
if key == "double_x":
return double_x.__get__(None, cls)
raise AttributeError(key)
class Point(compat.with_metaclass(MetaPoint)):
pass
self._fixture(Point)
alias = aliased(Point)
eq_(str(Point.double_x.__clause_element__()), "point.x * :x_1")
eq_(str(alias.double_x.__clause_element__()), "point_1.x * :x_1")
sess = Session()
self.assert_compile(
sess.query(alias).filter(alias.double_x > Point.x),
"SELECT point_1.id AS point_1_id, point_1.x AS point_1_x, "
"point_1.y AS point_1_y FROM point AS point_1, point "
"WHERE point_1.x * :x_1 > point.x",
)
def test_parententity_vs_parentmapper(self):
class Point(object):
pass
self._fixture(Point, properties={"x_syn": synonym("x")})
pa = aliased(Point)
is_(Point.x_syn._parententity, inspect(Point))
is_(Point.x._parententity, inspect(Point))
is_(Point.x_syn._parentmapper, inspect(Point))
is_(Point.x._parentmapper, inspect(Point))
is_(
Point.x_syn.__clause_element__()._annotations["parententity"],
inspect(Point),
)
is_(
Point.x.__clause_element__()._annotations["parententity"],
inspect(Point),
)
is_(
Point.x_syn.__clause_element__()._annotations["parentmapper"],
inspect(Point),
)
is_(
Point.x.__clause_element__()._annotations["parentmapper"],
inspect(Point),
)
pa = aliased(Point)
is_(pa.x_syn._parententity, inspect(pa))
is_(pa.x._parententity, inspect(pa))
is_(pa.x_syn._parentmapper, inspect(Point))
is_(pa.x._parentmapper, inspect(Point))
is_(
pa.x_syn.__clause_element__()._annotations["parententity"],
inspect(pa),
)
is_(
pa.x.__clause_element__()._annotations["parententity"], inspect(pa)
)
is_(
pa.x_syn.__clause_element__()._annotations["parentmapper"],
inspect(Point),
)
is_(
pa.x.__clause_element__()._annotations["parentmapper"],
inspect(Point),
)
class IdentityKeyTest(_fixtures.FixtureTest):
    """Exercises orm_util.identity_key() across its calling conventions."""

    run_inserts = None

    def test_identity_key_1(self):
        """The identity may be passed positionally or via ``ident=``."""
        User, users = self.classes.User, self.tables.users
        mapper(User, users)

        expected = (User, (1,), None)
        eq_(orm_util.identity_key(User, [1]), expected)
        eq_(orm_util.identity_key(User, ident=[1]), expected)

    def test_identity_key_scalar(self):
        """A scalar identity is normalized to a one-element tuple."""
        User, users = self.classes.User, self.tables.users
        mapper(User, users)

        expected = (User, (1,), None)
        eq_(orm_util.identity_key(User, 1), expected)
        eq_(orm_util.identity_key(User, ident=1), expected)

    def test_identity_key_2(self):
        """identity_key(instance=...) derives the key from a flushed object."""
        users, User = self.tables.users, self.classes.User
        mapper(User, users)

        session = create_session()
        user = User(name="u1")
        session.add(user)
        session.flush()

        eq_(orm_util.identity_key(instance=user), (User, (user.id,), None))

    def test_identity_key_3(self):
        """identity_key(row=...) reads primary key columns out of a row."""
        User, users = self.classes.User, self.tables.users
        mapper(User, users)

        row = {users.c.id: 1, users.c.name: "Frank"}
        eq_(orm_util.identity_key(User, row=row), (User, (1,), None))

    def test_identity_key_token(self):
        """An explicit identity_token lands in the third tuple position."""
        User, users = self.classes.User, self.tables.users
        mapper(User, users)

        expected = (User, (1,), "token")
        eq_(orm_util.identity_key(User, [1], identity_token="token"), expected)
        eq_(
            orm_util.identity_key(User, ident=[1], identity_token="token"),
            expected,
        )
class PathRegistryTest(_fixtures.FixtureTest):
    """Tests for PathRegistry: coercion, indexing, comparison, use as a
    key namespace over plain dictionaries, and (de)serialization.

    Two test methods were renamed to fix the "deseralize" misspelling;
    test methods are discovered by their ``test_`` prefix, so the rename
    is behavior-neutral.
    """

    run_setup_mappers = "once"
    run_inserts = None
    run_deletes = None

    @classmethod
    def setup_mappers(cls):
        cls._setup_stock_mapping()

    def test_root_registry(self):
        """RootRegistry indexed by a mapper yields that mapper's path."""
        umapper = inspect(self.classes.User)
        is_(RootRegistry()[umapper], umapper._path_registry)
        eq_(RootRegistry()[umapper], PathRegistry.coerce((umapper,)))

    def test_expand(self):
        """Indexing a path by attrs/mappers extends it step by step."""
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        path = PathRegistry.coerce((umapper,))

        eq_(
            path[umapper.attrs.addresses][amapper][
                amapper.attrs.email_address
            ],
            PathRegistry.coerce(
                (
                    umapper,
                    umapper.attrs.addresses,
                    amapper,
                    amapper.attrs.email_address,
                )
            ),
        )

    def test_entity_boolean(self):
        """A single-entity path is truthy."""
        umapper = inspect(self.classes.User)
        path = PathRegistry.coerce((umapper,))
        is_(bool(path), True)

    def test_key_boolean(self):
        """An entity+attribute path is truthy."""
        umapper = inspect(self.classes.User)
        path = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        is_(bool(path), True)

    def test_aliased_class(self):
        """A path rooted on an AliasedClass reports is_aliased_class."""
        User = self.classes.User
        ua = aliased(User)
        ua_insp = inspect(ua)
        path = PathRegistry.coerce((ua_insp, ua_insp.mapper.attrs.addresses))
        assert path.parent.is_aliased_class

    def test_indexed_entity(self):
        """Even integer indexes into a path return entities."""
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        path = PathRegistry.coerce(
            (
                umapper,
                umapper.attrs.addresses,
                amapper,
                amapper.attrs.email_address,
            )
        )
        is_(path[0], umapper)
        is_(path[2], amapper)

    def test_indexed_key(self):
        """Odd integer indexes into a path return attributes."""
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        path = PathRegistry.coerce(
            (
                umapper,
                umapper.attrs.addresses,
                amapper,
                amapper.attrs.email_address,
            )
        )
        eq_(path[1], umapper.attrs.addresses)
        eq_(path[3], amapper.attrs.email_address)

    def test_slice(self):
        """Slicing a path returns the underlying tuple segment."""
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        path = PathRegistry.coerce(
            (
                umapper,
                umapper.attrs.addresses,
                amapper,
                amapper.attrs.email_address,
            )
        )
        eq_(path[1:3], (umapper.attrs.addresses, amapper))

    def test_addition(self):
        """Two paths concatenate with the + operator."""
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        p2 = PathRegistry.coerce((amapper, amapper.attrs.email_address))
        eq_(
            p1 + p2,
            PathRegistry.coerce(
                (
                    umapper,
                    umapper.attrs.addresses,
                    amapper,
                    amapper.attrs.email_address,
                )
            ),
        )

    def test_length(self):
        """len() and .length agree for paths of size 0 through 4."""
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        pneg1 = PathRegistry.coerce(())
        p0 = PathRegistry.coerce((umapper,))
        p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
        p3 = PathRegistry.coerce(
            (
                umapper,
                umapper.attrs.addresses,
                amapper,
                amapper.attrs.email_address,
            )
        )

        eq_(len(pneg1), 0)
        eq_(len(p0), 1)
        eq_(len(p1), 2)
        eq_(len(p2), 3)
        eq_(len(p3), 4)
        eq_(pneg1.length, 0)
        eq_(p0.length, 1)
        eq_(p1.length, 2)
        eq_(p2.length, 3)
        eq_(p3.length, 4)

    def test_eq(self):
        """Path equality compares entities, attributes, and length."""
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        u_alias = inspect(aliased(self.classes.User))
        p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        p3 = PathRegistry.coerce((umapper, umapper.attrs.name))
        p4 = PathRegistry.coerce((u_alias, umapper.attrs.addresses))
        p5 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
        p6 = PathRegistry.coerce(
            (amapper, amapper.attrs.user, umapper, umapper.attrs.addresses)
        )
        p7 = PathRegistry.coerce(
            (
                amapper,
                amapper.attrs.user,
                umapper,
                umapper.attrs.addresses,
                amapper,
                amapper.attrs.email_address,
            )
        )

        is_(p1 == p2, True)
        is_(p1 == p3, False)
        is_(p1 == p4, False)
        is_(p1 == p5, False)
        is_(p6 == p7, False)
        is_(p6 == p7.parent.parent, True)

        is_(p1 != p2, False)
        is_(p1 != p3, True)
        is_(p1 != p4, True)
        is_(p1 != p5, True)

    def test_eq_non_path(self):
        """Comparing a path to a non-path warns and returns inequality."""
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        u_alias = inspect(aliased(self.classes.User))
        p1 = PathRegistry.coerce((umapper,))
        p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        p3 = PathRegistry.coerce((u_alias, umapper.attrs.addresses))
        p4 = PathRegistry.coerce((u_alias, umapper.attrs.addresses, amapper))
        p5 = PathRegistry.coerce((u_alias,)).token(":*")

        non_object = 54.1432

        for obj in [p1, p2, p3, p4, p5]:
            with expect_warnings(
                "Comparison of PathRegistry to "
                "<.* 'float'> is not supported"
            ):
                is_(obj == non_object, False)

            with expect_warnings(
                "Comparison of PathRegistry to "
                "<.* 'float'> is not supported"
            ):
                is_(obj != non_object, True)

    def test_contains_mapper(self):
        """contains_mapper() only matches mappers present in the path."""
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        assert p1.contains_mapper(umapper)
        assert not p1.contains_mapper(amapper)

    def test_path(self):
        """.path exposes the raw coerced tuple."""
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
        p3 = PathRegistry.coerce((amapper, amapper.attrs.email_address))

        eq_(p1.path, (umapper, umapper.attrs.addresses))
        eq_(p2.path, (umapper, umapper.attrs.addresses, amapper))
        eq_(p3.path, (amapper, amapper.attrs.email_address))

    def test_registry_set(self):
        """set() stores values keyed on (key, path.path) in a plain dict."""
        reg = {}
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
        p3 = PathRegistry.coerce((amapper, amapper.attrs.email_address))

        p1.set(reg, "p1key", "p1value")
        p2.set(reg, "p2key", "p2value")
        p3.set(reg, "p3key", "p3value")
        eq_(
            reg,
            {
                ("p1key", p1.path): "p1value",
                ("p2key", p2.path): "p2value",
                ("p3key", p3.path): "p3value",
            },
        )

    def test_registry_get(self):
        """get() retrieves by (key, path), returning None when absent."""
        reg = {}
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
        p3 = PathRegistry.coerce((amapper, amapper.attrs.email_address))
        reg.update(
            {
                ("p1key", p1.path): "p1value",
                ("p2key", p2.path): "p2value",
                ("p3key", p3.path): "p3value",
            }
        )

        eq_(p1.get(reg, "p1key"), "p1value")
        eq_(p2.get(reg, "p2key"), "p2value")
        eq_(p2.get(reg, "p1key"), None)
        eq_(p3.get(reg, "p3key"), "p3value")
        eq_(p3.get(reg, "p1key"), None)

    def test_registry_contains(self):
        """contains() checks only this path's own (key, path) entries."""
        reg = {}
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
        p3 = PathRegistry.coerce((amapper, amapper.attrs.email_address))
        reg.update(
            {
                ("p1key", p1.path): "p1value",
                ("p2key", p2.path): "p2value",
                ("p3key", p3.path): "p3value",
            }
        )
        assert p1.contains(reg, "p1key")
        assert not p1.contains(reg, "p2key")
        assert p3.contains(reg, "p3key")

        assert not p2.contains(reg, "fake")

    def test_registry_setdefault(self):
        """setdefault() only writes when the (key, path) entry is absent."""
        reg = {}
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)
        p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
        reg.update({("p1key", p1.path): "p1value"})

        p1.setdefault(reg, "p1key", "p1newvalue_a")
        p1.setdefault(reg, "p1key_new", "p1newvalue_b")
        p2.setdefault(reg, "p2key", "p2newvalue")
        eq_(
            reg,
            {
                ("p1key", p1.path): "p1value",
                ("p1key_new", p1.path): "p1newvalue_b",
                ("p2key", p2.path): "p2newvalue",
            },
        )

    def test_serialize(self):
        """serialize() produces (class, attr-name-or-None) pairs."""
        User = self.classes.User
        Address = self.classes.Address
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)

        p1 = PathRegistry.coerce(
            (
                umapper,
                umapper.attrs.addresses,
                amapper,
                amapper.attrs.email_address,
            )
        )
        p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
        p3 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        eq_(p1.serialize(), [(User, "addresses"), (Address, "email_address")])
        eq_(p2.serialize(), [(User, "addresses"), (Address, None)])
        eq_(p3.serialize(), [(User, "addresses")])

    def test_serialize_context_dict(self):
        """serialize_context_dict() serializes only the requested keys."""
        reg = util.OrderedDict()
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)

        p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
        p3 = PathRegistry.coerce((amapper, amapper.attrs.email_address))

        p1.set(reg, "p1key", "p1value")
        p2.set(reg, "p2key", "p2value")
        p3.set(reg, "p3key", "p3value")
        eq_(
            reg,
            {
                ("p1key", p1.path): "p1value",
                ("p2key", p2.path): "p2value",
                ("p3key", p3.path): "p3value",
            },
        )

        serialized = PathRegistry.serialize_context_dict(
            reg, ("p1key", "p2key")
        )
        eq_(
            serialized,
            [
                (("p1key", p1.serialize()), "p1value"),
                (("p2key", p2.serialize()), "p2value"),
            ],
        )

    def test_deserialize_context_dict(self):
        """Round-trip of serialize_context_dict back to a registry dict.

        (Renamed from "test_deseralize_context_dict" — typo.)
        """
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)

        p1 = PathRegistry.coerce((umapper, umapper.attrs.addresses))
        p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
        p3 = PathRegistry.coerce((amapper, amapper.attrs.email_address))

        serialized = [
            (("p1key", p1.serialize()), "p1value"),
            (("p2key", p2.serialize()), "p2value"),
            (("p3key", p3.serialize()), "p3value"),
        ]
        deserialized = PathRegistry.deserialize_context_dict(serialized)

        eq_(
            deserialized,
            {
                ("p1key", p1.path): "p1value",
                ("p2key", p2.path): "p2value",
                ("p3key", p3.path): "p3value",
            },
        )

    def test_deserialize(self):
        """deserialize() reconstructs paths from serialized pairs.

        (Renamed from "test_deseralize" — typo.)
        """
        User = self.classes.User
        Address = self.classes.Address
        umapper = inspect(self.classes.User)
        amapper = inspect(self.classes.Address)

        p1 = PathRegistry.coerce(
            (
                umapper,
                umapper.attrs.addresses,
                amapper,
                amapper.attrs.email_address,
            )
        )
        p2 = PathRegistry.coerce((umapper, umapper.attrs.addresses, amapper))
        p3 = PathRegistry.coerce((umapper, umapper.attrs.addresses))

        eq_(
            PathRegistry.deserialize(
                [(User, "addresses"), (Address, "email_address")]
            ),
            p1,
        )
        eq_(
            PathRegistry.deserialize([(User, "addresses"), (Address, None)]),
            p2,
        )
        eq_(PathRegistry.deserialize([(User, "addresses")]), p3)
class PathRegistryInhTest(_poly_fixtures._Polymorphic):
    """PathRegistry coercion in the presence of inheritance, AliasedClass
    and with_polymorphic() entities (see #5082 comments inline)."""

    run_setup_mappers = "once"
    run_inserts = None
    run_deletes = None

    def test_plain(self):
        """A base mapper plus a subclass attribute coerces to the subclass."""
        Person = _poly_fixtures.Person
        Engineer = _poly_fixtures.Engineer
        pmapper = inspect(Person)
        emapper = inspect(Engineer)
        p1 = PathRegistry.coerce((pmapper, emapper.attrs.machines))
        # given a mapper and an attribute on a subclass,
        # the path converts what you get to be against that subclass
        eq_(p1.path, (emapper, emapper.attrs.machines))

    def test_plain_compound(self):
        """The subclass conversion also applies mid-path."""
        Company = _poly_fixtures.Company
        Person = _poly_fixtures.Person
        Engineer = _poly_fixtures.Engineer
        cmapper = inspect(Company)
        pmapper = inspect(Person)
        emapper = inspect(Engineer)
        p1 = PathRegistry.coerce(
            (cmapper, cmapper.attrs.employees, pmapper, emapper.attrs.machines)
        )
        # given a mapper and an attribute on a subclass,
        # the path converts what you get to be against that subclass
        eq_(
            p1.path,
            (
                cmapper,
                cmapper.attrs.employees,
                emapper,
                emapper.attrs.machines,
            ),
        )

    def test_plain_aliased(self):
        """An AliasedClass at the root is kept as-is in the path."""
        Person = _poly_fixtures.Person
        Engineer = _poly_fixtures.Engineer
        emapper = inspect(Engineer)
        p_alias = aliased(Person)
        p_alias = inspect(p_alias)
        p1 = PathRegistry.coerce((p_alias, emapper.attrs.machines))
        # plain AliasedClass - the path keeps that AliasedClass directly
        # as is in the path
        eq_(p1.path, (p_alias, emapper.attrs.machines))

    def test_plain_aliased_compound(self):
        """AliasedClass entities are preserved at every path position."""
        Company = _poly_fixtures.Company
        Person = _poly_fixtures.Person
        Engineer = _poly_fixtures.Engineer
        cmapper = inspect(Company)
        emapper = inspect(Engineer)
        c_alias = aliased(Company)
        p_alias = aliased(Person)
        c_alias = inspect(c_alias)
        p_alias = inspect(p_alias)
        p1 = PathRegistry.coerce(
            (c_alias, cmapper.attrs.employees, p_alias, emapper.attrs.machines)
        )
        # plain AliasedClass - the path keeps that AliasedClass directly
        # as is in the path
        eq_(
            p1.path,
            (
                c_alias,
                cmapper.attrs.employees,
                p_alias,
                emapper.attrs.machines,
            ),
        )

    def test_with_poly_sub(self):
        """with_polymorphic stays in natural_path at the root, but is
        replaced by the base mappers when it appears mid-path."""
        Company = _poly_fixtures.Company
        Person = _poly_fixtures.Person
        Engineer = _poly_fixtures.Engineer
        emapper = inspect(Engineer)
        cmapper = inspect(Company)
        p_poly = with_polymorphic(Person, [Engineer])
        e_poly_insp = inspect(p_poly.Engineer)  # noqa - used by comment below
        p_poly_insp = inspect(p_poly)
        p1 = PathRegistry.coerce((p_poly_insp, emapper.attrs.machines))
        # changes as of #5082: when a with_polymorphic is in the middle
        # of a path, the natural path makes sure it uses the base mappers,
        # however when it's at the root, the with_polymorphic stays in
        # the natural path
        # this behavior is the same as pre #5082, it was temporarily changed
        # but this proved to be incorrect. The path starts on a
        # with_polymorphic(), so a Query will "naturally" construct a path
        # that comes from that wp.
        eq_(p1.path, (e_poly_insp, emapper.attrs.machines))
        eq_(p1.natural_path, (e_poly_insp, emapper.attrs.machines))
        # this behavior is new as of the final version of #5082.
        # the path starts on a normal entity and has a with_polymorphic
        # in the middle, for this to match what Query will generate it needs
        # to use the non aliased mappers in the natural path.
        p2 = PathRegistry.coerce(
            (
                cmapper,
                cmapper.attrs.employees,
                p_poly_insp,
                emapper.attrs.machines,
            )
        )
        eq_(
            p2.path,
            (
                cmapper,
                cmapper.attrs.employees,
                e_poly_insp,
                emapper.attrs.machines,
            ),
        )
        eq_(
            p2.natural_path,
            (
                cmapper,
                cmapper.attrs.employees,
                emapper,
                emapper.attrs.machines,
            ),
        )

    def test_with_poly_base_two(self):
        """A base-class attribute past a mid-path with_polymorphic keeps
        the wp entity in .path but base mappers in .natural_path."""
        Company = _poly_fixtures.Company
        Person = _poly_fixtures.Person
        Engineer = _poly_fixtures.Engineer
        cmapper = inspect(Company)
        pmapper = inspect(Person)
        p_poly = with_polymorphic(Person, [Engineer])
        e_poly_insp = inspect(p_poly.Engineer)  # noqa - used by comment below
        p_poly_insp = inspect(p_poly)
        p1 = PathRegistry.coerce(
            (
                cmapper,
                cmapper.attrs.employees,
                p_poly_insp,
                pmapper.attrs.paperwork,
            )
        )
        eq_(
            p1.path,
            (
                cmapper,
                cmapper.attrs.employees,
                p_poly_insp,
                pmapper.attrs.paperwork,
            ),
        )
        eq_(
            p1.natural_path,
            (
                cmapper,
                cmapper.attrs.employees,
                pmapper,
                pmapper.attrs.paperwork,
            ),
        )

    def test_nonpoly_oftype_aliased_subclass_onroot(self):
        """A root aliased subclass is preserved in both path forms."""
        Engineer = _poly_fixtures.Engineer
        eng_alias = aliased(Engineer)
        ea_insp = inspect(eng_alias)
        p1 = PathRegistry.coerce((ea_insp, ea_insp.mapper.attrs.paperwork))
        eq_(p1.path, (ea_insp, ea_insp.mapper.attrs.paperwork))
        eq_(p1.natural_path, (ea_insp, ea_insp.mapper.attrs.paperwork))

    def test_nonpoly_oftype_aliased_subclass(self):
        """A mid-path aliased subclass stays in .path; natural_path swaps
        in the base mapper."""
        Company = _poly_fixtures.Company
        Person = _poly_fixtures.Person
        Engineer = _poly_fixtures.Engineer
        cmapper = inspect(Company)
        pmapper = inspect(Person)
        eng_alias = aliased(Engineer)
        ea_insp = inspect(eng_alias)
        p1 = PathRegistry.coerce(
            (
                cmapper,
                cmapper.attrs.employees,
                ea_insp,
                ea_insp.mapper.attrs.paperwork,
            )
        )
        eq_(
            p1.path,
            (
                cmapper,
                cmapper.attrs.employees,
                ea_insp,
                ea_insp.mapper.attrs.paperwork,
            ),
        )
        eq_(
            p1.natural_path,
            (
                cmapper,
                cmapper.attrs.employees,
                pmapper,
                pmapper.attrs.paperwork,
            ),
        )

    def test_nonpoly_oftype_subclass(self):
        """A subclass mapper with a base-class attribute coerces to the
        base mapper in both .path and .natural_path."""
        Company = _poly_fixtures.Company
        Person = _poly_fixtures.Person
        Engineer = _poly_fixtures.Engineer
        emapper = inspect(Engineer)
        cmapper = inspect(Company)
        pmapper = inspect(Person)
        p1 = PathRegistry.coerce(
            (
                cmapper,
                cmapper.attrs.employees,
                emapper,
                emapper.attrs.paperwork,
            )
        )
        eq_(
            p1.path,
            (
                cmapper,
                cmapper.attrs.employees,
                pmapper,
                pmapper.attrs.paperwork,
            ),
        )
        eq_(
            p1.natural_path,
            (
                cmapper,
                cmapper.attrs.employees,
                pmapper,
                pmapper.attrs.paperwork,
            ),
        )

    def test_with_poly_base_one(self):
        """An attribute owned by the base class resolves to the base mapper."""
        Person = _poly_fixtures.Person
        Engineer = _poly_fixtures.Engineer
        pmapper = inspect(Person)
        emapper = inspect(Engineer)
        p_poly = with_polymorphic(Person, [Engineer])
        p_poly = inspect(p_poly)
        # "name" is actually on Person, not Engineer
        p1 = PathRegistry.coerce((p_poly, emapper.attrs.name))
        # polymorphic AliasedClass - because "name" is on Person,
        # we get Person, not Engineer
        eq_(p1.path, (p_poly, pmapper.attrs.name))

    def test_with_poly_use_mapper(self):
        """_use_mapper_path=True makes the wp act like the base mapper."""
        Person = _poly_fixtures.Person
        Engineer = _poly_fixtures.Engineer
        emapper = inspect(Engineer)
        p_poly = with_polymorphic(Person, [Engineer], _use_mapper_path=True)
        p_poly = inspect(p_poly)
        p1 = PathRegistry.coerce((p_poly, emapper.attrs.machines))
        # polymorphic AliasedClass with the "use_mapper_path" flag -
        # the AliasedClass acts just like the base mapper
        eq_(p1.path, (emapper, emapper.attrs.machines))
| |
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module containing the various stages that a builder runs."""
from __future__ import print_function
import json
import os
from chromite.cbuildbot import commands
from chromite.cbuildbot import failures_lib
from chromite.cbuildbot.stages import artifact_stages
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import gs
from chromite.lib import osutils
from chromite.lib import parallel
from chromite.lib import timeout_util
from chromite.lib.paygen import gspaths
from chromite.lib.paygen import paygen_build_lib
class InvalidTestConditionException(Exception):
  """Raised when pre-conditions for a test aren't met.

  E.g. SignerTestStage.PerformStage raises this when the recovery image
  never becomes available.
  """
class SignerTestStage(artifact_stages.ArchivingStage):
  """Run signer related tests."""

  option_name = 'tests'
  config_name = 'signer_tests'

  # If the signer tests take longer than 30 minutes, abort. They usually take
  # five minutes to run.
  SIGNER_TEST_TIMEOUT = 30 * 60

  def PerformStage(self):
    """Wait for the recovery image, then run the signer tests under a timeout.

    Raises:
      InvalidTestConditionException: if the recovery image is not available.
      (Presumably timeout_util.Timeout raises if SIGNER_TEST_TIMEOUT is
      exceeded — confirm against timeout_util.)
    """
    if not self.archive_stage.WaitForRecoveryImage():
      raise InvalidTestConditionException('Missing recovery image.')
    with timeout_util.Timeout(self.SIGNER_TEST_TIMEOUT):
      commands.RunSignerTests(self._build_root, self._current_board)
class SignerResultsTimeout(failures_lib.StepFailure):
  """The signer did not produce any results inside the expected time.

  Raised by PaygenStage._WaitForSigningResults when polling for results
  exceeds SIGNING_TIMEOUT.
  """
class SignerFailure(failures_lib.StepFailure):
  """The signer returned an error result."""
  # NOTE(review): not raised anywhere in this excerpt; presumably raised
  # when collected signer results indicate failure — confirm downstream.
class MissingInstructionException(failures_lib.StepFailure):
  """We didn't receive the list of signing instructions PushImage uploaded.

  Raised by PaygenStage._WaitForPushImage when push_image signals an error
  by delivering None.
  """
class MalformedResultsException(failures_lib.StepFailure):
  """The Signer results aren't formatted as we expect.

  Raised by PaygenStage._JsonFromUrl when a downloaded result file is not
  valid JSON.
  """
class PaygenSigningRequirementsError(failures_lib.StepFailure):
  """Paygen stage can't run if signing failed."""
  # NOTE(review): not raised in this excerpt — presumably raised once
  # signing results show failures; confirm downstream.
class PaygenCrostoolsNotAvailableError(failures_lib.StepFailure):
  """Paygen stage can't run if the crostools code is not available.

  NOTE(review): the previous docstring ("Paygen stage can't run if signing
  failed.") was a copy-paste of PaygenSigningRequirementsError's; reworded
  per the class name — confirm intended meaning.
  """
class PaygenNoPaygenConfigForBoard(failures_lib.StepFailure):
  """Paygen can't run with a release.conf config for the board.

  Treated as a warning (not a failure) by PaygenStage._HandleStageException,
  as this is common during new board bring up.
  """
class PaygenStage(artifact_stages.ArchivingStage):
  """Stage that generates release payloads.

  If this stage is created with a 'channels' argument, it can run
  independently. Otherwise, it's dependent on values queued up by
  the ArchiveStage (push_image).
  """

  option_name = 'paygen'
  config_name = 'paygen'

  # Poll for new results every 30 seconds.
  SIGNING_PERIOD = 30

  # Timeout for the signing process. 2 hours in seconds.
  SIGNING_TIMEOUT = 2 * 60 * 60

  # Sentinel marker value; presumably used to signal the end of channel
  # processing — TODO confirm against the rest of this class.
  FINISHED = 'finished'
  def __init__(self, builder_run, board, archive_stage, channels=None,
               **kwargs):
    """Init that accepts the channels argument, if present.

    Args:
      builder_run: See builder_run on ArchivingStage.
      board: See board on ArchivingStage.
      archive_stage: See archive_stage on ArchivingStage.
      channels: Explicit list of channels to generate payloads for.
        If empty, will instead wait on values from push_image.
        Channels is normally None in release builds, and normally set
        for trybot 'payloads' builds.
    """
    super(PaygenStage, self).__init__(builder_run, board, archive_stage,
                                      **kwargs)
    # Maps channel -> {result url -> parsed signer json}; populated as
    # completed results are collected by _CheckForResults.
    self.signing_results = {}
    self.channels = channels
  def _HandleStageException(self, exc_info):
    """Override and don't set status to FAIL but FORGIVEN instead.

    Args:
      exc_info: (exc_type, exc_value, exc_tb) tuple describing the active
        exception.

    Returns:
      The result of _HandleExceptionAsWarning for the forgiven exception
      types below, otherwise the superclass's handling.
    """
    exc_type, exc_value, _exc_tb = exc_info

    # If Paygen fails to find anything needed in release.conf, treat it
    # as a warning, not a failure. This is common during new board bring up.
    if issubclass(exc_type, PaygenNoPaygenConfigForBoard):
      return self._HandleExceptionAsWarning(exc_info)

    # Warn so people look at ArchiveStage for the real error.
    if issubclass(exc_type, MissingInstructionException):
      return self._HandleExceptionAsWarning(exc_info)

    # If the exception is a TestLabFailure that means we couldn't schedule the
    # test. We don't fail the build for that. We do the CompoundFailure dance,
    # because that's how we'll get failures from background processes returned
    # to us.
    if (issubclass(exc_type, failures_lib.TestLabFailure) or
        (issubclass(exc_type, failures_lib.CompoundFailure) and
         exc_value.MatchesFailureType(failures_lib.TestLabFailure))):
      return self._HandleExceptionAsWarning(exc_info)

    return super(PaygenStage, self)._HandleStageException(exc_info)
def _JsonFromUrl(self, gs_ctx, url):
"""Fetch a GS Url, and parse it as Json.
Args:
gs_ctx: GS Context.
url: Url to fetch and parse.
Returns:
None if the Url doesn't exist.
Parsed Json structure if it did.
Raises:
MalformedResultsException if it failed to parse.
"""
try:
signer_txt = gs_ctx.Cat(url)
except gs.GSNoSuchKey:
return None
try:
return json.loads(signer_txt)
except ValueError:
# We should never see malformed Json, even for intermediate statuses.
raise MalformedResultsException(signer_txt)
def _SigningStatusFromJson(self, signer_json):
"""Extract a signing status from a signer result Json DOM.
Args:
signer_json: The parsed json status from a signer operation.
Returns:
string with a simple status: 'passed', 'failed', 'downloading', etc,
or '' if the json doesn't contain a status.
"""
return (signer_json or {}).get('status', {}).get('status', '')
  def _CheckForResults(self, gs_ctx, instruction_urls_per_channel,
                       channel_notifier):
    """timeout_util.WaitForSuccess func to check a list of signer results.

    Args:
      gs_ctx: Google Storage Context.
      instruction_urls_per_channel: Urls of the signer result files
        we're expecting.
      channel_notifier: BackgroundTaskRunner into which we push channels for
        processing.

    Returns:
      True if results for every expected URL in every channel have been
      collected, False otherwise.  (The previous docstring said "Number of
      results not yet collected", but the value returned is a boolean.)
    """
    COMPLETED_STATUS = ('passed', 'failed')

    # Assume we are done, then try to prove otherwise.
    results_completed = True

    for channel in instruction_urls_per_channel.keys():
      self.signing_results.setdefault(channel, {})

      # Channel already has a full set of results; nothing more to fetch.
      if (len(self.signing_results[channel]) ==
          len(instruction_urls_per_channel[channel])):
        continue

      for url in instruction_urls_per_channel[channel]:
        # Convert from instructions URL to instructions result URL.
        url += '.json'

        # We already have a result for this URL.
        if url in self.signing_results[channel]:
          continue

        try:
          signer_json = self._JsonFromUrl(gs_ctx, url)
        except MalformedResultsException as e:
          # Bad intermediate data: log and retry on the next poll.
          logging.warning('Received malformed json: %s', e)
          continue

        if self._SigningStatusFromJson(signer_json) in COMPLETED_STATUS:
          # If we find a completed result, remember it.
          self.signing_results[channel][url] = signer_json

      # If we don't have full results for this channel, we aren't done
      # waiting.
      if (len(self.signing_results[channel]) !=
          len(instruction_urls_per_channel[channel])):
        results_completed = False
        continue

      # If we reach here, the channel has just been completed for the first
      # time.

      # If all results 'passed' the channel was successfully signed.
      channel_success = True
      for signer_result in self.signing_results[channel].values():
        if self._SigningStatusFromJson(signer_result) != 'passed':
          channel_success = False

      # If we successfully completed the channel, inform paygen.
      if channel_success:
        channel_notifier(channel)

    return results_completed
def _WaitForPushImage(self):
  """Block until push_image data is ready.

  Returns:
    Push_image results, expected to be of the form:
    { 'channel': ['gs://instruction_uri1', 'gs://signer_instruction_uri2'] }

  Raises:
    MissingInstructionException: If push_image sent us an error, or timed out.
  """
  # This call will NEVER time out.
  urls_by_channel = self.board_runattrs.GetParallel(
      'instruction_urls_per_channel', timeout=None)

  # A value of None signals an error in PushImage.
  if urls_by_channel is None:
    raise MissingInstructionException(
        'ArchiveStage: PushImage failed. No images means no Paygen.')

  return urls_by_channel
def _WaitForSigningResults(self,
                           instruction_urls_per_channel,
                           channel_notifier):
  """Do the work of waiting for signer results and logging them.

  Polls via _CheckForResults until every expected result arrives (or
  SIGNING_TIMEOUT expires), then logs every collected result and raises if
  any signer reported a status other than 'passed'.

  Args:
    instruction_urls_per_channel: push_image data (see _WaitForPushImage).
    channel_notifier: BackgroundTaskRunner into which we push channels for
      processing.

  Raises:
    ValueError: If the signer result isn't valid json.
    RunCommandError: If we are unable to download signer results.
    SignerResultsTimeout: If waiting for all signer results times out.
    SignerFailure: If any collected result did not have status 'passed'.
  """
  gs_ctx = gs.GSContext(dry_run=self._run.debug)

  try:
    logging.info('Waiting for signer results.')
    timeout_util.WaitForReturnTrue(
        self._CheckForResults,
        func_args=(gs_ctx, instruction_urls_per_channel, channel_notifier),
        timeout=self.SIGNING_TIMEOUT, period=self.SIGNING_PERIOD)
  except timeout_util.TimeoutError:
    msg = 'Image signing timed out.'
    logging.error(msg)
    cros_build_lib.PrintBuildbotStepText(msg)
    raise SignerResultsTimeout(msg)

  # Log all signer results, then handle any signing failures.
  failures = []
  for url_results in self.signing_results.values():
    for url, signer_result in url_results.iteritems():
      result_description = os.path.basename(url)
      cros_build_lib.PrintBuildbotStepText(result_description)
      logging.info('Received results for: %s', result_description)
      logging.info(json.dumps(signer_result, indent=4))

      status = self._SigningStatusFromJson(signer_result)
      if status != 'passed':
        failures.append(result_description)
        logging.error('Signing failed for: %s', result_description)

  if failures:
    logging.error('Failure summary:')
    for failure in failures:
      logging.error(' %s', failure)
    raise SignerFailure(', '.join([str(f) for f in failures]))
def PerformStage(self):
  """Do the work of generating our release payloads.

  Validates the board's Paygen configuration, then generates payloads per
  channel in background workers.  Channels come either from an explicit
  self.channels list or from push_image/signer results as they complete.
  """
  # Convert to release tools naming for boards.
  board = self._current_board.replace('_', '-')
  version = self._run.attrs.release_tag

  assert version, "We can't generate payloads without a release_tag."
  logging.info("Generating payloads for: %s, %s", board, version)

  # Test to see if the current board has a Paygen configuration. We do
  # this here, not in the sub-process, so we don't have to pass back a
  # failure reason.
  try:
    paygen_build_lib.ValidateBoardConfig(board)
  except paygen_build_lib.BoardNotConfigured:
    raise PaygenNoPaygenConfigForBoard(
        'No release.conf entry was found for board %s. Get a TPM to fix.' %
        board)

  # Each queued channel is processed by _RunPaygenInProcess in a background
  # worker; channel_notifier enqueues one work item per channel.
  with parallel.BackgroundTaskRunner(self._RunPaygenInProcess) as per_channel:
    def channel_notifier(channel):
      per_channel.put((channel, board, version, self._run.debug,
                       self._run.config.paygen_skip_testing,
                       self._run.config.paygen_skip_delta_payloads))

    if self.channels:
      logging.info("Using explicit channels: %s", self.channels)
      # If we have an explicit list of channels, use it.
      for channel in self.channels:
        channel_notifier(channel)
    else:
      # Otherwise wait for push_image data and then for signer results to
      # tell us which channels are ready.
      instruction_urls_per_channel = self._WaitForPushImage()
      self._WaitForSigningResults(instruction_urls_per_channel,
                                  channel_notifier)
def _RunPaygenInProcess(self, channel, board, version, debug,
                        disable_tests, skip_delta_payloads):
  """Helper for PaygenStage that invokes payload generation.

  This method is intended to be safe to invoke inside a process.

  Args:
    channel: Channel of payloads to generate ('stable', 'beta', etc)
    board: Board of payloads to generate ('x86-mario', 'x86-alex-he', etc)
    version: Version of payloads to generate.
    debug: Flag telling if this is a real run, or a test run.
    disable_tests: Do not generate test artifacts or run payload tests.
    skip_delta_payloads: Skip generating delta payloads.
  """
  # Convert to release tools naming for channels.
  if not channel.endswith('-channel'):
    channel += '-channel'

  with osutils.TempDir(sudo_rm=True) as tempdir:
    # Create the definition of the build to generate payloads for.
    build = gspaths.Build(channel=channel,
                          board=board,
                          version=version)
    try:
      # Generate the payloads.
      self._PrintLoudly('Starting %s, %s, %s' % (channel, version, board))
      paygen_build_lib.CreatePayloads(build,
                                      work_dir=tempdir,
                                      dry_run=debug,
                                      run_parallel=True,
                                      run_on_builder=True,
                                      skip_delta_payloads=skip_delta_payloads,
                                      disable_tests=disable_tests)
    except (paygen_build_lib.BuildFinished,
            paygen_build_lib.BuildLocked,
            paygen_build_lib.BuildSkip) as e:
      # These errors are normal if it's possible for another process to
      # work on the same build. This process could be a Paygen server, or
      # another builder (perhaps by a trybot generating payloads on request).
      #
      # This means the build was finished by the other process, is already
      # being processed (so the build is locked), or that it's been marked
      # to skip (probably done manually).
      logging.info('Paygen skipped because: %s', e)
| |
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id$
'''Audio and video playback.
pyglet can play WAV files, and if AVbin is installed, many other audio and
video formats.
Playback is handled by the `Player` class, which reads raw data from `Source`
objects and provides methods for pausing, seeking, adjusting the volume, and
so on. The `Player` class implements the best available audio device
(currently, only OpenAL is supported)::
player = Player()
A `Source` is used to decode arbitrary audio and video files. It is
associated with a single player by "queuing" it::
source = load('background_music.mp3')
player.queue(source)
Use the `Player` to control playback.
If the source contains video, the `Source.video_format` attribute will be
non-None, and the `Player.texture` attribute will contain the current video
image synchronised to the audio.
Decoding sounds can be processor-intensive and may introduce latency,
particularly for short sounds that must be played quickly, such as bullets or
explosions. You can force such sounds to be decoded and retained in memory
rather than streamed from disk by wrapping the source in a `StaticSource`::
bullet_sound = StaticSource(load('bullet.wav'))
The other advantage of a `StaticSource` is that it can be queued on any number
of players, and so played many times simultaneously.
pyglet relies on Python's garbage collector to release resources when a player
has finished playing a source. In this way some operations that could affect
the application performance can be delayed.
The player provides a `Player.delete()` method that can be used to release
resources immediately. Also an explicit call to `gc.collect()`can be used to
collect unused resources.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import atexit
import ctypes
import heapq
import sys
import threading
import time
import warnings
import pyglet
from pyglet.compat import bytes_type, BytesIO
_debug = pyglet.options['debug_media']
class MediaException(Exception):
    '''Base class for all media-related errors raised by this module.'''
    pass
class MediaFormatException(MediaException):
    '''Raised for errors related to the format of media data.'''
    pass
class CannotSeekException(MediaException):
    '''Raised when a source does not support seeking.'''
    pass
class MediaThread(object):
    '''A daemon worker thread that exits cleanly on interpreter shutdown.

    Wraps a ``threading.Thread`` with an interruptible `sleep` and a
    `stop` method; all live instances are tracked so `_atexit` can stop
    them at shutdown.

    :Ivariables:
        `condition` : threading.Condition
            Lock condition on all instance variables.
        `stopped` : bool
            True if `stop` has been called.

    '''
    _threads = set()
    _threads_lock = threading.Lock()

    def __init__(self, target=None):
        self._thread = threading.Thread(target=self._thread_run)
        self._thread.daemon = True
        if target is not None:
            self.run = target

        self.condition = threading.Condition()
        self.stopped = False

    @classmethod
    def _atexit(cls):
        # Snapshot under the lock; stop() joins, so call it unlocked.
        with cls._threads_lock:
            live = list(cls._threads)
        for thread in live:
            thread.stop()

    def run(self):
        pass

    def _thread_run(self):
        if pyglet.options['debug_trace']:
            pyglet._install_trace()

        with self._threads_lock:
            self._threads.add(self)
        self.run()
        with self._threads_lock:
            self._threads.remove(self)

    def start(self):
        self._thread.start()

    def stop(self):
        '''Stop the thread and wait for it to terminate.

        The `stop` instance variable is set to ``True`` and the condition is
        notified.  It is the responsibility of the `run` method to check
        the value of `stop` after each sleep or wait and to return if set.
        '''
        if _debug:
            print('MediaThread.stop()')
        with self.condition:
            self.stopped = True
            self.condition.notify()
        self._thread.join()

    def sleep(self, timeout):
        '''Wait for some amount of time, or until notified.

        :Parameters:
            `timeout` : float
                Time to wait, in seconds.

        '''
        if _debug:
            print('MediaThread.sleep(%r)' % timeout)
        with self.condition:
            self.condition.wait(timeout)

    def notify(self):
        '''Interrupt the current sleep operation.

        If the thread is currently sleeping, it will be woken immediately,
        instead of waiting the full duration of the timeout.
        '''
        if _debug:
            print('MediaThread.notify()')
        with self.condition:
            self.condition.notify()
# Stop all tracked MediaThread instances when the interpreter shuts down.
atexit.register(MediaThread._atexit)
class WorkerThread(MediaThread):
    '''A `MediaThread` that runs queued callables until stopped.'''

    def __init__(self, target=None):
        super(WorkerThread, self).__init__(target)
        self._jobs = []

    def run(self):
        # Process jobs until get_job() returns a falsy value (None after
        # stop(), or any falsy job).
        job = self.get_job()
        while job:
            job()
            job = self.get_job()

    def get_job(self):
        '''Block until a job is available or the thread is stopped.

        Returns the next queued job, or None once `stop` has been called.
        '''
        with self.condition:
            while self._empty() and not self.stopped:
                self.condition.wait()
            result = None if self.stopped else self._get()
        return result

    def put_job(self, job):
        '''Queue a callable and wake the worker.'''
        with self.condition:
            self._put(job)
            self.condition.notify()

    def clear_jobs(self):
        '''Discard all pending jobs and wake the worker.'''
        with self.condition:
            self._clear()
            self.condition.notify()

    # Internal queue primitives; caller must hold self.condition.
    def _empty(self):
        return not self._jobs

    def _get(self):
        return self._jobs.pop(0)

    def _put(self, job):
        self._jobs.append(job)

    def _clear(self):
        del self._jobs[:]
class AudioFormat(object):
    '''Audio details.

    An instance of this class is provided by sources with audio tracks.  You
    should not modify the fields, as they are used internally to describe the
    format of data provided by the source.

    :Ivariables:
        `channels` : int
            The number of channels: 1 for mono or 2 for stereo (pyglet does
            not yet support surround-sound sources).
        `sample_size` : int
            Bits per sample; only 8 or 16 are supported.
        `sample_rate` : int
            Samples per second (in Hertz).

    '''

    def __init__(self, channels, sample_size, sample_rate):
        self.channels = channels
        self.sample_size = sample_size
        self.sample_rate = sample_rate

        # Convenience
        self.bytes_per_sample = (sample_size >> 3) * channels
        self.bytes_per_second = self.bytes_per_sample * sample_rate

    def __eq__(self, other):
        # FIX: previously comparing against a non-AudioFormat raised
        # AttributeError; return NotImplemented so Python falls back to
        # its default comparison behaviour instead.
        if not isinstance(other, AudioFormat):
            return NotImplemented
        return (self.channels == other.channels and
                self.sample_size == other.sample_size and
                self.sample_rate == other.sample_rate)

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __repr__(self):
        return '%s(channels=%d, sample_size=%d, sample_rate=%d)' % (
            self.__class__.__name__, self.channels, self.sample_size,
            self.sample_rate)
class VideoFormat(object):
    '''Video details.

    An instance of this class is provided by sources with a video track.  You
    should not modify the fields.

    Note that the sample aspect has no relation to the aspect ratio of the
    video image.  For example, a video image of 640x480 with sample aspect 2.0
    should be displayed at 1280x480.  It is the responsibility of the
    application to perform this scaling.

    :Ivariables:
        `width` : int
            Width of video image, in pixels.
        `height` : int
            Height of video image, in pixels.
        `sample_aspect` : float
            Aspect ratio (width over height) of a single video pixel.
        `frame_rate` : float
            Frame rate (frames per second) of the video.

            AVbin 8 or later is required, otherwise the frame rate will be
            ``None``.

            **Since:** pyglet 1.2.

    '''

    def __init__(self, width, height, sample_aspect=1.0):
        # Filled in by the decoder when available (requires AVbin 8+).
        self.frame_rate = None
        self.width = width
        self.height = height
        self.sample_aspect = sample_aspect
class AudioData(object):
    '''A single packet of audio data.

    This class is used internally by pyglet.

    :Ivariables:
        `data` : str or ctypes array or pointer
            Sample data.
        `length` : int
            Size of sample data, in bytes.
        `timestamp` : float
            Time of the first sample, in seconds.
        `duration` : float
            Total data duration, in seconds.
        `events` : list of MediaEvent
            List of events contained within this packet.  Events are
            timestamped relative to this audio packet.

    '''

    def __init__(self, data, length, timestamp, duration, events):
        self.data = data
        self.length = length
        self.timestamp = timestamp
        self.duration = duration
        self.events = events

    def consume(self, bytes, audio_format):
        '''Remove some data from beginning of packet.  All events are
        cleared.'''
        self.events = ()

        if bytes == self.length:
            # Entire packet consumed: drop the buffer and move the
            # timestamp past the packet.
            self.data = None
            self.length = 0
            self.timestamp += self.duration
            self.duration = 0.
            return
        if bytes == 0:
            return

        if not isinstance(self.data, str):
            # XXX Copy the whole packet into a string buffer before
            # slicing.  Pointer arithmetic could avoid the copy, but this
            # is simpler (and probably faster than fudging around with
            # ctypes).
            copied = ctypes.create_string_buffer(self.length)
            ctypes.memmove(copied, self.data, self.length)
            self.data = copied

        self.data = self.data[bytes:]
        self.length -= bytes
        consumed_seconds = bytes / float(audio_format.bytes_per_second)
        self.duration -= consumed_seconds
        self.timestamp += consumed_seconds

    def get_string_data(self):
        '''Return data as a string.  (Python 3: return as bytes)'''
        if isinstance(self.data, bytes_type):
            return self.data

        raw = ctypes.create_string_buffer(self.length)
        ctypes.memmove(raw, self.data, self.length)
        return raw.raw
class MediaEvent(object):
    '''An event to be dispatched to a `Player`, timestamped relative to
    its containing audio packet.'''

    def __init__(self, timestamp, event, *args):
        # Meaning of timestamp is dependent on context; and not seen by
        # application.
        self.timestamp = timestamp
        self.event = event
        self.args = args

    def _sync_dispatch_to_player(self, player):
        pyglet.app.platform_event_loop.post_event(player, self.event, *self.args)
        time.sleep(0)
        # TODO sync with media.dispatch_events

    def __repr__(self):
        return '%s(%r, %r, %r)' % (
            self.__class__.__name__, self.timestamp, self.event, self.args)

    def __lt__(self, other):
        # Arbitrary but consistent ordering (by hash) so events can be
        # stored in ordered containers such as heaps.
        return hash(self) < hash(other)
class SourceInfo(object):
    '''Source metadata information.

    Fields are the empty string or zero if the information is not available.

    :Ivariables:
        `title` : str
            Title
        `author` : str
            Author
        `copyright` : str
            Copyright statement
        `comment` : str
            Comment
        `album` : str
            Album name
        `year` : int
            Year
        `track` : int
            Track number
        `genre` : str
            Genre

    :since: pyglet 1.2
    '''

    # Class-level defaults; presumably decoders set these on instances when
    # the container provides the metadata -- confirm against decoder code.
    title = ''
    author = ''
    copyright = ''
    comment = ''
    album = ''
    year = 0
    track = 0
    genre = ''
class Source(object):
    '''An audio and/or video source.

    :Ivariables:
        `audio_format` : `AudioFormat`
            Format of the audio in this source, or None if the source is
            silent.
        `video_format` : `VideoFormat`
            Format of the video in this source, or None if there is no
            video.
        `info` : `SourceInfo`
            Source metadata such as title, artist, etc; or None if the
            information is not available.

            **Since:** pyglet 1.2

    '''
    _duration = None

    audio_format = None
    video_format = None
    info = None

    def _get_duration(self):
        return self._duration

    # Keep the lambda indirection so subclass overrides of _get_duration
    # are honoured by the property.
    duration = property(lambda self: self._get_duration(),
                        doc='''The length of the source, in seconds.

        Not all source durations can be determined; in this case the value
        is None.

        Read-only.

        :type: float
        ''')

    def play(self):
        '''Play the source.

        This is a convenience method which creates a Player for
        this source and plays it immediately.

        :rtype: `Player`
        '''
        new_player = Player()
        new_player.queue(self)
        new_player.play()
        return new_player

    def get_animation(self):
        '''Import all video frames into memory as an `Animation`.

        An empty animation will be returned if the source has no video.
        Otherwise, the animation will contain all unplayed video frames (the
        entire source, if it has not been queued on a player).  After creating
        the animation, the source will be at EOS.

        This method is unsuitable for videos running longer than a
        few seconds.

        :since: pyglet 1.1

        :rtype: `pyglet.image.Animation`
        '''
        from pyglet.image import Animation, AnimationFrame
        if not self.video_format:
            return Animation([])

        frames = []
        previous_ts = 0
        upcoming_ts = self.get_next_video_timestamp()
        while upcoming_ts is not None:
            frame_image = self.get_next_video_frame()
            if frame_image is not None:
                # Each frame is displayed for the gap between timestamps.
                frames.append(AnimationFrame(frame_image,
                                             upcoming_ts - previous_ts))
                previous_ts = upcoming_ts
            upcoming_ts = self.get_next_video_timestamp()
        return Animation(frames)

    def get_next_video_timestamp(self):
        '''Get the timestamp of the next video frame.

        :since: pyglet 1.1

        :rtype: float
        :return: The next timestamp, or ``None`` if there are no more video
            frames.
        '''
        return None

    def get_next_video_frame(self):
        '''Get the next video frame.

        Video frames may share memory: the previous frame may be invalidated
        or corrupted when this method is called unless the application has
        made a copy of it.

        :since: pyglet 1.1

        :rtype: `pyglet.image.AbstractImage`
        :return: The next video frame image, or ``None`` if the video frame
            could not be decoded or there are no more video frames.
        '''
        return None

    # Internal methods that SourceGroup calls on the source:

    def seek(self, timestamp):
        '''Seek to given timestamp; base sources cannot seek.'''
        raise CannotSeekException()

    def _get_queue_source(self):
        '''Return the `Source` to be used as the queue source for a player.

        Default implementation returns self.'''
        return self

    def get_audio_data(self, bytes):
        '''Get next packet of audio data.

        :Parameters:
            `bytes` : int
                Maximum number of bytes of data to return.

        :rtype: `AudioData`
        :return: Next packet of audio data, or None if there is no (more)
            data.
        '''
        return None
class StreamingSource(Source):
    '''A source that is decoded as it is being played, and can only be
    queued once.
    '''
    _is_queued = False

    is_queued = property(lambda self: self._is_queued,
                         doc='''Determine if this source has been queued
        on a `Player` yet.

        Read-only.

        :type: bool
        ''')

    def _get_queue_source(self):
        '''Return the `Source` to be used as the queue source for a player.

        A streaming source may only be queued once; a second attempt raises
        `MediaException`.'''
        if self._is_queued:
            raise MediaException('This source is already queued on a player.')

        self._is_queued = True
        return self
class StaticSource(Source):
    '''A source that has been completely decoded in memory.  This source can
    be queued onto multiple players any number of times.
    '''

    def __init__(self, source):
        '''Construct a `StaticSource` for the data in `source`.

        :Parameters:
            `source` : `Source`
                The source to read and decode audio and video data from.

        '''
        source = source._get_queue_source()
        if source.video_format:
            raise NotImplementedError(
                'Static sources not supported for video yet.')

        self.audio_format = source.audio_format
        if not self.audio_format:
            # Silent source: nothing to buffer.
            return

        # Arbitrary: number of bytes to request at a time.
        buffer_size = 1 << 20  # 1 MB

        # Naive implementation.  Driver-specific implementations may override
        # to load static audio data into device (or at least driver) memory.
        buffered = BytesIO()
        packet = source.get_audio_data(buffer_size)
        while packet:
            buffered.write(packet.get_string_data())
            packet = source.get_audio_data(buffer_size)

        self._data = buffered.getvalue()
        self._duration = (len(self._data) /
                          float(self.audio_format.bytes_per_second))

    def _get_queue_source(self):
        # Each player gets its own read cursor over the shared buffer.
        return StaticMemorySource(self._data, self.audio_format)

    def get_audio_data(self, bytes):
        '''Always raises: read via `_get_queue_source` instead.'''
        raise RuntimeError('StaticSource cannot be queued.')
class StaticMemorySource(StaticSource):
    '''Helper class for default implementation of `StaticSource`.  Do not use
    directly.'''

    def __init__(self, data, audio_format):
        '''Construct a memory source over the given data buffer.
        '''
        self._file = BytesIO(data)
        self._max_offset = len(data)
        self.audio_format = audio_format
        self._duration = len(data) / float(audio_format.bytes_per_second)

    def seek(self, timestamp):
        '''Move the read cursor to `timestamp`, aligned to a whole sample.'''
        offset = int(timestamp * self.audio_format.bytes_per_second)

        # Align to sample
        sample_bytes = self.audio_format.bytes_per_sample
        if sample_bytes == 2:
            offset &= 0xfffffffe
        elif sample_bytes == 4:
            offset &= 0xfffffffc

        self._file.seek(offset)

    def get_audio_data(self, bytes):
        '''Return up to `bytes` of data from the cursor, or None at EOS.'''
        offset = self._file.tell()
        timestamp = float(offset) / self.audio_format.bytes_per_second

        # Align to sample size
        sample_bytes = self.audio_format.bytes_per_sample
        if sample_bytes == 2:
            bytes &= 0xfffffffe
        elif sample_bytes == 4:
            bytes &= 0xfffffffc

        chunk = self._file.read(bytes)
        if not chunk:
            return None

        duration = float(len(chunk)) / self.audio_format.bytes_per_second
        return AudioData(chunk, len(chunk), timestamp, duration, [])
class SourceGroup(object):
    '''Read data from a queue of sources, with support for looping.  All
    sources must share the same audio format.

    :Ivariables:
        `audio_format` : `AudioFormat`
            Required audio format for queued sources.

    '''
    # TODO can sources list go empty?  what behaviour (ignore or error)?

    # When True, the next EOS advances to the following source even if
    # `loop` is set (armed by next_source(immediate=False)).
    _advance_after_eos = False
    # When True, replay the current source on EOS instead of advancing.
    _loop = False

    def __init__(self, audio_format, video_format):
        self.audio_format = audio_format
        self.video_format = video_format
        self.duration = 0.
        self._timestamp_offset = 0.
        self._dequeued_durations = []
        self._sources = []

    def seek(self, time):
        # Seeks within the current (head) source only.
        if self._sources:
            self._sources[0].seek(time)

    def queue(self, source):
        source = source._get_queue_source()
        assert(source.audio_format == self.audio_format)
        self._sources.append(source)
        self.duration += source.duration

    def has_next(self):
        return len(self._sources) > 1

    def next_source(self, immediate=True):
        if immediate:
            self._advance()
        else:
            self._advance_after_eos = True

    #: :deprecated: Use `next_source` instead.
    next = next_source  # old API, worked badly with 2to3

    def get_current_source(self):
        # Implicitly returns None when the queue is empty.
        if self._sources:
            return self._sources[0]

    def _advance(self):
        # Drop the head source, remembering its duration so that
        # translate_timestamp can still map old player timestamps.
        if self._sources:
            self._timestamp_offset += self._sources[0].duration
            self._dequeued_durations.insert(0, self._sources[0].duration)
            old_source = self._sources.pop(0)
            self.duration -= old_source.duration

    def _get_loop(self):
        return self._loop

    def _set_loop(self, loop):
        self._loop = loop

    loop = property(_get_loop, _set_loop,
                    doc='''Loop the current source indefinitely or until
    `next` is called.  Initially False.

    :type: bool
    ''')

    def get_audio_data(self, bytes):
        '''Get next audio packet.

        :Parameters:
            `bytes` : int
                Hint for preferred size of audio packet; may be ignored.

        :rtype: `AudioData`
        :return: Audio data, or None if there is no more data.
        '''

        data = self._sources[0].get_audio_data(bytes)
        eos = False
        while not data:
            # Head source exhausted: either loop it or advance past it.
            eos = True
            if self._loop and not self._advance_after_eos:
                # Restart the same source, accounting for the completed
                # pass so translate_timestamp stays consistent.
                self._timestamp_offset += self._sources[0].duration
                self._dequeued_durations.insert(0, self._sources[0].duration)
                self._sources[0].seek(0)
            else:
                self._advance_after_eos = False

                # Advance source if there's something to advance to.
                # Otherwise leave last source paused at EOS.
                if len(self._sources) > 1:
                    self._advance()
                else:
                    return None

            data = self._sources[0].get_audio_data(bytes)  # TODO method rename

        # Timestamps are reported in group time, not source time.
        data.timestamp += self._timestamp_offset
        if eos:
            if _debug:
                print('adding on_eos event to audio data')
            data.events.append(MediaEvent(0, 'on_eos'))
        return data

    def translate_timestamp(self, timestamp):
        '''Get source-relative timestamp for the audio player's timestamp.'''
        # XXX
        if timestamp is None:
            return None

        timestamp = timestamp - self._timestamp_offset
        if timestamp < 0:
            # Timestamp is from a source that has already been dequeued
            # (or an earlier loop pass); walk back through the recorded
            # durations to re-localise it.
            for duration in self._dequeued_durations[::-1]:
                timestamp += duration
                if timestamp > 0:
                    break
            assert timestamp >= 0, 'Timestamp beyond dequeued source memory'
        return timestamp

    def get_next_video_timestamp(self):
        '''Get the timestamp of the next video frame.

        :rtype: float
        :return: The next timestamp, or ``None`` if there are no more video
            frames.
        '''
        # TODO track current video source independently from audio source for
        # better prebuffering.
        timestamp = self._sources[0].get_next_video_timestamp()
        if timestamp is not None:
            timestamp += self._timestamp_offset
        return timestamp

    def get_next_video_frame(self):
        '''Get the next video frame.

        Video frames may share memory: the previous frame may be invalidated
        or corrupted when this method is called unless the application has
        made a copy of it.

        :rtype: `pyglet.image.AbstractImage`
        :return: The next video frame image, or ``None`` if the video frame
            could not be decoded or there are no more video frames.
        '''
        return self._sources[0].get_next_video_frame()
class AbstractAudioPlayer(object):
    '''Base class for driver audio players.
    '''

    def __init__(self, source_group, player):
        '''Create a new audio player.

        :Parameters:
            `source_group` : `SourceGroup`
                Source group to play from.
            `player` : `Player`
                Player to receive EOS and video frame sync events.

        '''
        self.source_group = source_group
        self.player = player

    def play(self):
        '''Begin playback.'''
        raise NotImplementedError('abstract')

    def stop(self):
        '''Stop (pause) playback.'''
        raise NotImplementedError('abstract')

    def delete(self):
        '''Stop playing and clean up all resources used by player.'''
        raise NotImplementedError('abstract')

    def _play_group(self, audio_players):
        '''Begin simultaneous playback on a list of audio players.'''
        # This should be overridden by subclasses for better synchrony.
        for player in audio_players:
            player.play()

    def _stop_group(self, audio_players):
        '''Stop simultaneous playback on a list of audio players.'''
        # This should be overridden by subclasses for better synchrony.
        # FIX: previously called player.play() here, which (re)started
        # playback instead of stopping it.
        for player in audio_players:
            player.stop()

    def clear(self):
        '''Clear all buffered data and prepare for replacement data.

        The player should be stopped before calling this method.
        '''
        raise NotImplementedError('abstract')

    def get_time(self):
        '''Return approximation of current playback time within current source.

        Returns ``None`` if the audio player does not know what the playback
        time is (for example, before any valid audio data has been read).

        :rtype: float
        :return: current play cursor time, in seconds.
        '''
        # TODO determine which source within group
        raise NotImplementedError('abstract')

    def set_volume(self, volume):
        '''See `Player.volume`.'''
        pass

    def set_position(self, position):
        '''See `Player.position`.'''
        pass

    def set_min_distance(self, min_distance):
        '''See `Player.min_distance`.'''
        pass

    def set_max_distance(self, max_distance):
        '''See `Player.max_distance`.'''
        pass

    def set_pitch(self, pitch):
        '''See `Player.pitch`.'''
        pass

    def set_cone_orientation(self, cone_orientation):
        '''See `Player.cone_orientation`.'''
        pass

    def set_cone_inner_angle(self, cone_inner_angle):
        '''See `Player.cone_inner_angle`.'''
        pass

    def set_cone_outer_angle(self, cone_outer_angle):
        '''See `Player.cone_outer_angle`.'''
        pass

    def set_cone_outer_gain(self, cone_outer_gain):
        '''See `Player.cone_outer_gain`.'''
        pass
class Player(pyglet.event.EventDispatcher):
'''High-level sound and video player.
'''
_last_video_timestamp = None
_texture = None
# Spacialisation attributes, preserved between audio players
_volume = 1.0
_min_distance = 1.0
_max_distance = 100000000.
_position = (0, 0, 0)
_pitch = 1.0
_cone_orientation = (0, 0, 1)
_cone_inner_angle = 360.
_cone_outer_angle = 360.
_cone_outer_gain = 1.
#: The player will pause when it reaches the end of the stream.
#:
#: :deprecated: Use `SourceGroup.advance_after_eos`
EOS_PAUSE = 'pause'
#: The player will loop the current stream continuosly.
#:
#: :deprecated: Use `SourceGroup.loop`
EOS_LOOP = 'loop'
#: The player will move on to the next queued stream when it reaches the
#: end of the current source. If there is no source queued, the player
#: will pause.
#:
#: :deprecated: Use `SourceGroup.advance_after_eos`
EOS_NEXT = 'next'
#: The player will stop entirely; valid only for ManagedSoundPlayer.
#:
#: :deprecated: Use `SourceGroup.advance_after_eos`
EOS_STOP = 'stop'
#: :deprecated:
_eos_action = EOS_NEXT
def __init__(self):
# List of queued source groups
self._groups = []
self._audio_player = None
# Desired play state (not an indication of actual state).
self._playing = False
self._paused_time = 0.0
def queue(self, source):
if isinstance(source, SourceGroup):
self._groups.append(source)
else:
if (self._groups and
source.audio_format == self._groups[-1].audio_format and
source.video_format == self._groups[-1].video_format):
self._groups[-1].queue(source)
else:
group = SourceGroup(source.audio_format, source.video_format)
group.queue(source)
self._groups.append(group)
self._set_eos_action(self._eos_action)
self._set_playing(self._playing)
def _set_playing(self, playing):
#stopping = self._playing and not playing
#starting = not self._playing and playing
self._playing = playing
source = self.source
if playing and source:
if not self._audio_player:
self._create_audio_player()
self._audio_player.play()
if source.video_format:
if not self._texture:
self._create_texture()
if self.source.video_format.frame_rate:
period = 1. / self.source.video_format.frame_rate
else:
period = 1. / 30.
pyglet.clock.schedule_interval(self.update_texture, period)
else:
if self._audio_player:
self._audio_player.stop()
pyglet.clock.unschedule(self.update_texture)
def play(self):
self._set_playing(True)
def pause(self):
self._set_playing(False)
if self._audio_player:
time = self._audio_player.get_time()
time = self._groups[0].translate_timestamp(time)
if time is not None:
self._paused_time = time
self._audio_player.stop()
def delete(self):
self.pause()
if self._audio_player:
self._audio_player.delete()
self._audio_player = None
while self._groups:
del self._groups[0]
def next_source(self):
if not self._groups:
return
group = self._groups[0]
if group.has_next():
group.next_source()
return
if self.source.video_format:
self._texture = None
pyglet.clock.unschedule(self.update_texture)
if self._audio_player:
self._audio_player.delete()
self._audio_player = None
del self._groups[0]
if self._groups:
self._set_playing(self._playing)
return
self._set_playing(False)
self.dispatch_event('on_player_eos')
#: :deprecated: Use `next_source` instead.
next = next_source # old API, worked badly with 2to3
def seek(self, time):
if _debug:
print('Player.seek(%r)' % time)
self._paused_time = time
self.source.seek(time)
if self._audio_player: self._audio_player.clear()
if self.source.video_format:
self._last_video_timestamp = None
self.update_texture(time=time)
    def _create_audio_player(self):
        # Build an audio player for the head source group; groups with
        # no audio format fall back to the silent driver so that clock
        # behaviour stays uniform.
        assert not self._audio_player
        assert self._groups
        group = self._groups[0]
        audio_format = group.audio_format
        if audio_format:
            audio_driver = get_audio_driver()
        else:
            audio_driver = get_silent_audio_driver()
        self._audio_player = audio_driver.create_audio_player(group, self)
        _class = self.__class__
        def _set(name):
            # Forward a property to the new audio player, but only when
            # it differs from the class default (skips redundant calls).
            private_name = '_' + name
            value = getattr(self, private_name)
            if value != getattr(_class, private_name):
                getattr(self._audio_player, 'set_' + name)(value)
        _set('volume')
        _set('min_distance')
        _set('max_distance')
        _set('position')
        _set('pitch')
        _set('cone_orientation')
        _set('cone_inner_angle')
        _set('cone_outer_angle')
        _set('cone_outer_gain')
    def _get_source(self):
        # The current source is the head of the first queued group, or
        # None when nothing is queued.
        if not self._groups:
            return None
        return self._groups[0].get_current_source()
    #: The currently playing `Source`, or None.
    source = property(_get_source)
    #: True while the player is playing (read-only).
    playing = property(lambda self: self._playing)
    def _get_time(self):
        # Prefer the live audio clock (translated onto the source
        # timeline); fall back to the position recorded at the last
        # pause/seek when no clock is available.
        time = None
        if self._playing and self._audio_player:
            time = self._audio_player.get_time()
            time = self._groups[0].translate_timestamp(time)
        if time is None:
            return self._paused_time
        else:
            return time
    #: Current playback position, in seconds (read-only).
    time = property(_get_time)
    def _create_texture(self):
        # Allocate a rectangle texture sized to the video frames.
        video_format = self.source.video_format
        self._texture = pyglet.image.Texture.create(
            video_format.width, video_format.height, rectangle=True)
        # Flip vertically so frames blit with their expected origin.
        self._texture = self._texture.get_transform(flip_y=True)
        self._texture.anchor_y = 0
    def get_texture(self):
        '''Return the texture holding the current video frame, or None
        when no frame has been uploaded yet.
        '''
        return self._texture
    def seek_next_frame(self):
        '''Step forwards one video frame in the current Source.
        '''
        # Peek at the next frame's timestamp; no-op at end of stream.
        time = self._groups[0].get_next_video_timestamp()
        if time is None:
            return
        self.seek(time)
    def update_texture(self, dt=None, time=None):
        # Upload the next due video frame into the texture.  Runs on a
        # clock schedule (``dt`` supplied) or is called directly with an
        # explicit presentation ``time`` (e.g. from seek()).
        if time is None:
            time = self._audio_player.get_time()
        if time is None:
            return
        # Nothing to do until the clock passes the last shown frame.
        if (self._last_video_timestamp is not None and
                time <= self._last_video_timestamp):
            return
        # Drop frames that are already late.
        ts = self._groups[0].get_next_video_timestamp()
        while ts is not None and ts < time:
            self._groups[0].get_next_video_frame() # Discard frame
            ts = self._groups[0].get_next_video_timestamp()
        if ts is None:
            # Video stream exhausted.
            self._last_video_timestamp = None
            return
        image = self._groups[0].get_next_video_frame()
        if image is not None:
            if self._texture is None:
                self._create_texture()
            self._texture.blit_into(image, 0, 0, 0)
        self._last_video_timestamp = ts
    def _set_eos_action(self, eos_action):
        ''':deprecated:'''
        # Maps the legacy eos_action constants onto the per-group
        # loop/advance flags that replaced them.
        warnings.warn('Player.eos_action is deprecated in favor of SourceGroup.loop and SourceGroup.advance_after_eos',
                      category=DeprecationWarning)
        assert eos_action in (self.EOS_NEXT, self.EOS_STOP,
                              self.EOS_PAUSE, self.EOS_LOOP)
        self._eos_action = eos_action
        for group in self._groups:
            group.loop = eos_action == self.EOS_LOOP
            group.advance_after_eos = eos_action == self.EOS_NEXT
    eos_action = property(lambda self: self._eos_action,
                          _set_eos_action,
                          doc='''Set the behaviour of the player when it
        reaches the end of the current source.
        This must be one of the constants `EOS_NEXT`, `EOS_PAUSE`, `EOS_STOP` or
        `EOS_LOOP`.
        :deprecated: Use `SourceGroup.loop` and `SourceGroup.advance_after_eos`
        :type: str
        ''')
def _player_property(name, doc=None):
private_name = '_' + name
set_name = 'set_' + name
def _player_property_set(self, value):
setattr(self, private_name, value)
if self._audio_player:
getattr(self._audio_player, set_name)(value)
def _player_property_get(self):
return getattr(self, private_name)
return property(_player_property_get, _player_property_set, doc=doc)
# TODO docstrings for these...
volume = _player_property('volume')
min_distance = _player_property('min_distance')
max_distance = _player_property('max_distance')
position = _player_property('position')
pitch = _player_property('pitch')
cone_orientation = _player_property('cone_orientation')
cone_inner_angle = _player_property('cone_inner_angle')
cone_outer_angle = _player_property('cone_outer_angle')
cone_outer_gain = _player_property('cone_outer_gain')
    # Events
    def on_player_eos(self):
        '''The player ran out of sources.
        :event:
        '''
        # Default handler only logs in debug mode.
        if _debug:
            print('Player.on_player_eos')
    def on_source_group_eos(self):
        '''The current source group ran out of data.
        The default behaviour is to advance to the next source group if
        possible.
        :event:
        '''
        self.next_source()
        if _debug:
            print('Player.on_source_group_eos')
    def on_eos(self):
        '''End of stream reached (presumably dispatched per source by
        the audio player -- confirm at the dispatch site).
        :event:
        '''
        # Default handler only logs in debug mode.
        if _debug:
            print('Player.on_eos')
# Declare the events a Player instance may dispatch.
Player.register_event_type('on_eos')
Player.register_event_type('on_player_eos')
Player.register_event_type('on_source_group_eos')
class ManagedSoundPlayer(Player):
    ''':deprecated: Use `Player`'''
    def __init__(self, *args, **kwargs):
        # Behaves exactly like Player; kept for backward compatibility.
        warnings.warn('Use `Player` instead.', category=DeprecationWarning)
        super(ManagedSoundPlayer, self).__init__(*args, **kwargs)
class PlayerGroup(object):
    '''Group of players that can be played and paused simultaneously.
    :Ivariables:
        `players` : list of `Player`
            Players in this group.
    '''
    def __init__(self, players):
        '''Create a player group for the given set of players.
        All players in the group must currently not belong to any other
        group.
        :Parameters:
            `players` : Sequence of `Player`
                Players to add to this group.
        '''
        self.players = list(players)

    def _group_op(self, method_name):
        # Invoke a group operation (by name) on the underlying audio
        # players of every member that has one.
        audio_players = [member._audio_player
                         for member in self.players if member._audio_player]
        if audio_players:
            getattr(audio_players[0], method_name)(audio_players)

    def play(self):
        '''Begin playing all players in the group simultaneously.
        '''
        self._group_op('_play_group')
        for member in self.players:
            member.play()

    def pause(self):
        '''Pause all players in the group simultaneously.
        '''
        self._group_op('_stop_group')
        for member in self.players:
            member.pause()
class AbstractAudioDriver(object):
    '''Interface implemented by platform audio drivers.'''

    def create_audio_player(self, source_group, player):
        '''Create an audio player for ``source_group`` owned by ``player``.'''
        raise NotImplementedError('abstract')

    def get_listener(self):
        '''Return this driver's positional-audio listener.'''
        raise NotImplementedError('abstract')
class AbstractListener(object):
    '''The listener properties for positional audio.
    You can obtain the singleton instance of this class by calling
    `AbstractAudioDriver.get_listener`.
    '''
    # Cached property values; concrete drivers update these from their
    # _set_* overrides.
    _volume = 1.0
    _position = (0, 0, 0)
    _forward_orientation = (0, 0, -1)
    _up_orientation = (0, 1, 0)
    def _set_volume(self, volume):
        raise NotImplementedError('abstract')
    volume = property(lambda self: self._volume,
                      lambda self, volume: self._set_volume(volume),
                      doc='''The master volume for sound playback.
        All sound volumes are multiplied by this master volume before being
        played. A value of 0 will silence playback (but still consume
        resources). The nominal volume is 1.0.
        :type: float
        ''')
    def _set_position(self, position):
        raise NotImplementedError('abstract')
    position = property(lambda self: self._position,
                        lambda self, position: self._set_position(position),
                        doc='''The position of the listener in 3D space.
        The position is given as a tuple of floats (x, y, z). The unit
        defaults to meters, but can be modified with the listener
        properties.
        :type: 3-tuple of float
        ''')
    def _set_forward_orientation(self, orientation):
        raise NotImplementedError('abstract')
    forward_orientation = property(lambda self: self._forward_orientation,
                                   lambda self, o: self._set_forward_orientation(o),
                                   doc='''A vector giving the direction the
        listener is facing.
        The orientation is given as a tuple of floats (x, y, z), and has
        no unit. The forward orientation should be orthogonal to the
        up orientation.
        :type: 3-tuple of float
        ''')
    def _set_up_orientation(self, orientation):
        raise NotImplementedError('abstract')
    up_orientation = property(lambda self: self._up_orientation,
                              lambda self, o: self._set_up_orientation(o),
                              doc='''A vector giving the "up" orientation
        of the listener.
        The orientation is given as a tuple of floats (x, y, z), and has
        no unit. The up orientation should be orthogonal to the
        forward orientation.
        :type: 3-tuple of float
        ''')
class _LegacyListener(AbstractListener):
    # Forwards every setter to the listener of the *current* audio
    # driver so the deprecated module-level `listener` keeps working
    # even when the driver is created lazily.
    def _set_volume(self, volume):
        get_audio_driver().get_listener().volume = volume
        self._volume = volume
    def _set_position(self, position):
        get_audio_driver().get_listener().position = position
        self._position = position
    def _set_forward_orientation(self, forward_orientation):
        get_audio_driver().get_listener().forward_orientation = \
            forward_orientation
        self._forward_orientation = forward_orientation
    def _set_up_orientation(self, up_orientation):
        get_audio_driver().get_listener().up_orientation = up_orientation
        self._up_orientation = up_orientation
#: The singleton `AbstractListener` object.
#:
#: :deprecated: Use `AbstractAudioDriver.get_listener`
#:
#: :type: `AbstractListener`
listener = _LegacyListener()
class AbstractSourceLoader(object):
    '''Interface implemented by media source loaders.'''

    def load(self, filename, file):
        '''Load and return a media source from ``filename``/``file``.'''
        raise NotImplementedError('abstract')
class AVbinSourceLoader(AbstractSourceLoader):
    # Loader backed by AVbin (handles compressed audio/video formats).
    def load(self, filename, file):
        from . import avbin
        return avbin.AVbinSource(filename, file)
class RIFFSourceLoader(AbstractSourceLoader):
    # Fallback loader for uncompressed RIFF/WAVE files only.
    def load(self, filename, file):
        from . import riff
        return riff.WaveSource(filename, file)
def load(filename, file=None, streaming=True):
    '''Load a source from a file.
    Currently the `file` argument is not supported; media files must exist
    as real paths.
    :Parameters:
        `filename` : str
            Filename of the media file to load.
        `file` : file-like object
            Not yet supported.
        `streaming` : bool
            If False, a `StaticSource` will be returned; otherwise (default) a
            `StreamingSource` is created.
    :rtype: `Source`
    '''
    source = get_source_loader().load(filename, file)
    if not streaming:
        # Decode the whole source into memory up front.
        source = StaticSource(source)
    return source
def get_audio_driver():
    # Return the audio driver singleton, creating it on first use by
    # trying each name in pyglet.options['audio'] in order.  Import or
    # initialisation failures are deliberately swallowed (logged only
    # in debug mode) so the next candidate can be tried; may return
    # None when every candidate fails.
    global _audio_driver
    if _audio_driver:
        return _audio_driver
    _audio_driver = None
    for driver_name in pyglet.options['audio']:
        try:
            if driver_name == 'pulse':
                from .drivers import pulse
                _audio_driver = pulse.create_audio_driver()
                break
            elif driver_name == 'openal':
                from .drivers import openal
                _audio_driver = openal.create_audio_driver()
                break
            elif driver_name == 'directsound':
                from .drivers import directsound
                _audio_driver = directsound.create_audio_driver()
                break
            elif driver_name == 'silent':
                _audio_driver = get_silent_audio_driver()
                break
        except Exception as exp:
            if _debug:
                print('Error importing driver %s:\n%s' % (driver_name, str(exp)))
    return _audio_driver
def get_silent_audio_driver():
    # Return the lazily-created silent driver singleton (used for
    # sources without audio and as the 'silent' option).
    global _silent_audio_driver
    if not _silent_audio_driver:
        from .drivers import silent
        _silent_audio_driver = silent.create_audio_driver()
    return _silent_audio_driver
# Lazily-created driver singletons; see get_audio_driver() and
# get_silent_audio_driver().
_audio_driver = None
_silent_audio_driver = None
def get_source_loader():
    # Return the source loader singleton: AVbin when importable,
    # otherwise the RIFF/WAVE-only fallback.
    global _source_loader
    if _source_loader:
        return _source_loader
    try:
        from . import avbin
        _source_loader = AVbinSourceLoader()
    except ImportError:
        _source_loader = RIFFSourceLoader()
    return _source_loader
# Lazily-created source loader singleton; see get_source_loader().
_source_loader = None
# Probe AVbin availability once at import time.
try:
    from . import avbin
    have_avbin = True
except ImportError:
    have_avbin = False
| |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of SQLAlchemy backend."""
import datetime
import json
import sys
import six
import sqlalchemy as sa
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql.expression import desc
from climate.db import exceptions as db_exc
from climate.db import utils as db_utils
from climate.db.sqlalchemy import facade_wrapper
from climate.db.sqlalchemy import models
from climate.openstack.common.db import exception as common_db_exc
from climate.openstack.common.db import options as db_options
from climate.openstack.common.db.sqlalchemy import session as db_session
from climate.openstack.common.gettextutils import _
from climate.openstack.common import log as logging
from keystoneclient.v2_0 import client
import kwrankingclient.client as kwrclient
LOG = logging.getLogger(__name__)
# Re-export the engine/session accessors from the facade wrapper.
get_engine = facade_wrapper.get_engine
get_session = facade_wrapper.get_session
def get_backend():
    """Return the DB backend implementation: this very module."""
    backend_module = sys.modules[__name__]
    return backend_module
def model_query(model, session=None):
    """Query helper.

    :param model: base model to query
    :param session: session to use; a new one is obtained from
        get_session() when omitted.
    """
    session = session or get_session()
    return session.query(model)
def setup_db():
    """Create all tables of the Lease model's metadata.

    Returns True on success, False when the database is unreachable.
    """
    try:
        engine = db_session.EngineFacade(db_options.CONF.database.connection,
                                         sqlite_fk=True).get_engine()
        models.Lease.metadata.create_all(engine)
    except sa.exc.OperationalError as e:
        LOG.error(_("Database registration exception: %s"), e)
        return False
    return True
def drop_db():
    """Drop all tables of the Lease model's metadata.

    Returns True on success, False on any failure.
    """
    try:
        engine = db_session.EngineFacade(db_options.CONF.database.connection,
                                         sqlite_fk=True).get_engine()
        models.Lease.metadata.drop_all(engine)
    except Exception as e:
        LOG.error(_("Database shutdown exception: %s"), e)
        return False
    return True
# Helpers for building constraints / equality checks
def constraint(**conditions):
    # Build a Constraint mapping column names to condition objects.
    return Constraint(conditions)
def equal_any(*values):
    # Condition: the column equals any of the given values.
    return EqualityCondition(values)
def not_equal(*values):
    # Condition: the column differs from every given value.
    return InequalityCondition(values)
class Constraint(object):
    """A set of per-column conditions applied to a query."""
    def __init__(self, conditions):
        # Mapping of column name -> condition object exposing clauses().
        self.conditions = conditions
    def apply(self, model, query):
        """Return ``query`` filtered by every condition on ``model``."""
        # dict.items() works on both Python 2 and 3; the previous
        # dict.iteritems() is Python-2-only.
        for key, condition in self.conditions.items():
            for clause in condition.clauses(getattr(model, key)):
                query = query.filter(clause)
        return query
class EqualityCondition(object):
    # Condition: the column equals any of `values`.
    def __init__(self, values):
        self.values = values
    def clauses(self, field):
        # NOTE(review): sa.or_ is passed a list here and the result is
        # iterated by Constraint.apply; the documented calling form is
        # positional clauses (sa.or_(*clauses)).  Confirm this behaves
        # as intended on the SQLAlchemy version in use.
        return sa.or_([field == value for value in self.values])
class InequalityCondition(object):
    """Condition: the column differs from every given value."""

    def __init__(self, values):
        self.values = values

    def clauses(self, field):
        """Return one ``!=`` clause per excluded value."""
        clause_list = []
        for excluded in self.values:
            clause_list.append(field != excluded)
        return clause_list
# Reservation
def _reservation_get(session, reservation_id):
    # Fetch a Reservation by id within `session` (None when absent).
    query = model_query(models.Reservation, session)
    return query.filter_by(id=reservation_id).first()
def reservation_get(reservation_id):
    """Return the reservation with the given id, or None."""
    return _reservation_get(get_session(), reservation_id)
def reservation_get_all():
    """Return every reservation."""
    query = model_query(models.Reservation, get_session())
    return query.all()
def reservation_get_all_by_lease_id(lease_id):
    """Return all reservations attached to a lease."""
    reservations = (model_query(models.Reservation,
                    get_session()).filter_by(lease_id=lease_id))
    return reservations.all()
def reservation_get_all_by_values(**kwargs):
    """Returns all entries filtered by col=value."""
    reservation_query = model_query(models.Reservation, get_session())
    for name, value in kwargs.items():
        # Silently ignore kwargs that are not Reservation columns.
        column = getattr(models.Reservation, name, None)
        if column:
            reservation_query = reservation_query.filter(column == value)
    return reservation_query.all()
def reservation_create(values):
    """Create a reservation from ``values`` and return the saved row.

    :raises ClimateDBDuplicateEntry: on duplicated columns.
    """
    values = values.copy()
    reservation = models.Reservation()
    reservation.update(values)
    session = get_session()
    with session.begin():
        try:
            reservation.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # raise exception about duplicated columns (e.columns)
            raise db_exc.ClimateDBDuplicateEntry(
                model=reservation.__class__.__name__, columns=e.columns)
    return reservation_get(reservation.id)
def reservation_update(reservation_id, values):
    """Update an existing reservation and return the refreshed row."""
    session = get_session()
    with session.begin():
        reservation = _reservation_get(session, reservation_id)
        reservation.update(values)
        reservation.save(session=session)
    return reservation_get(reservation_id)
def reservation_destroy(reservation_id):
    """Delete a reservation.

    :raises ClimateDBNotFound: when the id does not exist.
    """
    session = get_session()
    with session.begin():
        reservation = _reservation_get(session, reservation_id)
        if not reservation:
            # raise not found error
            raise db_exc.ClimateDBNotFound(id=reservation_id,
                                           model='Reservation')
        session.delete(reservation)
# Lease
def _lease_get(session, lease_id):
    # Fetch a Lease by id within `session` (None when absent).
    query = model_query(models.Lease, session)
    return query.filter_by(id=lease_id).first()
def lease_get(lease_id):
    """Return the lease with the given id, or None."""
    return _lease_get(get_session(), lease_id)
def lease_get_all():
    """Return every lease."""
    query = model_query(models.Lease, get_session())
    return query.all()
def lease_get_all_by_project(project_id):
    # Not implemented by this backend.
    raise NotImplementedError
def lease_get_all_by_user(user_id):
    # Not implemented by this backend.
    raise NotImplementedError
def lease_list(project_id=None):
    """Return all leases, optionally restricted to one project."""
    query = model_query(models.Lease, get_session())
    if project_id is not None:
        query = query.filter_by(project_id=project_id)
    return query.all()
def lease_create(values):
    """Create a lease together with its nested reservations and events.

    ``values`` may embed "reservations" and "events" lists; each entry
    is created and linked to the new lease.

    :raises ClimateDBDuplicateEntry: on any duplicated column.
    """
    values = values.copy()
    lease = models.Lease()
    reservations = values.pop("reservations", [])
    events = values.pop("events", [])
    lease.update(values)
    session = get_session()
    with session.begin():
        try:
            lease.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # raise exception about duplicated columns (e.columns)
            raise db_exc.ClimateDBDuplicateEntry(
                model=lease.__class__.__name__, columns=e.columns)
        try:
            for reservation_values in reservations:
                reservation = models.Reservation()
                reservation.update({"lease_id": lease.id})
                reservation.update(reservation_values)
                reservation.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # raise exception about duplicated columns (e.columns)
            raise db_exc.ClimateDBDuplicateEntry(
                model=reservation.__class__.__name__, columns=e.columns)
        try:
            # Loop variable deliberately not named "e" (as previously)
            # to avoid shadowing the exception alias below.
            for event_values in events:
                event = models.Event()
                event.update({"lease_id": lease.id})
                event.update(event_values)
                event.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # raise exception about duplicated columns (e.columns)
            raise db_exc.ClimateDBDuplicateEntry(
                model=event.__class__.__name__, columns=e.columns)
    return lease_get(lease.id)
def lease_update(lease_id, values):
    """Update an existing lease and return the refreshed row."""
    session = get_session()
    with session.begin():
        lease = _lease_get(session, lease_id)
        lease.update(values)
        lease.save(session=session)
    return lease_get(lease_id)
def lease_destroy(lease_id):
    """Delete a lease.

    :raises ClimateDBNotFound: when the id does not exist.
    """
    session = get_session()
    with session.begin():
        lease = _lease_get(session, lease_id)
        if not lease:
            # raise not found error
            raise db_exc.ClimateDBNotFound(id=lease_id, model='Lease')
        session.delete(lease)
# Event
def _event_get(session, event_id):
    # Fetch an Event by id within `session` (None when absent).
    query = model_query(models.Event, session)
    return query.filter_by(id=event_id).first()
def _event_get_all(session):
    # Base (unfiltered) query over all events.
    query = model_query(models.Event, session)
    return query
def event_get(event_id):
    """Return the event with the given id, or None."""
    return _event_get(get_session(), event_id)
def event_get_all():
    """Return every event."""
    return _event_get_all(get_session()).all()
def _event_get_sorted_by_filters(sort_key, sort_dir, filters):
    """Return an event query filtered and sorted by name of the field."""
    # sort_dir must be 'asc' or 'desc'; other values raise KeyError.
    sort_fn = {'desc': desc, 'asc': asc}
    events_query = _event_get_all(get_session())
    # Only these three filter keys are supported; others are ignored.
    if 'status' in filters:
        events_query = (
            events_query.filter(models.Event.status == filters['status']))
    if 'lease_id' in filters:
        events_query = (
            events_query.filter(models.Event.lease_id == filters['lease_id']))
    if 'event_type' in filters:
        events_query = events_query.filter(models.Event.event_type ==
                                           filters['event_type'])
    events_query = events_query.order_by(
        sort_fn[sort_dir](getattr(models.Event, sort_key))
    )
    return events_query
def event_get_first_sorted_by_filters(sort_key, sort_dir, filters):
    """Return first result for events
    Return the first result for all events matching the filters
    and sorted by name of the field.
    """
    return _event_get_sorted_by_filters(sort_key, sort_dir, filters).first()
def event_get_all_sorted_by_filters(sort_key, sort_dir, filters):
    """Return events filtered and sorted by name of the field."""
    return _event_get_sorted_by_filters(sort_key, sort_dir, filters).all()
def event_create(values):
    """Create an event from ``values`` and return the saved row.

    :raises ClimateDBDuplicateEntry: on duplicated columns.
    """
    values = values.copy()
    event = models.Event()
    event.update(values)
    session = get_session()
    with session.begin():
        try:
            event.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # raise exception about duplicated columns (e.columns)
            raise db_exc.ClimateDBDuplicateEntry(
                model=event.__class__.__name__, columns=e.columns)
    return event_get(event.id)
def event_update(event_id, values):
    """Update an existing event and return the refreshed row."""
    session = get_session()
    with session.begin():
        event = _event_get(session, event_id)
        event.update(values)
        event.save(session=session)
    return event_get(event_id)
def event_destroy(event_id):
    """Delete an event.

    :raises ClimateDBNotFound: when the id does not exist.
    """
    session = get_session()
    with session.begin():
        event = _event_get(session, event_id)
        if not event:
            # raise not found error
            raise db_exc.ClimateDBNotFound(id=event_id, model='Event')
        session.delete(event)
# ComputeHostReservation
def _host_reservation_get(session, host_reservation_id):
    # Fetch a ComputeHostReservation by id (None when absent).
    query = model_query(models.ComputeHostReservation, session)
    return query.filter_by(id=host_reservation_id).first()
def host_reservation_get(host_reservation_id):
    """Return the host reservation with the given id, or None."""
    return _host_reservation_get(get_session(),
                                 host_reservation_id)
def host_reservation_get_all():
    """Return every host reservation."""
    query = model_query(models.ComputeHostReservation, get_session())
    return query.all()
def _host_reservation_get_by_reservation_id(session, reservation_id):
    # Fetch the (single) host reservation of a reservation.
    query = model_query(models.ComputeHostReservation, session)
    return query.filter_by(reservation_id=reservation_id).first()
def host_reservation_get_by_reservation_id(reservation_id):
    """Return the host reservation of a reservation, or None."""
    return _host_reservation_get_by_reservation_id(get_session(),
                                                   reservation_id)
def host_reservation_create(values):
    """Create a host reservation from ``values`` and return it.

    :raises ClimateDBDuplicateEntry: on duplicated columns.
    """
    values = values.copy()
    host_reservation = models.ComputeHostReservation()
    host_reservation.update(values)
    session = get_session()
    with session.begin():
        try:
            host_reservation.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # raise exception about duplicated columns (e.columns)
            raise db_exc.ClimateDBDuplicateEntry(
                model=host_reservation.__class__.__name__, columns=e.columns)
    return host_reservation_get(host_reservation.id)
def host_reservation_update(host_reservation_id, values):
    """Update an existing host reservation and return it."""
    session = get_session()
    with session.begin():
        host_reservation = _host_reservation_get(session,
                                                 host_reservation_id)
        host_reservation.update(values)
        host_reservation.save(session=session)
    return host_reservation_get(host_reservation_id)
def host_reservation_destroy(host_reservation_id):
    """Delete a host reservation.

    :raises ClimateDBNotFound: when the id does not exist.
    """
    session = get_session()
    with session.begin():
        host_reservation = _host_reservation_get(session,
                                                 host_reservation_id)
        if not host_reservation:
            # raise not found error
            raise db_exc.ClimateDBNotFound(
                id=host_reservation_id, model='ComputeHostReservation')
        session.delete(host_reservation)
# ComputeHostAllocation
def _host_allocation_get(session, host_allocation_id):
    # Fetch a ComputeHostAllocation by id (None when absent).
    query = model_query(models.ComputeHostAllocation, session)
    return query.filter_by(id=host_allocation_id).first()
def host_allocation_get(host_allocation_id):
    """Return the host allocation with the given id, or None."""
    return _host_allocation_get(get_session(),
                                host_allocation_id)
def host_allocation_get_all():
    """Return every host allocation."""
    query = model_query(models.ComputeHostAllocation, get_session())
    return query.all()
def host_allocation_get_all_by_values(**kwargs):
    """Returns all entries filtered by col=value."""
    allocation_query = model_query(models.ComputeHostAllocation, get_session())
    for name, value in kwargs.items():
        # Silently ignore kwargs that are not allocation columns.
        column = getattr(models.ComputeHostAllocation, name, None)
        if column:
            allocation_query = allocation_query.filter(column == value)
    return allocation_query.all()
def host_allocation_create(values):
    """Create a host allocation from ``values`` and return it.

    :raises ClimateDBDuplicateEntry: on duplicated columns.
    """
    values = values.copy()
    host_allocation = models.ComputeHostAllocation()
    host_allocation.update(values)
    session = get_session()
    with session.begin():
        try:
            host_allocation.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # raise exception about duplicated columns (e.columns)
            raise db_exc.ClimateDBDuplicateEntry(
                model=host_allocation.__class__.__name__, columns=e.columns)
    return host_allocation_get(host_allocation.id)
def host_allocation_update(host_allocation_id, values):
    """Update an existing host allocation and return it."""
    session = get_session()
    with session.begin():
        host_allocation = _host_allocation_get(session,
                                               host_allocation_id)
        host_allocation.update(values)
        host_allocation.save(session=session)
    return host_allocation_get(host_allocation_id)
def host_allocation_destroy(host_allocation_id):
    """Delete a host allocation.

    :raises ClimateDBNotFound: when the id does not exist.
    """
    session = get_session()
    with session.begin():
        host_allocation = _host_allocation_get(session,
                                               host_allocation_id)
        if not host_allocation:
            # raise not found error
            raise db_exc.ClimateDBNotFound(
                id=host_allocation_id, model='ComputeHostAllocation')
        session.delete(host_allocation)
def host_allocation_optimize():
    # Re-pack future reservations onto the most energy-efficient hosts
    # (efficiency ranking provided by the kwranking service).
    #
    # NOTE(review): SECURITY -- the keystone endpoint, username and
    # password below are hard-coded; they must come from configuration,
    # not source code.
    # plugin = host_plugin.PhysicalHostPlugin()
    # Create a reservation list R
    leases = lease_get_all()
    # Only leases that have not started yet can be moved.
    leases = [lease for lease in leases if lease['start_date'] > datetime.datetime.now()]
    # Sort R by length
    leases = sorted(leases, key=lambda k: k['end_date'] - k['start_date'], reverse=True)
    for lease in leases:
        reservations = reservation_get_all_by_lease_id(lease['id'])
        # For each r in R:
        for reservation in reservations:
            host_reservations = host_reservation_get_by_reservation_id(reservation['id'])
            if not isinstance(host_reservations, (list)):
                host_reservations = [host_reservations]
            for host_reservation in host_reservations:
                # Create a list of matching hosts H
                hosts = _matching_hosts(host_reservation['hypervisor_properties'], host_reservation['resource_properties'],
                                        host_reservation['count_range'], lease['start_date'], lease['end_date'], hardware_only=True)
                # Join the list of currently allocated hosts.
                for alloc in host_allocation_get_all_by_values(reservation_id=reservation['id']):
                    if alloc['compute_host_id'] not in hosts:
                        hosts.append(alloc['compute_host_id'])
                # Sort this list by decreasing efficiency
                ks = client.Client(auth_url='http://10.5.5.5:35357/v2.0', username='kwranking', password='password', tenant_name='service')
                endpoint = ks.service_catalog.url_for(service_type='efficiency', endpoint_type='publicURL')
                kwr = kwrclient.Client('1', endpoint, ks.auth_token)
                hosts_lst = ''
                for x in hosts:
                    hosts_lst += x + ';'
                hosts = kwr.node.rank_hosts_list({'hosts': hosts_lst, 'method': 'Efficiency', 'number': len(hosts)})
                # Create a list of current allocations
                current_allocations = host_allocation_get_all_by_values(reservation_id=reservation['id'])
                # Sort this list by increasing efficiency
                current_hosts = [h['compute_host_id'] for h in current_allocations]
                hosts_lst = ''
                for x in current_hosts:
                    hosts_lst += x + ';'
                current_hosts = kwr.node.rank_hosts_list({'hosts': hosts_lst, 'method': 'Efficiency', 'number': len(current_hosts)})
                current_hosts = current_hosts[::-1]
                current_allocations_sorted = []
                for h in current_hosts:
                    current_allocations_sorted += [alloc for alloc in current_allocations if alloc['compute_host_id'] == h]
                # Already optimal: current hosts are the best candidates.
                if hosts[:len(current_hosts)] == current_hosts[::-1]:
                    break
                # Put r on H (if ok), try the other ones otherwise
                for worst_allocation in current_allocations_sorted:
                    for host in hosts:
                        if worst_allocation['compute_host_id'] == host:
                            break
                        allocation = host_allocation_get_all_by_values(
                            compute_host_id=host)
                        if not allocation:
                            # Host entirely free: move the allocation.
                            host_allocation_destroy(worst_allocation['id'])
                            host_allocation_create({'compute_host_id': host,
                                                    'reservation_id': reservation['id']})
                            break
                        elif db_utils.get_free_periods(
                            host,
                            lease['start_date'],
                            lease['end_date'],
                            lease['end_date'] - lease['start_date'],
                        ) == [
                            (lease['start_date'], lease['end_date']),
                        ]:
                            # Host free over the whole lease period.
                            host_allocation_destroy(worst_allocation['id'])
                            host_allocation_create({'compute_host_id': host,
                                                    'reservation_id': reservation['id']})
                            break
def _matching_hosts(hypervisor_properties, resource_properties,
                    count_range, start_date, end_date, hardware_only=False):
    """Return the ids of hosts matching the given property requirements.

    :param hypervisor_properties: requirement list on hypervisor columns.
    :param resource_properties: requirement list on extra capabilities.
    :param count_range: "min-max" host count; currently unused, kept for
        interface compatibility.
    :param start_date, end_date: reservation period; currently unused.
    :param hardware_only: currently unused, kept for compatibility.
    """
    # TODO(frossigneux) support "or" operator
    filter_array = []
    if hypervisor_properties:
        filter_array = _convert_requirements(
            hypervisor_properties)
    if resource_properties:
        filter_array += _convert_requirements(
            resource_properties)
    return [host['id'] for host in host_get_all_by_queries(filter_array)]
def _convert_requirements(requirements):
    """Convert the requirements to an array of strings.
    ["key op value", "key op value", ...]
    """
    # TODO(frossigneux) Support the "or" operator
    # Convert text to json
    if isinstance(requirements, six.string_types):
        try:
            requirements = json.loads(requirements)
        except ValueError:
            # NOTE(review): manager_ex is not imported in this module;
            # reaching this raise would fail with NameError.  Confirm
            # the intended exceptions module and import it.
            raise manager_ex.MalformedRequirements(rqrms=requirements)
    # Requirement list looks like ['<', '$ram', '1024']
    if _requirements_with_three_elements(requirements):
        result = []
        # Normalize '=' to '=='.
        if requirements[0] == '=':
            requirements[0] = '=='
        # Strip the leading '$' from the key.
        string = (requirements[1][1:] + " " + requirements[0] + " " +
                  requirements[2])
        result.append(string)
        return result
    # Remove the 'and' element at the head of the requirement list
    elif _requirements_with_and_keyword(requirements):
        return [_convert_requirements(x)[0]
                for x in requirements[1:]]
    # Empty requirement list
    elif isinstance(requirements, list) and not requirements:
        return requirements
    else:
        raise manager_ex.MalformedRequirements(rqrms=requirements)
def _requirements_with_three_elements(requirements):
    """Return true if requirement list looks like ['<', '$ram', '1024']."""
    # The key (element 1) must be '$'-prefixed and non-empty; the value
    # (element 2) must be a non-empty string.
    return (isinstance(requirements, list) and
            len(requirements) == 3 and
            isinstance(requirements[0], six.string_types) and
            isinstance(requirements[1], six.string_types) and
            isinstance(requirements[2], six.string_types) and
            requirements[0] in ['==', '=', '!=', '>=', '<=', '>', '<'] and
            len(requirements[1]) > 1 and requirements[1][0] == '$' and
            len(requirements[2]) > 0)
#ComputeHost
def _host_get(session, host_id):
    # Fetch a ComputeHost by id within `session` (None when absent).
    query = model_query(models.ComputeHost, session)
    return query.filter_by(id=host_id).first()
def _host_get_all(session):
    # Base (unfiltered) query over all compute hosts.
    query = model_query(models.ComputeHost, session)
    return query
def host_get(host_id):
    """Return the compute host with the given id, or None."""
    return _host_get(get_session(), host_id)
def host_list():
    """Return every compute host."""
    return model_query(models.ComputeHost, get_session()).all()
def host_get_all_by_filters(filters):
    """Returns hosts filtered by name of the field."""
    # Only the 'status' filter key is supported; others are ignored.
    hosts_query = _host_get_all(get_session())
    if 'status' in filters:
        hosts_query = hosts_query.filter(
            models.ComputeHost.status == filters['status'])
    return hosts_query.all()
def host_get_all_by_queries(queries):
    """Returns hosts filtered by an array of queries.
    :param queries: array of queries "key op value" where op can be
        http://docs.sqlalchemy.org/en/rel_0_7/core/expression_api.html
        #sqlalchemy.sql.operators.ColumnOperators
    :raises ClimateDBInvalidFilter: when a query is not "key op value".
    :raises ClimateDBInvalidFilterOperator: for an unsupported operator.
    :raises ClimateDBNotFound: when the key is neither a ComputeHost
        column nor a known extra capability.
    """
    hosts_query = model_query(models.ComputeHost, get_session())
    # Per operator: [SQLAlchemy method name, *negated* Python compare]
    # (the negated form collects extra-capability hosts to EXCLUDE).
    oper = {
        '<': ['lt', lambda a, b: a >= b],
        '>': ['gt', lambda a, b: a <= b],
        '<=': ['le', lambda a, b: a > b],
        '>=': ['ge', lambda a, b: a < b],
        '==': ['eq', lambda a, b: a != b],
        '!=': ['ne', lambda a, b: a == b],
    }
    hosts = []
    for query in queries:
        try:
            # Split into exactly "key op value"; maxsplit=2 lets the
            # value itself contain spaces (maxsplit=3 used to reject
            # such queries with a spurious invalid-filter error).
            key, op, value = query.split(' ', 2)
        except ValueError:
            raise db_exc.ClimateDBInvalidFilter(query_filter=query)
        column = getattr(models.ComputeHost, key, None)
        if column:
            if op == 'in':
                filt = column.in_(value.split(','))
            else:
                if op in oper:
                    op = oper[op][0]
                # Locate the SQLAlchemy operator method on the column.
                # next() works on Python 2 and 3 alike; the previous
                # filter(...)[0] breaks on Python 3 where filter()
                # returns a non-subscriptable iterator.
                try:
                    attr = next(fmt % op for fmt in ('%s', '%s_', '__%s__')
                                if hasattr(column, fmt % op))
                except StopIteration:
                    raise db_exc.ClimateDBInvalidFilterOperator(
                        filter_operator=op)
                if value == 'null':
                    value = None
                filt = getattr(column, attr)(value)
            hosts_query = hosts_query.filter(filt)
        else:
            # looking for extra capabilities matches
            extra_filter = model_query(
                models.ComputeHostExtraCapability, get_session()
            ).filter(models.ComputeHostExtraCapability.capability_name == key
                     ).all()
            if not extra_filter:
                raise db_exc.ClimateDBNotFound(
                    id=key, model='ComputeHostExtraCapability')
            for host in extra_filter:
                if op in oper and oper[op][1](host.capability_value, value):
                    # Negated comparison matched: exclude this host.
                    hosts.append(host.computehost_id)
                elif op not in oper:
                    msg = 'Operator %s for extra capabilities not implemented'
                    raise NotImplementedError(msg % op)
    return hosts_query.filter(~models.ComputeHost.id.in_(hosts)).all()
def host_create(values):
    """Create a compute host from ``values`` and return the saved row.

    :raises ClimateDBDuplicateEntry: on duplicated columns.
    """
    values = values.copy()
    host = models.ComputeHost()
    host.update(values)
    session = get_session()
    with session.begin():
        try:
            host.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # raise exception about duplicated columns (e.columns)
            raise db_exc.ClimateDBDuplicateEntry(
                model=host.__class__.__name__, columns=e.columns)
    return host_get(host.id)
def host_update(host_id, values):
    """Update an existing compute host and return the refreshed row."""
    session = get_session()
    with session.begin():
        host = _host_get(session, host_id)
        host.update(values)
        host.save(session=session)
    return host_get(host_id)
def host_destroy(host_id):
    """Delete a compute host.

    :raises ClimateDBNotFound: when the id does not exist.
    """
    session = get_session()
    with session.begin():
        host = _host_get(session, host_id)
        if not host:
            # raise not found error
            raise db_exc.ClimateDBNotFound(id=host_id, model='Host')
        session.delete(host)
# ComputeHostExtraCapability
def _host_extra_capability_get(session, host_extra_capability_id):
query = model_query(models.ComputeHostExtraCapability, session)
return query.filter_by(id=host_extra_capability_id).first()
def host_extra_capability_get(host_extra_capability_id):
return _host_extra_capability_get(get_session(),
host_extra_capability_id)
def _host_extra_capability_get_all_per_host(session, host_id):
    """Return a query over all extra capabilities attached to one host."""
    return model_query(
        models.ComputeHostExtraCapability, session
    ).filter_by(computehost_id=host_id)
def host_extra_capability_get_all_per_host(host_id):
    """Return the list of extra capabilities for the given host."""
    query = _host_extra_capability_get_all_per_host(get_session(), host_id)
    return query.all()
def host_extra_capability_create(values):
    """Insert a new ComputeHostExtraCapability row and return it.

    The caller's dict is not mutated.

    :raises db_exc.ClimateDBDuplicateEntry: on a unique-constraint clash.
    """
    capability = models.ComputeHostExtraCapability()
    capability.update(dict(values))
    session = get_session()
    with session.begin():
        try:
            capability.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # Report which columns collided.
            raise db_exc.ClimateDBDuplicateEntry(
                model=capability.__class__.__name__,
                columns=e.columns)
    return host_extra_capability_get(capability.id)
def host_extra_capability_update(host_extra_capability_id, values):
    """Apply ``values`` to an existing extra capability and return it.

    :param host_extra_capability_id: primary key of the row to update.
    :param values: dict of column values to apply.
    :raises db_exc.ClimateDBNotFound: if the row does not exist.
    """
    session = get_session()
    with session.begin():
        host_extra_capability = (
            _host_extra_capability_get(session,
                                       host_extra_capability_id))
        if not host_extra_capability:
            # Mirror host_extra_capability_destroy(): explicit not-found
            # instead of AttributeError on None.
            raise db_exc.ClimateDBNotFound(
                id=host_extra_capability_id,
                model='ComputeHostExtraCapability')
        host_extra_capability.update(values)
        host_extra_capability.save(session=session)
    return host_extra_capability_get(host_extra_capability_id)
def host_extra_capability_destroy(host_extra_capability_id):
    """Delete one ComputeHostExtraCapability row.

    :raises db_exc.ClimateDBNotFound: if the row does not exist.
    """
    session = get_session()
    with session.begin():
        capability = _host_extra_capability_get(
            session, host_extra_capability_id)
        if not capability:
            # Nothing to delete: report the missing row explicitly.
            raise db_exc.ClimateDBNotFound(
                id=host_extra_capability_id,
                model='ComputeHostExtraCapability')
        session.delete(capability)
def host_extra_capability_get_all_per_name(host_id, capability_name):
    """Return the host's extra capabilities matching ``capability_name``."""
    session = get_session()
    with session.begin():
        host_caps = _host_extra_capability_get_all_per_host(session, host_id)
        matching = host_caps.filter_by(capability_name=capability_name)
        return matching.all()
| |
"""GTFS Feed Reader."""
import csv
import zipfile
import collections
import json
import os
import tempfile
import glob
import subprocess
import csv
try:
import unicodecsv
except ImportError:
unicodecsv = None
import util
import entities
import validation
class Feed(object):
    """Read a GTFS feed from a Zip archive or a directory of .txt files.

    Tables are read lazily via read()/iterread() and memoized either by
    primary key (self.by_id, for entity classes with a KEY) or as plain
    lists (self.cache).
    """
    # Classes for each GTFS table.
    FACTORIES = {
        'agency': entities.Agency,
        'routes': entities.Route,
        'trips': entities.Trip,
        'stops': entities.Stop,
        'stop_times': entities.StopTime,
        'shapes': entities.ShapeRow,
        'calendar': entities.ServicePeriod,
        'calendar_dates': entities.ServiceDate,
        'fare_rules': entities.FareRule,
        'fare_attributes': entities.FareAttribute,
        'transfers': entities.Transfer,
        'frequencies': entities.Frequency,
        'feed_info': entities.FeedInfo
    }
    def __init__(self, filename=None, path=None, debug=False):
        """Provide a zip ``filename`` and/or a directory ``path``."""
        self.filename = filename
        self.path = path
        self.debug = debug
        # Tables without a primary key: table name -> list of entities.
        self.cache = {}
        # Tables with a primary key: table name -> {key: entity}.
        self.by_id = {}
        self._shapes = None
        self._zones = None
    def __repr__(self):
        return '<%s %s>'%(self.__class__.__name__, self.filename)
    ##### Read / write #####
    def log(self, msg):
        # Debug-only logging; silent unless debug=True was passed.
        if self.debug:
            print msg
    def _open(self, table):
        """Return an open file object for ``<table>.txt``.

        Looks in ``self.path`` first, then inside the zip archive at
        ``self.filename``; raises KeyError when the table is missing.
        """
        arcname = '%s.txt'%table
        f = None
        zf = None
        if self.path and os.path.exists(os.path.join(self.path, arcname)):
            f = open(os.path.join(self.path, arcname))
        elif self.filename and os.path.exists(self.filename):
            zf = zipfile.ZipFile(self.filename)
            try:
                f = zf.open(arcname)
            except KeyError:
                pass
        elif self.filename and not os.path.exists(self.filename):
            raise KeyError("File not found: %s"%self.filename)
        if not f:
            raise KeyError("File not found in path or zip file: %s"%arcname)
        return f
    def iterread(self, table):
        """Iteratively read data from a GTFS table. Returns namedtuples."""
        self.log('Reading: %s'%table)
        # Entity class
        cls = self.FACTORIES[table]
        f = self._open(table)
        # csv reader
        # Prefer unicodecsv when available; utf-8-sig strips any BOM.
        if unicodecsv:
            data = unicodecsv.reader(f, encoding='utf-8-sig')
        else:
            data = csv.reader(f)
        header = data.next()
        headerlen = len(header)
        ent = collections.namedtuple(
            'EntityNamedTuple',
            map(str, header)
        )
        for row in data:
            if len(row) == 0:
                continue
            # Get rid of extra spaces.
            row = [i.strip() for i in row]
            # pad to length if necessary... :(
            if len(row) < headerlen:
                row += ['']*(headerlen-len(row))
            yield cls.from_row(ent._make(row), self)
        f.close()
    def read(self, table):
        """Read and memoize a full table; returns the list of entities."""
        # Table exists
        if table in self.by_id:
            return self.by_id[table].values()
        if table in self.cache:
            return self.cache[table]
        # Read table
        cls = self.FACTORIES[table]
        key = cls.KEY
        if key:
            # Keyed table: memoize entities by their primary key.
            if table not in self.by_id:
                self.by_id[table] = {}
            t = self.by_id[table]
            for item in self.iterread(table):
                t[item.get(key)] = item
            return t.values()
        # Unkeyed table: memoize as a plain list.
        if table not in self.cache:
            self.cache[table] = []
        t = self.cache[table]
        for item in self.iterread(table):
            t.append(item)
        return t
    def write(self, filename, entities, sortkey=None, columns=None):
        """Write entities out to filename in csv format.
        Note: this doesn't write directly into a Zip archive, because this behavior
        is difficult to achieve with Zip archives. Use make_zip() to create a new
        GTFS Zip archive.
        """
        if os.path.exists(filename):
            raise IOError('File exists: %s'%filename)
        # Make sure we have all the entities loaded.
        if sortkey:
            entities = sorted(entities, key=lambda x:x[sortkey])
        if not columns:
            # Default column set: union of every entity's keys, sorted.
            columns = set()
            for entity in entities:
                columns |= set(entity.keys())
            columns = sorted(columns)
        # Write the csv file
        # NOTE(review): unicodecsv is an optional import and may be None
        # here (see module top) — this would raise AttributeError; confirm
        # whether a csv.writer fallback is intended as in iterread().
        with open(filename, 'wb') as f:
            writer = unicodecsv.writer(f) # , encoding='utf-8-sig'
            writer.writerow(columns)
            for entity in entities:
                writer.writerow([entity.get(column) for column in columns])
    def make_zip(self, filename, files=None, path=None, clone=None):
        """Create a Zip archive.
        Provide any of the following:
        files - A list of files
        path - A directory of .txt files
        clone - Copy any files from a zip archive not specified above
        Duplicate files will be ignored. The 'files' argument will be used first,
        then files found in the specified 'path', then in the
        specified 'clone' archive.
        """
        if filename and os.path.exists(filename):
            raise IOError('File exists: %s'%filename)
        files = files or []
        # Archive member names already written, to skip duplicates.
        arcnames = []
        if path and os.path.isdir(path):
            files += glob.glob(os.path.join(path, '*.txt'))
        # Write files.
        self.log("Creating zip archive: %s"%filename)
        zf = zipfile.ZipFile(filename, 'a')
        for f in files:
            base = os.path.basename(f)
            if base in arcnames:
                self.log('... skipping: %s'%f)
            else:
                self.log('... adding: %s'%f)
                arcnames.append(base)
                zf.write(f, base)
        # Clone from existing zip archive.
        if clone and os.path.exists(clone):
            zc = zipfile.ZipFile(clone)
            for f in zc.namelist():
                base = os.path.basename(f)
                # Only .txt members are cloned; everything else is ignored.
                if os.path.splitext(base)[-1] != '.txt':
                    pass
                    # self.log('... skipping from clone: %s'%f)
                elif base in arcnames:
                    self.log('... skipping from clone: %s'%f)
                else:
                    self.log('... adding from clone: %s'%f)
                    arcnames.append(base)
                    with zc.open(f) as i:
                        data = i.read()
                    zf.writestr(base, data)
        zf.close()
    def preload(self):
        """Eagerly load keyed tables and link parent/child relationships."""
        # Load tables with primary key
        for table,cls in self.FACTORIES.items():
            if not cls.KEY:
                continue
            try:
                self.read(table)
            except KeyError:
                pass
        # A sole agency acts as the default for routes without agency_id.
        default_agency_id = None
        agencies = self.agencies()
        if len(agencies) == 1:
            default_agency_id = agencies[0].get('agency_id')
        for route in self.routes():
            route.add_parent(self.agency(route.get('agency_id') or default_agency_id))
        for trip in self.trips():
            trip.add_parent(self.route(trip.get('route_id')))
        # Load stop_times
        for stoptime in self.read('stop_times'):
            stoptime.add_parent(self.trip(stoptime.get('trip_id')))
            stoptime.add_child(self.stop(stoptime.get('stop_id')))
    ##### Keyed entities #####
    def _entities(self, table):
        # All entities of one table.
        return self.read(table)
    def _entity(self, table, key):
        # One entity by primary key; raises KeyError if absent.
        if table not in self.by_id:
            self.read(table)
        return self.by_id[table][key]
    # Convenience accessors, one pair per GTFS table.
    def agencies(self): return self._entities('agency')
    def agency(self, key): return self._entity('agency', key)
    def routes(self): return self._entities('routes')
    def route(self, key): return self._entity('routes', key)
    def stops(self): return self._entities('stops')
    def stop(self, key): return self._entity('stops', key)
    def trips(self): return self._entities('trips')
    def trip(self, key): return self._entity('trips', key)
    def fares(self): return self._entities('fare_attributes')
    def fare(self, key): return self._entity('fare_attributes', key)
    def fare_rules(self): return self._entities('fare_rules')
    def service_periods(self): return self._entities('calendar')
    def service_period(self, key): return self._entity('calendar', key)
    def service_exceptions(self): return self._entities('calendar_dates')
    def stop_times(self): return self._entities('stop_times')
    def transfers(self): return self._entities('transfers')
    def frequencies(self): return self._entities('frequencies')
    def feed_infos(self): return self._entities('feed_info')
    # backwards compat
    def serviceperiods(self): return self.service_periods()
    def serviceperiod(self, key): return self.service_period(key)
    def shapes(self):
        """Return the route shapes as a dictionary keyed by shape_id."""
        # Todo: Cache?
        if self._shapes:
            return self._shapes
        # Group together by shape_id
        self.log("Generating shapes...")
        ret = collections.defaultdict(entities.ShapeLine)
        for point in self.read('shapes'):
            ret[point['shape_id']].add_child(point)
        self._shapes = ret
        return self._shapes
    def shape_line(self, key):
        # Single shape by shape_id; builds the shape cache on first use.
        if self._shapes:
            return self._shapes[key]
        return self.shapes()[key]
    ##### Other methods #####
    def dates(self):
        """Return [earliest start, latest end] across calendar periods."""
        data = self.read('calendar')
        return [
            min(i.start() for i in data),
            max(i.end() for i in data)
        ]
    ##### Validation #####
    def validate(self, validator=None):
        """Validate required and optional tables; returns the validator."""
        validator = validation.make_validator(validator)
        self.log('Loading...')
        self.preload()
        # required
        required = [
            'agency',
            'stops',
            'routes',
            'trips',
            'stop_times',
            'calendar'
        ]
        for f in required:
            self.log("Validating required file: %s"%f)
            data = self.read(f)
            for i in data:
                i.validate(validator=validator)
                i.validate_feed(validator=validator)
        # optional
        optional = [
            'calendar_dates',
            'fare_attributes',
            'fare_rules',
            'shapes',
            'frequencies',
            'transfers',
            'feed_info'
        ]
        for f in optional:
            self.log("Validating optional file: %s"%f)
            try:
                data = self.read(f)
            # Missing optional tables are fine; validate nothing.
            except KeyError, e:
                data = []
            for i in data:
                i.validate(validator=validator)
                i.validate_feed(validator=validator)
        return validator
    def validate_feedvalidator(
        self,
        validator=None,
        feedvalidator=None,
        report='report.html'
    ):
        """Run the external feedvalidator.py tool against this feed.

        Raises validation.ValidationError when the tool exits non-zero.
        """
        feedvalidator = feedvalidator or 'feedvalidator.py'
        validator = validation.make_validator(validator)
        p = subprocess.Popen(
            [
                feedvalidator,
                '--memory_db',
                '--noprompt',
                '--output',
                report,
                self.filename
            ],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        stdout, stderr = p.communicate()
        returncode = p.returncode
        with validator(self):
            # NOTE(review): `errors` is collected but never used — confirm
            # whether the parsed ERROR lines should be reported through the
            # validator instead of being dropped.
            errors = [i for i in stdout.split('\n') if i.startswith('ERROR:')]
        if returncode:
            raise validation.ValidationError('Errors reported by feedvalidator.py; see %s for details'%report)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
# Signature of the optional ``cls`` response hook accepted by every operation:
# (pipeline_response, deserialized, response_headers) -> Any.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualRouterPeeringsOperations:
    """VirtualRouterPeeringsOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2019_11_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # Class-level alias so model classes are reachable from the operation group.
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    # Sends the initial DELETE request; long-running-operation polling is
    # orchestrated by begin_delete().
    async def _delete_initial(
        self,
        resource_group_name: str,
        virtual_router_name: str,
        peering_name: str,
        **kwargs: Any
    ) -> None:
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-11-01"
        accept = "application/json"
        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200/202/204 are all acceptable initial responses for an async delete.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings/{peeringName}'}  # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        virtual_router_name: str,
        peering_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes the specified peering from a Virtual Router.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_router_name: The name of the Virtual Router.
        :type virtual_router_name: str
        :param peering_name: The name of the peering.
        :type peering_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
        Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a saved state.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                virtual_router_name=virtual_router_name,
                peering_name=peering_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Delete returns no body; only the optional cls hook sees the response.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # Delete LROs on this API complete via the Location header.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings/{peeringName}'}  # type: ignore
    async def get(
        self,
        resource_group_name: str,
        virtual_router_name: str,
        peering_name: str,
        **kwargs: Any
    ) -> "_models.VirtualRouterPeering":
        """Gets the specified Virtual Router Peering.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_router_name: The name of the Virtual Router.
        :type virtual_router_name: str
        :param peering_name: The name of the Virtual Router Peering.
        :type peering_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: VirtualRouterPeering, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_11_01.models.VirtualRouterPeering
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualRouterPeering"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-11-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('VirtualRouterPeering', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings/{peeringName}'}  # type: ignore
    # Sends the initial PUT request; polling is handled by begin_create_or_update().
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        virtual_router_name: str,
        peering_name: str,
        parameters: "_models.VirtualRouterPeering",
        **kwargs: Any
    ) -> "_models.VirtualRouterPeering":
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualRouterPeering"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-11-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'VirtualRouterPeering')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 = updated existing peering, 201 = created a new one.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if response.status_code == 200:
            deserialized = self._deserialize('VirtualRouterPeering', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('VirtualRouterPeering', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings/{peeringName}'}  # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        virtual_router_name: str,
        peering_name: str,
        parameters: "_models.VirtualRouterPeering",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.VirtualRouterPeering"]:
        """Creates or updates the specified Virtual Router Peering.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_router_name: The name of the Virtual Router.
        :type virtual_router_name: str
        :param peering_name: The name of the Virtual Router Peering.
        :type peering_name: str
        :param parameters: Parameters supplied to the create or update Virtual Router Peering
        operation.
        :type parameters: ~azure.mgmt.network.v2019_11_01.models.VirtualRouterPeering
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
        Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either VirtualRouterPeering or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_11_01.models.VirtualRouterPeering]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualRouterPeering"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a saved state.
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                virtual_router_name=virtual_router_name,
                peering_name=peering_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Final LRO result is the peering resource itself.
            deserialized = self._deserialize('VirtualRouterPeering', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # Create/update LROs on this API complete via the Azure-AsyncOperation header.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings/{peeringName}'}  # type: ignore
    def list(
        self,
        resource_group_name: str,
        virtual_router_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.VirtualRouterPeeringListResult"]:
        """Lists all Virtual Router Peerings in a Virtual Router resource.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param virtual_router_name: The name of the Virtual Router.
        :type virtual_router_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either VirtualRouterPeeringListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_11_01.models.VirtualRouterPeeringListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualRouterPeeringListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-11-01"
        accept = "application/json"
        # Builds the GET request for the first page or for a next_link page.
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'virtualRouterName': self._serialize.url("virtual_router_name", virtual_router_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds the query string.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        # Unpacks one page into (next_link, async iterator of items).
        async def extract_data(pipeline_response):
            deserialized = self._deserialize('VirtualRouterPeeringListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        # Fetches and validates a single page.
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.Error, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualRouters/{virtualRouterName}/peerings'}  # type: ignore
| |
import datetime
from collections import OrderedDict
from django.conf import settings
from django_remote_forms import logger, widgets
class RemoteField(object):
    """
    A base object for being able to return a Django Form Field as a Python
    dictionary.
    This object also takes into account if there is initial data for the field
    coming in from the form directly, which overrides any initial data
    specified on the field per Django's rules:
    https://docs.djangoproject.com/en/dev/ref/forms/api/#dynamic-initial-values
    """
    def __init__(self, field, form_initial_data=None, field_name=None):
        # field: the django.forms field instance being serialized.
        # form_initial_data: form-level initial value, overrides field.initial.
        self.field_name = field_name
        self.field = field
        self.form_initial_data = form_initial_data
    def as_dict(self):
        """Serialize the wrapped form field into an OrderedDict."""
        field_dict = OrderedDict()
        field_dict['title'] = self.field.__class__.__name__
        field_dict['required'] = self.field.required
        field_dict['label'] = self.field.label
        # Form-level initial data wins over the field's own initial value.
        field_dict['initial'] = self.form_initial_data or self.field.initial
        field_dict['help_text'] = self.field.help_text
        field_dict['error_messages'] = self.field.error_messages
        # Instantiate the Remote Forms equivalent of the widget if possible
        # in order to retrieve the widget contents as a dictionary.
        remote_widget_class_name = 'Remote%s' % self.field.widget.__class__.__name__
        try:
            remote_widget_class = getattr(widgets, remote_widget_class_name)
            remote_widget = remote_widget_class(self.field.widget, field_name=self.field_name)
        # "except Exception, e" is Python-2-only syntax; "as" is valid on
        # Python 2.6+ and Python 3, so use it for forward compatibility.
        except Exception as e:
            logger.warning('Error serializing %s: %s', remote_widget_class_name, str(e))
            widget_dict = {}
        else:
            widget_dict = remote_widget.as_dict()
        field_dict['widget'] = widget_dict
        return field_dict
class RemoteCharField(RemoteField):
    """Adds the char-field length constraints to the base serialization."""
    def as_dict(self):
        serialized = super(RemoteCharField, self).as_dict()
        serialized['max_length'] = self.field.max_length
        serialized['min_length'] = self.field.min_length
        return serialized
class RemoteIntegerField(RemoteField):
    """Adds the numeric bounds to the base serialization."""
    def as_dict(self):
        serialized = super(RemoteIntegerField, self).as_dict()
        serialized['max_value'] = self.field.max_value
        serialized['min_value'] = self.field.min_value
        return serialized
class RemoteFloatField(RemoteIntegerField):
    """Float fields expose exactly the same payload as integer fields."""
    def as_dict(self):
        serialized = super(RemoteFloatField, self).as_dict()
        return serialized
class RemoteDecimalField(RemoteIntegerField):
    """Adds decimal precision info on top of the numeric bounds."""
    def as_dict(self):
        serialized = super(RemoteDecimalField, self).as_dict()
        serialized['max_digits'] = self.field.max_digits
        serialized['decimal_places'] = self.field.decimal_places
        return serialized
class RemoteTimeField(RemoteField):
    """Serializes time/date/datetime fields, rendering any initial value
    as a string using the first available input format."""
    def as_dict(self):
        field_dict = super(RemoteTimeField, self).as_dict()
        field_dict['input_formats'] = self.field.input_formats
        if (field_dict['initial']):
            # Initial may be a callable producing the value lazily.
            if callable(field_dict['initial']):
                field_dict['initial'] = field_dict['initial']()
            # If initial value is datetime then convert it using first available input format
            if (isinstance(field_dict['initial'], (datetime.datetime, datetime.time, datetime.date))):
                if not len(field_dict['input_formats']):
                    # Check datetime before date: datetime.datetime is a
                    # subclass of datetime.date, so the date branch would
                    # otherwise capture datetimes and drop the time part.
                    if isinstance(field_dict['initial'], datetime.datetime):
                        field_dict['input_formats'] = settings.DATETIME_INPUT_FORMATS
                    elif isinstance(field_dict['initial'], datetime.date):
                        field_dict['input_formats'] = settings.DATE_INPUT_FORMATS
                    elif isinstance(field_dict['initial'], datetime.time):
                        field_dict['input_formats'] = settings.TIME_INPUT_FORMATS
                input_format = field_dict['input_formats'][0]
                field_dict['initial'] = field_dict['initial'].strftime(input_format)
        return field_dict
class RemoteDateField(RemoteTimeField):
    """DateField serializer; inherits the RemoteTimeField format handling."""
    def as_dict(self):
        return super(RemoteDateField, self).as_dict()
class RemoteDateTimeField(RemoteTimeField):
    """DateTimeField serializer; inherits the RemoteTimeField format handling."""
    def as_dict(self):
        return super(RemoteDateTimeField, self).as_dict()
class RemoteRegexField(RemoteCharField):
    """RegexField serializer; intentionally omits the compiled pattern."""
    def as_dict(self):
        field_dict = super(RemoteRegexField, self).as_dict()
        # We don't need the pattern object in the frontend (and a compiled
        # regex is not JSON-serializable anyway).
        # field_dict['regex'] = self.field.regex
        return field_dict
class RemoteEmailField(RemoteCharField):
    """EmailField serializer; no extra attributes beyond RemoteCharField."""
    def as_dict(self):
        return super(RemoteEmailField, self).as_dict()
class RemoteFileField(RemoteField):
    """Serializer for FileField; adds the max_length restriction."""
    def as_dict(self):
        field_dict = super(RemoteFileField, self).as_dict()
        field_dict['max_length'] = self.field.max_length
        return field_dict
class RemoteImageField(RemoteFileField):
    """ImageField serializer; no extra attributes beyond RemoteFileField."""
    def as_dict(self):
        return super(RemoteImageField, self).as_dict()
class RemoteURLField(RemoteCharField):
    """URLField serializer; no extra attributes beyond RemoteCharField."""
    def as_dict(self):
        return super(RemoteURLField, self).as_dict()
class RemoteBooleanField(RemoteField):
    """BooleanField serializer; no extra attributes beyond RemoteField."""
    def as_dict(self):
        return super(RemoteBooleanField, self).as_dict()
class RemoteNullBooleanField(RemoteBooleanField):
    """NullBooleanField serializer; identical to RemoteBooleanField."""
    def as_dict(self):
        return super(RemoteNullBooleanField, self).as_dict()
class RemoteChoiceField(RemoteField):
    """Serializer for ChoiceField; flattens choices into value/display dicts."""
    def as_dict(self):
        field_dict = super(RemoteChoiceField, self).as_dict()
        field_dict['choices'] = [
            {'value': choice_value, 'display': choice_label}
            for choice_value, choice_label in self.field.choices
        ]
        return field_dict
class RemoteModelChoiceField(RemoteChoiceField):
    """ModelChoiceField serializer; choices handling comes from the parent."""
    def as_dict(self):
        return super(RemoteModelChoiceField, self).as_dict()
class RemoteTypedChoiceField(RemoteChoiceField):
    """Serializer for TypedChoiceField; adds coercion metadata."""
    def as_dict(self):
        field_dict = super(RemoteTypedChoiceField, self).as_dict()
        field_dict['coerce'] = self.field.coerce
        field_dict['empty_value'] = self.field.empty_value
        return field_dict
class RemoteMultipleChoiceField(RemoteChoiceField):
    """MultipleChoiceField serializer; no extra attributes beyond the parent."""
    def as_dict(self):
        return super(RemoteMultipleChoiceField, self).as_dict()
class RemoteModelMultipleChoiceField(RemoteMultipleChoiceField):
    """ModelMultipleChoiceField serializer; identical to the parent."""
    def as_dict(self):
        return super(RemoteModelMultipleChoiceField, self).as_dict()
class RemoteTypedMultipleChoiceField(RemoteMultipleChoiceField):
    """Serializer for TypedMultipleChoiceField; adds coercion metadata."""
    def as_dict(self):
        field_dict = super(RemoteTypedMultipleChoiceField, self).as_dict()
        field_dict['coerce'] = self.field.coerce
        field_dict['empty_value'] = self.field.empty_value
        return field_dict
class RemoteComboField(RemoteField):
    """Serializer for ComboField; exposes the wrapped sub-fields."""
    def as_dict(self):
        field_dict = super(RemoteComboField, self).as_dict()
        field_dict['fields'] = self.field.fields
        return field_dict
class RemoteMultiValueField(RemoteField):
    """Serializer for MultiValueField; exposes the component sub-fields."""
    def as_dict(self):
        field_dict = super(RemoteMultiValueField, self).as_dict()
        # NOTE(review): raw Field objects are not JSON-serializable as-is;
        # presumably consumers post-process them -- confirm.
        field_dict['fields'] = self.field.fields
        return field_dict
class RemoteFilePathField(RemoteChoiceField):
    """Serializer for FilePathField; adds the filesystem lookup options."""
    def as_dict(self):
        field_dict = super(RemoteFilePathField, self).as_dict()
        field_dict['path'] = self.field.path
        field_dict['match'] = self.field.match
        field_dict['recursive'] = self.field.recursive
        return field_dict
class RemoteSplitDateTimeField(RemoteMultiValueField):
    """Serializer for SplitDateTimeField; adds the per-part input formats."""
    def as_dict(self):
        field_dict = super(RemoteSplitDateTimeField, self).as_dict()
        field_dict['input_date_formats'] = self.field.input_date_formats
        field_dict['input_time_formats'] = self.field.input_time_formats
        return field_dict
class RemoteIPAddressField(RemoteCharField):
    """IPAddressField serializer; no extra attributes beyond RemoteCharField."""
    def as_dict(self):
        return super(RemoteIPAddressField, self).as_dict()
class RemoteSlugField(RemoteCharField):
    """SlugField serializer; no extra attributes beyond RemoteCharField."""
    def as_dict(self):
        return super(RemoteSlugField, self).as_dict()
| |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for the `gcloud feedback` command."""
import os
import re
import urllib
from googlecloudsdk.core import log
from googlecloudsdk.core.console import console_attr_os
# Issue tracker endpoints for the Cloud SDK project.
ISSUE_TRACKER_URL = 'https://code.google.com/p/google-cloud-sdk/issues'
NEW_ISSUE_URL = 'https://code.google.com/p/google-cloud-sdk/issues/entry'
# The new issue URL has a maximum length, so we need to limit the length of
# pre-filled form fields
MAX_URL_LENGTH = 2106
# Comment text shown *before* any stack trace; {formatted_command} names the
# failing gcloud invocation (may be empty).
COMMENT_PRE_STACKTRACE_TEMPLATE = """\
{formatted_command}What steps will reproduce the problem?
What is the expected output? What do you see instead?
Please provide any additional information below.
"""
# Full comment: pre-stacktrace text, then traceback, then `gcloud info`.
COMMENT_TEMPLATE = COMMENT_PRE_STACKTRACE_TEMPLATE + """\
{formatted_traceback}
Installation information:
{gcloud_info}\
"""
# Marker appended wherever content had to be cut to fit MAX_URL_LENGTH.
TRUNCATED_INFO_MESSAGE = '[output truncated]'
# A stack-trace entry is a 'File ...' line plus one line of source.
STACKTRACE_LINES_PER_ENTRY = 2
# Pattern for splitting the traceback into stacktrace and exception.
PARTITION_TRACEBACK_PATTERN = (
    r'(?P<stacktrace>'
    r'Traceback \(most recent call last\):\n'
    r'(?: {2}File ".*", line \d+, in .+\n {4}.+\n)+'
    r')'
    r'(?P<exception>\S.+)')
# One formatted stack-trace entry: the file/line header and the code line.
TRACEBACK_ENTRY_REGEXP = (
    r'File "(?P<file>.*)", line (?P<line>\d+), in (?P<function>.+)\n'
    r'(?P<code_snippet>.+)\n')
# Code snippets longer than this are truncated with an ellipsis.
MAX_CODE_SNIPPET_LENGTH = 40
class CommentHolder(object):
    """Bundle of a formatted issue comment and its constituent pieces.

    Attributes:
      body: str, the full formatted comment.
      pre_stacktrace: str, everything before the stack trace.
      stacktrace: str, the formatted stack trace portion.
      exception: str, the exception line extracted from the traceback.
    """

    def __init__(self, body, pre_stacktrace, stacktrace, exception):
        (self.body, self.pre_stacktrace,
         self.stacktrace, self.exception) = (body, pre_stacktrace,
                                             stacktrace, exception)
def _FormatNewIssueUrl(comment, status='New', summary=''):
    """Build the new-issue URL with pre-filled status/summary/comment fields."""
    query_string = urllib.urlencode({
        'status': status,
        'summary': summary,
        'comment': comment,
    })
    return NEW_ISSUE_URL + '?' + query_string
def OpenInBrowser(url):
    """Open `url` in a new browser tab (side effect only, no return value)."""
    # pylint: disable=g-import-not-at-top
    # Import in here for performance reasons
    import webbrowser
    # pylint: enable=g-import-not-at-top
    webbrowser.open_new_tab(url)
def _UrlEncodeLen(string):
    """Return the length of string when URL-encoded."""
    # urlencode turns a dict into 'key=value'; with a blank key the result is
    # '=<encoded>', so dropping one character removes the '='.
    return len(urllib.urlencode({'': string})) - 1
def _FormatStackTrace(first_entry, rest):
return '\n'.join([first_entry, ' [...]'] + rest) + '\n'
def _ShortenStacktrace(stacktrace, url_encoded_length):
    # pylint: disable=g-docstring-has-escape
    """Cut out the middle entries of the stack trace to a given length.

    Keeps the first entry and as many trailing entries as fit, replacing the
    elided middle with an ' [...]' marker (see _FormatStackTrace).

    Args:
      stacktrace: str, the stacktrace (might be formatted by _FormatTraceback)
          without the leading 'Traceback (most recent call last):' or 'Trace:'
      url_encoded_length: int, the length to shorten the stacktrace to (when
          URL-encoded).

    Returns:
      str, the shortened stacktrace.
    """
    # A stacktrace consists of several entries, each of which is a pair of lines.
    # The first describes the file containing the line of source in the stack
    # trace; the second shows the line of source in the stack trace as it appears
    # in the source.
    stacktrace = stacktrace.strip('\n')
    lines = stacktrace.split('\n')
    # Re-group the flat line list into two-line entries.
    entries = ['\n'.join(lines[i:i+STACKTRACE_LINES_PER_ENTRY]) for i in
               range(0, len(lines), STACKTRACE_LINES_PER_ENTRY)]
    if _UrlEncodeLen(stacktrace) <= url_encoded_length:
        # Already fits; nothing to elide.
        return stacktrace + '\n'
    # Drop entries from the front of the tail until the result fits (always
    # keeping the first entry and at least one trailing entry).
    rest = entries[1:]
    while (_UrlEncodeLen(_FormatStackTrace(entries[0], rest)) >
           url_encoded_length) and len(rest) > 1:
        rest = rest[1:]
    # If we've eliminated the entire middle of the stacktrace and it's still
    # longer than the max allowed length, nothing we can do beyond that. We'll
    # return the short-as-possible stacktrace and let the caller deal with it.
    return _FormatStackTrace(entries[0], rest)
def _ShortenIssueBody(comment, url_encoded_length):
    """Shortens the comment to be at most the given length (URL-encoded).

    Does one of two things:
    (1) If the whole stacktrace and everything before it fits within the
        URL-encoded max length, truncates the remainder of the comment (which
        should include e.g. the output of `gcloud info`).
    (2) Otherwise, chop out the middle of the stacktrace until it fits. (See
        _ShortenStacktrace docstring for an example).
    (3) If the stacktrace cannot be shortened to fit in (2), then revert to (1).
        That is, truncate the comment.

    Args:
      comment: CommentHolder, an object containing the formatted comment for
          inclusion before shortening, and its constituent components
      url_encoded_length: the max length of the comment after shortening (when
          comment is URL-encoded).

    Returns:
      (str, str): the shortened comment and a message containing the parts of the
      comment that were omitted by the shortening process.
    """
    # * critical_info contains all of the critical information: the name of the
    #   command, the stacktrace, and places for the user to provide additional
    #   information.
    # * optional_info is the less essential `gcloud info output`.
    critical_info, middle, optional_info = comment.body.partition(
        'Installation information:\n')
    optional_info = middle + optional_info
    # We need to count the message about truncating the output towards our total
    # character count.
    max_str_len = (url_encoded_length -
                   _UrlEncodeLen(TRUNCATED_INFO_MESSAGE + '\n'))
    truncated_issue_body, remaining = _UrlTruncateLines(comment.body, max_str_len)
    # Case (1) from the docstring
    if _UrlEncodeLen(critical_info) <= max_str_len:
        return truncated_issue_body, remaining
    else:
        # Attempt to shorten stacktrace by cutting out middle
        non_stacktrace_encoded_len = _UrlEncodeLen(
            comment.pre_stacktrace + 'Trace:\n' + comment.exception + '\n' +
            TRUNCATED_INFO_MESSAGE)
        max_stacktrace_len = url_encoded_length - non_stacktrace_encoded_len
        shortened_stacktrace = _ShortenStacktrace(comment.stacktrace,
                                                  max_stacktrace_len)
        critical_info_with_shortened_stacktrace = (
            comment.pre_stacktrace + 'Trace:\n' + shortened_stacktrace +
            comment.exception + '\n' + TRUNCATED_INFO_MESSAGE)
        # The full, uncut stack trace is preserved in the "omitted" text so the
        # user can paste it into the issue manually.
        optional_info_with_full_stacktrace = ('Full stack trace (formatted):\n' +
                                              comment.stacktrace +
                                              comment.exception + '\n\n' +
                                              optional_info)
        # Case (2) from the docstring
        if _UrlEncodeLen(critical_info_with_shortened_stacktrace) <= max_str_len:
            return (critical_info_with_shortened_stacktrace,
                    optional_info_with_full_stacktrace)
        # Case (3) from the docstring
        else:
            return truncated_issue_body, optional_info_with_full_stacktrace
def _UrlTruncateLines(string, url_encoded_length):
    """Truncates the given string to the given URL-encoded length.

    Always cuts at a newline boundary.

    Args:
      string: str, the string to truncate
      url_encoded_length: str, the length to which to truncate

    Returns:
      tuple of (str, str), where the first str is the truncated version of the
      original string and the second str is the remainder.
    """
    remaining = string.split('\n')
    included = []
    # Reserve room for the truncation marker in case it is needed.
    max_str_len = (url_encoded_length -
                   _UrlEncodeLen(TRUNCATED_INFO_MESSAGE + '\n'))
    while (remaining and
           _UrlEncodeLen('\n'.join(included + remaining[:1])) <= max_str_len):
        included.append(remaining.pop(0))
    if remaining:
        included.append(TRUNCATED_INFO_MESSAGE)
    return '\n'.join(included), '\n'.join(remaining)
def GetDivider(text=''):
    """Return a console-width divider (ex: '======================' (etc.)).

    Supports messages (ex: '=== Message Here ===').

    Args:
      text: str, a message to display centered in the divider.

    Returns:
      str, the formatted divider
    """
    label = ' ' + text + ' ' if text else ''
    width, _ = console_attr_os.GetTermSize()
    return label.center(width, '=')
def _CommonPrefix(paths):
"""Given a list of paths, return the longest shared directory prefix.
We want to:
(1) Only split at path boundaries (i.e.
_CommonPrefix(['/foo/bar', '/foo/baz']) => '/foo' , not '/foo/b')
(2) Ignore the path basenames, even when files are identical (i.e.
_CommonPrefix(['/foo/bar'] * 3') => '/foo'
For these reasons, we can't just us os.path.commonprefix.
Args:
paths: list of str, list of path names
Returns:
str, common prefix
"""
prefix = os.path.commonprefix(map(os.path.dirname, paths))
if not prefix:
return prefix
if all([path.startswith(prefix + os.path.sep) for path in paths]):
return prefix + os.path.sep
else:
return os.path.dirname(prefix) + os.path.sep
def _FormatIssueBody(info, log_data=None):
    """Construct a useful issue body with which to pre-populate the issue tracker.

    Args:
      info: InfoHolder, holds information about the Cloud SDK install
      log_data: LogData, parsed log data for a gcloud run

    Returns:
      CommentHolder, a class containing the issue comment body, part of comment
          before stacktrace, the stacktrace portion of the comment, and the
          exception
    """
    install_info = str(info)
    if log_data and log_data.command:
        command_note = 'Issue running command [{0}].\n\n'.format(
            log_data.command)
    else:
        command_note = ''
    pre_stacktrace = COMMENT_PRE_STACKTRACE_TEMPLATE.format(
        formatted_command=command_note)
    formatted_traceback = ''
    formatted_stacktrace = ''
    exception = ''
    if log_data and log_data.traceback:
        # The URL budget is tight (see MAX_URL_LENGTH), so compress the
        # traceback by stripping the runtime root and function names.
        formatted_stacktrace, exception = _FormatTraceback(log_data.traceback)
        formatted_traceback = 'Trace:\n' + formatted_stacktrace + exception
    comment_body = COMMENT_TEMPLATE.format(
        formatted_command=command_note, gcloud_info=install_info.strip(),
        formatted_traceback=formatted_traceback)
    return CommentHolder(comment_body, pre_stacktrace, formatted_stacktrace,
                         exception)
def _StacktraceEntryReplacement(entry):
    """Used in re.sub to format a stacktrace entry to make it more compact.

    File "qux.py", line 13, in run     ===>  qux.py:13
        foo = math.sqrt(bar) / foo           foo = math.sqrt(bar)...

    Args:
      entry: re.MatchObject, the original unformatted stacktrace entry

    Returns:
      str, the formatted stacktrace entry
    """
    raw_snippet = entry.group('code_snippet')
    snippet = raw_snippet.strip()[:MAX_CODE_SNIPPET_LENGTH]
    # Length test deliberately uses the unstripped snippet, matching the
    # original truncation behavior.
    if len(raw_snippet) > MAX_CODE_SNIPPET_LENGTH:
        snippet += '...'
    return '{0}:{1}\n {2}\n'.format(entry.group('file'), entry.group('line'),
                                    snippet)
def _FormatTraceback(traceback):
    """Compacts stack trace portion of traceback and extracts exception.

    Args:
      traceback: str, the original unformatted traceback

    Returns:
      tuple of (str, str) where the first str is the formatted stack trace and the
      second str is exception.
    """
    # Separate stacktrace and exception
    match = re.search(PARTITION_TRACEBACK_PATTERN, traceback)
    if not match:
        # Unrecognized shape: return the input untouched, no exception found.
        return traceback, ''
    stacktrace = match.group('stacktrace')
    exception = match.group('exception')
    # Strip trailing whitespace.
    formatted_stacktrace = '\n'.join(line.strip() for line in
                                     stacktrace.splitlines())
    formatted_stacktrace += '\n'
    # Shorten file paths by removing the shared install-root prefix.
    stacktrace_files = re.findall(r'File "(.*)"', stacktrace)
    common_prefix = _CommonPrefix(stacktrace_files)
    sep = os.path.sep
    # Strip out lib/googlecloudsdk
    formatted_stacktrace = formatted_stacktrace.replace(
        common_prefix + 'lib' + sep + 'googlecloudsdk' + sep, '')
    formatted_stacktrace = formatted_stacktrace.replace(
        sep + 'lib' + sep + 'googlecloudsdk' + sep, sep)
    # Strip out lib/third_party
    formatted_stacktrace = formatted_stacktrace.replace(
        common_prefix + 'lib' + sep + 'third_party' + sep, '')
    formatted_stacktrace = formatted_stacktrace.replace(
        sep + 'lib' + sep + 'third_party' + sep, sep)
    # Strip out ./
    formatted_stacktrace = formatted_stacktrace.replace(common_prefix + '.' + sep,
                                                        '')
    formatted_stacktrace = formatted_stacktrace.replace(sep + '.' + sep, sep)
    # Strip out common prefix
    formatted_stacktrace = formatted_stacktrace.replace(common_prefix, '')
    # Make each stack frame entry more compact
    formatted_stacktrace = re.sub(TRACEBACK_ENTRY_REGEXP,
                                  _StacktraceEntryReplacement,
                                  formatted_stacktrace)
    formatted_stacktrace = formatted_stacktrace.replace(
        'Traceback (most recent call last):\n', '')
    return formatted_stacktrace, exception
def OpenNewIssueInBrowser(info, log_data):
    """Opens a new tab in the web browser to the new issue page for Cloud SDK.

    The page will be pre-populated with relevant information.

    Args:
      info: InfoHolder, the data from of `gcloud info`
      log_data: LogData, parsed representation of a recent log
    """
    comment = _FormatIssueBody(info, log_data)
    url = _FormatNewIssueUrl(comment.body)
    if len(url) > MAX_URL_LENGTH:
        # The prefilled form would exceed the tracker's URL limit: shorten the
        # comment and print the omitted portion so the user can paste it in.
        max_info_len = MAX_URL_LENGTH - len(_FormatNewIssueUrl(''))
        truncated, remaining = _ShortenIssueBody(comment, max_info_len)
        log.warn('Truncating included information. '
                 'Please consider including the remainder:')
        divider_text = 'TRUNCATED INFORMATION (PLEASE CONSIDER INCLUDING)'
        log.status.Print(GetDivider(divider_text))
        log.status.Print(remaining.strip())
        log.status.Print(GetDivider('END ' + divider_text))
        log.warn('The output of gcloud info is too long to pre-populate the '
                 'new issue form.')
        log.warn('Please consider including the remainder (above).')
        url = _FormatNewIssueUrl(truncated)
    OpenInBrowser(url)
    log.status.Print('Opening your browser to a new Google Cloud SDK issue.')
    log.status.Print("If your browser doesn't open, please file an issue: " +
                     ISSUE_TRACKER_URL)
| |
# Copyright 2014-2016 Presslabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
from datetime import datetime
import collections
import os
import subprocess
import time
import pytest
from six import string_types
class Sh(object):
    """Tiny fluent shell runner: `Sh().git.log("--oneline")`.

    Attribute access appends words to a pending command line; calling the
    object appends the positional args, runs the command through the shell
    in `cwd`, resets the pending command, and returns decoded stdout.
    """

    def __init__(self, cwd=None):
        self.command = ""
        self.cwd = cwd

    def __getattr__(self, item):
        self.command += item + " "
        return self

    def __call__(self, *args, **kwargs):
        full_command = self.command + " ".join(args)
        self.command = ""
        process = subprocess.Popen(
            full_command, shell=True, stdout=subprocess.PIPE, cwd=self.cwd
        )
        return process.stdout.read().decode()
class pull(object):
    """Context manager that runs `git pull origin master` on entry.

    Exit is a no-op; the pull happens before the managed block executes.
    """

    def __init__(self, sh):
        self.sh = sh

    def __enter__(self):
        self.sh.git.pull("origin", "master")

    def __exit__(self, exc_type, exc_val, exc_tb):
        return None
class BaseTest(object):
    """Common environment wiring and git helpers for gitfs integration tests."""

    def setup(self):
        # All paths come from the test environment; REMOTE points at the
        # upstream repository that `self.sh` operates on.
        self.mount_path = "{}".format(os.environ["MOUNT_PATH"])
        self.repo_name = os.environ["REPO_NAME"]
        self.repo_path = os.environ["REPO_PATH"]
        self.current_path = "%s/current" % self.mount_path
        self.remote_repo_path = os.environ["REMOTE"]
        self.sh = Sh(self.remote_repo_path)
        self.last_commit_hash = self.commit_hash()

    @property
    def today(self):
        """Today's date as YYYY-MM-DD."""
        return datetime.now().strftime("%Y-%m-%d")

    def commit_hash(self, index=0):
        """Full hash of the commit `index` steps behind HEAD."""
        hashes = self.sh.git.log("--pretty=%H").splitlines()
        return hashes[index]

    def commit_message(self, index=0):
        """Message of the commit `index` steps behind HEAD."""
        messages = self.sh.git.log("--pretty=%B").splitlines()
        return messages[index]

    def get_commits_by_date(self, date=None):
        """Return '<time-with-dashes>-<short hash>' ids for commits on `date`."""
        if date is None:
            date = self.today
        raw_lines = self.sh.git.log(
            "--before",
            '"%s 23:59:59"' % date,
            "--after",
            '"%s 00:00:00"' % date,
            '--pretty="%ai %H"',
        ).splitlines()
        commits = []
        for raw_line in raw_lines:
            tokens = raw_line.split()
            commits.append("%s-%s" % (tokens[1].replace(":", "-"), tokens[3][:10]))
        return commits

    def get_commit_dates(self):
        """Distinct short dates (unordered) on which commits exist."""
        dates = self.sh.git.log("--pretty=%ad", "--date=short").splitlines()
        return list(set(dates))

    def assert_commit_message(self, message):
        assert message == self.commit_message()

    def assert_new_commit(self, steps=1):
        # Walk back from HEAD to the hash recorded at setup time, then assert
        # exactly `steps` new commits appeared; finally re-sync the marker.
        behind = 0
        while self.commit_hash(behind) != self.last_commit_hash:
            behind += 1
        self.last_commit_hash = self.commit_hash(0)
        assert behind == steps

    def assert_file_content(self, file_path, content):
        with open(self.repo_path + "/" + file_path) as handle:
            assert handle.read() == content
class GitFSLog(object):
    """Tail-like reader over a non-blocking gitfs log file descriptor.

    Complete lines are buffered in `line_buffer`; a trailing chunk without a
    newline is held in `_partial_line` until its remainder arrives.
    """

    def __init__(self, file_descriptor):
        # file_descriptor: os-level fd, expected to be in non-blocking mode.
        self._partial_line = None
        self.line_buffer = collections.deque()
        self.file_descriptor = file_descriptor

    def _read_data(self):
        """Pull available bytes off the fd into line_buffer.

        Returns True if anything was read, False otherwise.
        """
        # file should be opened in non-blocking mode, so this will
        # return None if it can't read any data
        # NOTE(review): on Python 3 a non-blocking read with no pending data
        # raises BlockingIOError rather than returning empty -- confirm.
        data = os.read(self.file_descriptor, 2048).decode().splitlines(True)
        if not data:
            return False
        if self._partial_line:
            # Stitch the previously held partial line onto the new chunk.
            data[0] = self._partial_line + data[0]
        if not data[-1].endswith("\n"):
            self._partial_line = data[-1]
            data = data[:-1]  # discard the partial line
        else:
            self._partial_line = None
        self.line_buffer.extend(data)
        # NOTE(review): if only a partial line was read, this returns True
        # with an empty buffer, and _get_line's popleft would raise -- confirm.
        return True

    def clear(self):
        """Discards any logs produced so far."""
        # seek to the end of the file, since we want to discard old messages
        os.lseek(self.file_descriptor, 0, os.SEEK_END)
        self._partial_line = None
        self.line_buffer = collections.deque()

    def __call__(self, expected, **kwargs):
        """Returns a context manager so you can wrap operations with expected
        log output.

        Example usage:
            with gitfs_log("Expected log output"):
                do_operation_that_produces_expected_log_output()
        """
        @contextmanager
        def log_context(gitfs_log):
            gitfs_log.clear()
            yield
            # A single string waits for one match; any other iterable waits
            # for each entry in order.
            if isinstance(expected, string_types):
                gitfs_log.expect(expected, **kwargs)
            else:
                gitfs_log.expect_multiple(expected, **kwargs)
        return log_context(self)

    def _get_line(self, timeout, pollfreq=0.01):
        """Blocks until it can return a line. Returns None if it timed out."""
        if self.line_buffer:
            # got buffered lines, consume from these first
            return self.line_buffer.popleft()
        elapsed = 0
        while elapsed < timeout:
            if self._read_data():
                return self.line_buffer.popleft()
            time.sleep(pollfreq)
            elapsed += pollfreq
        return None

    def expect(self, expected, timeout=10):
        """Blocks until `expected` is found in a line of the stream,
        or until timeout is reached.

        Raises:
            AssertionError: if no matching line appears within `timeout`.
        """
        started = time.time()
        elapsed = 0
        while elapsed < timeout:
            line = self._get_line(timeout=(timeout - elapsed))
            if line is None:
                break  # timed out waiting for line
            elif expected in line:
                return
            elapsed = time.time() - started
        raise AssertionError(
            "Timed out waiting for '{}' in the stream".format(expected)
        )

    def expect_multiple(self, expected, *args, **kwargs):
        """Blocks until all `expected` strings are found in the stream, in the
        order they were passed.
        """
        for exp in expected:
            self.expect(exp, *args, **kwargs)
@pytest.fixture(scope="session")
def gitfs_log():
    # Session-scoped reader over the gitfs log; O_NONBLOCK so reads never
    # stall the test run.
    # NOTE(review): os.open with only O_NONBLOCK implies read-only
    # (O_RDONLY == 0) and requires "log.txt" to already exist -- confirm.
    return GitFSLog(os.open("log.txt", os.O_NONBLOCK))
| |
"""Create web2py model (python code) to represent MS SQL Server tables.
Features:
* Uses ANSI Standard INFORMATION_SCHEMA (might work with other RDBMS)
* Detects legacy "keyed" tables (not having an "id" PK)
* Handles 'funny' column names. web2py requires all column names be valid python identifiers. This script uses rname
* for column names that have spaces or are otherwise invalid python identifiers.
* Connects directly to running databases, no need to do a SQL dump
* Handles notnull, unique and referential constraints
* Detects most common datatypes and default values
* Supports running from the command line as well as from an IDE's debug menu. See the COMMAND_LINE_MODE constant below
* for more info.
Requirements:
* Needs pyodbc python connector
Created by Kyle Flanagan. Based on a script by Mariano Reingart which was
based on a script to "generate schemas from dbs" (mysql) by Alexandre Andrade
"""
_author__ = "Kyle Flanagan <kyleflanagan@gmail.com>"
HELP = """
USAGE: extract_mssql_models db host port user passwd
Call with SQL Server database connection parameters,
web2py model will be printed on standard output.
EXAMPLE: python extract_mssql_models.py mydb localhost 3306 kflanaga pass
or
python extract_mssql_models.py mydb localhost 3306 kflanaga pass > db_model.py
"""
# Config options
DEBUG = False  # print debug messages to STDERR
# Schema whose tables are exported.
SCHEMA = 'dbo'
COMMAND_LINE_MODE = True  # running from command prompt. Disable to specify variables and use in IDE
# Only specify values below if not running from command line
DB = None
HOST = None
USER = None
PASSWD = None
PORT = None
# Constant for Field keyword parameter order (and filter):
KWARGS = ('type', 'length', 'default', 'required', 'ondelete',
          'notnull', 'unique', 'label', 'comment', 'rname')
import sys
import re
# This is from pydal/helpers/regex.py as of 2016-06-16
# Use this to recognize if a field name need to have an rname representation
REGEX_VALID_TB_FLD = re.compile(r'^[^\d_][_0-9a-zA-Z]*\Z')
# For replacing invalid characters in field names
INVALID_CHARS = re.compile(r'[^a-zA-Z0-9_]')
def get_valid_column_name(field):
    """Return a valid column name that follows Python's rules for identifiers,
    which is what web2py requires for column names. Replaces invalid characters
    with underscores and a leading digit with its English word."""
    if REGEX_VALID_TB_FLD.match(field):
        return field
    # A leading digit is illegal in identifiers; spell it out instead.
    if re.match(r'^[0-9]', field):
        numbers = ['Zero', 'One', 'Two', 'Three', 'Four',
                   'Five', 'Six', 'Seven', 'Eight', 'Nine']
        field = numbers[int(field[0])] + field[1:]
    return INVALID_CHARS.sub('_', field)
def query(conn, sql, *args):
    "Execute a SQL query and return rows as a list of dicts"
    # SECURITY NOTE(review): `sql % args` builds SQL via string interpolation.
    # Inputs here are trusted schema metadata, but this is injectable if ever
    # fed user input -- prefer parameterized cur.execute(sql, args).
    cur = conn.cursor()
    ret = []
    try:
        if DEBUG: print >> sys.stderr, "QUERY: ", sql % args
        cur.execute(sql % args)
        for row in cur:
            dic = {}
            # Pair each value with its column name from the cursor description.
            for i, value in enumerate(row):
                field = cur.description[i][0]
                dic[field] = value
            if DEBUG: print >> sys.stderr, "RET: ", dic
            ret.append(dic)
        return ret
    finally:
        # Always release the cursor, even on query failure.
        cur.close()
def get_tables(conn, schema=SCHEMA):
    "List table names in a given schema"
    sql = """SELECT table_name FROM information_schema.tables
    WHERE table_schema = '%s'
    ORDER BY table_name"""
    return [record['table_name'] for record in query(conn, sql, schema)]
def get_fields(conn, table):
    "Retrieve field list for a given table"
    # Returns one dict per column, in declaration order, with the metadata
    # consumed by define_field().
    if DEBUG: print >> sys.stderr, "Processing TABLE", table
    rows = query(conn, """
    SELECT column_name, data_type,
    is_nullable,
    character_maximum_length,
    numeric_precision, numeric_precision_radix, numeric_scale,
    column_default
    FROM information_schema.columns
    WHERE table_name='%s'
    ORDER BY ordinal_position""", table)
    return rows
def define_field(conn, table, field, pks):
    """Determine field type, default value, references, etc.

    Maps one INFORMATION_SCHEMA column row (from get_fields) onto a dict of
    web2py Field keyword arguments (see KWARGS for the emission order).

    Args:
        conn: open DB connection (used to look up foreign keys)
        table: str, table being processed
        field: dict, one column row from get_fields()
        pks: list of str, primary-key column names of `table`

    Returns:
        dict mapping Field kwarg name -> literal source text for that kwarg.

    Raises:
        RuntimeError: on an unsupported data type or default value.
    """
    f = {}
    ref = references(conn, table, field['column_name'])
    if ref:
        f.update(ref)
    elif field['column_default'] and \
            field['column_default'].startswith("nextval") and \
            field['column_name'] in pks:
        # Serial/auto-increment primary key.
        f['type'] = "'id'"
    elif field['data_type'].startswith('character'):
        f['type'] = "'string'"
        if field['character_maximum_length']:
            f['length'] = field['character_maximum_length']
    elif field['data_type'] in ('text', 'ntext'):
        f['type'] = "'text'"
    elif field['data_type'] in ('boolean', 'bit'):
        f['type'] = "'boolean'"
    elif field['data_type'] in ('tinyint', 'smallint', 'bigint', 'int'):
        f['type'] = "'integer'"
    elif field['data_type'] in ('real', 'float'):
        f['type'] = "'double'"
    elif field['data_type'] in ('datetime', 'datetime2', 'smalldatetime'):
        f['type'] = "'datetime'"
    elif field['data_type'] in ('timestamp',):
        f['type'] = "'datetime'"
        f['default'] = "request.now"
        f['update'] = "request.now"
    elif field['data_type'] in ('date',):
        f['type'] = "'date'"
    elif field['data_type'] in ('time',):
        f['type'] = "'time'"
    elif field['data_type'] in ('numeric', 'money', 'smallmoney', 'decimal'):
        f['type'] = "'decimal'"
        f['precision'] = field['numeric_precision']
        f['scale'] = field['numeric_scale'] or 0
    elif field['data_type'] in ('binary', 'varbinary', 'image'):
        f['type'] = "'blob'"
    elif field['data_type'] in ('point', 'lseg', 'polygon', 'unknown', 'USER-DEFINED', 'sql_variant'):
        f['type'] = ""  # unsupported?
    # BUGFIX: also accept the correctly spelled 'uniqueidentifier' SQL Server
    # type; the original list only contained the misspelled variant.
    elif field['data_type'] in ('varchar', 'char', 'nchar', 'nvarchar',
                                'uniqueidentifer', 'uniqueidentifier'):
        f['type'] = "'string'"
    else:
        raise RuntimeError("Data Type not supported: %s " % str(field))
    try:
        if field['column_default']:
            if field['column_default'] == "now()":
                d = "request.now"
            elif field['column_default'] == "true":
                d = "True"
            elif field['column_default'] == "false":
                d = "False"
            else:
                # NOTE: eval of schema metadata -- trusted input only.
                d = repr(eval(field['column_default']))
            f['default'] = str(d)
    except (ValueError, SyntaxError):
        # Unparseable default: silently skip it (best effort).
        pass
    except Exception:
        raise RuntimeError("Default unsupported '%s'" % field['column_default'])
    # BUGFIX: INFORMATION_SCHEMA reports is_nullable as the strings
    # 'YES'/'NO', both truthy, so the original `not field['is_nullable']`
    # could never fire; compare explicitly.
    if field['is_nullable'] == 'NO':
        f['notnull'] = "True"
    # For field names that are not valid python identifiers, we need to add a
    # reference to their actual name in the back end database
    if not REGEX_VALID_TB_FLD.match(field['column_name']):
        f['rname'] = "'[%s]'" % field['column_name']
    return f
def is_unique(conn, table, field):
    "Find unique columns (incomplete support)"
    rows = query(conn, """
    SELECT c.column_name
    FROM information_schema.table_constraints t
    INNER JOIN information_schema.constraint_column_usage c
    ON (t.CONSTRAINT_CATALOG = c.CONSTRAINT_CATALOG
    AND t.CONSTRAINT_NAME = c.CONSTRAINT_NAME
    AND t.CONSTRAINT_SCHEMA = c.CONSTRAINT_SCHEMA
    AND t.TABLE_CATALOG = c.TABLE_CATALOG
    AND t.TABLE_NAME = c.TABLE_NAME
    AND t.TABLE_SCHEMA = c.TABLE_SCHEMA)
    WHERE t.table_name='%s'
    AND c.column_name='%s'
    AND t.constraint_type='UNIQUE'
    ;""", table, field['column_name'])
    # Same truth value as the old `rows and True or False` idiom.
    return bool(rows)
def primarykeys(conn, table):
    "Find primary keys"
    # Returns the list of column names participating in the table's
    # PRIMARY KEY constraint (may be empty for keyless tables).
    rows = query(conn, """
    SELECT c.column_name
    FROM information_schema.table_constraints t
    INNER JOIN information_schema.constraint_column_usage c
    ON (t.CONSTRAINT_CATALOG = c.CONSTRAINT_CATALOG
    AND t.CONSTRAINT_NAME = c.CONSTRAINT_NAME
    AND t.CONSTRAINT_SCHEMA = c.CONSTRAINT_SCHEMA
    AND t.TABLE_CATALOG = c.TABLE_CATALOG
    AND t.TABLE_NAME = c.TABLE_NAME
    AND t.TABLE_SCHEMA = c.TABLE_SCHEMA)
    WHERE t.table_name='%s'
    AND t.constraint_type='PRIMARY KEY'
    ;""", table)
    return [row['column_name'] for row in rows]
def references(conn, table, field):
    "Find a FK (fails if multiple)"
    # Step 1: find the FK constraint this column participates in.
    rows1 = query(conn, """
    SELECT k.table_name, k.column_name, k.constraint_name,
    r.update_rule, r.delete_rule, k.ordinal_position
    FROM information_schema.key_column_usage k
    INNER JOIN information_schema.referential_constraints r
    ON (k.CONSTRAINT_CATALOG = r.CONSTRAINT_CATALOG
    AND k.CONSTRAINT_NAME = r.CONSTRAINT_NAME
    AND k.CONSTRAINT_SCHEMA = r.CONSTRAINT_SCHEMA)
    INNER JOIN information_schema.table_constraints t
    ON (r.CONSTRAINT_CATALOG = t.CONSTRAINT_CATALOG
    AND r.CONSTRAINT_NAME = t.CONSTRAINT_NAME
    AND r.CONSTRAINT_SCHEMA = t.CONSTRAINT_SCHEMA)
    WHERE k.table_name='%s'
    AND k.column_name='%s'
    AND t.constraint_type='FOREIGN KEY'
    ;""", table, field)
    if len(rows1) == 1:
        # Step 2: resolve which table/column the constraint points at.
        rows2 = query(conn, """
        SELECT table_name, column_name, *
        FROM information_schema.constraint_column_usage
        WHERE constraint_name='%s'
        """, rows1[0]['constraint_name'])
        row = None
        if len(rows2) > 1:
            # Composite key: pick the referenced column at our ordinal slot;
            # `keyed` marks a legacy table.column style reference.
            row = rows2[int(rows1[0]['ordinal_position']) - 1]
            keyed = True
        if len(rows2) == 1:
            row = rows2[0]
            keyed = False
        if row:
            if keyed:  # THIS IS BAD, DON'T MIX "id" and primarykey!!!
                ref = {'type': "'reference %s.%s'" % (row['table_name'],
                                                      row['column_name'])}
            else:
                ref = {'type': "'reference %s'" % (row['table_name'],)}
            if rows1[0]['delete_rule'] != "NO ACTION":
                ref['ondelete'] = repr(rows1[0]['delete_rule'])
            return ref
        elif rows2:
            raise RuntimeError("Unsupported foreign key reference: %s" %
                               str(rows2))
    elif rows1:
        # More than one FK constraint on this column is not supported.
        raise RuntimeError("Unsupported referential constraint: %s" %
                           str(rows1))
    # Implicitly returns None when the column has no FK constraint.
def define_table(conn, table):
    "Output single table definition"
    # Emits a complete web2py `db.define_table('<table>', ...)` call on
    # stdout, one Field(...) per column.  Python 2 print statements.
    fields = get_fields(conn, table)
    pks = primarykeys(conn, table)
    print "db.define_table('%s'," % (table,)
    for field in fields:
        fname = field['column_name']
        fdef = define_field(conn, table, field, pks)
        if fname not in pks and is_unique(conn, table, field):
            fdef['unique'] = "True"
        if fdef['type'] == "'id'" and fname in pks:
            # An 'id' field is implicitly the key; drop it from the
            # explicit primarykey list.
            pks.pop(pks.index(fname))
        print "    Field('%s', %s)," % (get_valid_column_name(fname),
            ', '.join(["%s=%s" % (k, fdef[k]) for k in KWARGS
                       if k in fdef and fdef[k]]))
    if pks:
        print "    primarykey=[%s]," % ", ".join(["'%s'" % pk for pk in pks])
    print "    migrate=migrate)"
    print
def define_db(conn, db, host, port, user, passwd):
    "Output database definition (model)"
    # Prints the DAL(...) connection line followed by one define_table
    # block per table.  '@' and ':' in credentials are percent-encoded
    # because decode_credentials=True makes the DAL URL-decode them.
    dal = 'db = DAL("mssql4://%s:%s@%s:%s/%s", pool_size=10, decode_credentials=True)'
    print dal % (
        user.replace('@', '%40').replace(':', '%3A'), passwd.replace('@', '%40').replace(':', '%3A'), host, port, db)
    print
    print "migrate = False"
    print
    for table in get_tables(conn):
        define_table(conn, table)
if __name__ == "__main__":
    # Entry point: read connection settings either from argv or from
    # module-level constants, connect via ODBC, and emit the model.
    if len(sys.argv) < 6 and COMMAND_LINE_MODE:
        print HELP
    else:
        # Parse arguments from command line:
        if COMMAND_LINE_MODE:
            db, host, port, user, passwd = sys.argv[1:6]
        else:
            db = DB
            host = HOST
            user = USER
            passwd = PASSWD
            port = PORT
        # Make the database connection (change driver if required)
        import pyodbc
        # cnn = pyodbc.connect(database=db, host=host, port=port,
        #                      user=user, password=passwd,
        #                      )
        # The doubled braces {{ }} are .format() escapes producing the
        # literal {SQL Server Native Client 11.0} the ODBC string needs.
        cnn = pyodbc.connect(
            r'DRIVER={{SQL Server Native Client 11.0}};SERVER={server};PORT={port};DATABASE={db};UID={user};PWD={passwd}'.format(
                server=host, port=port, db=db, user=user, passwd=passwd)
        )
        # Start model code generation:
        define_db(cnn, db, host, port, user, passwd)
| |
# Python imports
import base64
import json
# 3rd party imports
from django.db import connection
from django.test import TestCase
# Project imports
from register.models import RegistrationCenter
from register.tests.factories import RegistrationFactory
from reporting_api import create_test_data, reports, tasks, views
from reporting_api.data_pull import registrations_by_phone
# URI pieces for the reporting endpoints under test.
BASE_URI = '/reporting/'
ELECTION_DAY_REPORT_REL_URI = 'election_day.json'
ELECTION_DAY_LOG_REL_URI = 'election_day_log.json'
REGISTRATIONS_REL_URI = 'registrations.json'
# Credentials installed into views.REPORT_USER_DB by the setUp()s below.
TEST_USERNAME = 'some_test_user'
TEST_PASSWORD = 'some_password'
# Sizes of the synthetic fixture built by create_test_data.create().
NUM_REGISTRATIONS = 100
NUM_COPY_CENTERS = 3
NUM_NO_REG_CENTERS = 3
class TestReports(TestCase):
    """End-to-end checks of the JSON report views against generated data."""
    def setUp(self):
        # Build a synthetic database, then materialize the reports that
        # the views serve.
        create_test_data.create(num_registrations=NUM_REGISTRATIONS,
                                num_copy_centers=NUM_COPY_CENTERS,
                                num_no_reg_centers=NUM_NO_REG_CENTERS)
        tasks.election_day()
        tasks.registrations()
        # Install HTTP Basic credentials the report views will accept.
        credentials = base64.b64encode((TEST_USERNAME + ':' + TEST_PASSWORD).encode())
        self.client.defaults['HTTP_AUTHORIZATION'] = b'Basic ' + credentials
        views.REPORT_USER_DB[TEST_USERNAME] = TEST_PASSWORD
    def test_log(self):
        """The election-day log maps center codes to per-phone entries."""
        rsp = self.client.get(BASE_URI + ELECTION_DAY_LOG_REL_URI)
        self.assertEqual(200, rsp.status_code)
        self.assertEqual('application/json', rsp['Content-Type'])
        self.assertNotEqual('{}', rsp.content.decode())
        allowable_phone_keys = {'phone_number', 'type', 'center_code',
                                'creation_date', 'data'}
        log = json.loads(rsp.content.decode())
        for key in log.keys():
            int(key)  # keys are stringified center codes; shouldn't raise
            for phone in log[key]:
                actual_phone_keys = set(phone.keys())
                self.assertTrue(actual_phone_keys.issubset(allowable_phone_keys))
                self.assertEqual(int(key), phone['center_code'])
    def _check_slice(self, d, key_for_slice, required_item_keys=(), forbidden_item_keys=()):
        """Assert every item of d[key_for_slice] has/lacks the given keys.

        The slice may be either a dict (values are the items) or a list.
        """
        self.assertIn(key_for_slice, d)
        if isinstance(d[key_for_slice], dict):
            slice_items = [d[key_for_slice][k] for k in d[key_for_slice].keys()]
        else:
            slice_items = d[key_for_slice]
        assert slice_items
        for item in slice_items:
            for k in required_item_keys:
                self.assertIn(k, item)
            for k in forbidden_item_keys:
                self.assertNotIn(k, item)
    def _count_copy_centers(self, by_polling_center):
        """Count COPY-type centers; each must name the center it copies."""
        copy_centers_found = 0
        if isinstance(by_polling_center, dict):
            by_polling_center = [by_polling_center[key] for key in by_polling_center.keys()]
        for center_dict in by_polling_center:
            if center_dict['polling_center_type'] == RegistrationCenter.Types.COPY:
                copy_centers_found += 1
                self.assertIn('copy_of_polling_center', center_dict)
        return copy_centers_found
    def test_election_day(self):
        """Election-day report: per-slice key presence plus copy-center count."""
        rsp = self.client.get(BASE_URI + ELECTION_DAY_REPORT_REL_URI)
        self.assertEqual(200, rsp.status_code)
        self.assertEqual('application/json', rsp['Content-Type'])
        d = json.loads(rsp.content.decode())
        self._check_slice(d, 'by_country',
                          required_item_keys=('country', 'office_count', 'polling_center_count',
                                              'region_count', 'registration_count'),
                          forbidden_item_keys=('copy_of_polling_center', 'polling_center_type',
                                               'inactive_for_election'))
        self._check_slice(d, 'by_office',
                          required_item_keys=('country', 'polling_center_count', 'region',
                                              'registration_count', 'inactive_for_election'),
                          forbidden_item_keys=('copy_of_polling_center', 'polling_center_type'))
        self._check_slice(d, 'by_polling_center',
                          required_item_keys=('country', 'office_id', 'polling_center_code',
                                              'polling_center_type', 'region',
                                              'subconstituency_id', 'registration_count'),
                          forbidden_item_keys=('polling_center_count',))
        self._check_slice(d, 'by_region',
                          required_item_keys=('country', 'office_count', 'polling_center_count',
                                              'region', 'registration_count'),
                          forbidden_item_keys=('copy_of_polling_center', 'polling_center_type',
                                               'inactive_for_election'))
        self.assertEqual(NUM_COPY_CENTERS, self._count_copy_centers(d['by_polling_center']))
    def test_registrations(self):
        """Registrations report: slice keys, and exclusion of copy/no-reg centers."""
        rsp = self.client.get(BASE_URI + REGISTRATIONS_REL_URI)
        self.assertEqual(200, rsp.status_code)
        self.assertEqual('application/json', rsp['Content-Type'])
        d = json.loads(rsp.content.decode())
        self._check_slice(d, 'by_country',
                          required_item_keys=('country', 'office_count', 'polling_center_count',
                                              'region_count', 'total'),
                          forbidden_item_keys=('copy_of_polling_center', 'polling_center_type'))
        self._check_slice(d, 'by_office_id',
                          required_item_keys=('country', 'polling_center_count', 'region',
                                              'total'),
                          forbidden_item_keys=('copy_of_polling_center', 'polling_center_type'))
        self._check_slice(d, 'by_polling_center_code',
                          required_item_keys=('country', 'office_id', 'polling_center_code',
                                              'polling_center_type', 'region',
                                              'subconstituency_id'),
                          forbidden_item_keys=('polling_center_count',))
        self._check_slice(d, 'by_region',
                          required_item_keys=('country', 'office_count', 'polling_center_count',
                                              'region', 'total'),
                          forbidden_item_keys=('copy_of_polling_center', 'polling_center_type'))
        self._check_slice(d, 'by_subconstituency_id',
                          required_item_keys=('country', 'office_id', 'polling_center_count',
                                              'region', 'subconstituency_id', 'total'),
                          forbidden_item_keys=('copy_of_polling_center', 'polling_center_type'))
        # Registrations aren't against a copy center, so no copy centers should show up here.
        self.assertEqual(0, self._count_copy_centers(d['by_polling_center_code']))
        # Registrations aren't against a center specifically defined to not support registrations.
        no_reg_centers = RegistrationCenter.objects.filter(reg_open=False)
        reported_centers = [
            center_info['polling_center_code'] for center_info in d['by_polling_center_code']
        ]
        self.assertEqual(NUM_NO_REG_CENTERS, no_reg_centers.count())
        for center in no_reg_centers:
            self.assertNotIn(center.center_id, reported_centers)
    def test_registration_slices(self):
        """Stored metadata/stats reports contain exactly the expected keys."""
        d = reports.retrieve_report(reports.REGISTRATIONS_METADATA_KEY)
        self.assertEqual(set(d.keys()), {'demographic_breakdowns', 'subconstituencies',
                                         'offices', 'last_updated', 'dates'})
        d = reports.retrieve_report(reports.REGISTRATIONS_STATS_KEY)
        self.assertEqual(set(d.keys()), {'sms_stats', 'phone_multiple_family_book',
                                         'phone_duplicate_registrations', 'message_stats',
                                         'headline'})
    def test_lists_of_reports(self):
        """retrieve_report() accepts a list of keys; a bad key fails the batch."""
        r1, r2 = reports.retrieve_report([reports.REGISTRATIONS_METADATA_KEY,
                                          reports.REGISTRATIONS_STATS_KEY])
        self.assertTrue(bool(r1))
        self.assertTrue(bool(r2))
        class BadElection(object):
            # Minimal stand-in with an id no real election has.
            def __init__(self):
                self.id = -1234
        election_with_bogus_id = BadElection()
        r1, r2 = reports.retrieve_report([
            reports.REGISTRATIONS_METADATA_KEY,
            reports.election_key(reports.ELECTION_DAY_METADATA_KEY, election_with_bogus_id)
        ])
        # although _METADATA_ exists, 1st result is None to indicate that something failed
        self.assertFalse(bool(r1))
        # 2nd result is None because there's no such election
        self.assertFalse(bool(r2))
class TestRegistrationsByPhone(TestCase):
    """Tests for the raw-SQL registrations_by_phone data pull.

    The report lists (phone_number, count) pairs for phones with more
    than one live registration; deleted/archived rows are excluded.
    """
    @classmethod
    def setUpTestData(cls):
        # create a registration and record the phone number
        cls.reg = RegistrationFactory(archive_time=None)
        cls.phone_number = cls.reg.sms.from_number
        # registrations_by_phone() works on a raw DB cursor, not the ORM.
        cls.cursor = connection.cursor()
    def test_registrations_by_phone_report_is_correct(self):
        # create a second registration with the same phone number
        RegistrationFactory(sms__from_number=self.phone_number, archive_time=None)
        report = registrations_by_phone(self.cursor)
        self.assertEqual(report, [(self.phone_number, 2)])
    def test_multiple_rows(self):
        # create a second registration with the same phone number
        RegistrationFactory(sms__from_number=self.phone_number, archive_time=None)
        # create 3 more registrations sharing a phone number (but different than the previous two)
        reg_group_2 = RegistrationFactory(archive_time=None)
        second_phone_number = reg_group_2.sms.from_number
        RegistrationFactory(sms__from_number=second_phone_number, archive_time=None)
        RegistrationFactory(sms__from_number=second_phone_number, archive_time=None)
        report = registrations_by_phone(self.cursor)
        expected_report = [
            (self.phone_number, 2),
            (second_phone_number, 3),
        ]
        self.assertEqual(sorted(report), sorted(expected_report))
    def test_ignore_singletons(self):
        "Phone numbers with only 1 registration are not included in the report."
        report = registrations_by_phone(self.cursor)
        self.assertEqual(report, [])
    def test_ignore_deleted_sms(self):
        # A registration whose SMS is soft-deleted must not count.
        RegistrationFactory(sms__from_number=self.phone_number, sms__deleted=True,
                            archive_time=None)
        report = registrations_by_phone(self.cursor)
        self.assertEqual(report, [])
    def test_ignore_deleted_registration(self):
        RegistrationFactory(sms__from_number=self.phone_number, deleted=True,
                            archive_time=None)
        report = registrations_by_phone(self.cursor)
        self.assertEqual(report, [])
    def test_ignore_archived_registration(self):
        # Default factory sets an archive_time, so this one is archived.
        RegistrationFactory(sms__from_number=self.phone_number)
        report = registrations_by_phone(self.cursor)
        self.assertEqual(report, [])
class TestMissingReports(TestCase):
    """Valid login, but no reports exist: every endpoint returns 503."""
    def setUp(self):
        # The client in this test class can log in okay but reports
        # aren't present because the store is cleared first.
        reports.empty_report_store()
        raw = (TEST_USERNAME + ':' + TEST_PASSWORD).encode()
        self.client.defaults['HTTP_AUTHORIZATION'] = b'Basic ' + base64.b64encode(raw)
        views.REPORT_USER_DB[TEST_USERNAME] = TEST_PASSWORD
    def test(self):
        uris = (REGISTRATIONS_REL_URI, ELECTION_DAY_LOG_REL_URI,
                ELECTION_DAY_REPORT_REL_URI)
        for relative_uri in uris:
            rsp = self.client.get(BASE_URI + relative_uri)
            self.assertEqual(503, rsp.status_code, 'expected report at %s to be unavailable' %
                             relative_uri)
            self.assertEqual('text/plain', rsp['Content-Type'])
class TestNoAuth(TestCase):
    """Requests without credentials are rejected with 401."""
    def setUp(self):
        views.REPORT_USER_DB.clear()
    def test_election_day_log(self):
        response = self.client.get(BASE_URI + ELECTION_DAY_LOG_REL_URI)
        self.assertEqual(401, response.status_code)
class TestBadAuth(TestCase):
    """Credentials for an unknown user are rejected with 401."""
    def setUp(self):
        views.REPORT_USER_DB.clear()
        views.REPORT_USER_DB['validuser'] = 'validpass'
        encoded = base64.b64encode(b'invaliduser:invalidpass')
        self.client.defaults['HTTP_AUTHORIZATION'] = b'Basic ' + encoded
    def test_election_day_log(self):
        response = self.client.get(BASE_URI + ELECTION_DAY_LOG_REL_URI)
        self.assertEqual(401, response.status_code)
class TestNoAuthDB(TestCase):
    """ REPORT_USER_DB is empty, and we try to log in. It fails of
    course, and if you look in the log you should see a hint that
    the user database is not set up.
    """
    def setUp(self):
        views.REPORT_USER_DB.clear()
        encoded = base64.b64encode(b'anyuser:anypass')
        self.client.defaults['HTTP_AUTHORIZATION'] = b'Basic ' + encoded
    def test_election_day_log(self):
        response = self.client.get(BASE_URI + ELECTION_DAY_LOG_REL_URI)
        self.assertEqual(401, response.status_code)
| |
import math
import numpy
import six
import chainer
from chainer import backend
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import function_node
import chainer.functions
from chainer.functions.math import floor as _floor
from chainer import utils
from chainer.utils import type_check
from chainer import variable
def _convert_value_to_string(value):
    """Render a constant operand for use in a FunctionNode label.

    Variables are unwrapped to their data; negative scalars are
    parenthesized so labels such as ``_ + (-2)`` stay readable; arrays
    are summarized as ``'constant array'``.
    """
    if isinstance(value, variable.Variable):
        value = value.data
    if numpy.isscalar(value):
        return '({})'.format(value) if value < 0 else str(value)
    array_types = chainer.get_array_types()
    if not isinstance(value, array_types):
        raise ValueError(
            'Value must be a Variable, scalar, {} or {}. Actual: {}'.format(
                ', '.join([str(at) for at in array_types[:-1]]),
                array_types[-1], type(value)))
    return 'constant array'
def _preprocess_const(x, value):
return x.dtype.type(value)
def _chainerx_preprocess_const(x, value, label):
    """Coerce a constant operand for a ChainerX arithmetic op.

    Arrays are converted to chainerx; Python scalars pass through;
    NumPy scalar types are unboxed to Python scalars; Variables are
    unwrapped and checked for forward compatibility with ``x``.
    """
    # Allow mixing of numpy/cupy array and chainerx array as long as
    # conversion without copy is possible.
    if isinstance(value, (numpy.ndarray, cuda.ndarray)):
        # TODO(niboshi): force zero-copy
        return backend.to_chainerx(value)
    if isinstance(value, (six.integer_types, float)):
        return value
    if numpy.isscalar(value):
        # numpy.asscalar() was removed in NumPy 1.23; .item() is the
        # equivalent supported on all NumPy versions.
        return value.item()
    if isinstance(value, variable.Variable):
        value = variable.as_array(value)
    utils._check_arrays_forward_compatible((x, value), label)
    return value
def _preprocess_rhs(x, value):
    """Validate ``value`` as the right-hand operand of a binary op.

    Variables, scalars, and ndarray-like values are returned unchanged;
    dtype agreement is enforced later by each node's
    ``check_type_forward``.  ``x`` (the left-hand operand) stays in the
    signature for caller compatibility.

    Raises:
        TypeError: if ``value`` is not a Variable, scalar, or ndarray.
    """
    if isinstance(value, chainer.Variable):
        return value
    if not (numpy.isscalar(value)
            or isinstance(value, chainer.get_array_types())):
        raise TypeError(
            'Value must be a scalar, `numpy.ndarray`, `cupy.ndarray` '
            'or a `Variable`.\nActual: {}'.format(type(value)))
    # BUG FIX: this previously returned value.astype(x.dtype, copy=False),
    # which raises AttributeError for plain Python scalars (no .astype
    # method; reachable via matmul/rmatmul) and silently cast arrays,
    # masking the dtype mismatches check_type_forward is meant to report.
    return value
class Neg(function_node.FunctionNode):
    """Elementwise negation node (``y = -x``)."""
    @property
    def label(self):
        return '__neg__'
    def check_type_forward(self, in_types):
        type_check._argname(in_types, ('x',))
    def forward_chainerx(self, x):
        return -x[0],
    def forward(self, x):
        # Nothing retained: d(-x)/dx is the constant -1.
        self.retain_inputs(())
        return utils.force_array(-x[0]),
    def backward(self, indexes, gy):
        return -gy[0],
def neg(self):  # -x
    """Element-wise negation.

    Returns:
        ~chainer.Variable: Output variable.
    """
    y, = Neg().apply((self,))
    return y
class Absolute(function_node.FunctionNode):
    """Elementwise absolute value node (``y = |x|``)."""
    @property
    def label(self):
        return '|_|'
    def check_type_forward(self, in_types):
        type_check._argname(in_types, ('x',))
        type_check.expect(in_types[0].dtype.kind == 'f')
    def forward(self, x):
        # Retain x: backward needs its sign.
        self.retain_inputs((0,))
        return utils.force_array(abs(x[0])),
    def backward(self, indexes, grad_outputs):
        x = self.get_retained_inputs()[0]
        return AbsoluteGrad(x.data).apply(grad_outputs)
class AbsoluteGrad(function_node.FunctionNode):
    """Gradient of |x|: multiplies the upstream gradient by sign(x)."""
    def __init__(self, x):
        super(AbsoluteGrad, self).__init__()
        # Raw forward-input array (not a Variable).
        self.x = x
    def check_type_forward(self, in_types):
        type_check._argname(in_types, ('gy',))
        type_check.expect(in_types[0].dtype.kind == 'f')
    def forward_cpu(self, inputs):
        return utils.force_array(numpy.sign(self.x) * inputs[0]),
    def forward_gpu(self, inputs):
        # (x0 > 0) - (x0 < 0) is a branch-free sign(x0).
        gx0 = cuda.elementwise(
            'T x0, T gy', 'T gx0',
            'gx0 = ((x0 > 0) - (x0 < 0)) * gy',
            'abs_bwd')(self.x, inputs[0])
        return gx0,
    def backward(self, indexes, grad_outputs):
        # sign(x) is piecewise constant, so higher orders reuse it.
        return AbsoluteGrad(self.x).apply(grad_outputs)
def absolute(self):
    """Element-wise absolute.

    Returns:
        ~chainer.Variable: Output variable.
    """
    y, = Absolute().apply((self,))
    return y
class Add(function_node.FunctionNode):
    """Elementwise addition of two arrays (broadcasting allowed)."""
    @property
    def label(self):
        return '_ + _'
    def check_type_forward(self, in_types):
        type_check._argname(in_types, ('lhs', 'rhs'))
        type_check.expect(
            in_types[0].dtype == in_types[1].dtype,
        )
        type_check.expect_broadcast_shapes(
            in_types[0].shape, in_types[1].shape)
    def forward_chainerx(self, x):
        return x[0] + x[1],
    def forward(self, x):
        # may broadcast
        y = utils.force_array(x[0] + x[1])
        return y,
    def backward(self, indexes, gy):
        # sum_to reduces the gradient back to each input's pre-broadcast
        # shape.
        return tuple(chainer.functions.sum_to(gy[0], self.inputs[i].shape)
                     for i in indexes)
class AddConstant(function_node.FunctionNode):
    """Elementwise addition of a constant (``y = x + value``)."""
    def __init__(self, value):
        self.value = value
    @property
    def label(self):
        return '_ + %s' % _convert_value_to_string(self.value)
    def check_type_forward(self, in_types):
        type_check._argname(in_types, ('x',))
        type_check.expect(in_types.size() == 1)
    def forward_chainerx(self, x):
        value = _chainerx_preprocess_const(x[0], self.value, 'add')
        return x[0] + value,
    def forward(self, x):
        value = _preprocess_const(x[0], self.value)
        return utils.force_array(x[0] + value),
    def backward(self, indexes, gy):
        # d(x + c)/dx == 1: pass the gradient through unchanged.
        # (Removed an unused `x_node, = self.inputs` unpack.)
        return gy
class MultiAdd(function_node.FunctionNode):
    """Addition of an arbitrary number of arrays in one node."""
    def check_type_forward(self, in_types):
        # All inputs must share the first input's dtype.
        for i, in_type in enumerate(in_types):
            type_check._argname((in_type,), ('x{}'.format(i),))
            type_check.expect(in_types[0].dtype == in_type.dtype)
    def forward(self, xs):
        self.len = len(xs)
        if len(xs) == 1:
            return xs
        # Fast path: iDeep fused multi_add when every shape matches.
        if (intel64.should_use_ideep('>=auto')
                and intel64.inputs_all_ready(xs)
                and all(x.shape == xs[0].shape for x in xs[1:])):
            y = intel64.ideep.multi_add(xs)
        else:
            # The output should be a new array. Add the first 2 arrays
            # and get the result y. Then add the rest arrays to y.
            y = xs[0] + xs[1]
            for x in xs[2:]:
                if x.shape == y.shape:
                    y += x
                else:
                    # Broadcasting needed: in-place += would fail.
                    y = x + y
        return utils.force_array(y),
    def backward(self, indexes, gy):
        return tuple(chainer.functions.sum_to(gy[0], x_node.shape)
                     for x_node in self.inputs)
# TODO(hvy): Implement multi-add with chainerx.ndarrays.
def add(*xs):  # lhs + rhs or add more than 2 variables
    """Element-wise addition.

    Returns:
        ~chainer.Variable: Output variable.
    """
    if len(xs) != 2:
        # One, or three-or-more, operands: fuse into a MultiAdd node.
        y, = MultiAdd().apply(xs)
        return y
    lhs, rhs = xs
    if numpy.isscalar(rhs):
        y, = AddConstant(rhs).apply((lhs,))
        return y
    y, = Add().apply((lhs, _preprocess_rhs(lhs, rhs)))
    return y
class Sub(function_node.FunctionNode):
    """Elementwise subtraction node (``lhs - rhs``), broadcasting allowed."""
    @property
    def label(self):
        return '_ - _'
    def check_type_forward(self, in_types):
        type_check._argname(in_types, ('lhs', 'rhs'))
        type_check.expect(in_types[0].dtype == in_types[1].dtype)
        type_check.expect_broadcast_shapes(
            in_types[0].shape, in_types[1].shape)
    def forward_chainerx(self, x):
        return x[0] - x[1],
    def forward(self, x):
        # may broadcast
        return utils.force_array(x[0] - x[1]),
    def backward(self, indexes, gy):
        x1, x2 = self.inputs
        g, = gy
        # +g flows to the minuend, -g to the subtrahend; sum_to undoes
        # broadcasting.
        return (
            chainer.functions.sum_to(g, x1.shape) if 0 in indexes else None,
            -chainer.functions.sum_to(g, x2.shape) if 1 in indexes else None,
        )
def sub(self, rhs):  # lhs - rhs
    """Element-wise subtraction.

    Returns:
        ~chainer.Variable: Output variable.
    """
    if numpy.isscalar(rhs):
        # Subtracting a constant is adding its negation.
        y, = AddConstant(-rhs).apply((self,))
        return y
    y, = Sub().apply((self, _preprocess_rhs(self, rhs)))
    return y
class SubFromConstant(function_node.FunctionNode):
    """Elementwise ``value - x`` for a constant ``value``."""
    def __init__(self, value):
        self.value = value
    @property
    def label(self):
        return '%s - _' % _convert_value_to_string(self.value)
    def check_type_forward(self, in_types):
        type_check._argname(in_types, ('x',))
    def forward(self, x):
        value = _preprocess_const(x[0], self.value)
        return utils.force_array(value - x[0]),
    def backward(self, indexes, gy):
        g, = gy
        # d(c - x)/dx == -1.
        return -g,
def rsub(self, rhs):  # rhs - lhs
    """Element-wise subtraction.

    Returns:
        ~chainer.Variable: Output variable.
    """
    if numpy.isscalar(rhs):
        y, = SubFromConstant(rhs).apply((self,))
        return y
    y, = Sub().apply((_preprocess_rhs(self, rhs), self))
    return y
class Mul(function_node.FunctionNode):
    """Elementwise multiplication node (broadcasting allowed)."""
    @property
    def label(self):
        return '_ * _'
    def check_type_forward(self, in_types):
        type_check._argname(in_types, ('lhs', 'rhs'))
        type_check.expect(
            in_types[0].dtype.kind == 'f',
            in_types[0].dtype == in_types[1].dtype,
        )
        type_check.expect_broadcast_shapes(
            in_types[0].shape, in_types[1].shape)
    def forward_chainerx(self, x):
        return x[0] * x[1],
    def forward(self, x):
        # Both inputs needed for backward (product rule).
        self.retain_inputs((0, 1))
        # may broadcast
        return utils.force_array(x[0] * x[1]),
    def backward(self, indexes, gy):
        xs = self.get_retained_inputs()
        # d(x0*x1)/dx_i = x_{1-i}; sum_to undoes broadcasting.
        return tuple(
            chainer.functions.sum_to(gy[0] * xs[1 - i], xs[i].shape)
            for i in indexes
        )
class MulConstant(function_node.FunctionNode):
    """Elementwise multiplication by a constant (``y = value * x``)."""
    def __init__(self, value):
        self.value = value
    @property
    def label(self):
        return '_ * %s' % _convert_value_to_string(self.value)
    def check_type_forward(self, in_types):
        type_check._argname(in_types, ('x',))
    def forward_chainerx(self, x):
        value = _chainerx_preprocess_const(x[0], self.value, 'mul')
        return x[0] * value,
    def forward(self, x):
        value = _preprocess_const(x[0], self.value)
        return utils.force_array(value * x[0]),
    def backward(self, indexes, gy):
        g, = gy
        # d(c*x)/dx == c.
        return self.value * g,
def mul(self, rhs):  # lhs * rhs
    """Element-wise multiplication.

    Returns:
        ~chainer.Variable: Output variable.
    """
    if numpy.isscalar(rhs):
        y, = MulConstant(rhs).apply((self,))
        return y
    y, = Mul().apply((self, _preprocess_rhs(self, rhs)))
    return y
class Div(function_node.FunctionNode):
    """Elementwise division node (broadcasting allowed)."""
    @property
    def label(self):
        return '_ / _'
    def check_type_forward(self, in_types):
        type_check._argname(in_types, ('lhs', 'rhs'))
        type_check.expect(
            in_types[0].dtype.kind == 'f',
            in_types[0].dtype == in_types[1].dtype,
        )
        type_check.expect_broadcast_shapes(
            in_types[0].shape, in_types[1].shape)
    def forward_chainerx(self, x):
        return x[0] / x[1],
    def forward(self, x):
        # Both inputs needed for the quotient-rule backward.
        self.retain_inputs((0, 1))
        # may broadcast
        return utils.force_array(x[0] / x[1]),
    def backward(self, indexes, grad_outputs):
        x = self.get_retained_inputs()
        return DivGrad().apply((x[0], x[1], grad_outputs[0]))
class DivGrad(function_node.FunctionNode):
    """Gradient of x0 / x1: gx0 = gy/x1, gx1 = -gy*x0/x1**2."""
    def forward_cpu(self, inputs):
        self.retain_inputs((0, 1, 2))
        x0, x1, gy = inputs
        gx0 = utils.force_array(gy / x1)
        # -gx0 * x0 / x1 == -gy * x0 / x1**2 (reuses gx0).
        gx1 = utils.force_array(-gx0 * x0 / x1)
        return utils.sum_to(gx0, x0.shape), utils.sum_to(gx1, x1.shape)
    def forward_gpu(self, inputs):
        self.retain_inputs((0, 1, 2))
        x0, x1, gy = inputs
        gx0, gx1 = cuda.elementwise(
            'T x0, T x1, T gy',
            'T gx0, T gx1',
            '''
               gx0 = gy / x1;
               gx1 = -gx0 * x0 / x1;
            ''', 'div_bwd')(x0, x1, gy)
        return utils.sum_to(gx0, x0.shape), utils.sum_to(gx1, x1.shape)
    def backward(self, indexes, grad_outputs):
        # Second-order gradients of division w.r.t. x0, x1 and gy.
        x0, x1, gy = self.get_retained_inputs()
        ggx0, ggx1 = grad_outputs
        ret = []
        x1_square = x1 * x1
        if 0 in indexes:
            if ggx1 is None:
                ret.append(None)
            else:
                gx0 = -ggx1 * gy / x1_square
                ret.append(chainer.functions.sum_to(gx0, x0.shape))
        if 1 in indexes:
            gx1 = None if ggx0 is None else -ggx0 * gy / x1_square
            gx1_1 = (None if ggx1 is None else
                     ggx1 * 2 * gy * x0 / (x1_square * x1))
            if gx1 is None:
                gx1 = gx1_1
            elif gx1_1 is not None:
                gx1 += gx1_1
            ret.append(None if gx1 is None else
                       chainer.functions.sum_to(gx1, x1.shape))
        if 2 in indexes:
            ggy = None if ggx0 is None else ggx0 / x1
            ggy_1 = None if ggx1 is None else ggx1 * x0 / x1_square
            if ggy is None:
                ggy = -ggy_1
            elif ggy_1 is not None:
                ggy -= ggy_1
            ret.append(ggy)
        return ret
def div(self, rhs):  # lhs / rhs
    """Element-wise division

    Returns:
        ~chainer.Variable: Output variable.
    """
    if numpy.isscalar(rhs):
        # Division by a constant is multiplication by its reciprocal.
        y, = MulConstant(1. / rhs).apply((self,))
        return y
    y, = Div().apply((self, _preprocess_rhs(self, rhs)))
    return y
# TODO(sonots): Support chainerx
class DivFromConstant(function_node.FunctionNode):
    """Elementwise ``value / x`` for a constant ``value``."""
    def __init__(self, value):
        self.value = value
    @property
    def label(self):
        return '%s / _' % _convert_value_to_string(self.value)
    def check_type_forward(self, in_types):
        type_check._argname(in_types, ('x',))
        type_check.expect(in_types[0].dtype.kind == 'f')
    def forward(self, x):
        # Retain x: backward needs it for -c/x**2.
        self.retain_inputs((0,))
        value = _preprocess_const(x[0], self.value)
        return utils.force_array(value / x[0]),
    def backward(self, indexes, grad_outputs):
        x = self.get_retained_inputs()
        return DivFromConstantGrad(self.value).apply((x[0], grad_outputs[0]))
class DivFromConstantGrad(function_node.FunctionNode):
    """Gradient of value / x, i.e. gx = -value * gy / x**2."""
    def __init__(self, value):
        super(DivFromConstantGrad, self).__init__()
        self.value = value
    def forward_cpu(self, inputs):
        self.retain_inputs((0, 1))
        x, gy = inputs
        value = _preprocess_const(x, self.value)
        return utils.force_array(-value * gy / (x ** 2)),
    def forward_gpu(self, inputs):
        self.retain_inputs((0, 1))
        x, gy = inputs
        # TODO(beam2d): Make it not use the input
        value = _preprocess_const(x, self.value)
        return cuda.elementwise('T x, T gy, T value', 'T gx',
                                'gx = -value * gy / (x * x)',
                                'div_from_const_bwd')(x, gy, value),
    def backward(self, indexes, grad_outputs):
        x, gy = self.get_retained_inputs()
        value = _preprocess_const(x.data, self.value)
        ret = []
        if 0 in indexes:
            # d/dx (-c*gy/x**2) = 2*c*gy/x**3
            ret.append(grad_outputs[0] * 2 * value * gy / (x ** 3))
        if 1 in indexes:
            ret.append(grad_outputs[0] * -value / (x ** 2))
        return ret
def rdiv(self, rhs):  # rhs / lhs
    """Element-wise division.

    Returns:
        ~chainer.Variable: Output variable.
    """
    if numpy.isscalar(rhs):
        y, = DivFromConstant(rhs).apply((self,))
        return y
    y, = Div().apply((_preprocess_rhs(self, rhs), self))
    return y
def floordiv(self, rhs):  # lhs // rhs
    """Element-wise floor division.

    Returns:
        ~chainer.Variable: Output variable.
    """
    quotient = div(self, rhs)
    return _floor.floor(quotient)
def rfloordiv(self, rhs):  # rhs // lhs
    """Element-wise floor division.

    Returns:
        ~chainer.Variable: Output variable.
    """
    quotient = rdiv(self, rhs)
    return _floor.floor(quotient)
class PowVarVar(function_node.FunctionNode):
    """Elementwise power node with variable base and exponent."""
    @property
    def label(self):
        return '_ ** _'
    def check_type_forward(self, in_types):
        type_check._argname(in_types, ('lhs', 'rhs'))
        type_check.expect(
            in_types[0].dtype.kind == 'f',
            in_types[0].dtype == in_types[1].dtype,
        )
        type_check.expect_broadcast_shapes(
            in_types[0].shape, in_types[1].shape)
    def forward(self, x):
        self.retain_inputs((0, 1))
        # may broadcast
        # The output is cached on the node: backward reuses y = x0**x1.
        self.y = x[0] ** x[1]
        return utils.force_array(self.y),
    def backward(self, indexes, gy):
        inputs = self.get_retained_inputs()
        return PowVarVarGrad(self.y).apply((inputs[0], inputs[1], gy[0]))
class PowVarVarGrad(function_node.FunctionNode):
    """Gradient of x0 ** x1.

    gx0 = x1 * x0**(x1-1) * gy and gx1 = log(x0) * y * gy,
    where y = x0**x1 is the cached forward output.
    """
    def __init__(self, y):
        # Forward output of PowVarVar, reused to avoid recomputation.
        self.y = y
    def check_type_forward(self, in_types):
        type_check._argname(in_types, ('lhs', 'rhs', 'gy'))
        type_check.expect(
            in_types[0].dtype.kind == 'f',
            in_types[0].dtype == in_types[1].dtype,
            in_types[0].dtype == in_types[2].dtype,
        )
    def forward_cpu(self, inputs):
        self.retain_inputs((0, 1, 2))
        x0, x1, gy = inputs
        one = x1.dtype.type(1)
        gx0 = utils.sum_to(
            utils.force_array(x1 * (x0 ** (x1 - one)) * gy), x0.shape)
        gx1 = utils.sum_to(
            utils.force_array(numpy.log(x0) * self.y * gy), x1.shape)
        return gx0, gx1
    def forward_gpu(self, inputs):
        self.retain_inputs((0, 1, 2))
        x0, x1, gy = inputs
        gx0, gx1 = cuda.elementwise(
            'T x0, T x1, T gy, T y', 'T gx0, T gx1',
            '''
            gx0 = x1 * pow(x0, x1 - 1) * gy;
            gx1 = log(x0) * y * gy;
            ''', 'pow_var_var_bwd')(x0, x1, gy, self.y)
        gx0 = utils.sum_to(gx0, x0.shape)
        gx1 = utils.sum_to(gx1, x1.shape)
        return gx0, gx1
    def backward(self, indexes, ggx):
        # Second-order terms; common subexpressions hoisted.
        x0, x1, gy = self.get_retained_inputs()
        ggx0, ggx1 = ggx
        log_x0 = chainer.functions.log(x0)
        pow_x0_x1 = x0 ** x1
        pow_x0_x1_1 = x0 ** (x1 - 1)
        pow_x0_x1_2 = x0 ** (x1 - 2)
        ret = []
        if 0 in indexes:
            gx0_0 = (0 if ggx0 is None else
                     ggx0 * x1 * (x1 - 1) * pow_x0_x1_2)
            gx0_1 = (0 if ggx1 is None else
                     ggx1 * pow_x0_x1_1 * (log_x0 * x1 + 1))
            gx0 = (gx0_0 + gx0_1) * gy
            ret.append(chainer.functions.sum_to(gx0, x0.shape))
        if 1 in indexes:
            gx1_0 = (0 if ggx0 is None else
                     ggx0 * pow_x0_x1_1 * (log_x0 * x1 + 1))
            gx1_1 = (0 if ggx1 is None else
                     ggx1 * log_x0 * log_x0 * pow_x0_x1)
            gx1 = (gx1_0 + gx1_1) * gy
            ret.append(chainer.functions.sum_to(gx1, x1.shape))
        if 2 in indexes:
            ggy_0 = 0 if ggx0 is None else ggx0 * x1 * pow_x0_x1_1
            ggy_1 = 0 if ggx1 is None else ggx1 * log_x0 * pow_x0_x1
            ggy = ggy_0 + ggy_1
            ret.append(ggy)
        return ret
class PowVarConst(function_node.FunctionNode):
    """Elementwise power with a constant exponent (``y = x ** value``)."""
    def __init__(self, value):
        self.value = value
    @property
    def label(self):
        return '_ ** %s' % _convert_value_to_string(self.value)
    def check_type_forward(self, in_types):
        type_check._argname(in_types, ('x',))
        type_check.expect(in_types[0].dtype.kind == 'f')
    def forward(self, x):
        self.retain_inputs((0,))
        y = x[0] ** _preprocess_const(x[0], self.value)
        # Force the output back to x's dtype (** may promote).
        return utils.force_array(y, x[0].dtype),
    def backward(self, indexes, gy):
        inputs = self.get_retained_inputs()
        return PowVarConstGrad(self.value).apply((inputs[0], gy[0]))
class PowVarConstGrad(function_node.FunctionNode):
    """Gradient of x ** c: gx = c * x**(c-1) * gy."""
    def __init__(self, value):
        self.value = value
        # Dtype-cast copies of value / value-1, filled in lazily by the
        # forward pass that actually runs (cpu fills val_1, gpu val).
        self.val = self.val_1 = None
    def check_type_forward(self, in_types):
        type_check._argname(in_types, ('x', 'gy'))
        type_check.expect(
            in_types[0].dtype.kind == 'f',
            in_types[0].dtype == in_types[1].dtype,
            in_types[0].shape == in_types[1].shape
        )
    def forward_cpu(self, inputs):
        self.retain_inputs((0, 1))
        x, gy = inputs
        self.val_1 = _preprocess_const(x, self.value - 1)
        gx = utils.force_type(x.dtype, self.value) * (x ** self.val_1) * gy
        gx = utils.force_array(gx)
        return gx,
    def forward_gpu(self, inputs):
        self.retain_inputs((0, 1))
        x, gy = inputs
        self.val = _preprocess_const(x, self.value)
        gx = cuda.elementwise(
            'T x, T gy, T value', 'T gx',
            'gx = value * pow(x, value - 1) * gy',
            'pow_var_const_bwd')(x, gy, self.val)
        return gx,
    def backward(self, indexes, ggx):
        x, gy = self.get_retained_inputs()
        # Fill in whichever cached constants the forward didn't set.
        if self.val is None:
            self.val = _preprocess_const(x.data, self.value)
        if self.val_1 is None:
            self.val_1 = _preprocess_const(x.data, self.value - 1)
        val_2 = _preprocess_const(x.data, self.value - 2)
        ret = []
        if 0 in indexes:
            ret.append(ggx[0] * self.val * gy * self.val_1 * x ** val_2)
        if 1 in indexes:
            ret.append(ggx[0] * self.val * x ** self.val_1)
        return ret
def pow(self, rhs):  # lhs ** rhs
    """Element-wise power function.

    Returns:
        ~chainer.Variable: Output variable.
    """
    if numpy.isscalar(rhs):
        y, = PowVarConst(rhs).apply((self,))
        return y
    y, = PowVarVar().apply((self, _preprocess_rhs(self, rhs)))
    return y
class PowConstVar(function_node.FunctionNode):
    """Elementwise power with a constant base (``y = value ** x``)."""
    def __init__(self, value):
        self.value = value
    @property
    def label(self):
        return '%s ** _' % _convert_value_to_string(self.value)
    def check_type_forward(self, in_types):
        type_check._argname(in_types, ('x',))
        type_check.expect(in_types[0].dtype.kind == 'f')
    def forward(self, x):
        # Retain the output: d(c**x)/dx = log(c) * c**x = log(c) * y.
        self.retain_outputs((0,))
        value = _preprocess_const(x[0], self.value)
        y = value ** x[0]
        return utils.force_array(y),
    def backward(self, indexes, gy):
        outputs = self.get_retained_outputs()
        return PowConstVarGrad(self.value).apply((outputs[0], gy[0]))
class PowConstVarGrad(function_node.FunctionNode):
    """Gradient of c ** x: gx = log(c) * y * gy, with y = c**x."""
    def __init__(self, value):
        self.value = value
        # NOTE(review): math.log raises for value <= 0 -- presumably
        # callers only reach this with a positive base; confirm.
        self.log_value = math.log(value)
    def check_type_forward(self, in_types):
        type_check._argname(in_types, ('y', 'gy'))
        type_check.expect(
            in_types[0].dtype.kind == 'f',
            in_types[0].dtype == in_types[1].dtype,
            in_types[0].shape == in_types[1].shape
        )
    def forward_cpu(self, inputs):
        self.retain_inputs((0, 1))
        y, gy = inputs
        gx = utils.force_array(y.dtype.type(self.log_value) * y * gy)
        return gx,
    def forward_gpu(self, inputs):
        self.retain_inputs((0, 1))
        y, gy = inputs
        value = _preprocess_const(y, self.value)
        gx = cuda.elementwise(
            'T y, T gy, T value', 'T gx',
            'gx = log(value) * y * gy',
            'pow_const_var_bwd')(y, gy, value)
        return gx,
    def backward(self, indexes, ggx):
        y, gy = self.get_retained_inputs()
        gygy = y.dtype.type(self.log_value) * ggx[0]
        ret = []
        if 0 in indexes:
            ret.append(gygy * gy)
        if 1 in indexes:
            ret.append(gygy * y)
        return ret
def rpow(self, rhs):  # rhs ** lhs
    """Element-wise power function.

    Returns:
        ~chainer.Variable: Output variable.
    """
    if numpy.isscalar(rhs):
        y, = PowConstVar(rhs).apply((self,))
        return y
    y, = PowVarVar().apply((_preprocess_rhs(self, rhs), self))
    return y
def matmul(self, rhs):  # lhs @ rhs
    """Matrix multiplication.

    Returns:
        ~chainer.Variable: Output variable.
    """
    other = _preprocess_rhs(self, rhs)
    return chainer.functions.matmul(self, other)
def rmatmul(self, rhs):  # rhs @ lhs
    """Matrix multiplication.

    Returns:
        ~chainer.Variable: Output variable.
    """
    other = _preprocess_rhs(self, rhs)
    return chainer.functions.matmul(other, self)
def install_variable_arithmetics():
    """Attach the arithmetic operator overloads to ``chainer.Variable``."""
    operators = {
        '__neg__': neg,
        '__abs__': absolute,
        '__add__': add,
        '__radd__': add,
        '__sub__': sub,
        '__rsub__': rsub,
        '__mul__': mul,
        '__rmul__': mul,
        '__div__': div,
        '__truediv__': div,
        '__rdiv__': rdiv,
        '__rtruediv__': rdiv,
        '__floordiv__': floordiv,
        '__rfloordiv__': rfloordiv,
        '__pow__': pow,
        '__rpow__': rpow,
        '__matmul__': matmul,
        '__rmatmul__': rmatmul,
    }
    for dunder, func in operators.items():
        setattr(variable.Variable, dunder, func)
| |
"""Dependency Resolution
The dependency resolution in pip is performed as follows:
for top-level requirements:
a. only one spec allowed per project, regardless of conflicts or not.
otherwise a "double requirement" exception is raised
b. they override sub-dependency requirements.
for sub-dependencies
a. "first found, wins" (where the order is breadth first)
"""
# The following comment should be removed at some point in the future.
# mypy: strict-optional=False
# mypy: disallow-untyped-defs=False
import logging
import sys
from collections import defaultdict
from itertools import chain
from pip._vendor.packaging import specifiers
from pip._internal.exceptions import (
BestVersionAlreadyInstalled,
DistributionNotFound,
HashError,
HashErrors,
UnsupportedPythonVersion,
)
from pip._internal.req.req_install import check_invalid_constraint_type
from pip._internal.req.req_set import RequirementSet
from pip._internal.resolution.base import BaseResolver
from pip._internal.utils.compatibility_tags import get_supported
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import dist_in_usersite, normalize_version_info
from pip._internal.utils.packaging import check_requires_python, get_requires_python
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import DefaultDict, List, Optional, Set, Tuple
from pip._vendor.pkg_resources import Distribution
from pip._internal.cache import WheelCache
from pip._internal.index.package_finder import PackageFinder
from pip._internal.models.link import Link
from pip._internal.operations.prepare import RequirementPreparer
from pip._internal.req.req_install import InstallRequirement
from pip._internal.resolution.base import InstallRequirementProvider
DiscoveredDependencies = DefaultDict[str, List[InstallRequirement]]
logger = logging.getLogger(__name__)
def _check_dist_requires_python(
    dist,  # type: Distribution
    version_info,  # type: Tuple[int, int, int]
    ignore_requires_python=False,  # type: bool
):
    # type: (...) -> None
    """
    Check whether the given Python version is compatible with a distribution's
    "Requires-Python" value.

    :param version_info: A 3-tuple of ints representing the Python
        major-minor-micro version to check.
    :param ignore_requires_python: Whether to ignore the "Requires-Python"
        value if the given Python version isn't compatible.

    :raises UnsupportedPythonVersion: When the given Python version isn't
        compatible.
    """
    requires_python = get_requires_python(dist)
    try:
        compatible = check_requires_python(
            requires_python, version_info=version_info,
        )
    except specifiers.InvalidSpecifier as exc:
        # A malformed specifier is the package's problem, not the user's:
        # warn and keep resolving instead of aborting.
        logger.warning(
            "Package %r has an invalid Requires-Python: %s",
            dist.project_name, exc,
        )
        return
    if compatible:
        return
    version = '.'.join(map(str, version_info))
    if not ignore_requires_python:
        raise UnsupportedPythonVersion(
            'Package {!r} requires a different Python: {} not in {!r}'.format(
                dist.project_name, version, requires_python,
            ))
    logger.debug(
        'Ignoring failed Requires-Python check for package %r: '
        '%s not in %r',
        dist.project_name, version, requires_python,
    )
class Resolver(BaseResolver):
    """Resolves which packages need to be installed/uninstalled to perform \
    the requested operation without breaking the requirements of any package.
    """

    # The only upgrade strategies this resolver understands; validated
    # in __init__.
    _allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"}

    def __init__(
        self,
        preparer,  # type: RequirementPreparer
        finder,  # type: PackageFinder
        wheel_cache,  # type: Optional[WheelCache]
        make_install_req,  # type: InstallRequirementProvider
        use_user_site,  # type: bool
        ignore_dependencies,  # type: bool
        ignore_installed,  # type: bool
        ignore_requires_python,  # type: bool
        force_reinstall,  # type: bool
        upgrade_strategy,  # type: str
        py_version_info=None,  # type: Optional[Tuple[int, ...]]
    ):
        # type: (...) -> None
        super(Resolver, self).__init__()
        assert upgrade_strategy in self._allowed_strategies
        # Default to the running interpreter's version; otherwise normalize
        # whatever tuple the caller passed to (major, minor, micro).
        if py_version_info is None:
            py_version_info = sys.version_info[:3]
        else:
            py_version_info = normalize_version_info(py_version_info)
        self._py_version_info = py_version_info
        self.preparer = preparer
        self.finder = finder
        self.wheel_cache = wheel_cache
        self.upgrade_strategy = upgrade_strategy
        self.force_reinstall = force_reinstall
        self.ignore_dependencies = ignore_dependencies
        self.ignore_installed = ignore_installed
        self.ignore_requires_python = ignore_requires_python
        self.use_user_site = use_user_site
        self._make_install_req = make_install_req
        # Maps a parent project name to the sub-requirements discovered
        # while resolving it; consumed by get_installation_order().
        self._discovered_dependencies = \
            defaultdict(list)  # type: DiscoveredDependencies

    def resolve(self, root_reqs, check_supported_wheels):
        # type: (List[InstallRequirement], bool) -> RequirementSet
        """Resolve what operations need to be done

        As a side-effect of this method, the packages (and their dependencies)
        are downloaded, unpacked and prepared for installation. This
        preparation is done by ``pip.operations.prepare``.

        Once PyPI has static dependency metadata available, it would be
        possible to move the preparation to become a step separated from
        dependency resolution.
        """
        requirement_set = RequirementSet(
            check_supported_wheels=check_supported_wheels
        )
        for req in root_reqs:
            if req.constraint:
                check_invalid_constraint_type(req)
            requirement_set.add_requirement(req)
        # Actually prepare the files, and collect any exceptions. Most hash
        # exceptions cannot be checked ahead of time, because
        # _populate_link() needs to be called before we can make decisions
        # based on link type.
        discovered_reqs = []  # type: List[InstallRequirement]
        hash_errors = HashErrors()
        # NOTE: _resolve_one() appends to discovered_reqs while this loop
        # iterates over it (through chain), which is how newly discovered
        # sub-dependencies get processed breadth-first.
        for req in chain(requirement_set.all_requirements, discovered_reqs):
            try:
                discovered_reqs.extend(self._resolve_one(requirement_set, req))
            except HashError as exc:
                exc.req = req
                hash_errors.append(exc)
        if hash_errors:
            raise hash_errors
        return requirement_set

    def _is_upgrade_allowed(self, req):
        # type: (InstallRequirement) -> bool
        # "eager" upgrades everything; "to-satisfy-only" upgrades nothing;
        # "only-if-needed" upgrades only user-supplied reqs and constraints.
        if self.upgrade_strategy == "to-satisfy-only":
            return False
        elif self.upgrade_strategy == "eager":
            return True
        else:
            assert self.upgrade_strategy == "only-if-needed"
            return req.user_supplied or req.constraint

    def _set_req_to_reinstall(self, req):
        # type: (InstallRequirement) -> None
        """
        Set a requirement to be installed.
        """
        # Don't uninstall the conflict if doing a user install and the
        # conflict is not a user install.
        if not self.use_user_site or dist_in_usersite(req.satisfied_by):
            req.should_reinstall = True
        req.satisfied_by = None

    def _check_skip_installed(self, req_to_install):
        # type: (InstallRequirement) -> Optional[str]
        """Check if req_to_install should be skipped.

        This will check if the req is installed, and whether we should upgrade
        or reinstall it, taking into account all the relevant user options.

        After calling this req_to_install will only have satisfied_by set to
        None if the req_to_install is to be upgraded/reinstalled etc. Any
        other value will be a dist recording the current thing installed that
        satisfies the requirement.

        Note that for vcs urls and the like we can't assess skipping in this
        routine - we simply identify that we need to pull the thing down,
        then later on it is pulled down and introspected to assess upgrade/
        reinstalls etc.

        :return: A text reason for why it was skipped, or None.
        """
        if self.ignore_installed:
            return None
        req_to_install.check_if_exists(self.use_user_site)
        if not req_to_install.satisfied_by:
            return None
        if self.force_reinstall:
            self._set_req_to_reinstall(req_to_install)
            return None
        if not self._is_upgrade_allowed(req_to_install):
            if self.upgrade_strategy == "only-if-needed":
                return 'already satisfied, skipping upgrade'
            return 'already satisfied'
        # Check for the possibility of an upgrade. For link-based
        # requirements we have to pull the tree down and inspect to assess
        # the version #, so it's handled way down.
        if not req_to_install.link:
            try:
                self.finder.find_requirement(req_to_install, upgrade=True)
            except BestVersionAlreadyInstalled:
                # Then the best version is installed.
                return 'already up-to-date'
            except DistributionNotFound:
                # No distribution found, so we squash the error. It will
                # be raised later when we re-try later to do the install.
                # Why don't we just raise here?
                pass
        self._set_req_to_reinstall(req_to_install)
        return None

    def _find_requirement_link(self, req):
        # type: (InstallRequirement) -> Optional[Link]
        upgrade = self._is_upgrade_allowed(req)
        best_candidate = self.finder.find_requirement(req, upgrade)
        if not best_candidate:
            return None
        # Log a warning per PEP 592 if necessary before returning.
        link = best_candidate.link
        if link.is_yanked:
            reason = link.yanked_reason or '<none given>'
            msg = (
                # Mark this as a unicode string to prevent
                # "UnicodeEncodeError: 'ascii' codec can't encode character"
                # in Python 2 when the reason contains non-ascii characters.
                u'The candidate selected for download or install is a '
                'yanked version: {candidate}\n'
                'Reason for being yanked: {reason}'
            ).format(candidate=best_candidate, reason=reason)
            logger.warning(msg)
        return link

    def _populate_link(self, req):
        # type: (InstallRequirement) -> None
        """Ensure that if a link can be found for this, that it is found.

        Note that req.link may still be None - if the requirement is already
        installed and not needed to be upgraded based on the return value of
        _is_upgrade_allowed().

        If preparer.require_hashes is True, don't use the wheel cache, because
        cached wheels, always built locally, have different hashes than the
        files downloaded from the index server and thus throw false hash
        mismatches. Furthermore, cached wheels at present have undeterministic
        contents due to file modification times.
        """
        if req.link is None:
            req.link = self._find_requirement_link(req)
        if self.wheel_cache is None or self.preparer.require_hashes:
            return
        cache_entry = self.wheel_cache.get_cache_entry(
            link=req.link,
            package_name=req.name,
            supported_tags=get_supported(),
        )
        if cache_entry is not None:
            logger.debug('Using cached wheel link: %s', cache_entry.link)
            if req.link is req.original_link and cache_entry.persistent:
                req.original_link_is_in_wheel_cache = True
            req.link = cache_entry.link

    def _get_dist_for(self, req):
        # type: (InstallRequirement) -> Distribution
        """Takes a InstallRequirement and returns a single AbstractDist \
        representing a prepared variant of the same.
        """
        if req.editable:
            return self.preparer.prepare_editable_requirement(req)
        # satisfied_by is only evaluated by calling _check_skip_installed,
        # so it must be None here.
        assert req.satisfied_by is None
        skip_reason = self._check_skip_installed(req)
        if req.satisfied_by:
            return self.preparer.prepare_installed_requirement(
                req, skip_reason
            )
        # We eagerly populate the link, since that's our "legacy" behavior.
        self._populate_link(req)
        dist = self.preparer.prepare_linked_requirement(req)
        # NOTE
        # The following portion is for determining if a certain package is
        # going to be re-installed/upgraded or not and reporting to the user.
        # This should probably get cleaned up in a future refactor.
        # req.req is only avail after unpack for URL
        # pkgs repeat check_if_exists to uninstall-on-upgrade
        # (#14)
        if not self.ignore_installed:
            req.check_if_exists(self.use_user_site)
        if req.satisfied_by:
            should_modify = (
                self.upgrade_strategy != "to-satisfy-only" or
                self.force_reinstall or
                self.ignore_installed or
                req.link.scheme == 'file'
            )
            if should_modify:
                self._set_req_to_reinstall(req)
            else:
                logger.info(
                    'Requirement already satisfied (use --upgrade to upgrade):'
                    ' %s', req,
                )
        return dist

    def _resolve_one(
        self,
        requirement_set,  # type: RequirementSet
        req_to_install,  # type: InstallRequirement
    ):
        # type: (...) -> List[InstallRequirement]
        """Prepare a single requirements file.

        :return: A list of additional InstallRequirements to also install.
        """
        # Tell user what we are doing for this requirement:
        # obtain (editable), skipping, processing (local url), collecting
        # (remote url or package name)
        if req_to_install.constraint or req_to_install.prepared:
            return []
        req_to_install.prepared = True
        # Parse and return dependencies
        dist = self._get_dist_for(req_to_install)
        # This will raise UnsupportedPythonVersion if the given Python
        # version isn't compatible with the distribution's Requires-Python.
        _check_dist_requires_python(
            dist, version_info=self._py_version_info,
            ignore_requires_python=self.ignore_requires_python,
        )
        more_reqs = []  # type: List[InstallRequirement]

        def add_req(subreq, extras_requested):
            # Register one sub-requirement of req_to_install in the
            # requirement set and record the parent/child relationship.
            sub_install_req = self._make_install_req(
                str(subreq),
                req_to_install,
            )
            parent_req_name = req_to_install.name
            to_scan_again, add_to_parent = requirement_set.add_requirement(
                sub_install_req,
                parent_req_name=parent_req_name,
                extras_requested=extras_requested,
            )
            if parent_req_name and add_to_parent:
                self._discovered_dependencies[parent_req_name].append(
                    add_to_parent
                )
            more_reqs.extend(to_scan_again)

        with indent_log():
            # We add req_to_install before its dependencies, so that we
            # can refer to it when adding dependencies.
            if not requirement_set.has_requirement(req_to_install.name):
                # 'unnamed' requirements will get added here
                # 'unnamed' requirements can only come from being directly
                # provided by the user.
                assert req_to_install.user_supplied
                requirement_set.add_requirement(
                    req_to_install, parent_req_name=None,
                )
            if not self.ignore_dependencies:
                if req_to_install.extras:
                    logger.debug(
                        "Installing extra requirements: %r",
                        ','.join(req_to_install.extras),
                    )
                # Warn about extras the user asked for that the dist
                # does not actually provide.
                missing_requested = sorted(
                    set(req_to_install.extras) - set(dist.extras)
                )
                for missing in missing_requested:
                    logger.warning(
                        "%s does not provide the extra '%s'",
                        dist, missing
                    )
                available_requested = sorted(
                    set(dist.extras) & set(req_to_install.extras)
                )
                for subreq in dist.requires(available_requested):
                    add_req(subreq, extras_requested=available_requested)
        return more_reqs

    def get_installation_order(self, req_set):
        # type: (RequirementSet) -> List[InstallRequirement]
        """Create the installation order.

        The installation order is topological - requirements are installed
        before the requiring thing. We break cycles at an arbitrary point,
        and make no other guarantees.
        """
        # The current implementation, which we may change at any point
        # installs the user specified things in the order given, except when
        # dependencies must come earlier to achieve topological order.
        order = []
        ordered_reqs = set()  # type: Set[InstallRequirement]

        def schedule(req):
            # Depth-first post-order walk; the ordered_reqs set both
            # deduplicates and breaks dependency cycles.
            if req.satisfied_by or req in ordered_reqs:
                return
            if req.constraint:
                return
            ordered_reqs.add(req)
            for dep in self._discovered_dependencies[req.name]:
                schedule(dep)
            order.append(req)

        for install_req in req_set.requirements.values():
            schedule(install_req)
        return order
| |
# -*- coding: utf-8 -*-
"""
The idea of MultilingualManager is taken from
django-linguo by Zach Mathew
https://github.com/zmathew/django-linguo
"""
from django.db import models
from django.db.models import FieldDoesNotExist
from django.db.models.fields.related import RelatedField, RelatedObject
from django.db.models.sql.where import Constraint
from django.utils.tree import Node
from modeltranslation import settings
from modeltranslation.fields import TranslationField
from modeltranslation.utils import (build_localized_fieldname, get_language,
auto_populate)
def get_translatable_fields_for_model(model):
    """Return the translatable field names of *model*, or ``None`` when the
    model is not registered for translation."""
    from modeltranslation.translator import NotRegistered, translator
    try:
        opts = translator.get_options_for_model(model)
    except NotRegistered:
        return None
    return opts.get_field_names()
def rewrite_lookup_key(model, lookup_key):
    """Rewrite a queryset lookup so translatable fields target the localized
    field of the active language.

    For example ``name__startswith`` becomes ``name_fr__startswith``. When
    the lookup spans a relation to another translated model, the remainder
    of the key is rewritten recursively against that model.
    """
    head, sep, tail = lookup_key.partition('__')
    original_head = head
    translatable_fields = get_translatable_fields_for_model(model)
    if translatable_fields is not None and head in translatable_fields:
        head = build_localized_fieldname(head, get_language())
    if not sep:
        return head
    # Match on the *original* first segment, since ``head`` may already
    # have been localized above.
    for field_name, related_model in get_fields_to_translatable_models(model):
        if field_name == original_head:
            tail = rewrite_lookup_key(related_model, tail)
            break
    return head + sep + tail
def rewrite_order_lookup_key(model, lookup_key):
    """Rewrite an ``order_by`` key, preserving a leading ``-`` (descending)."""
    descending = lookup_key.startswith('-')
    stripped = lookup_key[1:] if descending else lookup_key
    rewritten = rewrite_lookup_key(model, stripped)
    return '-' + rewritten if descending else rewritten
# Memoization cache: model -> [(field_name, related translatable model), ...]
_F2TM_CACHE = {}


def get_fields_to_translatable_models(model):
    """Return ``(field_name, model)`` pairs for every relation of *model*
    whose target model has translations registered. Results are memoized
    per model class."""
    if model in _F2TM_CACHE:
        return _F2TM_CACHE[model]
    results = []
    for field_name in model._meta.get_all_field_names():
        field_object, modelclass, direct, m2m = model._meta.get_field_by_name(field_name)
        if direct and isinstance(field_object, RelatedField):
            # Forward relationship: the target model is on ``related``.
            target = field_object.related.parent_model
            if get_translatable_fields_for_model(target) is not None:
                results.append((field_name, target))
        if isinstance(field_object, RelatedObject):
            # Reverse relationship.
            if get_translatable_fields_for_model(field_object.model) is not None:
                results.append((field_name, field_object.model))
    _F2TM_CACHE[model] = results
    return results
_C2F_CACHE = {}
def get_field_by_colum_name(model, col):
# First, try field with the column name
try:
field = model._meta.get_field(col)
if field.column == col:
return field
except FieldDoesNotExist:
pass
field = _C2F_CACHE.get((model, col), None)
if field:
return field
# D'oh, need to search through all of them.
for field in model._meta.fields:
if field.column == col:
_C2F_CACHE[(model, col)] = field
return field
assert False, "No field found for column %s" % col
class MultilingualQuerySet(models.query.QuerySet):
    """QuerySet that transparently rewrites lookups, ordering and value
    queries on translatable fields to the localized field of the currently
    active language."""

    def __init__(self, *args, **kwargs):
        super(MultilingualQuerySet, self).__init__(*args, **kwargs)
        self._post_init()

    def _post_init(self):
        # _rewrite: whether lookup keys get rewritten at all.
        # _populate: per-queryset override of the auto-population mode.
        self._rewrite = True
        self._populate = None
        if self.model and (not self.query.order_by):
            if self.model._meta.ordering:
                # If we have default ordering specified on the model, set it now so that
                # it can be rewritten. Otherwise sql.compiler will grab it directly from _meta
                ordering = []
                for key in self.model._meta.ordering:
                    ordering.append(rewrite_order_lookup_key(self.model, key))
                self.query.add_ordering(*ordering)

    # This method was not present in django-linguo
    def _clone(self, klass=None, *args, **kwargs):
        # Carry the rewrite/populate flags over to clones; when cloning into
        # a foreign QuerySet subclass, mix MultilingualQuerySet in on the fly.
        if klass is not None and not issubclass(klass, MultilingualQuerySet):
            class NewClass(klass, MultilingualQuerySet):
                pass
            NewClass.__name__ = 'Multilingual%s' % klass.__name__
            klass = NewClass
        kwargs.setdefault('_rewrite', self._rewrite)
        kwargs.setdefault('_populate', self._populate)
        return super(MultilingualQuerySet, self)._clone(klass, *args, **kwargs)

    # This method was not present in django-linguo
    def rewrite(self, mode=True):
        """Return a clone with lookup rewriting switched on/off."""
        return self._clone(_rewrite=mode)

    # This method was not present in django-linguo
    def populate(self, mode='all'):
        """
        Overrides the translation fields population mode for this query set.
        """
        return self._clone(_populate=mode)

    def _rewrite_applied_operations(self):
        """
        Rewrite fields in already applied filters/ordering.
        Useful when converting any QuerySet into MultilingualQuerySet.
        """
        self._rewrite_where(self.query.where)
        self._rewrite_where(self.query.having)
        self._rewrite_order()

    def _rewrite_where(self, q):
        """
        Rewrite field names inside WHERE tree.
        """
        if isinstance(q, tuple) and isinstance(q[0], Constraint):
            c = q[0]
            if c.field is None:
                c.field = get_field_by_colum_name(self.model, c.col)
            new_name = rewrite_lookup_key(self.model, c.field.name)
            if c.field.name != new_name:
                c.field = self.model._meta.get_field(new_name)
                c.col = c.field.column
        if isinstance(q, Node):
            # Recurse into the tree's children.
            for child in q.children:
                self._rewrite_where(child)

    def _rewrite_order(self):
        self.query.order_by = [rewrite_order_lookup_key(self.model, field_name)
                               for field_name in self.query.order_by]

    # This method was not present in django-linguo
    def _rewrite_q(self, q):
        """Rewrite field names inside Q call."""
        if isinstance(q, tuple) and len(q) == 2:
            return rewrite_lookup_key(self.model, q[0]), q[1]
        if isinstance(q, Node):
            q.children = list(map(self._rewrite_q, q.children))
        return q

    # This method was not present in django-linguo
    def _rewrite_f(self, q):
        """
        Rewrite field names inside F call.
        """
        if isinstance(q, models.F):
            q.name = rewrite_lookup_key(self.model, q.name)
            return q
        if isinstance(q, Node):
            q.children = list(map(self._rewrite_f, q.children))
        return q

    def _filter_or_exclude(self, negate, *args, **kwargs):
        if not self._rewrite:
            return super(MultilingualQuerySet, self)._filter_or_exclude(negate, *args, **kwargs)
        args = map(self._rewrite_q, args)
        # BUG FIX: iterate over a snapshot -- deleting/adding keys while
        # iterating kwargs.items() raises RuntimeError on Python 3.
        for key, val in list(kwargs.items()):
            new_key = rewrite_lookup_key(self.model, key)
            del kwargs[key]
            kwargs[new_key] = self._rewrite_f(val)
        return super(MultilingualQuerySet, self)._filter_or_exclude(negate, *args, **kwargs)

    def _get_original_fields(self):
        # All concrete fields except the generated translation fields.
        return [f.attname for f in self.model._meta.fields if not isinstance(f, TranslationField)]

    def order_by(self, *field_names):
        """
        Change translatable field names in an ``order_by`` argument
        to translation fields for the current language.
        """
        if not self._rewrite:
            return super(MultilingualQuerySet, self).order_by(*field_names)
        new_args = []
        for key in field_names:
            new_args.append(rewrite_order_lookup_key(self.model, key))
        return super(MultilingualQuerySet, self).order_by(*new_args)

    def update(self, **kwargs):
        if not self._rewrite:
            return super(MultilingualQuerySet, self).update(**kwargs)
        # BUG FIX: same snapshot issue as in _filter_or_exclude -- mutating
        # kwargs while iterating kwargs.items() raises RuntimeError on Py3.
        for key, val in list(kwargs.items()):
            new_key = rewrite_lookup_key(self.model, key)
            del kwargs[key]
            kwargs[new_key] = self._rewrite_f(val)
        return super(MultilingualQuerySet, self).update(**kwargs)
    update.alters_data = True

    # This method was not present in django-linguo
    @property
    def _populate_mode(self):
        # Populate can be set using a global setting or a manager method.
        if self._populate is None:
            return settings.AUTO_POPULATE
        return self._populate

    # This method was not present in django-linguo
    def create(self, **kwargs):
        """
        Allows to override population mode with a ``populate`` method.
        """
        with auto_populate(self._populate_mode):
            return super(MultilingualQuerySet, self).create(**kwargs)

    # This method was not present in django-linguo
    def get_or_create(self, **kwargs):
        """
        Allows to override population mode with a ``populate`` method.
        """
        with auto_populate(self._populate_mode):
            return super(MultilingualQuerySet, self).get_or_create(**kwargs)

    def _append_translated(self, fields):
        "If translated field is encountered, add also all its translation fields."
        fields = set(fields)
        from modeltranslation.translator import translator
        opts = translator.get_options_for_model(self.model)
        for key, translated in opts.fields.items():
            if key in fields:
                fields = fields.union(f.name for f in translated)
        return fields

    # This method was not present in django-linguo
    def defer(self, *fields):
        fields = self._append_translated(fields)
        return super(MultilingualQuerySet, self).defer(*fields)

    # This method was not present in django-linguo
    def only(self, *fields):
        fields = self._append_translated(fields)
        return super(MultilingualQuerySet, self).only(*fields)

    # This method was not present in django-linguo
    def raw_values(self, *fields):
        # Bypass the field-name rewriting that values() performs.
        return super(MultilingualQuerySet, self).values(*fields)

    # This method was not present in django-linguo
    def values(self, *fields):
        if not self._rewrite:
            return super(MultilingualQuerySet, self).values(*fields)
        if not fields:
            # Emulate original queryset behaviour: get all fields that are not translation fields
            fields = self._get_original_fields()
        new_args = []
        for key in fields:
            new_args.append(rewrite_lookup_key(self.model, key))
        vqs = super(MultilingualQuerySet, self).values(*new_args)
        # Keep the caller's (pre-rewrite) field names in the result dicts.
        vqs.field_names = list(fields)
        return vqs

    # This method was not present in django-linguo
    def values_list(self, *fields, **kwargs):
        if not self._rewrite:
            return super(MultilingualQuerySet, self).values_list(*fields, **kwargs)
        if not fields:
            # Emulate original queryset behaviour: get all fields that are not translation fields
            fields = self._get_original_fields()
        new_args = []
        for key in fields:
            new_args.append(rewrite_lookup_key(self.model, key))
        return super(MultilingualQuerySet, self).values_list(*new_args, **kwargs)

    # This method was not present in django-linguo
    def dates(self, field_name, *args, **kwargs):
        if not self._rewrite:
            return super(MultilingualQuerySet, self).dates(field_name, *args, **kwargs)
        new_key = rewrite_lookup_key(self.model, field_name)
        return super(MultilingualQuerySet, self).dates(new_key, *args, **kwargs)
class MultilingualManager(models.Manager):
    """Manager whose querysets are (or are mixed into) MultilingualQuerySet."""
    use_for_related_fields = True

    def rewrite(self, *args, **kwargs):
        return self.get_queryset().rewrite(*args, **kwargs)

    def populate(self, *args, **kwargs):
        return self.get_queryset().populate(*args, **kwargs)

    def raw_values(self, *args, **kwargs):
        return self.get_queryset().raw_values(*args, **kwargs)

    def get_queryset(self):
        parent = super(MultilingualManager, self)
        # Django 1.4 / 1.5 named this hook ``get_query_set``.
        if hasattr(parent, 'get_queryset'):
            qs = parent.get_queryset()
        else:
            qs = parent.get_query_set()
        if qs.__class__ == models.query.QuerySet:
            # Plain QuerySet: just retarget the class.
            qs.__class__ = MultilingualQuerySet
        else:
            # Custom QuerySet subclass: mix MultilingualQuerySet in.
            class NewClass(qs.__class__, MultilingualQuerySet):
                pass
            NewClass.__name__ = 'Multilingual%s' % qs.__class__.__name__
            qs.__class__ = NewClass
        qs._post_init()
        qs._rewrite_applied_operations()
        return qs

    get_query_set = get_queryset
| |
from model.address import Adress
import re
class Address:
def __init__(self, app):
self.app = app
def create_new_address(self, address):
wd = self.app.wd
# open home page
self.app.open_home_page_address()
# add_new_address
wd.find_element_by_link_text("add new").click()
# fill_information_about_somebody
wd.find_element_by_name("firstname").click()
wd.find_element_by_name("firstname").clear()
wd.find_element_by_name("firstname").send_keys("%s" % address.firstname)
wd.find_element_by_name("middlename").click()
wd.find_element_by_name("middlename").clear()
wd.find_element_by_name("middlename").send_keys("%s" % address.middlename)
wd.find_element_by_name("lastname").click()
wd.find_element_by_name("lastname").clear()
wd.find_element_by_name("lastname").send_keys("%s" % address.lastname)
wd.find_element_by_name("nickname").click()
wd.find_element_by_name("nickname").clear()
wd.find_element_by_name("nickname").send_keys("%s" % address.nickname)
wd.find_element_by_name("theform").click()
wd.find_element_by_name("title").click()
wd.find_element_by_name("title").clear()
wd.find_element_by_name("title").send_keys("%s" % address.title)
wd.find_element_by_name("company").click()
wd.find_element_by_name("company").clear()
wd.find_element_by_name("company").send_keys("%s" % address.company)
wd.find_element_by_name("address").click()
wd.find_element_by_name("address").clear()
wd.find_element_by_name("address").send_keys("%s" % address.address)
wd.find_element_by_name("home").click()
wd.find_element_by_name("home").clear()
wd.find_element_by_name("home").send_keys("%s" % address.home)
wd.find_element_by_name("mobile").click()
wd.find_element_by_name("mobile").clear()
wd.find_element_by_name("mobile").send_keys("%s" % address.mobile)
wd.find_element_by_name("work").click()
wd.find_element_by_name("work").clear()
wd.find_element_by_name("work").send_keys("%s" % address.work)
wd.find_element_by_name("fax").click()
wd.find_element_by_name("fax").clear()
wd.find_element_by_name("fax").send_keys("%s" % address.fax)
wd.find_element_by_name("homepage").click()
wd.find_element_by_name("homepage").clear()
wd.find_element_by_name("homepage").send_keys("%s" % address.homepage)
wd.find_element_by_name("address2").click()
wd.find_element_by_name("address2").clear()
wd.find_element_by_name("address2").send_keys("%s" % address.address2)
wd.find_element_by_name("phone2").click()
wd.find_element_by_name("phone2").clear()
wd.find_element_by_name("phone2").send_keys("%s" % address.phone2)
wd.find_element_by_name("notes").click()
wd.find_element_by_name("notes").clear()
wd.find_element_by_name("notes").send_keys("%s" % address.notes)
# submit_address_create
wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
self.address_cache = None
def delete_first_address(self):
self.delete_some_address(0)
def delete_some_address(self, index):
wd = self.app.wd
# open home page
self.app.open_home_page_address()
self.select_address_by_index(index)
wd.find_element_by_xpath("//input[@value='Delete']").click()
wd.switch_to_alert().accept()
self.address_cache = None
def select_first_address(self):
wd = self.app.wd
wd.find_element_by_name("selected[]").click()
def select_address_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
def count(self):
wd = self.app.wd
self.app.open_home_page_address()
return len(wd.find_elements_by_name("selected[]"))
def fill_group_form(self, address):
wd = self.app.wd
self.change_field_value("firstname", address.firstname)
self.change_field_value("middlename", address.middlename)
self.change_field_value("lastname", address.lastname)
self.change_field_value("nickname", address.nickname)
self.change_field_value("title", address.title)
self.change_field_value("company", address.company)
self.change_field_value("address", address.address)
self.change_field_value("home", address.home)
self.change_field_value("mobile", address.mobile)
self.change_field_value("work", address.work)
self.change_field_value("fax", address.fax)
self.change_field_value("homepage", address.homepage)
self.change_field_value("address2", address.address2)
self.change_field_value("phone2", address.phone2)
self.change_field_value("notes", address.notes)
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def modify_some_address(self, index, new_address_data):
wd = self.app.wd
self.app.open_home_page_address()
self.select_address_by_index(index)
# open modification form
wd.find_element_by_xpath("//div[@id='content']/form[@name='MainForm']/table/tbody/tr["+str(index+2)+"]/td[8]/a/img").click()
# fill group form
self.fill_group_form(new_address_data)
# submit modification
wd.find_element_by_name("update").click()
self.address_cache = None
def modify_first_address(self):
self.modify_some_address(0)
def sometimes_add_new_address(self):
if self.count() == 0:
self.create_new_address(
Adress(firstname="efwgwe", middlename="gweegweggeweg", lastname="wgewegwegwegweg",
nickname="wegwegwegeggg", title="egegegweg", company="dfgfgdfgdgdf",
address="rgergerrherg", home="rgrgerger", mobile="rgegrrg", work="fgfgbfb",
fax="rgergeg", homepage="dfhhdfhhd", address2="fhdhdfhfhdf", phone2="ddhfdfbfbd",
notes="dfhhdhfhdhfh"))
address_cache = None
def get_address_list(self):
if self.address_cache is None:
wd = self.app.wd
self.app.open_home_page_address()
self.address_cache = []
for row in wd.find_elements_by_name("entry"):
cells = row.find_elements_by_tag_name("td")
firstname = cells[1].text
lastname = cells[2].text
id = cells[0].find_element_by_tag_name('input').get_attribute('value')
all_phones = cells[5].text.splitlines()
self.address_cache.append(Adress(firstname = firstname, id = id,lastname = lastname, home = all_phones[0] , mobile = all_phones[1], work = all_phones[2], phone2 = all_phones[3]))
return list(self.address_cache)
def open_contact_to_edit_by_index(self, index):
    """Open the edit form for the contact in table row `index`."""
    wd = self.app.wd
    self.app.open_home_page_address()
    target_row = wd.find_elements_by_name("entry")[index]
    # Column 7 holds the edit (pencil) link.
    edit_cell = target_row.find_elements_by_tag_name("td")[7]
    edit_cell.find_element_by_tag_name('a').click()
def open_contact_view_by_index(self, index):
    """Open the read-only detail view for the contact in table row `index`."""
    wd = self.app.wd
    self.app.open_home_page_address()
    target_row = wd.find_elements_by_name("entry")[index]
    # Column 6 holds the view (details) link.
    view_cell = target_row.find_elements_by_tag_name("td")[6]
    view_cell.find_element_by_tag_name('a').click()
def get_contact_info_from_edit_page(self, index):
    """Scrape the contact fields from the edit form of row `index` into an Adress."""
    wd = self.app.wd
    self.open_contact_to_edit_by_index(index)

    def value_of(field):
        # Read the current value of a named form input.
        return wd.find_element_by_name(field).get_attribute("value")

    return Adress(firstname=value_of("firstname"), id=value_of("id"),
                  lastname=value_of("lastname"), home=value_of("home"),
                  mobile=value_of("mobile"), work=value_of("work"),
                  phone2=value_of("phone2"))
def get_address_from_view_page(self, index):
    """Parse the phone numbers from the read-only view of the contact at row `index`.

    The view page renders each phone on its own line with a one-letter
    prefix, e.g. "H: 12345" for the home phone.
    """
    wd = self.app.wd
    self.open_contact_view_by_index(index)
    page_text = wd.find_element_by_id('content').text

    def phone(marker):
        # Extract the number that follows "<marker>: " in the page text.
        return re.search(marker + ": (.*)", page_text).group(1)

    return Adress(home=phone("H"), work=phone("W"),
                  mobile=phone("M"), phone2=phone("P"))
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-dimensional (Vector) SinhArcsinh transformation of a distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops.distributions import normal
from tensorflow.python.ops.distributions import transformed_distribution
__all__ = [
"VectorSinhArcsinhDiag",
]
class VectorSinhArcsinhDiag(transformed_distribution.TransformedDistribution):
  """The (diagonal) SinhArcsinh transformation of a distribution on `R^k`.

  This distribution models a random vector `Y = (Y1,...,Yk)`, making use of
  a `SinhArcsinh` transformation (which has adjustable tailweight and skew),
  a rescaling, and a shift.

  The `SinhArcsinh` transformation of the Normal is described in great depth in
  [Sinh-arcsinh distributions](https://www.jstor.org/stable/27798865).
  Here we use a slightly different parameterization, in terms of `tailweight`
  and `skewness`. Additionally we allow for distributions other than Normal,
  and control over `scale` as well as a "shift" parameter `loc`.

  #### Mathematical Details

  Given iid random vector `Z = (Z1,...,Zk)`, we define the VectorSinhArcsinhDiag
  transformation of `Z`, `Y`, parameterized by
  `(loc, scale, skewness, tailweight)`, via the relation (with `@` denoting
  matrix multiplication):

  ```
  Y := loc + scale @ F(Z) * (2 / F_0(2))
  F(Z) := Sinh( (Arcsinh(Z) + skewness) * tailweight )
  F_0(Z) := Sinh( Arcsinh(Z) * tailweight )
  ```

  This distribution is similar to the location-scale transformation
  `L(Z) := loc + scale @ Z` in the following ways:

  * If `skewness = 0` and `tailweight = 1` (the defaults), `F(Z) = Z`, and then
    `Y = L(Z)` exactly.
  * `loc` is used in both to shift the result by a constant factor.
  * The multiplication of `scale` by `2 / F_0(2)` ensures that if `skewness = 0`
    `P[Y - loc <= 2 * scale] = P[L(Z) - loc <= 2 * scale]`.
    Thus it can be said that the weights in the tails of `Y` and `L(Z)` beyond
    `loc + 2 * scale` are the same.

  This distribution is different than `loc + scale @ Z` due to the
  reshaping done by `F`:

  * Positive (negative) `skewness` leads to positive (negative) skew.
    * positive skew means, the mode of `F(Z)` is "tilted" to the right.
    * positive skew means positive values of `F(Z)` become more likely, and
      negative values become less likely.
  * Larger (smaller) `tailweight` leads to fatter (thinner) tails.
    * Fatter tails mean larger values of `|F(Z)|` become more likely.
    * `tailweight < 1` leads to a distribution that is "flat" around `Y = loc`,
      and a very steep drop-off in the tails.
    * `tailweight > 1` leads to a distribution more peaked at the mode with
      heavier tails.

  To see the argument about the tails, note that for `|Z| >> 1` and
  `|Z| >> (|skewness| * tailweight)**tailweight`, we have
  `Y approx 0.5 Z**tailweight e**(sign(Z) skewness * tailweight)`.

  To see the argument regarding multiplying `scale` by `2 / F_0(2)`,

  ```
  P[(Y - loc) / scale <= 2] = P[F(Z) * (2 / F_0(2)) <= 2]
                            = P[F(Z) <= F_0(2)]
                            = P[Z <= 2]  (if F = F_0).
  ```
  """

  def __init__(self,
               loc=None,
               scale_diag=None,
               scale_identity_multiplier=None,
               skewness=None,
               tailweight=None,
               distribution=None,
               validate_args=False,
               allow_nan_stats=True,
               # NOTE(review): this default looks copy-pasted from the
               # multivariate-normal class; presumably it should be
               # "VectorSinhArcsinhDiag" — confirm before changing, since the
               # default affects op name scopes.
               name="MultivariateNormalLinearOperator"):
    """Construct VectorSinhArcsinhDiag distribution on `R^k`.

    The arguments `scale_diag` and `scale_identity_multiplier` combine to
    define the diagonal `scale` referred to in this class docstring:

    ```none
    scale = diag(scale_diag + scale_identity_multiplier * ones(k))
    ```

    The `batch_shape` is the broadcast shape between `loc` and `scale`
    arguments.

    The `event_shape` is given by last dimension of the matrix implied by
    `scale`. The last dimension of `loc` (if provided) must broadcast with this
    Additional leading dimensions (if any) will index batches.

    Args:
      loc: Floating-point `Tensor`. If this is set to `None`, `loc` is
        implicitly `0`. When specified, may have shape `[B1, ..., Bb, k]` where
        `b >= 0` and `k` is the event size.
      scale_diag: Non-zero, floating-point `Tensor` representing a diagonal
        matrix added to `scale`. May have shape `[B1, ..., Bb, k]`, `b >= 0`,
        and characterizes `b`-batches of `k x k` diagonal matrices added to
        `scale`. When both `scale_identity_multiplier` and `scale_diag` are
        `None` then `scale` is the `Identity`.
      scale_identity_multiplier: Non-zero, floating-point `Tensor` representing
        a scale-identity-matrix added to `scale`. May have shape
        `[B1, ..., Bb]`, `b >= 0`, and characterizes `b`-batches of scale
        `k x k` identity matrices added to `scale`. When both
        `scale_identity_multiplier` and `scale_diag` are `None` then `scale`
        is the `Identity`.
      skewness: Skewness parameter. floating-point `Tensor` with shape
        broadcastable with `event_shape`.
      tailweight: Tailweight parameter. floating-point `Tensor` with shape
        broadcastable with `event_shape`.
      distribution: `tf.Distribution`-like instance. Distribution from which `k`
        iid samples are used as input to transformation `F`. Default is
        `tf.distributions.Normal(loc=0., scale=1.)`.
        Must be a scalar-batch, scalar-event distribution. Typically
        `distribution.reparameterization_type = FULLY_REPARAMETERIZED` or it is
        a function of non-trainable parameters. WARNING: If you backprop through
        a VectorSinhArcsinhDiag sample and `distribution` is not
        `FULLY_REPARAMETERIZED` yet is a function of trainable variables, then
        the gradient will be incorrect!
      validate_args: Python `bool`, default `False`. When `True` distribution
        parameters are checked for validity despite possibly degrading runtime
        performance. When `False` invalid inputs may silently render incorrect
        outputs.
      allow_nan_stats: Python `bool`, default `True`. When `True`,
        statistics (e.g., mean, mode, variance) use the value "`NaN`" to
        indicate the result is undefined. When `False`, an exception is raised
        if one or more of the statistic's batch members are undefined.
      name: Python `str` name prefixed to Ops created by this class.

    Raises:
      ValueError: if at most `scale_identity_multiplier` is specified.
    """
    # Captured before any locals are created so the parameter dict only
    # contains the constructor arguments.
    parameters = locals()

    with ops.name_scope(
        name,
        values=[
            loc, scale_diag, scale_identity_multiplier, skewness, tailweight
        ]):
      loc = ops.convert_to_tensor(loc, name="loc") if loc is not None else loc
      tailweight = 1. if tailweight is None else tailweight
      # Remember whether skewness was defaulted: in that case F and F_0
      # coincide and the no-skew bijector below can be reused as-is.
      has_default_skewness = skewness is None
      skewness = 0. if skewness is None else skewness

      # Recall, with Z a random variable,
      #   Y := loc + C * F(Z),
      #   F(Z) := Sinh( (Arcsinh(Z) + skewness) * tailweight )
      #   F_0(Z) := Sinh( Arcsinh(Z) * tailweight )
      #   C := 2 * scale / F_0(2)

      # Construct shapes and 'scale' out of the scale_* and loc kwargs.
      # scale_linop is only an intermediary to:
      #  1. get shapes from looking at loc and the two scale args.
      #  2. combine scale_diag with scale_identity_multiplier, which gives us
      #     'scale', which in turn gives us 'C'.
      scale_linop = distribution_util.make_diag_scale(
          loc=loc,
          scale_diag=scale_diag,
          scale_identity_multiplier=scale_identity_multiplier,
          validate_args=False,
          assert_positive=False)
      batch_shape, event_shape = distribution_util.shapes_from_loc_and_scale(
          loc, scale_linop)
      # scale_linop.diag_part() is efficient since it is a diag type linop.
      scale_diag_part = scale_linop.diag_part()
      dtype = scale_diag_part.dtype

      if distribution is None:
        distribution = normal.Normal(
            loc=array_ops.zeros([], dtype=dtype),
            scale=array_ops.ones([], dtype=dtype),
            allow_nan_stats=allow_nan_stats)
      else:
        # A user-supplied distribution must be scalar-batch/scalar-event;
        # any validation asserts are threaded through scale_diag_part so
        # they run before the scale is used.
        asserts = distribution_util.maybe_check_scalar_distribution(
            distribution, dtype, validate_args)
        if asserts:
          scale_diag_part = control_flow_ops.with_dependencies(
              asserts, scale_diag_part)

      # Make the SAS bijector, 'F'.
      skewness = ops.convert_to_tensor(skewness, dtype=dtype, name="skewness")
      tailweight = ops.convert_to_tensor(
          tailweight, dtype=dtype, name="tailweight")
      f = bijectors.SinhArcsinh(
          skewness=skewness, tailweight=tailweight, event_ndims=1)
      if has_default_skewness:
        # skewness == 0, so F == F_0 and no extra bijector is needed.
        f_noskew = f
      else:
        f_noskew = bijectors.SinhArcsinh(
            skewness=skewness.dtype.as_numpy_dtype(0.),
            tailweight=tailweight, event_ndims=0)

      # Make the Affine bijector, Z --> loc + C * Z.
      # C = 2 * scale / F_0(2) normalizes the tail weight (see class docs).
      c = 2 * scale_diag_part / f_noskew.forward(
          ops.convert_to_tensor(2, dtype=dtype))
      affine = bijectors.Affine(
          shift=loc, scale_diag=c, validate_args=validate_args)

      bijector = bijectors.Chain([affine, f])

      super(VectorSinhArcsinhDiag, self).__init__(
          distribution=distribution,
          bijector=bijector,
          batch_shape=batch_shape,
          event_shape=event_shape,
          validate_args=validate_args,
          name=name)
    self._parameters = parameters
    self._loc = loc
    self._scale = scale_linop
    self._tailweight = tailweight
    self._skewness = skewness

  @property
  def loc(self):
    """The `loc` in `Y := loc + scale @ F(Z) * (2 / F_0(2))`."""
    return self._loc

  @property
  def scale(self):
    """The `LinearOperator` `scale` in `Y := loc + scale @ F(Z) * (2 / F_0(2))`."""
    return self._scale

  @property
  def tailweight(self):
    """Controls the tail decay. `tailweight > 1` means faster than Normal."""
    return self._tailweight

  @property
  def skewness(self):
    """Controls the skewness. `Skewness > 0` means right skew."""
    return self._skewness
| |
# Generated from left.g4 by ANTLR 4.5.1
# encoding: utf-8
from antlr4 import *
from io import StringIO
def serializedATN():
    """Return the serialized ATN for the 'left' grammar.

    Machine-generated by ANTLR 4.5.1 from left.g4 — do not edit by hand;
    regenerate from the grammar instead.
    """
    with StringIO() as buf:
        buf.write("\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3\35")
        buf.write("\u00b1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7")
        buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16")
        buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\3\2\3\2")
        buf.write("\3\2\3\2\6\2)\n\2\r\2\16\2*\3\3\3\3\3\3\3\3\3\3\3\3\3")
        buf.write("\3\3\3\3\3\5\3\66\n\3\3\4\3\4\3\4\7\4;\n\4\f\4\16\4>\13")
        buf.write("\4\3\5\3\5\3\5\7\5C\n\5\f\5\16\5F\13\5\3\5\5\5I\n\5\3")
        buf.write("\6\3\6\3\6\3\6\5\6O\n\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\3")
        buf.write("\6\3\6\3\6\3\6\3\6\3\6\3\6\3\6\7\6`\n\6\f\6\16\6c\13\6")
        buf.write("\3\7\3\7\5\7g\n\7\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\t")
        buf.write("\3\t\3\t\3\t\3\t\3\t\3\t\3\t\3\t\3\t\3\t\3\n\3\n\3\n\7")
        buf.write("\n\177\n\n\f\n\16\n\u0082\13\n\3\n\5\n\u0085\n\n\3\13")
        buf.write("\3\13\3\13\3\13\3\f\3\f\3\f\3\r\3\r\3\r\3\16\3\16\3\16")
        buf.write("\3\16\3\16\3\16\3\16\3\16\3\17\3\17\3\17\3\17\3\17\3\17")
        buf.write("\3\17\3\17\3\17\3\17\3\17\3\17\3\20\3\20\3\20\3\20\3\20")
        buf.write("\3\20\3\21\3\21\3\21\3\22\3\22\3\22\3\22\2\3\n\23\2\4")
        buf.write("\6\b\n\f\16\20\22\24\26\30\32\34\36 \"\2\2\u00b6\2(\3")
        buf.write("\2\2\2\4\65\3\2\2\2\6<\3\2\2\2\bH\3\2\2\2\nN\3\2\2\2\f")
        buf.write("f\3\2\2\2\16h\3\2\2\2\20p\3\2\2\2\22\u0084\3\2\2\2\24")
        buf.write("\u0086\3\2\2\2\26\u008a\3\2\2\2\30\u008d\3\2\2\2\32\u0090")
        buf.write("\3\2\2\2\34\u0098\3\2\2\2\36\u00a4\3\2\2\2 \u00aa\3\2")
        buf.write("\2\2\"\u00ad\3\2\2\2$%\5\4\3\2%&\7\3\2\2&)\3\2\2\2\')")
        buf.write("\5\20\t\2($\3\2\2\2(\'\3\2\2\2)*\3\2\2\2*(\3\2\2\2*+\3")
        buf.write("\2\2\2+\3\3\2\2\2,\66\5\n\6\2-\66\5\26\f\2.\66\5\24\13")
        buf.write("\2/\66\5\16\b\2\60\66\5\30\r\2\61\66\5\32\16\2\62\66\5")
        buf.write("\34\17\2\63\66\5\"\22\2\64\66\5 \21\2\65,\3\2\2\2\65-")
        buf.write("\3\2\2\2\65.\3\2\2\2\65/\3\2\2\2\65\60\3\2\2\2\65\61\3")
        buf.write("\2\2\2\65\62\3\2\2\2\65\63\3\2\2\2\65\64\3\2\2\2\66\5")
        buf.write("\3\2\2\2\678\5\4\3\289\7\3\2\29;\3\2\2\2:\67\3\2\2\2;")
        buf.write(">\3\2\2\2<:\3\2\2\2<=\3\2\2\2=\7\3\2\2\2><\3\2\2\2?D\5")
        buf.write("\n\6\2@A\7\4\2\2AC\5\n\6\2B@\3\2\2\2CF\3\2\2\2DB\3\2\2")
        buf.write("\2DE\3\2\2\2EI\3\2\2\2FD\3\2\2\2GI\3\2\2\2H?\3\2\2\2H")
        buf.write("G\3\2\2\2I\t\3\2\2\2JK\b\6\1\2KO\5\f\7\2LO\7\30\2\2MO")
        buf.write("\5\36\20\2NJ\3\2\2\2NL\3\2\2\2NM\3\2\2\2Oa\3\2\2\2PQ\f")
        buf.write("\n\2\2QR\7\5\2\2R`\5\n\6\13ST\f\t\2\2TU\7\6\2\2U`\5\n")
        buf.write("\6\nVW\f\b\2\2WX\7\7\2\2X`\5\n\6\tYZ\f\7\2\2Z[\7\b\2\2")
        buf.write("[`\5\n\6\b\\]\f\3\2\2]^\7\34\2\2^`\5\n\6\4_P\3\2\2\2_")
        buf.write("S\3\2\2\2_V\3\2\2\2_Y\3\2\2\2_\\\3\2\2\2`c\3\2\2\2a_\3")
        buf.write("\2\2\2ab\3\2\2\2b\13\3\2\2\2ca\3\2\2\2dg\7\32\2\2eg\7")
        buf.write("\33\2\2fd\3\2\2\2fe\3\2\2\2g\r\3\2\2\2hi\7\t\2\2ij\7\30")
        buf.write("\2\2jk\7\n\2\2kl\5\22\n\2lm\7\13\2\2mn\7\f\2\2no\7\30")
        buf.write("\2\2o\17\3\2\2\2pq\7\t\2\2qr\7\30\2\2rs\7\n\2\2st\5\22")
        buf.write("\n\2tu\7\13\2\2uv\7\f\2\2vw\7\30\2\2wx\7\r\2\2xy\5\6\4")
        buf.write("\2yz\7\16\2\2z\21\3\2\2\2{\u0080\5\26\f\2|}\7\4\2\2}\177")
        buf.write("\5\26\f\2~|\3\2\2\2\177\u0082\3\2\2\2\u0080~\3\2\2\2\u0080")
        buf.write("\u0081\3\2\2\2\u0081\u0085\3\2\2\2\u0082\u0080\3\2\2\2")
        buf.write("\u0083\u0085\3\2\2\2\u0084{\3\2\2\2\u0084\u0083\3\2\2")
        buf.write("\2\u0085\23\3\2\2\2\u0086\u0087\7\30\2\2\u0087\u0088\7")
        buf.write("\17\2\2\u0088\u0089\5\n\6\2\u0089\25\3\2\2\2\u008a\u008b")
        buf.write("\7\30\2\2\u008b\u008c\7\30\2\2\u008c\27\3\2\2\2\u008d")
        buf.write("\u008e\7\20\2\2\u008e\u008f\5\n\6\2\u008f\31\3\2\2\2\u0090")
        buf.write("\u0091\7\21\2\2\u0091\u0092\7\22\2\2\u0092\u0093\5\n\6")
        buf.write("\2\u0093\u0094\7\23\2\2\u0094\u0095\7\r\2\2\u0095\u0096")
        buf.write("\5\6\4\2\u0096\u0097\7\16\2\2\u0097\33\3\2\2\2\u0098\u0099")
        buf.write("\7\21\2\2\u0099\u009a\7\22\2\2\u009a\u009b\5\n\6\2\u009b")
        buf.write("\u009c\7\23\2\2\u009c\u009d\7\r\2\2\u009d\u009e\5\6\4")
        buf.write("\2\u009e\u009f\7\16\2\2\u009f\u00a0\7\24\2\2\u00a0\u00a1")
        buf.write("\7\r\2\2\u00a1\u00a2\5\6\4\2\u00a2\u00a3\7\16\2\2\u00a3")
        buf.write("\35\3\2\2\2\u00a4\u00a5\7\25\2\2\u00a5\u00a6\7\30\2\2")
        buf.write("\u00a6\u00a7\7\22\2\2\u00a7\u00a8\5\b\5\2\u00a8\u00a9")
        buf.write("\7\23\2\2\u00a9\37\3\2\2\2\u00aa\u00ab\7\26\2\2\u00ab")
        buf.write("\u00ac\5\16\b\2\u00ac!\3\2\2\2\u00ad\u00ae\7\27\2\2\u00ae")
        buf.write("\u00af\7\31\2\2\u00af#\3\2\2\2\16(*\65<DHN_af\u0080\u0084")
        return buf.getvalue()
class leftParser ( Parser ):
    # Machine-generated parser for the 'left' grammar (ANTLR 4.5.1).
    # Do not edit by hand; regenerate from left.g4 instead.

    grammarFileName = "left.g4"

    # Deserialized ATN shared by all parser instances.
    atn = ATNDeserializer().deserialize(serializedATN())

    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]

    sharedContextCache = PredictionContextCache()

    literalNames = [ "<INVALID>", "';'", "','", "'/'", "'*'", "'-'", "'+'",
                     "'function'", "'['", "']'", "'type'", "'{'", "'}'",
                     "'='", "'return'", "'if'", "'('", "')'", "'else'",
                     "'call'", "'external'", "'use'" ]

    symbolicNames = [ "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
                      "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
                      "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
                      "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
                      "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>",
                      "<INVALID>", "<INVALID>", "IDENTIFIER", "MODULENAME",
                      "INTEGER", "FLOAT", "COMPARISON", "WHITESPACE" ]

    # Rule indices (must match the order in ruleNames / the grammar).
    RULE_script = 0
    RULE_statement = 1
    RULE_statementList = 2
    RULE_expressionList = 3
    RULE_expression = 4
    RULE_acceptedNumber = 5
    RULE_functionPrototype = 6
    RULE_function = 7
    RULE_parameters = 8
    RULE_assignment = 9
    RULE_varDeclaration = 10
    RULE_functionReturn = 11
    RULE_ifStatement = 12
    RULE_ifElseStatement = 13
    RULE_call = 14
    RULE_extern = 15
    RULE_use = 16

    ruleNames = [ "script", "statement", "statementList", "expressionList",
                  "expression", "acceptedNumber", "functionPrototype",
                  "function", "parameters", "assignment", "varDeclaration",
                  "functionReturn", "ifStatement", "ifElseStatement", "call",
                  "extern", "use" ]

    # Token type constants.
    EOF = Token.EOF
    T__0=1
    T__1=2
    T__2=3
    T__3=4
    T__4=5
    T__5=6
    T__6=7
    T__7=8
    T__8=9
    T__9=10
    T__10=11
    T__11=12
    T__12=13
    T__13=14
    T__14=15
    T__15=16
    T__16=17
    T__17=18
    T__18=19
    T__19=20
    T__20=21
    IDENTIFIER=22
    MODULENAME=23
    INTEGER=24
    FLOAT=25
    COMPARISON=26
    WHITESPACE=27

    def __init__(self, input:TokenStream):
        """Create a parser over `input`, wiring up the shared ATN simulator."""
        super().__init__(input)
        self.checkVersion("4.5.1")
        self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache)
        self._predicates = None
class ScriptContext(ParserRuleContext):
    # Generated parse-tree context for the 'script' rule.

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def function(self, i:int=None):
        # i=None returns all function children; otherwise the i-th one.
        if i is None:
            return self.getTypedRuleContexts(leftParser.FunctionContext)
        else:
            return self.getTypedRuleContext(leftParser.FunctionContext,i)

    def statement(self, i:int=None):
        # i=None returns all statement children; otherwise the i-th one.
        if i is None:
            return self.getTypedRuleContexts(leftParser.StatementContext)
        else:
            return self.getTypedRuleContext(leftParser.StatementContext,i)

    def getRuleIndex(self):
        return leftParser.RULE_script

    def accept(self, visitor:ParseTreeVisitor):
        # Dispatch to a rule-specific visitor method when one exists.
        if hasattr( visitor, "visitScript" ):
            return visitor.visitScript(self)
        else:
            return visitor.visitChildren(self)
def script(self):
    """Parse rule 'script': one or more (statement ';' | function)."""

    localctx = leftParser.ScriptContext(self, self._ctx, self.state)
    self.enterRule(localctx, 0, self.RULE_script)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 38
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        while True:
            self.state = 38
            # Adaptive prediction chooses between the two alternatives.
            la_ = self._interp.adaptivePredict(self._input,0,self._ctx)
            if la_ == 1:
                self.state = 34
                self.statement()
                self.state = 35
                self.match(leftParser.T__0)
                pass

            elif la_ == 2:
                self.state = 37
                self.function()
                pass

            self.state = 40
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            # Loop while the lookahead can still start another alternative.
            if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << leftParser.T__6) | (1 << leftParser.T__13) | (1 << leftParser.T__14) | (1 << leftParser.T__18) | (1 << leftParser.T__19) | (1 << leftParser.T__20) | (1 << leftParser.IDENTIFIER) | (1 << leftParser.INTEGER) | (1 << leftParser.FLOAT))) != 0)):
                break

    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class StatementContext(ParserRuleContext):
    # Generated parse-tree context for the 'statement' rule.

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def expression(self):
        return self.getTypedRuleContext(leftParser.ExpressionContext,0)

    def varDeclaration(self):
        return self.getTypedRuleContext(leftParser.VarDeclarationContext,0)

    def assignment(self):
        return self.getTypedRuleContext(leftParser.AssignmentContext,0)

    def functionPrototype(self):
        return self.getTypedRuleContext(leftParser.FunctionPrototypeContext,0)

    def functionReturn(self):
        return self.getTypedRuleContext(leftParser.FunctionReturnContext,0)

    def ifStatement(self):
        return self.getTypedRuleContext(leftParser.IfStatementContext,0)

    def ifElseStatement(self):
        return self.getTypedRuleContext(leftParser.IfElseStatementContext,0)

    def use(self):
        return self.getTypedRuleContext(leftParser.UseContext,0)

    def extern(self):
        return self.getTypedRuleContext(leftParser.ExternContext,0)

    def getRuleIndex(self):
        return leftParser.RULE_statement

    def accept(self, visitor:ParseTreeVisitor):
        # Dispatch to a rule-specific visitor method when one exists.
        if hasattr( visitor, "visitStatement" ):
            return visitor.visitStatement(self)
        else:
            return visitor.visitChildren(self)
def statement(self):
    """Parse rule 'statement': one of nine alternatives chosen by prediction."""

    localctx = leftParser.StatementContext(self, self._ctx, self.state)
    self.enterRule(localctx, 2, self.RULE_statement)
    try:
        self.state = 51
        la_ = self._interp.adaptivePredict(self._input,2,self._ctx)
        if la_ == 1:
            self.enterOuterAlt(localctx, 1)
            self.state = 42
            self.expression(0)
            pass

        elif la_ == 2:
            self.enterOuterAlt(localctx, 2)
            self.state = 43
            self.varDeclaration()
            pass

        elif la_ == 3:
            self.enterOuterAlt(localctx, 3)
            self.state = 44
            self.assignment()
            pass

        elif la_ == 4:
            self.enterOuterAlt(localctx, 4)
            self.state = 45
            self.functionPrototype()
            pass

        elif la_ == 5:
            self.enterOuterAlt(localctx, 5)
            self.state = 46
            self.functionReturn()
            pass

        elif la_ == 6:
            self.enterOuterAlt(localctx, 6)
            self.state = 47
            self.ifStatement()
            pass

        elif la_ == 7:
            self.enterOuterAlt(localctx, 7)
            self.state = 48
            self.ifElseStatement()
            pass

        elif la_ == 8:
            self.enterOuterAlt(localctx, 8)
            self.state = 49
            self.use()
            pass

        elif la_ == 9:
            self.enterOuterAlt(localctx, 9)
            self.state = 50
            self.extern()
            pass

    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class StatementListContext(ParserRuleContext):
    # Generated parse-tree context for the 'statementList' rule.

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def statement(self, i:int=None):
        # i=None returns all statement children; otherwise the i-th one.
        if i is None:
            return self.getTypedRuleContexts(leftParser.StatementContext)
        else:
            return self.getTypedRuleContext(leftParser.StatementContext,i)

    def getRuleIndex(self):
        return leftParser.RULE_statementList

    def accept(self, visitor:ParseTreeVisitor):
        # Dispatch to a rule-specific visitor method when one exists.
        if hasattr( visitor, "visitStatementList" ):
            return visitor.visitStatementList(self)
        else:
            return visitor.visitChildren(self)
def statementList(self):
    """Parse rule 'statementList': zero or more (statement ';')."""

    localctx = leftParser.StatementListContext(self, self._ctx, self.state)
    self.enterRule(localctx, 4, self.RULE_statementList)
    self._la = 0 # Token type
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 58
        self._errHandler.sync(self)
        _la = self._input.LA(1)
        # Loop while the lookahead token can start a statement.
        while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << leftParser.T__6) | (1 << leftParser.T__13) | (1 << leftParser.T__14) | (1 << leftParser.T__18) | (1 << leftParser.T__19) | (1 << leftParser.T__20) | (1 << leftParser.IDENTIFIER) | (1 << leftParser.INTEGER) | (1 << leftParser.FLOAT))) != 0):
            self.state = 53
            self.statement()
            self.state = 54
            self.match(leftParser.T__0)
            self.state = 60
            self._errHandler.sync(self)
            _la = self._input.LA(1)

    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ExpressionListContext(ParserRuleContext):
    # Generated parse-tree context for the 'expressionList' rule.

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self._expression = None # ExpressionContext
        self.expressions = list() # of ExpressionContexts

    def expression(self, i:int=None):
        # i=None returns all expression children; otherwise the i-th one.
        if i is None:
            return self.getTypedRuleContexts(leftParser.ExpressionContext)
        else:
            return self.getTypedRuleContext(leftParser.ExpressionContext,i)

    def getRuleIndex(self):
        return leftParser.RULE_expressionList

    def accept(self, visitor:ParseTreeVisitor):
        # Dispatch to a rule-specific visitor method when one exists.
        if hasattr( visitor, "visitExpressionList" ):
            return visitor.visitExpressionList(self)
        else:
            return visitor.visitChildren(self)
def expressionList(self):
    """Parse rule 'expressionList': comma-separated expressions, possibly empty."""

    localctx = leftParser.ExpressionListContext(self, self._ctx, self.state)
    self.enterRule(localctx, 6, self.RULE_expressionList)
    self._la = 0 # Token type
    try:
        self.state = 70
        token = self._input.LA(1)
        if token in [leftParser.T__18, leftParser.IDENTIFIER, leftParser.INTEGER, leftParser.FLOAT]:
            # Non-empty list: expression (',' expression)*
            self.enterOuterAlt(localctx, 1)
            self.state = 61
            localctx._expression = self.expression(0)
            localctx.expressions.append(localctx._expression)
            self.state = 66
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==leftParser.T__1:
                self.state = 62
                self.match(leftParser.T__1)
                self.state = 63
                localctx._expression = self.expression(0)
                localctx.expressions.append(localctx._expression)
                self.state = 68
                self._errHandler.sync(self)
                _la = self._input.LA(1)

        elif token in [leftParser.T__16]:
            # Empty list (closing ')' follows immediately).
            self.enterOuterAlt(localctx, 2)

        else:
            raise NoViableAltException(self)

    except RecognitionException as re:
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ExpressionContext(ParserRuleContext):
    # Generated base context for the 'expression' rule; labeled alternatives
    # (Mul, Add, Sub, Div, Comparison, Variable, Number, CallExpression)
    # subclass it.

    def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return leftParser.RULE_expression

    def copyFrom(self, ctx:ParserRuleContext):
        super().copyFrom(ctx)
class CallExpressionContext(ExpressionContext):
    # Labeled alternative: an expression that is a function call.

    def __init__(self, parser, ctx:ParserRuleContext): # actually a leftParser.ExpressionContext
        super().__init__(parser)
        self.copyFrom(ctx)

    def call(self):
        return self.getTypedRuleContext(leftParser.CallContext,0)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitCallExpression" ):
            return visitor.visitCallExpression(self)
        else:
            return visitor.visitChildren(self)
class MulContext(ExpressionContext):
    # Labeled alternative: multiplication `left '*' right`.

    def __init__(self, parser, ctx:ParserRuleContext): # actually a leftParser.ExpressionContext
        super().__init__(parser)
        self.left = None # ExpressionContext
        self.right = None # ExpressionContext
        self.copyFrom(ctx)

    def expression(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(leftParser.ExpressionContext)
        else:
            return self.getTypedRuleContext(leftParser.ExpressionContext,i)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitMul" ):
            return visitor.visitMul(self)
        else:
            return visitor.visitChildren(self)
class VariableContext(ExpressionContext):
    # Labeled alternative: a bare identifier reference.

    def __init__(self, parser, ctx:ParserRuleContext): # actually a leftParser.ExpressionContext
        super().__init__(parser)
        self.varName = None # Token
        self.copyFrom(ctx)

    def IDENTIFIER(self):
        return self.getToken(leftParser.IDENTIFIER, 0)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitVariable" ):
            return visitor.visitVariable(self)
        else:
            return visitor.visitChildren(self)
class NumberContext(ExpressionContext):
    # Labeled alternative: a numeric literal (integer or float).

    def __init__(self, parser, ctx:ParserRuleContext): # actually a leftParser.ExpressionContext
        super().__init__(parser)
        self.number = None # AcceptedNumberContext
        self.copyFrom(ctx)

    def acceptedNumber(self):
        return self.getTypedRuleContext(leftParser.AcceptedNumberContext,0)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitNumber" ):
            return visitor.visitNumber(self)
        else:
            return visitor.visitChildren(self)
class AddContext(ExpressionContext):
    # Labeled alternative: addition `left '+' right`.

    def __init__(self, parser, ctx:ParserRuleContext): # actually a leftParser.ExpressionContext
        super().__init__(parser)
        self.left = None # ExpressionContext
        self.right = None # ExpressionContext
        self.copyFrom(ctx)

    def expression(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(leftParser.ExpressionContext)
        else:
            return self.getTypedRuleContext(leftParser.ExpressionContext,i)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitAdd" ):
            return visitor.visitAdd(self)
        else:
            return visitor.visitChildren(self)
class SubContext(ExpressionContext):
    # Labeled alternative: subtraction `left '-' right`.

    def __init__(self, parser, ctx:ParserRuleContext): # actually a leftParser.ExpressionContext
        super().__init__(parser)
        self.left = None # ExpressionContext
        self.right = None # ExpressionContext
        self.copyFrom(ctx)

    def expression(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(leftParser.ExpressionContext)
        else:
            return self.getTypedRuleContext(leftParser.ExpressionContext,i)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitSub" ):
            return visitor.visitSub(self)
        else:
            return visitor.visitChildren(self)
class DivContext(ExpressionContext):
    # Labeled alternative: division `left '/' right`.

    def __init__(self, parser, ctx:ParserRuleContext): # actually a leftParser.ExpressionContext
        super().__init__(parser)
        self.left = None # ExpressionContext
        self.right = None # ExpressionContext
        self.copyFrom(ctx)

    def expression(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(leftParser.ExpressionContext)
        else:
            return self.getTypedRuleContext(leftParser.ExpressionContext,i)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitDiv" ):
            return visitor.visitDiv(self)
        else:
            return visitor.visitChildren(self)
class ComparisonContext(ExpressionContext):
    # Labeled alternative: comparison `left COMPARISON right`.

    def __init__(self, parser, ctx:ParserRuleContext): # actually a leftParser.ExpressionContext
        super().__init__(parser)
        self.left = None # ExpressionContext
        self.comp = None # Token
        self.right = None # ExpressionContext
        self.copyFrom(ctx)

    def expression(self, i:int=None):
        if i is None:
            return self.getTypedRuleContexts(leftParser.ExpressionContext)
        else:
            return self.getTypedRuleContext(leftParser.ExpressionContext,i)

    def COMPARISON(self):
        return self.getToken(leftParser.COMPARISON, 0)

    def accept(self, visitor:ParseTreeVisitor):
        if hasattr( visitor, "visitComparison" ):
            return visitor.visitComparison(self)
        else:
            return visitor.visitChildren(self)
def expression(self, _p:int=0):
_parentctx = self._ctx
_parentState = self.state
localctx = leftParser.ExpressionContext(self, self._ctx, _parentState)
_prevctx = localctx
_startState = 8
self.enterRecursionRule(localctx, 8, self.RULE_expression, _p)
try:
self.enterOuterAlt(localctx, 1)
self.state = 76
token = self._input.LA(1)
if token in [leftParser.INTEGER, leftParser.FLOAT]:
localctx = leftParser.NumberContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 73
localctx.number = self.acceptedNumber()
elif token in [leftParser.IDENTIFIER]:
localctx = leftParser.VariableContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 74
localctx.varName = self.match(leftParser.IDENTIFIER)
elif token in [leftParser.T__18]:
localctx = leftParser.CallExpressionContext(self, localctx)
self._ctx = localctx
_prevctx = localctx
self.state = 75
self.call()
else:
raise NoViableAltException(self)
self._ctx.stop = self._input.LT(-1)
self.state = 95
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,8,self._ctx)
while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER:
if _alt==1:
if self._parseListeners is not None:
self.triggerExitRuleEvent()
_prevctx = localctx
self.state = 93
la_ = self._interp.adaptivePredict(self._input,7,self._ctx)
if la_ == 1:
localctx = leftParser.DivContext(self, leftParser.ExpressionContext(self, _parentctx, _parentState))
localctx.left = _prevctx
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 78
if not self.precpred(self._ctx, 8):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 8)")
self.state = 79
self.match(leftParser.T__2)
self.state = 80
localctx.right = self.expression(9)
pass
elif la_ == 2:
localctx = leftParser.MulContext(self, leftParser.ExpressionContext(self, _parentctx, _parentState))
localctx.left = _prevctx
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 81
if not self.precpred(self._ctx, 7):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 7)")
self.state = 82
self.match(leftParser.T__3)
self.state = 83
localctx.right = self.expression(8)
pass
elif la_ == 3:
localctx = leftParser.SubContext(self, leftParser.ExpressionContext(self, _parentctx, _parentState))
localctx.left = _prevctx
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 84
if not self.precpred(self._ctx, 6):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 6)")
self.state = 85
self.match(leftParser.T__4)
self.state = 86
localctx.right = self.expression(7)
pass
elif la_ == 4:
localctx = leftParser.AddContext(self, leftParser.ExpressionContext(self, _parentctx, _parentState))
localctx.left = _prevctx
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 87
if not self.precpred(self._ctx, 5):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 5)")
self.state = 88
self.match(leftParser.T__5)
self.state = 89
localctx.right = self.expression(6)
pass
elif la_ == 5:
localctx = leftParser.ComparisonContext(self, leftParser.ExpressionContext(self, _parentctx, _parentState))
localctx.left = _prevctx
self.pushNewRecursionContext(localctx, _startState, self.RULE_expression)
self.state = 90
if not self.precpred(self._ctx, 1):
from antlr4.error.Errors import FailedPredicateException
raise FailedPredicateException(self, "self.precpred(self._ctx, 1)")
self.state = 91
localctx.comp = self.match(leftParser.COMPARISON)
self.state = 92
localctx.right = self.expression(2)
pass
self.state = 97
self._errHandler.sync(self)
_alt = self._interp.adaptivePredict(self._input,8,self._ctx)
except RecognitionException as re:
localctx.exception = re
self._errHandler.reportError(self, re)
self._errHandler.recover(self, re)
finally:
self.unrollRecursionContexts(_parentctx)
return localctx
class AcceptedNumberContext(ParserRuleContext):
    """Generic parse-tree node for the ``acceptedNumber`` rule.

    Concrete alternatives are the labelled subclasses IntegerContext and
    FloatContext, which re-type an instance of this class via copyFrom().
    """

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser

    def getRuleIndex(self):
        return leftParser.RULE_acceptedNumber

    def copyFrom(self, ctx: ParserRuleContext):
        super().copyFrom(ctx)
class FloatContext(AcceptedNumberContext):
    """``acceptedNumber`` alternative labelled ``Float``."""

    def __init__(self, parser, ctx: ParserRuleContext):  # actually a leftParser.AcceptedNumberContext
        super().__init__(parser)
        self.num = None  # Token
        self.copyFrom(ctx)

    def FLOAT(self):
        return self.getToken(leftParser.FLOAT, 0)

    def accept(self, visitor: ParseTreeVisitor):
        # Dispatch to a visitFloat handler when the visitor defines one.
        visit = getattr(visitor, "visitFloat", None)
        return visit(self) if visit is not None else visitor.visitChildren(self)
class IntegerContext(AcceptedNumberContext):
    """``acceptedNumber`` alternative labelled ``Integer``."""

    def __init__(self, parser, ctx: ParserRuleContext):  # actually a leftParser.AcceptedNumberContext
        super().__init__(parser)
        self.num = None  # Token
        self.copyFrom(ctx)

    def INTEGER(self):
        return self.getToken(leftParser.INTEGER, 0)

    def accept(self, visitor: ParseTreeVisitor):
        # Dispatch to a visitInteger handler when the visitor defines one.
        visit = getattr(visitor, "visitInteger", None)
        return visit(self) if visit is not None else visitor.visitChildren(self)
def acceptedNumber(self):
    """Parse the ``acceptedNumber`` rule: a single INTEGER or FLOAT token.

    Returns the labelled context (IntegerContext or FloatContext) with the
    matched token stored in its ``num`` field.
    """
    localctx = leftParser.AcceptedNumberContext(self, self._ctx, self.state)
    self.enterRule(localctx, 10, self.RULE_acceptedNumber)
    try:
        # ATN decision state for the INTEGER / FLOAT alternative split.
        self.state = 100
        token = self._input.LA(1)
        if token in [leftParser.INTEGER]:
            # Alternative 1: integer literal.
            localctx = leftParser.IntegerContext(self, localctx)
            self.enterOuterAlt(localctx, 1)
            self.state = 98
            localctx.num = self.match(leftParser.INTEGER)
        elif token in [leftParser.FLOAT]:
            # Alternative 2: float literal.
            localctx = leftParser.FloatContext(self, localctx)
            self.enterOuterAlt(localctx, 2)
            self.state = 99
            localctx.num = self.match(leftParser.FLOAT)
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, then re-sync the stream.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class FunctionPrototypeContext(ParserRuleContext):
    """Parse-tree node for the ``functionPrototype`` rule."""

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self.funcName = None  # Token
        self.params = None  # ParametersContext
        self.funcType = None  # Token

    def IDENTIFIER(self, i: int = None):
        # With no index, return all IDENTIFIER tokens; otherwise the i-th one.
        if i is None:
            return self.getTokens(leftParser.IDENTIFIER)
        return self.getToken(leftParser.IDENTIFIER, i)

    def parameters(self):
        return self.getTypedRuleContext(leftParser.ParametersContext, 0)

    def getRuleIndex(self):
        return leftParser.RULE_functionPrototype

    def accept(self, visitor: ParseTreeVisitor):
        visit = getattr(visitor, "visitFunctionPrototype", None)
        return visit(self) if visit is not None else visitor.visitChildren(self)
def functionPrototype(self):
    """Parse the ``functionPrototype`` rule.

    Matches: T__6 name '(' parameters ')' T__9 returnType — capturing the
    function name, its parameter list, and its return type token.
    """
    localctx = leftParser.FunctionPrototypeContext(self, self._ctx, self.state)
    self.enterRule(localctx, 12, self.RULE_functionPrototype)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 102
        self.match(leftParser.T__6)
        self.state = 103
        localctx.funcName = self.match(leftParser.IDENTIFIER)
        self.state = 104
        self.match(leftParser.T__7)
        self.state = 105
        localctx.params = self.parameters()
        self.state = 106
        self.match(leftParser.T__8)
        self.state = 107
        self.match(leftParser.T__9)
        self.state = 108
        localctx.funcType = self.match(leftParser.IDENTIFIER)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, then re-sync the stream.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class FunctionContext(ParserRuleContext):
    """Parse-tree node for the ``function`` rule."""

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self.funcName = None  # Token
        self.params = None  # ParametersContext
        self.funcType = None  # Token
        self.statements = None  # StatementListContext

    def IDENTIFIER(self, i: int = None):
        # With no index, return all IDENTIFIER tokens; otherwise the i-th one.
        if i is None:
            return self.getTokens(leftParser.IDENTIFIER)
        return self.getToken(leftParser.IDENTIFIER, i)

    def parameters(self):
        return self.getTypedRuleContext(leftParser.ParametersContext, 0)

    def statementList(self):
        return self.getTypedRuleContext(leftParser.StatementListContext, 0)

    def getRuleIndex(self):
        return leftParser.RULE_function

    def accept(self, visitor: ParseTreeVisitor):
        visit = getattr(visitor, "visitFunction", None)
        return visit(self) if visit is not None else visitor.visitChildren(self)
def function(self):
    """Parse the ``function`` rule.

    Like functionPrototype but with a braced statementList body:
    T__6 name '(' parameters ')' T__9 returnType '{' statements '}'.
    """
    localctx = leftParser.FunctionContext(self, self._ctx, self.state)
    self.enterRule(localctx, 14, self.RULE_function)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 110
        self.match(leftParser.T__6)
        self.state = 111
        localctx.funcName = self.match(leftParser.IDENTIFIER)
        self.state = 112
        self.match(leftParser.T__7)
        self.state = 113
        localctx.params = self.parameters()
        self.state = 114
        self.match(leftParser.T__8)
        self.state = 115
        self.match(leftParser.T__9)
        self.state = 116
        localctx.funcType = self.match(leftParser.IDENTIFIER)
        self.state = 117
        self.match(leftParser.T__10)
        self.state = 118
        localctx.statements = self.statementList()
        self.state = 119
        self.match(leftParser.T__11)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, then re-sync the stream.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ParametersContext(ParserRuleContext):
    """Parse-tree node for the ``parameters`` rule."""

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self._varDeclaration = None  # VarDeclarationContext
        self.params = []  # of VarDeclarationContexts

    def varDeclaration(self, i: int = None):
        # With no index, return every declaration context; otherwise the i-th.
        if i is None:
            return self.getTypedRuleContexts(leftParser.VarDeclarationContext)
        return self.getTypedRuleContext(leftParser.VarDeclarationContext, i)

    def getRuleIndex(self):
        return leftParser.RULE_parameters

    def accept(self, visitor: ParseTreeVisitor):
        visit = getattr(visitor, "visitParameters", None)
        return visit(self) if visit is not None else visitor.visitChildren(self)
def parameters(self):
    """Parse the ``parameters`` rule: an optionally-empty, comma-separated
    list of varDeclarations, accumulated into ``localctx.params``.
    """
    localctx = leftParser.ParametersContext(self, self._ctx, self.state)
    self.enterRule(localctx, 16, self.RULE_parameters)
    self._la = 0 # Token type
    try:
        self.state = 130
        token = self._input.LA(1)
        if token in [leftParser.IDENTIFIER]:
            # Alternative 1: one or more declarations separated by T__1 (comma).
            self.enterOuterAlt(localctx, 1)
            self.state = 121
            localctx._varDeclaration = self.varDeclaration()
            localctx.params.append(localctx._varDeclaration)
            self.state = 126
            self._errHandler.sync(self)
            _la = self._input.LA(1)
            while _la==leftParser.T__1:
                self.state = 122
                self.match(leftParser.T__1)
                self.state = 123
                localctx._varDeclaration = self.varDeclaration()
                localctx.params.append(localctx._varDeclaration)
                self.state = 128
                self._errHandler.sync(self)
                _la = self._input.LA(1)
        elif token in [leftParser.T__8]:
            # Alternative 2: empty parameter list (lookahead is the closing paren).
            self.enterOuterAlt(localctx, 2)
        else:
            raise NoViableAltException(self)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, then re-sync the stream.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class AssignmentContext(ParserRuleContext):
    """Parse-tree node for the ``assignment`` rule."""

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self.varName = None  # Token
        self.expr = None  # ExpressionContext

    def IDENTIFIER(self):
        return self.getToken(leftParser.IDENTIFIER, 0)

    def expression(self):
        return self.getTypedRuleContext(leftParser.ExpressionContext, 0)

    def getRuleIndex(self):
        return leftParser.RULE_assignment

    def accept(self, visitor: ParseTreeVisitor):
        visit = getattr(visitor, "visitAssignment", None)
        return visit(self) if visit is not None else visitor.visitChildren(self)
def assignment(self):
    """Parse the ``assignment`` rule: IDENTIFIER T__12 expression."""
    localctx = leftParser.AssignmentContext(self, self._ctx, self.state)
    self.enterRule(localctx, 18, self.RULE_assignment)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 132
        localctx.varName = self.match(leftParser.IDENTIFIER)
        self.state = 133
        self.match(leftParser.T__12)
        self.state = 134
        # Precedence 0: accept the full expression on the right-hand side.
        localctx.expr = self.expression(0)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, then re-sync the stream.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class VarDeclarationContext(ParserRuleContext):
    """Parse-tree node for the ``varDeclaration`` rule."""

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self.varType = None  # Token
        self.varName = None  # Token

    def IDENTIFIER(self, i: int = None):
        # With no index, return all IDENTIFIER tokens; otherwise the i-th one.
        if i is None:
            return self.getTokens(leftParser.IDENTIFIER)
        return self.getToken(leftParser.IDENTIFIER, i)

    def getRuleIndex(self):
        return leftParser.RULE_varDeclaration

    def accept(self, visitor: ParseTreeVisitor):
        visit = getattr(visitor, "visitVarDeclaration", None)
        return visit(self) if visit is not None else visitor.visitChildren(self)
def varDeclaration(self):
    """Parse the ``varDeclaration`` rule: two IDENTIFIERs, type then name."""
    localctx = leftParser.VarDeclarationContext(self, self._ctx, self.state)
    self.enterRule(localctx, 20, self.RULE_varDeclaration)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 136
        localctx.varType = self.match(leftParser.IDENTIFIER)
        self.state = 137
        localctx.varName = self.match(leftParser.IDENTIFIER)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, then re-sync the stream.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class FunctionReturnContext(ParserRuleContext):
    """Parse-tree node for the ``functionReturn`` rule."""

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self.expr = None  # ExpressionContext

    def expression(self):
        return self.getTypedRuleContext(leftParser.ExpressionContext, 0)

    def getRuleIndex(self):
        return leftParser.RULE_functionReturn

    def accept(self, visitor: ParseTreeVisitor):
        visit = getattr(visitor, "visitFunctionReturn", None)
        return visit(self) if visit is not None else visitor.visitChildren(self)
def functionReturn(self):
    """Parse the ``functionReturn`` rule: T__13 (return keyword) expression."""
    localctx = leftParser.FunctionReturnContext(self, self._ctx, self.state)
    self.enterRule(localctx, 22, self.RULE_functionReturn)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 139
        self.match(leftParser.T__13)
        self.state = 140
        # Precedence 0: accept the full returned expression.
        localctx.expr = self.expression(0)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, then re-sync the stream.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class IfStatementContext(ParserRuleContext):
    """Parse-tree node for the ``ifStatement`` rule."""

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self.condition = None  # ExpressionContext
        self.statements = None  # StatementListContext

    def expression(self):
        return self.getTypedRuleContext(leftParser.ExpressionContext, 0)

    def statementList(self):
        return self.getTypedRuleContext(leftParser.StatementListContext, 0)

    def getRuleIndex(self):
        return leftParser.RULE_ifStatement

    def accept(self, visitor: ParseTreeVisitor):
        visit = getattr(visitor, "visitIfStatement", None)
        return visit(self) if visit is not None else visitor.visitChildren(self)
def ifStatement(self):
    """Parse the ``ifStatement`` rule.

    Matches: T__14 '(' condition ')' '{' statements '}'.
    """
    localctx = leftParser.IfStatementContext(self, self._ctx, self.state)
    self.enterRule(localctx, 24, self.RULE_ifStatement)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 142
        self.match(leftParser.T__14)
        self.state = 143
        self.match(leftParser.T__15)
        self.state = 144
        localctx.condition = self.expression(0)
        self.state = 145
        self.match(leftParser.T__16)
        self.state = 146
        self.match(leftParser.T__10)
        self.state = 147
        localctx.statements = self.statementList()
        self.state = 148
        self.match(leftParser.T__11)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, then re-sync the stream.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class IfElseStatementContext(ParserRuleContext):
    """Parse-tree node for the ``ifElseStatement`` rule."""

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self.condition = None  # ExpressionContext
        self.then = None  # StatementListContext
        self.otherwise = None  # StatementListContext

    def expression(self):
        return self.getTypedRuleContext(leftParser.ExpressionContext, 0)

    def statementList(self, i: int = None):
        # With no index, return both branch bodies; otherwise the i-th one.
        if i is None:
            return self.getTypedRuleContexts(leftParser.StatementListContext)
        return self.getTypedRuleContext(leftParser.StatementListContext, i)

    def getRuleIndex(self):
        return leftParser.RULE_ifElseStatement

    def accept(self, visitor: ParseTreeVisitor):
        visit = getattr(visitor, "visitIfElseStatement", None)
        return visit(self) if visit is not None else visitor.visitChildren(self)
def ifElseStatement(self):
    """Parse the ``ifElseStatement`` rule.

    Matches: T__14 '(' condition ')' '{' then '}' T__17 '{' otherwise '}'.
    """
    localctx = leftParser.IfElseStatementContext(self, self._ctx, self.state)
    self.enterRule(localctx, 26, self.RULE_ifElseStatement)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 150
        self.match(leftParser.T__14)
        self.state = 151
        self.match(leftParser.T__15)
        self.state = 152
        localctx.condition = self.expression(0)
        self.state = 153
        self.match(leftParser.T__16)
        self.state = 154
        self.match(leftParser.T__10)
        self.state = 155
        localctx.then = self.statementList()
        self.state = 156
        self.match(leftParser.T__11)
        self.state = 157
        self.match(leftParser.T__17)
        self.state = 158
        self.match(leftParser.T__10)
        self.state = 159
        localctx.otherwise = self.statementList()
        self.state = 160
        self.match(leftParser.T__11)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, then re-sync the stream.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class CallContext(ParserRuleContext):
    """Parse-tree node for the ``call`` rule."""

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self.funcName = None  # Token
        self.expressions = None  # ExpressionListContext

    def IDENTIFIER(self):
        return self.getToken(leftParser.IDENTIFIER, 0)

    def expressionList(self):
        return self.getTypedRuleContext(leftParser.ExpressionListContext, 0)

    def getRuleIndex(self):
        return leftParser.RULE_call

    def accept(self, visitor: ParseTreeVisitor):
        visit = getattr(visitor, "visitCall", None)
        return visit(self) if visit is not None else visitor.visitChildren(self)
def call(self):
    """Parse the ``call`` rule: T__18 name '(' expressionList ')'."""
    localctx = leftParser.CallContext(self, self._ctx, self.state)
    self.enterRule(localctx, 28, self.RULE_call)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 162
        self.match(leftParser.T__18)
        self.state = 163
        localctx.funcName = self.match(leftParser.IDENTIFIER)
        self.state = 164
        self.match(leftParser.T__15)
        self.state = 165
        localctx.expressions = self.expressionList()
        self.state = 166
        self.match(leftParser.T__16)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, then re-sync the stream.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class ExternContext(ParserRuleContext):
    """Parse-tree node for the ``extern`` rule."""

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self.prot = None  # FunctionPrototypeContext

    def functionPrototype(self):
        return self.getTypedRuleContext(leftParser.FunctionPrototypeContext, 0)

    def getRuleIndex(self):
        return leftParser.RULE_extern

    def accept(self, visitor: ParseTreeVisitor):
        visit = getattr(visitor, "visitExtern", None)
        return visit(self) if visit is not None else visitor.visitChildren(self)
def extern(self):
    """Parse the ``extern`` rule: T__19 (extern keyword) functionPrototype."""
    localctx = leftParser.ExternContext(self, self._ctx, self.state)
    self.enterRule(localctx, 30, self.RULE_extern)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 168
        self.match(leftParser.T__19)
        self.state = 169
        localctx.prot = self.functionPrototype()
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, then re-sync the stream.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
class UseContext(ParserRuleContext):
    """Parse-tree node for the ``use`` rule."""

    def __init__(self, parser, parent: ParserRuleContext = None, invokingState: int = -1):
        super().__init__(parent, invokingState)
        self.parser = parser
        self.mname = None  # Token

    def MODULENAME(self):
        return self.getToken(leftParser.MODULENAME, 0)

    def getRuleIndex(self):
        return leftParser.RULE_use

    def accept(self, visitor: ParseTreeVisitor):
        visit = getattr(visitor, "visitUse", None)
        return visit(self) if visit is not None else visitor.visitChildren(self)
def use(self):
    """Parse the ``use`` rule: T__20 (use keyword) MODULENAME."""
    localctx = leftParser.UseContext(self, self._ctx, self.state)
    self.enterRule(localctx, 32, self.RULE_use)
    try:
        self.enterOuterAlt(localctx, 1)
        self.state = 171
        self.match(leftParser.T__20)
        self.state = 172
        localctx.mname = self.match(leftParser.MODULENAME)
    except RecognitionException as re:
        # Standard ANTLR recovery: record, report, then re-sync the stream.
        localctx.exception = re
        self._errHandler.reportError(self, re)
        self._errHandler.recover(self, re)
    finally:
        self.exitRule()
    return localctx
def sempred(self, localctx: RuleContext, ruleIndex: int, predIndex: int):
    """Dispatch a semantic-predicate check to the rule-specific handler.

    Args:
        localctx: Context of the rule being evaluated.
        ruleIndex: Index of the rule owning the predicate (only rule 4,
            ``expression``, has predicates in this grammar).
        predIndex: Index of the predicate within that rule.

    Raises:
        Exception: If ``ruleIndex`` has no registered predicate handler.
    """
    # Lazily build the dispatch table on first use.
    # Fix: compare to None with `is`, not `==` (PEP 8; `==` can invoke
    # arbitrary __eq__ implementations).
    if self._predicates is None:
        self._predicates = dict()
        self._predicates[4] = self.expression_sempred
    pred = self._predicates.get(ruleIndex, None)
    if pred is None:
        raise Exception("No predicate with index:" + str(ruleIndex))
    else:
        return pred(localctx, predIndex)
def expression_sempred(self, localctx: ExpressionContext, predIndex: int):
    """Evaluate the precedence predicate for the ``expression`` rule.

    Maps each predicate index to the minimum precedence its alternative
    requires; returns None (falsy) for unknown indices, as before.
    """
    required_precedence = {0: 8, 1: 7, 2: 6, 3: 5, 4: 1}
    prec = required_precedence.get(predIndex)
    if prec is not None:
        return self.precpred(self._ctx, prec)
| |
import numpy as np
from typing import Any, List, Tuple
from ray.rllib.models.torch.misc import Reshape
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.framework import TensorType
# torch is imported lazily via RLlib's helper so the module can still be
# imported when torch is not installed; `torch` is None in that case.
torch, nn = try_import_torch()
if torch:
    # torch-dependent imports are guarded so import of this module never
    # fails on a torch-less installation.
    from torch import distributions as td
    from ray.rllib.agents.dreamer.utils import (
        Linear,
        Conv2d,
        ConvTranspose2d,
        GRUCell,
        TanhBijector,
    )
# Alias for an activation-function *class* (e.g. nn.ReLU), not an instance.
ActFunc = Any
# Encoder, part of PlaNET
class ConvEncoder(nn.Module):
    """Standard convolutional encoder for Dreamer.

    Encodes images from an environment into a latent state for the RSSM
    model in PlaNET.
    """

    def __init__(
        self, depth: int = 32, act: ActFunc = None, shape: Tuple[int] = (3, 64, 64)
    ):
        """Initializes Conv Encoder

        Args:
            depth (int): Number of channels in the first conv layer
            act (Any): Activation for Encoder, default ReLU
            shape (List): Shape of observation input
        """
        super().__init__()
        self.act = act if act else nn.ReLU
        self.depth = depth
        self.shape = shape

        # Four stride-2 convolutions, doubling the channel count each time.
        channels = (self.shape[0], depth, 2 * depth, 4 * depth, 8 * depth)
        layers = []
        for in_ch, out_ch in zip(channels[:-1], channels[1:]):
            layers.append(Conv2d(in_ch, out_ch, 4, stride=2))
            layers.append(self.act())
        self.layers = layers
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        # Flatten to [batch*horizon, 3, 64, 64] in loss function
        orig_shape = list(x.size())
        x = x.view(-1, *(orig_shape[-3:]))
        x = self.model(x)
        # Restore the leading dims; the conv stack leaves 32*depth features.
        new_shape = orig_shape[:-3] + [32 * self.depth]
        return x.view(*new_shape)
# Decoder, part of PlaNET
class ConvDecoder(nn.Module):
    """Standard convolutional decoder for Dreamer.

    Decodes images from the latent state generated by the transition
    dynamics model. Used for the reconstruction loss and for logging gifs
    of imagined trajectories.
    """

    def __init__(
        self,
        input_size: int,
        depth: int = 32,
        act: ActFunc = None,
        shape: Tuple[int] = (3, 64, 64),
    ):
        """Initializes a ConvDecoder instance.

        Args:
            input_size (int): Input size, usually feature size output from
                RSSM.
            depth (int): Number of channels in the first conv layer
            act (Any): Activation for Encoder, default ReLU
            shape (List): Shape of observation input
        """
        super().__init__()
        self.act = act if act else nn.ReLU
        self.depth = depth
        self.shape = shape

        # Project the latent vector to a 1x1 "image", then upsample with
        # three activated transposed convolutions.
        layers = [
            Linear(input_size, 32 * depth),
            Reshape([-1, 32 * depth, 1, 1]),
        ]
        deconv_specs = [
            (32 * depth, 4 * depth, 5),
            (4 * depth, 2 * depth, 5),
            (2 * depth, depth, 6),
        ]
        for in_ch, out_ch, kernel in deconv_specs:
            layers.append(ConvTranspose2d(in_ch, out_ch, kernel, stride=2))
            layers.append(self.act())
        # Final layer maps back to image channels; no activation (it
        # parameterizes the mean of a Normal).
        layers.append(ConvTranspose2d(depth, self.shape[0], 6, stride=2))
        self.layers = layers
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        # x is [batch, hor_length, input_size]
        leading_dims = list(x.size())[:-1]
        mean = self.model(x).view(*(leading_dims + list(self.shape)))
        # Equivalent to making a multivariate diag (unit std per pixel).
        return td.Independent(td.Normal(mean, 1), len(self.shape))
# Reward Model (PlaNET), and Value Function
class DenseDecoder(nn.Module):
    """FC network that outputs a distribution for calculating log_prob.

    Used later in DreamerLoss.
    """

    def __init__(
        self,
        input_size: int,
        output_size: int,
        layers: int,
        units: int,
        dist: str = "normal",
        act: ActFunc = None,
    ):
        """Initializes FC network

        Args:
            input_size (int): Input size to network
            output_size (int): Output size to network
            layers (int): Number of layers in network
            units (int): Size of the hidden layers
            dist (str): Output distribution, parameterized by FC output
                logits.
            act (Any): Activation function
        """
        super().__init__()
        self.layrs = layers
        self.units = units
        self.act = act if act else nn.ELU
        self.dist = dist
        self.input_size = input_size
        self.output_size = output_size

        # `layers` hidden blocks of Linear+activation, then a linear head.
        net = []
        in_size = input_size
        for _ in range(layers):
            net.append(Linear(in_size, units))
            net.append(self.act())
            in_size = units
        net.append(Linear(in_size, output_size))
        self.layers = net
        self.model = nn.Sequential(*net)

    def forward(self, x):
        x = self.model(x)
        if self.output_size == 1:
            # Drop the trailing singleton output dim.
            x = torch.squeeze(x)
        if self.dist == "normal":
            base_dist = td.Normal(x, 1)
        elif self.dist == "binary":
            base_dist = td.Bernoulli(logits=x)
        else:
            raise NotImplementedError("Distribution type not implemented!")
        return td.Independent(base_dist, 0)
# Represents dreamer policy
class ActionDecoder(nn.Module):
    """ActionDecoder is the policy module in Dreamer.

    It outputs a distribution parameterized by mean and std, later to be
    transformed by a custom TanhBijector in utils.py for Dreamer.
    """

    def __init__(
        self,
        input_size: int,
        action_size: int,
        layers: int,
        units: int,
        dist: str = "tanh_normal",
        act: ActFunc = None,
        min_std: float = 1e-4,
        init_std: float = 5.0,
        mean_scale: float = 5.0,
    ):
        """Initializes Policy

        Args:
            input_size (int): Input size to network
            action_size (int): Action space size
            layers (int): Number of layers in network
            units (int): Size of the hidden layers
            dist (str): Output distribution, with tanh_normal implemented
            act (Any): Activation function
            min_std (float): Minimum std for output distribution
            init_std (float): Initial std
            mean_scale (float): Augmenting mean output from FC network
        """
        super().__init__()
        self.layrs = layers
        self.units = units
        self.dist = dist
        self.act = act
        if not act:
            self.act = nn.ReLU
        self.min_std = min_std
        self.init_std = init_std
        self.mean_scale = mean_scale
        self.action_size = action_size
        self.layers = []
        self.softplus = nn.Softplus()
        # MLP Construction
        cur_size = input_size
        for _ in range(self.layrs):
            self.layers.extend([Linear(cur_size, self.units), self.act()])
            cur_size = self.units
        # Head: tanh_normal needs mean and std per action dim; onehot only logits.
        if self.dist == "tanh_normal":
            self.layers.append(Linear(cur_size, 2 * action_size))
        elif self.dist == "onehot":
            self.layers.append(Linear(cur_size, action_size))
        self.model = nn.Sequential(*self.layers)

    # Returns distribution
    def forward(self, x):
        # raw_init_std is chosen so softplus(raw_init_std) == init_std.
        raw_init_std = np.log(np.exp(self.init_std) - 1)
        x = self.model(x)
        if self.dist == "tanh_normal":
            mean, std = torch.chunk(x, 2, dim=-1)
            # Soft-clip the mean into [-mean_scale, mean_scale].
            mean = self.mean_scale * torch.tanh(mean / self.mean_scale)
            # Shift std so the untrained network starts near init_std.
            std = self.softplus(std + raw_init_std) + self.min_std
            dist = td.Normal(mean, std)
            transforms = [TanhBijector()]
            dist = td.transformed_distribution.TransformedDistribution(dist, transforms)
            return td.Independent(dist, 1)
        if self.dist == "onehot":
            # Fix: the previous code constructed a dead td.OneHotCategorical
            # immediately before this unconditional raise — removed.
            raise NotImplementedError("Atari not implemented yet!")
        # Fix: previously an unknown `dist` fell through to `return dist`
        # with `dist` unbound, raising a confusing UnboundLocalError.
        raise NotImplementedError(
            "Distribution type {} not implemented!".format(self.dist)
        )
# Represents TD model in PlaNET
class RSSM(nn.Module):
    """RSSM is the core recurrent part of the PlaNET module. It consists of
    two networks, one (obs) to calculate posterior beliefs and states and
    the second (img) to calculate prior beliefs and states. The prior network
    takes in the previous state and action, while the posterior network takes
    in the previous state, action, and a latent embedding of the most recent
    observation.

    A "state" throughout this class is a 4-element list of tensors:
    [mean, std, stoch, deter] — the parameters of the stochastic latent,
    a sample from it, and the deterministic GRU hidden state.
    """
    def __init__(
        self,
        action_size: int,
        embed_size: int,
        stoch: int = 30,
        deter: int = 200,
        hidden: int = 200,
        act: ActFunc = None,
    ):
        """Initializes RSSM

        Args:
            action_size (int): Action space size
            embed_size (int): Size of ConvEncoder embedding
            stoch (int): Size of the distributional hidden state
            deter (int): Size of the deterministic hidden state
            hidden (int): General size of hidden layers
            act (Any): Activation function
        """
        super().__init__()
        self.stoch_size = stoch
        self.deter_size = deter
        self.hidden_size = hidden
        self.act = act
        if act is None:
            self.act = nn.ELU
        # Posterior (obs) network: [deter, embed] -> hidden -> (mean, std).
        self.obs1 = Linear(embed_size + deter, hidden)
        self.obs2 = Linear(hidden, 2 * stoch)
        # Deterministic recurrent core.
        self.cell = GRUCell(self.hidden_size, hidden_size=self.deter_size)
        # Prior (img) network: [stoch, action] -> GRU -> hidden -> (mean, std).
        self.img1 = Linear(stoch + action_size, hidden)
        self.img2 = Linear(deter, hidden)
        self.img3 = Linear(hidden, 2 * stoch)
        # NOTE: stores the Softplus *class*; instantiated on every call below.
        self.softplus = nn.Softplus
        self.device = (
            torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        )
    def get_initial_state(self, batch_size: int) -> List[TensorType]:
        """Returns the inital state for the RSSM, which consists of mean,
        std for the stochastic state, the sampled stochastic hidden state
        (from mean, std), and the deterministic hidden state, which is
        pushed through the GRUCell.

        Args:
            batch_size (int): Batch size for initial state

        Returns:
            List of tensors
        """
        return [
            torch.zeros(batch_size, self.stoch_size).to(self.device),
            torch.zeros(batch_size, self.stoch_size).to(self.device),
            torch.zeros(batch_size, self.stoch_size).to(self.device),
            torch.zeros(batch_size, self.deter_size).to(self.device),
        ]
    def observe(
        self, embed: TensorType, action: TensorType, state: List[TensorType] = None
    ) -> Tuple[List[TensorType], List[TensorType]]:
        """Returns the corresponding states from the embedding from ConvEncoder
        and actions. This is accomplished by rolling out the RNN from the
        starting state through each index of embed and action, saving all
        intermediate states between.

        Args:
            embed (TensorType): ConvEncoder embedding
            action (TensorType): Actions
            state (List[TensorType]): Initial state before rollout

        Returns:
            Posterior states and prior states (both List[TensorType])
        """
        if state is None:
            state = self.get_initial_state(action.size()[0])
        # Promote unbatched-time inputs to a time dimension of length 1.
        if embed.dim() <= 2:
            embed = torch.unsqueeze(embed, 1)
        if action.dim() <= 2:
            action = torch.unsqueeze(action, 1)
        # Time-major layout for the rollout loop: [time, batch, feature].
        embed = embed.permute(1, 0, 2)
        action = action.permute(1, 0, 2)
        priors = [[] for i in range(len(state))]
        posts = [[] for i in range(len(state))]
        last = (state, state)
        for index in range(len(action)):
            # Tuple of post and prior
            last = self.obs_step(last[0], action[index], embed[index])
            [o.append(s) for s, o in zip(last[0], posts)]
            [o.append(s) for s, o in zip(last[1], priors)]
        # Stack per-timestep tensors, then restore batch-major layout.
        prior = [torch.stack(x, dim=0) for x in priors]
        post = [torch.stack(x, dim=0) for x in posts]
        prior = [e.permute(1, 0, 2) for e in prior]
        post = [e.permute(1, 0, 2) for e in post]
        return post, prior
    def imagine(
        self, action: TensorType, state: List[TensorType] = None
    ) -> List[TensorType]:
        """Imagines the trajectory starting from state through a list of actions.
        Similar to observe(), requires rolling out the RNN for each timestep.

        Args:
            action (TensorType): Actions
            state (List[TensorType]): Starting state before rollout

        Returns:
            Prior states
        """
        if state is None:
            state = self.get_initial_state(action.size()[0])
        # Time-major layout for the rollout loop.
        action = action.permute(1, 0, 2)
        indices = range(len(action))
        priors = [[] for _ in range(len(state))]
        last = state
        for index in indices:
            last = self.img_step(last, action[index])
            [o.append(s) for s, o in zip(last, priors)]
        prior = [torch.stack(x, dim=0) for x in priors]
        prior = [e.permute(1, 0, 2) for e in prior]
        return prior
    def obs_step(
        self, prev_state: TensorType, prev_action: TensorType, embed: TensorType
    ) -> Tuple[List[TensorType], List[TensorType]]:
        """Runs through the posterior model and returns the posterior state

        Args:
            prev_state (TensorType): The previous state
            prev_action (TensorType): The previous action
            embed (TensorType): Embedding from ConvEncoder

        Returns:
            Post and Prior state
        """
        prior = self.img_step(prev_state, prev_action)
        # prior[3] is the deterministic hidden state from the prior step.
        x = torch.cat([prior[3], embed], dim=-1)
        x = self.obs1(x)
        x = self.act()(x)
        x = self.obs2(x)
        mean, std = torch.chunk(x, 2, dim=-1)
        # Softplus + 0.1 floor keeps std strictly positive.
        std = self.softplus()(std) + 0.1
        # rsample keeps the sampling step differentiable (reparameterization).
        stoch = self.get_dist(mean, std).rsample()
        post = [mean, std, stoch, prior[3]]
        return post, prior
    def img_step(
        self, prev_state: TensorType, prev_action: TensorType
    ) -> List[TensorType]:
        """Runs through the prior model and returns the prior state

        Args:
            prev_state (TensorType): The previous state
            prev_action (TensorType): The previous action

        Returns:
            Prior state
        """
        # prev_state[2] = stoch sample, prev_state[3] = deter hidden state.
        x = torch.cat([prev_state[2], prev_action], dim=-1)
        x = self.img1(x)
        x = self.act()(x)
        deter = self.cell(x, prev_state[3])
        x = deter
        x = self.img2(x)
        x = self.act()(x)
        x = self.img3(x)
        mean, std = torch.chunk(x, 2, dim=-1)
        std = self.softplus()(std) + 0.1
        stoch = self.get_dist(mean, std).rsample()
        return [mean, std, stoch, deter]
    def get_feature(self, state: List[TensorType]) -> TensorType:
        # Constructs feature for input to reward, decoder, actor, critic
        return torch.cat([state[2], state[3]], dim=-1)
    def get_dist(self, mean: TensorType, std: TensorType) -> TensorType:
        # Diagonal Gaussian over the stochastic latent.
        return td.Normal(mean, std)
# Represents all models in Dreamer, unifies them all into a single interface
class DreamerModel(TorchModelV2, nn.Module):
    """Unifies all Dreamer sub-models behind a single model interface.

    Holds the ConvEncoder/ConvDecoder, the RSSM dynamics model, and the
    reward, actor (policy) and value heads.
    """

    def __init__(self, obs_space, action_space, num_outputs, model_config, name):
        super().__init__(obs_space, action_space, num_outputs, model_config, name)
        nn.Module.__init__(self)
        self.depth = model_config["depth_size"]
        self.deter_size = model_config["deter_size"]
        self.stoch_size = model_config["stoch_size"]
        self.hidden_size = model_config["hidden_size"]
        self.action_size = action_space.shape[0]
        # Heads consume the concatenation of stochastic + deterministic state.
        feature_size = self.stoch_size + self.deter_size
        self.encoder = ConvEncoder(self.depth)
        self.decoder = ConvDecoder(feature_size, depth=self.depth)
        self.reward = DenseDecoder(feature_size, 1, 2, self.hidden_size)
        self.dynamics = RSSM(
            self.action_size,
            32 * self.depth,
            stoch=self.stoch_size,
            deter=self.deter_size,
        )
        self.actor = ActionDecoder(
            feature_size, self.action_size, 4, self.hidden_size
        )
        self.value = DenseDecoder(feature_size, 1, 3, self.hidden_size)
        self.state = None
        self.device = (
            torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        )

    def policy(
        self, obs: TensorType, state: List[TensorType], explore=True
    ) -> Tuple[TensorType, List[float], List[TensorType]]:
        """Returns the action. Runs through the encoder, recurrent model,
        and policy to obtain action.

        Args:
            obs: Batch of observations.
            state: Posterior state (4 tensors) plus the previous action,
                or None to start from a fresh initial state.
            explore: If True, sample the action; else use the dist mean.

        Returns:
            Tuple of (action, action log-prob, new state).
        """
        if state is None:
            self.state = self.get_initial_state(batch_size=obs.shape[0])
        else:
            self.state = state
        post = self.state[:4]
        action = self.state[4]
        embed = self.encoder(obs)
        post, _ = self.dynamics.obs_step(post, action, embed)
        feat = self.dynamics.get_feature(post)
        action_dist = self.actor(feat)
        if explore:
            action = action_dist.sample()
        else:
            action = action_dist.mean
        logp = action_dist.log_prob(action)
        self.state = post + [action]
        return action, logp, self.state

    def imagine_ahead(self, state: List[TensorType], horizon: int) -> TensorType:
        """Given a batch of states, rolls out more state of length horizon.

        Args:
            state: Starting state tensors (batch and time leading dims).
            horizon: Number of imagination steps to roll out.

        Returns:
            Features of the imagined trajectory.
        """
        start = []
        for s in state:
            s = s.contiguous().detach()
            # Fold batch and time dims together for the rollout.
            shape = [-1] + list(s.size())[2:]
            start.append(s.view(*shape))

        def next_state(state):
            # Gradients flow through the sampled action only; features are
            # detached.
            feature = self.dynamics.get_feature(state).detach()
            action = self.actor(feature).rsample()
            return self.dynamics.img_step(state, action)

        last = start
        outputs = [[] for _ in range(len(start))]
        for _ in range(horizon):
            last = next_state(last)
            for s, o in zip(last, outputs):
                o.append(s)
        outputs = [torch.stack(x, dim=0) for x in outputs]
        return self.dynamics.get_feature(outputs)

    def get_initial_state(self, batch_size: int = 1) -> List[TensorType]:
        """Returns the zero-filled initial state for the given batch size.

        The previous implementation accepted no arguments, while ``policy()``
        called it as ``get_initial_state(batch_size=...)`` — a guaranteed
        TypeError. The default of 1 preserves the old no-argument behavior.
        """
        self.state = self.dynamics.get_initial_state(batch_size) + [
            torch.zeros(batch_size, self.action_space.shape[0]).to(self.device)
        ]
        return self.state

    def value_function(self) -> TensorType:
        """Unused placeholder required by the TorchModelV2 API."""
        return None
| |
# -*- coding: utf-8 -*-
""" Disaster Victim Identification, Controllers """
# Return 404 if the DVI module is disabled in deployment settings
# (c is the current controller name, provided by the web2py environment)
if not settings.has_module(c):
    raise HTTP(404, body="Module disabled: %s" % c)
# -----------------------------------------------------------------------------
def s3_menu_postp():
    """ Append "Open recent" links for the most recently viewed
        body/person records to the module menu """
    # @todo: rewrite this for new framework
    menu_selected = []
    body_id = s3base.s3_get_last_record_id("dvi_body")
    if body_id:
        body = s3db.dvi_body
        query = (body.id == body_id)
        record = db(query).select(body.id, body.pe_label,
                                  limitby=(0, 1)).first()
        if record:
            label = record.pe_label
            # NOTE(review): assumes a fixed menu layout — the candidate-match
            # link is appended to the 3rd-from-last menu entry; verify against
            # the menu definition
            response.menu_options[-3][-1].append(
                [T("Candidate Matches for Body %(label)s") % dict(label=label),
                 False, URL(f="person",
                            vars=dict(match=record.id))]
            )
            menu_selected.append(
                ["%s: %s" % (T("Body"), label),
                 False, URL(f="body", args=[record.id])]
            )
    person_id = s3base.s3_get_last_record_id("pr_person")
    if person_id:
        person = s3db.pr_person
        query = (person.id == person_id)
        record = db(query).select(person.id, limitby=(0, 1)).first()
        if record:
            # Use the standard person representation for the label
            name = s3db.pr_person_id().represent(record.id)
            menu_selected.append(
                ["%s: %s" % (T("Person"), name),
                 False, URL(f="person", args=[record.id])]
            )
    if menu_selected:
        menu_selected = [T("Open recent"), True, None, menu_selected]
        response.menu_options.append(menu_selected)
# -----------------------------------------------------------------------------
def index():
    """ Module's Home Page: body counts grouped by identification status """
    module_name = settings.modules[c].get("name_nice", T("Disaster Victim Identification"))
    btable = s3db.dvi_body
    itable = s3db.dvi_identification
    body_count = btable.id.count()
    # Count undeleted bodies per identification status (left join so
    # unidentified bodies, i.e. those without an identification record,
    # fall into the None group)
    rows = db(btable.deleted == False).select(body_count,
                                              itable.status,
                                              left=itable.on(itable.pe_id == btable.pe_id),
                                              groupby=itable.status)
    counts = {None: 0}
    for row in rows:
        counts[row[itable.status]] = row[body_count]
    total = sum(counts.values())
    status_labels = dict(s3db.dvi_id_status)
    status_labels[None] = T("unidentified")
    statistics = [(str(label), counts.get(status) or 0)
                  for status, label in status_labels.items()]
    response.title = module_name
    return dict(module_name=module_name,
                total=total,
                status=json.dumps(statistics))
# -----------------------------------------------------------------------------
def recreq():
    """ Recovery Requests List """
    table = s3db.dvi_recreq
    # Default the reporting person to the logged-in user
    table.person_id.default = auth.s3_logged_in_person()
    def prep(r):
        # Hide status/bodies_recovered when creating a new request
        if r.interactive and not r.record:
            for fname in ("status", "bodies_recovered"):
                field = table[fname]
                field.readable = False
                field.writable = False
        return True
    s3.prep = prep
    return s3_rest_controller()
# -----------------------------------------------------------------------------
def morgue():
    """ Morgue Registry """
    tabs = [(T("Morgue Details"), ""),
            (T("Bodies"), "body"),
            ]
    rheader = S3ResourceHeader([[(T("Morgue"), "name")]], tabs=tabs)
    def prep(r):
        # Common preprocessing shared by all Site Instance Types
        from s3db.org import org_site_prep
        org_site_prep(r)
        return True
    s3.prep = prep
    return s3_rest_controller(rheader = rheader)
# -----------------------------------------------------------------------------
def body():
    """ Dead Bodies Registry """
    # Relabel gender option 1 as "unknown" for this registry
    gender_opts = s3db.pr_gender_opts
    gender_opts[1] = T("unknown")
    # Hide the status field of journal notes
    ntable = s3db.pr_note
    ntable.status.readable = ntable.status.writable = False
    tabs = [(T("Recovery"), ""),
            (T("Checklist"), "checklist"),
            (T("Images"), "image"),
            (T("Physical Description"), "physical_description"),
            (T("Effects Inventory"), "effects"),
            (T("Journal"), "note"),
            (T("Identification"), "identification"),
            ]
    rheader = S3ResourceHeader([[(T("ID Tag Number"), "pe_label")],
                                ["gender"],
                                ["age_group"],
                                ],
                               tabs=tabs)
    return s3_rest_controller(rheader=rheader)
# -----------------------------------------------------------------------------
def person():
    """ Missing Persons Registry (Match Finder)

        When called with ?match=<body_id>, restricts the list to
        candidate matches for that body (see dvi_match_query).
    """
    table = s3db.pr_person
    s3.crud_strings["pr_person"].update(
        title_display = T("Missing Person Details"),
        title_list = T("Missing Persons"),
        label_list_button = T("List Missing Persons"),
        msg_list_empty = T("No Persons found"),
        msg_no_match = T("No Persons currently reported missing"))
    s3db.configure("pr_group_membership",
                   list_fields = ["id",
                                  "group_id",
                                  "group_head",
                                  "comments"
                                  ],
                   )
    # Missing persons records are read-only in this controller
    s3db.configure("pr_person",
                   deletable = False,
                   editable = False,
                   listadd = False,
                   list_fields = ["id",
                                  "first_name",
                                  "middle_name",
                                  "last_name",
                                  "picture",
                                  "gender",
                                  "age_group"
                                  ],
                   )
    def prep(r):
        # Apply the candidate-match filter on the main list view only.
        # (The previous version also did an unconditional dvi_body lookup
        # for an unused "label" variable — dead code, removed.)
        if not r.id and not r.method and not r.component:
            body_id = r.get_vars.get("match", None)
            if body_id:
                query = dvi_match_query(body_id)
                r.resource.add_filter(query)
                s3.crud_strings["pr_person"].update(
                    msg_no_match = T("No matching records found"))
        return True
    s3.prep = prep
    # @ToDo: Add to crud_fields
    # (was "field = s3db...default = True", binding an unused name)
    s3db.pr_person_details.missing.default = True
    table.age_group.readable = True
    table.age_group.writable = True
    # Show only missing persons in list views
    if len(request.args) == 0:
        from s3 import FS
        s3.filter = (FS("person_details.missing") == True)
    mpr_tabs = [(T("Missing Report"), "missing_report"),
                (T("Person Details"), None),
                (T("Physical Description"), "physical_description"),
                (T("Images"), "image"),
                (T("Identity"), "identity"),
                (T("Address"), "address"),
                (T("Contact Data"), "contact"),
                (T("Journal"), "note"),
                ]
    rheader = lambda r: s3db.pr_rheader(r, tabs=mpr_tabs)
    return s3_rest_controller("pr", "person",
                              main = "first_name",
                              extra = "last_name",
                              rheader = rheader,
                              )
# -------------------------------------------------------------------------
def dvi_match_query(body_id):
    """
        Get a query for candidate matches between the missing
        persons registry and a dead body

        @param body_id: the dvi_body record ID
    """
    ptable = s3db.pr_person
    pdtable = s3db.pr_person_details
    ntable = s3db.pr_note
    btable = s3db.dvi_body
    # Base query: persons reported missing, with at least one
    # note of status 1 ("last seen")
    query = ((ptable.deleted == False) & \
             (pdtable.person_id == ptable.id) & \
             (pdtable.missing == True) & \
             (ntable.pe_id == ptable.pe_id) & \
             (ntable.status == 1))
    # Look up the body by its record ID: btable.id, matching how
    # dvi_body is queried elsewhere in this controller (the previous
    # "btable.body_id" field does not exist, so the lookup always failed
    # and the refinements below were never applied)
    body = db(btable.id == body_id).select(btable.date_of_recovery,
                                           btable.age_group,
                                           btable.gender,
                                           limitby = (0, 1)
                                           ).first()
    if not body:
        return query
    # last seen should be before date of recovery
    if body.date_of_recovery:
        q = ((ntable.timestmp <= body.date_of_recovery) | \
             (ntable.timestmp == None))
        query &= q
    # age group should match (1 presumably means "unknown" — matches the
    # gender convention below)
    if body.age_group and body.age_group != 1:
        q = ((ptable.age_group == None) | \
             (ptable.age_group == 1) | \
             (ptable.age_group == body.age_group))
        query &= q
    # gender should match (1 = "unknown", see body())
    if body.gender and body.gender != 1:
        q = ((ptable.gender == None) | \
             (ptable.gender == 1) | \
             (ptable.gender == body.gender))
        query &= q
    return query
# -----------------------------------------------------------------------------
def tooltip():
    """ Ajax Tooltips """
    # Render the tooltip view matching the requested form field, if any
    formfield = request.vars.get("formfield")
    if formfield:
        response.view = "pr/ajaxtips/%s.html" % formfield
    return {}
# END =========================================================================
| |
"""Support for ISY994 binary sensors."""
from __future__ import annotations
from datetime import timedelta
from pyisy.constants import (
CMD_OFF,
CMD_ON,
ISY_VALUE_UNKNOWN,
PROTO_INSTEON,
PROTO_ZWAVE,
)
from pyisy.nodes import Group, Node
from homeassistant.components.binary_sensor import (
DOMAIN as BINARY_SENSOR,
BinarySensorDeviceClass,
BinarySensorEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util import dt as dt_util
from .const import (
_LOGGER,
BINARY_SENSOR_DEVICE_TYPES_ISY,
BINARY_SENSOR_DEVICE_TYPES_ZWAVE,
DOMAIN as ISY994_DOMAIN,
ISY994_NODES,
ISY994_PROGRAMS,
SUBNODE_CLIMATE_COOL,
SUBNODE_CLIMATE_HEAT,
SUBNODE_DUSK_DAWN,
SUBNODE_HEARTBEAT,
SUBNODE_LOW_BATTERY,
SUBNODE_MOTION_DISABLED,
SUBNODE_NEGATIVE,
SUBNODE_TAMPER,
TYPE_CATEGORY_CLIMATE,
TYPE_INSTEON_MOTION,
)
from .entity import ISYNodeEntity, ISYProgramEntity
from .helpers import migrate_old_unique_ids
# Device classes whose state arrives on child (sub)nodes; creating their
# entities requires the parent device to have been created first.
DEVICE_PARENT_REQUIRED = [
    BinarySensorDeviceClass.OPENING,
    BinarySensorDeviceClass.MOISTURE,
    BinarySensorDeviceClass.MOTION,
]
async def async_setup_entry(
    hass: HomeAssistant,
    entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up the ISY994 binary sensor platform.

    Creates entities for every binary-sensor node and program in the ISY,
    with special handling for Insteon child (sub)nodes, which extend or
    attach to their parent device's entity.
    """
    devices = []
    devices_by_address = {}
    child_nodes = []
    hass_isy_data = hass.data[ISY994_DOMAIN][entry.entry_id]
    for node in hass_isy_data[ISY994_NODES][BINARY_SENSOR]:
        device_class, device_type = _detect_device_type_and_class(node)
        if node.protocol == PROTO_INSTEON:
            if node.parent_node is not None:
                # We'll process the Insteon child nodes last, to ensure all parent
                # nodes have been processed
                child_nodes.append((node, device_class, device_type))
                continue
            device = ISYInsteonBinarySensorEntity(node, device_class)
        else:
            device = ISYBinarySensorEntity(node, device_class)
        devices.append(device)
        devices_by_address[node.address] = device
    # Handle some special child node cases for Insteon Devices
    for (node, device_class, device_type) in child_nodes:
        # The subnode id is the trailing hex digit of the node address
        subnode_id = int(node.address.split(" ")[-1], 16)
        # Handle Insteon Thermostats
        if device_type.startswith(TYPE_CATEGORY_CLIMATE):
            if subnode_id == SUBNODE_CLIMATE_COOL:
                # Subnode 2 is the "Cool Control" sensor
                # It never reports its state until first use is
                # detected after an ISY Restart, so we assume it's off.
                # As soon as the ISY Event Stream connects if it has a
                # valid state, it will be set.
                device = ISYInsteonBinarySensorEntity(
                    node, BinarySensorDeviceClass.COLD, False
                )
                devices.append(device)
            elif subnode_id == SUBNODE_CLIMATE_HEAT:
                # Subnode 3 is the "Heat Control" sensor
                device = ISYInsteonBinarySensorEntity(
                    node, BinarySensorDeviceClass.HEAT, False
                )
                devices.append(device)
            continue
        if device_class in DEVICE_PARENT_REQUIRED:
            parent_device = devices_by_address.get(node.parent_node.address)
            if not parent_device:
                _LOGGER.error(
                    "Node %s has a parent node %s, but no device "
                    "was created for the parent. Skipping",
                    node.address,
                    node.parent_node,
                )
                continue
        if device_class in (
            BinarySensorDeviceClass.OPENING,
            BinarySensorDeviceClass.MOISTURE,
        ):
            # These sensors use an optional "negative" subnode 2 to
            # snag all state changes
            if subnode_id == SUBNODE_NEGATIVE:
                parent_device.add_negative_node(node)
            elif subnode_id == SUBNODE_HEARTBEAT:
                # Subnode 4 is the heartbeat node, which we will
                # represent as a separate binary_sensor
                device = ISYBinarySensorHeartbeat(node, parent_device)
                parent_device.add_heartbeat_device(device)
                devices.append(device)
            continue
        if (
            device_class == BinarySensorDeviceClass.MOTION
            and device_type is not None
            and any(device_type.startswith(t) for t in TYPE_INSTEON_MOTION)
        ):
            # Special cases for Insteon Motion Sensors I & II:
            # Some subnodes never report status until activated, so
            # the initial state is forced "OFF"/"NORMAL" if the
            # parent device has a valid state. This is corrected
            # upon connection to the ISY event stream if subnode has a valid state.
            initial_state = None if parent_device.state is None else False
            if subnode_id == SUBNODE_DUSK_DAWN:
                # Subnode 2 is the Dusk/Dawn sensor
                device = ISYInsteonBinarySensorEntity(
                    node, BinarySensorDeviceClass.LIGHT
                )
                devices.append(device)
                continue
            if subnode_id == SUBNODE_LOW_BATTERY:
                # Subnode 3 is the low battery node
                device = ISYInsteonBinarySensorEntity(
                    node, BinarySensorDeviceClass.BATTERY, initial_state
                )
                devices.append(device)
                continue
            if subnode_id in SUBNODE_TAMPER:
                # Tamper Sub-node for MS II. Sometimes reported as "A" sometimes
                # reported as "10", which translate from Hex to 10 and 16 resp.
                device = ISYInsteonBinarySensorEntity(
                    node, BinarySensorDeviceClass.PROBLEM, initial_state
                )
                devices.append(device)
                continue
            if subnode_id in SUBNODE_MOTION_DISABLED:
                # Motion Disabled Sub-node for MS II ("D" or "13")
                device = ISYInsteonBinarySensorEntity(node)
                devices.append(device)
                continue
        # We don't yet have any special logic for other sensor
        # types, so add the nodes as individual devices
        device = ISYBinarySensorEntity(node, device_class)
        devices.append(device)
    # Programs configured as binary sensors on the ISY
    for name, status, _ in hass_isy_data[ISY994_PROGRAMS][BINARY_SENSOR]:
        devices.append(ISYBinarySensorProgramEntity(name, status))
    await migrate_old_unique_ids(hass, BINARY_SENSOR, devices)
    async_add_entities(devices)
def _detect_device_type_and_class(node: Group | Node) -> (str, str):
try:
device_type = node.type
except AttributeError:
# The type attribute didn't exist in the ISY's API response
return (None, None)
# Z-Wave Devices:
if node.protocol == PROTO_ZWAVE:
device_type = f"Z{node.zwave_props.category}"
for device_class, values in BINARY_SENSOR_DEVICE_TYPES_ZWAVE.items():
if node.zwave_props.category in values:
return device_class, device_type
return (None, device_type)
# Other devices (incl Insteon.)
for device_class, values in BINARY_SENSOR_DEVICE_TYPES_ISY.items():
if any(device_type.startswith(t) for t in values):
return device_class, device_type
return (None, device_type)
class ISYBinarySensorEntity(ISYNodeEntity, BinarySensorEntity):
    """Representation of a basic ISY994 binary sensor device."""
    def __init__(self, node, force_device_class=None, unknown_state=None) -> None:
        """Initialize the ISY994 binary sensor device.

        Args:
            node: The ISY node backing this sensor.
            force_device_class: Device class to report instead of the
                auto-detected one.
            unknown_state: Unused here; kept for signature compatibility
                with ISYInsteonBinarySensorEntity.
        """
        super().__init__(node)
        self._device_class = force_device_class
    @property
    def is_on(self) -> bool | None:
        """Get whether the ISY994 binary sensor device is on."""
        # The ISY reports an explicit "unknown" value, e.g. after a restart
        if self._node.status == ISY_VALUE_UNKNOWN:
            return None
        return bool(self._node.status)
    @property
    def device_class(self) -> str | None:
        """Return the class of this device.
        This was discovered by parsing the device type code during init
        """
        return self._device_class
class ISYInsteonBinarySensorEntity(ISYBinarySensorEntity):
    """Representation of an ISY994 Insteon binary sensor device.
    Often times, a single device is represented by multiple nodes in the ISY,
    allowing for different nuances in how those devices report their on and
    off events. This class turns those multiple nodes into a single Home
    Assistant entity and handles both ways that ISY binary sensors can work.
    """
    def __init__(self, node, force_device_class=None, unknown_state=None) -> None:
        """Initialize the ISY994 binary sensor device.

        Args:
            node: The primary ("positive") ISY node for this sensor.
            force_device_class: Device class to report instead of the
                auto-detected one.
            unknown_state: State to assume when the ISY reports the node's
                status as unknown (e.g. right after an ISY restart).
        """
        super().__init__(node, force_device_class)
        self._negative_node = None
        self._heartbeat_device = None
        if self._node.status == ISY_VALUE_UNKNOWN:
            self._computed_state = unknown_state
            self._status_was_unknown = True
        else:
            self._computed_state = bool(self._node.status)
            self._status_was_unknown = False
    async def async_added_to_hass(self) -> None:
        """Subscribe to the node and subnode event emitters."""
        await super().async_added_to_hass()
        self._node.control_events.subscribe(self._async_positive_node_control_handler)
        if self._negative_node is not None:
            self._negative_node.control_events.subscribe(
                self._async_negative_node_control_handler
            )
    def add_heartbeat_device(self, device) -> None:
        """Register a heartbeat device for this sensor.
        The heartbeat node beats on its own, but we can gain a little
        reliability by considering any node activity for this sensor
        to be a heartbeat as well.
        """
        self._heartbeat_device = device
    def _async_heartbeat(self) -> None:
        """Send a heartbeat to our heartbeat device, if we have one."""
        if self._heartbeat_device is not None:
            self._heartbeat_device.async_heartbeat()
    def add_negative_node(self, child) -> None:
        """Add a negative node to this binary sensor device.
        The negative node is a node that can receive the 'off' events
        for the sensor, depending on device configuration and type.
        """
        self._negative_node = child
        # If the negative node has a value, it means the negative node is
        # in use for this device. Next we need to check to see if the
        # negative and positive nodes disagree on the state (both ON or
        # both OFF).
        if (
            self._negative_node.status != ISY_VALUE_UNKNOWN
            and self._negative_node.status == self._node.status
        ):
            # The states disagree, therefore we cannot determine the state
            # of the sensor until we receive our first ON event.
            self._computed_state = None
    @callback
    def _async_negative_node_control_handler(self, event: object) -> None:
        """Handle an "On" control event from the "negative" node."""
        # A DON on the negative node means the sensor turned Off
        if event.control == CMD_ON:
            _LOGGER.debug(
                "Sensor %s turning Off via the Negative node sending a DON command",
                self.name,
            )
            self._computed_state = False
            self.async_write_ha_state()
            self._async_heartbeat()
    @callback
    def _async_positive_node_control_handler(self, event: object) -> None:
        """Handle On and Off control event coming from the primary node.
        Depending on device configuration, sometimes only On events
        will come to this node, with the negative node representing Off
        events
        """
        if event.control == CMD_ON:
            _LOGGER.debug(
                "Sensor %s turning On via the Primary node sending a DON command",
                self.name,
            )
            self._computed_state = True
            self.async_write_ha_state()
            self._async_heartbeat()
        if event.control == CMD_OFF:
            _LOGGER.debug(
                "Sensor %s turning Off via the Primary node sending a DOF command",
                self.name,
            )
            self._computed_state = False
            self.async_write_ha_state()
            self._async_heartbeat()
    @callback
    def async_on_update(self, event: object) -> None:
        """Primary node status updates.
        We MOSTLY ignore these updates, as we listen directly to the Control
        events on all nodes for this device. However, there is one edge case:
        If a leak sensor is unknown, due to a recent reboot of the ISY, the
        status will get updated to dry upon the first heartbeat. This status
        update is the only way that a leak sensor's status changes without
        an accompanying Control event, so we need to watch for it.
        """
        if self._status_was_unknown and self._computed_state is None:
            self._computed_state = bool(self._node.status)
            self._status_was_unknown = False
            self.async_write_ha_state()
            self._async_heartbeat()
    @property
    def is_on(self) -> bool | None:
        """Get whether the ISY994 binary sensor device is on.
        Insteon leak sensors set their primary node to On when the state is
        DRY, not WET, so we invert the binary state if the user indicates
        that it is a moisture sensor.
        """
        if self._computed_state is None:
            # Do this first so we don't invert None on moisture sensors
            return None
        if self.device_class == BinarySensorDeviceClass.MOISTURE:
            return not self._computed_state
        return self._computed_state
class ISYBinarySensorHeartbeat(ISYNodeEntity, BinarySensorEntity):
    """Representation of the battery state of an ISY994 sensor."""
    def __init__(self, node, parent_device) -> None:
        """Initialize the ISY994 binary sensor device.
        Computed state is set to UNKNOWN unless the ISY provided a valid
        state. See notes above regarding ISY Sensor status on ISY restart.
        If a valid state is provided (either on or off), the computed state in
        HA is set to OFF (Normal). If the heartbeat is not received in 25 hours
        then the computed state is set to ON (Low Battery).
        """
        super().__init__(node)
        self._parent_device = parent_device
        self._heartbeat_timer = None
        self._computed_state = None
        if self.state is None:
            self._computed_state = False
    async def async_added_to_hass(self) -> None:
        """Subscribe to the node and subnode event emitters."""
        await super().async_added_to_hass()
        self._node.control_events.subscribe(self._heartbeat_node_control_handler)
        # Start the timer on bootup, so we can change from UNKNOWN to OFF
        self._restart_timer()
    def _heartbeat_node_control_handler(self, event: object) -> None:
        """Update the heartbeat timestamp when any ON/OFF event is sent.
        The ISY uses both DON and DOF commands (alternating) for a heartbeat.
        """
        if event.control in (CMD_ON, CMD_OFF):
            self.async_heartbeat()
    @callback
    def async_heartbeat(self):
        """Mark the device as online, and restart the 25 hour timer.
        This gets called when the heartbeat node beats, but also when the
        parent sensor sends any events, as we can trust that to mean the device
        is online. This mitigates the risk of false positives due to a single
        missed heartbeat event.
        """
        self._computed_state = False
        self._restart_timer()
        self.async_write_ha_state()
    def _restart_timer(self):
        """Restart the 25 hour timer."""
        try:
            # Cancel any pending timer; the stored value is the cancel
            # callable returned by async_track_point_in_utc_time
            self._heartbeat_timer()
            self._heartbeat_timer = None
        except TypeError:
            # No heartbeat timer is active (self._heartbeat_timer is None)
            pass
        @callback
        def timer_elapsed(now) -> None:
            """Heartbeat missed; set state to ON to indicate dead battery."""
            self._computed_state = True
            self._heartbeat_timer = None
            self.async_write_ha_state()
        point_in_time = dt_util.utcnow() + timedelta(hours=25)
        _LOGGER.debug(
            "Heartbeat timer starting. Now: %s Then: %s",
            dt_util.utcnow(),
            point_in_time,
        )
        self._heartbeat_timer = async_track_point_in_utc_time(
            self.hass, timer_elapsed, point_in_time
        )
    @callback
    def async_on_update(self, event: object) -> None:
        """Ignore node status updates.
        We listen directly to the Control events for this device.
        """
    @property
    def is_on(self) -> bool:
        """Get whether the ISY994 binary sensor device is on.
        Note: This method will return false if the current state is UNKNOWN
        which occurs after a restart until the first heartbeat or control
        parent control event is received.
        """
        return bool(self._computed_state)
    @property
    def device_class(self) -> str:
        """Get the class of this device."""
        return BinarySensorDeviceClass.BATTERY
    @property
    def extra_state_attributes(self):
        """Get the state attributes for the device."""
        attr = super().extra_state_attributes
        # Link this heartbeat sensor back to the sensor it monitors
        attr["parent_entity_id"] = self._parent_device.entity_id
        return attr
class ISYBinarySensorProgramEntity(ISYProgramEntity, BinarySensorEntity):
    """Representation of an ISY994 binary sensor program.
    This does not need all of the subnode logic in the device version of binary
    sensors.
    """
    @property
    def is_on(self) -> bool:
        """Get whether the ISY994 binary sensor device is on."""
        # Truthy program status maps directly to "on"
        return bool(self._node.status)
| |
# Copyright 2016 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from tempest.lib.common.utils import data_utils
from tempest.lib.services.image.v2 import images_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestImagesClient(base.BaseServiceTest):
    """Unit tests for the Glance v2 ImagesClient.

    Each test patches the underlying RestClient HTTP method and verifies
    that the service client issues the call and decodes the canned response
    correctly, for both str and bytes response bodies.
    """
    # Canned response body shared by the create/update/show image tests
    FAKE_CREATE_UPDATE_SHOW_IMAGE = {
        "id": "e485aab9-0907-4973-921c-bb6da8a8fcf8",
        "name": u"\u2740(*\xb4\u25e2`*)\u2740",
        "status": "active",
        "visibility": "public",
        "size": 2254249,
        "checksum": "2cec138d7dae2aa59038ef8c9aec2390",
        "tags": [
            "fedora",
            "beefy"
        ],
        "created_at": "2012-08-10T19:23:50Z",
        "updated_at": "2012-08-12T11:11:33Z",
        "self": "/v2/images/da3b75d9-3f4a-40e7-8a2c-bfab23927dea",
        "file": "/v2/images/da3b75d9-3f4a-40e7-8a2c-bfab23927"
                "dea/file",
        "schema": "/v2/schemas/image",
        "owner": None,
        "min_ram": None,
        "min_disk": None,
        "disk_format": None,
        "virtual_size": None,
        "container_format": None,
        "os_hash_algo": "sha512",
        "os_hash_value": "ef7d1ed957ffafefb324d50ebc6685ed03d0e645d",
        "os_hidden": False,
    }
    # Canned response body for the list_images tests
    FAKE_LIST_IMAGES = {
        "images": [
            {
                "status": "active",
                "name": "cirros-0.3.2-x86_64-disk",
                "tags": [],
                "container_format": "bare",
                "created_at": "2014-11-07T17:07:06Z",
                "disk_format": "qcow2",
                "updated_at": "2014-11-07T17:19:09Z",
                "visibility": "public",
                "self": "/v2/images/1bea47ed-f6a9-463b-b423-14b9cca9ad27",
                "min_disk": 0,
                "protected": False,
                "id": "1bea47ed-f6a9-463b-b423-14b9cca9ad27",
                "file": "/v2/images/1bea47ed-f6a9-463b-b423-14b9cca9ad27/file",
                "checksum": "64d7c1cd2b6f60c92c14662941cb7913",
                "owner": "5ef70662f8b34079a6eddb8da9d75fe8",
                "size": 13167616,
                "min_ram": 0,
                "schema": "/v2/schemas/image",
                "virtual_size": None,
                "os_hash_algo": "sha512",
                "os_hash_value": "ef7d1ed957ffafefb324d50ebc6685ed03d0e645d",
                "os_hidden": False
            },
            {
                "status": "active",
                "name": "F17-x86_64-cfntools",
                "tags": [],
                "container_format": "bare",
                "created_at": "2014-10-30T08:23:39Z",
                "disk_format": "qcow2",
                "updated_at": "2014-11-03T16:40:10Z",
                "visibility": "public",
                "self": "/v2/images/781b3762-9469-4cec-b58d-3349e5de4e9c",
                "min_disk": 0,
                "protected": False,
                "id": "781b3762-9469-4cec-b58d-3349e5de4e9c",
                "file": "/v2/images/781b3762-9469-4cec-b58d-3349e5de4e9c/file",
                "checksum": "afab0f79bac770d61d24b4d0560b5f70",
                "owner": "5ef70662f8b34079a6eddb8da9d75fe8",
                "size": 476704768,
                "min_ram": 0,
                "schema": "/v2/schemas/image",
                "virtual_size": None,
                "os_hash_algo": "sha512",
                "os_hash_value": "ef7d1ed957ffafefb324d50ebc6685ed03d0e645d",
                "os_hidden": False
            }
        ],
        "schema": "/v2/schemas/images",
        "first": "/v2/images"
    }
    FAKE_TAG_NAME = "fake tag"
    def setUp(self):
        """Create an ImagesClient backed by a fake auth provider."""
        super(TestImagesClient, self).setUp()
        fake_auth = fake_auth_provider.FakeAuthProvider()
        self.client = images_client.ImagesClient(fake_auth,
                                                 'image', 'regionOne')
    def _test_update_image(self, bytes_body=False):
        """Verify update_image issues a PATCH and decodes the response."""
        self.check_service_client_function(
            self.client.update_image,
            'tempest.lib.common.rest_client.RestClient.patch',
            self.FAKE_CREATE_UPDATE_SHOW_IMAGE,
            bytes_body,
            image_id="e485aab9-0907-4973-921c-bb6da8a8fcf8",
            patch=[{"op": "add", "path": "/a/b/c", "value": ["foo", "bar"]}])
    def _test_create_image(self, bytes_body=False):
        """Verify create_image issues a POST and expects a 201."""
        self.check_service_client_function(
            self.client.create_image,
            'tempest.lib.common.rest_client.RestClient.post',
            self.FAKE_CREATE_UPDATE_SHOW_IMAGE,
            bytes_body,
            name="virtual machine image",
            status=201)
    def _test_show_image(self, bytes_body=False):
        """Verify show_image issues a GET and decodes the response."""
        self.check_service_client_function(
            self.client.show_image,
            'tempest.lib.common.rest_client.RestClient.get',
            self.FAKE_CREATE_UPDATE_SHOW_IMAGE,
            bytes_body,
            image_id="e485aab9-0907-4973-921c-bb6da8a8fcf8")
    def _test_list_images(self, bytes_body=False):
        """Verify list_images issues a GET on the images collection."""
        self.check_service_client_function(
            self.client.list_images,
            'tempest.lib.common.rest_client.RestClient.get',
            self.FAKE_LIST_IMAGES,
            bytes_body,
            mock_args=['images'])
    def test_create_image_with_str_body(self):
        """Test create_image with a str response body."""
        self._test_create_image()
    def test_create_image_with_bytes_body(self):
        """Test create_image with a bytes response body."""
        self._test_create_image(bytes_body=True)
    def test_update_image_with_str_body(self):
        """Test update_image with a str response body."""
        self._test_update_image()
    def test_update_image_with_bytes_body(self):
        """Test update_image with a bytes response body."""
        self._test_update_image(bytes_body=True)
    def test_deactivate_image(self):
        """Test deactivate_image issues a POST and expects a 204."""
        self.check_service_client_function(
            self.client.deactivate_image,
            'tempest.lib.common.rest_client.RestClient.post',
            {}, image_id="e485aab9-0907-4973-921c-bb6da8a8fcf8", status=204)
    def test_reactivate_image(self):
        """Test reactivate_image issues a POST and expects a 204."""
        self.check_service_client_function(
            self.client.reactivate_image,
            'tempest.lib.common.rest_client.RestClient.post',
            {}, image_id="e485aab9-0907-4973-921c-bb6da8a8fcf8", status=204)
    def test_delete_image(self):
        """Test delete_image issues a DELETE and expects a 204."""
        self.check_service_client_function(
            self.client.delete_image,
            'tempest.lib.common.rest_client.RestClient.delete',
            {}, image_id="e485aab9-0907-4973-921c-bb6da8a8fcf8", status=204)
    def test_store_image_file(self):
        """Test uploading image data via a raw request."""
        data = six.BytesIO(data_utils.random_bytes())
        self.check_service_client_function(
            self.client.store_image_file,
            'tempest.lib.common.rest_client.RestClient.raw_request',
            {},
            image_id=self.FAKE_CREATE_UPDATE_SHOW_IMAGE["id"],
            status=204,
            data=data)
    def test_show_image_file(self):
        """Test downloading image data as a raw string response."""
        # NOTE: The response for this API returns raw binary data, but an error
        # is thrown if random bytes are used for the resp body since
        # ``create_response`` then calls ``json.dumps``.
        self.check_service_client_function(
            self.client.show_image_file,
            'tempest.lib.common.rest_client.RestClient.get',
            {},
            resp_as_string=True,
            image_id=self.FAKE_CREATE_UPDATE_SHOW_IMAGE["id"],
            headers={'Content-Type': 'application/octet-stream'},
            status=200)
    def test_add_image_tag(self):
        """Test add_image_tag issues a PUT and expects a 204."""
        self.check_service_client_function(
            self.client.add_image_tag,
            'tempest.lib.common.rest_client.RestClient.put',
            {},
            image_id=self.FAKE_CREATE_UPDATE_SHOW_IMAGE["id"],
            status=204,
            tag=self.FAKE_TAG_NAME)
    def test_delete_image_tag(self):
        """Test delete_image_tag issues a DELETE and expects a 204."""
        self.check_service_client_function(
            self.client.delete_image_tag,
            'tempest.lib.common.rest_client.RestClient.delete',
            {},
            image_id=self.FAKE_CREATE_UPDATE_SHOW_IMAGE["id"],
            status=204,
            tag=self.FAKE_TAG_NAME)
    def test_show_image_with_str_body(self):
        """Test show_image with a str response body."""
        self._test_show_image()
    def test_show_image_with_bytes_body(self):
        """Test show_image with a bytes response body."""
        self._test_show_image(bytes_body=True)
    def test_list_images_with_str_body(self):
        """Test list_images with a str response body."""
        self._test_list_images()
    def test_list_images_with_bytes_body(self):
        """Test list_images with a bytes response body."""
        self._test_list_images(bytes_body=True)
| |
"""The exceptions used by Home Assistant."""
from __future__ import annotations
from collections.abc import Generator, Sequence
from typing import TYPE_CHECKING
import attr
if TYPE_CHECKING:
from .core import Context
# mypy: disallow-any-generics
class HomeAssistantError(Exception):
    """Base class for all errors raised by Home Assistant."""


class InvalidEntityFormatError(HomeAssistantError):
    """Raised when an entity is encountered in an invalid format."""


class NoEntitySpecifiedError(HomeAssistantError):
    """Raised when an operation requires an entity but none was given."""


class TemplateError(HomeAssistantError):
    """Raised when rendering a template fails."""

    def __init__(self, exception: Exception) -> None:
        """Wrap the underlying exception, exposing its type name and message."""
        super().__init__(f"{exception.__class__.__name__}: {exception}")
@attr.s
class ConditionError(HomeAssistantError):
    """Base error raised during condition evaluation.

    Subclasses implement ``output`` to produce an indented, human-readable
    trace of where evaluation failed.
    """

    # The type of the failed condition, such as 'and' or 'numeric_state'
    type: str = attr.ib()

    @staticmethod
    def _indent(indent: int, message: str) -> str:
        """Return *message* prefixed with indentation for *indent* levels."""
        return indent * " " + message

    def output(self, indent: int) -> Generator[str, None, None]:
        """Yield an indented representation; must be overridden."""
        raise NotImplementedError()

    def __str__(self) -> str:
        """Join the indented representation into one string."""
        return "\n".join(self.output(indent=0))
@attr.s
class ConditionErrorMessage(ConditionError):
    """Leaf condition error carrying a plain message."""

    # A message describing this error
    message: str = attr.ib()

    def output(self, indent: int) -> Generator[str, None, None]:
        """Yield the single indented line describing this error."""
        line = f"In '{self.type}' condition: {self.message}"
        yield self._indent(indent, line)
@attr.s
class ConditionErrorIndex(ConditionError):
    """Condition error for one failed part of a multi-part condition."""

    # The zero-based index of the failed condition, for conditions with multiple parts
    index: int = attr.ib()
    # The total number of parts in this condition, including non-failed parts
    total: int = attr.ib()
    # The error that this error wraps
    error: ConditionError = attr.ib()

    def output(self, indent: int) -> Generator[str, None, None]:
        """Yield a header line, then the wrapped error one level deeper."""
        if self.total <= 1:
            header = f"In '{self.type}':"
        else:
            # Render the index as one-based for readability
            header = f"In '{self.type}' (item {self.index+1} of {self.total}):"
        yield self._indent(indent, header)
        yield from self.error.output(indent + 1)
@attr.s
class ConditionErrorContainer(ConditionError):
    """Condition error aggregating several sub-errors."""

    # List of ConditionErrors that this error wraps
    errors: Sequence[ConditionError] = attr.ib()

    def output(self, indent: int) -> Generator[str, None, None]:
        """Yield each wrapped error's representation at the same level."""
        for sub_error in self.errors:
            yield from sub_error.output(indent)
class IntegrationError(HomeAssistantError):
    """Base class for platform and config entry exceptions."""

    def __str__(self) -> str:
        """Return a human readable error, falling back to the cause."""
        message = super().__str__()
        if message:
            return message
        # No message was passed at construction time; describe the
        # chained cause instead.
        return str(self.__cause__)
class PlatformNotReady(IntegrationError):
    """Error to indicate that platform is not ready."""
class ConfigEntryNotReady(IntegrationError):
    """Error to indicate that config entry is not ready."""
class ConfigEntryAuthFailed(IntegrationError):
    """Error to indicate that config entry could not authenticate."""
class InvalidStateError(HomeAssistantError):
    """When an invalid state is encountered."""
class Unauthorized(HomeAssistantError):
    """When an action is unauthorized."""

    def __init__(
        self,
        context: Context | None = None,
        user_id: str | None = None,
        entity_id: str | None = None,
        config_entry_id: str | None = None,
        perm_category: str | None = None,
        permission: str | None = None,
    ) -> None:
        """Unauthorized error.

        All parameters are optional; they capture as much as is known
        about the action that was denied.
        """
        # The exception message is just the class name ("Unauthorized").
        super().__init__(self.__class__.__name__)
        self.context = context
        # Fall back to the user attached to the request context when no
        # explicit user_id was given.
        if user_id is None and context is not None:
            user_id = context.user_id
        self.user_id = user_id
        self.entity_id = entity_id
        self.config_entry_id = config_entry_id
        # Not all actions have an ID (like adding config entry)
        # We then use this fallback to know what category was unauth
        self.perm_category = perm_category
        self.permission = permission
class UnknownUser(Unauthorized):
    """When call is made with user ID that doesn't exist."""
class ServiceNotFound(HomeAssistantError):
    """Raised when a service is not found."""

    def __init__(self, domain: str, service: str) -> None:
        """Initialize error with the domain and service that were requested."""
        # Bug fix: the original called ``super().__init__(self, msg)``,
        # which stored ``self`` as an extra element of ``Exception.args``.
        # Only the message belongs there.
        super().__init__(f"Service {domain}.{service} not found")
        self.domain = domain
        self.service = service

    def __str__(self) -> str:
        """Return string representation."""
        return f"Unable to find service {self.domain}.{self.service}"
class MaxLengthExceeded(HomeAssistantError):
    """Raised when a property value has exceeded the max character length."""

    def __init__(self, value: str, property_name: str, max_length: int) -> None:
        """Initialize error with the offending value and the limit."""
        # Bug fix: the original passed ``self`` as an extra positional
        # argument to ``super().__init__``, polluting ``Exception.args``.
        super().__init__(
            f"Value {value} for property {property_name} has a max length of "
            f"{max_length} characters"
        )
        self.value = value
        self.property_name = property_name
        self.max_length = max_length
class RequiredParameterMissing(HomeAssistantError):
    """Raised when a required parameter is missing from a function call."""

    def __init__(self, parameter_names: list[str]) -> None:
        """Initialize error with the names of the acceptable parameters."""
        # Bug fix: the original passed ``self`` as an extra positional
        # argument to ``super().__init__``, polluting ``Exception.args``.
        super().__init__(
            "Call must include at least one of the following parameters: "
            f"{', '.join(parameter_names)}"
        )
        self.parameter_names = parameter_names
| |
from __future__ import unicode_literals
import time
import hmac
import hashlib
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urllib_parse,
)
from ..utils import (
int_or_none,
float_or_none,
sanitized_Request,
xpath_text,
ExtractorError,
)
class AtresPlayerIE(InfoExtractor):
    """Extractor for atresplayer.com episodes.

    Flow: fetch the episode page to find the episode id, download the
    player JSON, then request a signed "urlVideo" JSON (the token is an
    HMAC-MD5 of episode id + a shifted server timestamp) for HDS formats,
    and finally the episode XML for metadata and subtitles.
    """
    _VALID_URL = r'https?://(?:www\.)?atresplayer\.com/television/[^/]+/[^/]+/[^/]+/(?P<id>.+?)_\d+\.html'
    _NETRC_MACHINE = 'atresplayer'
    _TESTS = [
        {
            'url': 'http://www.atresplayer.com/television/programas/el-club-de-la-comedia/temporada-4/capitulo-10-especial-solidario-nochebuena_2014122100174.html',
            'md5': 'efd56753cda1bb64df52a3074f62e38a',
            'info_dict': {
                'id': 'capitulo-10-especial-solidario-nochebuena',
                'ext': 'mp4',
                'title': 'Especial Solidario de Nochebuena',
                'description': 'md5:e2d52ff12214fa937107d21064075bf1',
                'duration': 5527.6,
                'thumbnail': r're:^https?://.*\.jpg$',
            },
            'skip': 'This video is only available for registered users'
        },
        {
            'url': 'http://www.atresplayer.com/television/especial/videoencuentros/temporada-1/capitulo-112-david-bustamante_2014121600375.html',
            'md5': '0d0e918533bbd4b263f2de4d197d4aac',
            'info_dict': {
                'id': 'capitulo-112-david-bustamante',
                'ext': 'flv',
                'title': 'David Bustamante',
                'description': 'md5:f33f1c0a05be57f6708d4dd83a3b81c6',
                'duration': 1439.0,
                'thumbnail': r're:^https?://.*\.jpg$',
            },
        },
        {
            'url': 'http://www.atresplayer.com/television/series/el-secreto-de-puente-viejo/el-chico-de-los-tres-lunares/capitulo-977-29-12-14_2014122400174.html',
            'only_matching': True,
        },
    ]
    _USER_AGENT = 'Dalvik/1.6.0 (Linux; U; Android 4.3; GT-I9300 Build/JSS15J'
    # Shared secret used to sign urlVideo requests
    _MAGIC = 'QWtMLXs414Yo+c#_+Q#K@NN)'
    _TIMESTAMP_SHIFT = 30000
    _TIME_API_URL = 'http://servicios.atresplayer.com/api/admin/time.json'
    _URL_VIDEO_TEMPLATE = 'https://servicios.atresplayer.com/api/urlVideo/{1}/{0}/{1}|{2}|{3}.json'
    _PLAYER_URL_TEMPLATE = 'https://servicios.atresplayer.com/episode/getplayer.json?episodePk=%s'
    _EPISODE_URL_TEMPLATE = 'http://www.atresplayer.com/episodexml/%s'
    _LOGIN_URL = 'https://servicios.atresplayer.com/j_spring_security_check'
    # Maps typeOfEpisode values from the player JSON to user-facing errors
    _ERRORS = {
        'UNPUBLISHED': 'We\'re sorry, but this video is not yet available.',
        'DELETED': 'This video has expired and is no longer available for online streaming.',
        'GEOUNPUBLISHED': 'We\'re sorry, but this video is not available in your region due to right restrictions.',
        # 'PREMIUM': 'PREMIUM',
    }

    def _real_initialize(self):
        """Log in (if credentials are configured) before extraction."""
        self._login()

    def _login(self):
        """POST the configured credentials to the Spring security endpoint."""
        (username, password) = self._get_login_info()
        if username is None:
            # No credentials configured; anonymous extraction may still work
            return
        login_form = {
            'j_username': username,
            'j_password': password,
        }
        request = sanitized_Request(
            self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        response = self._download_webpage(
            request, None, 'Logging in as %s' % username)
        error = self._html_search_regex(
            r'(?s)<ul class="list_error">(.+?)</ul>', response, 'error', default=None)
        if error:
            raise ExtractorError(
                'Unable to login: %s' % error, expected=True)

    def _real_extract(self, url):
        """Extract formats, metadata and subtitles for one episode URL."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        episode_id = self._search_regex(
            r'episode="([^"]+)"', webpage, 'episode id')
        request = sanitized_Request(
            self._PLAYER_URL_TEMPLATE % episode_id,
            headers={'User-Agent': self._USER_AGENT})
        player = self._download_json(request, episode_id, 'Downloading player JSON')
        episode_type = player.get('typeOfEpisode')
        error_message = self._ERRORS.get(episode_type)
        if error_message:
            raise ExtractorError(
                '%s returned error: %s' % (self.IE_NAME, error_message), expected=True)
        formats = []
        # Direct HTTP format, if the player exposes one
        video_url = player.get('urlVideo')
        if video_url:
            format_info = {
                'url': video_url,
                'format_id': 'http',
            }
            # Dimensions/bitrate are encoded in the URL, e.g. ..._1500K_1280x720...
            mobj = re.search(r'(?P<bitrate>\d+)K_(?P<width>\d+)x(?P<height>\d+)', video_url)
            if mobj:
                format_info.update({
                    'width': int_or_none(mobj.group('width')),
                    'height': int_or_none(mobj.group('height')),
                    'tbr': int_or_none(mobj.group('bitrate')),
                })
            formats.append(format_info)
        m3u8_url = player.get('urlVideoHls')
        if m3u8_url:
            formats.extend(self._extract_m3u8_formats(
                m3u8_url, episode_id, 'mp4', 'm3u8_native', m3u8_id='hls', fatal=False))
        # Server time is required so the signed token validates; fall back to
        # local time if the time API is unreachable.
        timestamp = int_or_none(self._download_webpage(
            self._TIME_API_URL,
            video_id, 'Downloading timestamp', fatal=False), 1000, time.time())
        timestamp_shifted = compat_str(timestamp + self._TIMESTAMP_SHIFT)
        token = hmac.new(
            self._MAGIC.encode('ascii'),
            (episode_id + timestamp_shifted).encode('utf-8'), hashlib.md5
        ).hexdigest()
        request = sanitized_Request(
            self._URL_VIDEO_TEMPLATE.format('windows', episode_id, timestamp_shifted, token),
            headers={'User-Agent': self._USER_AGENT})
        fmt_json = self._download_json(
            request, video_id, 'Downloading windows video JSON')
        result = fmt_json.get('resultDes')
        # Guard against a missing 'resultDes' key: the original called
        # result.lower() unconditionally and crashed with AttributeError.
        if not result or result.lower() != 'ok':
            raise ExtractorError(
                '%s returned error: %s' % (self.IE_NAME, result), expected=True)
        for format_id, video_url in fmt_json['resultObject'].items():
            if format_id == 'token' or not video_url.startswith('http'):
                continue
            if 'geodeswowsmpra3player' in video_url:
                # These videos are protected by DRM; the f4m downloader
                # doesn't support them, so skip instead of building the
                # manifest URL (dead code removed):
                # f4m_path = video_url.split('smil:', 1)[-1].split('free_', 1)[0]
                # f4m_url = 'http://drg.antena3.com/{0}hds/es/sd.f4m'.format(f4m_path)
                continue
            f4m_url = video_url[:-9] + '/manifest.f4m'
            formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
        self._sort_formats(formats)
        path_data = player.get('pathData')
        episode = self._download_xml(
            self._EPISODE_URL_TEMPLATE % path_data, video_id,
            'Downloading episode XML')
        duration = float_or_none(xpath_text(
            episode, './media/asset/info/technical/contentDuration', 'duration'))
        art = episode.find('./media/asset/info/art')
        title = xpath_text(art, './name', 'title')
        description = xpath_text(art, './description', 'description')
        thumbnail = xpath_text(episode, './media/asset/files/background', 'thumbnail')
        subtitles = {}
        subtitle_url = xpath_text(episode, './media/asset/files/subtitle', 'subtitle')
        if subtitle_url:
            subtitles['es'] = [{
                'ext': 'srt',
                'url': subtitle_url,
            }]
        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats,
            'subtitles': subtitles,
        }
| |
from flask import json
from nose.tools import assert_equal, assert_in
from app import db
from app.models import Supplier, ContactInformation, AuditEvent, \
SelectionAnswers, Framework
from ..helpers import BaseApplicationTest, JSONUpdateTestMixin
class TestGetSupplier(BaseApplicationTest):
    """GET /suppliers/<id> behaviour against a supplier created from fixtures."""

    def setup(self):
        """Insert the example Supplier listing via PUT so each test can GET it."""
        super(TestGetSupplier, self).setup()
        with self.app.app_context():
            payload = self.load_example_listing("Supplier")
            self.supplier = payload
            self.supplier_id = payload['id']
            response = self.client.put(
                '/suppliers/{}'.format(self.supplier_id),
                data=json.dumps({
                    'suppliers': self.supplier
                }),
                content_type='application/json')
            # PUT of a previously unknown supplier should create it
            assert_equal(response.status_code, 201)

    def test_get_non_existent_supplier(self):
        response = self.client.get('/suppliers/100')
        assert_equal(404, response.status_code)

    def test_invalid_supplier_id(self):
        # Non-numeric ids don't match the route, so 404 rather than 400
        response = self.client.get('/suppliers/abc123')
        assert_equal(404, response.status_code)

    def test_get_supplier(self):
        response = self.client.get('/suppliers/{}'.format(self.supplier_id))
        data = json.loads(response.get_data())
        assert_equal(200, response.status_code)
        assert_equal(self.supplier_id, data['suppliers']['id'])
        assert_equal(self.supplier['name'], data['suppliers']['name'])

    def test_supplier_clients_exist(self):
        # The example listing fixture is expected to carry 3 clients
        response = self.client.get('/suppliers/{}'.format(self.supplier_id))
        data = json.loads(response.get_data())
        assert_equal(200, response.status_code)
        assert_in('clients', data['suppliers'].keys())
        assert_equal(3, len(data['suppliers']['clients']))

    def test_supplier_client_key_still_exists_even_without_clients(self):
        # Insert a new supplier with a different id and no clients
        with self.app.app_context():
            new_payload = self.load_example_listing("Supplier")
            new_payload['id'] = 111111
            new_payload['clients'] = []
            response = self.client.put(
                '/suppliers/{}'.format(new_payload['id']),
                data=json.dumps({
                    'suppliers': new_payload
                }),
                content_type='application/json')
            assert_equal(response.status_code, 201)
        response = self.client.get('/suppliers/{}'.format(new_payload['id']))
        data = json.loads(response.get_data())
        assert_equal(200, response.status_code)
        assert_in('clients', data['suppliers'].keys())
        assert_equal(0, len(data['suppliers']['clients']))

    def test_get_supplier_returns_service_counts(self):
        self.setup_dummy_services(
            5, supplier_id=self.supplier_id, framework_id=1
        )
        self.setup_dummy_services(
            10, start_id=5, supplier_id=self.supplier_id, framework_id=2
        )
        self.setup_dummy_services(
            15, start_id=15, supplier_id=self.supplier_id, framework_id=3
        )
        response = self.client.get('/suppliers/{}'.format(self.supplier_id))
        data = json.loads(response.get_data())
        # NOTE(review): only two frameworks appear in the counts even though
        # services were created on three — presumably framework 2's services
        # are excluded (e.g. framework not live); confirm against the
        # service_counts implementation.
        assert_equal(data['suppliers']['service_counts'], {
            u'G-Cloud 5': 15,
            u'G-Cloud 6': 5
        })
class TestListSuppliers(BaseApplicationTest):
    """GET /suppliers listing: prefix filtering and pagination."""

    def setup(self):
        super(TestListSuppliers, self).setup()
        # Supplier names like u"Supplier {n}"
        self.setup_dummy_suppliers(7)

    def test_query_string_missing(self):
        response = self.client.get('/suppliers')
        assert_equal(200, response.status_code)

    def test_query_string_prefix_empty(self):
        response = self.client.get('/suppliers?prefix=')
        assert_equal(200, response.status_code)

    def test_query_string_prefix_returns_none(self):
        # No dummy supplier name starts with "canada"
        response = self.client.get('/suppliers?prefix=canada')
        assert_equal(200, response.status_code)
        data = json.loads(response.get_data())
        assert_equal(0, len(data['suppliers']))

    def test_other_prefix_returns_non_alphanumeric_suppliers(self):
        with self.app.app_context():
            db.session.add(
                Supplier(supplier_id=999, name=u"123 Supplier")
            )
            # Supplier needs at least one service to appear in listings
            self.setup_dummy_service(service_id=123, supplier_id=999)
            db.session.commit()
            response = self.client.get('/suppliers?prefix=123')
            data = json.loads(response.get_data())
            assert_equal(200, response.status_code)
            assert_equal(1, len(data['suppliers']))
            assert_equal(999, data['suppliers'][0]['id'])
            assert_equal(
                u"123 Supplier",
                data['suppliers'][0]['name']
            )

    def test_query_string_prefix_returns_paginated_page_one(self):
        # 7 matching suppliers with a page size of 5 -> first page has 5
        response = self.client.get('/suppliers?prefix=s')
        data = json.loads(response.get_data())
        assert_equal(200, response.status_code)
        assert_equal(5, len(data['suppliers']))
        next_link = data['links']['next']
        assert_in('page=2', next_link)

    def test_query_string_prefix_returns_paginated_page_two(self):
        # Remaining 2 of the 7 suppliers appear on page 2
        response = self.client.get('/suppliers?prefix=s&page=2')
        data = json.loads(response.get_data())
        assert_equal(response.status_code, 200)
        assert_equal(len(data['suppliers']), 2)
        prev_link = data['links']['prev']
        assert_in('page=1', prev_link)

    def test_query_string_prefix_page_out_of_range(self):
        response = self.client.get('/suppliers?prefix=s&page=10')
        assert_equal(response.status_code, 404)

    def test_query_string_prefix_invalid_page_argument(self):
        response = self.client.get('/suppliers?prefix=s&page=a')
        assert_equal(response.status_code, 400)

    def test_below_one_page_number_is_404(self):
        response = self.client.get('/suppliers?page=0')
        assert_equal(response.status_code, 404)
class TestListSuppliersOnFramework(BaseApplicationTest):
    """GET /suppliers?framework=... filtering by framework membership."""

    def setup(self):
        """Create three suppliers: one active, one on another framework,
        one whose only service is not published."""
        super(TestListSuppliersOnFramework, self).setup()
        with self.app.app_context():
            db.session.add(
                Supplier(supplier_id=1, name=u"Active")
            )
            db.session.add(
                Supplier(supplier_id=2, name=u"Inactive Framework")
            )
            db.session.add(
                Supplier(supplier_id=3, name=u"Unpublished Service")
            )
            # Supplier 1: published service on the default framework
            self.setup_dummy_service(
                service_id=1, supplier_id=1
            )
            # Supplier 2: service on a different framework (id=2)
            self.setup_dummy_service(
                service_id=2, supplier_id=2, framework_id=2
            )
            # Supplier 3: service exists but is only 'enabled', not published
            self.setup_dummy_service(
                service_id=3, supplier_id=3, status='enabled'
            )
            db.session.commit()

    def test_invalid_framework_returns_400(self):
        response = self.client.get('/suppliers?framework=invalid!')
        assert_equal(400, response.status_code)

    def test_should_return_suppliers_on_framework(self):
        response = self.client.get('/suppliers?framework=gcloud')
        assert_equal(200, response.status_code)
        data = json.loads(response.get_data())
        assert_equal(1, len(data['suppliers']))
        assert_equal('Active', data['suppliers'][0]['name'])

    def test_should_return_no_suppliers_no_framework(self):
        response = self.client.get('/suppliers?framework=bad')
        assert_equal(400, response.status_code)

    def test_should_return_all_suppliers_if_no_framework(self):
        response = self.client.get('/suppliers')
        assert_equal(200, response.status_code)
        data = json.loads(response.get_data())
        assert_equal(3, len(data['suppliers']))
class TestPutSupplier(BaseApplicationTest, JSONUpdateTestMixin):
    """PUT /suppliers/<id>: creation and JSON-schema validation."""

    # method/endpoint drive the shared JSONUpdateTestMixin checks
    method = "put"
    endpoint = "/suppliers/123456"

    def setup(self):
        super(TestPutSupplier, self).setup()

    def put_import_supplier(self, supplier, route_parameter=None):
        """PUT *supplier* to /suppliers/<id>; route id defaults to the
        payload's own 'id' (or 1 if absent)."""
        if route_parameter is None:
            route_parameter = '/{}'.format(supplier.get('id', 1))
        return self.client.put(
            '/suppliers' + route_parameter,
            data=json.dumps({
                'suppliers': supplier
            }),
            content_type='application/json')

    def test_add_a_new_supplier(self):
        with self.app.app_context():
            payload = self.load_example_listing("Supplier")
            response = self.put_import_supplier(payload)
            assert_equal(response.status_code, 201)
            supplier = Supplier.query.filter(
                Supplier.supplier_id == 123456
            ).first()
            assert_equal(supplier.name, payload['name'])

    def test_null_clients_list(self):
        # A missing 'clients' key should default to an empty list
        with self.app.app_context():
            payload = self.load_example_listing("Supplier")
            del payload['clients']
            response = self.put_import_supplier(payload)
            assert_equal(response.status_code, 201)
            supplier = Supplier.query.filter(
                Supplier.supplier_id == 123456
            ).first()
            assert_equal(supplier.clients, [])

    def test_reinserting_the_same_supplier(self):
        # Re-PUTting must not duplicate or orphan contact information rows
        with self.app.app_context():
            payload = self.load_example_listing("Supplier")
            example_listing_contact_information = payload['contactInformation']
            # Exact loop number is arbitrary
            for i in range(3):
                response = self.put_import_supplier(payload)
                assert_equal(response.status_code, 201)
                supplier = Supplier.query.filter(
                    Supplier.supplier_id == 123456
                ).first()
                assert_equal(supplier.name, payload['name'])
                contact_informations = ContactInformation.query.filter(
                    ContactInformation.supplier_id == supplier.supplier_id
                ).all()
                assert_equal(
                    len(example_listing_contact_information),
                    len(contact_informations)
                )
                # Contact Information without a supplier_id should not exist
                contact_informations_no_supplier_id = \
                    ContactInformation.query.filter(
                        ContactInformation.supplier_id == None  # noqa
                    ).all()
                assert_equal(
                    0,
                    len(contact_informations_no_supplier_id)
                )

    def test_cannot_put_to_root_suppliers_url(self):
        payload = self.load_example_listing("Supplier")
        response = self.put_import_supplier(payload, "")
        assert_equal(response.status_code, 405)

    def test_supplier_json_id_does_not_match_route_id_parameter(self):
        payload = self.load_example_listing("Supplier")
        response = self.put_import_supplier(payload, '/1234567890')
        assert_equal(response.status_code, 400)
        assert_in('id parameter must match id in data',
                  json.loads(response.get_data())['error'])

    def test_when_supplier_has_missing_contact_information(self):
        payload = self.load_example_listing("Supplier")
        payload.pop('contactInformation')
        response = self.put_import_supplier(payload)
        assert_equal(response.status_code, 400)
        for item in ['Invalid JSON must have', 'contactInformation']:
            assert_in(item,
                      json.loads(response.get_data())['error'])

    def test_when_supplier_has_missing_keys(self):
        payload = self.load_example_listing("Supplier")
        payload.pop('id')
        payload.pop('name')
        response = self.put_import_supplier(payload)
        assert_equal(response.status_code, 400)
        for item in ['Invalid JSON must have', 'id', 'name']:
            assert_in(item,
                      json.loads(response.get_data())['error'])

    def test_when_supplier_contact_information_has_missing_keys(self):
        payload = self.load_example_listing("Supplier")
        payload['contactInformation'][0].pop('email')
        payload['contactInformation'][0].pop('postcode')
        payload['contactInformation'][0].pop('contactName')
        response = self.put_import_supplier(payload)
        assert_equal(response.status_code, 400)
        for item in ['Invalid JSON must have',
                     'contactName',
                     'email',
                     'postcode']:
            assert_in(item,
                      json.loads(response.get_data())['error'])

    def test_when_supplier_has_extra_keys(self):
        # Schema forbids unknown top-level properties
        payload = self.load_example_listing("Supplier")
        payload.update({'newKey': 1})
        response = self.put_import_supplier(payload)
        assert_equal(response.status_code, 400)
        assert_in('Additional properties are not allowed',
                  json.loads(response.get_data())['error'])

    def test_when_supplier_contact_information_has_extra_keys(self):
        payload = self.load_example_listing("Supplier")
        payload['contactInformation'][0].update({'newKey': 1})
        response = self.put_import_supplier(payload)
        assert_equal(response.status_code, 400)
        assert_in('Additional properties are not allowed',
                  json.loads(response.get_data())['error'])

    def test_supplier_duns_number_invalid(self):
        payload = self.load_example_listing("Supplier")
        payload.update({'dunsNumber': "only-digits-permitted"})
        response = self.put_import_supplier(payload)
        assert_equal(response.status_code, 400)
        for item in ['only-digits-permitted', 'does not match']:
            assert_in(item,
                      json.loads(response.get_data())['error'])

    def test_supplier_esourcing_id_invalid(self):
        payload = self.load_example_listing("Supplier")
        payload.update({'eSourcingId': "only-digits-permitted"})
        response = self.put_import_supplier(payload)
        assert_equal(response.status_code, 400)
        for item in ['only-digits-permitted', 'does not match']:
            assert_in(item,
                      json.loads(response.get_data())['error'])

    def test_when_supplier_contact_information_email_invalid(self):
        payload = self.load_example_listing("Supplier")
        payload['contactInformation'][0].update({'email': "bad-email-99"})
        response = self.put_import_supplier(payload)
        assert_equal(response.status_code, 400)
        for item in ['bad-email-99', 'is not a']:
            assert_in(item,
                      json.loads(response.get_data())['error'])
class TestUpdateSupplier(BaseApplicationTest, JSONUpdateTestMixin):
    """POST /suppliers/<id>: partial updates and audit trail."""

    method = "post"
    endpoint = "/suppliers/123456"

    def setup(self):
        """Create the fixture supplier that every update targets."""
        super(TestUpdateSupplier, self).setup()
        with self.app.app_context():
            payload = self.load_example_listing("Supplier")
            self.supplier = payload
            self.supplier_id = payload['id']
            self.client.put('/suppliers/{}'.format(self.supplier_id),
                            data=json.dumps({'suppliers': self.supplier}),
                            content_type='application/json')

    def update_request(self, data=None, user=None, full_data=None):
        """POST an update; *full_data* overrides the default envelope of
        {'suppliers': data, 'updated_by': user}."""
        return self.client.post(
            self.endpoint,
            data=json.dumps({
                'suppliers': data,
                'updated_by': user or 'supplier@user.dmdev',
            } if full_data is None else full_data),
            content_type='application/json',
        )

    def test_empty_update_request(self):
        response = self.update_request(full_data={})
        assert_equal(response.status_code, 400)

    def test_empty_update_supplier(self):
        # An empty update payload is a no-op, not an error
        response = self.update_request({})
        assert_equal(response.status_code, 200)

    def test_name_update(self):
        response = self.update_request({'name': "New Name"})
        assert_equal(response.status_code, 200)
        with self.app.app_context():
            supplier = Supplier.query.filter(
                Supplier.supplier_id == 123456
            ).first()
            assert_equal(supplier.name, "New Name")

    def test_supplier_update_creates_audit_event(self):
        self.update_request({'name': "Name"})
        with self.app.app_context():
            supplier = Supplier.query.filter(
                Supplier.supplier_id == 123456
            ).first()
            audit = AuditEvent.query.filter(
                AuditEvent.object == supplier
            ).first()
            assert_equal(audit.type, "supplier_update")
            assert_equal(audit.user, "supplier@user.dmdev")
            assert_equal(audit.data, {
                'update': {'name': "Name"},
            })

    def test_update_response_matches_payload(self):
        payload = self.load_example_listing("Supplier")
        response = self.update_request({'name': "New Name"})
        assert_equal(response.status_code, 200)
        payload.update({'name': 'New Name'})
        supplier = json.loads(response.get_data())['suppliers']
        # contactInformation and links are shaped differently in the
        # response, so compare the remaining fields only
        payload.pop('contactInformation')
        supplier.pop('contactInformation')
        supplier.pop('links')
        assert_equal(supplier, payload)

    def test_update_all_fields(self):
        response = self.update_request({
            'name': "New Name",
            'description': "New Description",
            'dunsNumber': "010101",
            'eSourcingId': "010101",
            'clients': ["Client1", "Client2"]
        })
        assert_equal(response.status_code, 200)
        with self.app.app_context():
            supplier = Supplier.query.filter(
                Supplier.supplier_id == 123456
            ).first()
        assert_equal(supplier.name, 'New Name')
        assert_equal(supplier.description, "New Description")
        assert_equal(supplier.duns_number, "010101")
        assert_equal(supplier.esourcing_id, "010101")
        assert_equal(supplier.clients, ["Client1", "Client2"])

    # NOTE(review): "oiginal" typo in this test name — worth renaming in a
    # standalone commit.
    def test_supplier_json_id_does_not_match_oiginal_id(self):
        response = self.update_request({
            'id': 234567,
            'name': "New Name"
        })
        assert_equal(response.status_code, 400)

    def test_update_missing_supplier(self):
        response = self.client.post(
            '/suppliers/234567',
            data=json.dumps({'suppliers': {}}),
            content_type='application/json',
        )
        assert_equal(response.status_code, 404)

    def test_links_and_contact_information_are_ignored(self):
        response = self.update_request(full_data={'suppliers': {
            'name': "New Name",
            'contactInformation': [],
            'links': [],
        }, 'links': [], 'updated_by': 'supplier@user.dmdev'})
        with self.app.app_context():
            supplier = Supplier.query.filter(
                Supplier.supplier_id == 123456
            ).first()
        assert_equal(response.status_code, 200)
        # Fixture's 2 contact rows survive despite the empty list above
        assert_equal(len(supplier.contact_information), 2)

    def test_update_with_unexpected_keys(self):
        response = self.update_request({
            'new_key': "value",
            'name': "New Name"
        })
        assert_equal(response.status_code, 400)

    def test_update_without_updated_by(self):
        response = self.update_request(full_data={
            'suppliers': {'name': "New Name"},
        })
        assert_equal(response.status_code, 400)
class TestUpdateContactInformation(BaseApplicationTest):
    """POST /suppliers/<id>/contact-information/<id>: partial updates."""

    def setup(self):
        """Create the fixture supplier and remember its first contact id."""
        super(TestUpdateContactInformation, self).setup()
        with self.app.app_context():
            payload = self.load_example_listing("Supplier")
            self.supplier = payload
            self.supplier_id = payload['id']
            response = self.client.put(
                '/suppliers/{}'.format(self.supplier_id),
                data=json.dumps({'suppliers': self.supplier}),
                content_type='application/json')
            supplier = json.loads(response.get_data())['suppliers']
            self.contact_id = supplier['contactInformation'][0]['id']

    def update_request(self, data=None, user=None, full_data=None):
        """POST an update; *full_data* overrides the default envelope of
        {'contactInformation': data, 'updated_by': user}."""
        return self.client.post(
            '/suppliers/123456/contact-information/{}'.format(self.contact_id),
            data=json.dumps({
                'contactInformation': data,
                'updated_by': user or 'supplier@user.dmdev',
            } if full_data is None else full_data),
            content_type='application/json',
        )

    def test_empty_update_request(self):
        response = self.update_request(full_data={})
        assert_equal(response.status_code, 400)

    def test_empty_update(self):
        # An empty update payload is a no-op, not an error
        response = self.update_request({})
        assert_equal(response.status_code, 200)

    def test_simple_field_update(self):
        response = self.update_request({
            'city': "New City"
        })
        assert_equal(response.status_code, 200)
        with self.app.app_context():
            contact = ContactInformation.query.filter(
                ContactInformation.id == self.contact_id
            ).first()
            assert_equal(contact.city, "New City")

    def test_update_creates_audit_event(self):
        self.update_request({
            'city': "New City"
        })
        with self.app.app_context():
            contact = ContactInformation.query.filter(
                ContactInformation.id == self.contact_id
            ).first()
            # Audit events are attached to the owning supplier
            audit = AuditEvent.query.filter(
                AuditEvent.object == contact.supplier
            ).first()
            assert_equal(audit.type, "contact_update")
            assert_equal(audit.user, "supplier@user.dmdev")
            assert_equal(audit.data, {
                'update': {'city': "New City"},
            })

    def test_update_response_matches_payload(self):
        payload = self.load_example_listing("Supplier")
        response = self.update_request({
            'city': "New City"
        })
        assert_equal(response.status_code, 200)
        payload = payload['contactInformation'][0]
        payload.update({'city': 'New City'})
        # id and links are generated server-side; exclude from comparison
        payload.pop('links')
        contact = json.loads(response.get_data())['contactInformation']
        contact.pop('id')
        contact.pop('links')
        assert_equal(contact, payload)

    def test_update_all_fields(self):
        response = self.update_request({
            "contactName": "New contact",
            "phoneNumber": "New phone",
            "email": "new-value@example.com",
            "website": "example.com",
            "address1": "New address1",
            "address2": "New address2",
            "city": "New city",
            "country": "New country",
            "postcode": "New postcode",
        })
        assert_equal(response.status_code, 200)
        with self.app.app_context():
            contact = ContactInformation.query.filter(
                ContactInformation.id == self.contact_id
            ).first()
        assert_equal(contact.contact_name, "New contact")
        assert_equal(contact.phone_number, "New phone")
        assert_equal(contact.email, "new-value@example.com")
        assert_equal(contact.website, "example.com")
        assert_equal(contact.address1, "New address1")
        assert_equal(contact.address2, "New address2")
        assert_equal(contact.city, "New city")
        assert_equal(contact.country, "New country")
        assert_equal(contact.postcode, "New postcode")

    # NOTE(review): "oiginal" typo in the next two test names — worth
    # renaming in a standalone commit.
    def test_supplier_json_id_does_not_match_oiginal_id(self):
        response = self.update_request({
            'supplierId': 234567,
            'city': "New City"
        })
        assert_equal(response.status_code, 400)

    def test_json_id_does_not_match_oiginal_id(self):
        response = self.update_request({
            'id': 2,
            'city': "New City"
        })
        assert_equal(response.status_code, 400)

    def test_update_missing_supplier(self):
        response = self.client.post(
            '/suppliers/234567/contact-information/%s' % self.contact_id,
            data=json.dumps({}),
            content_type='application/json',
        )
        assert_equal(response.status_code, 404)

    def test_update_missing_contact_information(self):
        response = self.client.post(
            '/suppliers/123456/contact-information/100000',
            data=json.dumps({'contactInformation': {}}),
            content_type='application/json',
        )
        assert_equal(response.status_code, 404)

    def test_update_with_unexpected_keys(self):
        response = self.update_request({
            'new_key': "value",
            'city': "New City"
        })
        assert_equal(response.status_code, 400)

    def test_update_ignores_links(self):
        response = self.update_request({
            'links': "value",
            'city': "New City"
        })
        assert_equal(response.status_code, 200)

    def test_update_without_updated_by(self):
        response = self.update_request(full_data={
            'contactInformation': {'city': "New City"},
        })
        assert_equal(response.status_code, 400)
class TestGetSupplierSelectionAnswers(BaseApplicationTest):
    """GET /suppliers/<id>/selection-answers/<framework-slug>."""

    def setup(self):
        """Create one dummy supplier with answers on framework id 2."""
        super(TestGetSupplierSelectionAnswers, self).setup()
        self.setup_dummy_suppliers(1)
        with self.app.app_context():
            # framework_id=2 corresponds to the 'g-cloud-4' slug below
            answers = SelectionAnswers(
                supplier_id=0, framework_id=2,
                question_answers={})
            db.session.add(answers)
            db.session.commit()

    def test_get_selection_answers(self):
        response = self.client.get(
            '/suppliers/0/selection-answers/g-cloud-4')
        data = json.loads(response.get_data())
        assert_equal(response.status_code, 200)
        assert_equal(data['selectionAnswers']['supplierId'], 0)
        assert_equal(data['selectionAnswers']['frameworkSlug'],
                     'g-cloud-4')

    def test_get_non_existent_by_framework(self):
        response = self.client.get(
            '/suppliers/0/selection-answers/g-cloud-5')
        assert_equal(response.status_code, 404)

    def test_get_non_existent_by_supplier(self):
        response = self.client.get(
            '/suppliers/123/selection-answers/g-cloud-4')
        assert_equal(response.status_code, 404)
class TestSetSupplierSelectionAnswers(BaseApplicationTest):
    """Tests for creating/updating selection answers via PUT."""
    # Used by shared validation tests in the base class.
    method = 'put'
    endpoint = '/suppliers/0/selection-answers/g-cloud-4'
    def setup(self):
        """Create one open test framework and one dummy supplier."""
        super(TestSetSupplierSelectionAnswers, self).setup()
        with self.app.app_context():
            framework = Framework(
                slug='test-open',
                name='Test open',
                framework='gcloud',
                status='open')
            db.session.add(framework)
            db.session.commit()
            self.setup_dummy_suppliers(1)
    def teardown(self):
        """Remove every framework created by these tests ('test-*' slugs)."""
        super(TestSetSupplierSelectionAnswers, self).teardown()
        with self.app.app_context():
            frameworks = Framework.query.filter(
                Framework.slug.like('test-%')
            ).all()
            for framework in frameworks:
                db.session.delete(framework)
            db.session.commit()
    def test_add_new_selection_answers(self):
        """PUT with no existing record creates answers and returns 201."""
        with self.app.app_context():
            response = self.client.put(
                '/suppliers/0/selection-answers/test-open',
                data=json.dumps({
                    'updated_by': 'testing',
                    'selectionAnswers': {
                        'supplierId': 0,
                        'frameworkSlug': 'test-open',
                        'questionAnswers': {
                            'question': 'answer'
                        }
                    }
                }),
                content_type='application/json')
            assert_equal(response.status_code, 201)
            # The record must now be retrievable from the database.
            answers = SelectionAnswers \
                .find_by_supplier_and_framework(0, 'test-open')
            assert_equal(answers.question_answers['question'], 'answer')
    def test_update_existing_selection_answers(self):
        """PUT over an existing record overwrites it and returns 200."""
        with self.app.app_context():
            framework_id = Framework.query.filter(
                Framework.slug == 'test-open').first().id
            # Pre-populate the record that the PUT should replace.
            answers = SelectionAnswers(
                supplier_id=0,
                framework_id=framework_id,
                question_answers={'question': 'answer'})
            db.session.add(answers)
            db.session.commit()
            response = self.client.put(
                '/suppliers/0/selection-answers/test-open',
                data=json.dumps({
                    'updated_by': 'testing',
                    'selectionAnswers': {
                        'supplierId': 0,
                        'frameworkSlug': 'test-open',
                        'questionAnswers': {
                            'question': 'answer2',
                        }
                    }
                }),
                content_type='application/json')
            assert_equal(response.status_code, 200)
            answers = SelectionAnswers \
                .find_by_supplier_and_framework(0, 'test-open')
            assert_equal(answers.question_answers['question'], 'answer2')
    def test_can_only_set_questions_on_open_framework(self):
        """PUT against a framework that is not 'open' is rejected with 400."""
        with self.app.app_context():
            framework = Framework(
                slug='test-pending',
                name='Test pending',
                framework='gcloud',
                status='pending')
            db.session.add(framework)
            db.session.commit()
            response = self.client.put(
                '/suppliers/0/selection-answers/test-pending',
                data=json.dumps({
                    'updated_by': 'testing',
                    'selectionAnswers': {
                        'supplierId': 0,
                        'frameworkSlug': 'test-pending',
                        'questionAnswers': {
                            'question': 'answer'
                        }}}),
                content_type='application/json')
            assert_equal(response.status_code, 400)
    def test_invalid_payload_fails(self):
        """A payload missing required keys fails schema validation (400)."""
        with self.app.app_context():
            response = self.client.put(
                '/suppliers/0/selection-answers/test-open',
                data=json.dumps({
                    'selectionAnswers': {
                        'invalid': {
                            'question': 'answer'
                        }
                    }
                }),
                content_type='application/json')
            assert_equal(response.status_code, 400)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011-2014, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" The ogm module provides Object to Graph Mapping features similar to ORM
facilities available for relational databases. All functionality is available
through the :py:class:`Store` class which is bound to a specific
:py:class:`neo4j.Graph` instance on creation.
Conceptually, a mapped object "owns" a single node within the graph along with
all of that node's outgoing relationships. These features are managed via a
pair of attributes called `__node__` and `__rel__` which store details of the
mapped node and the outgoing relationships respectively. The only specific
requirement for a mapped object is that it has a nullary constructor which can
be used to create new instances.
The `__node__` attribute holds a :py:class:`neo4j.Node` object which is the
node to which this object is mapped. If the attribute does not exist, or is
:py:const:`None`, the object is considered "unsaved".
The `__rel__` attribute holds a dictionary of outgoing relationship details.
Each key corresponds to a relationship type and each value to a list of
2-tuples representing the outgoing relationships of that type. Within each
2-tuple, the first value holds a dictionary of relationship properties (which
may be empty) and the second value holds the endpoint. The endpoint may be
either a :py:class:`neo4j.Node` instance or another mapped object. Any such
objects which are unsaved will be lazily saved as required by creation of the
relationship itself. The following data structure outline shows an example of
a `__rel__` attribute (where `alice` and `bob` represent other mapped objects)::
{
"LIKES": [
({}, alice),
({"since": 1999}, bob)
]
}
To manage relationships, use the :py:func:`Store.relate` and
:py:func:`Store.separate` methods. Neither method makes any calls to the
database and operates only on the local `__rel__` attribute. Changes must be
explicitly saved via one of the available save methods. The
:py:func:`Store.load_related` method loads all objects marked as related by
the `__rel__` attribute.
The code below shows an example of usage::
from py2neo import neo4j, ogm
class Person(object):
def __init__(self, email=None, name=None, age=None):
self.email = email
self.name = name
self.age = age
def __str__(self):
return self.name
graph = neo4j.Graph()
store = ogm.Store(graph)
alice = Person("alice@example.com", "Alice", 34)
store.save_unique("People", "email", alice.email, alice)
bob = Person("bob@example.org", "Bob", 66)
carol = Person("carol@example.net", "Carol", 42)
store.relate(alice, "LIKES", bob) # these relationships are not saved
store.relate(alice, "LIKES", carol) # until `alice` is saved
store.save(alice)
friends = store.load_related(alice, "LIKES", Person)
print("Alice likes {0}".format(" and ".join(str(f) for f in friends)))
"""
from __future__ import unicode_literals
from py2neo.batch import WriteBatch
from py2neo.core import Node
class NotSaved(ValueError):
    """ Error raised when a bound node is required but the object in
    question has not yet been saved.
    """
class Store(object):
    """ Object-to-graph mapping store bound to a single
    :py:class:`neo4j.Graph` instance.

    Mapped objects carry their state in two special attributes:
    `__node__` (the bound :py:class:`neo4j.Node`; absent or
    :py:const:`None` when unsaved) and `__rel__` (a dict of outgoing
    relationship details keyed by relationship type).
    """

    def __init__(self, graph):
        self.graph = graph
        # Older servers have no OPTIONAL MATCH; fall back to the legacy
        # `[r?]` syntax so delete() works against both.
        if self.graph.supports_optional_match:
            self.__delete_query = ("START a=node({A}) "
                                   "OPTIONAL MATCH a-[r]-b "
                                   "DELETE r, a")
        else:
            self.__delete_query = ("START a=node({A}) "
                                   "MATCH a-[r?]-b "
                                   "DELETE r, a")

    def _assert_saved(self, subj):
        """ Raise :py:exc:`NotSaved` unless `subj` is bound to a node. """
        try:
            node = subj.__node__
            if node is None:
                raise NotSaved(subj)
        except AttributeError:
            raise NotSaved(subj)

    def _get_node(self, endpoint):
        """ Return the node behind `endpoint`, first saving `endpoint` if it
        is an unsaved mapped object. `endpoint` may be a node or an object.
        """
        if isinstance(endpoint, Node):
            return endpoint
        if not hasattr(endpoint, "__node__"):
            self.save(endpoint)
        return endpoint.__node__

    def _is_same(self, obj, endpoint):
        """ Return whether `endpoint` (node or mapped object) refers to the
        same entity as the mapped object `obj`.
        """
        if isinstance(endpoint, Node):
            if hasattr(obj, "__node__"):
                return endpoint == obj.__node__
            else:
                return False
        else:
            return endpoint is obj

    def is_saved(self, subj):
        """ Return :py:const:`True` if the object `subj` has been saved to
        the database, :py:const:`False` otherwise.

        :param subj: the object to test
        """
        return hasattr(subj, "__node__") and subj.__node__ is not None

    def relate(self, subj, rel_type, obj, properties=None):
        """ Define a relationship between `subj` and `obj` of type `rel_type`.
        This is a local operation only: nothing is saved to the database until
        a save method is called. Relationship properties may optionally be
        specified.

        :param subj: the object bound to the start of the relationship
        :param rel_type: the relationship type
        :param obj: the object bound to the end of the relationship
        :param properties: properties attached to the relationship (optional)
        """
        if not hasattr(subj, "__rel__"):
            subj.__rel__ = {}
        if rel_type not in subj.__rel__:
            subj.__rel__[rel_type] = []
        subj.__rel__[rel_type].append((properties or {}, obj))

    def separate(self, subj, rel_type, obj=None):
        """ Remove any relationship definitions which match the criteria
        specified. This is a local operation only: nothing is saved to the
        database until a save method is called. If no object is specified, all
        relationships of type `rel_type` are removed.

        :param subj: the object bound to the start of the relationship
        :param rel_type: the relationship type
        :param obj: the object bound to the end of the relationship (optional)
        """
        if not hasattr(subj, "__rel__"):
            return
        if rel_type not in subj.__rel__:
            return
        if obj is None:
            del subj.__rel__[rel_type]
        else:
            # Keep only the entries whose endpoint is not `obj`.
            subj.__rel__[rel_type] = [
                (props, endpoint)
                for props, endpoint in subj.__rel__[rel_type]
                if not self._is_same(obj, endpoint)
            ]

    def load_related(self, subj, rel_type, cls):
        """ Load all nodes related to `subj` by a relationship of type
        `rel_type` into objects of type `cls`.

        :param subj: the object bound to the start of the relationship
        :param rel_type: the relationship type
        :param cls: the class to load all related objects into
        :return: list of `cls` instances
        """
        if not hasattr(subj, "__rel__"):
            return []
        if rel_type not in subj.__rel__:
            return []
        return [
            self.load(cls, self._get_node(endpoint))
            for rel_props, endpoint in subj.__rel__[rel_type]
        ]

    def load(self, cls, node):
        """ Load and return an object of type `cls` from database node `node`.

        :param cls: the class of the object to be returned
        :param node: the node from which to load object data
        :return: a `cls` instance
        """
        subj = cls()
        setattr(subj, "__node__", node)
        self.reload(subj)
        return subj

    def load_indexed(self, index_name, key, value, cls):
        """ Load zero or more indexed nodes from the database into a list of
        objects.

        :param index_name: the node index name
        :param key: the index key
        :param value: the index value
        :param cls: the class of the object to be returned
        :return: a list of `cls` instances
        """
        index = self.graph.legacy.get_index(Node, index_name)
        nodes = index.get(key, value)
        return [self.load(cls, node) for node in nodes]

    def load_unique(self, index_name, key, value, cls):
        """ Load a uniquely indexed node from the database into an object.

        :param index_name: the node index name
        :param key: the index key
        :param value: the index value
        :param cls: the class of the object to be returned
        :return: an instance of `cls` containing the loaded data
        """
        index = self.graph.legacy.get_index(Node, index_name)
        nodes = index.get(key, value)
        if not nodes:
            return None
        if len(nodes) > 1:
            raise LookupError("Multiple nodes match the given criteria; "
                              "consider using `load_all` instead.")
        return self.load(cls, nodes[0])

    def reload(self, subj):
        """ Reload properties and relationships from a database node into
        `subj`.

        :param subj: the object to reload
        :raise NotSaved: if `subj` is not linked to a database node
        """
        self._assert_saved(subj)
        # naively copy properties from node to object
        properties = subj.__node__.get_properties()
        # Clear public attributes that no longer exist on the node.
        for key in subj.__dict__:
            if not key.startswith("_") and key not in properties:
                setattr(subj, key, None)
        for key, value in properties.items():
            if not key.startswith("_"):
                setattr(subj, key, value)
        # Rebuild __rel__ from the node's outgoing relationships.
        subj.__rel__ = {}
        for rel in subj.__node__.match():
            if rel.type not in subj.__rel__:
                subj.__rel__[rel.type] = []
            subj.__rel__[rel.type].append((rel.get_properties(), rel.end_node))

    def save(self, subj, node=None):
        """ Save an object to a database node.

        :param subj: the object to save
        :param node: the database node to save to (if omitted, will re-save to
            same node as previous save)
        """
        if node is not None:
            subj.__node__ = node
        # naively copy properties from object to node
        props = {}
        for key, value in subj.__dict__.items():
            if not key.startswith("_"):
                props[key] = value
        if hasattr(subj, "__node__"):
            subj.__node__.set_properties(props)
            # Existing outgoing relationships are replaced wholesale below.
            self.graph.cypher.run("START a=node({a}) MATCH (a)-[r]->(b) DELETE r",
                                  {"a": subj.__node__})
        else:
            subj.__node__, = self.graph.create(props)
        # write rels
        if hasattr(subj, "__rel__"):
            batch = WriteBatch(self.graph)
            for rel_type, rels in subj.__rel__.items():
                for rel_props, endpoint in rels:
                    end_node = self._get_node(endpoint)
                    if end_node not in self.graph:
                        raise ValueError(end_node)
                    batch.create((subj.__node__, rel_type, end_node, rel_props))
            batch.run()
        return subj

    def save_indexed(self, index_name, key, value, *subj):
        """ Save one or more objects to the database, indexed under the
        supplied criteria.

        :param index_name: the node index name
        :param key: the index key
        :param value: the index value
        :param subj: one or more objects to save
        """
        index = self.graph.legacy.get_or_create_index(Node, index_name)
        # Save each object first, then index its bound node. (The previous
        # code nested the calls the other way round -- it passed the *node*
        # to save(), which created and indexed a fresh, empty node -- and
        # its loop variable shadowed the `subj` parameter.)
        for each in subj:
            index.add(key, value, self._get_node(self.save(each)))

    def save_unique(self, index_name, key, value, subj):
        """ Save an object to the database, uniquely indexed under the
        supplied criteria.

        :param index_name: the node index name
        :param key: the index key
        :param value: the index value
        :param subj: the object to save
        """
        index = self.graph.legacy.get_or_create_index(Node, index_name)
        node = index.get_or_create(key, value, {})
        self.save(subj, node)

    def delete(self, subj):
        """ Delete a saved object node from the database as well as all
        incoming and outgoing relationships.

        :param subj: the object to delete from the database
        :raise NotSaved: if `subj` is not linked to a database node
        """
        self._assert_saved(subj)
        node = subj.__node__
        del subj.__node__
        self.graph.cypher.execute(self.__delete_query, {"A": node})
| |
"""
Filename: lqcontrol.py
Authors: Thomas J. Sargent, John Stachurski
Provides a class called LQ for solving linear quadratic control
problems.
"""
from textwrap import dedent
import numpy as np
from numpy import dot
from scipy.linalg import solve
from .matrix_eqn import solve_discrete_riccati
class LQ(object):
    r"""
    This class is for analyzing linear quadratic optimal control
    problems of either the infinite horizon form

        min E sum_{t=0}^{\infty} beta^t r(x_t, u_t)

    with

        r(x_t, u_t) := x_t' R x_t + u_t' Q u_t + 2 u_t' N x_t

    or the finite horizon form

        min E sum_{t=0}^{T-1} beta^t r(x_t, u_t) + beta^T x_T' R_f x_T

    Both are minimized subject to the law of motion

        x_{t+1} = A x_t + B u_t + C w_{t+1}

    Here x is n x 1, u is k x 1, w is j x 1 and the matrices are
    conformable for these dimensions.  The sequence {w_t} is assumed to
    be white noise, with zero mean and E w_t w_t' = I, the j x j
    identity.

    If C is not supplied as a parameter, the model is assumed to be
    deterministic (and C is set to a zero matrix of appropriate
    dimension).

    For this model, the time t value (i.e., cost-to-go) function V_t
    takes the form

        x' P_T x + d_T

    and the optimal policy is of the form u_T = -F_T x_T.  In
    the infinite horizon case, V, P, d and F are all stationary.

    Parameters
    ----------
    Q : array_like(float)
        Q is the payoff (or cost) matrix that corresponds with the
        control variable u and is k x k. Should be symmetric and
        nonnegative definite
    R : array_like(float)
        R is the payoff (or cost) matrix that corresponds with the
        state variable x and is n x n. Should be symmetric and
        non-negative definite
    N : array_like(float)
        N is the cross product term in the payoff, as above. It should
        be k x n.
    A : array_like(float)
        A is part of the state transition as described above. It should
        be n x n
    B : array_like(float)
        B is part of the state transition as described above. It should
        be n x k
    C : array_like(float), optional(default=None)
        C is part of the state transition as described above and
        corresponds to the random variable today. If the model is
        deterministic then C should take default value of None
    beta : scalar(float), optional(default=1)
        beta is the discount parameter
    T : scalar(int), optional(default=None)
        T is the number of periods in a finite horizon problem.
    Rf : array_like(float), optional(default=None)
        Rf is the final (in a finite horizon model) payoff (or cost)
        matrix that corresponds with the control variable u and is n x
        n. Should be symmetric and non-negative definite

    Attributes
    ----------
    Q, R, N, A, B, C, beta, T, Rf : see Parameters
    P : array_like(float)
        P is part of the value function representation of V(x) = x'Px + d
    d : array_like(float)
        d is part of the value function representation of V(x) = x'Px + d
    F : array_like(float)
        F is the policy rule that determines the choice of control in
        each period.
    k, n, j : scalar(int)
        The dimensions of the matrices as presented above
    """
    def __init__(self, Q, R, A, B, C=None, N=None, beta=1, T=None, Rf=None):
        # == Make sure all matrices can be treated as 2D arrays == #
        # NOTE(review): single precision ('float32') limits the accuracy of
        # the Riccati iteration -- confirm 'float64' was not intended.
        converter = lambda X: np.atleast_2d(np.asarray(X, dtype='float32'))
        self.A, self.B, self.Q, self.R, self.N = list(map(converter,
                                                          (A, B, Q, R, N)))
        # == Record dimensions == #
        self.k, self.n = self.Q.shape[0], self.R.shape[0]
        self.beta = beta
        if C is None:
            # == If C not given, then model is deterministic. Set C=0. == #
            self.j = 1
            self.C = np.zeros((self.n, self.j))
        else:
            self.C = converter(C)
            self.j = self.C.shape[1]
        if N is None:
            # == No cross product term in payoff. Set N=0. == #
            self.N = np.zeros((self.k, self.n))
        if T:
            # == Model is finite horizon == #
            self.T = T
            self.Rf = np.asarray(Rf, dtype='float32')
            self.P = self.Rf
            self.d = 0
        else:
            # Infinite horizon: P, d are computed by stationary_values().
            self.P = None
            self.d = None
            self.T = None
        self.F = None
    def __repr__(self):
        return self.__str__()
    def __str__(self):
        # Human-readable summary of the problem dimensions and horizon.
        m = """\
        Linear Quadratic control system
          - beta (discount parameter)   : {b}
          - T (time horizon)            : {t}
          - n (number of state variables)   : {n}
          - k (number of control variables) : {k}
          - j (number of shocks)            : {j}
        """
        t = "infinite" if self.T is None else self.T
        return dedent(m.format(b=self.beta, n=self.n, k=self.k, j=self.j,
                               t=t))
    def update_values(self):
        """
        This method is for updating in the finite horizon case. It
        shifts the current value function

            V_t(x) = x' P_t x + d_t

        and the optimal policy F_t one step *back* in time,
        replacing the pair P_t and d_t with
        P_{t-1} and d_{t-1}, and F_t with
        F_{t-1}
        """
        # === Simplify notation === #
        Q, R, A, B, N, C = self.Q, self.R, self.A, self.B, self.N, self.C
        P, d = self.P, self.d
        # == Some useful matrices == #
        S1 = Q + self.beta * dot(B.T, dot(P, B))
        S2 = self.beta * dot(B.T, dot(P, A)) + N
        S3 = self.beta * dot(A.T, dot(P, A))
        # == Compute F as (Q + beta B'PB)^{-1} (beta B'PA + N) == #
        self.F = solve(S1, S2)
        # === Shift P back in time one step == #
        new_P = R - dot(S2.T, self.F) + S3
        # == Recalling that trace(AB) = trace(BA) == #
        new_d = self.beta * (d + np.trace(dot(P, dot(C, C.T))))
        # == Set new state == #
        self.P, self.d = new_P, new_d
    def stationary_values(self):
        """
        Computes the matrix P and scalar d that represent the value
        function

            V(x) = x' P x + d

        in the infinite horizon case. Also computes the control matrix
        F from u = - Fx

        Returns
        -------
        P : array_like(float)
            P is part of the value function representation of
            V(x) = xPx + d
        F : array_like(float)
            F is the policy rule that determines the choice of control
            in each period.
        d : array_like(float)
            d is part of the value function representation of
            V(x) = xPx + d
        """
        # === simplify notation === #
        Q, R, A, B, N, C = self.Q, self.R, self.A, self.B, self.N, self.C
        # === solve Riccati equation, obtain P === #
        # Discounting is absorbed by scaling A and B with sqrt(beta).
        A0, B0 = np.sqrt(self.beta) * A, np.sqrt(self.beta) * B
        P = solve_discrete_riccati(A0, B0, R, Q, N)
        # == Compute F == #
        S1 = Q + self.beta * dot(B.T, dot(P, B))
        S2 = self.beta * dot(B.T, dot(P, A)) + N
        F = solve(S1, S2)
        # == Compute d == #
        # Geometric sum of the per-period noise contribution.
        d = self.beta * np.trace(dot(P, dot(C, C.T))) / (1 - self.beta)
        # == Bind states and return values == #
        self.P, self.F, self.d = P, F, d
        return P, F, d
    def compute_sequence(self, x0, ts_length=None):
        """
        Compute and return the optimal state and control sequences
        x_0, ..., x_T and u_0,..., u_T under the
        assumption that {w_t} is iid and N(0, 1).

        Parameters
        ===========
        x0 : array_like(float)
            The initial state, a vector of length n
        ts_length : scalar(int)
            Length of the simulation -- defaults to T in finite case

        Returns
        ========
        x_path : array_like(float)
            An n x T matrix, where the t-th column represents x_t
        u_path : array_like(float)
            A k x T matrix, where the t-th column represents u_t
        w_path : array_like(float)
            A j x T matrix, where the t-th column represent w_t
        """
        # === Simplify notation === #
        A, B, C = self.A, self.B, self.C
        # == Preliminaries, finite horizon case == #
        if self.T:
            T = self.T if not ts_length else min(ts_length, self.T)
            self.P, self.d = self.Rf, 0
        # == Preliminaries, infinite horizon case == #
        else:
            T = ts_length if ts_length else 100
            self.stationary_values()
        # == Set up initial condition and arrays to store paths == #
        x0 = np.asarray(x0)
        x0 = x0.reshape(self.n, 1)  # Make sure x0 is a column vector
        x_path = np.empty((self.n, T+1))
        u_path = np.empty((self.k, T))
        w_path = dot(C, np.random.randn(self.j, T+1))
        # == Compute and record the sequence of policies == #
        # In the finite horizon case update_values() iterates backwards, so
        # policies holds F_{T-1}, ..., F_0 and is consumed below via pop()
        # (LIFO), giving F_0 first.
        policies = []
        for t in range(T):
            if self.T:  # Finite horizon case
                self.update_values()
            policies.append(self.F)
        # == Use policy sequence to generate states and controls == #
        F = policies.pop()
        x_path[:, 0] = x0.flatten()
        u_path[:, 0] = - dot(F, x0).flatten()
        for t in range(1, T):
            F = policies.pop()
            Ax, Bu = dot(A, x_path[:, t-1]), dot(B, u_path[:, t-1])
            x_path[:, t] = Ax + Bu + w_path[:, t]
            u_path[:, t] = - dot(F, x_path[:, t])
        # Final state update (no control is chosen at time T).
        Ax, Bu = dot(A, x_path[:, T-1]), dot(B, u_path[:, T-1])
        x_path[:, T] = Ax + Bu + w_path[:, T]
        return x_path, u_path, w_path
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import os
import sys
import csv
import requests
import datetime
# Portal credentials are read from the environment; both variables must be
# set for authenticated requests to succeed.
AUTH = (os.environ.get("DCC_API_KEY"), os.environ.get("DCC_SECRET_KEY"))
# Template for fetching any portal object (experiment/analysis/file) as JSON.
BASE_URL = 'https://www.encodeproject.org/{}/?format=json'
# The uniform-processing pipeline(s) an analysis must match to be checked.
ENCODE4_DNASE_PIPELINES = ['/pipelines/ENCPL848KLD/']
# Expected file formats / output types carrying the 'preferred_default' flag.
PREFERRED_DEFAULT_FILE_FORMAT = ['bigWig', 'bed', 'bigBed']
PREFERRED_DEFAULT_OUTPUT_TYPE = [
    'read-depth normalized signal', 'peaks'
]
def get_latest_analysis(analyses):
    """Choose the analysis accession to treat as the latest one.

    Each analysis object is fetched from the portal and its creation date,
    pipeline award RFAs, labs, status and assembly recorded. Scanning in
    insertion order, an accession replaces the current candidate when it is
    an ENCODE4 analysis, is still 'in progress', or is newer than the
    candidate.

    :param analyses: list of dicts, each with an 'accession' key (may be
        empty or None)
    :return: the chosen accession string, or None when no analyses given
    """
    if not analyses:
        return None
    analyses_dict = {}
    for a in analyses:
        analysis = requests.get(BASE_URL.format(a['accession']), auth=AUTH).json()
        date_created = analysis['date_created'].split('T')[0]
        date_obj = datetime.datetime.strptime(date_created, '%Y-%m-%d')
        analyses_dict[analysis['accession']] = {
            'date': date_obj,
            'pipeline_rfas': analysis['pipeline_award_rfas'],
            'pipeline_labs': analysis['pipeline_labs'],
            'status': analysis['status'],
            'assembly': analysis['assembly']
        }
    latest = None
    # (Removed unused flags archivedFiles / encode_rfa / assembly_latest that
    # were reset each iteration but never read.)
    for acc in analyses_dict:
        if not latest:
            latest = acc
        if 'ENCODE4' in analyses_dict[acc]['pipeline_rfas']:
            latest = acc
        if ('in progress' in analyses_dict[acc]['status']) or (analyses_dict[acc]['date'] > analyses_dict[latest]['date']):
            latest = acc
    return latest
def check_encode4_dnase_pipeline(exp_acc):
    """Audit one experiment's ENCODE4 DNase uniform-processing results.

    Fetches the experiment from the portal, selects the latest analysis and
    checks assembly, genome annotation, preferred-default flags and the
    per-(output_type, file_format) file counts against expectations.

    :param exp_acc: experiment accession (ENCSR...)
    :return: tuple (bad_reason, serious_audits, archiveAnalyses) where
        bad_reason is a list of failure descriptions, serious_audits counts
        ERROR/NOT_COMPLIANT audits, and archiveAnalyses maps exp_acc to the
        released-but-superseded analyses that should be archived.
    """
    experiment = requests.get(BASE_URL.format(exp_acc), auth=AUTH).json()
    print('------------------------------')
    print(exp_acc)
    print('------------------------------')
    bad_reason = []
    archiveAnalyses = {}
    archiveAnalyses[exp_acc] = []
    # Audit categories that would block release.
    serious_audits = {
        'ERROR': len(experiment['audit'].get('ERROR', [])),
        'NOT_COMPLIANT': len(experiment['audit'].get('NOT_COMPLIANT', [])),
    }
    if serious_audits['ERROR']:
        print('Has {} ERROR audits'.format(serious_audits['ERROR']))
    if serious_audits['NOT_COMPLIANT']:
        print('Has {} NOT_COMPLIANT audits'.format(
            serious_audits['NOT_COMPLIANT']
        ))
    print('Number of original files: {}'.format(
        len(experiment['original_files'])
    ))
    analysisObj = experiment.get('analyses', [])
    latest = get_latest_analysis(analysisObj)
    print('Number of analyses: {}'.format(len(analysisObj)))
    print('File count in analyses: {}'.format(list(
        len(analysis['files']) for analysis in analysisObj
    )))
    skipped_analyses_count = 0
    skipped_ENC4_analyses_count = 0
    preferred_default_file_format = []
    preferred_default_output_type = set()
    # Replicate count drives the expected number of files per output type.
    rep_count = len({
        rep['biological_replicate_number']
        for rep in experiment['replicates']
    })
    file_output_map = {}
    expected_file_output_count = {
        ('unfiltered alignments', 'bam'): rep_count,
        ('alignments', 'bam'): rep_count,
        ('peaks', 'bed'): rep_count * 2,
        ('peaks', 'bigBed'): rep_count * 2,
        ('peaks', 'starch'): rep_count,
        ('FDR cut rate', 'bed'): rep_count,
        ('FDR cut rate', 'bigBed'): rep_count,
        ('read-depth normalized signal', 'bigWig'): rep_count,
        ('footprints', 'bed'): rep_count,
        ('footprints', 'bigBed'): rep_count,
    }
    for analysis in analysisObj:
        # Released analyses that are not the latest are queued for archiving.
        if analysis['status'] in ['released'] and analysis['accession'] != latest:
            archiveAnalyses[exp_acc].append(analysis['accession'])
        if sorted(analysis['pipelines']) != ENCODE4_DNASE_PIPELINES:
            skipped_analyses_count += 1
            continue
        if sorted(analysis['pipelines']) == ENCODE4_DNASE_PIPELINES and analysis['accession'] != latest:
            skipped_ENC4_analyses_count += 1
            continue
        # Only the latest ENCODE4 DNase analysis reaches this point.
        print('Analysis object {} was checked'.format(analysis['accession']))
        if analysis.get('assembly') not in ['GRCh38', 'mm10']:
            print('Wrong assembly')
            bad_reason.append('Wrong assembly')
        if analysis.get('genome_annotation'):
            print('Has genome annotation')
            bad_reason.append('Has genome annotation')
        # Tally files by (output_type, file_format) and record any
        # preferred_default flags; one portal request per file.
        for fid in analysis['files']:
            f_obj = requests.get(BASE_URL.format(fid), auth=AUTH).json()
            file_output_map.setdefault(
                (f_obj['output_type'], f_obj['file_format']), 0
            )
            file_output_map[(f_obj['output_type'], f_obj['file_format'])] += 1
            if f_obj.get('preferred_default'):
                preferred_default_file_format.append(f_obj['file_format'])
                preferred_default_output_type.add(f_obj['output_type'])
        if (
            rep_count == 1
            and not preferred_default_file_format
            and not preferred_default_output_type
        ):
            print('Unreplicated experiment with no preferred default')
        else:
            if sorted(
                preferred_default_file_format
            ) != sorted(PREFERRED_DEFAULT_FILE_FORMAT):
                print(sorted(preferred_default_file_format))
                print('Wrong preferred default file format')
                bad_reason.append('Wrong preferred default file format')
            if (
                len(preferred_default_output_type) != 2
                or list(
                    preferred_default_output_type
                )[0] not in PREFERRED_DEFAULT_OUTPUT_TYPE
                or list(
                    preferred_default_output_type
                )[1] not in PREFERRED_DEFAULT_OUTPUT_TYPE
            ):
                print('Wrong preferred default file output type')
                bad_reason.append('Wrong preferred default file output type')
        if file_output_map != expected_file_output_count:
            print('Wrong file output type map')
            bad_reason.append('Wrong file output type map')
            print('Has {}'.format(str(file_output_map)))
            print('Expect {}'.format(str(expected_file_output_count)))
    if skipped_analyses_count == len(analysisObj):
        print('No ENCODE4 analysis found')
        bad_reason.append('No ENCODE4 analysis found')
    if skipped_analyses_count:
        print('Skipped {} non-ENCODE4 uniform analyses'.format(
            skipped_analyses_count
        ))
    print('')
    return bad_reason, serious_audits, archiveAnalyses
def get_parser():
    """Build the command-line parser.

    Returns
    -------
    argparse.ArgumentParser
        Accepts experiment accessions positionally (or piped on stdin) and
        an optional --ticket tag used to name the output report files.
    """
    parser = argparse.ArgumentParser(
        # Fixed description: this script checks the ENCODE4 *DNase-seq*
        # pipeline (see ENCODE4_DNASE_PIPELINES), not ATAC-seq.
        description='Script to check result of ENCODE4 DNase-seq processing '
        'on the ENCODE portal'
    )
    parser.add_argument(
        'exp_accs',
        nargs='*',
        default=sys.stdin,
        help='One or more experiment accessions (ENCSRs).'
    )
    parser.add_argument(
        '--ticket', dest='ticket',
        help='related ticket number (PROD###)'
    )
    return parser
def main():
    """Check each experiment and write the release/patch report files.

    Side effects: prints a per-experiment summary to stdout and writes three
    tab-separated report files (analysis status patch, released list,
    internal status patch), whose names are prefixed with --ticket if given.
    """
    parser = get_parser()
    args = parser.parse_args()
    summary = {}
    GoodExperiments = {}
    patchAnalyses = {}
    for exp_acc in args.exp_accs:
        acc = exp_acc.strip()
        bad_reason, serious_audits, archiveAnalyses = check_encode4_dnase_pipeline(
            acc
        )
        status = ', '.join(bad_reason) or 'Good'
        if status == 'Good':
            GoodExperiments[acc] = sum(serious_audits.values())
        if sum(serious_audits.values()):
            status += ' BUT has {} ERROR and {} NOT_COMPLIANT'.format(
                serious_audits.get('ERROR', 0),
                serious_audits.get('NOT_COMPLIANT', 0),
            )
        summary[acc] = status
        patchAnalyses[acc] = archiveAnalyses[acc]
    if args.ticket:
        ticket = args.ticket.strip()
        analysisArchive_filename = '%s_analysisStatusPatch.txt' % ticket
        release_filename = '%s_releasedPatch.txt' % ticket
        problem_filename = '%s_internalStatusPatch.txt' % ticket
    else:
        analysisArchive_filename = 'analysisStatusPatch.txt'
        release_filename = 'releasedPatch.txt'
        problem_filename = 'internalStatusPatch.txt'
    # Context managers guarantee the report files are flushed and closed
    # even if writing fails part-way (they were previously never closed).
    with open(release_filename, 'w+') as releasedFiles, \
            open(problem_filename, 'w+') as problemFiles, \
            open(analysisArchive_filename, 'w+') as analysisPatch:
        analysisWriter = csv.writer(analysisPatch, delimiter='\t')
        analysisWriter.writerow(['record_id', 'status'])
        problemWriter = csv.writer(problemFiles, delimiter='\t')
        problemWriter.writerow(['record_id', 'internal_status'])
        for exp_acc in summary:
            print('{}: {}'.format(exp_acc, summary[exp_acc]))
            if patchAnalyses[exp_acc]:
                print('Older released analyses for {} found: {}'.format(exp_acc, patchAnalyses[exp_acc]))
            print('')
            # .get() keeps the original tolerance of a missing key.
            for analysis in patchAnalyses.get(exp_acc, []):
                analysisWriter.writerow([analysis, 'archived'])
        for key in GoodExperiments:
            if GoodExperiments[key]:
                # Good result but with serious audits: needs human review.
                problemWriter.writerow([key, 'post-pipeline review'])
            else:
                releasedFiles.write(key)
                releasedFiles.write('\n')
                problemWriter.writerow([key, 'release ready'])
if __name__ == '__main__':
    main()
| |
#Copyright 2013 Hewlett-Packard Development Company, L.P.
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
#http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from mock import Mock, MagicMock, patch, ANY
from webob.exc import HTTPNotFound
import hashlib
import os
import testtools
from trove.common import utils
from trove.common.context import TroveContext
from trove.conductor import api as conductor_api
from trove.guestagent.common import operating_system
from trove.guestagent.strategies.backup import mysql_impl
from trove.guestagent.strategies.backup import couchbase_impl
from trove.guestagent.strategies.restore.base import RestoreRunner
from trove.backup.models import BackupState
from trove.guestagent.backup import backupagent
from trove.guestagent.strategies.backup.base import BackupRunner
from trove.guestagent.strategies.backup.base import UnknownBackupType
from trove.guestagent.strategies.storage.base import Storage
conductor_api.API.update_backup = Mock()
def create_fake_data():
    """Return 1024 random ASCII letters used as fake backup content.

    Imports are kept local so the helpers load only when fake data is
    actually needed.
    """
    from random import choice
    from string import ascii_letters
    # `xrange` was Python-2-only; `range` behaves identically here and keeps
    # the helper portable across Python versions.
    return ''.join(choice(ascii_letters) for _ in range(1024))
class MockBackup(BackupRunner):
    """Backup runner whose 'backup' echoes random data via subprocess.

    ``self.cmd`` is set to the command *string* before the base-class
    initialiser runs, which is what ``BackupRunner`` consumes.
    """
    backup_type = 'mock_backup'

    def __init__(self, *args, **kwargs):
        self.data = create_fake_data()
        self.cmd = 'echo %s' % self.data
        super(MockBackup, self).__init__(*args, **kwargs)
    # A former ``cmd`` method was removed: the instance attribute assigned in
    # __init__ shadowed it on every instance, making it unreachable dead code.
class MockCheckProcessBackup(MockBackup):
    """Backup runner whose post-run process check always reports failure."""

    def check_process(self):
        # Simulate the backup process failing its verification step.
        return False
class MockLossyBackup(MockBackup):
    """Backup runner simulating incomplete (lossy) writes to Swift."""

    def read(self, *args):
        # Drop the first 20 characters of every chunk so downstream
        # checksums cannot match; implicitly yields None once the
        # underlying stream is exhausted (chunk is falsy).
        chunk = super(MockLossyBackup, self).read(*args)
        return chunk[20:] if chunk else None
class MockSwift(object):
    """In-memory stand-in for a Swift client; uploads accumulate in a string."""

    def __init__(self, *args, **kwargs):
        self.store = ''
        self.containers = []
        self.container = "database_backups"
        self.url = 'http://mockswift/v1'
        self.etag = hashlib.md5()

    def put_container(self, container):
        # Idempotently register the container name.
        if container not in self.containers:
            self.containers.append(container)
        return None

    def put_object(self, container, obj, contents, **kwargs):
        if container not in self.containers:
            raise HTTPNotFound
        # Drain the stream in 64 KiB chunks into the in-memory store;
        # objects without a read() method are treated as empty uploads.
        if hasattr(contents, 'read'):
            while True:
                content = contents.read(2 ** 16)
                if not content:
                    break
                self.store += content
        self.etag.update(self.store)
        return self.etag.hexdigest()

    def save(self, filename, stream):
        # Pretend the upload succeeded and report a fake location/checksum.
        target = '%s/%s/%s' % (self.url, self.container, filename)
        return True, 'w00t', 'fake-checksum', target

    def load(self, context, storage_url, container, filename, backup_checksum):
        pass

    def load_metadata(self, location, checksum):
        return {}

    def save_metadata(self, location, metadata):
        pass
class MockStorage(Storage):
    """No-op storage strategy satisfying the backup agent's interface."""

    def __call__(self, *args, **kwargs):
        # The agent instantiates the strategy; returning self keeps a
        # single shared instance.
        return self

    def load(self, location, backup_checksum):
        pass

    def save(self, filename, stream):
        pass

    def load_metadata(self, location, checksum):
        return {}

    def save_metadata(self, location, metadata=None):
        # Default changed from the mutable ``{}``: a shared dict default
        # would persist between calls. The body ignores it either way.
        pass

    def is_enabled(self):
        return True
class MockRestoreRunner(RestoreRunner):
    """Restore runner stub: a context manager whose restore does nothing."""

    def __init__(self, storage, **kwargs):
        # Deliberately skip the base-class initialiser.
        pass

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass

    def restore(self):
        pass

    def is_zipped(self):
        return False
class MockStats:
    """Canned os.statvfs() result: a 4 GiB volume with 2 GiB free."""
    f_blocks = 1024 ** 2  # total blocks on the filesystem
    f_bsize = 4096  # block size in bytes
    f_bfree = 512 * 1024  # free blocks
class BackupAgentTest(testtools.TestCase):
    """Unit tests for the guest agent's BackupAgent.

    NOTE(review): the assertions of the form
    assertTrue(conductor_api.API.update_backup.called_once_with(...)) are
    vacuous -- 'called_once_with' is not a Mock assertion method, so the
    expression evaluates to a truthy auto-created attribute regardless of
    how update_backup was called; 'assert_called_once_with' was probably
    intended. Flagged rather than changed, since fixing it may surface
    latent argument mismatches.
    """
    def setUp(self):
        super(BackupAgentTest, self).setUp()
        # NOTE(review): the first three monkeypatches below are never
        # restored in tearDown and may leak into other tests.
        mysql_impl.get_auth_password = MagicMock(return_value='123')
        backupagent.get_storage_strategy = MagicMock(return_value=MockSwift)
        os.statvfs = MagicMock(return_value=MockStats)
        self.orig_utils_execute_with_timeout = utils.execute_with_timeout
        self.orig_os_get_ip_address = operating_system.get_ip_address
    def tearDown(self):
        super(BackupAgentTest, self).tearDown()
        # Restore only the globals saved in setUp.
        utils.execute_with_timeout = self.orig_utils_execute_with_timeout
        operating_system.get_ip_address = self.orig_os_get_ip_address
    def test_backup_impl_MySQLDump(self):
        """This test is for
           guestagent/strategies/backup/impl
        """
        mysql_dump = mysql_impl.MySQLDump(
            'abc', extra_opts='')
        self.assertIsNotNone(mysql_dump.cmd)
        str_mysql_dump_cmd = ('mysqldump'
                              ' --all-databases'
                              ' %(extra_opts)s'
                              ' --opt'
                              ' --password=123'
                              ' -u os_admin'
                              ' 2>/tmp/mysqldump.log'
                              ' | gzip |'
                              ' openssl enc -aes-256-cbc -salt '
                              '-pass pass:default_aes_cbc_key')
        self.assertEqual(mysql_dump.cmd, str_mysql_dump_cmd)
        self.assertIsNotNone(mysql_dump.manifest)
        self.assertEqual(mysql_dump.manifest, 'abc.gz.enc')
    def test_backup_impl_InnoBackupEx(self):
        """This test is for
           guestagent/strategies/backup/impl
        """
        inno_backup_ex = mysql_impl.InnoBackupEx('innobackupex', extra_opts='')
        self.assertIsNotNone(inno_backup_ex.cmd)
        str_innobackup_cmd = ('sudo innobackupex'
                              ' --stream=xbstream'
                              ' %(extra_opts)s'
                              ' /var/lib/mysql 2>/tmp/innobackupex.log'
                              ' | gzip |'
                              ' openssl enc -aes-256-cbc -salt '
                              '-pass pass:default_aes_cbc_key')
        self.assertEqual(inno_backup_ex.cmd, str_innobackup_cmd)
        self.assertIsNotNone(inno_backup_ex.manifest)
        str_innobackup_manifest = 'innobackupex.xbstream.gz.enc'
        self.assertEqual(inno_backup_ex.manifest, str_innobackup_manifest)
    def test_backup_impl_CbBackup(self):
        # Couchbase backup needs a reachable host and shell exec; stub both.
        operating_system.get_ip_address = Mock(return_value="1.1.1.1")
        utils.execute_with_timeout = Mock(return_value=None)
        cbbackup = couchbase_impl.CbBackup('cbbackup', extra_opts='')
        self.assertIsNotNone(cbbackup)
        str_cbbackup_cmd = ("tar cPf - /tmp/backups | "
                            "gzip | openssl enc -aes-256-cbc -salt -pass "
                            "pass:default_aes_cbc_key")
        self.assertEqual(str_cbbackup_cmd, cbbackup.cmd)
        self.assertIsNotNone(cbbackup.manifest)
        self.assertIn('gz.enc', cbbackup.manifest)
    def test_backup_base(self):
        """This test is for
           guestagent/strategies/backup/base
        """
        BackupRunner.cmd = "%s"
        backup_runner = BackupRunner('sample', cmd='echo command')
        if backup_runner.is_zipped:
            self.assertEqual(backup_runner.zip_manifest, '.gz')
            self.assertIsNotNone(backup_runner.zip_manifest)
            self.assertIsNotNone(backup_runner.zip_cmd)
            self.assertEqual(backup_runner.zip_cmd, ' | gzip')
        else:
            self.assertIsNone(backup_runner.zip_manifest)
            self.assertIsNone(backup_runner.zip_cmd)
        self.assertEqual(backup_runner.backup_type, 'BackupRunner')
    def test_execute_backup(self):
        """This test should ensure backup agent
                ensures that backup and storage is not running
                resolves backup instance
                starts backup
                starts storage
                reports status
        """
        agent = backupagent.BackupAgent()
        backup_info = {'id': '123',
                       'location': 'fake-location',
                       'type': 'InnoBackupEx',
                       'checksum': 'fake-checksum',
                       'datastore': 'mysql',
                       'datastore_version': '5.5'
                       }
        agent.execute_backup(context=None, backup_info=backup_info,
                             runner=MockBackup)
        # NOTE(review): vacuous assertions -- see class docstring.
        self.assertTrue(
            conductor_api.API.update_backup.called_once_with(
                ANY,
                backup_id=backup_info['id'],
                state=BackupState.NEW))
        self.assertTrue(
            conductor_api.API.update_backup.called_once_with(
                ANY,
                backup_id=backup_info['id'],
                size=ANY,
                state=BackupState.BUILDING))
        self.assertTrue(
            conductor_api.API.update_backup.called_once_with(
                ANY,
                backup_id=backup_info['id'],
                checksum=ANY,
                location=ANY,
                note=ANY,
                backup_type=backup_info['type'],
                state=BackupState.COMPLETED))
    def test_execute_bad_process_backup(self):
        # A runner whose process check fails must raise and mark FAILED.
        agent = backupagent.BackupAgent()
        backup_info = {'id': '123',
                       'location': 'fake-location',
                       'type': 'InnoBackupEx',
                       'checksum': 'fake-checksum',
                       'datastore': 'mysql',
                       'datastore_version': '5.5'
                       }
        self.assertRaises(backupagent.BackupError, agent.execute_backup,
                          context=None, backup_info=backup_info,
                          runner=MockCheckProcessBackup)
        # NOTE(review): vacuous assertions -- see class docstring.
        self.assertTrue(
            conductor_api.API.update_backup.called_once_with(
                ANY,
                backup_id=backup_info['id'],
                state=BackupState.NEW))
        self.assertTrue(
            conductor_api.API.update_backup.called_once_with(
                ANY,
                backup_id=backup_info['id'],
                size=ANY,
                state=BackupState.BUILDING))
        self.assertTrue(
            conductor_api.API.update_backup.called_once_with(
                ANY,
                backup_id=backup_info['id'],
                checksum=ANY,
                location=ANY,
                note=ANY,
                backup_type=backup_info['type'],
                state=BackupState.FAILED))
    def test_execute_lossy_backup(self):
        """This test verifies that incomplete writes to swift will fail."""
        with patch.object(MockSwift, 'save',
                          return_value=(False, 'Error', 'y', 'z')):
            agent = backupagent.BackupAgent()
            backup_info = {'id': '123',
                           'location': 'fake-location',
                           'type': 'InnoBackupEx',
                           'checksum': 'fake-checksum',
                           }
            self.assertRaises(backupagent.BackupError, agent.execute_backup,
                              context=None, backup_info=backup_info,
                              runner=MockLossyBackup)
            # NOTE(review): vacuous assertion -- see class docstring.
            self.assertTrue(
                conductor_api.API.update_backup.called_once_with(
                    ANY,
                    backup_id=backup_info['id'],
                    state=BackupState.FAILED))
    def test_execute_restore(self):
        """This test should ensure backup agent
                resolves backup instance
                determines backup/restore type
                transfers/downloads data and invokes the restore module
                reports status
        """
        with patch.object(backupagent, 'get_storage_strategy',
                          return_value=MockStorage):
            with patch.object(backupagent, 'get_restore_strategy',
                              return_value=MockRestoreRunner):
                agent = backupagent.BackupAgent()
                bkup_info = {'id': '123',
                             'location': 'fake-location',
                             'type': 'InnoBackupEx',
                             'checksum': 'fake-checksum',
                             }
                agent.execute_restore(TroveContext(),
                                      bkup_info,
                                      '/var/lib/mysql')
    def test_restore_unknown(self):
        # An unresolvable restore strategy must surface as UnknownBackupType.
        with patch.object(backupagent, 'get_restore_strategy',
                          side_effect=ImportError):
            agent = backupagent.BackupAgent()
            bkup_info = {'id': '123',
                         'location': 'fake-location',
                         'type': 'foo',
                         'checksum': 'fake-checksum',
                         }
            self.assertRaises(UnknownBackupType, agent.execute_restore,
                              context=None, backup_info=bkup_info,
                              restore_location='/var/lib/mysql')
    def test_backup_incremental_metadata(self):
        # Incremental backups must load the parent's metadata and save the
        # new metadata alongside the backup.
        with patch.object(backupagent, 'get_storage_strategy',
                          return_value=MockSwift):
            MockStorage.save_metadata = Mock()
            with patch.object(MockSwift, 'load_metadata',
                              return_value={'lsn': '54321'}):
                meta = {
                    'lsn': '12345',
                    'parent_location': 'fake',
                    'parent_checksum': 'md5',
                }
                mysql_impl.InnoBackupExIncremental.metadata = MagicMock(
                    return_value=meta)
                mysql_impl.InnoBackupExIncremental.run = MagicMock(
                    return_value=True)
                mysql_impl.InnoBackupExIncremental.__exit__ = MagicMock(
                    return_value=True)
                agent = backupagent.BackupAgent()
                bkup_info = {'id': '123',
                             'location': 'fake-location',
                             'type': 'InnoBackupEx',
                             'checksum': 'fake-checksum',
                             'parent': {'location': 'fake', 'checksum': 'md5'}
                             }
                agent.execute_backup(TroveContext(),
                                     bkup_info,
                                     '/var/lib/mysql')
                # NOTE(review): vacuous assertion -- see class docstring.
                self.assertTrue(MockStorage.save_metadata.called_once_with(
                    ANY,
                    meta))
    def test_backup_incremental_bad_metadata(self):
        # A parent reference without usable metadata should blow up.
        with patch.object(backupagent, 'get_storage_strategy',
                          return_value=MockSwift):
            agent = backupagent.BackupAgent()
            bkup_info = {'id': '123',
                         'location': 'fake-location',
                         'type': 'InnoBackupEx',
                         'checksum': 'fake-checksum',
                         'parent': {'location': 'fake', 'checksum': 'md5'}
                         }
            self.assertRaises(
                AttributeError,
                agent.execute_backup, TroveContext(), bkup_info, 'location')
| |
import os
import re
import random
import string
import subprocess
import warnings
import logging
from tempfile import mkdtemp
from datetime import datetime
from functools import reduce
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
from hetzner import RobotError, WebRobotError
from hetzner.rdns import ReverseDNS, ReverseDNSManager
from hetzner.reset import Reset
from hetzner.util import addr, scraping
__all__ = ['AdminAccount', 'IpAddress', 'RescueSystem', 'Server', 'Subnet',
'IpManager', 'SubnetManager']
class SSHAskPassHelper(object):
    """
    This creates a temporary SSH askpass helper script, which just passes the
    provided password.
    """
    def __init__(self, passwd):
        self.passwd = passwd
        self.tempdir = None
        self.script = None

    def __enter__(self):
        """Create the helper script and return its filesystem path."""
        self.tempdir = mkdtemp()
        script = os.path.join(self.tempdir, "askpass")
        # O_NOFOLLOW + mode 0o700: refuse symlink tricks and keep the file
        # (which contains the password) owner-only and executable.
        fd = os.open(script, os.O_WRONLY | os.O_CREAT | os.O_NOFOLLOW, 0o700)
        self.script = script
        # Single-quote the password for the shell, escaping embedded quotes.
        esc_passwd = self.passwd.replace("'", r"'\''")
        askpass = "#!/bin/sh\necho -n '{0}'".format(esc_passwd).encode('ascii')
        try:
            os.write(fd, askpass)
        finally:
            # Bug fix: the fd previously leaked if os.write() raised.
            os.close(fd)
        return script

    def __exit__(self, exc_type, exc_value, traceback):
        # Clean up the script and its private temp directory.
        # (Parameters renamed from 'type'/'value' to avoid shadowing
        # builtins; they are always passed positionally.)
        if self.script is not None:
            os.unlink(self.script)
        if self.tempdir is not None:
            os.rmdir(self.tempdir)
class RescueSystem(object):
    """Query and toggle the Hetzner rescue system for one server."""
    def __init__(self, server):
        self.server = server
        self.conn = server.conn
        # Cached rescue state; populated lazily by _update_status().
        self._active = None
        self._password = None
        self._authorized_keys = None
    def _update_status(self, data=None):
        # Refresh the cached state, fetching from the robot when no reply
        # payload is supplied.
        if data is None:
            path = '/boot/{0}/rescue'.format(self.server.ip)
            data = self.conn.get(path)
        rescue = data['rescue']
        self._active = rescue['active']
        self._password = rescue['password']
        self._authorized_keys = rescue['authorized_key']
    @property
    def active(self):
        # Whether the rescue system is currently enabled (cached).
        if self._active is not None:
            return self._active
        self._update_status()
        return self._active
    @property
    def password(self):
        # Root password of the rescue system (cached).
        if self._password is not None:
            return self._password
        self._update_status()
        return self._password
    @property
    def authorized_keys(self):
        # SSH keys installed into the rescue system (cached).
        if self._authorized_keys is not None:
            return self._authorized_keys
        self._update_status()
        return self._authorized_keys
    def _rescue_action(self, method, opts=None):
        # Issue the request and keep the cached state in sync with the reply.
        reply = self.conn.request(
            method,
            '/boot/{0}/rescue'.format(self.server.ip),
            opts
        )
        self._update_status(reply)
    def activate(self, bits=64, os='linux', authorized_keys=None):
        """
        Activate the rescue system if necessary.

        Note that 'authorized_keys', if passed, must be a list of fingerprints,
        e.g. ['a3:14:62:38:d1:45:35:6c:de:ad:ec:12:be:93:24:ef'] of the keys
        that should have been added to robot already.
        """
        # NOTE(review): the 'os' parameter shadows the os module within this
        # method (harmless here, the module is not used in the body).
        if not self.active:
            opts = {'os': os, 'arch': bits}
            if authorized_keys is not None:
                opts['authorized_key'] = list(authorized_keys)
            return self._rescue_action('post', opts)
    def deactivate(self):
        """
        Deactivate the rescue system if necessary.
        """
        if self.active:
            return self._rescue_action('delete')
    def observed_activate(self, *args, **kwargs):
        """
        Activate the rescue system and reboot into it.

        Look at Server.observed_reboot() for options.
        """
        self.activate()
        self.server.observed_reboot(*args, **kwargs)
    def observed_deactivate(self, *args, **kwargs):
        """
        Deactivate the rescue system and reboot into normal system.

        Look at Server.observed_reboot() for options.
        """
        self.deactivate()
        self.server.observed_reboot(*args, **kwargs)
    def shell(self, *args, **kwargs):
        """
        Reboot into rescue system, spawn a shell and after the shell is
        closed, reboot back into the normal system.

        Look at Server.observed_reboot() for further options.
        """
        msg = ("The RescueSystem.shell() method will be removed from the API"
               " in version 1.0.0, please do not use it! See"
               " https://github.com/aszlig/hetzner/issues/13"
               " for details.")
        warnings.warn(msg, FutureWarning)
        self.observed_activate(*args, **kwargs)
        with SSHAskPassHelper(self.password) as askpass:
            # Disable all host-key checks: the rescue system has a fresh key.
            ssh_options = [
                'CheckHostIP=no',
                'GlobalKnownHostsFile=/dev/null',
                'UserKnownHostsFile=/dev/null',
                'StrictHostKeyChecking=no',
                'LogLevel=quiet',
            ]
            ssh_args = reduce(lambda acc, opt: acc + ['-o', opt],
                              ssh_options, [])
            cmd = ['ssh'] + ssh_args + ["root@{0}".format(self.server.ip)]
            env = dict(os.environ)
            # DISPLAY must be set (to anything) and the process detached from
            # the tty for ssh to consult SSH_ASKPASS.
            env['DISPLAY'] = ":666"
            env['SSH_ASKPASS'] = askpass
            subprocess.check_call(cmd, env=env, preexec_fn=os.setsid)
        self.observed_deactivate(*args, **kwargs)
class AdminAccount(object):
    """Manage the server's admin account by scraping the robot web UI."""
    def __init__(self, server):
        # XXX: This is preliminary, because we don't have such functionality in
        # the official API yet.
        self._scraper = server.conn.scraper
        self._serverid = server.number
        self.exists = False
        self.login = None
        self.passwd = None
        self.update_info()
    def update_info(self):
        """
        Get information about currently active admin login.
        """
        self._scraper.login()
        # Extract the login name out of the admin page's HTML.
        login_re = re.compile(r'"label_req">Login.*?"element">([^<]+)',
                              re.DOTALL)
        path = '/server/admin/id/{0}'.format(self._serverid)
        response = self._scraper.request(path)
        assert response.status == 200
        match = login_re.search(response.read().decode('utf-8'))
        if match is None:
            self.exists = False
        else:
            self.exists = True
            self.login = match.group(1)
    def _genpasswd(self):
        # Generate a random 20-40 character password from a fixed alphabet.
        # NOTE(review): 'random' is not a CSPRNG even when seeded from
        # os.urandom; the 'secrets' module would be preferable for this.
        random.seed(os.urandom(512))
        chars = string.ascii_letters + string.digits + "/()-=+_,;.^~#*@"
        length = random.randint(20, 40)
        return ''.join(random.choice(chars) for i in range(length))
    def create(self, passwd=None):
        """
        Create a new admin account if missing. If passwd is supplied, use it
        instead of generating a random one.
        """
        if passwd is None:
            passwd = self._genpasswd()
        # Fetch the form first to obtain a CSRF token for the POST below.
        form_path = '/server/admin/id/{0}'.format(self._serverid)
        form_response = self._scraper.request(form_path, method='POST')
        parser = scraping.CSRFParser('password[_csrf_token]')
        parser.feed(form_response.read().decode('utf-8'))
        assert parser.csrf_token is not None
        data = {
            'password[new_password]': passwd,
            'password[new_password_repeat]': passwd,
            'password[_csrf_token]': parser.csrf_token,
        }
        # Create when absent, otherwise update the existing account password.
        if not self.exists:
            failmsg = "Unable to create admin account"
            path = '/server/adminCreate/id/{0}'.format(self._serverid)
        else:
            failmsg = "Unable to update admin account password"
            path = '/server/adminUpdate'
            data['id'] = self._serverid
        response = self._scraper.request(path, data)
        data = response.read().decode('utf-8')
        if "msgbox_success" not in data:
            # Scrape the error list out of the response for a useful message.
            ul_re = re.compile(r'<ul\s+class="error_list">(.*?)</ul>',
                               re.DOTALL)
            li_re = re.compile(r'<li>\s*([^<]*?)\s*</li>')
            ul_match = ul_re.search(data)
            if ul_match is not None:
                errors = [error.group(1)
                          for error in li_re.finditer(ul_match.group(0))]
                msg = failmsg + ': ' + ', '.join(errors)
                raise WebRobotError(msg)
            raise WebRobotError(failmsg)
        self.update_info()
        self.passwd = passwd
        return self.login, self.passwd
    def delete(self):
        """
        Remove the admin account.
        """
        if not self.exists:
            return
        path = '/server/adminDelete/id/{0}'.format(self._serverid)
        assert "msgbox_success" in \
            self._scraper.request(path).read().decode('utf-8')
        self.update_info()
    def __repr__(self):
        if self.exists:
            return "<AdminAccount login: {0}>".format(self.login)
        else:
            return "<AdminAccount missing>"
class IpAddress(object):
    """A single IP address, either standalone or a member of a subnet."""
    def __init__(self, conn, result, subnet_ip=None):
        self.conn = conn
        # When set, this address belongs to a subnet and its data is fetched
        # from the /subnet resource instead of /ip.
        self.subnet_ip = subnet_ip
        self.update_info(result)
        self._rdns = None
    @property
    def rdns(self):
        """
        Get or set reverse DNS PTRs.
        """
        if self._rdns is None:
            self._rdns = ReverseDNS(self.conn, self.ip)
        return self._rdns
    def update_info(self, result=None):
        """
        Update the information of the current IP address and all related
        information such as traffic warnings. If result is omitted, a new
        request is sent to the robot to gather the information.
        """
        if self.subnet_ip is not None:
            if result is None:
                result = self.conn.get('/subnet/{0}'.format(self._subnet_addr))
            data = result['subnet']
            self._subnet_addr = data['ip']
            # Present the member address, not the subnet's network address.
            data['ip'] = self.subnet_ip
            # Does not exist in subnets
            data['separate_mac'] = None
        else:
            if result is None:
                result = self.conn.get('/ip/{0}'.format(self.ip))
            data = result['ip']
        self.ip = data['ip']
        self.server_ip = data['server_ip']
        self.locked = data['locked']
        self.separate_mac = data['separate_mac']
        self.traffic_warnings = data['traffic_warnings']
        self.traffic_hourly = data['traffic_hourly']
        self.traffic_daily = data['traffic_daily']
        self.traffic_monthly = data['traffic_monthly']
    def __repr__(self):
        return "<IpAddress {0}>".format(self.ip)
class IpManager(object):
    """Look up and enumerate the IP addresses attached to one server."""

    def __init__(self, conn, main_ip):
        self.conn = conn
        self.main_ip = main_ip

    def get(self, ip):
        """
        Get a specific IP address of a server.
        """
        result = self.conn.get('/ip/{0}'.format(ip))
        return IpAddress(self.conn, result)

    def __iter__(self):
        # Enumerate every address whose server_ip matches the main address.
        query = urlencode({'server_ip': self.main_ip})
        addresses = []
        for entry in self.conn.get('/ip?{0}'.format(query)):
            addresses.append(IpAddress(self.conn, entry))
        return iter(addresses)
class Subnet(object):
    """A subnet assigned to a server, with numeric range helpers."""

    def __init__(self, conn, result):
        self.conn = conn
        self.update_info(result)

    def update_info(self, result=None):
        """
        Update the information of the subnet. If result is omitted, a new
        request is sent to the robot to gather the information.
        """
        if result is None:
            result = self.conn.get('/subnet/{0}'.format(self.net_ip))
        data = result['subnet']
        self.net_ip = data['ip']
        self.mask = data['mask']
        self.gateway = data['gateway']
        self.server_ip = data['server_ip']
        self.failover = data['failover']
        self.locked = data['locked']
        self.traffic_warnings = data['traffic_warnings']
        self.traffic_hourly = data['traffic_hourly']
        self.traffic_daily = data['traffic_daily']
        self.traffic_monthly = data['traffic_monthly']
        # Precompute the numeric address range for fast membership tests.
        self.is_ipv6, self.numeric_net_ip = addr.parse_ipaddr(self.net_ip)
        self.numeric_gateway = addr.parse_ipaddr(self.gateway, self.is_ipv6)
        getrange = addr.get_ipv6_range if self.is_ipv6 else addr.get_ipv4_range
        self.numeric_range = getrange(self.numeric_net_ip, self.mask)

    def get_ip_range(self):
        """
        Return the smallest and biggest possible IP address of the current
        subnet.
        """
        convert = addr.ipv6_bin2addr if self.is_ipv6 else addr.ipv4_bin2addr
        return convert(self.numeric_range[0]), convert(self.numeric_range[1])

    def __contains__(self, ip_addr):
        """
        Check whether a specific IP address is within the current subnet.
        """
        # Bug fix: the parameter used to be named 'addr', shadowing the
        # 'hetzner.util.addr' module and raising AttributeError on the
        # addr.parse_ipaddr() call below.
        numeric_addr = addr.parse_ipaddr(ip_addr, self.is_ipv6)
        return self.numeric_range[0] <= numeric_addr <= self.numeric_range[1]

    def get_ip(self, addr):
        """
        Return an IpAddress object for the specified IPv4 or IPv6 address or
        None if the IP address doesn't exist in the current subnet.
        """
        if addr in self:
            result = self.conn.get('/subnet/{0}'.format(self.net_ip))
            return IpAddress(self.conn, result, addr)
        else:
            return None

    def __repr__(self):
        return "<Subnet {0}/{1} (Gateway: {2})>".format(self.net_ip, self.mask,
                                                        self.gateway)
class SubnetManager(object):
    """Look up and enumerate the subnets assigned to one server."""

    def __init__(self, conn, main_ip):
        self.conn = conn
        self.main_ip = main_ip

    def get(self, net_ip):
        """
        Get a specific subnet of a server.
        """
        return Subnet(self.conn, self.conn.get('/subnet/{0}'.format(net_ip)))

    def __iter__(self):
        data = urlencode({'server_ip': self.main_ip})
        try:
            result = self.conn.get('/subnet?{0}'.format(data))
        except RobotError as err:
            # If there are no subnets a 404 is returned rather than just an
            # empty list.
            if err.status == 404:
                result = []
            else:
                # Bug fix: non-404 errors used to be swallowed here, leaving
                # 'result' unbound and raising NameError below; propagate
                # them instead.
                raise
        return iter([Subnet(self.conn, net) for net in result])
class Server(object):
    """A dedicated (or virtual) server as returned by the robot API."""
    def __init__(self, conn, result):
        self.conn = conn
        # update_info() must run first: the managers below need self.ip.
        self.update_info(result)
        self.rescue = RescueSystem(self)
        self.reset = Reset(self)
        self.ips = IpManager(self.conn, self.ip)
        self.subnets = SubnetManager(self.conn, self.ip)
        self.rdns = ReverseDNSManager(self.conn, self.ip)
        self._admin_account = None
        self.logger = logging.getLogger("Server #{0}".format(self.number))
    @property
    def admin(self):
        """
        Update, create and delete admin accounts.
        """
        if self._admin_account is None:
            self._admin_account = AdminAccount(self)
        return self._admin_account
    def update_info(self, result=None):
        """
        Updates the information of the current Server instance either by
        sending a new GET request or by parsing the response given by result.
        """
        if result is None:
            result = self.conn.get('/server/{0}'.format(self.ip))
        data = result['server']
        self.ip = data['server_ip']
        self.number = data['server_number']
        self.name = data['server_name']
        self.product = data['product']
        self.datacenter = data['dc']
        self.traffic = data['traffic']
        self.flatrate = data['flatrate']
        self.status = data['status']
        self.throttled = data['throttled']
        self.cancelled = data['cancelled']
        self.paid_until = datetime.strptime(data['paid_until'], '%Y-%m-%d')
        # vServers are identified by a product code starting with 'VQ'.
        self.is_vserver = self.product.startswith('VQ')
    def observed_reboot(self, *args, **kwargs):
        # Deprecated shim; delegates to the Reset subobject.
        msg = ("Server.observed_reboot() is deprecated. Please use"
               " Server.reset.observed_reboot() instead.")
        warnings.warn(msg, DeprecationWarning)
        return self.reset.observed_reboot(*args, **kwargs)
    def reboot(self, *args, **kwargs):
        # Deprecated shim; delegates to the Reset subobject.
        msg = ("Server.reboot() is deprecated. Please use"
               " Server.reset.reboot() instead.")
        warnings.warn(msg, DeprecationWarning)
        return self.reset.reboot(*args, **kwargs)
    def set_name(self, name):
        # Rename the server and refresh cached info from the response.
        result = self.conn.post('/server/{0}'.format(self.ip),
                                {'server_name': name})
        self.update_info(result)
    def __repr__(self):
        return "<{0} (#{1} {2})>".format(self.ip, self.number, self.product)
| |
#
# aac_map.py, doom-net
#
# Created by Andrey Kolishchak on 01/21/17.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from device import device
from collections import namedtuple
from aac_base import AACBase
import random
class ResidualBlock(nn.Module):
    """Conv->BN->ReLU->Conv->BN block with a 1x1-conv projection shortcut.

    NOTE(review): neither conv uses padding, so for kernel_size > 1 the main
    path shrinks the spatial dims while the 1x1 shortcut does not, and the
    residual add below will fail -- only kernel_size == 1 is shape-safe as
    written; confirm intent before reusing with larger kernels.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride):
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride)
        self.bn1 = nn.BatchNorm2d(out_channels)
        # Bug fix: 'nn.relu' does not exist (AttributeError at construction);
        # the module class is nn.ReLU.
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=kernel_size)
        self.bn2 = nn.BatchNorm2d(out_channels)
        # Projection shortcut matching the channel count and stride.
        self.residual = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.residual_bn = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        output = self.conv1(x)
        output = self.bn1(output)
        output = self.relu(output)
        output = self.conv2(output)
        output = self.bn2(output)
        residual = self.residual(x)
        residual = self.residual_bn(residual)
        output += residual
        output = self.relu(output)
        return output
class BaseModel(AACBase):
    """CNN policy trunk: screen pixels + game variables -> action logits.

    Also maintains a rolling per-environment history of the last `frame_num`
    screens so the conv stack can see motion across calls.
    """
    def __init__(self, in_channels, button_num, variable_num, frame_num):
        super(BaseModel, self).__init__()
        self.screen_feature_num = 256
        self.conv1 = nn.Conv2d(in_channels=in_channels, out_channels=32, kernel_size=3, stride=1)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, stride=1)
        self.conv3 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=2, dilation=8)
        self.conv4 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=2, dilation=16)
        self.conv5 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=2, dilation=1)
        #self.screen_features1 = nn.Linear(13 * 18 * 128, self.screen_feature_num)
        # 5 * 21 * 128 is the flattened conv5 output for the expected screen
        # size -- assumes a fixed input resolution; TODO confirm.
        self.screen_features1 = nn.Linear(5 * 21 * 128, self.screen_feature_num)
        self.batch_norm = nn.BatchNorm1d(self.screen_feature_num)
        layer1_size = 64
        # Actor head (variables are concatenated after the first layer).
        self.action1 = nn.Linear(self.screen_feature_num, layer1_size)
        self.action2 = nn.Linear(layer1_size + variable_num, button_num)
        self.batch_norm_action = nn.BatchNorm1d(layer1_size + variable_num)
        # Critic head, mirroring the actor head's shape.
        self.value1 = nn.Linear(self.screen_feature_num, layer1_size)
        self.value2 = nn.Linear(layer1_size + variable_num, 1)
        self.batch_norm_value = nn.BatchNorm1d(layer1_size + variable_num)
        self.screens = None  # per-env frame histories, lazily initialized
        self.frame_num = frame_num

    def forward(self, screen, variables):
        # cnn
        screen_features = F.relu(self.conv1(screen))
        screen_features = F.relu(self.conv2(screen_features))
        screen_features = F.relu(self.conv3(screen_features))
        screen_features = F.relu(self.conv4(screen_features))
        screen_features = F.relu(self.conv5(screen_features))
        screen_features = screen_features.view(screen_features.size(0), -1)
        # features
        input = self.screen_features1(screen_features)
        input = self.batch_norm(input)
        input = F.relu(input)
        # action logits; the shared feature vector is returned as well so
        # subclasses can attach the critic to it.
        action = F.relu(self.action1(input))
        action = torch.cat([action, variables], 1)
        action = self.batch_norm_action(action)
        action = self.action2(action)
        return action, input

    def transform_input(self, screen, variables):
        """Stack the last frame_num screens per env and scale variables."""
        screen_batch = []
        if self.frame_num > 1:
            if self.screens is None:
                # Bug fix: '[[]] * len(screen)' created one shared list
                # aliased across every environment slot; each slot needs its
                # own independent history list.
                self.screens = [[] for _ in range(len(screen))]
            for idx, screens in enumerate(self.screens):
                if len(screens) >= self.frame_num:
                    screens.pop(0)
                screens.append(screen[idx])
                if len(screens) == 1:
                    # First frame for this env: replicate it to fill history.
                    for i in range(self.frame_num - 1):
                        screens.append(screen[idx])
                screen_batch.append(torch.cat(screens, 0))
            screen = torch.stack(screen_batch)
        # NOTE(review): in-place division mutates the caller's tensor --
        # confirm callers do not reuse 'variables' afterwards.
        variables /= 100
        return screen.to(device), variables.to(device)

    def set_terminal(self, terminal):
        # Drop the frame history of environments whose episode just ended
        # (terminal == 0 marks the finished slots here).
        if self.screens is not None:
            indexes = torch.nonzero(terminal.view(-1) == 0)
            for idx in range(len(indexes)):
                self.screens[indexes[idx]] = []
# Per-step record kept for the backward pass: log-prob of the chosen action
# and the critic's value estimate.
ModelOutput = namedtuple('ModelOutput', ['log_action', 'value'])
class AdvantageActorCriticMap(BaseModel):
    """A2C head on BaseModel: epsilon-greedy acting plus an advantage
    actor-critic loss accumulated across an episode."""
    def __init__(self, args):
        super(AdvantageActorCriticMap, self).__init__(args.screen_size[0]*args.frame_num, args.button_num, args.variable_num, args.frame_num)
        if args.base_model is not None:
            # load weights from the base model
            base_model = torch.load(args.base_model)
            self.load_state_dict(base_model.state_dict())
            del base_model
        self.discount = args.episode_discount
        # Per-episode buffers consumed by backward().
        self.outputs = []
        self.rewards = []
        self.discounts = []
    def reset(self):
        # Clear the per-episode buffers.
        self.outputs = []
        self.rewards = []
        self.discounts = []
    def forward(self, screen, variables):
        action_prob, input = super(AdvantageActorCriticMap, self).forward(screen, variables)
        if not self.training:
            # Inference: deterministic argmax action, no value needed.
            _, action = action_prob.max(1, keepdim=True)
            return action, None
        # greedy actions (epsilon-greedy exploration, epsilon = 0.1)
        if random.random() < 0.1:
            action = torch.LongTensor(action_prob.size(0), 1).random_(0, action_prob.size(1)).to(device)
        else:
            _, action = action_prob.max(1, keepdim=True)
        # value prediction - critic
        value = F.relu(self.value1(input))
        value = torch.cat([value, variables], 1)
        value = self.batch_norm_value(value)
        value = self.value2(value)
        # save output for backprop
        action_prob = F.log_softmax(action_prob, dim=1)
        self.outputs.append(ModelOutput(action_prob.gather(-1, action), value))
        return action, value
    def get_action(self, state):
        action, _ = self.forward(*self.transform_input(state.screen, state.variables))
        return action
    def set_reward(self, reward):
        # Rewards are scaled down by 100.
        self.rewards.append(reward * 0.01) # no clone() b/c of * 0.01
    def set_terminal(self, terminal):
        # A zero entry in 'terminal' zeroes the discount at episode ends.
        super(AdvantageActorCriticMap, self).set_terminal(terminal)
        self.discounts.append(self.discount * terminal)
    def backward(self):
        #
        # calculate step returns in reverse order
        #rewards = torch.stack(self.rewards, dim=0)
        rewards = self.rewards
        # Bootstrapped returns: the last value estimate seeds the recursion.
        returns = torch.Tensor(len(rewards) - 1, *self.outputs[-1].value.size())
        step_return = self.outputs[-1].value.detach().cpu()
        for i in range(len(rewards) - 2, -1, -1):
            step_return.mul_(self.discounts[i]).add_(rewards[i])
            returns[i] = step_return
        returns = returns.to(device)
        #
        # calculate losses
        policy_loss = 0
        value_loss = 0
        steps = len(self.outputs) - 1
        for i in range(steps):
            # Detached value: the critic only learns through value_loss.
            advantage = returns[i] - self.outputs[i].value.detach()
            policy_loss += -self.outputs[i].log_action * advantage
            value_loss += F.smooth_l1_loss(self.outputs[i].value, returns[i])
        # Manual L2 regularization over all parameters.
        weights_l2 = 0
        for param in self.parameters():
            # TODO: check for non-grad params
            weights_l2 += param.norm(2)
        loss = policy_loss.mean() / steps + value_loss / steps + 0.00001 * weights_l2
        loss.backward()
        # reset state
        self.reset()
| |
#pylint: disable=W0703,R0915,R0912,R0914,R0904,W0223,W0105
""" Base class for package download and manifest create operations """
import pylons
import os
import time
from agent.lib.errors import Errors, AgentException
from agent.lib.utils import calcProgress, rchown, rchmod, isHigherPrivilegeService, \
chmod, getuserofpath, rmrf
from agent.lib.agent_thread.agent_thread import AgentThread
from agent.lib.package import PackageUtil
from agent.lib.packagemgr import PackageMgr
from agent.lib.agent_thread.download_thread import DownloadThread
from agent.lib import contextutils, configutil, utils, manifestutil
import logging
from agent.lib.agent_thread.exec_thread import ExecThread
LOG = logging.getLogger(__name__)
class DownloadHelper(AgentThread):
""" This thread will attempt to help creating packages for createManifest and createPackages APIs.
This means creating the directory.
Downloading the packages, if necessary.
Verifying the package.
Untaring the package.
"""
inProgressExt = '.inprogress'
    def __init__(self, threadMgr, cat = None, name = 'agent_thread', parentId = None):
        """ Constructor

        @param threadMgr: thread manager tracking this thread
        @param cat: optional category list used for thread lookup
        @param name: thread name
        @param parentId: uuid of the parent thread, if any
        """
        AgentThread.__init__(self, threadMgr, cat = cat, name = name, parentId = parentId)
def _findPackagesToDownload(self, packages, packageRootPath):
''' check which packages exist and which need to be downloaded '''
# figure out which of the packages are already there
remainingPackages = {}
for one_package in packages:
one_package_dict = PackageUtil.parseUri(one_package)
one_package_path = os.path.join(packageRootPath, one_package_dict['packageName'], one_package_dict['packageVersion'])
if (not os.path.exists(one_package_path)):
remainingPackages[one_package] = one_package_dict
return remainingPackages
    def _downloadPackages(self, packages, isFailedFatal = True):
        """
        download all the packages
        update progress
        check for timeout
        @params packages = list of package uri's
        @param isFailedFatal = when True a single failed download raises;
                               otherwise failures are collected and returned
        @return list of uris that failed to download (empty when none)
        @throws AgentException
        """
        try:
            appGlobal = pylons.config['pylons.app_globals']
            # globals all the remaining packages, download the packages
            pkgObjs = []
            for pkg in packages:
                packageKey = PackageUtil.getPackageKey(pkg)
                # Reuse an in-flight download thread for the same package if
                # one exists, to avoid duplicate downloads.
                matchDlThreads = appGlobal.threadMgr.getThreadByCat(packageKey)
                if matchDlThreads:
                    # found an inprogress download thread
                    dlThread = matchDlThreads[0]
                else:
                    dlThread = DownloadThread(self._threadMgr, pkg, category=[packageKey], packageloc=None, parentId = self.getUuid())
                    contextutils.copyJobContexts(self, dlThread)
                    dlThread.start()
                if (dlThread.getStatus().get('progress') != 100):
                    pkgObjs.append(dlThread)
                self._checkStop()
            # check that we have packages to download
            if (len(pkgObjs) == 0):
                return [] # nothing to download, so no failed packages
            # now wait for all the packages to finish
            liveThreadCount = len(pkgObjs)
            timeoutNotSet = True
            create_sleep_time = float(pylons.config['exec_thread_sleep_time']) * liveThreadCount
            failed_packages = []
            while (liveThreadCount > 0):
                self._checkStop()
                LOG.info('%s packages still downloading' % liveThreadCount)
                time.sleep(create_sleep_time)
                # go through all the packages and
                # calculated average progress
                # check that all packages are alive
                totalProgress = 0
                liveThreadCount = 0
                timeouts = (0.0, 0.0)
                for dlThread in pkgObjs:
                    # we are not done yet
                    if dlThread.isAlive():
                        liveThreadCount += 1
                    # if one package failed, then we have to fail the entire manifest
                    threadStatus = dlThread.getStatus()
                    if (not dlThread.isAlive() and threadStatus.get('progress') != 100):
                        if isFailedFatal:
                            raise AgentException(Errors.DC_FAILED_DOWNLOAD,
                                                 'failed downloading package (%s) - %s'
                                                 % (dlThread.getUriDict().get('uri'), threadStatus.get('errorMsg')))
                        failed_packages.append(dlThread.getUriDict().get('uri'))
                    progressTimeout = dlThread.getProgressTimeouts()
                    if (progressTimeout and timeouts):
                        timeouts = (timeouts[0] + progressTimeout[0], timeouts[1] + progressTimeout[1])
                    else:
                        # One thread without timeout info disables the
                        # overall timeout computation.
                        timeouts = None
                    pkgProgress = threadStatus.get('progress')
                    totalProgress += pkgProgress
                if (timeouts and timeoutNotSet):
                    # Setting the timeout once as it is absolute time. Doing this after the timeout for each
                    # package is available
                    self.extendTimeout(timeouts[0])
                    timeoutNotSet = False
                    LOG.debug('Using overall timeout=%s and progress timeout=%s' % (timeouts[0], timeouts[1]))
                # calculate the new progress
                # I'm allocating 80 of the progress to be the download
                newProgress = calcProgress(1, 79, float(totalProgress) / (100 * len(pkgObjs)))
                # now update the progress
                self._updateProgress(newProgress)
            return failed_packages
        finally:
            # now update the progress
            self._updateProgress(79)
def untar(self, packagePath, untarPath, nicelevel):
''' do real untar '''
cmd = ['tar', '-C', untarPath, '-x', '-f', packagePath]
# timeout 60 minute
execThread = ExecThread(None, cmd, None, self.getUuid())
execThread.setTimeout(3600)
execThread.run()
status = execThread.getStatus()
if (status['error'] != None):
msg = 'untar cmd (%s) failed (%s - %s)' % (' '.join(cmd), status['error'], status['errorMsg'])
LOG.error(msg)
raise AgentException(Errors.PACKAGE_UNTAR_FAILURE, msg)
def _untarPackages(self, packages, service, untarRootPath, nicelevel=None, pgksNeedSuffix=None, pathSuffix=''):
    """
    Untar all the packages and set up permissions/symlinks for each.

    @param packages - dict keyed by package name of parsed package dicts
                      (each with 'packageName', 'packageVersion', 'packagePath')
    @param service - service name, used for the .appdata symlink and user lookup
    @param untarRootPath - root directory under which packages are extracted
    @param nicelevel - passed through to untar() (currently unused there)
    @param pgksNeedSuffix - optional collection of package names whose version
                            directory gets pathSuffix appended
    @throws AgentException when extraction fails
    """
    self._updateProgress(80)
    count = 0
    for pkgDict in packages.itervalues():
        count += 1
        # check if package path exists already:
        # if the package is already untarred, move on, else create the path
        pkgName = pkgDict['packageName']
        needsSuffix = (pgksNeedSuffix is not None) and pkgName in pgksNeedSuffix
        untarPath = os.path.join(untarRootPath,
                                 pkgName,
                                 '%s%s' % (pkgDict['packageVersion'], pathSuffix if needsSuffix else ''))
        if os.path.exists(untarPath):
            # perhaps another thread has finished extraction, continue with next package
            continue
        os.makedirs(untarPath)
        try:
            self.untar(pkgDict['packagePath'], untarPath, nicelevel)
        except AgentException:
            # fix: clean up the partial extraction AND propagate the failure.
            # Previously the exception was swallowed and the code below then
            # operated on the just-removed path.
            rmrf(untarPath)
            raise
        # Note: i. atleast self should have 'rx' so that we can proceed setting 'rx' for group and others
        # if both belong to same group in future, then just self, group should be enough
        # ensure all parent dir of scripts dir have 'rx' so that we can really navigate to scripts dir and execute
        uname = getuserofpath(untarPath)
        chmod(untarPath, '+rx', sudoUser = uname)
        cronusPath = os.path.join(untarPath, 'cronus')
        if os.path.exists(cronusPath):
            uname = getuserofpath(cronusPath)
            chmod(cronusPath, '+rx', sudoUser = uname)
            # now give all scripts 'rx' permission
            scriptsPath = os.path.join(cronusPath, 'scripts')
            if os.path.exists(scriptsPath):
                uname = getuserofpath(scriptsPath)
                rchmod(scriptsPath, '+rx', sudoUser = uname)
        # issue #16, now add symlink to .appdata for easy access
        appdata_path = manifestutil.appDataPath(service)
        link_path = os.path.join(untarPath, '.appdata')
        LOG.info('Create .appdata symlink from %s to %s' % (link_path, appdata_path))
        utils.symlink(appdata_path, link_path)
        # Running as cronus user when higher privilege service (i.e. not chown all the package into the application user)
        if (not isHigherPrivilegeService(service)):
            uname = configutil.getAppUser()
            uid, gid = utils.getUidGid(uname)
            rchown(untarPath, uid, gid)
        # untar phase owns the 80-99% progress window
        self._updateProgress(calcProgress(80, 99, float(count) / len(packages)))
def _classfiy_packages(self, packages):
    """Split *packages* into (missing, present) by checking the package cache.

    A package is "present" when '<name>.cronus' exists under the package path.
    Returns a (non_existent_packages, filtered_packages) pair of lists.
    """
    missing = []
    present = []
    base_dir = PackageMgr.packagePath()
    for name in packages:
        if os.path.exists(os.path.join(base_dir, name + '.cronus')):
            present.append(name)
        else:
            missing.append(name)
    return missing, present
| |
from Tkinter import *
import ttk, tkFont, tkMessageBox
import itertools
import datetime
from Connect_Brisco_DB import Connect_DB
from DB_searchandFill import DB_search
from psycopg2 import sql
import serial
from psycopg2.extensions import AsIs
class GUIatFrontDesk:
def __init__(self,master):
    """Build the complete front-desk weigh-station UI inside *master*.

    Connects to the Postgres database, creates seven frames of widgets
    (entries, comboboxes, dropdowns, the trucks-in-yard listbox and the
    weigh-in / weigh-out buttons) and wires up their callbacks.
    """
    from PIL import Image, ImageTk
    self.master = master
    # ---------- Connect to database and initialize lists ----------
    self.Connect_Brisco_DB = Connect_DB('postgres','postgres','192.168.0.200','coffeegood')
    self.cur1 = self.Connect_Brisco_DB.crsr()
    # initializeLists('owner_db') also populates self.loggingco_list as a side effect
    self.init_list_truck = self.initializeLists('truckers_db')
    self.init_list_owner = self.initializeLists('owner_db')
    # ---------- Frames (grid layout: 4 rows x 3 columns) ----------
    self.frame1 = Frame(self.master,borderwidth =5,relief='raised')
    self.frame1.grid(row=0,column=0,sticky='ew')
    self.frame2 = Frame(self.master,borderwidth =5,relief='raised')
    self.frame2.grid(row=1,column=0,sticky='ew',ipady=20)
    self.frame3 = Frame(self.master,borderwidth =5,relief='raised')
    self.frame3.grid(row=2,column=0,sticky='ew',ipady=20)
    self.frame4 = Frame(self.master,borderwidth =5,relief='raised')
    self.frame4.grid(row=3,column=0,sticky='ew',ipady=20)
    self.frame5 = Frame(self.master,borderwidth =5,relief='raised')
    self.frame5.grid(row=0,column=1,sticky='nsew')
    self.frame6 = Frame(self.master,borderwidth =5,relief='raised')
    self.frame6.grid(row=1,column=1,sticky='nsew',rowspan=3)
    self.frame7 = Frame(self.master,borderwidth =5,relief='raised')
    self.frame7.grid(row=0,column=2,rowspan=4,sticky='nwes')
    # list of dictionaries for trucks in the yard and info attached to them
    self.Lst_truckInfo = []
    # ---------- Tk variables backing the comboboxes/entries ----------
    self.owner_combo_val = StringVar()
    self.FMA_combo_val = StringVar()
    self.wCircle_combo_val = StringVar()
    self.block_combo_val = StringVar()
    self.logCo_combo_val = StringVar()
    self.truckLicense_combo_val = StringVar()
    self.truckNum_combo_val = StringVar()
    self.axle_combo_val = StringVar()
    self.hauledBy_combo_val = StringVar()
    self.popDD_val = StringVar()
    self.sampleDD_val = StringVar()
    self.loggingCo_combo_val = StringVar()
    self.date_entry_var = StringVar()
    self.timeIn_entry_var = StringVar()
    self.timeOut_entry_var = StringVar()
    self.gross_weight_entry_var = StringVar()
    self.tare_weight_entry_var = StringVar()
    self.net_weight_entry_var = StringVar()
    self.loadNum_entry_var = StringVar()
    # ---------- Frame 1: ticket / block / piece-count entries ----------
    framenum = self.frame1  # fix: removed dead 'framenum = 1' assignment
    colm = 0
    rown = 0
    f1_lst_labels = ["TM9 Ticket","Block #","Pieces"]
    for strng in f1_lst_labels:
        self.create_place_label(framenum,strng,rown,colm,("Courier", 20,"bold"),W)
        rown = rown + 1
    colm = 1
    rown = 0
    pddx = (90,0)
    self.TM9_entry_var = StringVar()
    self.numPieces_entry_var = StringVar()
    self.blockNum_entry_var = StringVar()
    self.TM9_entry = self.create_place_entry(framenum,self.TM9_entry_var, rown, colm, ("Courier", 16,"bold"),20,E,pddx)
    rown = rown + 1
    self.blockNum_entry = self.create_place_entry(framenum,self.blockNum_entry_var, rown, colm, ("Courier", 16,"bold"),40,E,pddx)
    rown = rown + 1
    self.numPieces_entry = self.create_place_entry(framenum,self.numPieces_entry_var, rown, colm, ("Courier", 16,"bold"),None,E,pddx)
    # ---------- Frame 2: truck identification ----------
    framenum = self.frame2
    colm = 0
    rown = 0
    f2_lst_labels = ["Truck #","License Plate ","Hauling Contractor","Truck Axle"]
    for strng in f2_lst_labels:
        if rown==0:
            fnt_size=20
        else:
            fnt_size = 16
        self.create_place_label(framenum,strng,rown,colm,("Courier", fnt_size,"bold"),W)
        rown = rown + 1
    # fix: removed unused "List_frame2 = self.initializeLists('truckers_db')",
    # which re-ran the truckers_db query for nothing
    rown = 0
    colm = 1
    pddx = None
    self.truckNum_combo = self.create_place_combo(framenum,self.init_list_truck[0],self.truckNum_combo_val,rown,colm,("Courier", 20,"bold"),"truck",W,pddx)
    rown = rown + 1
    self.truckLicense_combo = self.create_place_combo(framenum,self.init_list_truck[1],self.truckLicense_combo_val,rown,colm,("Courier", 16,"bold"),"truck",W,pddx)
    rown = rown + 1
    self.hauledBy_combo = self.create_place_combo(framenum,self.init_list_truck[2],self.hauledBy_combo_val,rown,colm,("Courier", 16,"bold"),"truck",W,pddx)
    rown = rown + 1
    self.axle_combo = self.create_place_combo(framenum,self.init_list_truck[3],self.axle_combo_val,rown,colm,("Courier", 16,"bold"),"truck",W,pddx)
    # ---------- Frame 3: ownership / contract info ----------
    framenum = self.frame3
    colm = 0
    rown = 0
    f3_lst_labels = ["Owner","FMA #","Working Circle","Logging Contractor"]
    for strng in f3_lst_labels:
        if rown==0:
            fnt_size=20
        else:
            fnt_size = 16
        self.create_place_label(framenum,strng,rown,colm,("Courier", fnt_size,"bold"),W)
        rown = rown + 1
    rown = 0
    colm = 1
    pddx = None
    self.owner_combo = self.create_place_combo(framenum,self.init_list_owner[0],self.owner_combo_val,rown,colm,("Courier", 20,"bold"),"owner",W,pddx)
    rown = rown + 1
    self.FMA_combo = self.create_place_combo(framenum,self.init_list_owner[1],self.FMA_combo_val,rown,colm,("Courier", 16,"bold"),"owner",W,pddx)
    rown = rown + 1
    self.wCircle_combo = self.create_place_combo(framenum,self.init_list_owner[2],self.wCircle_combo_val,rown,colm,("Courier", 16,"bold"),"owner",W,pddx)
    rown = rown + 1
    self.loggingCo_combo = self.create_place_combo(framenum,self.loggingco_list,self.loggingCo_combo_val,rown,colm,("Courier", 16,"bold"),"owner",W,pddx)
    rown = rown + 1
    # ---------- Frame 4: population / sample-load dropdowns ----------
    framenum = self.frame4
    colm = 0
    rown = 0
    f4_lst_labels = ["Population","Sample Load"]
    for strng in f4_lst_labels:
        self.create_place_label(framenum,strng,rown,colm,("Courier", 20,"bold"),W)
        rown = rown + 1
    lst_pop = ['726','720','730','740','750','760','780','785']
    lst_sample = ['No','Yes']
    colm = 1
    rown = 0
    paddx = (180,0)
    self.popDD = self.create_place_dropdown(framenum, lst_pop, self.popDD_val , rown, colm, ("Courier", 20,"bold"),'ew',paddx)
    rown = rown + 1
    self.sampleDD = self.create_place_dropdown(framenum, lst_sample , self.sampleDD_val , rown, colm, ("Courier", 20,"bold"),'ew',paddx)
    # ---------- Frame 5: date / weights / load slip readout ----------
    framenum = self.frame5
    colm = 0
    rown = 0
    pddx = 0
    f6_lst_labels = ["Date: ","Gross Weight: ","Tare Weight: ","Net Weight: ","Load Slip #: "]
    self.dict_labels = {}
    for strng in f6_lst_labels:
        self.create_place_label(framenum,strng,rown,colm,("Courier", 16),E)
        rown = rown + 1
    colm = 0
    rown = 0
    pddx = 0
    paddy = 0
    # NOTE(review): the time-in/time-out entries were removed (commented out
    # upstream), but weighOUT still reads a 'timeIn' key from stored records -
    # confirm that mismatch.
    self.date_entry = self.create_place_entry(framenum,self.date_entry_var,rown,colm+1,("Courier", 16),paddy,E,pddx)
    rown = rown + 1
    self.gross_weight_entry = self.create_place_entry(framenum,self.gross_weight_entry_var,rown,colm+1,("Courier", 16),paddy,E,pddx)
    rown = rown + 1
    self.tare_weight_entry = self.create_place_entry(framenum,self.tare_weight_entry_var,rown,colm+1,("Courier", 16),paddy,E,pddx)
    rown = rown + 1
    self.net_weight_entry = self.create_place_entry(framenum,self.net_weight_entry_var,rown,colm+1,("Courier", 16),paddy,E,pddx)
    rown = rown + 1
    self.loadNum_entry = self.create_place_entry(framenum,self.loadNum_entry_var,rown,colm+1,("Courier", 16),paddy,E,pddx)
    rown = rown + 1
    # ---------- Frame 6: trucks-to-weigh-out listbox ----------
    framenum = self.frame6
    self.orig_colour = 'light grey'
    self.truckin_colour = 'orange'
    self.label_lstbox = Label(framenum, text = "Trucks to Weigh Out", borderwidth=2,relief='ridge')
    self.label_lstbox.config(font=("Courier", 20,"bold"),bg=self.orig_colour)
    self.label_lstbox.pack(side=TOP,expand=Y)
    self.TrucksInYard = Listbox(framenum)
    self.TrucksInYard.config(font=("Courier", 20,"bold"),bg=self.orig_colour)
    self.TrucksInYard.pack(side=TOP,expand=Y)
    self.TrucksInYard.bind("<<ListboxSelect>>", self.enable_weighOut)
    # ---------- Frame 7: logo + action buttons ----------
    framenum = self.frame7
    self.img = Image.open("Brisco_logo.png")
    self.tk_img = ImageTk.PhotoImage(self.img)
    self.label_image = Label(framenum,image=self.tk_img ,relief='groove')
    self.label_image.config(height = 250, width =250)
    self.label_image.grid(row = 0, column = 0, columnspan=2)
    pddx = 20
    pddy = (150,0)
    dimh = 8
    dimw = 10
    rown = 1
    colm = 0
    self.WeighIN = self.create_place_button(framenum, 'Enter\nValues', rown, colm, ("Courier", 22, "bold"),pddy,pddx,dimh,dimw,W,self.weighIN)
    self.WeighOUT = self.create_place_button(framenum, 'Weigh\nOut', rown, colm+1, ("Courier", 22, "bold"),pddy,pddx,dimh,dimw,E,self.weighOUT)
    # weigh-out only becomes available once a truck is selected
    self.WeighOUT.config(state='disabled',bg='grey')
    # close program with escape key
    self.master.bind('<Escape>', lambda e: self.master.destroy())
'''
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GUI Methods ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
def enable_weighOut(self,event):
self.WeighIN.config(state='disabled',bg='grey')
self.WeighOUT.config(state='normal',bg='green')
def weighIN(self):
    """Collect the weigh-in form values and INSERT a new row into barkies_db.

    NOTE(review): the insert is never committed here - confirm the connection
    uses autocommit, otherwise the row is lost.
    NOTE(review): nothing appends a record to self.Lst_truckInfo, which
    weighOUT later searches - confirm whether an append is missing here.
    """
    # 'Yes'/'No' dropdown -> 1/0 flag for the sampleloads column
    if self.sampleDD_val.get()=='No':
        binary_sample = 0
    else:
        binary_sample = 1
    Weighin_dict = {
        'daterecieved': self.date_entry.get(),
        'poploadslip' : int(self.popDD_val.get()),
        'count' : self.loadNum_entry.get(),
        'sampleloads' : binary_sample,
        'tm9_ticket' : self.TM9_entry.get(),
        'owner' : self.owner_combo_val.get(),
        'disposition_fmanum' : self.FMA_combo_val.get(),
        'workingcircle' : self.wCircle_combo_val.get(),
        'blocknum' : self.blockNum_entry.get(),
        'loggingco' : self.loggingCo_combo_val.get(),
        'haulingcontractor' : self.hauledBy_combo_val.get(),
        'truckplate' : self.truckLicense_combo_val.get(),
        'trucknum' : self.truckNum_combo_val.get(),
        'truckaxle' : int(self.axle_combo_val.get()),
        'grossweight' : self.gross_weight_entry.get(),
        'numpcsreceived' : self.numPieces_entry.get(),
        'tareweight': self.tare_weight_entry.get(),
        'netweight' : self.net_weight_entry.get()
    }
    columns = Weighin_dict.keys()
    values = [Weighin_dict[column] for column in columns]
    # column names come from the hard-coded dict keys above (not user input),
    # so AsIs is acceptable here; the values themselves are parameterized
    insert_statement = 'INSERT INTO barkies_db (%s) VALUES %s'
    try:
        self.cur1.execute(insert_statement, (AsIs(','.join(columns)), tuple(values)))
    except Exception:
        # fix: was a bare `except:` that also swallowed SystemExit/KeyboardInterrupt
        tkMessageBox.showinfo("WHOOPS!","Make sure all values are filled in!")
        # fix: print as a function call (works on py2 and py3)
        print(self.cur1.mogrify(insert_statement, (AsIs(','.join(columns)), tuple(values))))
def weighOUT(self):
    """Weigh out the truck currently selected in the TrucksInYard listbox.

    Looks up the stored weigh-in record for the selected truck, restores the
    form comboboxes/entries from it, reads the tare weight from the scale's
    serial port (falling back to 50 on any failure), writes the summary
    labels into frame 5, removes the truck from the listbox and UPDATEs its
    row in barkies_db keyed by the TM9 ticket.
    """
    # Find the stored record whose trucknum matches the listbox selection.
    # NOTE(review): raises StopIteration if nothing matches - confirm the
    # listbox and Lst_truckInfo can never get out of sync.
    trucknum_indx = next(index for (index, d) in enumerate(self.Lst_truckInfo) if d['trucknum'] == self.TrucksInYard.get(self.TrucksInYard.curselection()))
    dict_to_fill = self.Lst_truckInfo[trucknum_indx]
    del self.Lst_truckInfo[trucknum_indx]
    # Restore the weigh-in form from the stored record
    self.owner_combo.set(dict_to_fill['owner'])
    self.FMA_combo.set(dict_to_fill['disposition_fmanum'])
    self.wCircle_combo.set(dict_to_fill['workingcircle'])
    self.truckNum_combo.set(dict_to_fill['trucknum'])
    self.truckLicense_combo.set(dict_to_fill['truckplate'])
    self.hauledBy_combo.set(dict_to_fill['haulingcontractor'])
    self.axle_combo.set(dict_to_fill['truckaxle'])
    self.TM9_entry_var.set(dict_to_fill['tm9_ticket'])
    self.blockNum_entry_var.set(dict_to_fill['blocknum'])
    self.numPieces_entry_var.set(dict_to_fill['numpcsreceived'])
    try:
        # Read the current (tare) weight from the scale on the serial port;
        # the reading is whitespace-separated and field [1] is the weight
        ser = serial.Serial('/dev/ttyUSB0',9600)
        str_weight = ser.readline()
        self.tare_weight = str_weight.split()[1]
        self.net_weight = int(dict_to_fill['grossweight'])-int(self.tare_weight)
    except:
        # NOTE(review): bare except - any failure (no scale, bad read, parse
        # error) silently falls back to a hard-coded tare of 50; consider
        # narrowing to serial/ValueError/IndexError.
        self.tare_weight = 50
        self.net_weight = int(dict_to_fill['grossweight'])-self.tare_weight
    self.timeOut_now = str(datetime.datetime.now().strftime("%H:%M:%S"))
    label_list = []
    # Set labels after weigh in.
    # NOTE(review): dict_to_fill['timeIn'] looks like it will raise KeyError -
    # weighIN no longer stores a 'timeIn' key (that entry is commented out
    # there); confirm where these records are created.
    label_list = [dict_to_fill['daterecieved'],dict_to_fill['timeIn'],self.timeOut_now,str(dict_to_fill['grossweight']),str(self.tare_weight),str(self.net_weight),dict_to_fill['count'] ]
    rown = 0
    for labl in label_list:
        self.create_place_label(self.frame5, labl, rown, 1, ("Courier", 16), E)
        rown = rown + 1
    # Remove the truck from the yard list and flag the listbox visually
    self.TrucksInYard.delete(trucknum_indx)
    self.TrucksInYard.config(bg='bisque')
    self.label_lstbox.config(bg='bisque')
    WeighOut_dict = {
        'tareweight': self.tare_weight,
        'netweight' : self.net_weight,
        'timeOut' : self.timeOut_now
    }
    columns = WeighOut_dict.keys()
    values = [WeighOut_dict[column] for column in columns]
    # NOTE(review): no commit after this UPDATE - relies on autocommit; verify
    insert_statement = 'UPDATE barkies_db SET (%s) = %s WHERE tm9_ticket = %s;'
    strng = self.TM9_entry.get()
    self.cur1.execute(insert_statement, (AsIs(','.join(columns)), tuple(values), strng))
    # Swap button states back for the next weigh-in
    self.WeighOUT.config(state='disabled',bg='grey')
    self.WeighIN.config(state='normal',bg='green')
    self.update_colors_truck()
def update_colors_truck(self):
if self.Lst_truckInfo:
self.TrucksInYard.config(bg=self.truckin_colour)
self.label_lstbox.config(bg=self.truckin_colour)
else:
self.TrucksInYard.config(bg=self.orig_colour)
self.label_lstbox.config(bg=self.orig_colour)
def update_lists(self,event,strng,name_combo,Lst):
var_Selected = name_combo.current()
if strng == 'owner':
self.owner_combo.set(self.init_list_owner[0][var_Selected])
self.FMA_combo.set(self.init_list_owner[1][var_Selected])
self.wCircle_combo.set(self.init_list_owner[2][var_Selected])
# self.loggingCo_combo.set(self.init_list_owner[3][var_Selected])
elif strng == 'truck':
self.truckNum_combo.set(self.init_list_truck[0][var_Selected])
self.truckLicense_combo.set(self.init_list_truck[1][var_Selected])
self.hauledBy_combo.set(self.init_list_truck[2][var_Selected])
self.axle_combo.set(self.init_list_truck[3][var_Selected])
# self.loggingCo_combo.set(self.init_list_owner[3][var_Selected])
def initializeLists(self,table):
    """Fetch every row of *table*, sorted, transposed into per-column lists.

    Returns a list of columns (each a list of cell values, padded with None
    where rows are ragged). Side effect: for 'owner_db' this also refreshes
    self.loggingco_list with the distinct loggingco values from barkies_db.
    """
    self.cur1.execute('select * from "{}"'.format(table))
    rows = sorted(self.cur1.fetchall())
    columns = map(list, itertools.izip_longest(*rows))
    if table == 'owner_db':
        self.cur1.execute('select loggingco from "{}"'.format('barkies_db'))
        distinct = set(str(row[0]) for row in self.cur1.fetchall())
        self.loggingco_list = list(distinct)
    return columns
def create_place_label(self,frme,strng,rownum,columnum,fnt,stcky):
    """Create a Label showing *strng* in *frme*, grid it, and return it."""
    lbl = Label(frme, text = ' ')
    lbl.config(font=fnt, text=strng)
    lbl.grid(row=rownum, column=columnum,sticky=stcky)
    return lbl
def create_place_combo(self,frme,Lst,cmboVal,rownum,columnum,fnt,strng,stcky,pdx):
    """Create a ttk.Combobox filled from *Lst*, grid it, wire it to
    update_lists for group *strng*, and return it."""
    # enlarge the dropdown list font for readability at the scale desk
    bigfont = tkFont.Font(root=frme,family="Courier",size=30, weight='bold')
    frme.option_add("*TCombobox*Listbox*Font", bigfont)
    combo = ttk.Combobox(frme,textvariable = cmboVal)
    combo.config(font=fnt)
    combo['values'] = Lst
    # NOTE(review): preselects index 1 (not 0) and assumes len(Lst) >= 2 -
    # confirm intent
    combo.set(Lst[1])
    combo.grid(row=rownum, column=columnum,sticky=stcky,padx=pdx)
    combo.bind("<<ComboboxSelected>>", lambda event: self.update_lists(event,strng,combo,Lst))
    return combo
def create_place_dropdown(self, frme, DD_lst, ddVal, rownum, columnum, fnt,stcky,pdx):
    """Create an OptionMenu over *DD_lst* (preselecting the first entry),
    grid it, and return it."""
    ddVal.set(DD_lst[0])
    menu = OptionMenu(frme, ddVal, *DD_lst)
    menu.grid(row=rownum, column=columnum, sticky=stcky,padx=pdx)
    menu.config(font=fnt,bg='tan')
    return menu
def create_place_entry(self, frme, txt, rownum, columnum, fnt,pdy,stcky,pdx):
    """Create an Entry bound to variable *txt* in *frme*, grid it, return it."""
    entry = Entry(frme,text=txt)
    entry.grid(row=rownum, column=columnum,pady=pdy,sticky=stcky,padx=pdx)
    entry.config(font=fnt)
    return entry
def create_place_button(self, frme ,txt_name, rownum, columnum, fnt,pdy,pdx,dimmh,dimmw,stcky,cmmd):
    """Create a Button labelled *txt_name* invoking *cmmd*, grid it, return it."""
    btn = Button(frme, text=txt_name, command = cmmd)
    btn.grid(row=rownum, column=columnum, pady=pdy, padx=pdx, sticky = stcky)
    btn.config(height=dimmh, width=dimmw, bg='green', activebackground='red',font=fnt)
    return btn
def print_test(self,event,strng,name_combo,Lst):
var_Selected = name_combo.current()
selection_val = str(Lst[var_Selected])
print(strng)
print(selection_val)
def main():
    """Create the root window, build the front-desk GUI and run the event loop."""
    root = Tk()
    app = GUIatFrontDesk(root)  # keep a reference while the loop runs
    root.geometry("1200x500")
    root.mainloop()

if __name__ == '__main__':
    main()
| |
# Python
import operator
import datetime
import random
import time
# Django
from django.shortcuts import render_to_response, get_object_or_404, HttpResponse, HttpResponseRedirect, Http404
from django.http import HttpResponse, Http404
from django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.sites.models import Site
from mongodb import *
from utils import PlayerUtils
from django.utils.cache import _generate_cache_header_key, get_cache_key
from django.core.cache import cache
# Other
from pyofc2 import *
from stats.utils import ServerListUtils, PlayerUtils
from stats.models import UserProfile, ClassRank
from core import bitly
from core.views import update_online_users
@update_online_users
def index(request):
    """Stats landing page: redirect to a member's summary when an ID is
    posted (or when a logged-in user has a linked account), otherwise render
    the search form."""
    if 'member_id' not in request.POST:
        # No member ID is provided
        if request.user.is_authenticated() and request.user.get_profile().account_id:
            # Member with a linked account, redirect him to the page with his stats
            return HttpResponseRedirect(reverse('stats_summary', args = (int(request.user.get_profile().account_id),)))
        return render_to_response('stats/index.html', context_instance = RequestContext(request))
    member_id = request.POST['member_id']
    if not member_id:
        return render_to_response('stats/index.html', context_instance = RequestContext(request))
    try:
        member_id = int(member_id)
    except ValueError:
        # Non-numeric ID - fall back to the search form
        return render_to_response('stats/index.html', context_instance = RequestContext(request))
    return HttpResponseRedirect(reverse('stats_summary', args = (member_id,)))
@update_online_users
def servers(request):
    """Render the server browser with aggregate player counts."""
    server_list = get_server_list()
    date_retrieved = server_list[0]['date_retrieved']
    player_count = sum(s['sgc_player_count'] + s['sl_player_count'] for s in server_list)
    # each server holds at most 16 players
    player_count_max = 16 * len(server_list)
    context = {
        'servers': server_list,
        'date_retrieved': date_retrieved,
        'player_count': player_count,
        'player_count_max': player_count_max,
    }
    return render_to_response('stats/servers.html', context, context_instance = RequestContext(request))
@update_online_users
def stats(request, account_id, what):
    """Render the async loading shell for one of the stats sub-pages."""
    if what not in ('summary', 'kills', 'classes', 'maps', 'achievements'):
        raise Http404()
    data_url = reverse('stats_%s_data' % (what), args = (account_id,))
    context = {
        'account_id': account_id,
        'username': get_player_username(account_id),
        'what': what,
        'url': data_url,
    }
    return render_to_response('stats/loading.html', context, context_instance = RequestContext(request))
def summary(request, account_id):
    """Render the stats summary page for *account_id*.

    error_type 1 = unknown player, 2 = upstream (FireSky) servers offline.
    """
    player_stats = get_player_objectives(request, account_id)
    if not player_stats:
        return error_page(request, account_id, error_type = 2 if player_stats == False else 1)
    player_achievements = get_player_achievements(request, account_id)
    if not player_achievements:
        return error_page(request, account_id, error_type = 2 if player_achievements == False else 1)
    latest_achievements = get_player_latest_completed_achievements(player_achievements)[:6]
    top_class = get_player_top_class_by_kills(player_stats)
    # two jaffa artworks exist - pick one at random
    top_class_image = random.sample(['jaffa', 'jaffa_sobek'], 1)[0] if top_class == 'jaffa' else top_class
    top_class_data = get_player_top_class_data(player_stats, top_class)
    top_map = get_player_top_map_by_wins(player_stats)
    worst_map = get_player_worst_map_by_loses(player_stats)
    rival_class = get_player_rival_class(player_stats)
    easy_class = get_player_easy_class(player_stats)
    bitly_url = None
    # Only build a short share link when a logged-in user views their own stats
    if request.user.is_authenticated() and request.user.get_profile().account_id:
        account_id_linked = request.user.get_profile().account_id
        if int(account_id) == account_id_linked:
            current_site = Site.objects.get_current()
            current_site_url = current_site.domain
            # NOTE(review): API key is hard-coded in source - move to settings
            api = bitly.Api(login = 'sgrstats', apikey = 'R_88fd4a0cace4cbf92c1cd588531694c3')
            try:
                bitly_url = api.shorten('%s%s' % (current_site_url, reverse('stats_summary', args = (account_id,))))
            except Exception:
                # fix: was a bare except; shortening stays best-effort
                bitly_url = None
    return render_to_response('stats/summary.html', {
        'account_id': int(account_id),
        'username': get_player_username(account_id),
        'bitly_url': bitly_url,
        'player_stats': player_stats,
        'top_class': top_class,
        'top_class_image': top_class_image,
        'top_class_data': top_class_data,
        'latest_achievements': latest_achievements,
        'top_map': top_map,
        'worst_map': worst_map,
        'rival_class': rival_class,
        'easy_class': easy_class}, context_instance = RequestContext(request))
def kills(request, account_id):
    """Render the kills breakdown page for *account_id*."""
    player_stats = get_player_objectives(request, account_id)
    if not player_stats:
        # False => servers offline (2); None => unknown player (1)
        return error_page(request, account_id, error_type = 2 if player_stats == False else 1)
    context = {
        'account_id': int(account_id),
        'username': get_player_username(account_id),
        'player_stats': player_stats,
    }
    return render_to_response('stats/kills.html', context, context_instance = RequestContext(request))
def classes(request, account_id):
    """Render the per-class stats page for *account_id*."""
    player_stats = get_player_objectives(request, account_id)
    if player_stats is None:
        # unknown player
        return error_page(request, account_id, error_type = 1)
    if player_stats is False:
        # upstream servers offline
        return error_page(request, account_id, error_type = 2)
    context = {
        'account_id': int(account_id),
        'username': get_player_username(account_id),
        'player_stats': player_stats,
    }
    return render_to_response('stats/classes.html', context, context_instance = RequestContext(request))
def maps(request, account_id):
    """Render the per-map stats page, including best/worst class per map."""
    player_stats = get_player_objectives(request, account_id)
    if not player_stats:
        return error_page(request, account_id, error_type = 2 if player_stats == False else 1)
    context = {
        'account_id': int(account_id),
        'username': get_player_username(account_id),
        'player_stats': player_stats,
        'top_class': get_player_top_class_by_map(player_stats),
        'worst_class': get_player_worst_class_by_map(player_stats),
    }
    return render_to_response('stats/maps.html', context, context_instance = RequestContext(request))
def achievements(request, account_id):
    """Render the achievements page for *account_id*.

    error_type 1 = unknown player, 2 = upstream servers offline,
    3 = player has no completed achievements yet.
    """
    player_stats = get_player_objectives(request, account_id)
    player_achievements = get_player_achievements(request, account_id)
    if not player_stats or not player_achievements:
        # fix: the error type was previously derived from player_stats alone,
        # so a failed achievements lookup misreported "servers offline" as
        # "unknown player"; examine whichever lookup actually failed.
        failed = player_stats if not player_stats else player_achievements
        return error_page(request, account_id, error_type = 2 if failed == False else 1)
    player_completed_achievements = get_player_completed_achievements(player_achievements)
    try:
        total_count = player_achievements['total_count']
        completed_count = player_achievements['completed_count']
    except KeyError:
        # old documents may lack the count fields
        total_count = 0
        completed_count = 0
    if total_count and completed_count:
        percent_completed = (float(completed_count) / float(total_count)) * 100
    else:
        percent_completed = 0
    if not player_completed_achievements:
        return error_page(request, account_id, error_type = 3)
    return render_to_response('stats/achievements.html', {'account_id': int(account_id), 'username': get_player_username(account_id), 'player_stats': player_stats, 'player_achievements': player_completed_achievements, 'total_count': int(total_count), 'completed_count': int(completed_count), 'percent_completed': percent_completed}, context_instance = RequestContext(request))
# @todo: To implement.
def leaderboards(request, account_id):
    """Render the leaderboards page (placeholder - not yet implemented)."""
    player_stats = get_player_objectives(request, account_id)
    if player_stats is None:
        return error_page(request, account_id, error_type = 1)
    if player_stats is False:
        return error_page(request, account_id, error_type = 2)
    context = {'account_id': int(account_id), 'player_stats': player_stats}
    return render_to_response('stats/leaderboards.html', context, context_instance = RequestContext(request))
# Helper functions
def get_server_list():
server_list = database.servers.find().sort('sgc_player_count', -1)
if server_list.count() == 0:
# No data about servers is available
server_utils = ServerListUtils()
server_list = server_utils.update_server_list(return_fetched_data = True)
server_list = format_total_player_count(server_list)
return server_list
def get_player_objectives(request, account_id, fields = None):
    """Return the player's objectives/stats document, with a 5-minute cache.

    Return values: a dict on success, None when the player doesn't exist,
    False when the upstream (FireSky) servers are unreachable and no stale
    copy is stored locally. `!= False` / `== False` comparisons below are
    deliberate: None and {} must NOT be treated like the False sentinel.
    """
    key = _get_cache_key_name('player.objectives', account_id)
    # False doubles as the cache-miss sentinel, so a cached None ("no such
    # player") is still served straight from the cache
    player_stats = cache.get(key, False)
    if player_stats != False:
        return player_stats
    account_id = int(account_id)
    # only accept stored documents newer than DATA_CACHE_PERIOD seconds
    diff = datetime.datetime.now() - datetime.timedelta(seconds = settings.DATA_CACHE_PERIOD)
    player_stats = database.players_objective_lists.find_one({'_id': account_id, 'date_retrieved': {'$gte': diff}}, fields = fields)
    if not player_stats:
        # No data about this player or the data is stale - try to fetch fresh data
        player_utils = PlayerUtils([account_id])
        player_stats = player_utils.update_players_objective_lists(return_fetched_data = True)
        if player_stats == False:
            # FireSky servers are probably offline, retrieve last saved objectives (if any) we have for this user
            player_stats = database.players_objective_lists.find_one({'_id': account_id}, fields = fields)
            if not player_stats:
                # no stale copy either: normalize to None ("unknown player")
                player_stats = None
    # Format total played times
    if player_stats:
        player_stats = format_classes_rank(format_total_played_times(player_stats))
    if player_stats != False:
        # If data was successfully fetched or this player doesn't exist, we cache the value
        cache.set(key, player_stats, 300)
    return player_stats
def get_player_achievements(request, account_id, fields = None):
    """Return the player's achievements document, with a 5-minute cache.

    Mirrors get_player_objectives: dict on success, None for an unknown
    player, False when upstream is down and no stale copy exists. The
    `!= False` / `== False` comparisons are deliberate sentinel checks.
    """
    key = _get_cache_key_name('player.achievements', account_id)
    # False doubles as the cache-miss sentinel so a cached None still hits
    player_achievements = cache.get(key, False)
    if player_achievements != False:
        return player_achievements
    account_id = int(account_id)
    # only accept stored documents newer than DATA_CACHE_PERIOD seconds
    diff = datetime.datetime.now() - datetime.timedelta(seconds = settings.DATA_CACHE_PERIOD)
    player_achievements = database.players_achievement_lists.find_one({'_id': account_id, 'date_retrieved': {'$gte': diff}}, fields = fields)
    if not player_achievements:
        # No data about this player or the data is stale - fetch the fresh data
        player_utils = PlayerUtils([account_id])
        player_achievements = player_utils.update_players_achievement_lists(return_fetched_data = True)
        if player_achievements == False:
            # FireSky servers are probably offline retrieve last saved achievements (if any) we have for this user
            player_achievements = database.players_achievement_lists.find_one({'_id': account_id}, fields = fields)
            if not player_achievements:
                # no stale copy either: normalize to None ("unknown player")
                player_achievements = None
    # Format dates
    if player_achievements:
        player_achievements = format_achievements_dates(player_achievements)
    if player_achievements != False:
        # If data was successfully fetched or this player doesn't exist, we cache the value
        cache.set(key, player_achievements, 300)
    return player_achievements
def get_player_completed_achievements(player_achievements):
    """Return the achievements whose 'completed_on' date is non-empty,
    preserving their original order."""
    return [a for a in player_achievements['achievements'] if a['completed_on'] != '']
def get_player_latest_completed_achievements(player_achievements):
    """Return completed achievements as (completed_on, id, title) tuples,
    newest completion first."""
    # inline the completed-achievement filter so this helper stands alone
    completed = [a for a in player_achievements['achievements'] if a['completed_on'] != '']
    triples = [(a['completed_on'], a['id'], a['title']) for a in completed]
    return sorted(triples, key = operator.itemgetter(0), reverse = True)
def format_completed_achievements(player_achievements):
    """Return completed achievements as (completed_on, title) pairs sorted by
    completion date, oldest first."""
    # inline the completed-achievement filter so this helper stands alone
    pairs = [(a['completed_on'], a['title'])
             for a in player_achievements['achievements']
             if a['completed_on'] != '']
    return sorted(pairs, key = operator.itemgetter(0))
def get_player_top_class_by_kills(player_stats):
    """Return the name of the class with the most total kills (first in the
    order below on ties)."""
    data = {
        'soldier': player_stats['SGR_Soldier_KillsTotal'],
        'commando': player_stats['SGR_Commando_KillsTotal'],
        'scientist': player_stats['SGR_Scientist_KillsTotal'],
        'goauld': player_stats['SGR_Goauld_KillsTotal'],
        'jaffa': player_stats['SGR_Jaffa_KillsTotal'],
        'ashrak': player_stats['SGR_Ashrak_KillsTotal']
    }
    best = max(data.values())
    return next(k for k, v in data.items() if v == best)
def get_player_top_map_by_wins(player_stats):
    """Return (map_name, wins) for the map with the most wins; the two
    Leonops arenas (Court + Arena) are counted as one map."""
    data = {
        'sgc': player_stats['SGR_Account_WinsOnSGC'],
        'amarna': player_stats['SGR_Account_WinsOnAmarna'],
        'whiteout': player_stats['SGR_Account_WinsOnWhiteout'],
        'leonops': (player_stats['SGR_Account_WinsOnCourt'] + player_stats['SGR_Account_WinsOnArena']),
    }
    best = max(data.values())
    return next((k, v) for k, v in data.items() if v == best)
def get_player_worst_map_by_loses(player_stats):
    """Return (map_name, losses) for the map with the most losses; the two
    Leonops arenas (Court + Arena) are counted as one map."""
    data = {
        'sgc': player_stats['SGR_Account_LossesOnSGC'],
        'amarna': player_stats['SGR_Account_LossesOnAmarna'],
        'whiteout': player_stats['SGR_Account_LossesOnWhiteout'],
        'leonops': (player_stats['SGR_Account_LossesOnCourt'] + player_stats['SGR_Account_LossesOnArena']),
    }
    worst = max(data.values())
    return next((k, v) for k, v in data.items() if v == worst)
def get_player_rival_class(player_stats):
    """Return (class_name, deaths) for the class this player was most often
    killed by."""
    data = {
        'soldier': player_stats['SGR_Account_KilledBySoldier'],
        'commando': player_stats['SGR_Account_KilledByCommando'],
        'scientist': player_stats['SGR_Account_KilledByScientist'],
        'goauld': player_stats['SGR_Account_KilledByGoauld'],
        'jaffa': player_stats['SGR_Account_KilledByJaffa'],
        'ashrak': player_stats['SGR_Account_KilledByAshrak']
    }
    worst = max(data.values())
    return next((k, v) for k, v in data.items() if v == worst)
def get_player_easy_class(player_stats):
    """ Returns a (class name, kills) tuple for the class the player killed most. """
    kills_per_class = {
        'soldier': player_stats['SGR_Account_SoldierKills'],
        'commando': player_stats['SGR_Account_CommandoKills'],
        'scientist': player_stats['SGR_Account_ScientistKills'],
        'goauld': player_stats['SGR_Account_GoauldKills'],
        'jaffa': player_stats['SGR_Account_JaffaKills'],
        'ashrak': player_stats['SGR_Account_AshrakKills']
    }
    # First item holding the maximum kills, as in the original implementation.
    return max(kills_per_class.items(), key=lambda pair: pair[1])
def get_player_top_class_data(player_stats, top_class):
    """ Returns headline stats (kills, deaths, streak, wins, losses) for the given class.

    top_class is a lowercase class name (e.g. 'jaffa'); the stat keys use the
    capitalized form, so only the first letter is upper-cased.
    """
    class_key = top_class[0].upper() + top_class[1:]
    stat_suffixes = [
        ('kills', 'KillsTotal'),
        ('deaths', 'KilledTotal'),
        ('streak', 'HighestMatchKillStreak'),
        ('wins', 'WinsTotal'),
        ('losses', 'LossesTotal'),
    ]
    return dict((label, player_stats['SGR_%s_%s' % (class_key, suffix)])
                for label, suffix in stat_suffixes)
def format_total_played_times(player_stats):
    """ Converts total played times from seconds to 'Xh Ymin' strings.

    Rewrites the per-account/per-class 'SGR_*_TimePlayedTotal' keys and the
    per-map 'SGR_Account_TimePlayedOn*' keys in place and returns the dict.

    Uses floor division (divmod) so the result is correct on both Python 2
    and Python 3 -- the previous plain '/' produced float hours/minutes
    (e.g. '1.0333h') under Python 3.  Also avoids shadowing the builtins
    `type` and `map` that the old loop variables clobbered.
    """
    def _to_hours_minutes(seconds):
        # Whole hours plus the leftover whole minutes.
        hours, remainder = divmod(seconds, 60 * 60)
        return '%sh %smin' % (hours, remainder // 60)
    # Account and classes
    for category in ['Account', 'SGC', 'SystemLords', 'Soldier', 'Commando', 'Scientist', 'Goauld', 'Jaffa', 'Ashrak']:
        key = 'SGR_%s_TimePlayedTotal' % (category,)
        player_stats[key] = _to_hours_minutes(player_stats[key])
    # Maps
    for map_name in ['SGC', 'Amarna', 'Whiteout', 'Court', 'Arena']:
        key = 'SGR_Account_TimePlayedOn%s' % (map_name,)
        player_stats[key] = _to_hours_minutes(player_stats[key])
    return player_stats
def format_classes_rank(player_stats):
    """ Calculates each class's current rank title from its earned experience. """
    class_categories = [(0, 'Soldier'), (0, 'Commando'), (0, 'Scientist'),
                        (1, 'Goauld'), (1, 'Jaffa'), (1, 'Ashrak')]
    for category, class_name in class_categories:
        exp_points = player_stats['SGR_%s_ExperiencePointsEarned' % (class_name)]
        # Highest rank whose minimum experience the player has reached.
        ranks = ClassRank.objects.filter(category = category, exp_min__lte = exp_points).order_by('-exp_min')
        try:
            rank_title = ranks[0].title
        except IndexError:
            rank_title = 'Unknown'
        player_stats['SGR_%s_Rank' % (class_name)] = rank_title
    return player_stats
def get_player_top_class_by_map(player_stats):
    """ For each map, returns the (class, wins) pair with the most wins. """
    class_names = ['Soldier', 'Commando', 'Scientist', 'Goauld', 'Jaffa', 'Ashrak']
    top_per_map = {}
    for map_name in ['SGC', 'Amarna', 'Whiteout', 'Court', 'Arena']:
        wins = dict((cls, int(player_stats['SGR_%s_WinsOn%s' % (cls, map_name)]))
                    for cls in class_names)
        # max() keeps the first pair reaching the largest win count,
        # matching the original first-match tie-breaking.
        top_per_map[map_name] = max(wins.items(), key=lambda pair: pair[1])
    return top_per_map
def get_player_worst_class_by_map(player_stats):
    """ For each map, returns the (class, losses) pair with the most losses. """
    class_names = ['Soldier', 'Commando', 'Scientist', 'Goauld', 'Jaffa', 'Ashrak']
    worst_per_map = {}
    for map_name in ['SGC', 'Amarna', 'Whiteout', 'Court', 'Arena']:
        losses = dict((cls, int(player_stats['SGR_%s_LossesOn%s' % (cls, map_name)]))
                      for cls in class_names)
        # max() keeps the first pair reaching the largest loss count,
        # matching the original first-match tie-breaking.
        worst_per_map[map_name] = max(losses.items(), key=lambda pair: pair[1])
    return worst_per_map
def format_achievements_dates(player_achievements):
    """ Parses each achievement's 'completed_on' ISO string into a datetime.

    Entries with an empty 'completed_on' are left untouched; the '.000'
    millisecond suffix (and anything after it) is stripped before parsing.
    """
    for entry in player_achievements['achievements']:
        raw_date = entry['completed_on']
        if raw_date == '':
            continue
        entry['completed_on'] = datetime.datetime.strptime(
            raw_date.split('.000')[0], '%Y-%m-%dT%H:%M:%S')
    return player_achievements
def format_total_player_count(server_list):
    """ Annotates each server dict with the combined SGC + SL player count.

    Mutates the server dicts in place and returns them in a new list.
    """
    for server in server_list:
        server['total_player_count'] = (server['sgc_player_count']
                                        + server['sl_player_count'])
    return list(server_list)
def get_next_rank_title_and_exp_points(class_category = 0, exp_current = 0):
    """ Returns (title, exp needed, exp total) for the next rank, or None if none is left. """
    ranks_above = ClassRank.objects.filter(category = class_category, exp_min__gt = exp_current).order_by('exp_min')
    try:
        next_rank = ranks_above[0]
    except IndexError:
        # Player is already at (or beyond) the top rank for this category.
        return None
    exp_needed = next_rank.exp_min - exp_current
    return (next_rank.title, exp_needed, next_rank.exp_min)
def get_player_username(account_id):
    """
    Returns player's username if player with this account ID has a linked account,
    None otherwise.

    The lookup result is cached, including a cached None for "no linked
    account".  False is used as the cache-miss sentinel, so it is compared
    by identity (``is not False``) rather than equality, per PEP 8.
    """
    key = _get_cache_key_name('player.stats.u', account_id)
    username = cache.get(key, False)
    if username is not False:
        return username
    try:
        username = UserProfile.objects.get(account_id = account_id).user.username
    except (ObjectDoesNotExist, MultipleObjectsReturned):
        username = None
    cache.set(key, username)
    return username
# Other
def error_page(request, account_id, error_type = 1):
    """ Renders the generic stats error page for the given account and error type. """
    context = {
        'account_id': int(account_id),
        'error_type': error_type,
    }
    return render_to_response('stats/error.html', context,
                              context_instance = RequestContext(request))
def _get_cache_key_name(prefix, account_id):
return '%s.%s' % (prefix, account_id)
| |
import logging
import datetime
import os
import pytest
import mongomock
from mock import Mock
from bripipetools import model as docs
from bripipetools import database
# Emit debug-level output for this test module's setup/teardown messages.
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
def test_database_connection():
    """Smoke-test that a research database connection can be opened."""
    # TODO: come up with a better way to test this
    connection = database.connect('researchdb')
    assert ('bri' in connection.name)
    # Collections are only expected outside the default test configuration.
    if os.environ.get('DB_PARAM_FILE') != 'default.ini':
        assert (connection.collection_names())
@pytest.fixture(scope='function')
def mock_db():
    # GIVEN a mocked version of the TG3 Mongo database
    logger.debug("[setup] mock database, connect to mock Mongo database")
    client = mongomock.MongoClient()
    yield client.db
    logger.debug("[teardown] mock database, disconnect from mock Mongo database")
@pytest.fixture(scope='function')
def mock_dbobject():
    # GIVEN the definition of a single database object
    logger.debug("[setup] mock database object")
    dbobject = {
        '_id': 'mockobject',
        'updateField': 'value',
        'arrayField': ['foo', 'baz'],
    }
    yield dbobject
    logger.debug("[teardown] mock database object")
@pytest.mark.usefixtures('mock_db')
class TestDatabaseOperations:
    """
    Tests methods in the ``database.operations`` module for interacting
    with databases such as GenLIMS.
    """
    @pytest.mark.parametrize(
        'dbobject_exists', [False, True]
    )
    def test_find_objects(self, mock_db, mock_dbobject, dbobject_exists):
        """Query through the `find_objects` wrapper, with and without a match."""
        # AND the mock database is either empty or contains a mocked
        # collection with a single pre-defined object
        if dbobject_exists:
            mock_db['mockcollection'].insert_one(mock_dbobject)
        # WHEN the wrapper function `find_objects()` is used to query for
        # objects in a collection
        mock_fn = Mock(name='mock_fn',
                       return_value=(mock_db,
                                     {'_id': mock_dbobject['_id']}))
        # Mock instances lack __name__; set one explicitly since the wrapping
        # decorator presumably reads it -- TODO confirm in operations module
        mock_fn.__name__ = 'mock_fn'
        wrapped_fn = database.find_objects('mockcollection')(mock_fn)
        dbobjects = wrapped_fn()
        # THEN should return a list of objects equal in length to the matching
        # number of objects in the database; if any objects were retrieved,
        # they should include the expected fields and values
        test_query = {'_id': mock_dbobject['_id']}
        assert (type(dbobjects) == list)
        assert (len(dbobjects)
                == mock_db['mockcollection'].find(test_query).count())
        if dbobject_exists:
            assert (dbobjects[0] == mock_dbobject)
    @pytest.mark.parametrize(
        'dbobject_exists', [False, True]
    )
    def test_insert_objects(self, mock_db, mock_dbobject, dbobject_exists):
        """Insert a single object through the `insert_objects` wrapper."""
        # AND the mock database is either empty or contains a mocked
        # collection with a single pre-defined object
        if dbobject_exists:
            mock_db['mockcollection'].insert_one(mock_dbobject)
        # WHEN an object is inserted into the database (in the mock
        # collection) using the wrapper function `insert_objects()`
        mock_fn = Mock(name='mock_fn',
                       return_value=(mock_db, mock_dbobject))
        mock_fn.__name__ = 'mock_fn'
        wrapped_fn = database.insert_objects('mockcollection')(mock_fn)
        wrapped_fn()
        # THEN only the single object should be in the database, in the mock
        # collection, and should match the mock object definition
        test_query = {'_id': mock_dbobject['_id']}
        assert (mock_db['mockcollection'].find(test_query).count() == 1)
        assert (mock_db['mockcollection'].find_one(test_query)
                == mock_dbobject)
    @pytest.mark.parametrize(
        'update_field',
        [
            {'newField': 'value'},
            {'updateField': 'newvalue'},
            {'skipField': None}
        ]
    )
    def test_insert_objects_with_update(self, mock_db, mock_dbobject,
                                        update_field):
        """Re-insert an existing object; fields merge and None values are skipped."""
        # AND the mock database contains a mocked collection with
        # a single pre-defined object
        mock_db['mockcollection'].insert_one(mock_dbobject)
        # AND the local copy of the mock object is updated with either
        # a new field or an updated value of an existing field
        mock_dbobject.update(update_field)
        # WHEN an object with the same ID as the existing object, but with
        # different fields, is inserted into the database (in the mock
        # collection) using the wrapper function `insert_objects()`
        mock_fn = Mock(name='mock_fn',
                       return_value=(mock_db, mock_dbobject))
        mock_fn.__name__ = 'mock_fn'
        wrapped_fn = database.insert_objects('mockcollection')(mock_fn)
        wrapped_fn()
        # THEN only the single object should be in the database, all unchanged
        # fields should match the original mock object definition, and new or
        # updated fields should match the expected value; any fields provided
        # in the input object with a value of 'None' should be skipped
        test_query = {'_id': mock_dbobject['_id']}
        test_dbobject = mock_db['mockcollection'].find_one(test_query)
        assert (mock_db['mockcollection'].find(test_query).count() == 1)
        assert (all({test_dbobject[field] == mock_dbobject[field]
                     for field in list(mock_dbobject.keys())
                     if field not in list(update_field.keys())}))
        for field, value in list(update_field.items()):
            if value is not None:
                assert(test_dbobject[field] == value)
        assert ('skipField' not in test_dbobject)
    def test_insert_objects_multiple(self, mock_db, mock_dbobject):
        """Insert a list of objects in a single wrapped call."""
        # WHEN inserting the object
        new_dbobject = mock_dbobject.copy()
        new_dbobject['_id'] = 'newmockobject'
        mock_fn = Mock(name='mock_fn',
                       return_value=(mock_db, [mock_dbobject, new_dbobject]))
        mock_fn.__name__ = 'mock_fn'
        wrapped_fn = database.insert_objects('mockcollection')(mock_fn)
        wrapped_fn()
        # THEN new objects should be in database
        assert (mock_db['mockcollection'].find().count() == 2)
    @pytest.mark.parametrize(
        'test_collection, test_function',
        [
            ('genomicsSamples', database.get_genomicsSamples),
            ('genomicsCounts', database.get_genomicsCounts),
            ('genomicsMetrics', database.get_genomicsMetrics),
            ('genomicsRuns', database.get_genomicsRuns),
            ('genomicsWorkflowbatches', database.get_genomicsWorkflowbatches)
        ]
    )
    def test_wrapped_get_functions(self, mock_db, test_collection, test_function):
        """Each collection-specific `get_*` helper returns matching objects."""
        # WHEN using a wrapped get function to query for objects in
        # the specified collection
        logger.debug("test `get_{}()`".format(test_collection))
        test_query = {'_id': 'mockobject'}
        dbobjects = test_function(mock_db, test_query)
        # THEN should return a list of objects equal in length to the matching
        # number of objects in the database for the specified collection
        assert (len(dbobjects)
                == mock_db[test_collection].find(test_query).count())
    @pytest.mark.parametrize(
        'test_collection, test_function',
        [
            ('genomicsSamples', database.put_genomicsSamples),
            ('genomicsCounts', database.put_genomicsCounts),
            ('genomicsMetrics', database.put_genomicsMetrics),
            ('genomicsRuns', database.put_genomicsRuns),
            ('genomicsWorkflowbatches', database.put_genomicsWorkflowbatches)
        ]
    )
    def test_wrapped_put_functions(self, mock_db, mock_dbobject,
                                   test_collection, test_function):
        """Each collection-specific `put_*` helper stores the object once."""
        # WHEN using a wrapped put function to insert object(s) into
        # the specified collection
        logger.debug("test `put_{}()`".format(test_collection))
        test_function(mock_db, mock_dbobject)
        # THEN only the single object should be in the database, in the
        # specified collection
        test_query = {'_id': mock_dbobject['_id']}
        assert (mock_db[test_collection].find(test_query).count() == 1)
    @pytest.mark.parametrize(
        'id_exists', [False, True]
    )
    def test_create_workflowbatch_id_new(self, mock_db, id_exists):
        """Workflow batch IDs get an incremented suffix when prefix/date exist."""
        # AND a prefix/date combination corresponding to the type and date
        # of the workflow batch is either new or already exists in the
        # 'workflowbatches' collection in the database
        if id_exists:
            mock_db.genomicsWorkflowbatches.insert_one(
                {'_id': 'mockprefix_2000-01-01_1',
                 'date': datetime.datetime(2000, 1, 1, 0, 0)})
        # WHEN creating new workflow batch ID based on a prefix (which
        # corresponds to the type of the workflow batch) and the date on
        # which the workflow batch was run
        wb_id = database.create_workflowbatch_id(
            db=mock_db,
            prefix='mockprefix',
            date=datetime.datetime(2000, 1, 1, 0, 0))
        # THEN the constructed workflow batch ID should end in the
        # expected number: 1 if new, or 2 if the same prefix/date combo
        # already existed in the 'genomicsWorkflowbatches' collection
        id_tag = 2 if id_exists else 1
        assert (wb_id == 'mockprefix_2000-01-01_{}'.format(id_tag))
    @pytest.mark.parametrize(
        'field_level', [-1, 0, 1, 2]
    )
    def test_search_ancestors_field(self, mock_db, field_level):
        """`search_ancestors` walks up `parentId` links until a field is found."""
        # AND a hierarchy of objects in the 'samples' collection, with
        # parent relationship specified by the 'parentId' field
        mock_db.samples.insert_one(
            {'_id': 'sample0', 'parentId': 'sample1'})
        mock_db.samples.insert_one(
            {'_id': 'sample1', 'parentId': 'sample2'})
        # field_level -1 upserts '_id' 'sample-1', which is not part of
        # the sample0 -> sample1 -> sample2 chain
        mock_db.samples.update_one(
            {'_id': 'sample{}'.format(field_level)},
            {'$set': {'mockfield': 'mockvalue'}},
            upsert=True)
        # WHEN searching for the field among all sample ancestors in
        # the hierarchy
        value = database.search_ancestors(mock_db, 'sample0', 'mockfield')
        # THEN should return the value of the field from whichever level
        # above or equal to the input sample that the field was found; if
        # the field does not exist in any samples in the hierarchy, should
        # return 'None'
        expected_result = 'mockvalue' if field_level >= 0 else None
        assert (value == expected_result)
    def test_search_ancestors_no_parent(self, mock_db):
        """`search_ancestors` returns None when the `parentId` link is absent."""
        # AND a hierarchy of objects in the 'samples' collection, but
        # parent relationship indicator is missing
        mock_db.samples.insert_one(
            {'_id': 'sample0'})
        mock_db.samples.insert_one(
            {'_id': 'sample1', 'mockfield': 'mockvalue'})
        # WHEN searching for the field among all sample ancestors in
        # the hierarchy
        value = database.search_ancestors(mock_db, 'sample0', 'mockfield')
        # THEN should return None
        assert (value is None)
class TestMapping:
    """
    Tests the key/field mapping helpers in the ``database`` module
    (camelCase-to-snake_case conversion and doc-to-model mapping).
    """
    @pytest.mark.parametrize(
        'test_input, expected_result',
        [
            ({'aB': None}, {'a_b': None}),
            ({'aB': [{'bC': None}]}, {'a_b': [{'b_c': None}]}),
            ({'_id': None}, {'_id': None}),
        ]
    )
    def test_map_keys(self, test_input, expected_result):
        # GIVEN any state
        # WHEN camelCase keys/fields in an object are converted to snake_case
        # THEN keys at all nested levels should be converted to snake case
        # (with the exception of '_id', which should be unchanged)
        assert (database.map_keys(test_input) == expected_result)
    def test_get_model_class(self):
        # GIVEN any state
        # WHEN searching for matched model class based on object type
        # THEN should return expected class name
        assert (database.get_model_class({'type': 'sequenced library'})
                == 'SequencedLibrary')
        assert (database.get_model_class({'type': 'library'})
                == 'Library')
    def test_map_to_object(self):
        # GIVEN any state
        # WHEN mapping a database object to a model class instance
        doc = {'_id': 'lib7293_C6VG0ANXX',
               'parentId': 'lib7293',
               'type': 'sequenced library',
               'rawData': [{'path': None,
                            'laneId': 'L001',
                            'sampleNumber': 1}]}
        obj = database.map_to_object(doc)
        # THEN the model class instance should be the correct type and
        # include the appropriately formatted fields/attributes
        # TODO: try to remove dependency on model/docs module when testing,
        # if possible (and maybe even in the method itself)
        assert (type(obj) is docs.SequencedLibrary)
        assert (all([hasattr(obj, field)
                     for field in ['_id', 'parent_id', 'type', 'raw_data',
                                   'date_created', 'last_updated']]))
        # BUG FIX: the original `all([field in obj.raw_data] for field in ...)`
        # iterated over one-element *lists*, which are always truthy, so the
        # assertion could never fail.  Check the snake_case fields inside each
        # raw-data entry instead (keys are converted recursively, as shown by
        # the nested-list case in test_map_keys).
        assert (all(field in entry
                    for entry in obj.raw_data
                    for field in ['path', 'lane_id', 'sample_number']))
        assert (obj.last_updated == obj.date_created)
| |
import gzip
import os
import pathlib
import sys
from functools import partial
from time import sleep
import cloudpickle
import pytest
from fsspec.compression import compr
from fsspec.core import open_files
from fsspec.implementations.local import LocalFileSystem
from packaging.version import parse as parse_version
from tlz import concat, valmap
from dask import compute
from dask.bytes.core import read_bytes
from dask.bytes.utils import compress
from dask.utils import filetexts
# Run all graphs on the synchronous scheduler so tests are deterministic.
compute = partial(compute, scheduler="sync")

# Two small JSON fixture files; the leading '.' keeps them out of unrelated
# globs while still matching the '.test.*' patterns used below.
files = {
    ".test.accounts.1.json": (
        b'{"amount": 100, "name": "Alice"}\n'
        b'{"amount": 200, "name": "Bob"}\n'
        b'{"amount": 300, "name": "Charlie"}\n'
        b'{"amount": 400, "name": "Dennis"}\n'
    ),
    ".test.accounts.2.json": (
        b'{"amount": 500, "name": "Alice"}\n'
        b'{"amount": 600, "name": "Bob"}\n'
        b'{"amount": 700, "name": "Charlie"}\n'
        b'{"amount": 800, "name": "Dennis"}\n'
    ),
}
# CSV fixtures, including one in a subdirectory for path-handling tests.
csv_files = {
    ".test.fakedata.1.csv": (b"a,b\n" b"1,2\n"),
    ".test.fakedata.2.csv": (b"a,b\n" b"3,4\n"),
    "subdir/.test.fakedata.2.csv": (b"a,b\n" b"5,6\n"),
}
def to_uri(path):
    """Return *path* as an absolute, OS-independent ``file://`` URI."""
    absolute = os.path.abspath(path)
    return pathlib.Path(absolute).as_uri()
def test_unordered_urlpath_errors():
    # Unordered urlpath argument
    unordered_paths = {
        "sets/are.csv",
        "unordered/so/they.csv",
        "should/not/be.csv",
        "allowed.csv",
    }
    with pytest.raises(TypeError):
        read_bytes(unordered_paths)
def test_read_bytes():
    """read_bytes yields a bytes sample plus nested lists of delayed blocks."""
    with filetexts(files, mode="b"):
        sample, blocks = read_bytes(".test.accounts.*")
        first_file = files[sorted(files)[0]]
        assert isinstance(sample, bytes)
        assert sample[:5] == first_file[:5]
        assert sample.endswith(b"\n")
        assert isinstance(blocks, (list, tuple))
        assert isinstance(blocks[0], (list, tuple))
        assert hasattr(blocks[0][0], "dask")
        assert sum(map(len, blocks)) >= len(files)
        results = compute(*concat(blocks))
        assert set(results) == set(files.values())
def test_read_bytes_sample_delimiter():
    # The sample is always cut at the delimiter, whatever the sample size.
    cases = [(".test.accounts.*", 80),
             (".test.accounts.1.json", 80),
             (".test.accounts.1.json", 2)]
    with filetexts(files, mode="b"):
        for pattern, sample_size in cases:
            sample, _ = read_bytes(pattern, sample=sample_size, delimiter=b"\n")
            assert sample.endswith(b"\n")
def test_parse_sample_bytes():
    # A human-readable byte string ("40 B") is accepted for `sample`.
    with filetexts(files, mode="b"):
        sample, _ = read_bytes(".test.accounts.*", sample="40 B")
        assert len(sample) == 40
def test_read_bytes_no_sample():
    # sample=False disables sampling entirely.
    with filetexts(files, mode="b"):
        result, _ = read_bytes(".test.accounts.1.json", sample=False)
        assert result is False
def test_read_bytes_blocksize_none():
    # blocksize=None yields exactly one block per file.
    with filetexts(files, mode="b"):
        _, blocks = read_bytes(".test.accounts.*", blocksize=None)
        assert sum(len(group) for group in blocks) == len(files)
@pytest.mark.parametrize("blocksize", [5.0, "5 B"])
def test_read_bytes_blocksize_types(blocksize):
    # Integral floats and byte strings are both accepted as blocksizes.
    with filetexts(files, mode="b"):
        _, blocks = read_bytes(".test.account*", blocksize=blocksize)
        results = compute(*concat(blocks))
        lines_read = b"".join(results).split(b"\n")
        lines_expected = b"".join(files.values()).split(b"\n")
        assert set(lines_read) == set(lines_expected)
def test_read_bytes_blocksize_float_errs():
    # A fractional blocksize is rejected.
    with filetexts(files, mode="b"), pytest.raises(TypeError):
        read_bytes(".test.account*", blocksize=5.5)
def test_read_bytes_include_path():
    # include_path=True adds the list of source paths to the result tuple.
    with filetexts(files, mode="b"):
        _, _, paths = read_bytes(".test.accounts.*", include_path=True)
        basenames = {os.path.split(path)[1] for path in paths}
        assert basenames == files.keys()
def test_with_urls():
    with filetexts(files, mode="b"):
        # OS-independent file:// URI with glob *
        glob_url = to_uri(".test.accounts.") + "*"
        _, blocks = read_bytes(glob_url, blocksize=None)
        assert sum(len(group) for group in blocks) == len(files)
@pytest.mark.skipif(sys.platform == "win32", reason="pathlib and moto clash on windows")
def test_with_paths():
    with filetexts(files, mode="b"):
        glob_path = pathlib.Path("./.test.accounts.*")
        _, blocks = read_bytes(glob_path, blocksize=None)
        assert sum(len(group) for group in blocks) == len(files)
        with pytest.raises(OSError):
            # relative path doesn't work
            read_bytes(pathlib.Path("file://.test.accounts.*"), blocksize=None)
def test_read_bytes_block():
    # Each file splits into max(len // blocksize, 1) blocks and the
    # concatenated contents survive the round trip.
    with filetexts(files, mode="b"):
        for blocksize in [5, 15, 45, 1500]:
            _, blocks = read_bytes(".test.account*", blocksize=blocksize)
            expected_counts = [max((len(data) // blocksize), 1)
                               for data in files.values()]
            assert list(map(len, blocks)) == expected_counts
            results = compute(*concat(blocks))
            assert sum(len(r) for r in results) == sum(len(v) for v in files.values())
            lines_read = b"".join(results).split(b"\n")
            lines_expected = b"".join(files.values()).split(b"\n")
            assert set(lines_read) == set(lines_expected)
def test_read_bytes_delimited():
    """Delimiter-split blocks each end with the delimiter (except at EOF)."""
    with filetexts(files, mode="b"):
        for bs in [5, 15, 45, "1.5 kB"]:
            # Different delimiters must produce different task keys.
            _, values = read_bytes(".test.accounts*", blocksize=bs, delimiter=b"\n")
            _, values2 = read_bytes(".test.accounts*", blocksize=bs, delimiter=b"foo")
            assert [a.key for a in concat(values)] != [b.key for b in concat(values2)]
            results = compute(*concat(values))
            res = [r for r in results if r]
            assert all(r.endswith(b"\n") for r in res)
            ourlines = b"".join(res).split(b"\n")
            testlines = b"".join(files[k] for k in sorted(files)).split(b"\n")
            assert ourlines == testlines
            # delimiter not at the end
            d = b"}"
            _, values = read_bytes(".test.accounts*", blocksize=bs, delimiter=d)
            results = compute(*concat(values))
            res = [r for r in results if r]
            # All should end in } except EOF
            # (two fixture files -> two trailing chunks without the delimiter)
            assert sum(r.endswith(b"}") for r in res) == len(res) - 2
            ours = b"".join(res)
            test = b"".join(files[v] for v in sorted(files))
            assert ours == test
# Every compression format, once with whole-file blocks (None) and once
# with a 10-byte blocksize (the latter is expected to be rejected).
fmt_bs = [(fmt, None) for fmt in compr] + [(fmt, 10) for fmt in compr]
@pytest.mark.parametrize("fmt,blocksize", fmt_bs)
def test_compression(fmt, blocksize):
    """Compressed files round-trip; splitting them on a delimiter is rejected."""
    if fmt not in compress:
        pytest.skip("compression function not provided")
    compressed_files = valmap(compress[fmt], files)
    with filetexts(compressed_files, mode="b"):
        # Block splitting inside a compressed stream is unsupported.
        if fmt and blocksize:
            with pytest.raises(ValueError):
                read_bytes(
                    ".test.accounts.*.json",
                    blocksize=blocksize,
                    delimiter=b"\n",
                    compression=fmt,
                )
            return
        sample, blocks = read_bytes(
            ".test.accounts.*.json",
            blocksize=blocksize,
            delimiter=b"\n",
            compression=fmt,
        )
        assert sample[:5] == files[sorted(files)[0]][:5]
        assert sample.endswith(b"\n")
        results = compute(*concat(blocks))
        assert b"".join(results) == b"".join([files[k] for k in sorted(files)])
def test_open_files():
    # One lazy file per fixture file, each reading back its exact contents.
    with filetexts(files, mode="b"):
        lazy_files = open_files(".test.accounts.*")
        assert len(lazy_files) == len(files)
        for lazy_file, name in zip(lazy_files, sorted(files)):
            with lazy_file as handle:
                assert handle.read() == files[name]
@pytest.mark.parametrize("encoding", ["utf-8", "ascii"])
def test_open_files_text_mode(encoding):
    # Text mode decodes the raw bytes with the requested encoding.
    with filetexts(files, mode="b"):
        lazy_files = open_files(".test.accounts.*", mode="rt", encoding=encoding)
        assert len(lazy_files) == len(files)
        contents = []
        for lazy_file in lazy_files:
            with lazy_file as handle:
                contents.append(handle.read())
        assert contents == [files[k].decode(encoding) for k in sorted(files)]
@pytest.mark.parametrize("mode", ["rt", "rb"])
@pytest.mark.parametrize("fmt", list(compr))
def test_open_files_compression(mode, fmt):
    # Compressed files are transparently decompressed in both text and
    # binary modes.
    if fmt not in compress:
        pytest.skip("compression function not provided")
    compressed_files = valmap(compress[fmt], files)
    with filetexts(compressed_files, mode="b"):
        lazy_files = open_files(".test.accounts.*", mode=mode, compression=fmt)
        contents = []
        for lazy_file in lazy_files:
            with lazy_file as handle:
                contents.append(handle.read())
        expected = [files[k] for k in sorted(files)]
        if mode == "rt":
            expected = [b.decode() for b in expected]
        assert contents == expected
def test_bad_compression():
    # An unknown compression name raises for both readers.
    with filetexts(files, mode="b"):
        for reader in [read_bytes, open_files]:
            with pytest.raises(ValueError):
                reader(".test.accounts.*", compression="not-found")
def test_not_found():
    # A missing path raises, and the message names the offending file.
    missing = "not-a-file"
    with pytest.raises((FileNotFoundError, OSError), match=missing):
        read_bytes(missing)
@pytest.mark.slow
def test_names():
    with filetexts(files, mode="b"):
        _, first = read_bytes(".test.accounts.*")
        _, second = read_bytes(".test.accounts.*")
        first = list(concat(first))
        second = list(concat(second))
        # Identical inputs produce identical task keys...
        assert [task._key for task in first] == [task._key for task in second]
        # ...while changed file contents (after an mtime tick) do not.
        sleep(1)
        for fn in files:
            with open(fn, "ab") as f:
                f.write(b"x")
        _, third = read_bytes(".test.accounts.*")
        third = list(concat(third))
        assert [task._key for task in first] != [task._key for task in third]
@pytest.mark.parametrize("compression_opener", [(None, open), ("gzip", gzip.open)])
def test_open_files_write(tmpdir, compression_opener):
    # Writing through lazy files creates numbered '.part' files whose
    # contents read back through the matching opener.
    compression, opener = compression_opener
    tmpdir = str(tmpdir)
    out_files = open_files(tmpdir, num=2, mode="wb", compression=compression)
    assert len(out_files) == 2
    assert {f.mode for f in out_files} == {"wb"}
    for out_file in out_files:
        with out_file as handle:
            handle.write(b"000")
    part_names = sorted(os.listdir(tmpdir))
    assert part_names == ["0.part", "1.part"]
    with opener(os.path.join(tmpdir, part_names[0]), "rb") as handle:
        assert handle.read() == b"000"
def test_pickability_of_lazy_files(tmpdir):
    # Lazy files survive a cloudpickle round trip intact.
    tmpdir = str(tmpdir)
    with filetexts(files, mode="b"):
        originals = open_files(".test.accounts.*")
        restored = cloudpickle.loads(cloudpickle.dumps(originals))
        for original, copy in zip(originals, restored):
            assert original.path == copy.path
            assert type(original.fs) == type(copy.fs)
            with original as f1, copy as f2:
                assert f1.read() == f2.read()
def test_py2_local_bytes(tmpdir):
    # Text-mode reading of a gzipped file yields str lines.
    path = str(tmpdir / "myfile.txt.gz")
    with gzip.open(path, mode="wb") as f:
        f.write(b"hello\nworld")
    lazy_files = open_files(path, compression="gzip", mode="rt")
    with lazy_files[0] as handle:
        assert all(isinstance(line, str) for line in handle)
def test_abs_paths(tmpdir):
    """LocalFileSystem.glob returns absolute paths usable from any cwd."""
    tmpdir = str(tmpdir)
    here = os.getcwd()
    os.chdir(tmpdir)
    try:
        with open("tmp", "w") as f:
            f.write("hi")
        out = LocalFileSystem().glob("*")
        assert len(out) == 1
        assert "/" in out[0]
        assert "tmp" in out[0]
        fs = LocalFileSystem()
    finally:
        # Always restore the working directory -- the original code skipped
        # this on assertion failure, leaking the (temporary) cwd into
        # subsequent tests.
        os.chdir(here)
    # The globbed path must still resolve after leaving tmpdir.
    with fs.open(out[0], "r") as f:
        res = f.read()
    assert res == "hi"
def test_get_pyarrow_filesystem():
    """LocalFileSystem subclasses pyarrow's FileSystem only on pyarrow < 2."""
    from fsspec.implementations.local import LocalFileSystem

    pa = pytest.importorskip("pyarrow")
    if parse_version(pa.__version__).major >= 2:
        # Typo fixed in the skip reason ("no loger" -> "no longer").
        pytest.skip("fsspec no longer inherits from pyarrow>=2.0.")
    fs = LocalFileSystem()
    assert isinstance(fs, pa.filesystem.FileSystem)
| |
import pytest
import numpy as np
import numpy.ma as ma
from numpy.ma.mrecords import MaskedRecords
from numpy.ma.testutils import assert_equal
from numpy.testing import assert_, assert_raises
from numpy.lib.recfunctions import (
drop_fields, rename_fields, get_fieldstructure, recursive_fill_fields,
find_duplicates, merge_arrays, append_fields, stack_arrays, join_by,
repack_fields, unstructured_to_structured, structured_to_unstructured,
apply_along_fields, require_fields, assign_fields_by_name)
# Local aliases for the (partly private) recfunctions helpers under test.
get_fieldspec = np.lib.recfunctions._get_fieldspec
get_names = np.lib.recfunctions.get_names
get_names_flat = np.lib.recfunctions.get_names_flat
zip_descr = np.lib.recfunctions._zip_descr
zip_dtype = np.lib.recfunctions._zip_dtype
class TestRecFunctions:
# Misc tests
def setup(self):
x = np.array([1, 2, ])
y = np.array([10, 20, 30])
z = np.array([('A', 1.), ('B', 2.)],
dtype=[('A', '|S3'), ('B', float)])
w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
self.data = (w, x, y, z)
    def test_zip_descr(self):
        """zip_descr merges the dtype descriptions of several arrays,
        optionally flattening nested fields."""
        # Test zip_descr
        (w, x, y, z) = self.data
        # Std array
        test = zip_descr((x, x), flatten=True)
        assert_equal(test,
                     np.dtype([('', int), ('', int)]))
        test = zip_descr((x, x), flatten=False)
        assert_equal(test,
                     np.dtype([('', int), ('', int)]))
        # Std & flexible-dtype
        test = zip_descr((x, z), flatten=True)
        assert_equal(test,
                     np.dtype([('', int), ('A', '|S3'), ('B', float)]))
        test = zip_descr((x, z), flatten=False)
        assert_equal(test,
                     np.dtype([('', int),
                               ('', [('A', '|S3'), ('B', float)])]))
        # Standard & nested dtype
        test = zip_descr((x, w), flatten=True)
        assert_equal(test,
                     np.dtype([('', int),
                               ('a', int),
                               ('ba', float), ('bb', int)]))
        test = zip_descr((x, w), flatten=False)
        assert_equal(test,
                     np.dtype([('', int),
                               ('', [('a', int),
                                     ('b', [('ba', float), ('bb', int)])])]))
    def test_drop_fields(self):
        """drop_fields removes top-level and nested fields from an array."""
        # Test drop_fields
        a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
                     dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
        # A basic field
        test = drop_fields(a, 'a')
        control = np.array([((2, 3.0),), ((5, 6.0),)],
                           dtype=[('b', [('ba', float), ('bb', int)])])
        assert_equal(test, control)
        # Another basic field (but nesting two fields)
        test = drop_fields(a, 'b')
        control = np.array([(1,), (4,)], dtype=[('a', int)])
        assert_equal(test, control)
        # A nested sub-field
        test = drop_fields(a, ['ba', ])
        control = np.array([(1, (3.0,)), (4, (6.0,))],
                           dtype=[('a', int), ('b', [('bb', int)])])
        assert_equal(test, control)
        # All the nested sub-field from a field: zap that field
        test = drop_fields(a, ['ba', 'bb'])
        control = np.array([(1,), (4,)], dtype=[('a', int)])
        assert_equal(test, control)
        # dropping all fields results in an array with no fields
        test = drop_fields(a, ['a', 'b'])
        control = np.array([(), ()], dtype=[])
        assert_equal(test, control)
def test_rename_fields(self):
# Test rename fields
a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
dtype=[('a', int),
('b', [('ba', float), ('bb', (float, 2))])])
test = rename_fields(a, {'a': 'A', 'bb': 'BB'})
newdtype = [('A', int), ('b', [('ba', float), ('BB', (float, 2))])]
control = a.view(newdtype)
assert_equal(test.dtype, newdtype)
assert_equal(test, control)
def test_get_names(self):
# Test get_names
ndtype = np.dtype([('A', '|S3'), ('B', float)])
test = get_names(ndtype)
assert_equal(test, ('A', 'B'))
ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])
test = get_names(ndtype)
assert_equal(test, ('a', ('b', ('ba', 'bb'))))
ndtype = np.dtype([('a', int), ('b', [])])
test = get_names(ndtype)
assert_equal(test, ('a', ('b', ())))
ndtype = np.dtype([])
test = get_names(ndtype)
assert_equal(test, ())
def test_get_names_flat(self):
# Test get_names_flat
ndtype = np.dtype([('A', '|S3'), ('B', float)])
test = get_names_flat(ndtype)
assert_equal(test, ('A', 'B'))
ndtype = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])
test = get_names_flat(ndtype)
assert_equal(test, ('a', 'b', 'ba', 'bb'))
ndtype = np.dtype([('a', int), ('b', [])])
test = get_names_flat(ndtype)
assert_equal(test, ('a', 'b'))
ndtype = np.dtype([])
test = get_names_flat(ndtype)
assert_equal(test, ())
    def test_get_fieldstructure(self):
        """get_fieldstructure maps each field name to its list of parents."""
        # Test get_fieldstructure
        # No nested fields
        ndtype = np.dtype([('A', '|S3'), ('B', float)])
        test = get_fieldstructure(ndtype)
        assert_equal(test, {'A': [], 'B': []})
        # One 1-nested field
        ndtype = np.dtype([('A', int), ('B', [('BA', float), ('BB', '|S1')])])
        test = get_fieldstructure(ndtype)
        assert_equal(test, {'A': [], 'B': [], 'BA': ['B', ], 'BB': ['B']})
        # One 2-nested fields
        ndtype = np.dtype([('A', int),
                           ('B', [('BA', int),
                                  ('BB', [('BBA', int), ('BBB', int)])])])
        test = get_fieldstructure(ndtype)
        control = {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'],
                   'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
        assert_equal(test, control)
        # 0 fields
        ndtype = np.dtype([])
        test = get_fieldstructure(ndtype)
        assert_equal(test, {})
    def test_find_duplicates(self):
        """find_duplicates locates duplicate records on full rows and on
        individual (possibly nested) key fields."""
        # Test find_duplicates
        a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')),
                      (1, (1., 'B')), (2, (2., 'B')), (2, (2., 'C'))],
                     mask=[(0, (0, 0)), (0, (0, 0)), (0, (0, 0)),
                           (0, (0, 0)), (1, (0, 0)), (0, (1, 0))],
                     dtype=[('A', int), ('B', [('BA', float), ('BB', '|S1')])])
        # Whole-record duplicates, including masked entries
        test = find_duplicates(a, ignoremask=False, return_index=True)
        control = [0, 2]
        assert_equal(sorted(test[-1]), control)
        assert_equal(test[0], a[test[-1]])
        test = find_duplicates(a, key='A', return_index=True)
        control = [0, 1, 2, 3, 5]
        assert_equal(sorted(test[-1]), control)
        assert_equal(test[0], a[test[-1]])
        test = find_duplicates(a, key='B', return_index=True)
        control = [0, 1, 2, 4]
        assert_equal(sorted(test[-1]), control)
        assert_equal(test[0], a[test[-1]])
        # Nested sub-fields can also serve as keys
        test = find_duplicates(a, key='BA', return_index=True)
        control = [0, 1, 2, 4]
        assert_equal(sorted(test[-1]), control)
        assert_equal(test[0], a[test[-1]])
        test = find_duplicates(a, key='BB', return_index=True)
        control = [0, 1, 2, 3, 4]
        assert_equal(sorted(test[-1]), control)
        assert_equal(test[0], a[test[-1]])
def test_find_duplicates_ignoremask(self):
# Test the ignoremask option of find_duplicates
ndtype = [('a', int)]
a = ma.array([1, 1, 1, 2, 2, 3, 3],
mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
test = find_duplicates(a, ignoremask=True, return_index=True)
control = [0, 1, 3, 4]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
test = find_duplicates(a, ignoremask=False, return_index=True)
control = [0, 1, 2, 3, 4, 6]
assert_equal(sorted(test[-1]), control)
assert_equal(test[0], a[test[-1]])
def test_repack_fields(self):
    # repack_fields removes the padding introduced by align=True.
    dt = np.dtype('u1,f4,i8', align=True)
    a = np.zeros(2, dtype=dt)
    assert_equal(repack_fields(dt), np.dtype('u1,f4,i8'))
    # Packed itemsize is the sum of the field sizes: 1 + 4 + 8 = 13.
    assert_equal(repack_fields(a).itemsize, 13)
    # Re-aligning the packed dtype restores the original aligned layout.
    assert_equal(repack_fields(repack_fields(dt), align=True), dt)
    # make sure type is preserved
    dt = np.dtype((np.record, dt))
    assert_(repack_fields(dt).type is np.record)
def test_structured_to_unstructured(self):
    # Round-trips between structured arrays and plain 2d arrays, including
    # nested fields, subarrays and multi-field indexing.
    a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
    out = structured_to_unstructured(a)
    assert_equal(out, np.zeros((4,5), dtype='f8'))
    b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
                 dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
    # Multi-field selection before conversion.
    out = np.mean(structured_to_unstructured(b[['x', 'z']]), axis=-1)
    assert_equal(out, np.array([ 3. , 5.5, 9. , 11. ]))
    out = np.mean(structured_to_unstructured(b[['x']]), axis=-1)
    assert_equal(out, np.array([ 1. , 4. , 7. , 10. ]))
    # Inverse direction: unstructured -> structured with a nested dtype.
    c = np.arange(20).reshape((4,5))
    out = unstructured_to_structured(c, a.dtype)
    want = np.array([( 0, ( 1., 2), [ 3., 4.]),
                     ( 5, ( 6., 7), [ 8., 9.]),
                     (10, (11., 12), [13., 14.]),
                     (15, (16., 17), [18., 19.])],
                    dtype=[('a', 'i4'),
                           ('b', [('f0', 'f4'), ('f1', 'u2')]),
                           ('c', 'f4', (2,))])
    assert_equal(out, want)
    d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
                 dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
    assert_equal(apply_along_fields(np.mean, d),
                 np.array([ 8.0/3, 16.0/3, 26.0/3, 11. ]))
    assert_equal(apply_along_fields(np.mean, d[['x', 'z']]),
                 np.array([ 3. , 5.5, 9. , 11. ]))
    # check that for uniform field dtypes we get a view, not a copy:
    d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
                 dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'i4')])
    dd = structured_to_unstructured(d)
    ddd = unstructured_to_structured(dd, d.dtype)
    assert_(dd.base is d)
    assert_(ddd.base is d)
    # including uniform fields with subarrays unpacked
    d = np.array([(1, [2, 3], [[ 4, 5], [ 6, 7]]),
                  (8, [9, 10], [[11, 12], [13, 14]])],
                 dtype=[('x0', 'i4'), ('x1', ('i4', 2)),
                        ('x2', ('i4', (2, 2)))])
    dd = structured_to_unstructured(d)
    ddd = unstructured_to_structured(dd, d.dtype)
    assert_(dd.base is d)
    assert_(ddd.base is d)
    # test that nested fields with identical names don't break anything
    point = np.dtype([('x', int), ('y', int)])
    triangle = np.dtype([('a', point), ('b', point), ('c', point)])
    arr = np.zeros(10, triangle)
    res = structured_to_unstructured(arr, dtype=int)
    assert_equal(res, np.zeros((10, 6), dtype=int))
    # test nested combinations of subarrays and structured arrays, gh-13333
    def subarray(dt, shape):
        # Helper: subarray dtype of `dt` with the given shape.
        return np.dtype((dt, shape))

    def structured(*dts):
        # Helper: structured dtype with auto-named fields x0, x1, ...
        return np.dtype([('x{}'.format(i), dt) for i, dt in enumerate(dts)])

    def inspect(dt, dtype=None):
        # Helper: unstructure a 0d array of `dt`, convert back, and return
        # (unstructured shape, unstructured dtype, round-tripped dtype).
        arr = np.zeros((), dt)
        ret = structured_to_unstructured(arr, dtype=dtype)
        backarr = unstructured_to_structured(ret, dt)
        return ret.shape, ret.dtype, backarr.dtype

    dt = structured(subarray(structured(np.int32, np.int32), 3))
    assert_equal(inspect(dt), ((6,), np.int32, dt))
    dt = structured(subarray(subarray(np.int32, 2), 2))
    assert_equal(inspect(dt), ((4,), np.int32, dt))
    dt = structured(np.int32)
    assert_equal(inspect(dt), ((1,), np.int32, dt))
    dt = structured(np.int32, subarray(subarray(np.int32, 2), 2))
    assert_equal(inspect(dt), ((5,), np.int32, dt))
    # An empty structured dtype cannot be unstructured.
    dt = structured()
    assert_raises(ValueError, structured_to_unstructured, np.zeros(3, dt))
    # these currently don't work, but we may make it work in the future
    assert_raises(NotImplementedError, structured_to_unstructured,
                  np.zeros(3, dt), dtype=np.int32)
    assert_raises(NotImplementedError, unstructured_to_structured,
                  np.zeros((3,0), dtype=np.int32))
def test_field_assignment_by_name(self):
    # require_fields / assign_fields_by_name with flat and nested dtypes.
    a = np.ones(2, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
    newdt = [('b', 'f4'), ('c', 'u1')]
    assert_equal(require_fields(a, newdt), np.ones(2, newdt))
    b = np.array([(1,2), (3,4)], dtype=newdt)
    # zero_unassigned=False leaves fields missing from `b` untouched.
    assign_fields_by_name(a, b, zero_unassigned=False)
    assert_equal(a, np.array([(1,1,2),(1,3,4)], dtype=a.dtype))
    # Default zeroes out fields of `a` that `b` does not provide.
    assign_fields_by_name(a, b)
    assert_equal(a, np.array([(0,1,2),(0,3,4)], dtype=a.dtype))
    # test nested fields
    a = np.ones(2, dtype=[('a', [('b', 'f8'), ('c', 'u1')])])
    newdt = [('a', [('c', 'u1')])]
    assert_equal(require_fields(a, newdt), np.ones(2, newdt))
    b = np.array([((2,),), ((3,),)], dtype=newdt)
    assign_fields_by_name(a, b, zero_unassigned=False)
    assert_equal(a, np.array([((1,2),), ((1,3),)], dtype=a.dtype))
    assign_fields_by_name(a, b)
    assert_equal(a, np.array([((0,2),), ((0,3),)], dtype=a.dtype))
    # test unstructured code path for 0d arrays
    a, b = np.array(3), np.array(0)
    assign_fields_by_name(b, a)
    assert_equal(b[()], 3)
class TestRecursiveFillFields:
    # Test recursive_fill_fields.

    def test_simple_flexible(self):
        # Test recursive_fill_fields on flexible-array
        a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
        b = np.zeros((3,), dtype=a.dtype)
        # The first len(a) rows of b are filled from a, field by field.
        test = recursive_fill_fields(a, b)
        control = np.array([(1, 10.), (2, 20.), (0, 0.)],
                           dtype=[('A', int), ('B', float)])
        assert_equal(test, control)

    def test_masked_flexible(self):
        # Test recursive_fill_fields on masked flexible-array
        a = ma.array([(1, 10.), (2, 20.)], mask=[(0, 1), (1, 0)],
                     dtype=[('A', int), ('B', float)])
        b = ma.zeros((3,), dtype=a.dtype)
        # The mask is propagated along with the data.
        test = recursive_fill_fields(a, b)
        control = ma.array([(1, 10.), (2, 20.), (0, 0.)],
                           mask=[(0, 1), (1, 0), (0, 0)],
                           dtype=[('A', int), ('B', float)])
        assert_equal(test, control)
class TestMergeArrays:
    # Test merge_arrays

    def setup(self):
        # Fixtures: plain arrays (x, y), a flexible array (z) and a nested
        # flexible array (w) that includes an empty nested dtype 'bc'.
        x = np.array([1, 2, ])
        y = np.array([10, 20, 30])
        z = np.array(
            [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
        w = np.array(
            [(1, (2, 3.0, ())), (4, (5, 6.0, ()))],
            dtype=[('a', int), ('b', [('ba', float), ('bb', int), ('bc', [])])])
        self.data = (w, x, y, z)

    def test_solo(self):
        # Test merge_arrays on a single array.
        (_, x, _, z) = self.data
        test = merge_arrays(x)
        control = np.array([(1,), (2,)], dtype=[('f0', int)])
        assert_equal(test, control)
        test = merge_arrays((x,))
        assert_equal(test, control)
        test = merge_arrays(z, flatten=False)
        assert_equal(test, z)
        test = merge_arrays(z, flatten=True)
        assert_equal(test, z)

    def test_solo_w_flatten(self):
        # Test merge_arrays on a single array w & w/o flattening
        w = self.data[0]
        test = merge_arrays(w, flatten=False)
        assert_equal(test, w)
        # Flattening promotes nested fields and drops the empty 'bc'.
        test = merge_arrays(w, flatten=True)
        control = np.array([(1, 2, 3.0), (4, 5, 6.0)],
                           dtype=[('a', int), ('ba', float), ('bb', int)])
        assert_equal(test, control)

    def test_standard(self):
        # Test standard & standard
        # Test merge arrays
        (_, x, y, _) = self.data
        # Shorter input x is padded with fill values (usemask=False) ...
        test = merge_arrays((x, y), usemask=False)
        control = np.array([(1, 10), (2, 20), (-1, 30)],
                           dtype=[('f0', int), ('f1', int)])
        assert_equal(test, control)
        # ... or masked entries (usemask=True).
        test = merge_arrays((x, y), usemask=True)
        control = ma.array([(1, 10), (2, 20), (-1, 30)],
                           mask=[(0, 0), (0, 0), (1, 0)],
                           dtype=[('f0', int), ('f1', int)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)

    def test_flatten(self):
        # Test standard & flexible
        (_, x, _, z) = self.data
        test = merge_arrays((x, z), flatten=True)
        control = np.array([(1, 'A', 1.), (2, 'B', 2.)],
                           dtype=[('f0', int), ('A', '|S3'), ('B', float)])
        assert_equal(test, control)
        test = merge_arrays((x, z), flatten=False)
        control = np.array([(1, ('A', 1.)), (2, ('B', 2.))],
                           dtype=[('f0', int),
                                  ('f1', [('A', '|S3'), ('B', float)])])
        assert_equal(test, control)

    def test_flatten_wflexible(self):
        # Test flatten standard & nested
        (w, x, _, _) = self.data
        test = merge_arrays((x, w), flatten=True)
        control = np.array([(1, 1, 2, 3.0), (2, 4, 5, 6.0)],
                           dtype=[('f0', int),
                                  ('a', int), ('ba', float), ('bb', int)])
        assert_equal(test, control)
        test = merge_arrays((x, w), flatten=False)
        controldtype = [('f0', int),
                        ('f1', [('a', int),
                                ('b', [('ba', float), ('bb', int), ('bc', [])])])]
        control = np.array([(1., (1, (2, 3.0, ()))), (2, (4, (5, 6.0, ())))],
                           dtype=controldtype)
        assert_equal(test, control)

    def test_wmasked_arrays(self):
        # Test merge_arrays masked arrays
        (_, x, _, _) = self.data
        mx = ma.array([1, 2, 3], mask=[1, 0, 0])
        test = merge_arrays((x, mx), usemask=True)
        control = ma.array([(1, 1), (2, 2), (-1, 3)],
                           mask=[(0, 1), (0, 0), (1, 0)],
                           dtype=[('f0', int), ('f1', int)])
        assert_equal(test, control)
        # asrecarray=True wraps the result as MaskedRecords.
        test = merge_arrays((x, mx), usemask=True, asrecarray=True)
        assert_equal(test, control)
        assert_(isinstance(test, MaskedRecords))

    def test_w_singlefield(self):
        # Test single field
        test = merge_arrays((np.array([1, 2]).view([('a', int)]),
                             np.array([10., 20., 30.])),)
        control = ma.array([(1, 10.), (2, 20.), (-1, 30.)],
                           mask=[(0, 0), (0, 0), (1, 0)],
                           dtype=[('a', int), ('f1', float)])
        assert_equal(test, control)

    def test_w_shorter_flex(self):
        # Test merge_arrays w/ a shorter flexndarray.
        z = self.data[-1]
        # Fixme, this test looks incomplete and broken
        #test = merge_arrays((z, np.array([10, 20, 30]).view([('C', int)])))
        #control = np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)],
        #                   dtype=[('A', '|S3'), ('B', float), ('C', int)])
        #assert_equal(test, control)
        # Hack to avoid pyflakes warnings about unused variables
        merge_arrays((z, np.array([10, 20, 30]).view([('C', int)])))
        np.array([('A', 1., 10), ('B', 2., 20), ('-1', -1, 20)],
                 dtype=[('A', '|S3'), ('B', float), ('C', int)])

    def test_singlerecord(self):
        # Merging scalar (0d) records from each input.
        (_, x, y, z) = self.data
        test = merge_arrays((x[0], y[0], z[0]), usemask=False)
        control = np.array([(1, 10, ('A', 1))],
                           dtype=[('f0', int),
                                  ('f1', int),
                                  ('f2', [('A', '|S3'), ('B', float)])])
        assert_equal(test, control)
class TestAppendFields:
    # Test append_fields

    def setup(self):
        # Fixtures: plain arrays (x, y), flexible (z) and nested (w).
        x = np.array([1, 2, ])
        y = np.array([10, 20, 30])
        z = np.array(
            [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
        w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
                     dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
        self.data = (w, x, y, z)

    def test_append_single(self):
        # Test simple case
        (_, x, _, _) = self.data
        test = append_fields(x, 'A', data=[10, 20, 30])
        # x is shorter than the appended data, so its missing row is masked.
        control = ma.array([(1, 10), (2, 20), (-1, 30)],
                           mask=[(0, 0), (0, 0), (1, 0)],
                           dtype=[('f0', int), ('A', int)],)
        assert_equal(test, control)

    def test_append_double(self):
        # Appending two fields of different lengths in one call.
        (_, x, _, _) = self.data
        test = append_fields(x, ('A', 'B'), data=[[10, 20, 30], [100, 200]])
        control = ma.array([(1, 10, 100), (2, 20, 200), (-1, 30, -1)],
                           mask=[(0, 0, 0), (0, 0, 0), (1, 0, 1)],
                           dtype=[('f0', int), ('A', int), ('B', int)],)
        assert_equal(test, control)

    def test_append_on_flex(self):
        # Test append_fields on flexible type arrays
        z = self.data[-1]
        test = append_fields(z, 'C', data=[10, 20, 30])
        control = ma.array([('A', 1., 10), ('B', 2., 20), (-1, -1., 30)],
                           mask=[(0, 0, 0), (0, 0, 0), (1, 1, 0)],
                           dtype=[('A', '|S3'), ('B', float), ('C', int)],)
        assert_equal(test, control)

    def test_append_on_nested(self):
        # Test append_fields on nested fields
        w = self.data[0]
        test = append_fields(w, 'C', data=[10, 20, 30])
        control = ma.array([(1, (2, 3.0), 10),
                            (4, (5, 6.0), 20),
                            (-1, (-1, -1.), 30)],
                           mask=[(0, (0, 0), 0), (0, (0, 0), 0), (1, (1, 1), 0)],
                           dtype=[('a', int),
                                  ('b', [('ba', float), ('bb', int)]),
                                  ('C', int)],)
        assert_equal(test, control)
class TestStackArrays:
    # Test stack_arrays

    def setup(self):
        # Fixtures: plain arrays (x, y), flexible (z) and nested (w).
        x = np.array([1, 2, ])
        y = np.array([10, 20, 30])
        z = np.array(
            [('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
        w = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
                     dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
        self.data = (w, x, y, z)

    def test_solo(self):
        # Test stack_arrays on single arrays
        (_, x, _, _) = self.data
        test = stack_arrays((x,))
        assert_equal(test, x)
        # A single input is returned unchanged (the same object).
        assert_(test is x)
        test = stack_arrays(x)
        assert_equal(test, x)
        assert_(test is x)

    def test_unnamed_fields(self):
        # Tests combinations of arrays w/o named fields
        (_, x, y, _) = self.data
        test = stack_arrays((x, x), usemask=False)
        control = np.array([1, 2, 1, 2])
        assert_equal(test, control)
        test = stack_arrays((x, y), usemask=False)
        control = np.array([1, 2, 10, 20, 30])
        assert_equal(test, control)
        test = stack_arrays((y, x), usemask=False)
        control = np.array([10, 20, 30, 1, 2])
        assert_equal(test, control)

    def test_unnamed_and_named_fields(self):
        # Test combination of arrays w/ & w/o named fields
        (_, x, _, z) = self.data
        # Fields missing from one input come out masked in the result.
        test = stack_arrays((x, z))
        control = ma.array([(1, -1, -1), (2, -1, -1),
                            (-1, 'A', 1), (-1, 'B', 2)],
                           mask=[(0, 1, 1), (0, 1, 1),
                                 (1, 0, 0), (1, 0, 0)],
                           dtype=[('f0', int), ('A', '|S3'), ('B', float)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
        test = stack_arrays((z, x))
        control = ma.array([('A', 1, -1), ('B', 2, -1),
                            (-1, -1, 1), (-1, -1, 2), ],
                           mask=[(0, 0, 1), (0, 0, 1),
                                 (1, 1, 0), (1, 1, 0)],
                           dtype=[('A', '|S3'), ('B', float), ('f2', int)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
        test = stack_arrays((z, z, x))
        control = ma.array([('A', 1, -1), ('B', 2, -1),
                            ('A', 1, -1), ('B', 2, -1),
                            (-1, -1, 1), (-1, -1, 2), ],
                           mask=[(0, 0, 1), (0, 0, 1),
                                 (0, 0, 1), (0, 0, 1),
                                 (1, 1, 0), (1, 1, 0)],
                           dtype=[('A', '|S3'), ('B', float), ('f2', int)])
        assert_equal(test, control)

    def test_matching_named_fields(self):
        # Test combination of arrays w/ matching field names
        (_, x, _, z) = self.data
        zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
                      dtype=[('A', '|S3'), ('B', float), ('C', float)])
        test = stack_arrays((z, zz))
        control = ma.array([('A', 1, -1), ('B', 2, -1),
                            ('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
                           dtype=[('A', '|S3'), ('B', float), ('C', float)],
                           mask=[(0, 0, 1), (0, 0, 1),
                                 (0, 0, 0), (0, 0, 0), (0, 0, 0)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
        test = stack_arrays((z, zz, x))
        ndtype = [('A', '|S3'), ('B', float), ('C', float), ('f3', int)]
        control = ma.array([('A', 1, -1, -1), ('B', 2, -1, -1),
                            ('a', 10., 100., -1), ('b', 20., 200., -1),
                            ('c', 30., 300., -1),
                            (-1, -1, -1, 1), (-1, -1, -1, 2)],
                           dtype=ndtype,
                           mask=[(0, 0, 1, 1), (0, 0, 1, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1),
                                 (1, 1, 1, 0), (1, 1, 1, 0)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)

    def test_defaults(self):
        # Test defaults: no exception raised if keys of defaults are not fields.
        (_, _, _, z) = self.data
        zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
                      dtype=[('A', '|S3'), ('B', float), ('C', float)])
        defaults = {'A': '???', 'B': -999., 'C': -9999., 'D': -99999.}
        # Missing entries are filled from `defaults` (here 'C' -> -9999.).
        test = stack_arrays((z, zz), defaults=defaults)
        control = ma.array([('A', 1, -9999.), ('B', 2, -9999.),
                            ('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
                           dtype=[('A', '|S3'), ('B', float), ('C', float)],
                           mask=[(0, 0, 1), (0, 0, 1),
                                 (0, 0, 0), (0, 0, 0), (0, 0, 0)])
        assert_equal(test, control)
        assert_equal(test.data, control.data)
        assert_equal(test.mask, control.mask)

    def test_autoconversion(self):
        # Tests autoconversion
        adtype = [('A', int), ('B', bool), ('C', float)]
        a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype)
        bdtype = [('A', int), ('B', float), ('C', float)]
        b = ma.array([(4, 5, 6)], dtype=bdtype)
        control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)],
                           dtype=bdtype)
        # autoconvert=True upcasts mismatched field dtypes (bool -> float).
        test = stack_arrays((a, b), autoconvert=True)
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
        # Without autoconversion, mismatched field dtypes raise.
        with assert_raises(TypeError):
            stack_arrays((a, b), autoconvert=False)

    def test_checktitles(self):
        # Test using titles in the field names
        adtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)]
        a = ma.array([(1, 2, 3)], mask=[(0, 1, 0)], dtype=adtype)
        bdtype = [(('a', 'A'), int), (('b', 'B'), bool), (('c', 'C'), float)]
        b = ma.array([(4, 5, 6)], dtype=bdtype)
        test = stack_arrays((a, b))
        control = ma.array([(1, 2, 3), (4, 5, 6)], mask=[(0, 1, 0), (0, 0, 0)],
                           dtype=bdtype)
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)

    def test_subdtype(self):
        # Fields with subarray dtypes (shape (1,)) stack correctly.
        z = np.array([
            ('A', 1), ('B', 2)
        ], dtype=[('A', '|S3'), ('B', float, (1,))])
        zz = np.array([
            ('a', [10.], 100.), ('b', [20.], 200.), ('c', [30.], 300.)
        ], dtype=[('A', '|S3'), ('B', float, (1,)), ('C', float)])
        res = stack_arrays((z, zz))
        expected = ma.array(
            data=[
                (b'A', [1.0], 0),
                (b'B', [2.0], 0),
                (b'a', [10.0], 100.0),
                (b'b', [20.0], 200.0),
                (b'c', [30.0], 300.0)],
            mask=[
                (False, [False], True),
                (False, [False], True),
                (False, [False], False),
                (False, [False], False),
                (False, [False], False)
            ],
            dtype=zz.dtype
        )
        assert_equal(res.dtype, expected.dtype)
        assert_equal(res, expected)
        assert_equal(res.mask, expected.mask)
class TestJoinBy:
    """Tests for join_by over structured arrays sharing key fields."""

    def setup(self):
        # a and b share key fields 'a' and 'b' over overlapping value ranges.
        self.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
                                   np.arange(100, 110))),
                          dtype=[('a', int), ('b', int), ('c', int)])
        self.b = np.array(list(zip(np.arange(5, 15), np.arange(65, 75),
                                   np.arange(100, 110))),
                          dtype=[('a', int), ('b', int), ('d', int)])

    def test_inner_join(self):
        # Basic test of join_by
        a, b = self.a, self.b
        test = join_by('a', a, b, jointype='inner')
        control = np.array([(5, 55, 65, 105, 100), (6, 56, 66, 106, 101),
                            (7, 57, 67, 107, 102), (8, 58, 68, 108, 103),
                            (9, 59, 69, 109, 104)],
                           dtype=[('a', int), ('b1', int), ('b2', int),
                                  ('c', int), ('d', int)])
        assert_equal(test, control)

    def test_join(self):
        a, b = self.a, self.b
        # Fixme, this test is broken
        #test = join_by(('a', 'b'), a, b)
        #control = np.array([(5, 55, 105, 100), (6, 56, 106, 101),
        #                    (7, 57, 107, 102), (8, 58, 108, 103),
        #                    (9, 59, 109, 104)],
        #                   dtype=[('a', int), ('b', int),
        #                          ('c', int), ('d', int)])
        #assert_equal(test, control)
        # Hack to avoid pyflakes unused variable warnings
        join_by(('a', 'b'), a, b)
        np.array([(5, 55, 105, 100), (6, 56, 106, 101),
                  (7, 57, 107, 102), (8, 58, 108, 103),
                  (9, 59, 109, 104)],
                 dtype=[('a', int), ('b', int),
                        ('c', int), ('d', int)])

    def test_join_subdtype(self):
        # tests the bug in https://stackoverflow.com/q/44769632/102441
        foo = np.array([(1,)],
                       dtype=[('key', int)])
        bar = np.array([(1, np.array([1,2,3]))],
                       dtype=[('key', int), ('value', 'uint16', 3)])
        res = join_by('key', foo, bar)
        assert_equal(res, bar.view(ma.MaskedArray))

    def test_outer_join(self):
        # Outer join keeps unmatched rows from both sides, masked where a
        # side contributed no value.
        a, b = self.a, self.b
        test = join_by(('a', 'b'), a, b, 'outer')
        control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1),
                            (2, 52, 102, -1), (3, 53, 103, -1),
                            (4, 54, 104, -1), (5, 55, 105, -1),
                            (5, 65, -1, 100), (6, 56, 106, -1),
                            (6, 66, -1, 101), (7, 57, 107, -1),
                            (7, 67, -1, 102), (8, 58, 108, -1),
                            (8, 68, -1, 103), (9, 59, 109, -1),
                            (9, 69, -1, 104), (10, 70, -1, 105),
                            (11, 71, -1, 106), (12, 72, -1, 107),
                            (13, 73, -1, 108), (14, 74, -1, 109)],
                           mask=[(0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 1, 0), (0, 0, 0, 1),
                                 (0, 0, 1, 0), (0, 0, 0, 1),
                                 (0, 0, 1, 0), (0, 0, 0, 1),
                                 (0, 0, 1, 0), (0, 0, 0, 1),
                                 (0, 0, 1, 0), (0, 0, 1, 0),
                                 (0, 0, 1, 0), (0, 0, 1, 0),
                                 (0, 0, 1, 0), (0, 0, 1, 0)],
                           dtype=[('a', int), ('b', int),
                                  ('c', int), ('d', int)])
        assert_equal(test, control)

    def test_leftouter_join(self):
        # Left-outer join keeps all rows of `a`; 'd' is masked throughout
        # because no (a, b) pair of `b` matches.
        a, b = self.a, self.b
        test = join_by(('a', 'b'), a, b, 'leftouter')
        control = ma.array([(0, 50, 100, -1), (1, 51, 101, -1),
                            (2, 52, 102, -1), (3, 53, 103, -1),
                            (4, 54, 104, -1), (5, 55, 105, -1),
                            (6, 56, 106, -1), (7, 57, 107, -1),
                            (8, 58, 108, -1), (9, 59, 109, -1)],
                           mask=[(0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1)],
                           dtype=[('a', int), ('b', int), ('c', int), ('d', int)])
        assert_equal(test, control)

    def test_different_field_order(self):
        # gh-8940
        a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])
        b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
        # this should not give a FutureWarning:
        j = join_by(['c', 'b'], a, b, jointype='inner', usemask=False)
        assert_equal(j.dtype.names, ['b', 'c', 'a1', 'a2'])

    def test_duplicate_keys(self):
        a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u1')])
        b = np.ones(3, dtype=[('c', 'u1'), ('b', 'f4'), ('a', 'i4')])
        assert_raises(ValueError, join_by, ['a', 'b', 'b'], a, b)

    @pytest.mark.xfail(reason="See comment at gh-9343")
    def test_same_name_different_dtypes_key(self):
        a_dtype = np.dtype([('key', 'S5'), ('value', '<f4')])
        b_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])
        expected_dtype = np.dtype([
            ('key', 'S10'), ('value1', '<f4'), ('value2', '<f4')])
        a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype)
        b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype)
        res = join_by('key', a, b)
        assert_equal(res.dtype, expected_dtype)

    def test_same_name_different_dtypes(self):
        # gh-9338
        a_dtype = np.dtype([('key', 'S10'), ('value', '<f4')])
        b_dtype = np.dtype([('key', 'S10'), ('value', '<f8')])
        expected_dtype = np.dtype([
            ('key', '|S10'), ('value1', '<f4'), ('value2', '<f8')])
        a = np.array([('Sarah', 8.0), ('John', 6.0)], dtype=a_dtype)
        b = np.array([('Sarah', 10.0), ('John', 7.0)], dtype=b_dtype)
        res = join_by('key', a, b)
        assert_equal(res.dtype, expected_dtype)

    def test_subarray_key(self):
        # Joining on a key field that is itself a subarray.
        a_dtype = np.dtype([('pos', int, 3), ('f', '<f4')])
        a = np.array([([1, 1, 1], np.pi), ([1, 2, 3], 0.0)], dtype=a_dtype)
        b_dtype = np.dtype([('pos', int, 3), ('g', '<f4')])
        b = np.array([([1, 1, 1], 3), ([3, 2, 1], 0.0)], dtype=b_dtype)
        expected_dtype = np.dtype([('pos', int, 3), ('f', '<f4'), ('g', '<f4')])
        expected = np.array([([1, 1, 1], np.pi, 3)], dtype=expected_dtype)
        res = join_by('pos', a, b)
        assert_equal(res.dtype, expected_dtype)
        assert_equal(res, expected)

    def test_padded_dtype(self):
        dt = np.dtype('i1,f4', align=True)
        dt.names = ('k', 'v')
        # BUG FIX: previously `assert_(len(dt.descr), 3)` — assert_'s second
        # argument is a *message*, so the check could never fail.  The
        # intent is equality: align=True inserts a padding field, giving
        # three entries in the descr.
        assert_equal(len(dt.descr), 3)  # padding field is inserted
        a = np.array([(1, 3), (3, 2)], dt)
        b = np.array([(1, 1), (2, 2)], dt)
        res = join_by('k', a, b)
        # no padding fields remain
        expected_dtype = np.dtype([
            ('k', 'i1'), ('v1', 'f4'), ('v2', 'f4')
        ])
        assert_equal(res.dtype, expected_dtype)
class TestJoinBy2:
    # join_by postfix handling; here a and b share the full key range,
    # so inner joins keep every row.

    @classmethod
    def setup(cls):
        cls.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
                                  np.arange(100, 110))),
                         dtype=[('a', int), ('b', int), ('c', int)])
        cls.b = np.array(list(zip(np.arange(10), np.arange(65, 75),
                                  np.arange(100, 110))),
                         dtype=[('a', int), ('b', int), ('d', int)])

    def test_no_r1postfix(self):
        # Basic test of join_by no_r1postfix
        a, b = self.a, self.b
        test = join_by(
            'a', a, b, r1postfix='', r2postfix='2', jointype='inner')
        control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101),
                            (2, 52, 67, 102, 102), (3, 53, 68, 103, 103),
                            (4, 54, 69, 104, 104), (5, 55, 70, 105, 105),
                            (6, 56, 71, 106, 106), (7, 57, 72, 107, 107),
                            (8, 58, 73, 108, 108), (9, 59, 74, 109, 109)],
                           dtype=[('a', int), ('b', int), ('b2', int),
                                  ('c', int), ('d', int)])
        assert_equal(test, control)

    def test_no_postfix(self):
        # Empty postfixes on both sides cannot disambiguate the shared
        # non-key field 'b', so join_by must raise.
        assert_raises(ValueError, join_by, 'a', self.a, self.b,
                      r1postfix='', r2postfix='')

    def test_no_r2postfix(self):
        # Basic test of join_by no_r2postfix
        a, b = self.a, self.b
        test = join_by(
            'a', a, b, r1postfix='1', r2postfix='', jointype='inner')
        control = np.array([(0, 50, 65, 100, 100), (1, 51, 66, 101, 101),
                            (2, 52, 67, 102, 102), (3, 53, 68, 103, 103),
                            (4, 54, 69, 104, 104), (5, 55, 70, 105, 105),
                            (6, 56, 71, 106, 106), (7, 57, 72, 107, 107),
                            (8, 58, 73, 108, 108), (9, 59, 74, 109, 109)],
                           dtype=[('a', int), ('b1', int), ('b', int),
                                  ('c', int), ('d', int)])
        assert_equal(test, control)

    def test_two_keys_two_vars(self):
        # Two join keys ('a', 'k') with two shared non-key fields that
        # both need postfixing.
        a = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),
                              np.arange(50, 60), np.arange(10, 20))),
                     dtype=[('k', int), ('a', int), ('b', int), ('c', int)])
        b = np.array(list(zip(np.tile([10, 11], 5), np.repeat(np.arange(5), 2),
                              np.arange(65, 75), np.arange(0, 10))),
                     dtype=[('k', int), ('a', int), ('b', int), ('c', int)])
        control = np.array([(10, 0, 50, 65, 10, 0), (11, 0, 51, 66, 11, 1),
                            (10, 1, 52, 67, 12, 2), (11, 1, 53, 68, 13, 3),
                            (10, 2, 54, 69, 14, 4), (11, 2, 55, 70, 15, 5),
                            (10, 3, 56, 71, 16, 6), (11, 3, 57, 72, 17, 7),
                            (10, 4, 58, 73, 18, 8), (11, 4, 59, 74, 19, 9)],
                           dtype=[('k', int), ('a', int), ('b1', int),
                                  ('b2', int), ('c1', int), ('c2', int)])
        test = join_by(
            ['a', 'k'], a, b, r1postfix='1', r2postfix='2', jointype='inner')
        assert_equal(test.dtype, control.dtype)
        assert_equal(test, control)
class TestAppendFieldsObj:
    """
    Test append_fields with arrays containing objects
    """
    # https://github.com/numpy/numpy/issues/2346

    def setup(self):
        # An object-dtype payload (a datetime.date) shared by the tests.
        from datetime import date
        self.data = dict(obj=date(2000, 1, 1))

    def test_append_to_objects(self):
        "Test append_fields when the base array contains objects"
        obj = self.data['obj']
        x = np.array([(obj, 1.), (obj, 2.)],
                     dtype=[('A', object), ('B', float)])
        y = np.array([10, 20], dtype=int)
        test = append_fields(x, 'C', data=y, usemask=False)
        control = np.array([(obj, 1.0, 10), (obj, 2.0, 20)],
                           dtype=[('A', object), ('B', float), ('C', int)])
        assert_equal(test, control)
| |
#!/usr/bin/env python
import operator
import abc
class Expression(object):
    """Abstract base class of a lazily-evaluated expression DSL.

    Overloaded operators build a tree of Expression nodes (Add, Mul, Eq,
    ...) instead of computing immediately; get_value() evaluates the tree.

    NOTE(review): `__metaclass__` is the Python 2 spelling and has no
    effect on Python 3, so abstract methods are not enforced there.
    Overriding __eq__ also leaves instances unhashable on Python 3.
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self):
        pass

    @abc.abstractmethod
    def get_value(self):
        """Evaluate this expression tree and return a concrete value."""
        pass

    @abc.abstractmethod
    def __str__(self):
        pass

    @abc.abstractmethod
    def copy(self):
        """Return a deep copy of this expression tree."""
        pass

    @classmethod
    def coerce_operand(cls, operand):
        # Wrap plain Python values in a ConstExpression leaf.
        if isinstance(operand, Expression):
            return operand
        else:
            return ConstExpression(operand)

    def bind(self, owner):
        # Attach `owner` as the object AttributeGetter leaves read from.
        # No-op by default; overridden by nodes with children or labels.
        pass

    def get_labels(self):
        # Attribute labels referenced in this subtree (none by default).
        return set()

    # Each overload returns a new tree node; nothing is computed until
    # get_value() is called on the resulting expression.
    def __add__(self, other):
        return Add(self, other)

    def __radd__(self, other):
        return Add(other, self)

    def __sub__(self, other):
        return Sub(self, other)

    def __rsub__(self, other):
        return Sub(other, self)

    def __mul__(self, other):
        return Mul(self, other)

    def __rmul__(self, other):
        return Mul(other, self)

    def __div__(self, other):
        # Python 2 only; Python 3 routes '/' to __truediv__.
        return Div(self, other)

    def __floordiv__(self, other):
        return FloorDiv(self, other)

    def __truediv__(self, other):
        return TrueDiv(self, other)

    def __rdiv__(self, other):
        # Python 2 only; Python 3 routes '/' to __rtruediv__.
        return Div(other, self)

    def __rfloordiv__(self, other):
        return FloorDiv(other, self)

    def __rtruediv__(self, other):
        return TrueDiv(other, self)

    def __mod__(self, other):
        return Mod(self, other)

    def __pow__(self, other):
        return Pow(self, other)

    def __eq__(self, other):
        return Eq(self, other)

    def __ne__(self, other):
        return Ne(self, other)

    def __gt__(self, other):
        return Gt(self, other)

    def __ge__(self, other):
        return Ge(self, other)

    def __lt__(self, other):
        return Lt(self, other)

    def __le__(self, other):
        return Le(self, other)

    def __and__(self, other):
        """using bitwise and instead of short-circuit logical and"""
        return And(self, other)

    def __or__(self, other):
        """using bitwise or instead of short-circuit logical or"""
        return Or(self, other)

    def __not__(self):
        """logical not"""
        # NOTE(review): __not__ is not a Python special method; `not x`
        # cannot be overloaded.  This is reached via __invert__ below.
        return Not(self)

    def __invert__(self):
        """using bitwise invert as logical not"""
        return self.__not__()

    def __pos__(self):
        return Pos(self)

    def __neg__(self):
        # Neq is the unary-negation node (despite its name; Ne covers '!=').
        return Neq(self)

    def __abs__(self):
        return Abs(self)

    def __getitem__(self, index):
        return ItemGetter(self, index)
class ConstExpression(Expression):
    """Leaf node holding a constant value."""

    def __init__(self, const_value):
        super(ConstExpression, self).__init__()
        self._value = const_value

    def get_value(self):
        # A constant evaluates to itself.
        return self._value

    def __str__(self):
        return str(self._value)

    def copy(self):
        # Constants are treated as immutable by the DSL, so a copy is
        # simply a fresh leaf around the same value.
        return type(self)(self._value)
class Operator(Expression):
    """Base for expression nodes with child operands.

    Stores the child expressions and recursively forwards bind() and
    get_labels() to them.
    """

    def __init__(self, *operands):
        # Consistency fix: invoke the parent initializer like the other
        # Expression subclasses (BinaryOperator, UnaryOperator) do.
        super(Operator, self).__init__()
        self._operands = operands

    def bind(self, owner):
        # Propagate the owner to every child so AttributeGetter leaves
        # can resolve their labels at evaluation time.
        for operand in self._operands:
            operand.bind(owner)

    def get_labels(self):
        # Union of attribute labels referenced anywhere in the subtree.
        labels = set()
        for operand in self._operands:
            labels.update(operand.get_labels())
        return labels
class BinaryOperator(Operator):
    # Base for infix nodes; subclasses set __symbol__ and implement
    # binary_compute().
    __symbol__ = '?'

    def __init__(self, left_operand, right_operand):
        # Coerce plain values into ConstExpression leaves before storing.
        self.left_operand = self.coerce_operand(left_operand)
        self.right_operand = self.coerce_operand(right_operand)
        super(BinaryOperator, self).__init__(self.left_operand, self.right_operand)

    def get_value(self):
        # Evaluate both children, then combine with the concrete operator.
        return self.binary_compute(self.left_operand.get_value(), self.right_operand.get_value())

    @abc.abstractmethod
    def binary_compute(self, l, r):
        # Combine the two already-evaluated operand values.
        pass

    def copy(self):
        return self.__class__(self.left_operand.copy(), self.right_operand.copy())

    def __str__(self):
        # Fully parenthesized infix rendering, e.g. "(a + b)".
        return "({0} {1} {2})".format(self.left_operand, self.__symbol__, self.right_operand)
class UnaryOperator(Operator):
    # Base for prefix nodes; subclasses set __symbol__ and implement
    # unary_compute().  (__metaclass__ is Python-2-only, as on Expression.)
    __metaclass__ = abc.ABCMeta
    __symbol__ = '?'

    def __init__(self, operand):
        # Coerce plain values into ConstExpression leaves before storing.
        self.operand = self.coerce_operand(operand)
        super(UnaryOperator, self).__init__(self.operand)

    def get_value(self):
        # Evaluate the child, then apply the concrete unary operation.
        return self.unary_compute(self.operand.get_value())

    @abc.abstractmethod
    def unary_compute(self, o):
        # Apply the operation to the already-evaluated operand value.
        pass

    def copy(self):
        return self.__class__(self.operand.copy())

    def __str__(self):
        # Prefix rendering, e.g. "(-x)".
        return "({0}{1})".format(self.__symbol__, self.operand)
# Concrete operator nodes.  Each pairs a display symbol with the actual
# computation applied to the evaluated operand values.

class Add(BinaryOperator):
    __symbol__ = '+'

    def binary_compute(self, l, r):
        return l + r


class Sub(BinaryOperator):
    __symbol__ = '-'

    def binary_compute(self, l, r):
        return l - r


class Mul(BinaryOperator):
    __symbol__ = '*'

    def binary_compute(self, l, r):
        return l * r


class Div(BinaryOperator):
    # Python-2 classic division (built by __div__/__rdiv__).
    __symbol__ = '/'

    def binary_compute(self, l, r):
        return l / r


class FloorDiv(BinaryOperator):
    __symbol__ = '//'

    def binary_compute(self, l, r):
        return l // r


class TrueDiv(BinaryOperator):
    # True division regardless of Python version.
    __symbol__ = '/'

    def binary_compute(self, l, r):
        return operator.__truediv__(l, r)


class Pow(BinaryOperator):
    __symbol__ = '**'

    def binary_compute(self, l, r):
        return l ** r


class Mod(BinaryOperator):
    __symbol__ = '%'

    def binary_compute(self, l, r):
        return l % r


class Eq(BinaryOperator):
    __symbol__ = '=='

    def binary_compute(self, l, r):
        return l == r


class Ne(BinaryOperator):
    __symbol__ = '!='

    def binary_compute(self, l, r):
        return l != r


class Gt(BinaryOperator):
    __symbol__ = '>'

    def binary_compute(self, l, r):
        return l > r


class Ge(BinaryOperator):
    __symbol__ = '>='

    def binary_compute(self, l, r):
        return l >= r


class Lt(BinaryOperator):
    __symbol__ = '<'

    def binary_compute(self, l, r):
        return l < r


class Le(BinaryOperator):
    __symbol__ = '<='

    def binary_compute(self, l, r):
        return l <= r


class And(BinaryOperator):
    # Logical and; note both operands are always evaluated first by
    # BinaryOperator.get_value(), so there is no short-circuiting.
    __symbol__ = '&'

    def binary_compute(self, l, r):
        return l and r


class Or(BinaryOperator):
    # Logical or; both operands are always evaluated (no short-circuit).
    __symbol__ = '|'

    def binary_compute(self, l, r):
        return l or r


class Not(UnaryOperator):
    # Bug fix: __symbol__ was '|' (copy-pasted from Or), which made
    # str(Not(x)) render as "(|x)".  Use '!' for logical negation.
    __symbol__ = '!'

    def unary_compute(self, o):
        return not o


class Pos(UnaryOperator):
    __symbol__ = '+'

    def unary_compute(self, o):
        return o


class Neq(UnaryOperator):
    # NOTE(review): this is arithmetic negation (unary minus).  The name
    # "Neq" is misleading (Ne already covers '!='), but it is kept because
    # Expression.__neg__ references it by this name.
    __symbol__ = '-'

    def unary_compute(self, o):
        return -o
class UnaryFunction(UnaryOperator):
    # Unary operator rendered as a function call, e.g. "abs(x)";
    # subclasses set __function_name__ to the displayed callable name.
    __function_name__ = None

    def __str__(self):
        return "{0}({1})".format(self.__function_name__, self.operand)
class Abs(UnaryFunction):
    # Absolute value; built by Expression.__abs__.
    __function_name__ = "abs"

    def unary_compute(self, o):
        return abs(o)


class IntCast(UnaryFunction):
    __function_name__ = "int"

    def unary_compute(self, o):
        return int(o)


class FloatCast(UnaryFunction):
    __function_name__ = "float"

    def unary_compute(self, o):
        return float(o)


class StrCast(UnaryFunction):
    __function_name__ = "str"

    def unary_compute(self, o):
        return str(o)


class BoolCast(UnaryFunction):
    __function_name__ = "bool"

    def unary_compute(self, o):
        return bool(o)


class Len(UnaryFunction):
    __function_name__ = "len"

    def unary_compute(self, o):
        return len(o)
class ItemGetter(BinaryOperator):
    """Indexing node: evaluates to left[right].

    Built by Expression.__getitem__.
    """

    def binary_compute(self, l, r):
        return l[r]

    def __str__(self):
        # Bug fix: render with brackets to match the subscription
        # semantics; the previous "{0}({1})" rendering looked like a
        # function call.
        return "{0}[{1}]".format(self.left_operand, self.right_operand)
class AttributeGetter(Expression):
    """Leaf that evaluates to getattr(owner, label) at evaluation time.

    The owner object is supplied later via bind(); BIND.foo constructs
    one of these with label 'foo'.
    """

    def __init__(self, label):
        super(AttributeGetter, self).__init__()
        self._owner = None
        self._label = label

    def bind(self, owner):
        self._owner = owner

    def get_value(self):
        return getattr(self._owner, self._label)  # .value

    def copy(self):
        duplicate = type(self)(self._label)
        if self._owner is not None:
            duplicate.bind(self._owner)
        return duplicate

    def get_labels(self):
        # This leaf is exactly one label reference.
        return set([self._label])

    def __str__(self):
        return 'BIND.{0}'.format(self._label)
class Binder(object):
    """Factory whose attribute access yields AttributeGetter leaves.

    Writing ``BIND.score`` inside an expression creates a deferred lookup
    of the attribute 'score' on whatever owner the expression is later
    bound to.
    """

    def __getattr__(self, name):
        return AttributeGetter(name)


# Module-level singleton used to author expressions, e.g. BIND.x + 1.
BIND = Binder()
| |
import collections
import glob
import os
import pickle
from typing import Dict, Iterator, List, NamedTuple, Tuple
import cv2
import numpy as np
from data import data
from data.image import component
from puzzle.problems.image import image_problem
from puzzle.puzzlepedia import prod_config
prod_config.init()  # side effect: initialize the puzzlepedia prod config

# Paths and tuning constants for the grid-component classification workflow.
_CLASSIFIED_COMPONENTS = data.project_path('data/grid/classified_components')
# Maximum pixel width used when rendering classified component montages.
_CLASSIFIED_MAX_WIDTH = 720
# Pickled scratch state of all classified components.
_COMPONENT_SCRATCH = data.project_path('data/grid/components.pkl')
_COMPONENT_INDEX = data.project_path('data/grid/component_index.pkl')
_GRID_FILE_PATTERN = data.project_path('data/grid/original/*.png')
# Shared cv2 window title used for all imshow/mouse-callback interaction.
_IMSHOW_TITLE = 'component'
# Image names to skip as unsupported.
_TODO = {
}
# If non-empty, only these image names are processed.
_FOCUS = {
}
class ClassifiedComponent(NamedTuple):
    # A component image paired with its assigned classification label.
    classification: str
    component: component.Component


# Mapping from a component's hash to its classification record.
AllComponents = Dict[int, ClassifiedComponent]
# (x, y) offset of a component within a rendered montage image.
ComponentPosition = Tuple[int, int]
def images() -> Iterator[Tuple[str, np.ndarray]]:
    """Yield (basename, image) pairs for every grid image, in sorted order.

    Fixed: the original annotation claimed ``Iterator[np.ndarray]`` but the
    generator yields 2-tuples, which every caller unpacks as (name, image).
    Images are read unchanged (any alpha channel is preserved).
    """
    for filename in sorted(glob.glob(_GRID_FILE_PATTERN)):
        yield (
            os.path.basename(filename),
            cv2.imread(filename, flags=cv2.IMREAD_UNCHANGED))
def image_problems() -> Iterator[image_problem.ImageProblem]:
    """Yield an ImageProblem for each selected grid image.

    Honors the _FOCUS allow-list when it is non-empty, and skips any image
    whose name appears in _TODO.
    """
    for name, image in images():
        focused = not _FOCUS or name in _FOCUS
        if not focused:
            continue
        if name in _TODO:
            print('Skipping unsupported image:', name)
            continue
        print('Working on:', name)
        yield image_problem.ImageProblem(name, image)
def read_classified() -> AllComponents:
    """Load the scratch classification dict, or return {} if none exists.

    SECURITY NOTE: pickle.load executes arbitrary code while deserializing;
    only load scratch files this tool itself has written.
    """
    if not os.path.exists(_COMPONENT_SCRATCH):
        return {}
    # Context manager closes the handle deterministically (the original
    # passed an open() result straight to pickle.load and leaked it).
    with open(_COMPONENT_SCRATCH, 'rb') as f:
        all_components = pickle.load(f)
    print('Loaded %s' % len(all_components))
    return all_components
def verify_classified(all_components: AllComponents) -> None:
    """Review each classification bucket; clicks erase bad entries in place.

    One montage window is shown per classification ('-' buckets skipped).
    Returns early when interactive_click_to_remove reports ESC.
    """
    by_classification = collections.defaultdict(list)
    for classification, c in all_components.values():
        by_classification[classification].append(c)
    print('Click to erase incorrect classifications')
    print('Press any key to continue')
    for classification, components_list in sorted(by_classification.items()):
        if classification == '-':
            continue
        print('Classification:', classification)
        output, positions = illustrate_classified_components(components_list)
        positioned_components = list(zip(positions, components_list))
        if not interactive_click_to_remove(
                output, positioned_components, all_components):
            break
def interactive_click_to_remove(
        image: np.ndarray,
        positioned_components: List[Tuple[ComponentPosition, component.Component]],
        all_components: AllComponents) -> bool:
    """Show `image` and let the user click components to delete them.

    A left-button release inside a component's bounding box removes that
    component from `all_components` (keyed by hash) and greys the area out.
    Returns False when the user pressed ESC (caller should stop reviewing),
    True for any other key press.
    """
    def on_mouse_click(event: int, x: int, y: int, flags: int, param: None):
        del flags, param
        if event != cv2.EVENT_LBUTTONUP:
            return
        for (c_x, c_y), c in positioned_components:
            # Hit-test the click against this component's bounding box.
            if x < c_x or y < c_y:
                continue
            height, width = c.source.shape
            if x > c_x + width or y > c_y + height:
                continue
            # NOTE(review): no break after a hit, and a second click on the
            # same component would raise KeyError -- confirm both are intended.
            del all_components[hash(c)]
            image[c_y:c_y + height, c_x:c_x + width] = 128
    cv2.namedWindow(_IMSHOW_TITLE)
    cv2.setMouseCallback(_IMSHOW_TITLE, on_mouse_click)
    while True:
        cv2.imshow(_IMSHOW_TITLE, image)
        # Poll every 250 ms so the mouse callback keeps firing.
        key = cv2.waitKey(250) & 0xFF
        if key == 27:  # ESC aborts the review loop.
            return False
        elif key != 255:  # 255 == no key pressed during the wait.
            break
    return True
def write_classified(all_components: AllComponents) -> None:
    """Persist the scratch dict and the index of confirmed classifications.

    The scratch file keeps every entry (including '-' ignores); the index
    file keeps only real classifications, stored as Components labeled with
    their symbol.
    """
    print('Writing %s' % len(all_components))
    # Context managers flush and close the pickle files deterministically
    # (the original passed bare open() results to pickle.dump and leaked them).
    with open(_COMPONENT_SCRATCH, 'wb') as f:
        pickle.dump(all_components, f, protocol=pickle.HIGHEST_PROTOCOL)
    index = {}
    for k, v in all_components.items():
        if v.classification == '-':
            continue  # '-' marks components the user chose to ignore.
        index[k] = component.Component(v.component.source, labels={
            'symbol': v.classification,
        })
    with open(_COMPONENT_INDEX, 'wb') as f:
        pickle.dump(index, f, protocol=pickle.HIGHEST_PROTOCOL)
def classify(all_components: AllComponents) -> None:
    """Interactively assign a symbol to each detected component.

    Keys: letter/digit classifies (shift key gives uppercase), SPACE prompts
    for free-form input, '-' marks the component ignored, '<'/'>' step to
    the previous/next component, ESC exits.
    """
    manual_mode = False
    shift = False
    # The original reused the loop variable `i` both for the problem and as
    # the component index; distinct names keep both visible.
    for problem in image_problems():
        unclassified_components = list(problem.get_components())
        index = 0
        while index < len(unclassified_components):
            c = unclassified_components[index]
            hash_id = hash(c)
            if hash_id in all_components:
                if not manual_mode:
                    print('component already identified:',
                          all_components[hash_id].classification)
                    index += 1
                    continue
                classification = all_components[hash_id].classification
            else:
                classification = None
            print('hash id:', hash_id, classification)
            cv2.imshow(_IMSHOW_TITLE, c.source)
            code = cv2.waitKey(0)
            key = chr(code & 0xFF)
            if code == 27:  # ESC
                return
            elif code == 0:  # Shift alone; applies to the next keypress.
                shift = True
                continue
            elif code == 32:  # SPACE
                all_components[hash_id] = ClassifiedComponent(
                    input('classification:'), c)
                manual_mode = False
            elif code == 44:  # <
                # Clamp at 0: the original could step to -1 and wrap around
                # to the *last* component via negative indexing.
                index = max(0, index - 1)
                manual_mode = True
                continue
            elif code == 46:  # >
                index += 1
                manual_mode = True
                continue
            elif code == 45:  # -?
                print('ignoring')
                all_components[hash_id] = ClassifiedComponent('-', c)
                manual_mode = False
                shift = False
            elif key.isalnum():
                if shift:
                    key = key.upper()
                all_components[hash_id] = ClassifiedComponent(key, c)
                manual_mode = False
                shift = False
            else:
                print('unrecognized:', code)
            index += 1
def illustrate_all_classified(all_classified: AllComponents) -> None:
    """Write one montage PNG per classification bucket."""
    buckets = collections.defaultdict(list)
    for classification, c in all_classified.values():
        buckets[classification].append(c)
    for classification, components_list in buckets.items():
        montage, _ = illustrate_classified_components(components_list)
        write_classified_components(montage, classification)
def illustrate_classified_components(
        all_components: List[component.Component]
) -> Tuple[np.ndarray, List[ComponentPosition]]:
    """Pack components into one montage image, placing largest-area first.

    Layout is left-to-right, wrapping to a new row whenever the next
    component would cross _CLASSIFIED_MAX_WIDTH pixels.

    Returns:
      (montage, positions): the uint8 montage, and each *input* component's
      (x, y) top-left position. Fixed: the original returned positions in
      layout (area-sorted) order, so callers that zip them against the
      input list (verify_classified) paired positions with the wrong
      components whenever sorting reordered anything.
    """
    # Lay out in descending-area order, but remember each component's
    # original index so positions can be reported in input order.
    layout_order = sorted(
        range(len(all_components)),
        key=lambda idx: (all_components[idx].source.shape[0] *
                         all_components[idx].source.shape[1]),
        reverse=True)
    positions: List[ComponentPosition] = [(0, 0)] * len(all_components)
    total_width = 0
    max_row_height = 0
    cursor_x = 0
    cursor_y = 0
    for idx in layout_order:
        height, width = all_components[idx].source.shape
        if cursor_x + width > _CLASSIFIED_MAX_WIDTH:
            # Wrap to the start of a new row.
            total_width = max(total_width, cursor_x)
            cursor_x = 0
            cursor_y += max_row_height
            max_row_height = 0
        positions[idx] = (cursor_x, cursor_y)
        cursor_x += width
        max_row_height = max(max_row_height, height)
    total_width = max(total_width, cursor_x) + 8  # Arbitrary padding.
    total_height = cursor_y + max_row_height
    output = np.zeros((total_height, total_width), dtype=np.uint8)
    for idx in layout_order:
        height, width = all_components[idx].source.shape
        x, y = positions[idx]
        output[y:y + height, x:x + width] = all_components[idx].source
    return output, positions
def write_classified_components(
        output: np.ndarray, classification: str) -> None:
    """Save a classification montage as <classification>.png."""
    target = os.path.join(_CLASSIFIED_COMPONENTS, '%s.png' % classification)
    cv2.imwrite(target, output)
def main() -> None:
    """Run the full workflow: load, verify, classify, persist, illustrate."""
    already_classified = read_classified()
    verify_classified(already_classified)
    classify(already_classified)
    write_classified(already_classified)
    illustrate_all_classified(already_classified)


# Guard the entry point so importing this module does not launch the
# interactive workflow (the original called main() unconditionally).
if __name__ == '__main__':
    main()
| |
# Dynamic Mode Decomposition based on http://arxiv.org/pdf/1312.0041v1.pdf
import numpy as np
import pandas as pd
from sklearn.preprocessing import normalize
from bacteriopop_utils import prepare_DMD_matrices
def find_fixed_adjacency_matrix(min_abundance=0.0, phylo_column='order',
                                full_svd=True):
    """
    This function finds the adjacency matrix among clusters of bacteria over
    the 11 weeks of sampling assuming the interaction between clusters is
    fixed.

    It creates a dictionary of descriptive tuples like ("High", 2) for
    high-oxygen week 2, and corresponding dataframe values. These
    dataframes have weeks as columns and taxa ("bacteria") as rows.

    Unlike find_temporal_adjacency_matrix(), we get only one predictive
    matrix that represents the 10 transitions between sampling points.
    Since the dictionary has 8 tuple keys for High/Low oxygen and 4
    replicates for each condition, 8 interaction ("A") matrices are created.
    These are accessed by the dictionary linear_mappings, with the same
    tuples as keys.
    The names of each node can be accessed by nodes_list, the other output.

    :param min_abundance: minimum abundance to look for in original data
    :param phylo_column: most detailed phylogenetic column to consider
    :param full_svd: if True, runs the full svd algorithm. If False,
        runs a faster version.
    :return: (linear_mappings, nodes_list), both keyed by descriptive tuple.
    """
    # Default values
    if min_abundance is None:
        min_abundance = 0
    if phylo_column is None:
        phylo_column = 'order'
    if full_svd is None:
        full_svd = False
    # snapshots of samples over 11 weeks
    snapshots = prepare_DMD_matrices(min_abundance, phylo_column, oxygen='all', debug=False)
    linear_mappings = {}
    nodes_list = {}
    for descriptive_tuple in snapshots.keys():
        df = snapshots[descriptive_tuple]
        data = df.values
        # X holds weeks 1-10, Y holds weeks 2-11; A should map X -> Y.
        X = data[:, 0:10]
        Y = data[:, 1:11]
        # Preprocess the abundance data (sklearn normalize, per-column).
        X = normalize(X, axis=0)
        Y = normalize(Y, axis=0)
        # NOTE(review): np.linalg.svd returns V already transposed (Vh).
        U, s, V = np.linalg.svd(X, full_matrices=full_svd)
        if full_svd is True:  # slower
            S = np.zeros((len(U), len(s)), dtype=float)
            S[:len(s), :len(s)] = np.diag(s)
            # NOTE(review): with the full SVD, U and V are square and
            # orthogonal, so inv() works but U.T / V.T would be cheaper.
            pseu_inv_x = np.dot(np.linalg.inv(V),
                                np.dot(np.linalg.pinv(S), np.linalg.inv(U)))
        else:  # faster
            S = np.diag(s)
            # NOTE(review): in the reduced SVD, inv(V) and inv(S) require
            # square factors, which only holds when the taxa count >= 10;
            # np.linalg.pinv(X) is the always-valid form -- confirm intent.
            pseu_inv_x = np.dot(np.linalg.inv(V),
                                np.dot(np.linalg.inv(S), np.linalg.pinv(U)))
        # Adjacency matrix between clusters
        A = np.dot(Y, pseu_inv_x)
        # A = np.dot(Y, np.linalg.pinv(X))  # full SVD (slower)
        linear_mappings[descriptive_tuple] = A
        nodes_list[descriptive_tuple] = list(df.index)
    return linear_mappings, nodes_list
def adjacency_matrix_into_pandas(mappings_array, row_and_colnames):
    """
    Wrap one adjacency matrix in a pandas DataFrame whose index (row) names
    and column names are both set to row_and_colnames.

    :param mappings_array: numpy matrix, e.g. one value of the
        linear_mappings output of find_fixed_adjacency_matrix().
    :param row_and_colnames: sequence of node names (the matching
        nodes_list entry).
    :return: one Pandas DataFrame with row and column names.

    # todo: which labels do we use? So far labels are long strings like
    # Bacteria,Proteobacteria,Gammaproteobacteria,Pseudomonadales and
    # sometimes "unassigned,,," when the taxonomy was not fully specified.
    """
    labelled = pd.DataFrame(mappings_array,
                            index=row_and_colnames,
                            columns=row_and_colnames)
    return labelled
def DMD_results_dict_from_numpy_to_pandas(adj_dict, node_name_dict):
    """Convert {tuple: numpy array} into {tuple: labelled DataFrame}.

    Both inputs must share exactly the same key set; the node names provide
    row and column labels for each matrix.
    """
    assert (set(adj_dict.keys()) == set(node_name_dict.keys()))
    return {
        key: adjacency_matrix_into_pandas(matrix, node_name_dict[key])
        for key, matrix in adj_dict.items()
    }
def find_temporal_adjacency_matrix(min_abundance, phylo_column, full_svd):
    """
    Find the adjacency matrix among clusters of bacteria from week to week,
    assuming the interaction between clusters is changing.

    :param min_abundance: ignore the bacteria if their abundance is always
        below the min_abundance
    :param phylo_column: the data is clustered based on the phylo_column
    :param full_svd: the method of singular value decomposition. full SVD is
        more accurate and slower than the reduced SVD
    :return: (linear_mappings, nodes_list) keyed by descriptive tuple plus a
        trailing 'Week N' element identifying the transition.
    """
    # Default values
    if min_abundance is None:
        min_abundance = 0
    if phylo_column is None:
        phylo_column = 'family'
    if full_svd is None:
        full_svd = False
    # snapshots of samples over 11 weeks
    # todo: python reserves capital letters for classes.
    snapshots = prepare_DMD_matrices(min_abundance, phylo_column, oxygen='all', debug=False)
    linear_mappings = {}
    nodes_list = {}
    for descriptive_tuple in snapshots.keys():
        df = snapshots[descriptive_tuple]
        data = df.values
        for time in range(10):
            # Single-column snapshots: week `time` -> week `time + 1`.
            X = data[:, time:time+1]
            Y = data[:, time+1:time+2]
            # Preprocess the abundance data
            X = normalize(X, axis=0)
            Y = normalize(Y, axis=0)
            # NOTE(review): np.linalg.svd returns V already transposed (Vh).
            U, s, V = np.linalg.svd(X, full_matrices=full_svd)
            if full_svd is True:  # slower
                # NOTE(review): dtype=complex here, while the fixed variant
                # uses dtype=float -- confirm which one is intended.
                S = np.zeros((len(U), len(s)), dtype=complex)
                S[:len(s), :len(s)] = np.diag(s)
                pseu_inv_x = np.dot(np.linalg.inv(V),
                                    np.dot(np.linalg.pinv(S), np.linalg.inv(U)))
            else:  # faster
                S = np.diag(s)
                # NOTE(review): inv(V)/inv(S) assume square factors; with a
                # single-column X the reduced factors are 1x1 -- verify.
                pseu_inv_x = np.dot(np.linalg.inv(V),
                                    np.dot(np.linalg.inv(S), np.linalg.pinv(U)))
            # Adjacency matrix between clusters
            A = np.dot(Y, pseu_inv_x)
            # A = np.dot(Y, np.linalg.pinv(X))  # full SVD (slower)
            key = descriptive_tuple + ('Week ' + str(time+1),)
            linear_mappings[key] = A
            nodes_list[key] = list(df.index)
    return linear_mappings, nodes_list
def aggregate_adjacency_matrix_over_replicates(mappings):
    """
    :param mappings: a python dictionary of pandas data frames that contains
                     the adjacency matrices for all 8 replicates including
                     4 high O2 and 4 low O2; keys are tuples whose first
                     element is "High" or "Low". NOTE: the frames are
                     modified in place (missing rows/columns added, sorted).
    :return:
            avg_mappings: a dictionary of pandas data frame for low and high
            replicates mean
            std_mappings: a dictionary of pandas data frame for low and high
            replicates standard deviation
            snr_mappings: a dictionary of pandas data frame for low and high
            replicates signal to noise ratio
    """
    std_mappings = {}
    avg_mappings = {}
    snr_mappings = {}
    current_nodes = {}
    high_rep_mappings = []
    low_rep_mappings = []
    current_nodes['Low'] = set([])
    current_nodes['High'] = set([])
    # create two lists, one for each high or low replicates including all
    # labels observed in replicates
    for key in mappings.keys():
        if key[0] == "High":
            current_nodes['High'] = \
                current_nodes['High'].union(mappings[key].index)
        else:
            current_nodes['Low'] = \
                current_nodes['Low'].union(mappings[key].index)
    # add the missing label to each replicate
    for key in mappings.keys():
        if key[0] == "High":
            # NOTE(review): `id` shadows the builtin of the same name.
            for id in current_nodes['High']:
                if id not in mappings[key].index:
                    # add one column
                    mappings[key][id] = [0.0]*len(mappings[key].index)
                    # add one row
                    mappings[key].loc[id] = \
                        [0.0]*len(mappings[key].columns)
            # sort the index and columns labels of data frame in order to
            # have an identical ordering in the adjacency matrix
            mappings[key] = mappings[key].sort_index(axis=1)
            mappings[key] = mappings[key].sort_index()
            high_rep_mappings.append(mappings[key].values)
        else:
            for id in current_nodes['Low']:
                if id not in mappings[key].index:
                    # add one column
                    mappings[key][id] = [0.0]*len(mappings[key].index)
                    # add one row (the original comment said "column" twice)
                    mappings[key].loc[id] = \
                        [0.0]*len(mappings[key].columns)
            # sort the index and columns labels of data frame in order to have
            # an identical ordering in the adjacency matrix
            mappings[key] = mappings[key].sort_index(axis=1)
            mappings[key] = mappings[key].sort_index()
            low_rep_mappings.append(mappings[key].values)
    # find the element by element average of adjacency matrix over replicates
    # of high/low O2
    avg_mappings['High'] = np.mean(high_rep_mappings, axis=0)
    avg_mappings['Low'] = np.mean(low_rep_mappings, axis=0)
    # convert from numpy array to pandas dataframe
    # NOTE(review): current_nodes values are *sets*, so the label order given
    # to the DataFrame is arbitrary while the matrices were built in sorted
    # order -- sorted(current_nodes[...]) would match. Confirm.
    avg_mappings = DMD_results_dict_from_numpy_to_pandas(avg_mappings,
                                                         current_nodes)
    # find the element by element STD of adjacency matrix over replicates of
    # high/low O2 (ddof=1: sample standard deviation)
    std_mappings['High'] = np.std(high_rep_mappings, axis=0, ddof=1)
    std_mappings['Low'] = np.std(low_rep_mappings, axis=0, ddof=1)
    # convert from numpy array to pandas dataframe
    std_mappings = DMD_results_dict_from_numpy_to_pandas(std_mappings,
                                                         current_nodes)
    # find the element by element SNR of adjacency matrix over replicates of
    # high/low O2
    snr_mappings['High'] = avg_mappings['High']/std_mappings['High']
    snr_mappings['Low'] = avg_mappings['Low'] / std_mappings['Low']
    # convert from numpy array to pandas dataframe
    snr_mappings = DMD_results_dict_from_numpy_to_pandas(snr_mappings,
                                                         current_nodes)
    return std_mappings, avg_mappings, snr_mappings
| |
"""Terminal reporting of the full testing process.
This is a good source for looking at the various reporting hooks.
"""
import argparse
import datetime
import inspect
import platform
import sys
import warnings
from collections import Counter
from functools import partial
from pathlib import Path
from typing import Any
from typing import Callable
from typing import cast
from typing import Dict
from typing import Generator
from typing import List
from typing import Mapping
from typing import Optional
from typing import Sequence
from typing import Set
from typing import TextIO
from typing import Tuple
from typing import TYPE_CHECKING
from typing import Union
import attr
import pluggy
import py
import _pytest._version
from _pytest import nodes
from _pytest import timing
from _pytest._code import ExceptionInfo
from _pytest._code.code import ExceptionRepr
from _pytest._io.wcwidth import wcswidth
from _pytest.compat import final
from _pytest.config import _PluggyPlugin
from _pytest.config import Config
from _pytest.config import ExitCode
from _pytest.config import hookimpl
from _pytest.config.argparsing import Parser
from _pytest.nodes import Item
from _pytest.nodes import Node
from _pytest.pathlib import absolutepath
from _pytest.pathlib import bestrelpath
from _pytest.reports import BaseReport
from _pytest.reports import CollectReport
from _pytest.reports import TestReport
if TYPE_CHECKING:
from typing_extensions import Literal
from _pytest.main import Session
# Minimum seconds between successive "collecting ..." status rewrites
# (see report_collect).
REPORT_COLLECTING_RESOLUTION = 0.5

# Outcome category names the reporter recognizes; presumably consumed by the
# summary/main-color logic outside this chunk -- confirm.
KNOWN_TYPES = (
    "failed",
    "passed",
    "skipped",
    "deselected",
    "xfailed",
    "xpassed",
    "warnings",
    "error",
)

# Default for -r: show the (f)ailed and (E)rror extra summaries.
_REPORTCHARS_DEFAULT = "fE"
class MoreQuietAction(argparse.Action):
    """A modified copy of the argparse count action which counts down and updates
    the legacy quiet attribute at the same time.

    Used to unify verbosity handling.
    """

    def __init__(
        self,
        option_strings: Sequence[str],
        dest: str,
        default: object = None,
        required: bool = False,
        help: Optional[str] = None,
    ) -> None:
        super().__init__(
            option_strings=option_strings,
            dest=dest,
            nargs=0,
            default=default,
            required=required,
            help=help,
        )

    def __call__(
        self,
        parser: argparse.ArgumentParser,
        namespace: argparse.Namespace,
        values: Union[str, Sequence[object], None],
        option_string: Optional[str] = None,
    ) -> None:
        # Each occurrence lowers the verbosity counter by one ...
        setattr(namespace, self.dest, getattr(namespace, self.dest, 0) - 1)
        # ... and raises the legacy quiet counter by one.
        # todo Deprecate config.quiet
        namespace.quiet = getattr(namespace, "quiet", 0) + 1
def pytest_addoption(parser: Parser) -> None:
    """Register terminal-reporting command line options and ini settings."""
    group = parser.getgroup("terminal reporting", "reporting", after="general")
    # Verbosity family: -v counts up, -q counts down (MoreQuietAction),
    # --verbosity sets the level directly; all share dest="verbose".
    group._addoption(
        "-v",
        "--verbose",
        action="count",
        default=0,
        dest="verbose",
        help="increase verbosity.",
    )
    group._addoption(
        "--no-header",
        action="store_true",
        default=False,
        dest="no_header",
        help="disable header",
    )
    group._addoption(
        "--no-summary",
        action="store_true",
        default=False,
        dest="no_summary",
        help="disable summary",
    )
    group._addoption(
        "-q",
        "--quiet",
        action=MoreQuietAction,
        default=0,
        dest="verbose",
        help="decrease verbosity.",
    )
    group._addoption(
        "--verbosity",
        dest="verbose",
        type=int,
        default=0,
        help="set verbosity. Default is 0.",
    )
    # Extra summary characters; parsed by getreportopt().
    group._addoption(
        "-r",
        action="store",
        dest="reportchars",
        default=_REPORTCHARS_DEFAULT,
        metavar="chars",
        help="show extra test summary info as specified by chars: (f)ailed, "
        "(E)rror, (s)kipped, (x)failed, (X)passed, "
        "(p)assed, (P)assed with output, (a)ll except passed (p/P), or (A)ll. "
        "(w)arnings are enabled by default (see --disable-warnings), "
        "'N' can be used to reset the list. (default: 'fE').",
    )
    group._addoption(
        "--disable-warnings",
        "--disable-pytest-warnings",
        default=False,
        dest="disable_warnings",
        action="store_true",
        help="disable warnings summary",
    )
    group._addoption(
        "-l",
        "--showlocals",
        action="store_true",
        dest="showlocals",
        default=False,
        help="show locals in tracebacks (disabled by default).",
    )
    group._addoption(
        "--tb",
        metavar="style",
        action="store",
        dest="tbstyle",
        default="auto",
        choices=["auto", "long", "short", "no", "line", "native"],
        help="traceback print mode (auto/long/short/line/native/no).",
    )
    group._addoption(
        "--show-capture",
        action="store",
        dest="showcapture",
        choices=["no", "stdout", "stderr", "log", "all"],
        default="all",
        help="Controls how captured stdout/stderr/log is shown on failed tests. "
        "Default is 'all'.",
    )
    group._addoption(
        "--fulltrace",
        "--full-trace",
        action="store_true",
        default=False,
        help="don't cut any tracebacks (default is to cut).",
    )
    group._addoption(
        "--color",
        metavar="color",
        action="store",
        dest="color",
        default="auto",
        choices=["yes", "no", "auto"],
        help="color terminal output (yes/no/auto).",
    )
    group._addoption(
        "--code-highlight",
        default="yes",
        choices=["yes", "no"],
        help="Whether code should be highlighted (only if --color is also enabled)",
    )
    # Ini setting consumed by TerminalReporter._determine_show_progress_info.
    parser.addini(
        "console_output_style",
        help='console output: "classic", or with additional progress information ("progress" (percentage) | "count").',
        default="progress",
    )
def pytest_configure(config: Config) -> None:
    """Create and register the terminal reporter; wire trace output in debug mode."""
    reporter = TerminalReporter(config, sys.stdout)
    config.pluginmanager.register(reporter, "terminalreporter")
    if config.option.debug or config.option.traceconfig:

        def mywriter(tags, args):
            reporter.write_line("[traceconfig] " + " ".join(map(str, args)))

        config.trace.root.setprocessor("pytest:config", mywriter)
def getreportopt(config: Config) -> str:
    """Expand the -r option string into the effective report characters.

    'a'/'A'/'N' act as presets that replace everything collected so far;
    legacy 'F'/'S' are lowered; 'w' is added or stripped according to
    --disable-warnings.
    """
    presets = {"a": "sxXEf", "A": "PpsxXEf", "N": ""}
    opts = ""
    for ch in config.option.reportchars:
        if ch in ("F", "S"):  # old aliases
            ch = ch.lower()
        if ch in presets:
            opts = presets[ch]
        elif ch not in opts:
            opts += ch
    if config.option.disable_warnings:
        opts = opts.replace("w", "")
    elif "w" not in opts:
        opts = "w" + opts
    return opts
@hookimpl(trylast=True)  # after _pytest.runner
def pytest_report_teststatus(report: BaseReport) -> Tuple[str, str, str]:
    """Map a report to its (category, shortletter, verbose-word) triple."""
    if report.passed:
        letter = "."
    elif report.skipped:
        letter = "s"
    else:
        letter = "F"
    outcome: str = report.outcome
    # Failures outside the test call itself are reported as errors.
    if outcome == "failed" and report.when in ("collect", "setup", "teardown"):
        outcome = "error"
        letter = "E"
    return outcome, letter, outcome.upper()
@attr.s
class WarningReport:
    """Simple structure to hold warnings information captured by ``pytest_warning_recorded``.

    :ivar str message:
        User friendly message about the warning.
    :ivar str|None nodeid:
        nodeid that generated the warning (see ``get_location``).
    :ivar tuple fslocation:
        File system location of the source of the warning (see ``get_location``).
    """

    message = attr.ib(type=str)
    nodeid = attr.ib(type=Optional[str], default=None)
    fslocation = attr.ib(type=Optional[Tuple[str, int]], default=None)
    # NOTE(review): presumably read by the short-summary code outside this
    # chunk -- confirm before relying on it.
    count_towards_summary = True

    def get_location(self, config: Config) -> Optional[str]:
        """Return the more user-friendly information about the location of a warning, or None."""
        if self.nodeid:
            return self.nodeid
        if self.fslocation:
            filename, linenum = self.fslocation
            # Prefer a path relative to the invocation dir for readability.
            relpath = bestrelpath(config.invocation_params.dir, absolutepath(filename))
            return f"{relpath}:{linenum}"
        return None
@final
class TerminalReporter:
def __init__(self, config: Config, file: Optional[TextIO] = None) -> None:
    """Create a reporter writing to *file* (defaults to sys.stdout)."""
    import _pytest.config

    self.config = config
    self._numcollected = 0
    self._session: Optional[Session] = None
    self._showfspath: Optional[bool] = None
    # Reports collected per outcome category, e.g. "passed", "error".
    self.stats: Dict[str, List[Any]] = {}
    self._main_color: Optional[str] = None
    self._known_types: Optional[List[str]] = None
    self.startdir = config.invocation_dir
    self.startpath = config.invocation_params.dir
    if file is None:
        file = sys.stdout
    self._tw = _pytest.config.create_terminal_writer(config, file)
    self._screen_width = self._tw.fullwidth
    # Path/prefix of the line currently being written; -2 is a sentinel
    # meaning "mid-line after an outcome word" (see write_ensure_prefix).
    self.currentfspath: Union[None, Path, str, int] = None
    self.reportchars = getreportopt(config)
    self.hasmarkup = self._tw.hasmarkup
    self.isatty = file.isatty()
    # nodeids that already counted toward the progress indicator.
    self._progress_nodeids_reported: Set[str] = set()
    self._show_progress_info = self._determine_show_progress_info()
    # Timestamp of the last "collecting ..." rewrite (rate limiting).
    self._collect_report_last_write: Optional[float] = None
    self._already_displayed_warnings: Optional[int] = None
    self._keyboardinterrupt_memo: Optional[ExceptionRepr] = None
def _determine_show_progress_info(self) -> "Literal['progress', 'count', False]":
    """Return whether we should display progress information based on the current config."""
    # Without output capturing, test output is interleaved with ours and a
    # progress indicator would garble it (#3038).
    if self.config.getoption("capture", "no") == "no":
        return False
    # Fixture setup/teardown display also conflicts with progress output.
    if self.config.getoption("setupshow", False):
        return False
    style: str = self.config.getini("console_output_style")
    return style if style in ("progress", "count") else False
@property
def verbosity(self) -> int:
    """Verbosity level from -v/-q/--verbosity (negative means quiet)."""
    verbosity: int = self.config.option.verbose
    return verbosity
@property
def showheader(self) -> bool:
    """Whether the session header is shown (suppressed by -q)."""
    return self.verbosity >= 0
@property
def no_header(self) -> bool:
    """True when --no-header was given."""
    return bool(self.config.option.no_header)
@property
def no_summary(self) -> bool:
    """True when --no-summary was given."""
    return bool(self.config.option.no_summary)
@property
def showfspath(self) -> bool:
    """Whether file paths are printed; defaults to verbosity >= 0 unless set."""
    override = self._showfspath
    return self.verbosity >= 0 if override is None else override
@showfspath.setter
def showfspath(self, value: Optional[bool]) -> None:
    # None restores the verbosity-derived default used by the getter.
    self._showfspath = value
@property
def showlongtestinfo(self) -> bool:
    """True when verbose (-v): show one location line per test."""
    return self.verbosity > 0
def hasopt(self, char: str) -> bool:
    """True if report character *char* (or its long-name alias) is active."""
    aliases = {"xfailed": "x", "skipped": "s"}
    return aliases.get(char, char) in self.reportchars
def write_fspath_result(self, nodeid: str, res, **markup: bool) -> None:
    """Write *res* for *nodeid*, printing the file path when it changes."""
    fspath = self.config.rootpath / nodeid.split("::")[0]
    if self.currentfspath is None or fspath != self.currentfspath:
        # Finish the progress column of the previous file's line first.
        if self.currentfspath is not None and self._show_progress_info:
            self._write_progress_information_filling_space()
        self.currentfspath = fspath
        relfspath = bestrelpath(self.startpath, fspath)
        self._tw.line()
        self._tw.write(relfspath + " ")
    self._tw.write(res, flush=True, **markup)
def write_ensure_prefix(self, prefix: str, extra: str = "", **kwargs) -> None:
    """Write *extra* after *prefix*, starting a new line when the prefix changed."""
    if self.currentfspath != prefix:
        self._tw.line()
        self.currentfspath = prefix
        self._tw.write(prefix)
    if extra:
        self._tw.write(extra, **kwargs)
        # -2 marks "mid-line after an outcome word" so the next prefix
        # comparison always forces a new line.
        self.currentfspath = -2
def ensure_newline(self) -> None:
    """Finish any partially-written line so the next write starts fresh."""
    if not self.currentfspath:
        return
    self._tw.line()
    self.currentfspath = None
def write(self, content: str, *, flush: bool = False, **markup: bool) -> None:
    """Write *content* to the terminal writer with optional markup."""
    self._tw.write(content, flush=flush, **markup)
def flush(self) -> None:
    """Flush the underlying terminal writer."""
    self._tw.flush()
def write_line(self, line: Union[str, bytes], **markup: bool) -> None:
    """Write *line* on its own line; non-str input is decoded with replacement."""
    if not isinstance(line, str):
        line = str(line, errors="replace")
    self.ensure_newline()
    self._tw.line(line, **markup)
def rewrite(self, line: str, **markup: bool) -> None:
    """Rewinds the terminal cursor to the beginning and writes the given line.

    :param erase:
        If True, will also add spaces until the full terminal width to ensure
        previous lines are properly erased.

    The rest of the keyword arguments are markup instructions.
    """
    erase = markup.pop("erase", False)
    fill = " " * (self._tw.fullwidth - len(line) - 1) if erase else ""
    self._tw.write("\r" + str(line) + fill, **markup)
def write_sep(
    self,
    sep: str,
    title: Optional[str] = None,
    fullwidth: Optional[int] = None,
    **markup: bool,
) -> None:
    """Write a separator built from *sep*, with an optional title, on a fresh line."""
    self.ensure_newline()
    self._tw.sep(sep, title, fullwidth, **markup)
def section(self, title: str, sep: str = "=", **kw: bool) -> None:
    """Write a titled separator (no newline handling, unlike write_sep)."""
    self._tw.sep(sep, title, **kw)
def line(self, msg: str, **kw: bool) -> None:
    """Write *msg* as its own line via the terminal writer."""
    self._tw.line(msg, **kw)
def _add_stats(self, category: str, items: Sequence[Any]) -> None:
    """Append *items* under *category*; recompute the main color for new categories."""
    first_time = category not in self.stats
    self.stats.setdefault(category, []).extend(items)
    if first_time:
        self._set_main_color()
def pytest_internalerror(self, excrepr: ExceptionRepr) -> bool:
    """Print the internal error, one "INTERNALERROR> "-prefixed line per line."""
    for line in str(excrepr).split("\n"):
        self.write_line("INTERNALERROR> " + line)
    # True tells pytest the error has been handled/displayed.
    return True
def pytest_warning_recorded(
    self,
    warning_message: warnings.WarningMessage,
    nodeid: str,
) -> None:
    """Collect a recorded warning into the "warnings" stats bucket."""
    from _pytest.warnings import warning_record_to_str

    warning_report = WarningReport(
        message=warning_record_to_str(warning_message),
        nodeid=nodeid,
        fslocation=(warning_message.filename, warning_message.lineno),
    )
    self._add_stats("warnings", [warning_report])
def pytest_plugin_registered(self, plugin: _PluggyPlugin) -> None:
    """With --traceconfig, announce each plugin registration."""
    if self.config.option.traceconfig:
        msg = f"PLUGIN registered: {plugin}"
        # XXX This event may happen during setup/teardown time
        # which unfortunately captures our output here
        # which garbles our output if we use self.write_line.
        self.write_line(msg)
def pytest_deselected(self, items: Sequence[Item]) -> None:
    """Record deselected items; report_collect includes them in its counts."""
    self._add_stats("deselected", items)
def pytest_runtest_logstart(
    self, nodeid: str, location: Tuple[str, Optional[int], str]
) -> None:
    """Print the test's path/location before its first report arrives."""
    # Ensure that the path is printed before the
    # 1st test of a module starts running.
    if self.showlongtestinfo:
        # Verbose: one full location line per test.
        line = self._locationline(nodeid, *location)
        self.write_ensure_prefix(line, "")
        self.flush()
    elif self.showfspath:
        # Non-verbose: just the file path (once per file).
        self.write_fspath_result(nodeid, "")
        self.flush()
def pytest_runtest_logreport(self, report: TestReport) -> None:
    """Record a test report and render its progress letter or verbose line."""
    self._tests_ran = True
    rep = report
    # The teststatus hook may return the word alone or (word, markup).
    res: Tuple[
        str, str, Union[str, Tuple[str, Mapping[str, bool]]]
    ] = self.config.hook.pytest_report_teststatus(report=rep, config=self.config)
    category, letter, word = res
    if not isinstance(word, tuple):
        markup = None
    else:
        word, markup = word
    self._add_stats(category, [rep])
    if not letter and not word:
        # Probably passed setup/teardown.
        return
    # xdist worker reports carry a "node" attribute.
    running_xdist = hasattr(rep, "node")
    if markup is None:
        # Default coloring when the hook did not supply markup.
        was_xfail = hasattr(report, "wasxfail")
        if rep.passed and not was_xfail:
            markup = {"green": True}
        elif rep.passed and was_xfail:
            markup = {"yellow": True}
        elif rep.failed:
            markup = {"red": True}
        elif rep.skipped:
            markup = {"yellow": True}
        else:
            markup = {}
    if self.verbosity <= 0:
        # Quiet/default mode: a single status letter.
        self._tw.write(letter, **markup)
    else:
        # Verbose mode: a full line per test.
        self._progress_nodeids_reported.add(rep.nodeid)
        line = self._locationline(rep.nodeid, *rep.location)
        if not running_xdist:
            self.write_ensure_prefix(line, word, **markup)
            if rep.skipped or hasattr(report, "wasxfail"):
                # Append a trimmed skip/xfail reason if it fits the line.
                available_width = (
                    (self._tw.fullwidth - self._tw.width_of_current_line)
                    - len(" [100%]")
                    - 1
                )
                reason = _get_raw_skip_reason(rep)
                reason_ = _format_trimmed(" ({})", reason, available_width)
                if reason and reason_ is not None:
                    self._tw.write(reason_)
            if self._show_progress_info:
                self._write_progress_information_filling_space()
        else:
            # Under xdist, prefix the worker id instead of aligning prefixes.
            self.ensure_newline()
            self._tw.write("[%s]" % rep.node.gateway.id)
            if self._show_progress_info:
                self._tw.write(
                    self._get_progress_information_message() + " ", cyan=True
                )
            else:
                self._tw.write(" ")
            self._tw.write(word, **markup)
            self._tw.write(" " + line)
            self.currentfspath = -2
    self.flush()
@property
def _is_last_item(self) -> bool:
    """True once every collected test has reported progress."""
    assert self._session is not None
    return len(self._progress_nodeids_reported) == self._session.testscollected
def pytest_runtest_logfinish(self, nodeid: str) -> None:
    """In non-verbose progress mode, emit the progress column when due."""
    assert self._session
    if self.verbosity <= 0 and self._show_progress_info:
        # Width the progress suffix will need, so we can detect when the
        # status letters are about to run into it.
        if self._show_progress_info == "count":
            num_tests = self._session.testscollected
            progress_length = len(" [{}/{}]".format(str(num_tests), str(num_tests)))
        else:
            progress_length = len(" [100%]")
        self._progress_nodeids_reported.add(nodeid)
        if self._is_last_item:
            self._write_progress_information_filling_space()
        else:
            main_color, _ = self._get_main_color()
            w = self._width_of_current_line
            # Start a new line before the letters would collide with the
            # right-aligned progress message.
            past_edge = w + progress_length + 1 >= self._screen_width
            if past_edge:
                msg = self._get_progress_information_message()
                self._tw.write(msg + "\n", **{main_color: True})
def _get_progress_information_message(self) -> str:
    """Build the trailing progress string, e.g. " [ 42%]" or " [ 3/10]"."""
    assert self._session
    collected = self._session.testscollected
    if self._show_progress_info == "count":
        if collected:
            progress = self._progress_nodeids_reported
            # Pad the current count to the width of the total count.
            counter_format = "{{:{}d}}".format(len(str(collected)))
            format_string = f" [{counter_format}/{{}}]"
            return format_string.format(len(progress), collected)
        return f" [ {collected} / {collected} ]"
    else:
        if collected:
            return " [{:3d}%]".format(
                len(self._progress_nodeids_reported) * 100 // collected
            )
        return " [100%]"
def _write_progress_information_filling_space(self) -> None:
    """Right-align the progress message on the current line, in the main color."""
    color, _ = self._get_main_color()
    msg = self._get_progress_information_message()
    w = self._width_of_current_line
    # Pad so the message ends one column short of the full width.
    fill = self._tw.fullwidth - w - 1
    self.write(msg.rjust(fill), flush=True, **{color: True})
@property
def _width_of_current_line(self) -> int:
    """Return the width of the current line (delegates to the terminal writer)."""
    return self._tw.width_of_current_line
def pytest_collection(self) -> None:
    """Announce the start of collection.

    On a tty the message is printed at default verbosity and later
    rewritten in place (timestamp enables rate-limited rewrites); on a
    non-tty it is only printed when verbose.
    """
    if self.isatty:
        if self.config.option.verbose >= 0:
            self.write("collecting ... ", flush=True, bold=True)
            self._collect_report_last_write = timing.time()
    elif self.config.option.verbose >= 1:
        self.write("collecting ... ", flush=True, bold=True)
def pytest_collectreport(self, report: CollectReport) -> None:
    """Track collection errors/skips, count items, and echo progress on a tty."""
    if report.failed:
        self._add_stats("error", [report])
    elif report.skipped:
        self._add_stats("skipped", [report])
    self._numcollected += sum(
        1 for entry in report.result if isinstance(entry, Item))
    if self.isatty:
        self.report_collect()
def report_collect(self, final: bool = False) -> None:
    """Render the "collecting/collected N items ..." status line."""
    if self.config.option.verbose < 0:
        return
    if not final:
        # Only write "collecting" report every 0.5s.
        t = timing.time()
        if (
            self._collect_report_last_write is not None
            and self._collect_report_last_write > t - REPORT_COLLECTING_RESOLUTION
        ):
            return
        self._collect_report_last_write = t
    errors = len(self.stats.get("error", []))
    skipped = len(self.stats.get("skipped", []))
    deselected = len(self.stats.get("deselected", []))
    selected = self._numcollected - errors - skipped - deselected
    if final:
        line = "collected "
    else:
        line = "collecting "
    line += (
        str(self._numcollected) + " item" + ("" if self._numcollected == 1 else "s")
    )
    if errors:
        line += " / %d error%s" % (errors, "s" if errors != 1 else "")
    if deselected:
        line += " / %d deselected" % deselected
    if skipped:
        line += " / %d skipped" % skipped
    if self._numcollected > selected > 0:
        line += " / %d selected" % selected
    if self.isatty:
        # On a tty we overwrite the previous status line in place.
        self.rewrite(line, bold=True, erase=True)
        if final:
            self.write("\n")
    else:
        self.write_line(line)
    @hookimpl(trylast=True)
    def pytest_sessionstart(self, session: "Session") -> None:
        """Print the "test session starts" header with platform/plugin info.

        Runs trylast so other plugins can register report-header hooks first.
        """
        self._session = session
        self._sessionstarttime = timing.time()
        if not self.showheader:
            return
        self.write_sep("=", "test session starts", bold=True)
        verinfo = platform.python_version()
        if not self.no_header:
            msg = f"platform {sys.platform} -- Python {verinfo}"
            pypy_version_info = getattr(sys, "pypy_version_info", None)
            if pypy_version_info:
                verinfo = ".".join(map(str, pypy_version_info[:3]))
                msg += "[pypy-{}-{}]".format(verinfo, pypy_version_info[3])
            msg += ", pytest-{}, py-{}, pluggy-{}".format(
                _pytest._version.version, py.__version__, pluggy.__version__
            )
            # Show the interpreter path in verbose/debug/pastebin modes.
            if (
                self.verbosity > 0
                or self.config.option.debug
                or getattr(self.config.option, "pastebin", None)
            ):
                msg += " -- " + str(sys.executable)
            self.write_line(msg)
            lines = self.config.hook.pytest_report_header(
                config=self.config, startpath=self.startpath, startdir=self.startdir
            )
            self._write_report_lines_from_hooks(lines)
def _write_report_lines_from_hooks(
self, lines: Sequence[Union[str, Sequence[str]]]
) -> None:
for line_or_lines in reversed(lines):
if isinstance(line_or_lines, str):
self.write_line(line_or_lines)
else:
for line in line_or_lines:
self.write_line(line)
    def pytest_report_header(self, config: Config) -> List[str]:
        """Build the default header lines: rootdir, configfile, testpaths, plugins."""
        line = "rootdir: %s" % config.rootpath
        if config.inipath:
            line += ", configfile: " + bestrelpath(config.rootpath, config.inipath)
        testpaths: List[str] = config.getini("testpaths")
        # Only mention testpaths when they are what is actually being run.
        if config.invocation_params.dir == config.rootpath and config.args == testpaths:
            line += ", testpaths: {}".format(", ".join(testpaths))
        result = [line]
        plugininfo = config.pluginmanager.list_plugin_distinfo()
        if plugininfo:
            result.append("plugins: %s" % ", ".join(_plugin_nameversions(plugininfo)))
        return result
    def pytest_collection_finish(self, session: "Session") -> None:
        """Finalize the collection report; in --collect-only mode also list items."""
        self.report_collect(True)
        lines = self.config.hook.pytest_report_collectionfinish(
            config=self.config,
            startpath=self.startpath,
            startdir=self.startdir,
            items=session.items,
        )
        self._write_report_lines_from_hooks(lines)
        if self.config.getoption("collectonly"):
            if session.items:
                if self.config.option.verbose > -1:
                    self._tw.line("")
                self._printcollecteditems(session.items)
            # Collection errors are reported even in --collect-only mode.
            failed = self.stats.get("failed")
            if failed:
                self._tw.sep("!", "collection failures")
                for rep in failed:
                    rep.toterminal(self._tw)
    def _printcollecteditems(self, items: Sequence[Item]) -> None:
        """Print the collected items as a tree (or flat/counted in quiet modes)."""
        # To print out items and their parent collectors
        # we take care to leave out Instances aka ()
        # because later versions are going to get rid of them anyway.
        if self.config.option.verbose < 0:
            if self.config.option.verbose < -1:
                # -qq: only per-file counts.
                counts = Counter(item.nodeid.split("::", 1)[0] for item in items)
                for name, count in sorted(counts.items()):
                    self._tw.line("%s: %d" % (name, count))
            else:
                # -q: flat list of node ids.
                for item in items:
                    self._tw.line(item.nodeid)
            return
        # Default/verbose: print each item's collector chain as a tree,
        # tracking the chain of collectors already printed on a stack.
        stack: List[Node] = []
        indent = ""
        for item in items:
            needed_collectors = item.listchain()[1:]  # strip root node
            while stack:
                if stack == needed_collectors[: len(stack)]:
                    break
                stack.pop()
            for col in needed_collectors[len(stack) :]:
                stack.append(col)
                if col.name == "()":  # Skip Instances.
                    continue
                indent = (len(stack) - 1) * "  "
                self._tw.line(f"{indent}{col}")
                if self.config.option.verbose >= 1:
                    # In verbose mode also show the collector's docstring.
                    obj = getattr(col, "obj", None)
                    doc = inspect.getdoc(obj) if obj else None
                    if doc:
                        for line in doc.splitlines():
                            self._tw.line("{}{}".format(indent + "  ", line))
    @hookimpl(hookwrapper=True)
    def pytest_sessionfinish(
        self, session: "Session", exitstatus: Union[int, ExitCode]
    ):
        """Write the end-of-session summary after all other finish hooks ran."""
        outcome = yield
        # Propagate any exception raised by inner hook implementations.
        outcome.get_result()
        self._tw.line("")
        summary_exit_codes = (
            ExitCode.OK,
            ExitCode.TESTS_FAILED,
            ExitCode.INTERRUPTED,
            ExitCode.USAGE_ERROR,
            ExitCode.NO_TESTS_COLLECTED,
        )
        if exitstatus in summary_exit_codes and not self.no_summary:
            self.config.hook.pytest_terminal_summary(
                terminalreporter=self, exitstatus=exitstatus, config=self.config
            )
        if session.shouldfail:
            self.write_sep("!", str(session.shouldfail), red=True)
        if exitstatus == ExitCode.INTERRUPTED:
            self._report_keyboardinterrupt()
            # Clear the memo so pytest_unconfigure does not report it twice.
            self._keyboardinterrupt_memo = None
        elif session.shouldstop:
            self.write_sep("!", str(session.shouldstop), red=True)
        self.summary_stats()
    @hookimpl(hookwrapper=True)
    def pytest_terminal_summary(self) -> Generator[None, None, None]:
        """Emit the standard summary sections around other plugins' summaries.

        Sections before the yield run first; the short test summary and any
        warnings produced during teardown are printed after inner hooks.
        """
        self.summary_errors()
        self.summary_failures()
        self.summary_warnings()
        self.summary_passes()
        yield
        self.short_test_summary()
        # Display any extra warnings from teardown here (if any).
        self.summary_warnings()
    def pytest_keyboard_interrupt(self, excinfo: ExceptionInfo[BaseException]) -> None:
        """Memoize the interrupt's repr for later display (see pytest_unconfigure)."""
        self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True)
    def pytest_unconfigure(self) -> None:
        """Report a memoized keyboard interrupt that was not already reported."""
        if self._keyboardinterrupt_memo is not None:
            self._report_keyboardinterrupt()
    def _report_keyboardinterrupt(self) -> None:
        """Write the memoized KeyboardInterrupt crash info to the terminal."""
        excrepr = self._keyboardinterrupt_memo
        assert excrepr is not None
        assert excrepr.reprcrash is not None
        msg = excrepr.reprcrash.message
        self.write_sep("!", msg)
        if "KeyboardInterrupt" in msg:
            if self.config.option.fulltrace:
                excrepr.toterminal(self._tw)
            else:
                # Without --full-trace only show the crash line plus a hint.
                excrepr.reprcrash.toterminal(self._tw)
                self._tw.line(
                    "(to show a full traceback on KeyboardInterrupt use --full-trace)",
                    yellow=True,
                )
    def _locationline(self, nodeid, fspath, lineno, domain):
        """Return the location prefix shown before a test's status in verbose mode."""

        def mkrel(nodeid):
            # Rewrite the nodeid relative to the invocation dir and restore
            # '::'-separated domain parts (but not '.' inside parametrization).
            line = self.config.cwd_relative_nodeid(nodeid)
            if domain and line.endswith(domain):
                line = line[: -len(domain)]
                values = domain.split("[")
                values[0] = values[0].replace(".", "::")  # don't replace '.' in params
                line += "[".join(values)
            return line

        # collect_fspath comes from testid which has a "/"-normalized path.
        if fspath:
            res = mkrel(nodeid)
            # In very verbose mode show the originating file when it differs
            # from the nodeid's file (e.g. tests imported from elsewhere).
            if self.verbosity >= 2 and nodeid.split("::")[0] != fspath.replace(
                "\\", nodes.SEP
            ):
                res += " <- " + bestrelpath(self.startpath, fspath)
        else:
            res = "[location]"
        return res + " "
def _getfailureheadline(self, rep):
head_line = rep.head_line
if head_line:
return head_line
return "test session" # XXX?
def _getcrashline(self, rep):
try:
return str(rep.longrepr.reprcrash)
except AttributeError:
try:
return str(rep.longrepr)[:50]
except AttributeError:
return ""
#
# Summaries for sessionfinish.
#
def getreports(self, name: str):
values = []
for x in self.stats.get(name, []):
if not hasattr(x, "_pdbshown"):
values.append(x)
return values
    def summary_warnings(self) -> None:
        """Write the warnings summary section (enabled via the "w" reportchar).

        Called twice per session: the first call shows all warnings gathered
        so far and records how many were displayed; the second ("final") call
        shows only the warnings that appeared afterwards (e.g. in teardown).
        """
        if self.hasopt("w"):
            all_warnings: Optional[List[WarningReport]] = self.stats.get("warnings")
            if not all_warnings:
                return
            # A non-None counter means we already displayed a first batch.
            final = self._already_displayed_warnings is not None
            if final:
                warning_reports = all_warnings[self._already_displayed_warnings :]
            else:
                warning_reports = all_warnings
            self._already_displayed_warnings = len(warning_reports)
            if not warning_reports:
                return
            # Group identical warning messages so each is printed once.
            reports_grouped_by_message: Dict[str, List[WarningReport]] = {}
            for wr in warning_reports:
                reports_grouped_by_message.setdefault(wr.message, []).append(wr)

            def collapsed_location_report(reports: List[WarningReport]) -> str:
                # List locations individually, or collapse to per-file counts
                # when a message was raised from 10+ locations.
                locations = []
                for w in reports:
                    location = w.get_location(self.config)
                    if location:
                        locations.append(location)
                if len(locations) < 10:
                    return "\n".join(map(str, locations))
                counts_by_filename = Counter(
                    str(loc).split("::", 1)[0] for loc in locations
                )
                return "\n".join(
                    "{}: {} warning{}".format(k, v, "s" if v > 1 else "")
                    for k, v in counts_by_filename.items()
                )

            title = "warnings summary (final)" if final else "warnings summary"
            self.write_sep("=", title, yellow=True, bold=False)
            for message, message_reports in reports_grouped_by_message.items():
                maybe_location = collapsed_location_report(message_reports)
                if maybe_location:
                    self._tw.line(maybe_location)
                    lines = message.splitlines()
                    indented = "\n".join("  " + x for x in lines)
                    message = indented.rstrip()
                else:
                    message = message.rstrip()
                self._tw.line(message)
                self._tw.line()
            self._tw.line("-- Docs: https://docs.pytest.org/en/stable/warnings.html")
    def summary_passes(self) -> None:
        """Write the "PASSES" section (reportchar "P"): captured output of
        passed tests that recorded sections, plus their teardown captures."""
        if self.config.option.tbstyle != "no":
            if self.hasopt("P"):
                reports: List[TestReport] = self.getreports("passed")
                if not reports:
                    return
                self.write_sep("=", "PASSES")
                for rep in reports:
                    # Only tests that actually captured something get a block.
                    if rep.sections:
                        msg = self._getfailureheadline(rep)
                        self.write_sep("_", msg, green=True, bold=True)
                        self._outrep_summary(rep)
                    self._handle_teardown_sections(rep.nodeid)
def _get_teardown_reports(self, nodeid: str) -> List[TestReport]:
reports = self.getreports("")
return [
report
for report in reports
if report.when == "teardown" and report.nodeid == nodeid
]
    def _handle_teardown_sections(self, nodeid: str) -> None:
        """Print captured teardown sections for the given node id."""
        for report in self._get_teardown_reports(nodeid):
            self.print_teardown_sections(report)
    def print_teardown_sections(self, rep: TestReport) -> None:
        """Print the teardown-related captured sections of a report,
        honoring the --show-capture option."""
        showcapture = self.config.option.showcapture
        if showcapture == "no":
            return
        for secname, content in rep.sections:
            # --show-capture may select a subset (stdout/stderr/log) by name.
            if showcapture != "all" and showcapture not in secname:
                continue
            if "teardown" in secname:
                self._tw.sep("-", secname)
                # Strip one trailing newline; _tw.line adds its own.
                if content[-1:] == "\n":
                    content = content[:-1]
                self._tw.line(content)
    def summary_failures(self) -> None:
        """Write the "FAILURES" section with one block (or line) per failure."""
        if self.config.option.tbstyle != "no":
            reports: List[BaseReport] = self.getreports("failed")
            if not reports:
                return
            self.write_sep("=", "FAILURES")
            if self.config.option.tbstyle == "line":
                # --tb=line: a single crash line per failure.
                for rep in reports:
                    line = self._getcrashline(rep)
                    self.write_line(line)
            else:
                for rep in reports:
                    msg = self._getfailureheadline(rep)
                    self.write_sep("_", msg, red=True, bold=True)
                    self._outrep_summary(rep)
                    self._handle_teardown_sections(rep.nodeid)
def summary_errors(self) -> None:
if self.config.option.tbstyle != "no":
reports: List[BaseReport] = self.getreports("error")
if not reports:
return
self.write_sep("=", "ERRORS")
for rep in self.stats["error"]:
msg = self._getfailureheadline(rep)
if rep.when == "collect":
msg = "ERROR collecting " + msg
else:
msg = f"ERROR at {rep.when} of {msg}"
self.write_sep("_", msg, red=True, bold=True)
self._outrep_summary(rep)
    def _outrep_summary(self, rep: BaseReport) -> None:
        """Render a report's longrepr followed by its captured sections."""
        rep.toterminal(self._tw)
        showcapture = self.config.option.showcapture
        if showcapture == "no":
            return
        for secname, content in rep.sections:
            # --show-capture may select a subset (stdout/stderr/log) by name.
            if showcapture != "all" and showcapture not in secname:
                continue
            self._tw.sep("-", secname)
            # Strip one trailing newline; _tw.line adds its own.
            if content[-1:] == "\n":
                content = content[:-1]
            self._tw.line(content)
    def summary_stats(self) -> None:
        """Write the final "=== N passed, M failed in Xs ===" line.

        When a separator line is displayed (verbosity >= 0) the width passed
        to write_sep must be inflated by the length of the invisible ANSI
        markup codes so the visible line still spans the terminal.
        """
        if self.verbosity < -1:
            return
        session_duration = timing.time() - self._sessionstarttime
        (parts, main_color) = self.build_summary_stats_line()
        line_parts = []
        display_sep = self.verbosity >= 0
        if display_sep:
            fullwidth = self._tw.fullwidth
        for text, markup in parts:
            with_markup = self._tw.markup(text, **markup)
            if display_sep:
                # Account for the zero-width escape codes added by markup().
                fullwidth += len(with_markup) - len(text)
            line_parts.append(with_markup)
        msg = ", ".join(line_parts)
        main_markup = {main_color: True}
        duration = " in {}".format(format_session_duration(session_duration))
        duration_with_markup = self._tw.markup(duration, **main_markup)
        if display_sep:
            fullwidth += len(duration_with_markup) - len(duration)
        msg += duration_with_markup
        if display_sep:
            # Color the trailing "=" separator too: append the opening escape
            # sequence without its reset suffix so the sep inherits the color.
            markup_for_end_sep = self._tw.markup("", **main_markup)
            if markup_for_end_sep.endswith("\x1b[0m"):
                markup_for_end_sep = markup_for_end_sep[:-4]
            fullwidth += len(markup_for_end_sep)
            msg += markup_for_end_sep
        if display_sep:
            self.write_sep("=", msg, fullwidth=fullwidth, **main_markup)
        else:
            self.write_line(msg, **main_markup)
    def short_test_summary(self) -> None:
        """Write the "short test summary info" section, one line per report,
        driven by the -r reportchars selected on the command line."""
        if not self.reportchars:
            return

        def show_simple(stat, lines: List[str]) -> None:
            # Generic one-line-per-report formatter (failed/passed/error).
            failed = self.stats.get(stat, [])
            if not failed:
                return
            termwidth = self._tw.fullwidth
            config = self.config
            for rep in failed:
                line = _get_line_with_reprcrash_message(config, rep, termwidth)
                lines.append(line)

        def show_xfailed(lines: List[str]) -> None:
            xfailed = self.stats.get("xfailed", [])
            for rep in xfailed:
                verbose_word = rep._get_verbose_word(self.config)
                pos = _get_pos(self.config, rep)
                lines.append(f"{verbose_word} {pos}")
                reason = rep.wasxfail
                if reason:
                    lines.append("  " + str(reason))

        def show_xpassed(lines: List[str]) -> None:
            xpassed = self.stats.get("xpassed", [])
            for rep in xpassed:
                verbose_word = rep._get_verbose_word(self.config)
                pos = _get_pos(self.config, rep)
                reason = rep.wasxfail
                lines.append(f"{verbose_word} {pos} {reason}")

        def show_skipped(lines: List[str]) -> None:
            # Identical skips are folded into one line with a count.
            skipped: List[CollectReport] = self.stats.get("skipped", [])
            fskips = _folded_skips(self.startpath, skipped) if skipped else []
            if not fskips:
                return
            verbose_word = skipped[0]._get_verbose_word(self.config)
            for num, fspath, lineno, reason in fskips:
                if reason.startswith("Skipped: "):
                    reason = reason[9:]
                if lineno is not None:
                    lines.append(
                        "%s [%d] %s:%d: %s"
                        % (verbose_word, num, fspath, lineno, reason)
                    )
                else:
                    lines.append("%s [%d] %s: %s" % (verbose_word, num, fspath, reason))

        # Dispatch table from reportchar to its line producer.
        REPORTCHAR_ACTIONS: Mapping[str, Callable[[List[str]], None]] = {
            "x": show_xfailed,
            "X": show_xpassed,
            "f": partial(show_simple, "failed"),
            "s": show_skipped,
            "p": partial(show_simple, "passed"),
            "E": partial(show_simple, "error"),
        }
        lines: List[str] = []
        for char in self.reportchars:
            action = REPORTCHAR_ACTIONS.get(char)
            if action:  # skipping e.g. "P" (passed with output) here.
                action(lines)
        if lines:
            self.write_sep("=", "short test summary info")
            for line in lines:
                self.write_line(line)
def _get_main_color(self) -> Tuple[str, List[str]]:
if self._main_color is None or self._known_types is None or self._is_last_item:
self._set_main_color()
assert self._main_color
assert self._known_types
return self._main_color, self._known_types
def _determine_main_color(self, unknown_type_seen: bool) -> str:
stats = self.stats
if "failed" in stats or "error" in stats:
main_color = "red"
elif "warnings" in stats or "xpassed" in stats or unknown_type_seen:
main_color = "yellow"
elif "passed" in stats or not self._is_last_item:
main_color = "green"
else:
main_color = "yellow"
return main_color
def _set_main_color(self) -> None:
unknown_types: List[str] = []
for found_type in self.stats.keys():
if found_type: # setup/teardown reports have an empty key, ignore them
if found_type not in KNOWN_TYPES and found_type not in unknown_types:
unknown_types.append(found_type)
self._known_types = list(KNOWN_TYPES) + unknown_types
self._main_color = self._determine_main_color(bool(unknown_types))
    def build_summary_stats_line(self) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]:
        """
        Build the parts used in the last summary stats line.

        The summary stats line is the line shown at the end, e.g.
        "=== 12 passed, 2 errors in Xs ===".

        This function builds a list of the "parts" that make up for the text in
        that line, in the example above it would be::

            [
                ("12 passed", {"green": True}),
                ("2 errors", {"red": True}),
            ]

        That last dict for each line is a "markup dictionary", used by
        TerminalWriter to color output.

        The final color of the line is also determined by this function, and
        is the second element of the returned tuple.
        """
        if self.config.getoption("collectonly"):
            return self._build_collect_only_summary_stats_line()
        else:
            return self._build_normal_summary_stats_line()
def _get_reports_to_display(self, key: str) -> List[Any]:
"""Get test/collection reports for the given status key, such as `passed` or `error`."""
reports = self.stats.get(key, [])
return [x for x in reports if getattr(x, "count_towards_summary", True)]
    def _build_normal_summary_stats_line(
        self,
    ) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]:
        """Build the summary-line parts for a normal (non --collect-only) run."""
        main_color, known_types = self._get_main_color()
        parts = []
        for key in known_types:
            reports = self._get_reports_to_display(key)
            if reports:
                count = len(reports)
                color = _color_for_type.get(key, _color_for_type_default)
                # Bold only the category that matches the line's main color.
                markup = {color: True, "bold": color == main_color}
                parts.append(("%d %s" % pluralize(count, key), markup))
        if not parts:
            parts = [("no tests ran", {_color_for_type_default: True})]
        return parts, main_color
    def _build_collect_only_summary_stats_line(
        self,
    ) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]:
        """Build the summary-line parts for a --collect-only run.

        Wording and color depend on how many items were collected, how many
        were deselected, and whether collection errors occurred.
        """
        deselected = len(self._get_reports_to_display("deselected"))
        errors = len(self._get_reports_to_display("error"))
        if self._numcollected == 0:
            parts = [("no tests collected", {"yellow": True})]
            main_color = "yellow"
        elif deselected == 0:
            main_color = "green"
            collected_output = "%d %s collected" % pluralize(self._numcollected, "test")
            parts = [(collected_output, {main_color: True})]
        else:
            all_tests_were_deselected = self._numcollected == deselected
            if all_tests_were_deselected:
                main_color = "yellow"
                collected_output = f"no tests collected ({deselected} deselected)"
            else:
                main_color = "green"
                selected = self._numcollected - deselected
                collected_output = f"{selected}/{self._numcollected} tests collected ({deselected} deselected)"
            parts = [(collected_output, {main_color: True})]
        if errors:
            # Errors override the color regardless of the collected counts.
            main_color = _color_for_type["error"]
            parts += [("%d %s" % pluralize(errors, "error"), {main_color: True})]
        return parts, main_color
def _get_pos(config: Config, rep: BaseReport):
    """Return *rep*'s node id rewritten relative to the invocation directory."""
    return config.cwd_relative_nodeid(rep.nodeid)
def _format_trimmed(format: str, msg: str, available_width: int) -> Optional[str]:
    """Format msg into format, ellipsizing it if doesn't fit in available_width.

    Width is measured in terminal display cells (wcswidth), not characters.
    Returns None if even the ellipsis can't fit.
    """
    # Only use the first line.
    i = msg.find("\n")
    if i != -1:
        msg = msg[:i]

    ellipsis = "..."
    # Display width of the format string with an empty message.
    format_width = wcswidth(format.format(""))
    if format_width + len(ellipsis) > available_width:
        return None

    if format_width + wcswidth(msg) > available_width:
        # Reserve room for the ellipsis, then trim character by character
        # (a wide character may occupy two cells, so the byte slice above is
        # only a first approximation).
        available_width -= len(ellipsis)
        msg = msg[:available_width]
        while format_width + wcswidth(msg) > available_width:
            msg = msg[:-1]
        msg += ellipsis

    return format.format(msg)
def _get_line_with_reprcrash_message(
    config: Config, rep: BaseReport, termwidth: int
) -> str:
    """Get summary line for a report, trying to add reprcrash message."""
    verbose_word = rep._get_verbose_word(config)
    pos = _get_pos(config, rep)

    line = f"{verbose_word} {pos}"
    line_width = wcswidth(line)

    try:
        # Type ignored intentionally -- possible AttributeError expected.
        msg = rep.longrepr.reprcrash.message  # type: ignore[union-attr]
    except AttributeError:
        pass
    else:
        # Append " - <message>" only if it fits in the remaining columns.
        available_width = termwidth - line_width
        msg = _format_trimmed(" - {}", msg, available_width)
        if msg is not None:
            line += msg

    return line
def _folded_skips(
    startpath: Path,
    skipped: Sequence[CollectReport],
) -> List[Tuple[int, str, Optional[int], str]]:
    """Fold identical skip reports into (count, path, lineno, reason) tuples."""
    d: Dict[Tuple[str, Optional[int], str], List[CollectReport]] = {}
    for event in skipped:
        assert event.longrepr is not None
        assert isinstance(event.longrepr, tuple), (event, event.longrepr)
        assert len(event.longrepr) == 3, (event, event.longrepr)
        fspath, lineno, reason = event.longrepr
        # For consistency, report all fspaths in relative form.
        fspath = bestrelpath(startpath, Path(fspath))
        keywords = getattr(event, "keywords", {})
        # Folding reports with global pytestmark variable.
        # This is a workaround, because for now we cannot identify the scope of a skip marker
        # TODO: Revisit after marks scope would be fixed.
        if (
            event.when == "setup"
            and "skip" in keywords
            and "pytestmark" not in keywords
        ):
            # Drop the line number so all such skips in a file fold together.
            key: Tuple[str, Optional[int], str] = (fspath, None, reason)
        else:
            key = (fspath, lineno, reason)
        d.setdefault(key, []).append(event)
    values: List[Tuple[int, str, Optional[int], str]] = []
    for key, events in d.items():
        values.append((len(events), *key))
    return values
# Mapping from stat category to the terminal color used in the summary line.
_color_for_type = {
    "failed": "red",
    "error": "red",
    "warnings": "yellow",
    "passed": "green",
}
# Categories not listed above (e.g. "skipped", "deselected") use this color.
_color_for_type_default = "yellow"
def pluralize(count: int, noun: str) -> Tuple[int, str]:
    """Return (count, noun) with the noun pluralized when appropriate.

    Only "error", "warnings" and "test" are pluralized; words such as
    "failed" or "passed" are returned unchanged.
    """
    if noun not in ("error", "warnings", "test"):
        return count, noun

    # The `warnings` key is plural. To avoid API breakage, we keep it that way
    # but normalize to the singular here so plurality is decided uniformly.
    singular = noun.replace("warnings", "warning")
    return count, singular if count == 1 else singular + "s"
def _plugin_nameversions(plugininfo) -> List[str]:
values: List[str] = []
for plugin, dist in plugininfo:
# Gets us name and version!
name = "{dist.project_name}-{dist.version}".format(dist=dist)
# Questionable convenience, but it keeps things short.
if name.startswith("pytest-"):
name = name[7:]
# We decided to print python package names they can have more than one plugin.
if name not in values:
values.append(name)
return values
def format_session_duration(seconds: float) -> str:
    """Format the given seconds in a human readable manner to show in the final summary.

    Durations of a minute or more also show an H:MM:SS breakdown.
    """
    if seconds >= 60:
        dt = datetime.timedelta(seconds=int(seconds))
        return f"{seconds:.2f}s ({dt})"
    return f"{seconds:.2f}s"
def _get_raw_skip_reason(report: TestReport) -> str:
    """Get the reason string of a skip/xfail/xpass test report.

    Only the user-supplied part is returned: the "reason: " / "Skipped: "
    prefixes added by the outcome machinery are stripped.
    """
    if hasattr(report, "wasxfail"):
        # xfail/xpass: the reason is stored in the wasxfail attribute.
        reason = cast(str, report.wasxfail)
        prefix = "reason: "
        if reason.startswith(prefix):
            reason = reason[len(prefix) :]
        return reason
    # Plain skip: longrepr is a (path, lineno, message) triple.
    assert report.skipped
    assert isinstance(report.longrepr, tuple)
    _, _, reason = report.longrepr
    if reason.startswith("Skipped: "):
        reason = reason[len("Skipped: ") :]
    elif reason == "Skipped":
        reason = ""
    return reason
| |
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.common import objects
from rally.deployment import engine
from tests.unit import test
MOD = "rally.deployment.engines.lxc."
class LxcEngineTestCase(test.TestCase):
    """Unit tests for the LxcEngine deployment engine.

    All collaborators (provider, LXC hosts, nested engine) are mocked; the
    tests verify the exact call sequences the engine issues against them.
    """

    def setUp(self):
        """Build a canonical two-host LxcEngine config and instantiate the engine."""
        super(LxcEngineTestCase, self).setUp()
        self.config = {
            "type": "LxcEngine",
            "container_name": "rally",
            "containers_per_host": 2,
            "tunnel_to": ["1.1.1.1", "2.2.2.2"],
            "distribution": "ubuntu",
            "start_lxc_network": "10.128.128.0/28",
            "engine": {
                "name": "FakeEngine",
                "config": {
                    "key": "value",
                },
            },
            "provider": {
                "type": "DummyProvider",
                "credentials": [{"user": "root", "host": "host1.net"},
                                {"user": "root", "host": "host2.net"}]
            }
        }
        self.deployment = {
            "uuid": "test-deployment-uuid",
            "config": self.config,
        }
        self.engine = engine.Engine.get_engine("LxcEngine",
                                               self.deployment)

    @mock.patch(MOD + "objects")
    @mock.patch(MOD + "engine")
    def test__deploy_first(self, mock_engine, mock_objects):
        """_deploy_first prepares the host, creates/starts/stops the first
        container, and deploys the nested engine against it."""
        fake_credentials = {"user": "admin", "host": "host.net"}
        fake_deployment = mock.Mock()
        fake_engine = mock.Mock()
        mock_objects.Deployment = mock.Mock(return_value=fake_deployment)
        mock_engine.Engine.get_engine = mock.Mock(
            return_value=fake_engine)
        fake_host = mock.Mock()
        fake_so = mock.Mock()
        fake_so.get_credentials.return_value = fake_credentials
        fake_host.get_server_object = mock.Mock(return_value=fake_so)
        self.engine._deploy_first(fake_host, "name", "dist", "release")
        # The host must be driven in exactly this order.
        host_calls = [
            mock.call.prepare(),
            mock.call.create_container("name", "dist", "release"),
            mock.call.start_containers(),
            mock.call.get_server_object("name"),
            mock.call.stop_containers()]
        self.assertEqual(host_calls, fake_host.mock_calls)
        fake_engine.deploy.assert_called_once_with()
        mock_engine.Engine.get_engine.assert_called_once_with(
            "FakeEngine", fake_deployment)
        # The nested deployment reuses the engine config, but its provider is
        # replaced with the first container's credentials.
        engine_config = self.config["engine"].copy()
        engine_config["provider"] = {"credentials": [fake_credentials],
                                     "type": "DummyProvider"}
        mock_objects.Deployment.assert_called_once_with(
            config=engine_config, parent_uuid="test-deployment-uuid")

    @mock.patch(MOD + "provider.ProviderFactory.get_provider")
    def test__get_provider(self, mock_provider_factory_get_provider):
        """_get_provider delegates to ProviderFactory with the provider config."""
        mock_provider_factory_get_provider.return_value = "fake_provider"
        provider = self.engine._get_provider()
        self.assertEqual("fake_provider", provider)
        mock_provider_factory_get_provider.assert_called_once_with(
            self.config["provider"], self.deployment)

    @mock.patch(MOD + "open", create=True)
    @mock.patch(MOD + "get_script_path", return_value="fake_sp")
    @mock.patch(MOD + "lxc.LxcHost")
    @mock.patch(MOD + "LxcEngine._deploy_first")
    @mock.patch(MOD + "LxcEngine._get_provider")
    def test_deploy(self, mock__get_provider, mock__deploy_first,
                    mock_lxc_host, mock_get_script_path, mock_open):
        """deploy() creates two hosts on consecutive /28 subnets, clones a
        second container per host, registers resources, and runs the script
        in every container."""
        mock_open.return_value = "fs"
        fake_containers = ((mock.Mock(), mock.Mock()),
                           (mock.Mock(), mock.Mock()))
        fake_hosts = mock_lxc_host.side_effect = [mock.Mock(), mock.Mock()]
        fake_hosts[0].get_server_objects.return_value = fake_containers[0]
        fake_hosts[1].get_server_objects.return_value = fake_containers[1]
        fake_hosts[0]._port_cache = {1: 2, 3: 4}
        fake_hosts[1]._port_cache = {5: 6, 7: 8}
        fake_provider = mock__get_provider.return_value
        fake_servers = [mock.Mock(), mock.Mock()]
        fake_servers[0].get_credentials.return_value = "fc1"
        fake_servers[1].get_credentials.return_value = "fc2"
        fake_provider.create_servers.return_value = fake_servers
        # Expected add_resource() payloads, consumed in order by add_resource.
        add_res_calls = [
            {"provider_name": "LxcEngine",
             "info": {"host": "fc1",
                      "config": {"network": "10.128.128.0/28",
                                 "tunnel_to": ["1.1.1.1", "2.2.2.2"]},
                      "forwarded_ports": [(1, 2), (3, 4)],
                      "containers": fake_hosts[0].containers}},
            {"provider_name": "LxcEngine",
             "info": {"host": "fc2",
                      "config": {"network": "10.128.128.16/28",
                                 "tunnel_to": ["1.1.1.1", "2.2.2.2"]},
                      "forwarded_ports": [(5, 6), (7, 8)],
                      "containers": fake_hosts[1].containers}}]

        def add_resource(**actual_kwargs):
            # Stand-in for deployment.add_resource that verifies each call
            # against the next expected payload.
            expected_kwargs = add_res_calls.pop(0)
            self.assertEqual(expected_kwargs["provider_name"],
                             actual_kwargs["provider_name"])
            self.assertEqual(expected_kwargs["info"]["host"],
                             actual_kwargs["info"]["host"])
            self.assertEqual(expected_kwargs["info"]["config"],
                             actual_kwargs["info"]["config"])
            self.assertEqual(expected_kwargs["info"]["containers"],
                             actual_kwargs["info"]["containers"])
            self.assertSequenceEqual(
                expected_kwargs["info"]["forwarded_ports"],
                actual_kwargs["info"]["forwarded_ports"])

        fake_deployment = mock.MagicMock()
        fake_deployment.add_resource = add_resource
        with mock.patch.object(self.engine, "deployment", fake_deployment):
            credential = self.engine.deploy()
        self.assertIsInstance(credential["admin"], objects.Credential)
        # One LxcHost per server, each on its own consecutive /28 network.
        lxc_host_calls = [
            mock.call(fake_servers[0], {"network": "10.128.128.0/28",
                                        "tunnel_to": ["1.1.1.1", "2.2.2.2"]}),
            mock.call(fake_servers[1], {"network": "10.128.128.16/28",
                                        "tunnel_to": ["1.1.1.1", "2.2.2.2"]})]
        self.assertEqual(lxc_host_calls, mock_lxc_host.mock_calls)
        deploy_first_calls = [
            mock.call(fake_hosts[0], "rally-10-128-128-0-000", "ubuntu", None),
            mock.call(fake_hosts[1], "rally-10-128-128-16-000", "ubuntu",
                      None)]
        self.assertEqual(deploy_first_calls, mock__deploy_first.mock_calls)
        # containers_per_host == 2, so each host clones one extra container.
        host1_calls = [
            mock.call.create_clone("rally-10-128-128-0-001",
                                   "rally-10-128-128-0-000"),
            mock.call.start_containers(),
            mock.call.get_server_objects()]
        host2_calls = [
            mock.call.create_clone("rally-10-128-128-16-001",
                                   "rally-10-128-128-16-000"),
            mock.call.start_containers(),
            mock.call.get_server_objects()]
        self.assertEqual(host1_calls, fake_hosts[0].mock_calls)
        self.assertEqual(host2_calls, fake_hosts[1].mock_calls)
        self.assertEqual([mock.call("fake_sp", "rb")] * 4,
                         mock_open.mock_calls)
        # The deployment script is piped into every container over ssh.
        for host in fake_containers:
            for container in host:
                self.assertEqual([mock.call.ssh.run("/bin/sh -e", stdin="fs")],
                                 container.mock_calls)

    @mock.patch(MOD + "LxcEngine._get_provider")
    @mock.patch(MOD + "lxc.LxcHost")
    @mock.patch(MOD + "provider.Server.from_credentials")
    def test_cleanup(self, mock_server_from_credentials, mock_lxc_host,
                     mock__get_provider):
        """cleanup() tears down containers/ports/tunnels per resource, deletes
        the resources, and destroys the provider's servers."""
        mock__get_provider.return_value = fake_provider = mock.Mock()
        mock_lxc_host.side_effect = fake_hosts = [mock.Mock(), mock.Mock()]
        mock_server_from_credentials.side_effect = ["s1", "s2"]
        fake_resources = []
        for i in range(2):
            res = mock.Mock()
            res.info = {"host": "host%d" % i,
                        "config": "fake_config%d" % i,
                        "forwarded_ports": [(1, 2), (3, 4)],
                        "containers": "fake_containers"}
            fake_resources.append(res)
        with mock.patch.object(self.engine, "deployment") as mock_deployment:
            mock_deployment.get_resources.return_value = fake_resources
            self.engine.cleanup()
        for host in fake_hosts:
            self.assertEqual("fake_containers", host.containers)
            self.assertEqual([mock.call.destroy_containers(),
                              mock.call.destroy_ports([(1, 2), (3, 4)]),
                              mock.call.delete_tunnels()], host.mock_calls)
        delete_calls = [mock.call.delete_resource(r.id)
                        for r in fake_resources]
        self.assertEqual(delete_calls,
                         mock_deployment.delete_resource.call_args_list)
        fake_provider.destroy_servers.assert_called_once_with()
| |
from ply.lex import TOKEN, lex
# PLY token rules: module-level strings named t_<TOKEN> become that token's
# regex; longer patterns are tried before shorter ones by the generated lexer.
# token with reserved words and sign alternative
t_OR = r"\|\|"
t_AND = r"&&"
# tokens without reserved words, only signs
t_GEQ = r">="
t_LEQ = r"<="
t_NEQ = r"!="
t_EQ = r"=="  # line 331
t_LSH = r"<<"
t_RSH = r">>"
# Single-character operators handled as literals rather than named tokens.
# line XXX
literals = ["+", "-", "*", "/", ":", "[", "]",
            "<", ">", "(", ")", "&", "|", "="]
#
# t_S_PLUS = r"\+"
# t_S_MINUS = r"-"
# t_S_START = r"\*"
# t_S_SLASH = r"/"
# t_S_COLON = r":"
# t_S_L_SQUARE_BARCKET = r"\["
# t_S_R_SQUARE_BARCKET = r"\]"
# t_S_L_ANGLE_BARCKET = r"<"
# t_S_R_ANGLE_BARCKET = r">"
# t_S_L_BARCKET = r"\("
# t_S_R_BARCKET = r"\)"
# t_S_BITWISE_AND = r"&"
# t_S_BITWISE_OR = r"\|"
# t_S_EQUALS = r"="
# basic regex defs line 95
# One or two hex digits, as used for MAC-address bytes and hex ids.
B_regex = r"([0-9A-Fa-f][0-9A-Fa-f]?)"


# Id token: a literal "$" followed by one or two hex digits.
# NOTE: the "$" must be escaped -- an unescaped "$" is an end-of-string
# anchor in Python regexes, so the previous pattern ("$" + B_regex) required
# hex digits *after* the end of the input and could never match.
@TOKEN(r"\$" + B_regex)
def t_AID(t):
    return t
# Exactly four hex digits, used for the dotted 4-4-4 MAC notation.
B2_regex = r"([0-9A-Fa-f][0-9A-Fa-f][0-9A-Fa-f][0-9A-Fa-f])"
# Ethernet address in any of the common notations (colon-, dash- or
# dot-separated bytes, dotted 4-digit groups, or 12 contiguous hex digits).
# "{B}"/"{B2}" are placeholders expanded below via str.replace.
mac_regex = r"({B}:{B}:{B}:{B}:{B}:{B}|{B}\-{B}\-{B}\-{B}\-{B}\-{B}|{B}\.{B}\.{B}\.{B}\.{B}\.{B}|{B2}\.{B2}\.{B2}|{B2}{3})"
mac_regex = mac_regex.replace("{B}", B_regex)
mac_regex = mac_regex.replace("{B2}", B2_regex)


# Ethernet-id token.
@TOKEN(mac_regex)
def t_EID(t):
    return t
# ipv6 regex.
# NOTE(review): each alternative is anchored with \A/\Z, which match only at
# the start/end of the *entire input string* -- presumably this only matches
# when the address is the whole filter expression; confirm against how the
# lexer applies token regexes at arbitrary positions.
ipv6_regex = r"""
(\A([0-9a-f]{1,4}:){1,1}(:[0-9a-f]{1,4}){1,6}\Z)|
(\A([0-9a-f]{1,4}:){1,2}(:[0-9a-f]{1,4}){1,5}\Z)|
(\A([0-9a-f]{1,4}:){1,3}(:[0-9a-f]{1,4}){1,4}\Z)|
(\A([0-9a-f]{1,4}:){1,4}(:[0-9a-f]{1,4}){1,3}\Z)|
(\A([0-9a-f]{1,4}:){1,5}(:[0-9a-f]{1,4}){1,2}\Z)|
(\A([0-9a-f]{1,4}:){1,6}(:[0-9a-f]{1,4}){1,1}\Z)|
(\A(([0-9a-f]{1,4}:){1,7}|:):\Z)|
(\A:(:[0-9a-f]{1,4}){1,7}\Z)|
(\A((([0-9a-f]{1,4}:){6})(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3})\Z)|
(\A(([0-9a-f]{1,4}:){5}[0-9a-f]{1,4}:(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3})\Z)|
(\A([0-9a-f]{1,4}:){5}:[0-9a-f]{1,4}:(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\Z)|
(\A([0-9a-f]{1,4}:){1,1}(:[0-9a-f]{1,4}){1,4}:(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\Z)|
(\A([0-9a-f]{1,4}:){1,2}(:[0-9a-f]{1,4}){1,3}:(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\Z)|
(\A([0-9a-f]{1,4}:){1,3}(:[0-9a-f]{1,4}){1,2}:(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\Z)|
(\A([0-9a-f]{1,4}:){1,4}(:[0-9a-f]{1,4}){1,1}:(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\Z)|
(\A(([0-9a-f]{1,4}:){1,5}|:):(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\Z)|
(\A:(:[0-9a-f]{1,4}){1,5}:(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\Z)
"""
# Strip the layout whitespace/newlines out of the pattern.
ipv6_regex = ''.join(ipv6_regex.split())


# IPv6 host-id token.
@TOKEN(ipv6_regex)
def t_HID6(t):
    return t
# ipv4 regex
# the original lex code in line 343 says it should look like this, but
# it causes problems, and anyway who would use 0xff.0xff.0xff.0xff notation
# to write IP addresses?
# ipv4_regex = r"({N}\.{N})|({N}\.{N}\.{N})|({N}\.{N}\.{N}\.{N})"
# Dotted-quad IPv4 address, each octet limited to 0-255.
ipv4_regex = r'(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}'


# Notice that this is a function only because we want to check if this is an
# ipv4 address before we check if it's a NUM token (PLY tries function rules
# before string rules).
@TOKEN(ipv4_regex)
def t_HID(t):
    return t
# Named constants usable wherever a number is expected in a filter
# expression (tcpdump-compatible names); t_NUM resolves them to their value.
constants = {
    # Header field offsets.
    "icmptype": 0,
    "icmpcode": 1,
    # ICMP message types.
    "icmp-echoreply": 0,
    "icmp-unreach": 3,
    "icmp-sourcequench": 4,
    "icmp-redirect": 5,
    "icmp-echo": 8,
    "icmp-routeradvert": 9,
    "icmp-routersolicit": 10,
    "icmp-timxceed": 11,
    "icmp-paramprob": 12,
    "icmp-tstamp": 13,
    "icmp-tstampreply": 14,
    "icmp-ireq": 15,
    "icmp-ireqreply": 16,
    "icmp-maskreq": 17,
    "icmp-maskreply": 18,
    # TCP flags byte offset and flag bit masks.
    "tcpflags": 13,
    "tcp-fin": 0x01,
    "tcp-syn": 0x02,
    "tcp-rst": 0x04,
    "tcp-push": 0x08,
    "tcp-ack": 0x10,
    "tcp-urg": 0x20,
}
# Numeric literal: hex (0x/0X prefixed) or decimal.
N_regex = r"((0X|0x)[0-9A-Fa-f]+)|([0-9]+)"
# Order the constant names from the longest to the shortest, since
# there are constants that are substrings of each other.
# example: "icmp-tstamp", "icmp-tstampreply"
number_regex = N_regex + "|" + \
    ('|'.join(sorted(constants.keys(), key=lambda x: -len(x))))
@TOKEN(number_regex)
def t_NUM(t):
    """Numeric token: symbolic constant, hex literal or decimal literal.

    The token value is rewritten in place to the corresponding integer.
    """
    literal = t.value
    if literal in constants:
        t.value = constants[literal]
        return t
    # Not a symbolic name: parse as hex when prefixed with 0x/0X, else decimal.
    base = 16 if literal[:2].lower() == '0x' else 10
    t.value = int(literal, base)
    return t
# line 388
# Keyword table mapping filter-language words to their token names.  Several
# spellings alias the same token (e.g. "ether"/"fddi"/"ppp"/"slip"/"tr"/
# "wlan" all map to LINK, "addrN"/"addressN" to ADDRN).
# NOTE(review): the keys "CARP", "RADIO", "SUBTYPE" and "VRRP" are upper-case,
# so the lower-case spellings a user would normally type fall through to the
# generic ID token in t_ID -- confirm this matches the original scanner.
reserved_words = {
    "aarp": "AARP",
    "action": "PF_ACTION",
    "addr1": "ADDR1",
    "addr2": "ADDR2",
    "addr3": "ADDR3",
    "addr4": "ADDR4",
    "address1": "ADDR1",
    "address2": "ADDR2",
    "address3": "ADDR3",
    "address4": "ADDR4",
    "ah": "AH",
    "and": "AND",
    "arp": "ARP",
    "atalk": "ATALK",
    "bcc": "BCC",
    "broadcast": "TK_BROADCAST",
    "byte": "CBYTE",
    "CARP": "CARP",
    "clnp": "CLNP",
    "connectmsg": "CONNECTMSG",
    "csnp": "CSNP",
    "decnet": "DECNET",
    "dir": "DIR",
    "direction": "DIR",
    "dpc": "DPC",
    "dst": "DST",
    "es-is": "ESIS",
    "esis": "ESIS",
    "esp": "ESP",
    "ether": "LINK",
    "fddi": "LINK",
    "fisu": "FISU",
    "gateway": "GATEWAY",
    "greater": "GREATER",
    "hdpc": "HDPC",
    "hfisu": "HFISU",
    "hlssu": "HLSSU",
    "hmsu": "HMSU",
    "hopc": "HOPC",
    "host": "HOST",
    "hsio": "HSIO",
    "hsls": "HSLS",
    "icmp": "ICMP",
    "icmp6": "ICMPV6",
    "ifname": "PF_IFNAME",
    "igmp": "IGMP",
    "igrp": "IGRP",
    "iih": "IIH",
    "ilmic": "ILMIC",
    "inbound": "INBOUND",
    "ip": "IP",
    "ip6": "IPV6",
    "ipx": "IPX",
    "is-is": "ISIS",
    "isis": "ISIS",
    "iso": "ISO",
    "l1": "L1",
    "l2": "L2",
    "lane": "LANE",
    "lat": "LAT",
    "len": "LEN",
    "less": "LESS",
    "link": "LINK",
    "llc": "LLC",
    "lsp": "LSP",
    "lssu": "LSSU",
    "lsu": "LSSU",
    "mask": "NETMASK",
    "metac": "METAC",
    "metaconnect": "METACONNECT",
    "mopdl": "MOPDL",
    "moprc": "MOPRC",
    "mpls": "MPLS",
    "msu": "MSU",
    "multicast": "TK_MULTICAST",
    "net": "NET",
    "netbeui": "NETBEUI",
    "not": "NOT",
    "oam": "OAM",
    "oamf4": "OAMF4",
    "oamf4ec": "OAMF4EC",
    "oamf4sc": "OAMF4SC",
    "on": "PF_IFNAME",
    "opc": "OPC",
    "or": "OR",
    "outbound": "OUTBOUND",
    "pim": "PIM",
    "port": "PORT",
    "portrange": "PORTRANGE",
    "ppp": "LINK",
    "pppoes": "PPPOES",
    "proto": "PROTO",
    "protochain": "PROTOCHAIN",
    "psnp": "PSNP",
    "ra": "RA",
    "RADIO": "RADIO",
    "rarp": "RARP",
    "reason": "PF_REASON",
    "rnr": "PF_RNR",
    "rset": "PF_RSET",
    "rulenum": "PF_RNR",
    "ruleset": "PF_RSET",
    "sc": "SC",
    "sca": "SCA",
    "sctp": "SCTP",
    "sio": "SIO",
    "slip": "LINK",
    "sls": "SLS",
    "snp": "SNP",
    "src": "SRC",
    "srnr": "PF_SRNR",
    "stp": "STP",
    "subrulenum": "PF_SRNR",
    "SUBTYPE": "SUBTYPE",
    "ta": "TA",
    "tcp": "TCP",
    "tr": "LINK",
    "type": "TYPE",
    "udp": "UDP",
    "vci": "VCI",
    "vlan": "VLAN",
    "vpi": "VPI",
    "VRRP": "VRRP",
    "wlan": "LINK"
}
ID_regex = r"([A-Za-z0-9]([-_.A-Za-z0-9]*[.A-Za-z0-9])?)|(\\[^ !()\n\t]+)"

@TOKEN(ID_regex)
def t_ID(t):
    """Identifier token; reclassified to its keyword token when reserved."""
    t.type = reserved_words[t.value] if t.value in reserved_words else "ID"
    return t
# ignore characters
# Whitespace between tokens is skipped by the lexer.
t_ignore = " \r\n\t"

# define the lexer
# Token names known to the grammar: the hand-listed tokens below plus every
# distinct keyword token from reserved_words (set() collapses duplicates such
# as the many LINK aliases).
tokens = [
    'AID',
    'EID',
    'EQ',
    'GEQ',
    'HID',
    'HID6',
    'ID',
    'LEQ',
    'LSH',
    'NEQ',
    'NUM',
    'PPPOED',
    'RSH',
    # 'S_BITWISE_AND',
    # 'S_BITWISE_OR',
    # 'S_COLON',
    # 'S_EQUALS',
    # 'S_L_ANGLE_BARCKET',
    # 'S_L_BARCKET',
    # 'S_L_SQUARE_BARCKET',
    # 'S_MINUS',
    # 'S_PLUS',
    # 'S_R_ANGLE_BARCKET',
    # 'S_R_BARCKET',
    # 'S_R_SQUARE_BARCKET',
    # 'S_SLASH',
    # 'S_START'
    # append the reserved words
] + list(set(reserved_words.values()))
lexer = lex()

def get_tokens(string):
    """Tokenize *string* with the module-level lexer and return a token list.

    Feeds the input to the shared ``lexer`` and drains it until
    ``lexer.token()`` yields ``None`` (end of input).
    """
    lexer.input(string)
    return list(iter(lexer.token, None))
| |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-05-05 20:31
from __future__ import unicode_literals
from django.db import migrations, models
def _summary_fields(label_column, include_total=True):
    """Shared column layout of the aggregate status views.

    Every summary view has an auto ``id``, one text label column and one
    nullable integer column per project status; the ``*_count`` views also
    carry a ``total`` column.
    """
    columns = [
        ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
        (label_column, models.TextField(blank=True, null=True)),
    ]
    columns.extend(
        (status, models.IntegerField(blank=True, null=True))
        for status in ('canceled', 'failed', 'live', 'successful', 'suspended')
    )
    if include_total:
        columns.append(('total', models.IntegerField(blank=True, null=True)))
    return columns


def _unmanaged(table_name):
    """Options for a model over a pre-existing table Django must not manage."""
    return {'db_table': table_name, 'managed': False}


class Migration(migrations.Migration):
    """Initial migration: declares unmanaged models over existing tables/views."""

    initial = True

    dependencies = []

    operations = [
        migrations.CreateModel(
            name='CategoryStatusCount',
            fields=_summary_fields('category'),
            options=_unmanaged('category_status_count'),
        ),
        migrations.CreateModel(
            name='CategoryStatusPercent',
            fields=_summary_fields('category', include_total=False),
            options=_unmanaged('category_status_percent'),
        ),
        migrations.CreateModel(
            name='CountryStatusCount',
            fields=_summary_fields('country'),
            options=_unmanaged('country_status_count'),
        ),
        migrations.CreateModel(
            name='CountryStatusPercent',
            fields=_summary_fields('country', include_total=False),
            options=_unmanaged('country_status_percent'),
        ),
        migrations.CreateModel(
            name='Kickstarter',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('status', models.TextField(blank=True, null=True)),
                ('disable_communication', models.TextField(blank=True, null=True)),
                ('location_type', models.TextField(blank=True, null=True)),
                ('category_parent_id', models.IntegerField(blank=True, null=True)),
                ('sub_category', models.TextField(blank=True, null=True)),
                ('usd_pledged', models.TextField(blank=True, null=True)),
                ('launched_at', models.TextField(blank=True, null=True)),
                ('category_slug', models.TextField(blank=True, null=True)),
                ('currency', models.TextField(blank=True, null=True)),
                ('deadline', models.TextField(blank=True, null=True)),
                ('spotlight', models.TextField(blank=True, null=True)),
                ('currency_trailing_code', models.TextField(blank=True, null=True)),
                ('displayable_name', models.TextField(blank=True, null=True)),
                ('state_changed_at', models.TextField(blank=True, null=True)),
                ('goal', models.TextField(blank=True, null=True)),
                ('category', models.TextField(blank=True, null=True)),
                ('city', models.TextField(blank=True, null=True)),
                ('name', models.TextField(blank=True, null=True)),
                ('creator_name', models.TextField(blank=True, null=True)),
                ('staff_pick', models.TextField(blank=True, null=True)),
                ('country', models.TextField(blank=True, null=True)),
                ('pledged', models.TextField(blank=True, null=True)),
                ('creator', models.TextField(blank=True, null=True)),
                ('location_code', models.TextField(blank=True, null=True)),
                ('slug', models.TextField(blank=True, null=True)),
                ('state', models.TextField(blank=True, null=True)),
                ('static_usd_rate', models.TextField(blank=True, null=True)),
                ('location', models.TextField(blank=True, null=True)),
                ('backers_count', models.TextField(blank=True, null=True)),
                ('currency_symbol', models.TextField(blank=True, null=True)),
                ('category_id', models.IntegerField(blank=True, null=True)),
                ('created_at', models.TextField(blank=True, null=True)),
                ('blurb', models.TextField(blank=True, null=True)),
                ('category_position', models.IntegerField(blank=True, null=True)),
            ],
            options=_unmanaged('ks_project'),
        ),
        migrations.CreateModel(
            name='MonthStatusCount',
            fields=_summary_fields('month'),
            options=_unmanaged('monthly_status_count'),
        ),
        migrations.CreateModel(
            name='MonthStatusPercent',
            fields=_summary_fields('month', include_total=False),
            options=_unmanaged('monthly_status_percent'),
        ),
        migrations.CreateModel(
            name='Projects',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.TextField(blank=True, null=True)),
                ('creator_name', models.TextField(blank=True, null=True)),
                ('blurb', models.TextField(blank=True, null=True)),
                ('backers_count', models.TextField(blank=True, null=True)),
                ('goal', models.TextField(blank=True, null=True)),
                ('pledged', models.TextField(blank=True, null=True)),
                ('percent_of_goal', models.TextField(blank=True, null=True)),
                ('status', models.TextField(blank=True, null=True)),
                ('category', models.TextField(blank=True, null=True)),
                ('sub_category', models.TextField(blank=True, null=True)),
                ('launched_at', models.DateTimeField(blank=True, null=True)),
                ('deadline', models.DateTimeField(blank=True, null=True)),
                ('created_at', models.DateTimeField(blank=True, null=True)),
                ('location', models.TextField(blank=True, null=True)),
                ('country', models.TextField(blank=True, null=True)),
                ('state', models.TextField(blank=True, null=True)),
                ('city', models.TextField(blank=True, null=True)),
            ],
            options=_unmanaged('projects'),
        ),
        migrations.CreateModel(
            name='SubCategoryStatusCount',
            fields=_summary_fields('sub_category'),
            options=_unmanaged('sub_category_status_count'),
        ),
        migrations.CreateModel(
            name='SubCategoryStatusPercent',
            fields=_summary_fields('sub_category', include_total=False),
            options=_unmanaged('sub_category_status_percent'),
        ),
    ]
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
# Signature of the optional ``cls`` response hook: it receives the raw
# PipelineResponse, the deserialized model and the response headers dict.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VpnGatewaysOperations:
    """VpnGatewaysOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2020_07_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    # Expose the model namespace on the operation group for convenience.
    models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
    """Keep references to the pipeline client, configuration and codecs."""
    self._config = config
    self._client = client
    self._deserialize = deserializer
    self._serialize = serializer
async def get(
    self,
    resource_group_name: str,
    gateway_name: str,
    **kwargs: Any
) -> "_models.VpnGateway":
    """Retrieves the details of a virtual wan vpn gateway.

    :param resource_group_name: The resource group name of the VpnGateway.
    :type resource_group_name: str
    :param gateway_name: The name of the gateway.
    :type gateway_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: VpnGateway, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2020_07_01.models.VpnGateway
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VpnGateway"]
    error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    error_map.update(kwargs.pop('error_map', {}))

    # Resolve the ARM resource URL from the operation metadata template.
    url = self._client.format_url(
        self.get.metadata['url'],  # type: ignore
        subscriptionId=self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        resourceGroupName=self._serialize.url("resource_group_name", resource_group_name, 'str'),
        gatewayName=self._serialize.url("gateway_name", gateway_name, 'str'),
    )

    query_parameters = {
        'api-version': self._serialize.query("api_version", "2020-07-01", 'str'),
    }  # type: Dict[str, Any]
    header_parameters = {
        'Accept': self._serialize.header("accept", "application/json", 'str'),
    }  # type: Dict[str, Any]

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Only 200 is a success for a GET of this resource.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('VpnGateway', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'}  # type: ignore
async def _create_or_update_initial(
    self,
    resource_group_name: str,
    gateway_name: str,
    vpn_gateway_parameters: "_models.VpnGateway",
    **kwargs: Any
) -> "_models.VpnGateway":
    """Send the initial PUT of the create-or-update long-running operation.

    Returns the first response's payload; polling to completion is handled
    by :meth:`begin_create_or_update`.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VpnGateway"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._create_or_update_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the gateway model as the request body and issue the PUT.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(vpn_gateway_parameters, 'VpnGateway')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200 = update of an existing gateway, 201 = creation of a new one.
    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if response.status_code == 200:
        deserialized = self._deserialize('VpnGateway', pipeline_response)

    if response.status_code == 201:
        deserialized = self._deserialize('VpnGateway', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'}  # type: ignore
async def begin_create_or_update(
    self,
    resource_group_name: str,
    gateway_name: str,
    vpn_gateway_parameters: "_models.VpnGateway",
    **kwargs: Any
) -> AsyncLROPoller["_models.VpnGateway"]:
    """Creates a virtual wan vpn gateway if it doesn't exist else updates the existing gateway.

    :param resource_group_name: The resource group name of the VpnGateway.
    :type resource_group_name: str
    :param gateway_name: The name of the gateway.
    :type gateway_name: str
    :param vpn_gateway_parameters: Parameters supplied to create or Update a virtual wan vpn
     gateway.
    :type vpn_gateway_parameters: ~azure.mgmt.network.v2020_07_01.models.VpnGateway
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either VpnGateway or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_07_01.models.VpnGateway]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VpnGateway"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: send the initial PUT.  The cls=lambda hook keeps the
        # raw PipelineResponse so the poller can drive the LRO itself.
        raw_result = await self._create_or_update_initial(
            resource_group_name=resource_group_name,
            gateway_name=gateway_name,
            vpn_gateway_parameters=vpn_gateway_parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # These kwargs were consumed by the initial request and must not leak
    # into the polling requests issued below.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal response of the LRO.
        deserialized = self._deserialize('VpnGateway', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
    }

    # polling=True -> standard ARM polling (final state via azure-async-operation);
    # polling=False -> no polling; any other value is used as a custom method.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'}  # type: ignore
async def _update_tags_initial(
    self,
    resource_group_name: str,
    gateway_name: str,
    vpn_gateway_parameters: "_models.TagsObject",
    **kwargs: Any
) -> Optional["_models.VpnGateway"]:
    """Send the initial PATCH of the update-tags long-running operation.

    Returns the updated gateway on 200, or ``None`` on 202 (accepted, body
    not yet available); polling is handled by :meth:`begin_update_tags`.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.VpnGateway"]]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL
    url = self._update_tags_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the tags payload and issue the PATCH.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(vpn_gateway_parameters, 'TagsObject')
    body_content_kwargs['content'] = body_content
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # 202 carries no body, so the deserialized result stays None.
    deserialized = None
    if response.status_code == 200:
        deserialized = self._deserialize('VpnGateway', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'}  # type: ignore
async def begin_update_tags(
    self,
    resource_group_name: str,
    gateway_name: str,
    vpn_gateway_parameters: "_models.TagsObject",
    **kwargs: Any
) -> AsyncLROPoller["_models.VpnGateway"]:
    """Updates virtual wan vpn gateway tags.

    :param resource_group_name: The resource group name of the VpnGateway.
    :type resource_group_name: str
    :param gateway_name: The name of the gateway.
    :type gateway_name: str
    :param vpn_gateway_parameters: Parameters supplied to update a virtual wan vpn gateway tags.
    :type vpn_gateway_parameters: ~azure.mgmt.network.v2020_07_01.models.TagsObject
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either VpnGateway or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_07_01.models.VpnGateway]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VpnGateway"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: send the initial PATCH.  The cls=lambda hook keeps
        # the raw PipelineResponse so the poller can drive the LRO itself.
        raw_result = await self._update_tags_initial(
            resource_group_name=resource_group_name,
            gateway_name=gateway_name,
            vpn_gateway_parameters=vpn_gateway_parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # Consumed by the initial request; must not leak into polling requests.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal response of the LRO.
        deserialized = self._deserialize('VpnGateway', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
    }

    # polling=True -> ARM polling; False -> no polling; else custom method.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'}  # type: ignore
async def _delete_initial(
    self,
    resource_group_name: str,
    gateway_name: str,
    **kwargs: Any
) -> None:
    """Send the initial DELETE of the delete long-running operation.

    Returns nothing; polling to completion is handled by :meth:`begin_delete`.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    accept = "application/json"

    # Construct URL
    url = self._delete_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200/202 = deletion started or done; 204 = resource already absent.
    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'}  # type: ignore
async def begin_delete(
    self,
    resource_group_name: str,
    gateway_name: str,
    **kwargs: Any
) -> AsyncLROPoller[None]:
    """Deletes a virtual wan vpn gateway.

    :param resource_group_name: The resource group name of the VpnGateway.
    :type resource_group_name: str
    :param gateway_name: The name of the gateway.
    :type gateway_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # No saved state: send the initial DELETE.  The cls=lambda hook keeps
        # the raw PipelineResponse so the poller can drive the LRO itself.
        raw_result = await self._delete_initial(
            resource_group_name=resource_group_name,
            gateway_name=gateway_name,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # Consumed by the initial request; must not leak into polling requests.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Delete produces no model; only invoke the optional response hook.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
    }

    # polling=True -> ARM polling (final state via Location header);
    # polling=False -> no polling; any other value is used as a custom method.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}'}  # type: ignore
async def _reset_initial(
    self,
    resource_group_name: str,
    gateway_name: str,
    **kwargs: Any
) -> Optional["_models.VpnGateway"]:
    """Send the initial Reset POST request for the given VPN gateway.

    Returns the deserialized VpnGateway when the service answers 200, or
    None when it answers 202 (operation accepted, still in progress).
    Raises HttpResponseError (ARM-formatted) for any other status code.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.VpnGateway"]]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    accept = "application/json"

    # Resolve the templated operation URL from the method metadata.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(self._reset_initial.metadata['url'], **path_format_arguments)  # type: ignore

    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in (200, 202):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    # 200 carries the gateway payload; 202 has no body worth deserializing.
    deserialized = self._deserialize('VpnGateway', pipeline_response) if response.status_code == 200 else None

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_reset_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/reset'}  # type: ignore
async def begin_reset(
    self,
    resource_group_name: str,
    gateway_name: str,
    **kwargs: Any
) -> AsyncLROPoller["_models.VpnGateway"]:
    """Resets the primary of the vpn gateway in the specified resource group.

    :param resource_group_name: The resource group name of the VpnGateway.
    :type resource_group_name: str
    :param gateway_name: The name of the gateway.
    :type gateway_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either VpnGateway or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_07_01.models.VpnGateway]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # polling may be True (default ARM polling), False (no polling), or a
    # caller-supplied AsyncPollingMethod instance.
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.VpnGateway"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: issue the initial reset request. The lambda keeps
        # the raw pipeline response so the poller can read the LRO headers.
        raw_result = await self._reset_initial(
            resource_group_name=resource_group_name,
            gateway_name=gateway_name,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # Consumed by the initial call above; must not be forwarded to the poller.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal LRO response into a VpnGateway model
        # (or hand the raw response to the caller-supplied cls).
        deserialized = self._deserialize('VpnGateway', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    # This LRO reports its final state via the 'location' header.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved state.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reset.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/reset'}  # type: ignore
async def _start_packet_capture_initial(
    self,
    resource_group_name: str,
    gateway_name: str,
    parameters: Optional["_models.VpnGatewayPacketCaptureStartParameters"] = None,
    **kwargs: Any
) -> Optional[str]:
    """Issue the initial start-packet-capture POST request.

    Returns the deserialized string payload on 200, or None on 202
    (operation accepted, still running). Raises HttpResponseError
    (ARM-formatted) for any other status code.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional[str]]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Resolve the templated operation URL from the method metadata.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(self._start_packet_capture_initial.metadata['url'], **path_format_arguments)  # type: ignore

    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }

    # The request body is optional; serialize it only when supplied.
    body_content_kwargs = {}  # type: Dict[str, Any]
    if parameters is not None:
        body_content_kwargs['content'] = self._serialize.body(parameters, 'VpnGatewayPacketCaptureStartParameters')
    else:
        body_content_kwargs['content'] = None

    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in (200, 202):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('str', pipeline_response) if response.status_code == 200 else None

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_start_packet_capture_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/startpacketcapture'}  # type: ignore
async def begin_start_packet_capture(
    self,
    resource_group_name: str,
    gateway_name: str,
    parameters: Optional["_models.VpnGatewayPacketCaptureStartParameters"] = None,
    **kwargs: Any
) -> AsyncLROPoller[str]:
    """Starts packet capture on vpn gateway in the specified resource group.

    :param resource_group_name: The resource group name of the VpnGateway.
    :type resource_group_name: str
    :param gateway_name: The name of the gateway.
    :type gateway_name: str
    :param parameters: Vpn gateway packet capture parameters supplied to start packet capture on
     vpn gateway.
    :type parameters: ~azure.mgmt.network.v2020_07_01.models.VpnGatewayPacketCaptureStartParameters
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either str or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[str]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # polling may be True (default ARM polling), False (no polling), or a
    # caller-supplied AsyncPollingMethod instance.
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[str]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: issue the initial request. The lambda keeps the
        # raw pipeline response so the poller can read the LRO headers.
        raw_result = await self._start_packet_capture_initial(
            resource_group_name=resource_group_name,
            gateway_name=gateway_name,
            parameters=parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # Consumed by the initial call above; must not be forwarded to the poller.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # The final response body is a plain string (capture session info).
        deserialized = self._deserialize('str', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    # This LRO reports its final state via the 'location' header.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved state.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start_packet_capture.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/startpacketcapture'}  # type: ignore
async def _stop_packet_capture_initial(
    self,
    resource_group_name: str,
    gateway_name: str,
    parameters: Optional["_models.VpnGatewayPacketCaptureStopParameters"] = None,
    **kwargs: Any
) -> Optional[str]:
    """Issue the initial stop-packet-capture POST request.

    Returns the deserialized string payload on 200, or None on 202
    (operation accepted, still running). Raises HttpResponseError
    (ARM-formatted) for any other status code.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional[str]]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Resolve the templated operation URL from the method metadata.
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(self._stop_packet_capture_initial.metadata['url'], **path_format_arguments)  # type: ignore

    query_parameters = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    header_parameters = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }

    # The request body is optional; serialize it only when supplied.
    body_content_kwargs = {}  # type: Dict[str, Any]
    if parameters is not None:
        body_content_kwargs['content'] = self._serialize.body(parameters, 'VpnGatewayPacketCaptureStopParameters')
    else:
        body_content_kwargs['content'] = None

    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in (200, 202):
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    deserialized = self._deserialize('str', pipeline_response) if response.status_code == 200 else None

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_stop_packet_capture_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/stoppacketcapture'}  # type: ignore
async def begin_stop_packet_capture(
    self,
    resource_group_name: str,
    gateway_name: str,
    parameters: Optional["_models.VpnGatewayPacketCaptureStopParameters"] = None,
    **kwargs: Any
) -> AsyncLROPoller[str]:
    """Stops packet capture on vpn gateway in the specified resource group.

    :param resource_group_name: The resource group name of the VpnGateway.
    :type resource_group_name: str
    :param gateway_name: The name of the gateway.
    :type gateway_name: str
    :param parameters: Vpn gateway packet capture parameters supplied to stop packet capture on vpn
     gateway.
    :type parameters: ~azure.mgmt.network.v2020_07_01.models.VpnGatewayPacketCaptureStopParameters
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either str or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[str]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    # polling may be True (default ARM polling), False (no polling), or a
    # caller-supplied AsyncPollingMethod instance.
    polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[str]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    if cont_token is None:
        # Fresh operation: issue the initial request. The lambda keeps the
        # raw pipeline response so the poller can read the LRO headers.
        raw_result = await self._stop_packet_capture_initial(
            resource_group_name=resource_group_name,
            gateway_name=gateway_name,
            parameters=parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # Consumed by the initial call above; must not be forwarded to the poller.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # The final response body is a plain string (capture result info).
        deserialized = self._deserialize('str', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }

    # This LRO reports its final state via the 'location' header.
    if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = AsyncNoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved state.
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop_packet_capture.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/stoppacketcapture'}  # type: ignore
def list_by_resource_group(
    self,
    resource_group_name: str,
    **kwargs: Any
) -> AsyncIterable["_models.ListVpnGatewaysResult"]:
    """Lists all the VpnGateways in a resource group.

    The returned AsyncItemPaged lazily fetches pages; no request is sent
    until the caller starts iterating.

    :param resource_group_name: The resource group name of the VpnGateway.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ListVpnGatewaysResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_07_01.models.ListVpnGatewaysResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ListVpnGatewaysResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # First page: build the URL from metadata and add api-version.
        # Follow-up pages: use the service-provided next_link verbatim.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            # Construct URL
            url = self.list_by_resource_group.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Split one page into (link to the next page or None, items iterable).
        deserialized = self._deserialize('ListVpnGatewaysResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch a single page; any status other than 200 is an error.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways'}  # type: ignore
def list(
    self,
    **kwargs: Any
) -> AsyncIterable["_models.ListVpnGatewaysResult"]:
    """Lists all the VpnGateways in a subscription.

    The returned AsyncItemPaged lazily fetches pages; no request is sent
    until the caller starts iterating.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ListVpnGatewaysResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_07_01.models.ListVpnGatewaysResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ListVpnGatewaysResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-07-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # First page: build the URL from metadata and add api-version.
        # Follow-up pages: use the service-provided next_link verbatim.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            # Construct URL
            url = self.list.metadata['url']  # type: ignore
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Split one page into (link to the next page or None, items iterable).
        deserialized = self._deserialize('ListVpnGatewaysResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch a single page; any status other than 200 is an error.
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/vpnGateways'}  # type: ignore
| |
# Copyright 2015 Conchylicultor. All Rights Reserved.
# Modifications copyright (C) 2016 Carlos Segura
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Model to predict the next sentence given an input sequence
"""
import tensorflow as tf
from chatbot.textdata import Batch
class ProjectionOp:
    """Single-layer linear projection of decoder outputs onto the vocabulary.

    The weights are stored transposed (W_t, shape [num_classes, dim]) so they
    can be handed directly to tf.nn.sampled_softmax_loss; W is the transposed
    view used for the forward projection.
    """

    def __init__(self, shape, scope=None, dtype=None):
        """
        Args:
            shape: a tuple (output dim, input dim) — at the call site this is
                (vocabulary size, hidden size); the bias has size shape[0]
            scope (str): encapsulate variables
            dtype: the weights type
        """
        assert len(shape) == 2

        self.scope = scope

        # Variables live under a dedicated scope so they can be restored by name.
        with tf.variable_scope('weights_' + self.scope):
            self.W_t = tf.get_variable(
                'weights',
                shape,
                # initializer=tf.truncated_normal_initializer()  # TODO: Tune value (fct of input size: 1/sqrt(input_dim))
                dtype=dtype
            )
            self.b = tf.get_variable(
                'bias',
                shape[0],  # one bias per output class
                initializer=tf.constant_initializer(),
                dtype=dtype
            )
        self.W = tf.transpose(self.W_t)

    def getWeights(self):
        """Convenience accessor returning (W, b) for tf arguments."""
        return self.W, self.b

    def __call__(self, X):
        """Project the output of the decoder into the vocabulary space.

        Args:
            X (tf.Tensor): input value
        """
        with tf.name_scope(self.scope):
            projected = tf.matmul(X, self.W)
            return projected + self.b
class Model:
    """
    Implementation of a seq2seq model.
    Architecture:
        Encoder/decoder
        2 LSTM layers
    """

    def __init__(self, args, textData):
        """
        Args:
            args: parameters of the model (hiddenSize, numLayers, dropout, test, ...)
            textData: the dataset object (vocabulary, token ids, batches)
        """
        print("Model creation...")

        self.textData = textData  # Keep a reference on the dataset
        self.args = args  # Keep track of the parameters of the model
        self.dtype = tf.float32

        # Placeholders (created in buildNetwork)
        self.encoderInputs = None
        self.decoderInputs = None  # Same that decoderTarget plus the <go>
        self.decoderTargets = None
        self.decoderWeights = None  # Adjust the learning to the target sentence size

        # Main operators
        self.lossFct = None
        self.optOp = None
        self.outputs = None  # Outputs of the network, list of probability for each words

        # Construct the graphs
        self.buildNetwork()

    def buildNetwork(self):
        """ Create the computational graph
        """
        # TODO: Create name_scopes (for better graph visualisation)
        # TODO: Use buckets (better perfs)

        # Parameters of sampled softmax (needed for attention mechanism and a large vocabulary size)
        outputProjection = None
        # Sampled softmax only makes sense if we sample less than vocabulary size.
        if 0 < self.args.softmaxSamples < self.textData.getVocabularySize():
            outputProjection = ProjectionOp(
                (self.textData.getVocabularySize(), self.args.hiddenSize),
                scope='softmax_projection',
                dtype=self.dtype
            )

            def sampledSoftmax(labels, inputs):
                # Training-time loss: samples softmaxSamples negative classes
                # per batch instead of computing the full softmax.
                labels = tf.reshape(labels, [-1, 1])  # Add one dimension (nb of true classes, here 1)

                # We need to compute the sampled_softmax_loss using 32bit floats to
                # avoid numerical instabilities.
                localWt = tf.cast(outputProjection.W_t, tf.float32)
                localB = tf.cast(outputProjection.b, tf.float32)
                localInputs = tf.cast(inputs, tf.float32)

                return tf.cast(
                    tf.nn.sampled_softmax_loss(
                        localWt,  # Should have shape [num_classes, dim]
                        localB,
                        labels,
                        localInputs,
                        self.args.softmaxSamples,  # The number of classes to randomly sample per batch
                        self.textData.getVocabularySize()),  # The number of classes
                    self.dtype)

        # Creation of the rnn cell
        def create_rnn_cell():
            # One LSTM layer; dropout is applied on outputs at training time only.
            encoDecoCell = tf.contrib.rnn.BasicLSTMCell(  # Or GRUCell, LSTMCell(args.hiddenSize)
                self.args.hiddenSize,
            )
            if not self.args.test:  # TODO: Should use a placeholder instead
                encoDecoCell = tf.contrib.rnn.DropoutWrapper(
                    encoDecoCell,
                    input_keep_prob=1.0,
                    output_keep_prob=self.args.dropout
                )
            return encoDecoCell

        # Stack numLayers cells to form the encoder/decoder cell.
        encoDecoCell = tf.contrib.rnn.MultiRNNCell(
            [create_rnn_cell() for _ in range(self.args.numLayers)],
        )

        # Network input (placeholders): one int32 placeholder per time step.
        with tf.name_scope('placeholder_encoder'):
            self.encoderInputs = [tf.placeholder(tf.int32, [None, ]) for _ in range(self.args.maxLengthEnco)]  # Batch size * sequence length * input dim

        with tf.name_scope('placeholder_decoder'):
            self.decoderInputs = [tf.placeholder(tf.int32, [None, ], name='inputs') for _ in range(self.args.maxLengthDeco)]  # Same sentence length for input and output (Right ?)
            self.decoderTargets = [tf.placeholder(tf.int32, [None, ], name='targets') for _ in range(self.args.maxLengthDeco)]
            self.decoderWeights = [tf.placeholder(tf.float32, [None, ], name='weights') for _ in range(self.args.maxLengthDeco)]

        # Define the network
        # Here we use an embedding model, it takes integer as input and convert them into word vector for
        # better word representation
        decoderOutputs, states = tf.contrib.legacy_seq2seq.embedding_attention_seq2seq(
            self.encoderInputs,  # List<[batch=?, inputDim=1]>, list of size args.maxLength
            self.decoderInputs,  # For training, we force the correct output (feed_previous=False)
            encoDecoCell,
            self.textData.getVocabularySize(),
            self.textData.getVocabularySize(),  # Both encoder and decoder have the same number of class
            embedding_size=self.args.embeddingSize,  # Dimension of each word
            output_projection=outputProjection.getWeights() if outputProjection else None,
            feed_previous=bool(self.args.test)  # When we test (self.args.test), we use previous output as next input (feed_previous)
        )
        # TODO: When the LSTM hidden size is too big, we should project the LSTM output into a smaller space (4086 => 2046): Should speed up
        # training and reduce memory usage. Other solution, use sampling softmax

        # For testing only
        if self.args.test:
            if not outputProjection:
                self.outputs = decoderOutputs
            else:
                # Map the hidden-size decoder outputs back to vocabulary logits.
                self.outputs = [outputProjection(output) for output in decoderOutputs]

            # TODO: Attach a summary to visualize the output

        # For training only
        else:
            # Finally, we define the loss function
            self.lossFct = tf.contrib.legacy_seq2seq.sequence_loss(
                decoderOutputs,
                self.decoderTargets,
                self.decoderWeights,
                self.textData.getVocabularySize(),
                softmax_loss_function=sampledSoftmax if outputProjection else None  # If None, use default SoftMax
            )
            tf.summary.scalar('loss', self.lossFct)  # Keep track of the cost

            # Initialize the optimizer
            opt = tf.train.AdamOptimizer(
                learning_rate=self.args.learningRate,
                beta1=0.9,
                beta2=0.999,
                epsilon=1e-08
            )
            self.optOp = opt.minimize(self.lossFct)

    def step(self, batch):
        """ Forward/training step operation.
        Does not perform run on itself but just return the operators to do so. Those have then to be run
        Args:
            batch (Batch): Input data on testing mode, input and target on output mode
        Return:
            (ops), dict: A tuple of the (training, loss) operators or (outputs,) in testing mode with the associated feed dictionary
        """
        # Feed the dictionary
        feedDict = {}
        ops = None

        if not self.args.test:  # Training
            for i in range(self.args.maxLengthEnco):
                feedDict[self.encoderInputs[i]] = batch.encoderSeqs[i]
            for i in range(self.args.maxLengthDeco):
                feedDict[self.decoderInputs[i]] = batch.decoderSeqs[i]
                feedDict[self.decoderTargets[i]] = batch.targetSeqs[i]
                feedDict[self.decoderWeights[i]] = batch.weights[i]
            ops = (self.optOp, self.lossFct)
        else:  # Testing (batchSize == 1)
            for i in range(self.args.maxLengthEnco):
                feedDict[self.encoderInputs[i]] = batch.encoderSeqs[i]
            # Only the <go> token is fed; the decoder feeds its own previous
            # outputs back (feed_previous=True in test mode).
            feedDict[self.decoderInputs[0]] = [self.textData.goToken]
            ops = (self.outputs,)

        # Return one pass operator
        return ops, feedDict
| |
#!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata: community-supported, preview status.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_subnet
version_added: "2.1"
short_description: Manage Azure subnets
description:
- Create, update or delete a subnet within a given virtual network.
- Allows setting and updating the address prefix CIDR, which must be valid within the context of the virtual network.
- Use the M(azure_rm_networkinterface) module to associate interfaces with the subnet and assign specific IP addresses.
options:
resource_group:
description:
- Name of resource group.
required: true
name:
description:
- Name of the subnet.
required: true
address_prefix_cidr:
description:
- CIDR defining the IPv4 address space of the subnet. Must be valid within the context of the virtual network.
aliases:
- address_prefix
security_group:
description:
- Existing security group with which to associate the subnet.
- It can be the security group name which is in the same resource group.
- Can be the resource ID of the security group.
- Can be a dict containing the I(name) and I(resource_group) of the security group.
aliases:
- security_group_name
state:
description:
- Assert the state of the subnet. Use C(present) to create or update a subnet and use C(absent) to delete a subnet.
default: present
choices:
- absent
- present
virtual_network_name:
description:
- Name of an existing virtual network with which the subnet is or will be associated.
required: true
aliases:
- virtual_network
route_table:
description:
- The reference of the RouteTable resource.
- Can be the name or resource ID of the route table.
- Can be a dict containing the I(name) and I(resource_group) of the route table.
version_added: "2.7"
service_endpoints:
description:
- An array of service endpoints.
type: list
suboptions:
service:
description:
- The type of the endpoint service.
required: True
locations:
description:
- A list of locations.
type: list
version_added: "2.8"
extends_documentation_fragment:
- azure
author:
- Chris Houseknecht (@chouseknecht)
- Matt Davis (@nitzmahone)
'''
EXAMPLES = '''
- name: Create a subnet
azure_rm_subnet:
resource_group: myResourceGroup
virtual_network_name: myVirtualNetwork
name: mySubnet
address_prefix_cidr: "10.1.0.0/24"
- name: Create a subnet refer nsg from other resource group
azure_rm_subnet:
resource_group: myResourceGroup
virtual_network_name: myVirtualNetwork
name: mySubnet
address_prefix_cidr: "10.1.0.0/16"
security_group:
name: secgroupfoo
resource_group: mySecondResourceGroup
route_table: route
- name: Delete a subnet
azure_rm_subnet:
resource_group: myResourceGroup
virtual_network_name: myVirtualNetwork
name: mySubnet
state: absent
'''
RETURN = '''
state:
description:
- Current state of the subnet.
returned: success
type: complex
contains:
address_prefix:
description:
- IP address CIDR.
returned: always
type: str
sample: "10.1.0.0/16"
id:
description:
- Subnet resource path.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/virtualNetworks/myVirtualNetwork/subnets/mySubnet"
name:
description:
- Subnet name.
returned: always
type: str
sample: "foobar"
network_security_group:
description:
- Associated network security group of subnets.
returned: always
type: complex
contains:
id:
description:
- Security group resource identifier.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroup/myResourceGroup/providers/Microsoft.Network/networkSecurityGroups/secgroupfoo"
name:
description:
- Name of the security group.
returned: always
type: str
sample: "secgroupfoo"
provisioning_state:
description:
- Success or failure of the provisioning event.
returned: always
type: str
sample: "Succeeded"
''' # NOQA
from ansible.module_utils.azure_rm_common import AzureRMModuleBase, CIDR_PATTERN, azure_id_to_dict, format_resource_id
try:
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
def subnet_to_dict(subnet):
    """Serialize an Azure SDK Subnet object into a plain facts dict.

    Always returns id/name/provisioning_state/address_prefix plus (possibly
    empty) network_security_group and route_table dicts; service_endpoints
    is present only when the subnet has endpoints configured.
    """
    facts = {
        'id': subnet.id,
        'name': subnet.name,
        'provisioning_state': subnet.provisioning_state,
        'address_prefix': subnet.address_prefix,
        'network_security_group': {},
        'route_table': {},
    }
    nsg = subnet.network_security_group
    if nsg:
        parts = azure_id_to_dict(nsg.id)
        facts['network_security_group'] = {
            'id': nsg.id,
            'name': parts['networkSecurityGroups'],
            'resource_group': parts['resourceGroups'],
        }
    table = subnet.route_table
    if table:
        parts = azure_id_to_dict(table.id)
        facts['route_table'] = {
            'id': table.id,
            'name': parts['routeTables'],
            'resource_group': parts['resourceGroups'],
        }
    if subnet.service_endpoints:
        facts['service_endpoints'] = [
            {'service': endpoint.service, 'locations': endpoint.locations or []}
            for endpoint in subnet.service_endpoints
        ]
    return facts
class AzureRMSubnet(AzureRMModuleBase):
    """Ansible module implementation: create, update or delete an Azure subnet."""

    def __init__(self):
        # security_group/route_table are 'raw' because they accept either a
        # plain name string or a dict with name/resource_group keys.
        self.module_arg_spec = dict(
            resource_group=dict(type='str', required=True),
            name=dict(type='str', required=True),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            virtual_network_name=dict(type='str', required=True, aliases=['virtual_network']),
            address_prefix_cidr=dict(type='str', aliases=['address_prefix']),
            security_group=dict(type='raw', aliases=['security_group_name']),
            route_table=dict(type='raw'),
            service_endpoints=dict(
                type='list'
            )
        )
        self.results = dict(
            changed=False,
            state=dict()
        )
        self.resource_group = None
        self.name = None
        self.state = None
        self.virtual_network_name = None
        self.address_prefix_cidr = None
        self.security_group = None
        self.route_table = None
        self.service_endpoints = None
        super(AzureRMSubnet, self).__init__(self.module_arg_spec,
                                            supports_check_mode=True,
                                            supports_tags=False)

    def exec_module(self, **kwargs):
        """Diff the desired subnet state against Azure and apply changes.

        Returns the standard results dict with 'changed' and the subnet
        facts under 'state'.
        """
        nsg = None
        subnet = None
        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])
        if self.address_prefix_cidr and not CIDR_PATTERN.match(self.address_prefix_cidr):
            self.fail("Invalid address_prefix_cidr value {0}".format(self.address_prefix_cidr))
        nsg = dict()
        if self.security_group:
            nsg = self.parse_nsg()
        route_table = dict()
        if self.route_table:
            # Normalize the route_table parameter to a full resource id string.
            route_table = self.parse_resource_to_dict(self.route_table)
            self.route_table = format_resource_id(val=route_table['name'],
                                                  subscription_id=route_table['subscription_id'],
                                                  namespace='Microsoft.Network',
                                                  types='routeTables',
                                                  resource_group=route_table['resource_group'])
        results = dict()
        changed = False
        try:
            self.log('Fetching subnet {0}'.format(self.name))
            subnet = self.network_client.subnets.get(self.resource_group,
                                                     self.virtual_network_name,
                                                     self.name)
            self.check_provisioning_state(subnet, self.state)
            results = subnet_to_dict(subnet)
            if self.state == 'present':
                if self.address_prefix_cidr and results['address_prefix'] != self.address_prefix_cidr:
                    self.log("CHANGED: subnet {0} address_prefix_cidr".format(self.name))
                    changed = True
                    results['address_prefix'] = self.address_prefix_cidr
                if self.security_group is not None and results['network_security_group'].get('id') != nsg.get('id'):
                    self.log("CHANGED: subnet {0} network security group".format(self.name))
                    changed = True
                    results['network_security_group']['id'] = nsg.get('id')
                    results['network_security_group']['name'] = nsg.get('name')
                if self.route_table is not None and self.route_table != results['route_table'].get('id'):
                    changed = True
                    results['route_table']['id'] = self.route_table
                    self.log("CHANGED: subnet {0} route_table to {1}".format(self.name, route_table.get('name')))
                if self.service_endpoints:
                    # Compare requested endpoints against existing ones, keyed
                    # by service name with locations sorted so that ordering
                    # differences do not register as changes.
                    oldd = {}
                    for item in self.service_endpoints:
                        name = item['service']
                        locations = item.get('locations') or []
                        # BUGFIX: list.sort() returns None, which made every
                        # comparison see None == None and miss real location
                        # changes; sorted() returns the sorted list itself.
                        oldd[name] = {'service': name, 'locations': sorted(locations)}
                    newd = {}
                    if 'service_endpoints' in results:
                        for item in results['service_endpoints']:
                            name = item['service']
                            locations = item.get('locations') or []
                            newd[name] = {'service': name, 'locations': sorted(locations)}
                    if newd != oldd:
                        changed = True
                        results['service_endpoints'] = self.service_endpoints
            elif self.state == 'absent':
                changed = True
        except CloudError:
            # the subnet does not exist
            if self.state == 'present':
                changed = True
        self.results['changed'] = changed
        self.results['state'] = results
        if not self.check_mode:
            if self.state == 'present' and changed:
                if not subnet:
                    # create new subnet
                    if not self.address_prefix_cidr:
                        self.fail('address_prefix_cidr is not set')
                    self.log('Creating subnet {0}'.format(self.name))
                    subnet = self.network_models.Subnet(
                        address_prefix=self.address_prefix_cidr
                    )
                    if nsg:
                        subnet.network_security_group = self.network_models.NetworkSecurityGroup(id=nsg.get('id'))
                    if self.route_table:
                        subnet.route_table = self.network_models.RouteTable(id=self.route_table)
                    if self.service_endpoints:
                        subnet.service_endpoints = self.service_endpoints
                else:
                    # update subnet, re-sending the merged desired state
                    self.log('Updating subnet {0}'.format(self.name))
                    subnet = self.network_models.Subnet(
                        address_prefix=results['address_prefix']
                    )
                    if results['network_security_group'].get('id') is not None:
                        subnet.network_security_group = self.network_models.NetworkSecurityGroup(id=results['network_security_group'].get('id'))
                    if results['route_table'].get('id') is not None:
                        subnet.route_table = self.network_models.RouteTable(id=results['route_table'].get('id'))
                    if results.get('service_endpoints') is not None:
                        subnet.service_endpoints = results['service_endpoints']
                self.results['state'] = self.create_or_update_subnet(subnet)
            elif self.state == 'absent' and changed:
                # delete subnet
                self.delete_subnet()
                # the delete does not actually return anything. if no exception, then we'll assume
                # it worked.
                self.results['state']['status'] = 'Deleted'
        return self.results

    def create_or_update_subnet(self, subnet):
        """Send the subnet to Azure, wait for the poller, and return facts."""
        try:
            poller = self.network_client.subnets.create_or_update(self.resource_group,
                                                                  self.virtual_network_name,
                                                                  self.name,
                                                                  subnet)
            new_subnet = self.get_poller_result(poller)
        except Exception as exc:
            self.fail("Error creating or updating subnet {0} - {1}".format(self.name, str(exc)))
        self.check_provisioning_state(new_subnet)
        return subnet_to_dict(new_subnet)

    def delete_subnet(self):
        """Delete the subnet and wait for the operation to complete."""
        self.log('Deleting subnet {0}'.format(self.name))
        try:
            poller = self.network_client.subnets.delete(self.resource_group,
                                                        self.virtual_network_name,
                                                        self.name)
            result = self.get_poller_result(poller)
        except Exception as exc:
            self.fail("Error deleting subnet {0} - {1}".format(self.name, str(exc)))
        return result

    def parse_nsg(self):
        """Resolve the security_group parameter into a dict with full id and name."""
        nsg = self.security_group
        resource_group = self.resource_group
        if isinstance(self.security_group, dict):
            nsg = self.security_group.get('name')
            resource_group = self.security_group.get('resource_group', self.resource_group)
        id = format_resource_id(val=nsg,
                                subscription_id=self.subscription_id,
                                namespace='Microsoft.Network',
                                types='networkSecurityGroups',
                                resource_group=resource_group)
        name = azure_id_to_dict(id).get('name')
        return dict(id=id, name=name)
def main():
    """Module entry point: instantiating the class parses args and runs exec_module."""
    AzureRMSubnet()
if __name__ == '__main__':
    main()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import argparse
import ConfigParser
import logging
import math
import itertools
from api_utils import TokensAPI, SimilarityAPI
class ComputeSimilarity( object ):
    """
    Similarity measures.
    Compute term similarity based on co-occurrence and
    collocation likelihoods.
    """

    # Window half-width used when execute() receives no sliding_window_size.
    DEFAULT_SLIDING_WINDOW_SIZE = 10
    # Ceiling used to rescale frequencies for the sparsity filter in getG2Stats.
    MAX_FREQ = 100.0

    def __init__( self, logging_level ):
        # Dedicated logger writing to stderr at the requested level.
        self.logger = logging.getLogger( 'ComputeSimilarity' )
        self.logger.setLevel( logging_level )
        handler = logging.StreamHandler( sys.stderr )
        handler.setLevel( logging_level )
        self.logger.addHandler( handler )

    def execute( self, data_path, sliding_window_size = None ):
        """Run the full similarity pipeline and write results to disk.

        Reads tokens from data_path, computes document and sliding-window
        co-occurrence counts plus token/unigram/bigram counts, derives a G2
        likelihood statistic for each, combines the three matrices, and
        writes the similarity data back under data_path.
        """
        assert data_path is not None
        if sliding_window_size is None:
            sliding_window_size = ComputeSimilarity.DEFAULT_SLIDING_WINDOW_SIZE

        self.logger.info( '--------------------------------------------------------------------------------' )
        self.logger.info( 'Computing term similarity...' )
        self.logger.info( '    data_path = %s', data_path )
        self.logger.info( '    sliding_window_size = %d', sliding_window_size )

        self.logger.info( 'Connecting to data...' )
        self.tokens = TokensAPI( data_path )
        self.similarity = SimilarityAPI( data_path )

        self.logger.info( 'Reading data from disk...' )
        self.tokens.read()

        self.logger.info( 'Computing document co-occurrence...' )
        self.computeDocumentCooccurrence()

        self.logger.info( 'Computing sliding-window co-occurrence...' )
        self.computeSlidingWindowCooccurrence( sliding_window_size )

        self.logger.info( 'Counting total number of tokens, unigrams, and bigrams in the corpus...' )
        self.computeTokenCounts()

        self.logger.info( 'Computing document co-occurrence likelihood...' )
        self.similarity.document_g2 = self.getG2Stats( self.document_count, self.similarity.document_occurrence, self.similarity.document_cooccurrence )

        self.logger.info( 'Computing sliding-window co-occurrence likelihood...' )
        self.similarity.window_g2 = self.getG2Stats( self.window_count, self.similarity.window_occurrence, self.similarity.window_cooccurrence )

        self.logger.info( 'Computing collocation likelihood...' )
        self.similarity.collocation_g2 = self.getG2Stats( self.token_count, self.similarity.unigram_counts, self.similarity.bigram_counts )

        self.combineSimilarityMatrices()

        self.logger.info( 'Writing data to disk...' )
        self.similarity.write()

        self.logger.info( '--------------------------------------------------------------------------------' )

    def incrementCount( self, occurrence, key ):
        """Increment occurrence[key] in place, initializing it to 1 on first sight."""
        if key not in occurrence:
            occurrence[ key ] = 1
        else:
            occurrence[ key ] += 1

    def computeDocumentCooccurrence( self ):
        """Count per-document token occurrence and pairwise co-occurrence.

        Tokens are deduplicated per document (frozenset); each co-occurring
        pair is stored once under the ordered key (aToken < bToken).
        """
        document_count = 0
        occurrence = {}
        cooccurrence = {}
        for docID, docTokens in self.tokens.data.iteritems():
            self.logger.debug( '    %s (%d tokens)', docID, len(docTokens) )
            tokenSet = frozenset(docTokens)
            document_count += 1
            for token in tokenSet:
                self.incrementCount( occurrence, token )
            for aToken in tokenSet:
                for bToken in tokenSet:
                    if aToken < bToken:
                        self.incrementCount( cooccurrence, (aToken, bToken) )
        self.document_count = document_count
        self.similarity.document_occurrence = occurrence
        self.similarity.document_cooccurrence = cooccurrence

    def computeSlidingWindowCooccurrence( self, sliding_window_size ):
        """Like computeDocumentCooccurrence, but counted per sliding window."""
        window_count = 0
        occurrence = {}
        cooccurrence = {}
        for docID, docTokens in self.tokens.data.iteritems():
            allWindowTokens = self.getSlidingWindowTokens( docTokens, sliding_window_size )
            self.logger.debug( '    %s (%d tokens, %d windows)', docID, len(docTokens), len(allWindowTokens) )
            for windowTokens in allWindowTokens:
                tokenSet = frozenset(windowTokens)
                window_count += 1
                for token in tokenSet:
                    self.incrementCount( occurrence, token )
                for aToken in tokenSet:
                    for bToken in tokenSet:
                        if aToken < bToken:
                            self.incrementCount( cooccurrence, (aToken, bToken) )
        self.window_count = window_count
        self.similarity.window_occurrence = occurrence
        self.similarity.window_cooccurrence = cooccurrence

    def getSlidingWindowTokens( self, tokens, sliding_window_size ):
        """Return a list of token slices slid across the document, clamped to bounds."""
        # NOTE(review): near the document edges successive clamped windows
        # can repeat the same slice, giving edge tokens extra weight in the
        # window counts - confirm this weighting is intended.
        allWindows = []
        aIndex = 0 - sliding_window_size
        bIndex = len(tokens) + sliding_window_size
        for index in range( aIndex, bIndex ):
            a = max( 0           , index - sliding_window_size )
            b = min( len(tokens) , index + sliding_window_size )
            allWindows.append( tokens[a:b] )
        return allWindows

    def computeTokenCounts( self ):
        """Count total tokens, per-unigram counts, and adjacent-bigram counts."""
        token_count = sum( len(docTokens) for docTokens in self.tokens.data.itervalues() )

        unigram_counts = {}
        for docTokens in self.tokens.data.itervalues():
            for token in docTokens:
                self.incrementCount( unigram_counts, token )

        # Bigrams are counted over adjacent token pairs within each document.
        bigram_counts = {}
        for docTokens in self.tokens.data.itervalues():
            prevToken = None
            for currToken in docTokens:
                if prevToken is not None:
                    self.incrementCount( bigram_counts, (prevToken, currToken) )
                prevToken = currToken

        self.token_count = token_count
        self.similarity.unigram_counts = unigram_counts
        self.similarity.bigram_counts = bigram_counts

    def getBinomial( self, B_given_A, any_given_A, B_given_notA, any_given_notA ):
        """G2 log-likelihood statistic from a 2x2 contingency of counts.

        E1/E2 are the expected counts of B in the A / not-A rows under
        independence; zero cells contribute nothing (x*log(x) -> 0 limit).
        """
        assert B_given_A >= 0
        assert B_given_notA >= 0
        assert any_given_A >= B_given_A
        assert any_given_notA >= B_given_notA

        a = float( B_given_A )
        b = float( B_given_notA )
        c = float( any_given_A )
        d = float( any_given_notA )
        E1 = c * ( a + b ) / ( c + d )
        E2 = d * ( a + b ) / ( c + d )

        g2a = 0
        g2b = 0
        if a > 0:
            g2a = a * math.log( a / E1 )
        if b > 0:
            g2b = b * math.log( b / E2 )
        return 2 * ( g2a + g2b )

    def getG2( self, freq_all, freq_ab, freq_a, freq_b ):
        """Map joint/marginal frequencies onto the contingency cells of getBinomial."""
        assert freq_all >= freq_a
        assert freq_all >= freq_b
        assert freq_a >= freq_ab
        assert freq_b >= freq_ab
        assert freq_all >= 0
        assert freq_ab >= 0
        assert freq_a >= 0
        assert freq_b >= 0

        B_given_A = freq_ab
        B_given_notA = freq_b - freq_ab
        any_given_A = freq_a
        any_given_notA = freq_all - freq_a
        return self.getBinomial( B_given_A, any_given_A, B_given_notA, any_given_notA )

    def getG2Stats( self, max_count, occurrence, cooccurrence ):
        """Compute G2 for every co-occurring pair whose rescaled marginals exceed 1.

        The rescaling to MAX_FREQ only acts as a sparsity filter; the G2
        statistic itself is computed from the raw frequencies.
        """
        g2_stats = {}
        freq_all = max_count
        for ( firstToken, secondToken ) in cooccurrence:
            freq_a = occurrence[ firstToken ]
            freq_b = occurrence[ secondToken ]
            freq_ab = cooccurrence[ (firstToken, secondToken) ]

            scale = ComputeSimilarity.MAX_FREQ / freq_all
            # NOTE(review): rescaled_freq_all and rescaled_freq_ab are
            # computed but never used below - confirm they can be dropped.
            rescaled_freq_all = freq_all * scale
            rescaled_freq_a = freq_a * scale
            rescaled_freq_b = freq_b * scale
            rescaled_freq_ab = freq_ab * scale
            if rescaled_freq_a > 1.0 and rescaled_freq_b > 1.0:
                g2_stats[ (firstToken, secondToken) ] = self.getG2( freq_all, freq_ab, freq_a, freq_b )
        return g2_stats

    def combineSimilarityMatrices( self ):
        """Sum document, window, and collocation G2 scores into combined_g2.

        Document/window scores are symmetric (stored under the ordered key),
        while collocation scores are directional, so both key orders are
        queued and each resolved against the appropriate matrices.
        """
        self.logger.info( 'Combining similarity matrices...' )
        self.similarity.combined_g2 = {}

        keys_queued = []
        for key in self.similarity.document_g2:
            ( firstToken, secondToken ) = key
            otherKey = ( secondToken, firstToken )
            keys_queued.append( key )
            keys_queued.append( otherKey )
        for key in self.similarity.window_g2:
            ( firstToken, secondToken ) = key
            otherKey = ( secondToken, firstToken )
            keys_queued.append( key )
            keys_queued.append( otherKey )
        for key in self.similarity.collocation_g2:
            keys_queued.append( key )

        keys_processed = {}
        for key in keys_queued:
            keys_processed[ key ] = False
        for key in keys_queued:
            if not keys_processed[ key ]:
                keys_processed[ key ] = True
                ( firstToken, secondToken ) = key
                # Symmetric matrices are keyed with firstToken < secondToken.
                if firstToken < secondToken:
                    orderedKey = key
                else:
                    orderedKey = ( secondToken, firstToken )
                score = 0.0
                if orderedKey in self.similarity.document_g2:
                    score += self.similarity.document_g2[ orderedKey ]
                if orderedKey in self.similarity.window_g2:
                    score += self.similarity.window_g2[ orderedKey ]
                if key in self.similarity.collocation_g2:
                    score += self.similarity.collocation_g2[ key ]
                if score > 0.0:
                    self.similarity.combined_g2[ key ] = score
#-------------------------------------------------------------------------------#
def main():
    """Command-line entry point: merge config-file and CLI settings, then run."""
    parser = argparse.ArgumentParser( description = 'Compute term similarity for TermiteVis.' )
    parser.add_argument( 'config_file'          , type = str, default = None        , help = 'Path of Termite configuration file.' )
    parser.add_argument( '--data-path'          , type = str, dest = 'data_path'    , help = 'Override data path.' )
    parser.add_argument( '--sliding-window-size', type = int, dest = 'sliding_window_size', help = 'Override sliding window size.' )
    parser.add_argument( '--logging'            , type = int, dest = 'logging'      , help = 'Override logging level.' )
    args = parser.parse_args()

    data_path = None
    sliding_window_size = None
    logging_level = 20  # logging.INFO

    # Read in default values from the configuration file
    if args.config_file is not None:
        config = ConfigParser.RawConfigParser()
        config.read( args.config_file )
        if config.has_section( 'Termite' ) and config.has_option( 'Termite', 'path' ):
            data_path = config.get( 'Termite', 'path' )
        if config.has_section( 'Termite' ) and config.has_option( 'Termite', 'sliding_window_size' ):
            # BUGFIX: get() returns a string; the window size feeds the '%d'
            # log format and integer range arithmetic in execute(), so it
            # must be parsed with getint() (as already done for 'logging').
            sliding_window_size = config.getint( 'Termite', 'sliding_window_size' )
        if config.has_section( 'Misc' ) and config.has_option( 'Misc', 'logging' ):
            logging_level = config.getint( 'Misc', 'logging' )

    # Read in user-specified values from the program arguments
    if args.data_path is not None:
        data_path = args.data_path
    if args.sliding_window_size is not None:
        sliding_window_size = args.sliding_window_size
    if args.logging is not None:
        logging_level = args.logging

    ComputeSimilarity( logging_level ).execute( data_path, sliding_window_size )

if __name__ == '__main__':
    main()
| |
#
# Copyright (c) 2004 Conectiva, Inc.
#
# Written by Gustavo Niemeyer <niemeyer@conectiva.com>
#
# This file is part of Smart Package Manager.
#
# Smart Package Manager is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# Smart Package Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Smart Package Manager; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from smart.transaction import Transaction, PolicyUpgrade, \
UPGRADE, INSTALL, REMOVE
from smart.option import OptionParser
from smart.cache import Package
from smart import *
import cPickle
import string
import re
import os
# Help texts shown by "smart upgrade --help"; _() marks them for translation.
USAGE=_("smart upgrade [options] [package] ...")

DESCRIPTION=_("""
This command will upgrade one or more packages which
are currently installed in the system. If no packages
are given, all installed packages will be checked.
If pkgname is prefixed by a '+', it will be installed
even if it was not currently installed in the system.
If pkgname is prefixed with a '-', it will be removed.
""")

EXAMPLES=_("""
smart upgrade
smart upgrade pkgname
smart upgrade '*kgnam*'
smart upgrade pkgname-1.0
smart upgrade pkgname-1.0-1
smart upgrade pkgname1 pkgname2
""")
def option_parser():
    """Build the OptionParser describing all 'smart upgrade' options."""
    parser = OptionParser(usage=USAGE,
                          description=DESCRIPTION,
                          examples=EXAMPLES)
    # Stop option parsing at the first positional argument so package names
    # prefixed with '-' (removal requests) are not mistaken for options.
    parser.allow_interspersed_args = False
    parser.add_option("--stepped", action="store_true",
                      help=_("split operation in steps"))
    parser.add_option("--urls", action="store_true",
                      help=_("dump needed urls and don't commit operation"))
    parser.add_option("--metalink", action="store_true",
                      help=_("dump metalink xml and don't commit operation"))
    parser.add_option("--download", action="store_true",
                      help=_("download packages and don't commit operation"))
    parser.add_option("--update", action="store_true",
                      help=_("update channel information before trying "
                             "to upgrade"))
    parser.add_option("--check", action="store_true",
                      help=_("just check if there are upgrades to be done"))
    parser.add_option("--check-update", action="store_true",
                      help=_("check if there are upgrades to be done, and "
                             "update the known upgrades"))
    # BUGFIX: the adjacent string literals previously concatenated to
    # "...about changes,when possible" (missing space in the help text).
    parser.add_option("--explain", action="store_true",
                      help=_("include additional information about changes, "
                             "when possible"))
    parser.add_option("--flag", action="store", default=None,
                      help=_("check only upgrades with the given flag set"))
    parser.add_option("-y", "--yes", action="store_true",
                      help=_("do not ask for confirmation"))
    parser.add_option("--dump", action="store_true",
                      help=_("dump package names and versions to stderr but "
                             "don't commit operation"))
    return parser
def parse_options(argv):
    """Parse argv for the upgrade command; leftover positionals go to opts.args."""
    opts, remaining = option_parser().parse_args(argv)
    opts.args = remaining
    return opts
def main(ctrl, opts):
    """Entry point for 'smart upgrade'.

    Builds an upgrade transaction from the package arguments (or from all
    installed packages when none are given), then either reports pending
    upgrades (--check / --check-update) or dumps/downloads/commits the
    transaction depending on the selected mode.
    """
    # Argument check
    opts.check_args_of_option("flag", 1)

    if opts.explain:
        sysconf.set("explain-changesets", True, soft=True)

    # Refresh channel data first when requested (or auto-update is
    # configured); otherwise just reload the cached channel information.
    if opts.update or sysconf.get("auto-update"):
        from smart.commands import update
        updateopts = update.parse_options([])
        update.main(ctrl, updateopts)
    else:
        ctrl.reloadChannels()

    cache = ctrl.getCache()
    trans = Transaction(cache, PolicyUpgrade)

    if opts.args:
        for arg in opts.args:
            # '+pkg' forces an install, '-pkg' a removal; default is upgrade.
            op = UPGRADE
            if arg.startswith('+'):
                arg = arg[1:]
                op = INSTALL
            if arg.startswith('-'):
                arg = arg[1:]
                op = REMOVE

            ratio, results, suggestions = ctrl.search(arg)

            if not results:
                if suggestions:
                    # Keep only suggestions that make sense for the requested
                    # operation (already installed, unless installing).
                    dct = {}
                    for r, obj in suggestions:
                        if isinstance(obj, Package):
                            if obj.installed or op == INSTALL:
                                dct[obj] = True
                        else:
                            for pkg in obj.packages:
                                if pkg.installed or op == INSTALL:
                                    dct[pkg] = True
                    if not dct:
                        del suggestions[:]
                if suggestions:
                    raise Error, _("'%s' matches no packages. "
                                   "Suggestions:\n%s") % \
                                 (arg, "\n".join(["    "+str(x) for x in dct]))
                else:
                    raise Error, _("'%s' matches no packages") % arg

            foundany = False
            foundinstalled = False
            # First pass: exact Package objects from the search results.
            for obj in results:
                if isinstance(obj, Package):
                    if (obj.installed or op == INSTALL) and (not opts.flag or pkgconf.testFlag(opts.flag, obj)):
                        trans.enqueue(obj, op)
                        foundinstalled = obj.installed
                        foundany = True
            if not foundany:
                # Second pass: provides/virtual objects expand to their packages.
                for obj in results:
                    if not isinstance(obj, Package):
                        for pkg in obj.packages:
                            if (pkg.installed or op == INSTALL) and (not opts.flag or pkgconf.testFlag(opts.flag, pkg)):
                                foundinstalled = pkg.installed
                                trans.enqueue(pkg, op)
                                foundany = True
            if not foundinstalled and op != INSTALL:
                iface.warning(_("'%s' matches no installed packages") % arg)
    else:
        # No arguments: consider every installed package for upgrade.
        for pkg in cache.getPackages():
            if pkg.installed and (not opts.flag or pkgconf.testFlag(opts.flag, pkg)):
                trans.enqueue(pkg, UPGRADE)

    iface.showStatus(_("Computing transaction..."))
    trans.run()

    # NOTE(review): this parses as '(trans and opts.check) or opts.check_update',
    # so with --check-update the branch runs even for an empty transaction -
    # confirm that is intended before adding parentheses.
    if trans and opts.check or opts.check_update:
        checkfile = os.path.expanduser("~/.smart/upgradecheck")
        if os.path.isfile(checkfile):
            # 'file' shadows the builtin of the same name (kept as-is).
            file = open(checkfile)
            checkstate = cPickle.load(file)
            file.close()
        else:
            checkstate = None
        changeset = trans.getChangeSet()
        state = changeset.getPersistentState()
        if opts.check_update:
            # Persist the current upgrade set for later --check comparisons.
            dirname = os.path.dirname(checkfile)
            if not os.path.isdir(dirname):
                os.makedirs(dirname)
            file = open(checkfile, "w")
            cPickle.dump(state, file, 2)
            file.close()
        if not state:
            iface.showStatus(_("No interesting upgrades available."))
            return 2
        elif checkstate:
            # Compare against the previously saved state; any differing entry
            # means new upgrades appeared since the last --check-update.
            for entry in state:
                if checkstate.get(entry) != state[entry]:
                    break
            else:
                iface.showStatus(_("There are pending upgrades!"))
                return 1
        iface.showStatus(_("There are new upgrades available!"))
    elif not trans:
        iface.showStatus(_("No interesting upgrades available."))
    else:
        iface.hideStatus()
        confirm = not opts.yes
        # Mutually exclusive output/commit modes.
        if opts.urls:
            ctrl.dumpTransactionURLs(trans)
        elif opts.metalink:
            ctrl.dumpTransactionMetalink(trans)
        elif opts.dump:
            ctrl.dumpTransactionPackages(trans, install=True)
        elif opts.download:
            ctrl.downloadTransaction(trans, confirm=confirm)
        elif opts.stepped:
            ctrl.commitTransactionStepped(trans, confirm=confirm)
        else:
            ctrl.commitTransaction(trans, confirm=confirm)
# vim:ts=4:sw=4:et
| |
"""Tests for the TP-Link component."""
from typing import Any, Dict
from unittest.mock import MagicMock, patch
from pyHS100 import SmartBulb, SmartDevice, SmartDeviceException, SmartPlug
import pytest
from homeassistant import config_entries, data_entry_flow
from homeassistant.components import tplink
from homeassistant.components.tplink.common import (
CONF_DIMMER,
CONF_DISCOVERY,
CONF_LIGHT,
CONF_SWITCH,
)
from homeassistant.const import CONF_HOST
from homeassistant.setup import async_setup_component
from tests.common import MockConfigEntry, MockDependency, mock_coro
MOCK_PYHS100 = MockDependency("pyHS100")
async def test_creating_entry_tries_discover(hass):
    """Test setting up does discovery."""
    with MOCK_PYHS100, patch(
        "homeassistant.components.tplink.async_setup_entry",
        return_value=mock_coro(True),
    ) as mock_setup, patch(
        # Discovery is stubbed so the test performs no network traffic.
        "homeassistant.components.tplink.common.Discover.discover",
        return_value={"host": 1234},
    ):
        result = await hass.config_entries.flow.async_init(
            tplink.DOMAIN, context={"source": config_entries.SOURCE_USER}
        )
        # Confirmation form
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM

        result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        await hass.async_block_till_done()

        # Confirming the flow must have run the component setup exactly once.
        assert len(mock_setup.mock_calls) == 1
async def test_configuring_tplink_causes_discovery(hass):
    """An empty tplink config section should trigger device discovery."""
    with MOCK_PYHS100, patch(
        "homeassistant.components.tplink.common.Discover.discover",
        return_value={"host": 1234},
    ) as discover:
        await async_setup_component(hass, tplink.DOMAIN, {tplink.DOMAIN: {}})
        await hass.async_block_till_done()

        assert len(discover.mock_calls) == 1
@pytest.mark.parametrize(
    "name,cls,platform",
    [
        ("pyHS100.SmartPlug", SmartPlug, "switch"),
        ("pyHS100.SmartBulb", SmartBulb, "light"),
    ],
)
@pytest.mark.parametrize("count", [1, 2, 3])
async def test_configuring_device_types(hass, name, cls, platform, count):
    """Test that light or switch platform list is filled correctly."""
    with patch(
        "homeassistant.components.tplink.common.Discover.discover"
    ) as discover, patch(
        # _query_helper is patched so constructing devices does no I/O.
        "homeassistant.components.tplink.common.SmartDevice._query_helper"
    ):
        # 'count' fake devices of the parametrized class, each under a
        # distinct discovery key.
        discovery_data = {
            "123.123.123.{}".format(c): cls("123.123.123.123") for c in range(count)
        }
        discover.return_value = discovery_data
        await async_setup_component(hass, tplink.DOMAIN, {tplink.DOMAIN: {}})
        await hass.async_block_till_done()

        assert len(discover.mock_calls) == 1
        # Every discovered device must land in its platform's device list.
        assert len(hass.data[tplink.DOMAIN][platform]) == count
class UnknownSmartDevice(SmartDevice):
    """Stub SmartDevice subclass that cannot be classified as bulb or plug."""

    @property
    def has_emeter(self) -> bool:
        """Stub; always yields None."""
        return None

    def turn_off(self) -> None:
        """Stub; no-op."""
        return None

    def turn_on(self) -> None:
        """Stub; no-op."""
        return None

    @property
    def is_on(self) -> bool:
        """Stub; always yields None."""
        return None

    @property
    def state_information(self) -> Dict[str, Any]:
        """Stub; always yields None."""
        return None
async def test_configuring_devices_from_multiple_sources(hass):
    """Test static and discover devices are not duplicated."""
    with patch(
        "homeassistant.components.tplink.common.Discover.discover"
    ) as discover, patch(
        # Patch out device I/O so constructing SmartDevices needs no network.
        "homeassistant.components.tplink.common.SmartDevice._query_helper"
    ):
        # One device that raises on get_sysinfo and one of unknown type:
        # both must be skipped without breaking setup of the others.
        discover_device_fail = SmartPlug("123.123.123.123")
        discover_device_fail.get_sysinfo = MagicMock(side_effect=SmartDeviceException())

        discover.return_value = {
            "123.123.123.1": SmartBulb("123.123.123.1"),
            "123.123.123.2": SmartPlug("123.123.123.2"),
            "123.123.123.3": SmartBulb("123.123.123.3"),
            "123.123.123.4": SmartPlug("123.123.123.4"),
            "123.123.123.123": discover_device_fail,
            "123.123.123.124": UnknownSmartDevice("123.123.123.124"),
        }
        # Static entries .1 and .2 overlap with discovered hosts and must
        # not be duplicated; .22 is a dimmer known only from static config.
        await async_setup_component(
            hass,
            tplink.DOMAIN,
            {
                tplink.DOMAIN: {
                    CONF_LIGHT: [{CONF_HOST: "123.123.123.1"}],
                    CONF_SWITCH: [{CONF_HOST: "123.123.123.2"}],
                    CONF_DIMMER: [{CONF_HOST: "123.123.123.22"}],
                }
            },
        )
        await hass.async_block_till_done()

        assert len(discover.mock_calls) == 1
        # Expected: the discovered bulbs plus the configured dimmer.
        assert len(hass.data[tplink.DOMAIN][CONF_LIGHT]) == 3
        # Expected: the discovered plugs; failing/unknown devices are dropped.
        assert len(hass.data[tplink.DOMAIN][CONF_SWITCH]) == 2
async def test_is_dimmable(hass):
    """Test that is_dimmable switches are correctly added as lights."""
    with patch(
        "homeassistant.components.tplink.common.Discover.discover"
    ) as discover, patch(
        "homeassistant.components.tplink.light.async_setup_entry",
        return_value=mock_coro(True),
    ) as setup, patch(
        # No real device I/O during the test.
        "homeassistant.components.tplink.common.SmartDevice._query_helper"
    ), patch(
        # Force the plug to report itself as dimmable.
        "homeassistant.components.tplink.common.SmartPlug.is_dimmable", True
    ):
        dimmable_switch = SmartPlug("123.123.123.123")
        discover.return_value = {"host": dimmable_switch}

        await async_setup_component(hass, tplink.DOMAIN, {tplink.DOMAIN: {}})
        await hass.async_block_till_done()

        assert len(discover.mock_calls) == 1
        assert len(setup.mock_calls) == 1
        # The dimmable plug must be registered as a light, not a switch.
        assert len(hass.data[tplink.DOMAIN][CONF_LIGHT]) == 1
        assert not hass.data[tplink.DOMAIN][CONF_SWITCH]
async def test_configuring_discovery_disabled(hass):
    """Discovery must not run when the configuration disables it."""
    with MOCK_PYHS100, patch(
        "homeassistant.components.tplink.async_setup_entry",
        return_value=mock_coro(True),
    ) as setup_entry, patch(
        "homeassistant.components.tplink.common.Discover.discover", return_value=[]
    ) as discover:
        conf = {tplink.DOMAIN: {tplink.CONF_DISCOVERY: False}}
        await async_setup_component(hass, tplink.DOMAIN, conf)
        await hass.async_block_till_done()

        assert discover.call_count == 0
        assert setup_entry.call_count == 1
async def test_platforms_are_initialized(hass):
    """Test that platforms are initialized per configuration array."""
    config = {
        tplink.DOMAIN: {
            CONF_DISCOVERY: False,
            CONF_LIGHT: [{CONF_HOST: "123.123.123.123"}],
            CONF_SWITCH: [{CONF_HOST: "321.321.321.321"}],
        }
    }
    with patch(
        "homeassistant.components.tplink.common.Discover.discover"
    ) as discover, patch(
        "homeassistant.components.tplink.common.SmartDevice._query_helper"
    ), patch(
        "homeassistant.components.tplink.light.async_setup_entry",
        return_value=mock_coro(True),
    ) as light_setup, patch(
        "homeassistant.components.tplink.switch.async_setup_entry",
        return_value=mock_coro(True),
    ) as switch_setup, patch(
        "homeassistant.components.tplink.common.SmartPlug.is_dimmable", False
    ):
        # Patching is_dimmable is necessary to avoid misdetection as light.
        await async_setup_component(hass, tplink.DOMAIN, config)
        await hass.async_block_till_done()

        # Discovery is disabled in the config, so only the static entries
        # may initialize their respective platforms.
        assert discover.call_count == 0
        assert light_setup.call_count == 1
        assert switch_setup.call_count == 1
async def test_no_config_creates_no_entry(hass):
    """Setting up without a tplink section must not create a config entry."""
    with MOCK_PYHS100, patch(
        "homeassistant.components.tplink.async_setup_entry",
        return_value=mock_coro(True),
    ) as setup_entry:
        await async_setup_component(hass, tplink.DOMAIN, {})
        await hass.async_block_till_done()

        assert setup_entry.call_count == 0
@pytest.mark.parametrize("platform", ["switch", "light"])
async def test_unload(hass, platform):
    """Test that the async_unload_entry works."""
    # As we have currently no configuration, we just pass the domain here.
    entry = MockConfigEntry(domain=tplink.DOMAIN)
    entry.add_to_hass(hass)
    with patch(
        "homeassistant.components.tplink.common.SmartDevice._query_helper"
    ), patch(
        # Adjacent literals concatenate to the per-platform setup target,
        # e.g. "homeassistant.components.tplink.switch.async_setup_entry".
        "homeassistant.components.tplink.{}" ".async_setup_entry".format(platform),
        return_value=mock_coro(True),
    ) as light_setup:
        config = {
            tplink.DOMAIN: {
                platform: [{CONF_HOST: "123.123.123.123"}],
                CONF_DISCOVERY: False,
            }
        }
        assert await async_setup_component(hass, tplink.DOMAIN, config)
        await hass.async_block_till_done()

        assert len(light_setup.mock_calls) == 1
        assert tplink.DOMAIN in hass.data

        # Unloading must succeed and clear the component's stored data.
        assert await tplink.async_unload_entry(hass, entry)
        assert not hass.data[tplink.DOMAIN]
| |
itera=input("How many results do you want? \n")
for iteracje in range(0,itera):
#**************************************************************************************************
#Program description *
#Simulation of chromosome teritory *
#licence: CC BY-NC-SA 3.0 *
#when you used it please cite: Plos .... *
#**************************************************************************************************
# -*- coding: cp1250 -*-
from visual import *
import time
import random
import gc
from array import *
import datetime
###############################################################################
#************************* PROGRAM PARAMETERS - can be modified *******************************
l_arm_c=[7,7,6,6,6,6,4,4,3,3] #length of chromosome's arm before decondensation, seperate by coma
l_arm_d=[37,29,25,20,8,38,30,35,29,20] #length of chromosome's arm after decondensation, separeta by coma
chr_pair=5 #number of chromosome pairs
n_arm=2 #how many arms should be colored (1 or 2)
n_chr=1 #which pair of chromosome sholud be colored - 0 if none
n_chr_2=0 #second pair of chromosome which can be colored - 0 if none
min_rad_nu=6.8 #minimal radius of nucleus
max_rad_nu=7 #maksimal radius of nucleus
min_vol_no=0.05 #minimum occupancy of nucleus by nucleolus
max_vol_no=0.15 #maksimum occupancy of nucleus by nucleolus
rad_bead=0.5 #radius of beads
eps_1=0.004 #admissible distance from the beads with the same chromosome
eps_2=0.05 #admissible distance from the beads with diffrent chromosome
trsp=1 #transparency of non-colore beads
multi=1 #chromosome size multiplier
#****************** END PROGRAM PARAMETERS *********************************
#****************** variables in the program - DO NOT MODIFY WITHOUT UNDERSTANDING THE CODE *****************************
licznik=0 # global counter of placement attempts (incremented in bead_generate)
par_12=0.95 # NOTE(review): appears unused in this script
lb_ram=n_arm # number of arms to color (alias of n_arm)
tab = [[0 for col in range(90000)] for row in range(10)] #the array to save the coordinates of centers of Domains
ind=19 # index of the last occupied column in tab; centromere() fills columns 0..chr_pair*4-1 (=19)
#the array for storing coordinates of the points
xyz = [[0 for col in range(chr_pair*4)] for row in range(4)]
#the array to write the length of chromosomes before decondensation
l_ch=[0 for col in range(chr_pair*4)]
#program parameter
fk=0
for ft in range(0,2*chr_pair,2):
    fh=ft+(chr_pair*2)
    l_ch[ft]=l_ch[ft+1]=l_arm_c[fk]
    fk=fk+1
    # NOTE(review): fk advances only once per iteration, so consecutive
    # iterations read overlapping entries of l_arm_c (indices 0,1 then 1,2, ...).
    # Confirm whether fk should advance twice so every arm length is used once.
    l_ch[fh]=l_ch[fh+1]=l_arm_c[fk]
#the array of arms length
l_ram=[0 for col in range (chr_pair*4)]
fk=0
for ft in range(0,4*chr_pair,2):
    l_ram[ft]=l_ram[ft+1]=l_arm_d[fk]
    fk=fk+1
print(l_ram)
kr = [0 for col in range(chr_pair*4)] #the array of chromosomes length - a counter for length
ty=[0 for col in range(chr_pair*4)]
#the array of colors and transparency of domains
k_kol=[[0 for col in range(4)] for row in range(chr_pair*4)] # rows: one per arm; cols: R,G,B,alpha
for re in range(0, chr_pair*4): # NOTE(review): loop variable shadows the common module name 're'
    for re_2 in range (0,3):
        k_kol[re][re_2]=1 # default color: white
    k_kol[re][3]=trsp # default alpha: trsp
# Assign highlight colors to the selected chromosome pair(s).
# k_kol row layout: rows 0..chr_pair*2-1 are the first arm set,
# rows chr_pair*2..chr_pair*4-1 are the matching second arm set.
if (n_chr != 0 and lb_ram==2):
    # two-arm mode: first arms of the selected pair in greens
    k_kol[n_chr-1][0]=0.12
    k_kol[n_chr-1][1]=0.55
    k_kol[n_chr-1][2]=0.1
    k_kol[n_chr-1][3]=1
    k_kol[n_chr][0]=0.0
    k_kol[n_chr][1]=0.1
    k_kol[n_chr][2]=0.0
    k_kol[n_chr][3]=1
    # matching second arms (offset chr_pair*2) in reds
    k_kol[(n_chr-1)+(chr_pair*2)][0]=0.6
    k_kol[(n_chr-1)+(chr_pair*2)][1]=0.0
    k_kol[(n_chr-1)+(chr_pair*2)][2]=0.0
    k_kol[(n_chr-1)+(chr_pair*2)][3]=1
    k_kol[n_chr+(chr_pair*2)][0]=0.8
    k_kol[(n_chr)+(chr_pair*2)][1]=0.04
    k_kol[(n_chr)+(chr_pair*2)][2]=0.04
    k_kol[(n_chr)+(chr_pair*2)][3]=1
    if (n_chr_2 != 0):
        # optional second highlighted pair, same green/red scheme
        k_kol[n_chr_2-1][0]=0.12
        k_kol[n_chr_2-1][1]=0.55
        k_kol[n_chr_2-1][2]=0.1
        k_kol[n_chr_2-1][3]=1
        k_kol[n_chr_2][0]=0.0
        k_kol[n_chr_2][1]=0.1
        k_kol[n_chr_2][2]=0.0
        k_kol[n_chr_2][3]=1
        k_kol[(n_chr_2-1)+(chr_pair*2)][0]=0.6
        k_kol[(n_chr_2-1)+(chr_pair*2)][1]=0.0
        k_kol[(n_chr_2-1)+(chr_pair*2)][2]=0.0
        k_kol[(n_chr_2-1)+(chr_pair*2)][3]=1
        k_kol[n_chr_2+(chr_pair*2)][0]=0.8
        k_kol[(n_chr_2)+(chr_pair*2)][1]=0.04
        k_kol[(n_chr_2)+(chr_pair*2)][2]=0.04
        k_kol[(n_chr_2)+(chr_pair*2)][3]=1
if (lb_ram==1 and n_chr!=0):
    # single-arm mode: both arm halves of a chromosome share one color
    k_kol[n_chr-1][0]=k_kol[(n_chr-1)+(chr_pair*2)][0]=0.12
    k_kol[n_chr-1][1]=k_kol[(n_chr-1)+(chr_pair*2)][1]=0.55
    k_kol[n_chr-1][2]=k_kol[(n_chr-1)+(chr_pair*2)][2]=0.1
    k_kol[n_chr-1][3]=k_kol[(n_chr-1)+(chr_pair*2)][3]=1
    k_kol[n_chr][0]=k_kol[n_chr+(chr_pair*2)][0]=0.6
    k_kol[n_chr][1]=k_kol[(n_chr)+(chr_pair*2)][1]=0.0
    k_kol[n_chr][2]=k_kol[(n_chr)+(chr_pair*2)][2]=0.0
    k_kol[n_chr][3]=k_kol[(n_chr)+(chr_pair*2)][3]=1
    if (n_chr_2 != 0):
        k_kol[n_chr_2-1][0]=k_kol[n_chr_2][0]=0.12
        k_kol[n_chr_2-1][1]=k_kol[n_chr_2][1]=0.55
        k_kol[n_chr_2-1][2]=k_kol[n_chr_2][2]=0.1
        k_kol[n_chr_2-1][3]=k_kol[n_chr_2][3]=1
        k_kol[(n_chr_2-1)+(chr_pair*2)][0]=k_kol[n_chr_2+(chr_pair*2)][0]=0.6
        k_kol[(n_chr_2-1)+(chr_pair*2)][1]=k_kol[(n_chr_2)+(chr_pair*2)][1]=0.0
        k_kol[(n_chr_2-1)+(chr_pair*2)][2]=k_kol[(n_chr_2)+(chr_pair*2)][2]=0.0
        k_kol[(n_chr_2-1)+(chr_pair*2)][3]=k_kol[(n_chr_2)+(chr_pair*2)][3]=1
#****************** END variables in the program **********************************
#Writing parameters to the file
# Output file name is timestamped so repeated runs never overwrite each other.
date_time_str = datetime.datetime.now().strftime('%d-%m-%Y_%H^%M^%S')
plik="workfile "+date_time_str+".txt"
f = open(plik, 'w')
f.write(str(rad_bead))
f.write("\n")
f.write(str(2*chr_pair))
f.write("\n")
# one RGBA value per line for every arm row of k_kol
for po in range(0,chr_pair*4):
    for po2 in range(0, 4):
        f.write(str(k_kol[po][po2]))
        f.write("\n")
gc.collect()
#generating nucleus
def nucleus():
    """Pick the nucleus radius, record it in the output file and draw a
    translucent sphere centered at the origin.

    Sets the globals x_s, y_s, z_s (center, fixed at 0) and r_s (radius).
    """
    global x_s, y_s, z_s, r_s
    r_s=round(random.uniform(min_rad_nu,max_rad_nu),5) # radius within configured bounds
    f.write(str(r_s))
    f.write("\n")
    x_s=0
    y_s=0
    z_s=0
    # low opacity so the beads inside remain visible
    ball = sphere(pos=(x_s,y_s,z_s), radius=r_s, material=materials.emissive, opacity=0.2)
nucleus()
#generating nucleolus
def nucleolus():
    """Generate the nucleolus: pick a radius from the configured
    volume-occupancy range and rejection-sample a position that keeps the
    whole nucleolus (plus an eps_2 margin) inside the nucleus.

    Sets the globals x_j, y_j, z_j (center) and r_j (radius).
    """
    global x_j, y_j, z_j, r_j
    # Radius bounds derived from the min/max fraction of the nucleus volume
    # occupied by the nucleolus. Bug fix: the lower bound previously used
    # max_vol_no too, which made random.uniform degenerate and left
    # min_vol_no unused.
    r_min=(min_vol_no*(r_s**(3)))**(1/3.0)
    r_max=(max_vol_no*(r_s**(3)))**(1/3.0)
    r_j=round(random.uniform(r_min, r_max),3) #generate the size of the nucleolus
    #generate the position of the nucleolus (rejection sampling)
    t=True
    while (t==True):
        x_j=round(random.uniform((x_s-(r_s-r_j-1)),(x_s+(r_s-r_j-2))),5)
        y_j=round(random.uniform((y_s-(r_s-r_j-1)),(y_s+(r_s-r_j-2))),5)
        z_j=round(random.uniform((z_s-(r_s-r_j-1)),(z_s+(r_s-r_j-2))),5)
        # accept only if fully inside the nucleus with eps_2 clearance
        if (sqrt((x_j-0)**2+(y_j)**2+z_j**2)<(r_s-r_j-eps_2)):
            t=False
nucleolus()
# persist nucleolus center and radius for downstream processing
f.write(str(x_j))
f.write("\n")
f.write(str(y_j))
f.write("\n")
f.write(str(z_j))
f.write("\n")
f.write(str(r_j))
f.write("\n")
# draw the nucleolus as an opaque orange sphere
ball_2 = sphere(pos=(x_j,y_j,z_j), radius=r_j, color=color.orange, material=materials.chrome, opacity=1)
gc.collect()
kol_1=1 # NOTE(review): kol_1/kol_2 appear unused in the rest of this script
kol_2=0.5
dfg=0 # counter of beads drawn so far (incremented in bead)
#function responsible for generating domains
def bead(x_1, y_1, z_1, k1, k2, k3, k4):
    """Draw one chromatin-domain bead at (x_1, y_1, z_1) with RGB color
    (k1, k2, k3) and opacity k4, and count it in the global dfg."""
    global dfg
    dfg=dfg+1
    kul = sphere(pos=(x_1,y_1,z_1), radius=rad_bead, color=(k1, k2, k3), opacity=k4)
gc.collect()
#***************************** Function in program ***************************************
#function to check the output of the nucleus
def is_in_nu(nr_k):
    """Accept or reject the candidate position of domain nr_k.

    If the candidate lies strictly inside the nucleus (with a
    rad_bead + eps_2 margin) the position is appended to tab and the
    success flag k is set; otherwise the previous coordinates (globals
    x, y, z) are restored and k is cleared.
    """
    global ind
    global L
    global H
    global W
    global tab
    global k
    global xyz
    global r_s, rad_bead
    if (((r_s-rad_bead-eps_2)>sqrt((xyz[0][nr_k])**2+(xyz[1][nr_k])**2+(xyz[2][nr_k])**2))):
        ind=ind+1
        tab[0][ind]=nr_k #save number of domain
        tab[1][ind]=xyz[0][nr_k]
        tab[2][ind]=xyz[1][nr_k]
        tab[3][ind]=xyz[2][nr_k]
        k=1 # accepted
    else:
        # outside the nucleus: roll back to the previous position
        xyz[0][nr_k]=x
        xyz[1][nr_k]=y
        xyz[2][nr_k]=z
        k=0 # rejected
#function to generate starting points - centromeres
def centromere():
    """Place the starting bead (centromere) of every chromosome arm.

    For each of the chr_pair*2 chromosomes a random position is drawn and
    mirrored to the matching arm at index i+chr_pair*2; candidates are
    re-drawn until they clear the nucleolus, stay inside the nucleus and
    keep distance from previously placed centromeres.
    """
    global xyz, r_s, rad_bead, x_j, y_j, z_j
    for i in range (0,chr_pair*2):
        ij=i+(chr_pair*2) # index of the paired arm sharing this centromere
        print("i: ",i)
        spr=0 # acceptance flag/counter for the rejection sampling below
        while(spr==0):
            #generate x
            xyz[0][i]=round(random.uniform((-r_s+4*rad_bead),(r_s-4*rad_bead)),5)
            xyz[0][ij]=xyz[0][i]
            tab[0][i]=i
            tab[0][ij]=ij
            tab[1][i]=xyz[0][i]
            tab[1][ij]=xyz[0][i]
            #generate y
            xyz[1][i]=round(random.uniform((-r_s+4*rad_bead),(r_s-4*rad_bead)),5)
            xyz[1][ij]=xyz[1][i]
            tab[2][i]=xyz[1][i]
            tab[2][ij]=xyz[1][i]
            #generate z
            xyz[2][i]=round(random.uniform((-r_s+4*rad_bead),(r_s-4*rad_bead)),5)
            xyz[2][ij]=xyz[2][i]
            tab[3][i]=xyz[2][i]
            tab[3][ij]=xyz[2][i]
            if (i>0):
                # must clear the nucleolus ...
                if (sqrt((xyz[0][i]-x_j)**2+(xyz[1][i]-y_j)**2+(xyz[2][i]-z_j)**2)>(r_j+rad_bead+2*eps_2)):
                    # ... and stay inside the nucleus ...
                    if (sqrt((xyz[0][i])**2+(xyz[1][i])**2+(xyz[2][i])**2)<(r_s-(rad_bead+5*eps_2))):
                        # ... and keep distance from all earlier centromeres
                        for j in range (0,i):
                            if (sqrt((xyz[0][i]-xyz[0][j])**2+(xyz[1][i]-xyz[1][j])**2+(xyz[2][i]-xyz[2][j])**2)>(2*rad_bead+2*eps_2)):
                                spr=spr+1
                                if (spr>(i-2)):
                                    spr=1
                                    break
                        # NOTE(review): the original indentation of this line is
                        # ambiguous; as placed here it accepts the candidate after
                        # the distance loop regardless of the count -- confirm
                        # whether acceptance should be gated on the loop result.
                        spr=1
            else:
                # first centromere: only nucleus and nucleolus constraints apply
                if (sqrt((xyz[0][i]-x_s)**2+(xyz[1][i]-y_s)**2+(xyz[2][i]-z_s)**2)<(r_s-(rad_bead+4*eps_2))):
                    if (sqrt((xyz[0][i]-x_j)**2+(xyz[1][i]-y_j)**2+(xyz[2][i]-z_j)**2)>(r_j+rad_bead+2*eps_2)):
                        spr=1
                        break
#function to generate the direction of moving for the new domain
def new_domain(nr_k_2):
    """Displace domain nr_k_2 by a random step of at most one bead
    diameter (plus tolerance) independently along each coordinate axis."""
    global xyz
    for axis in range(0, 3):
        # random sign first, then random magnitude -- this keeps the RNG
        # call order identical to the original implementation
        if bool(random.getrandbits(1)):
            xyz[axis][nr_k_2] = xyz[axis][nr_k_2] + round(random.uniform(0, 2*rad_bead+2*eps_1), 5)
        else:
            xyz[axis][nr_k_2] = xyz[axis][nr_k_2] - round(random.uniform(0, 2*rad_bead+2*eps_1), 5)
def is_out_no():
    """Reject the candidate position of domain nr_k if it overlaps the
    nucleolus (within r_j + rad_bead + eps_2 of its center); on rejection
    restore the previous coordinates (x, y, z) and clear the flag k."""
    global xyz, nr_k, x_j, pot, k_1, r_j, r_k, k, eps_2
    if (sqrt((xyz[0][nr_k]-x_j)**2+(xyz[1][nr_k]-y_j)**2+(xyz[2][nr_k]-z_j)**2)<(r_j+rad_bead+eps_2)):
        pot=1
        xyz[0][nr_k]=x
        xyz[1][nr_k]=y
        xyz[2][nr_k]=z
        k=0
def dist_bead():
    """Reject the candidate position of domain nr_k if it collides with any
    already-placed bead in tab.

    Beads of the same chromosome (same arm index, or the paired arm offset
    by chr_pair*2) may come as close as 2*rad_bead - 2*eps_1; beads of a
    different chromosome need at least 2*rad_bead + eps_2. On rejection the
    previous coordinates (x, y, z) are restored and k is cleared.
    """
    global xyz, nr_k, ind, tab, eps_1, eps_2, x, y ,z,k, k_3
    for df in range (0, ind+1):
        if (df != k_3): # skip the bead the candidate grew from
            if (tab[0][df]==nr_k or (tab[0][df]==nr_k+(chr_pair*2) and nr_k< (chr_pair*2)) or (tab[0][df]==nr_k-(chr_pair*2) and nr_k>(chr_pair*2-1))):
                # same chromosome: tighter tolerance
                if (sqrt((xyz[0][nr_k]-tab[1][df])**2+(xyz[1][nr_k]-tab[2][df])**2+(xyz[2][nr_k]-tab[3][df])**2)<(2*rad_bead-2*eps_1)):
                    pot=1
                    xyz[0][nr_k]=x
                    xyz[1][nr_k]=y
                    xyz[2][nr_k]=z
                    k=0
                    break
            else:
                # different chromosome: wider clearance required
                if (sqrt((xyz[0][nr_k]-tab[1][df])**2+(xyz[1][nr_k]-tab[2][df])**2+(xyz[2][nr_k]-tab[3][df])**2)<(2*rad_bead+eps_2)):
                    pot=1
                    xyz[0][nr_k]=x
                    xyz[1][nr_k]=y
                    xyz[2][nr_k]=z
                    k=0
                    break
def dist_precurs():
    """Reject the candidate position of domain nr_k unless it lies at
    (approximately) one bead diameter from its precursor at (x, y, z):
    the center-to-center distance must fall within 2*rad_bead +/- eps_1.
    On rejection the previous coordinates are restored and k is cleared.
    """
    # Bug fix: the global list previously read "z, y, z, epx_1" -- 'x' was
    # missing (listed as a duplicate 'z') and 'eps_1' was misspelled.
    # Harmless for read-only access, but corrected for clarity.
    global xyz, nr_k, x, y, z, eps_1, eps_2, k
    # compute the distance once instead of twice
    dist = sqrt((xyz[0][nr_k]-x)**2+(xyz[1][nr_k]-y)**2+(xyz[2][nr_k]-z)**2)
    if (dist < (2*rad_bead-eps_1)) or (dist > (2*rad_bead+eps_1)):
        xyz[0][nr_k]=x
        xyz[1][nr_k]=y
        xyz[2][nr_k]=z
        k=0
def bead_generate(nr_k_1, ty, dl_ch):
    """Grow chromosome arm nr_k_1 by one bead.

    Starting from the arm's current tip (or, once the condensed length
    'dl_ch' is exceeded or 1000 attempts fail, from a randomly chosen
    bead of the same arm already stored in tab), repeatedly proposes a
    random displacement via new_domain() until the candidate passes all
    constraint checks (dist_bead, is_out_no, dist_precurs, is_in_nu).

    Parameters: nr_k_1 - arm index; ty - how many beads this arm has
    grown so far (shadows the global 'ty' list); dl_ch - arm length
    before decondensation.
    """
    global licznik
    global nr_k
    global y
    global x
    global z
    global xyz
    global k
    global k_1
    global k_2
    global k_3
    global tab
    global rad_bead
    global eps_1
    global eps_2
    global ind
    global k_3
    nr_k=nr_k_1
    k=0 # success flag, set by is_in_nu
    k_1=0 # attempt counter
    k_2=0
    k_3=ind+1 # index of the bead the candidate grows from
    while (k==0):
        k=1
        k_1=k_1+1
        k_2=0
        # restart from a random existing bead of this arm when the arm is
        # past its condensed length or too many attempts failed
        if (ty>dl_ch or k_1>1000):
            while (k_2==0):
                k_3=random.randrange(ind)
                if (tab[0][k_3]==nr_k):
                    xyz[0][nr_k]=tab[1][k_3]
                    xyz[1][nr_k]=tab[2][k_3]
                    xyz[2][nr_k]=tab[3][k_3]
                    k_2=1
        # remember the precursor position so rejected moves can be undone
        x=xyz[0][nr_k]
        y=xyz[1][nr_k]
        z=xyz[2][nr_k]
        global pot
        pot=0
        new_domain(nr_k)
        licznik=licznik+1
        # each check may roll the candidate back and clear k
        dist_bead()
        is_out_no()
        dist_precurs()
        if (k==1):
            is_in_nu(nr_k)
# ********************************* END Function *****************************************
# ********************************* Program Begin **************************************************
print("RUN...")
centromere()
# round-robin growth: each pass adds at most one bead to every arm until
# every arm reaches its decondensed length
for i in range (0,max(l_arm_d)+1):
    rate(5)
    for n_i in range (0, (4*chr_pair)): #generate all chromosomes
        if (kr[n_i]<l_ram[n_i]): #until kr = length of chromosome
            bead(xyz[0][n_i],xyz[1][n_i],xyz[2][n_i],k_kol[n_i][0],k_kol[n_i][1], k_kol[n_i][2], k_kol[n_i][3])
            kr[n_i]=kr[n_i]+1
            bead_generate(n_i,ty[n_i],l_ch[n_i])
            ty[n_i]=ty[n_i]+1
# dump every stored bead (id, x, y, z) to the work file
f.write(str(ind+1))
f.write("\n")
for i in range(0, ind+1):
    for iu in range(0,4):
        f.write(str(tab[iu][i]))
        f.write("\n")
f.close()
print("FINISH")
| |
# -*- coding: iso-8859-1 -*-
#
"""
"""
#Create a Python couchdb attachment:
#curl -H "Content-Type: application/python" -X PUT --data-binary "@-" http://server:5984/test1/$docid/attachment?rev=$rev < spam.rsheet
#Note: attachments must always be on a doc, and rev is mandatory.
#See: http://wiki.apache.org/couchdb/HTTP%5FDocument%5FAPI#Standalone_Attachments
#Retrieve it:
#curl http://server:5984/test1/$docid/attachment
import cStringIO
import hashlib
import datetime
import urllib
from gettext import gettext as _
from functools import partial
from itertools import islice, dropwhile
from amara.thirdparty import json, httplib2
from amara.lib.iri import split_uri_ref, split_fragment, relativize, absolutize, IriError, join, is_absolute
from akara.util import status_response, requested_imt, header_credentials, extract_auth
#from akara.util.moin import wiki_uri, wiki_normalize, WSGI_ORIG_BASE_HEADER, XML_IMT
from akara.util.moin import wiki_uri, wiki_normalize, ORIG_BASE_HEADER, XML_IMT
from akara.services import convert_body, service_method_dispatcher
# Extracts languages from Accept string, stripping q values
# e.g. "en-US,en;q=0.8" -> ['en-US', 'en']
EXTRACT_LANG = lambda x: [ s.split(';')[0] for s in x.split(',') ]
try:
from akara import logger
except ImportError:
logger = None
from zen import ZEN_SERVICE_ID
from zen.services import SERVICES, zservice, service_proxy
from zen.util import use
# Akara service ID of the moinrest peer endpoint
MOINREST_SERVICE_ID = 'http://purl.org/xml3k/akara/services/demo/moinrest'
#WSGI_ORIG_BASE_HEADER = 'HTTP_X_AKARA_WRAPPED_MOIN'
# WSGI environ key under which Akara exposes its peer-service lookup function
FIND_PEER_SERVICE_KEY = 'akara.FIND_PEER_SERVICE'
# Zen type URI marking a document as a resource-type definition
RESOURCE_TYPE_TYPE = u'http://purl.org/xml3k/akara/cms/resource-type'
class space(object):
    '''
    Adapter exposing a CouchDB database as a Zen "space": translates
    forwarded WSGI requests from Zen central into CouchDB HTTP calls
    (lookup, create/update, delete) and converts the responses back.
    '''
    def __init__(self, params, space_tag, logger, zensecret, initial_environ=None):
        '''
        initial_environ is the environment used from the first call to Zen for this space, which causes
        this space to be set up
        '''
        #Use akara discovery, via find_peer_service, to get the full base URI for this very
        #zen endpoint, and its moinrest peer
        #self.space_tag = kwargs['space_tag']
        if initial_environ:
            find_peer_service = initial_environ[FIND_PEER_SERVICE_KEY]
            self.ZEN_BASEURI = find_peer_service(ZEN_SERVICE_ID)
            #Start out with the first environment used for a call that activated this space
            #self.environ will be updated upon every invocation of this space
            self.environ = initial_environ
        #Set up class/instance params based on live Akara environment
        self.params = params
        self.remotedb = params['dburi'] # base URI of the backing CouchDB database
        self.space_tag = space_tag
        self.logger = logger
        self.zensecret = zensecret # shared secret used to verify rulesheet signatures
        return
    def setup_request(self, environ):
        '''
        Prepare to service a forwarded call from Zen central
        environ - the WSGI environ of the original invocation
        '''
        #Prepare the WSGI start_response function, which covers response headers and status
        self.resp_status = None
        self.resp_headers = None
        self.exc_info = None
        self.environ = environ
        #FIXME: Use akara to get the right cache location
        self.h = httplib2.Http('/tmp/.cache')
        self.h.force_exception_to_status_code = True
        #Set up utility environ variable for rulesheets
        self.environ['zen.RESOURCE_URI'] = join(self.ZEN_BASEURI, environ['PATH_INFO'].lstrip('/').split('/')[0])
        self.environ['couchdb.RESOURCE_URI'] = self.remotedb
        return
    def prep_slave_response(self, resp):
        '''
        Convert CouchDB response to Zen response

        Copies a conservative subset of headers and normalizes the status line.
        '''
        # Keep it conservative. etag? cache-control?
        COPY_HEADERS = lambda x: x[0] in ['date','content-type']
        self.resp_headers = filter(COPY_HEADERS,resp.iteritems())
        self.resp_status = status_response(resp.get('status') or '500')
    def resource_factory(self, path=None):
        '''
        Look up and retrieve a new resource based on WSGI environment or a uri path

        Returns a resource (or resource_type) instance, or '' when the
        document could not be retrieved.
        '''
        if path:
            docid = path
            # absolute URIs are reduced to a docid relative to the database
            if is_absolute(path):
                docid = relativize(path, self.remotedb)
        else:
            docid = self.environ['PATH_INFO'].lstrip('/').rsplit(self.space_tag, 1)[1].lstrip('/') #e.g. '/mydb/MyDoc' -> 'MyDoc'
        #resp, content = self.h.request(slave_uri + ';history', "GET", headers=auth_headers)
        if logger: logger.debug('query ' + repr((self.remotedb, docid, join(self.remotedb, docid))))
        resp, content = self.h.request(join(self.remotedb, urllib.quote_plus(docid)))
        if logger: logger.debug('resp ' + repr((content[:100], resp)))
        self.prep_slave_response(resp)
        if not (self.resp_status.startswith('2') or self.resp_status.startswith('304')):
            if logger: logger.debug("Error looking up resource: %s: %s\n" % (content, self.resp_status))
            return '' #No resource could be retrieved
        data = json.loads(content)
        return resource.factory(self, docid, data)
    def update_resource(self, path=None):
        '''
        Update a resource based on WSGI environment or a uri path

        Reads the new document body from the request, fills in the current
        CouchDB _rev when the caller did not supply one, and PUTs the result.
        Returns the CouchDB response body, or '' on failure.
        '''
        if path:
            docid = path
            if is_absolute(path):
                docid = relativize(path, self.remotedb)
        else:
            docid = self.environ['PATH_INFO'].lstrip('/').rsplit(self.space_tag, 1)[1].lstrip('/') #e.g. '/mydb/MyDoc' -> 'MyDoc'
        if logger: logger.debug('query ' + repr((self.remotedb, docid, join(self.remotedb, docid))))
        body = self.environ['wsgi.input'].read()
        # If the document already exists, we need to determine its current rev and add it to the
        # input body, skipping the process if rev is provided in the PUT request body
        body_js = json.loads(body)
        rev = json.loads(body).get('_rev',None)
        if not rev:
            # Need to GET the rev
            resp, content = self.h.request(join(self.remotedb, docid), "GET")
            if str(resp.status).startswith('2'):
                rev = json.loads(content).get('_rev',None)
                # NOTE(review): unguarded logger call -- unlike the other call
                # sites this crashes when logger is None; confirm intent
                logger.debug('update_resource: found existing rev = '+repr(rev))
            if rev:
                body_js['_rev'] = rev
                body = json.dumps(body_js)
        headers = {'content-type':self.environ['CONTENT_TYPE']}
        resp, content = self.h.request(join(self.remotedb, docid), "PUT", body=body, headers=headers)
        if logger: logger.debug('resp ' + repr((content[:100], resp)))
        self.prep_slave_response(resp)
        if not (self.resp_status.startswith('2') or self.resp_status.startswith('304')):
            if logger: logger.debug("Error looking up resource: %s: %s\n" % (content, self.resp_status))
            return '' #No resource could be retrieved
        return content
    #For couchdb create & update happen to be the same back end mechanism (since rulesheets are expected to provide the URL location)
    create_resource = update_resource
    def delete_resource(self, path=None):
        '''
        Delete a resource based on WSGI environment or a uri path

        Issues a CouchDB DELETE; returns the response body, or '' on failure.
        '''
        if path:
            docid = path
            if is_absolute(path):
                docid = relativize(path, self.remotedb)
        else:
            docid = self.environ['PATH_INFO'].lstrip('/').rsplit(self.space_tag, 1)[1].lstrip('/') #e.g. '/mydb/MyDoc' -> 'MyDoc'
        if logger: logger.debug('query ' + repr((self.remotedb, docid, join(self.remotedb, docid))))
        resp, content = self.h.request(join(self.remotedb, docid), "DELETE")#, headers=headers)
        if logger: logger.debug('resp ' + repr((content[:100], resp)))
        self.prep_slave_response(resp)
        if not (self.resp_status.startswith('2') or self.resp_status.startswith('304')):
            if logger: logger.debug("Error looking up resource: %s: %s\n" % (content, self.resp_status))
            return '' #No resource could be retrieved
        return content
#FIXME: Detect resource reference loops
class resource(object):
    '''
    Akara Moin/CMS node, a Moin wiki page that follows a template to direct workflow
    activity, including metadata extraction
    '''
    AKARA_TYPE = u'http://purl.org/xml3k/akara/cms/resource-type'
    @staticmethod
    def factory(space, docid, data, rtype=None):
        '''
        Create a resource or resource_type instance for a CouchDB document.

        Note: it's a fatal error if this can't figure out the resource type
        '''
        #Primarily to decide whether to create a resource or a resource_type object
        if not rtype:
            typeid, tpath = resource.zen_type(space, data)
        # NOTE(review): when rtype IS supplied, typeid is never assigned and
        # the comparison below raises NameError -- confirm factory is always
        # called with rtype=None
        if typeid == RESOURCE_TYPE_TYPE:
            return resource_type(space, docid, data, rtype=typeid)
        return resource(space, docid, data, rtype=typeid)
    def __init__(self, space, docid, data, rtype=None):
        '''
        space - the owning space adapter; docid - document id within the db;
        data - parsed JSON document; rtype - type URI or resource_type object
        '''
        self.docid = docid
        self.space = space
        self.slave_uri = join(space.remotedb, docid)
        self.data = data
        self.rulesheet = None
        if logger: logger.debug('GRIPPO: ' + repr(rtype))
        # resolve a type URI (other than the meta resource-type) to its object
        if isinstance(rtype, basestring) and rtype != RESOURCE_TYPE_TYPE:
            self.type = space.resource_factory(rtype)
        else:
            self.type = rtype
        return
    @staticmethod
    def zen_type(space, data):
        '''
        Compute a Zen type full moinrest uri as well as a path relative to top of the wiki instance
        '''
        rtype = data['zen:metadata']['zen:type']
        if logger: logger.debug('zen_type link: ' + repr(rtype))
        tpath, tid = rtype, absolutize(rtype, space.remotedb)
        if logger: logger.debug('Retrieved zen_type: ' + repr((tid, tpath)))
        return (tid, tpath)
    def get_proxy(self, environ, method, accept_imt=None, accept_lang=None):
        # NOTE(review): self.resource_type is never assigned anywhere in this
        # class (only self.type is) -- confirm whether self.type was intended
        return self.resource_type.run_rulesheet(environ, method, accept_imt, accept_lang)
UNSPECIFIED = object() # sentinel: rulesheet location not specified
class resource_type(resource):
    '''
    A resource whose document defines a Zen type: it carries the rulesheet
    used to handle requests for resources of that type.
    '''
    def get_rulesheet(self):
        '''
        Resolve and cache the rulesheet location for this type.
        '''
        rsheet = self.data['zen:metadata']['zen:rulesheet']
        if rsheet == '.':
            #The rulesheet is in a standalone attachment to this doc
            rev = self.data['_rev']
            self.rulesheet = join(self.slave_uri, u'attachment?rev=' + rev)
        else:
            #self.rulesheet = UNSPECIFIED
            self.rulesheet = rsheet
        if self.space: self.space.logger.debug('resource_type.get_rulesheet slave_uri, rulesheet: ' + repr((self.slave_uri, self.rulesheet)))
        return self.rulesheet
    def run_rulesheet(self, environ, method='GET', accept_imt='application/json', accept_lang=None):
        #FIXME: Deprecate
        auth = extract_auth(environ)
        return rulesheet(self.get_rulesheet(), self.space, auth).run(environ, method, accept_imt, accept_lang)
class rulesheet(object):
    '''
    A signed, executable rule sheet fetched over HTTP. The first line is an
    HMAC-style SHA-1 token; the remainder is Python source executed to
    register per-method request handlers.
    '''
    def __init__(self, source, space, auth):
        '''
        source - URI of the rulesheet; space - owning space adapter;
        auth - optional (user, passwd) pair for the fetch
        '''
        #rs = inputsource(source, resolver=resolver)
        #self.token = rs.stream.readline().strip().lstrip('#')
        h = httplib2.Http('/tmp/.cache')
        if auth:
            user, passwd = auth
            h.add_credentials(user, passwd)
        resp, body = h.request(source)
        if logger: logger.debug('rsheet_body ' + repr((body[:200],)))
        # NOTE(review): duplicate fetch of the same source immediately below
        # looks accidental -- confirm and drop one of the two requests
        resp, body = h.request(source)
        stream = cStringIO.StringIO(body)
        # signature token lives on the first line, behind a '#'
        self.token = stream.readline().strip().lstrip('#')
        #XXX In theory this is a microscopic security hole. If someone could find a way
        #to open up an exploit by changing whitespace *in the middle of the line*
        #(wiki_normalize does not touch WS at the beginning of a line)
        #In practice, we accept this small risk
        self.body = wiki_normalize(stream.read())
        self.space = space
        return
    def run(self, environ, method='GET', accept_imt='application/json', accept_lang=None):
        '''
        Verify the rulesheet signature, execute its body to collect handlers,
        and return the handler best matching method/IMT/language (or None).
        '''
        #e.g. you can sign a rulesheet as follows:
        #python -c "import sys, hashlib; print hashlib.sha1('MYSECRET' + sys.stdin.read()).hexdigest()" < rsheet.py
        #Make sure the rulesheet has not already been signed (i.e. does not have a hash on the first line)
        rheet_sig = hashlib.sha1(self.space.zensecret + self.body).hexdigest()
        if self.token != rheet_sig:
            if logger: logger.debug("Computed signature: " + repr(rheet_sig))
            raise RuntimeError('Security token verification failed')
        #chunks = []
        #U1 is just a smarter variant of the "Unicode, dammit!"
        # NOTE(review): U is not defined in this module -- confirm where it is
        # expected to come from before relying on U1
        def U1(text): return U(text, noneok=True)
        #def write(text):
        #    chunks.append(text)
        handlers = {}
        #Decorator that allows the user to define request handler functions in rule sheets
        def handles(method, match=None, lang=None, ttl=3600):
            '''
            method - HTTP method for this handler to use, e.g. 'GET' or 'PUT'
                Might be a non-standard, internal method for special cases (e.g. 'collect')
            match - condition to determine when this handler is to be invoked for a given method
                if a tuple, this should be an IMT and lang to compare to the Accept[-Language] info
                if a callable, should have signature match(accept_imt), returning True or False
            ttl - time-to-live for (GET) requests, for setting cache-control headers
            '''
            def deco(func):
                func.ttl = ttl
                # Set appropriate default media type when no match is specified in @handles
                if match is None :
                    func.imt = 'application/json'
                    func.lang = None
                else :
                    func.imt = match
                    func.lang = lang
                handlers.setdefault(method, []).append((match, lang, func))
                return func
            return deco
        #env = {'write': write, 'resource': self, 'service': service, 'U': U1}
        #resource_getter = partial(node.lookup, resolver=self.rtype.resolver)
        resource_getter = self.space.resource_factory
        # execution environment exposed to rulesheet code
        env = {'service': service_proxy, 'U': U1, 'handles': handles, 'R': resource_getter,
            'use': use, 'environ': environ, 'logger': logger, 'H': self.space.h}
        #Execute the rule sheet
        # NOTE: exec of remotely fetched code -- safety rests entirely on the
        # SHA-1 signature check above
        exec self.body in env
        default = None
        matching_handler = None
        # pick the handler whose IMT/language matches; a handler with no
        # match condition acts as the fallback
        for (match, lang, func) in handlers.get(method, []):
            if logger:logger.debug('(match, lang, func), method : ' + repr((match, lang, func)) + "," + method )
            if isinstance(match, basestring):
                if match == accept_imt:
                    if not accept_lang or lang in EXTRACT_LANG(accept_lang):
                        matching_handler = func
                    else:
                        default = func # need a default in case no match for lang preference is found
            elif (match is None):
                default = func
            else:
                if match(accept_imt):
                    matching_handler = func
        if logger: logger.debug('(matching_handler, default): ' + repr((matching_handler, default)))
        return matching_handler or default
| |
"""
inertia.py
-------------
Functions for dealing with inertia tensors.
Results validated against known geometries and checked for
internal consistency.
"""
import numpy as np
from trimesh import util
def cylinder_inertia(mass, radius, height, transform=None):
    """
    Return the inertia tensor of a cylinder.

    Parameters
    ------------
    mass : float
      Mass of cylinder
    radius : float
      Radius of cylinder
    height : float
      Height of cylinder
    transform : (4, 4) float
      Transformation of cylinder

    Returns
    ------------
    inertia : (3, 3) float
      Inertia tensor
    """
    h2, r2 = height ** 2, radius ** 2
    # the two moments perpendicular to the symmetry axis are equal;
    # the moment about the symmetry axis is m*r^2/2
    perpendicular = ((mass * h2) / 12) + ((mass * r2) / 4)
    axial = (mass * r2) / 2
    inertia = np.diag([perpendicular, perpendicular, axial])
    if transform is not None:
        inertia = transform_inertia(transform, inertia)
    return inertia
def sphere_inertia(mass, radius):
    """
    Return the inertia tensor of a sphere.

    Parameters
    ------------
    mass : float
      Mass of sphere
    radius : float
      Radius of sphere

    Returns
    ------------
    inertia : (3, 3) float
      Inertia tensor
    """
    # solid sphere: I = (2/5) * m * r^2 about any axis through the center
    moment = (2.0 / 5.0) * (radius ** 2) * mass
    return moment * np.eye(3)
def principal_axis(inertia):
    """
    Find the principal components and principal axes
    of inertia from the inertia tensor.

    Parameters
    ------------
    inertia : (3, 3) float
      Inertia tensor

    Returns
    ------------
    components : (3,) float
      Principal components of inertia
    vectors : (3, 3) float
      Row vectors pointing along the
      principal axes of inertia
    """
    tensor = np.asanyarray(inertia, dtype=np.float64)
    if tensor.shape != (3, 3):
        raise ValueError('inertia tensor must be (3, 3)!')
    # the inertia tensor is square-symmetric, so np.linalg.eigh applies
    # (and had the best numeric precision of svd/eig/eigh in tests)
    components, columns = np.linalg.eigh(tensor)
    # eigh yields eigenvectors as columns; expose them as rows
    return components, columns.T
def transform_inertia(transform, inertia_tensor):
    """
    Transform an inertia tensor to a new frame.

    More details in OCW PDF:
    MIT16_07F09_Lec26.pdf

    Parameters
    ------------
    transform : (3, 3) or (4, 4) float
      Transformation matrix
    inertia_tensor : (3, 3) float
      Inertia tensor

    Returns
    ------------
    transformed : (3, 3) float
      Inertia tensor in new frame
    """
    # validate inputs and pull the rotation out of a homogeneous matrix
    transform = np.asanyarray(transform, dtype=np.float64)
    if transform.shape == (4, 4):
        rotation = transform[:3, :3]
    elif transform.shape == (3, 3):
        rotation = transform
    else:
        raise ValueError('transform must be (3, 3) or (4, 4)!')

    inertia_tensor = np.asanyarray(inertia_tensor, dtype=np.float64)
    if inertia_tensor.shape != (3, 3):
        raise ValueError('inertia_tensor must be (3, 3)!')

    # similarity transform: R * I * R^T
    return util.multi_dot([rotation, inertia_tensor, rotation.T])
def radial_symmetry(mesh):
    """
    Check whether a mesh has radial symmetry.

    Parameters
    -----------
    mesh : object
        Must expose principal_inertia_components (3,) float and
        principal_inertia_vectors (3, 3) float (e.g. a trimesh.Trimesh).

    Returns
    -----------
    symmetry : None or str
         None         No rotational symmetry
         'radial'     Symmetric around an axis
         'spherical'  Symmetric around a point
    axis : None or (3,) float
      Rotation axis or point
    section : None or (3, 2) float
      If radial symmetry provide vectors
      to get cross section
    """
    # shortcuts to avoid typing and hitting cache
    scalar = mesh.principal_inertia_components
    vector = mesh.principal_inertia_vectors
    # the sorted order of the principal components
    order = scalar.argsort()
    # we are checking if a geometry has radial symmetry
    # if 2 of the PCI are equal, it is a revolved 2D profile
    # if 3 of the PCI (all of them) are equal it is a sphere
    # thus we take the diff of the sorted PCI, scale it as a ratio
    # of the largest PCI, and then scale to the tolerance we care about
    # if tol is 1e-3, that means that 2 components are identical if they
    # are within .1% of the maximum PCI.
    diff = np.abs(np.diff(scalar[order]))
    diff /= np.abs(scalar).max()
    # diffs that are within tol of zero
    diff_zero = (diff / 1e-3).astype(int) == 0
    if diff_zero.all():
        # this is the case where all 3 PCI are identical
        # this means that the geometry is symmetric about a point
        # examples of this are a sphere, icosahedron, etc
        axis = vector[0]
        section = vector[1:]
        return 'spherical', axis, section
    elif diff_zero.any():
        # this is the case for 2/3 PCI are identical
        # this means the geometry is symmetric about an axis
        # probably a revolved 2D profile
        # we know that only 1/2 of the diff values are True
        # if the first diff is 0, it means if we take the first element
        # in the ordered PCI we will have one of the non- revolve axis
        # if the second diff is 0, we take the last element of
        # the ordered PCI for the section axis
        # if we wanted the revolve axis we would just switch [0,-1] to
        # [-1,0]
        # since two vectors are the same, we know the middle
        # one is one of those two
        section_index = order[np.array([[0, 1],
                                        [1, -1]])[diff_zero]].flatten()
        section = vector[section_index]
        # we know the rotation axis is the sole unique value
        # and is either first or last of the sorted values
        axis_index = order[np.array([-1, 0])[diff_zero]][0]
        axis = vector[axis_index]
        return 'radial', axis, section
    # no two components match: no rotational symmetry
    return None, None, None
| |
'''
Created on 03 Aug 2017
@author: ernesto
'''
import os
import subprocess
import gzip
import re
from collections import namedtuple
from Utils.RunProgram import RunProgram
class VcfUtils(object):
"""
Class to represent a misc of actions that can be done on a single or multiple VCF files
"""
    def __init__(self, vcf=None, vcflist=None, bcftools_folder=None, bgzip_folder=None,
                 gatk_folder=None, java_folder=None, tmp_dir=None):
        """
        Constructor

        Parameters
        ----------
        vcf : str, optional
            Path to gzipped vcf file.
        vcflist : list, optional
            List of dicts containing setname:vcf_paths (keys:values) pairs.
        bcftools_folder : str, optional
            Path to folder containing the bcftools binary.
        bgzip_folder : str, optional
            Path to folder containing the bgzip binary.
        gatk_folder : str, optional
            Path to folder containing the jar file.
        java_folder : str, optional
            Path to folder containing the java binary.
        tmp_dir : str, optional
            Path to java temporary directory. This needs to be
            set for GATK modules that
            fail because there is not enough space in the default java tmp dir.

        Imp: Either 'vcf' or 'vcflist' variables should be initialized

        Raises
        ------
        Exception
            If neither 'vcf' nor 'vcflist' is given, or 'vcf' does not exist.
        """
        if not vcf and not vcflist:
            raise Exception("Either a vcf file or a list of vcf files should be used\
                            to initialize this class")
        if vcf is not None:
            # fail early on a bad path rather than at first use
            if os.path.isfile(vcf) is False:
                raise Exception("File does not exist")
        self.vcf = vcf
        self.vcflist = vcflist
        self.bcftools_folder = bcftools_folder
        self.bgzip_folder = bgzip_folder
        self.gatk_folder = gatk_folder
        self.java_folder = java_folder
        self.tmp_dir = tmp_dir
def reheader(self, newheader, outprefix, samplefile=None, verbose=False):
"""
Modifiy the VCF's header with the newheader
Parameters
----------
newheader : str
Path to the file containing the new header.
outprefix : string
Prefix for output files
samplefile : str, optional
Path to the file with the sample names that will included
in the new header.
verbose : bool, default=False
increase the verbosity.
Returns
-------
outfile : str
Path to the VCF with the modified header.
"""
outfile = outprefix+".reheaded.vcf.gz"
Arg = namedtuple('Argument', 'option value')
args = [Arg('-h', newheader), Arg('-o', outfile)]
if samplefile is not None:
args.append(Arg('-s', samplefile))
runner = RunProgram(path=self.bcftools_folder, program='bcftools reheader',
args=args, parameters=[self.vcf])
if verbose is True:
print("Command line is: {0}".format(runner.cmd_line))
stdout = runner.run_checkoutput()
return outfile
def add_to_header(self, header_f, outfilename, line_ann):
"""
Function to add to the header of a VCF the string passed with 'line_ann'
Parameters
----------
header_f : str
Path to file containing the header file that will be modified.
outfilename : str
Path to the new header file that is modified.
line_ann : str
Str with line that will be used to add to the header.
Returns
-------
outfilename : str
Path to modified header file. The new annotation will be added in the following line
after the desired annotation.
"""
of = open(outfilename, 'w')
# getting the type of line that is being passed
p = re.compile("^##(\w+)=")
m1 = p.match(line_ann)
type_ann = m1.group(1)
line_seen = False
with open(header_f) as f:
for line in f:
line = line.rstrip("\n")
m2 = p.match(line)
if m2 is None:
of.write(line+"\n")
continue
type1_ann = m2.group(1)
if type_ann == type1_ann and line_seen is False:
line_seen = True
of.write(line+"\n"+line_ann+"\n")
continue
else:
of.write(line+"\n")
of.close()
return outfilename
    def combine(self, labels, reference, outprefix, compress=False, outdir=None,
                ginterval=None, genotypemergeoption=None, filteredrecordsmergetype=None,
                threads=1, options=None, verbose=False):
        """
        Combine VCFs using GATK's CombineVariants into a single VCF

        Parameters
        ----------
        labels : list
            List of labels used for each of the VCFs in self.vcflist. The order of the labels
            should be the same that the VCFs in the list.
        reference : str
            Path to Fasta file with reference.
        outprefix : str
            Prefix used for output file.
        compress : bool, default=False
            Compress the output VCF with bgzip.
        outdir : str, optional
            Path to folder used to write the results to.
        ginterval : str, optional
            Genomic interval used to restrict the analysis. i.e. chr20:1000-2000.
        genotypemergeoption : {'UNIQUIFY', 'PRIORITIZE', 'UNSORTED', 'REQUIRE_UNIQUE'}, optional
            Determines how we should merge genotype records for samples shared
            across the ROD files.
        filteredrecordsmergetype : {'KEEP_IF_ANY_UNFILTERED', 'KEEP_IF_ANY_UNFILTERED',
                                    'KEEP_UNCONDITIONAL'}, optional
            Determines how we should handle records seen at the
            same site in the VCF, but with different FILTER fields.
        threads : int, default=1
            Number of threads to use.
        options : list, optional
            List of options. i.e. ['-env','--filteredAreUncalled'].
        verbose : bool, default=False
            increase the verbosity.

        Returns
        -------
        outfile : str
            Path to the merged VCF.
        """
        Arg = namedtuple('Argument', 'option value')
        args = [Arg('-T', 'CombineVariants'), Arg('-R', reference), Arg('-nt', threads)]
        variants_str = "" # NOTE(review): never used -- candidate for removal
        # NOTE(review): self.vcflist is iterated as plain paths here, while
        # the constructor docstring describes it as a list of dicts -- confirm
        for path, label in zip(self.vcflist, labels):
            if os.path.isfile(path) == False:
                print("Error reading from {0}".format(path))
                raise Exception("File does not exist")
            args.append(Arg('-V:{0}'.format(label), path))
        outfile = ""
        if outdir:
            outfile = "{0}/".format(outdir)
        outfile += "{0}.vcf".format(outprefix)
        if ginterval is not None:
            args.append(Arg('-L', ginterval))
        if genotypemergeoption is not None:
            args.append(Arg('--genotypemergeoption', genotypemergeoption))
        if filteredrecordsmergetype is not None:
            args.append(Arg('--filteredrecordsmergetype', filteredrecordsmergetype))
        params = []
        if options:
            for opt in options:
                params.append(opt)
        pipelist = None
        if compress is True:
            # pipe GATK stdout through bgzip instead of using -o
            outfile += ".gz"
            compressRunner = RunProgram(path=self.bgzip_folder,
                                        program='bgzip',
                                        parameters=['-c', '>', outfile])
            pipelist = [compressRunner]
        else:
            args.append(Arg('-o', outfile))
        program_str = None
        # route java temp files to self.tmp_dir when configured (see class docs)
        if self.tmp_dir is not None:
            program_str = "java -Djava.io.tmpdir={0} " \
                          "-jar {1}/GenomeAnalysisTK.jar".format(self.tmp_dir, self.gatk_folder)
        else:
            program_str = "java -jar {0}/GenomeAnalysisTK.jar".format(self.gatk_folder)
        runner = RunProgram(path=self.java_folder, program=program_str,
                            args=args, parameters=params, downpipe=pipelist)
        if verbose is True:
            print("Command line is: {0}".format(runner.cmd_line))
        stdout, stderr, is_exc = runner.run_popen()
        return outfile
def rename_chros(self, chr_types, outfile, compress=True):
"""
Function to modify the chr names in the VCF file
For example:
If file has UCSC-type chr names (i.e. chr1,chr2,...) then this
function will convert the UCSC-type chr names to Ensembl-type
chr names (i.e. 1,2,...) or vice-versa
Parameters
----------
chr_types : {'ucsc','ensembl'}
Type of chr names that will be written to the file.
outfile : str
File used for the output VCF.
compress : bool, default=True
Returns
-------
outfile : str
Path to the VCF with the chrosomes renamed.
"""
command = ""
if chr_types == 'ensembl':
if compress is True:
command += "zcat {0} | awk '{{gsub(/^chr/,\"\"); print}}' - | {1}/bgzip -c > {2}".\
format(self.vcf, self.bgzip_folder, outfile)
else:
command += "zcat {0} | awk '{{gsub(/^chr/,\"\"); print}}' - > {1}".\
format(self.vcf, outfile)
elif chr_types == 'ucsc':
if compress is True:
command += "zcat {0} | awk '{{if($0 !~ /^#/) print \"chr\"$0; " \
"else print $0}}' - | {1}/bgzip -c > {2}".format(self.vcf,
self.bgzip_folder,
outfile)
else:
command += "zcat {0} | awk '{{if($0 !~ /^#/) print \"chr\"$0; " \
"else print $0}}' - > {1}".format(self.vcf, outfile)
try:
subprocess.check_output(command, shell=True)
except subprocess.CalledProcessError as exc:
print("Something went wrong.\n"
"Command used was: %s" % command)
raise Exception(exc.output)
return outfile
def correct_ambiguity_codes(self, outfile):
"""
Function to correct the ambiguity bases in the VCF. This ambiguity
may appear in the REF or ALT columns
Parameters
----------
outfile : str
File where the output VCF will be written.
Returns
-------
outfile : str
Path to vcf.gz file compressed with GZIP.
"""
ref_count = 0
alt_count = 0
f = gzip.open(outfile, 'wb')
with gzip.open(self.vcf, 'r') as fin:
for line in fin:
if not line.startswith(b"#"):
bits = line.split(b"\t")
ref = bits[3].decode("utf-8")
alt = bits[4].decode("utf-8")
if re.search(r"[^ATGC.,]", ref):
ref_count += 1
ref = re.sub('[^ACGT.]', 'N', ref)
if re.search(r"[^ATGC.,]", alt):
alt_count += 1
alt = re.sub('[^ACGT.]', 'N', alt)
bits[3] = ref.encode('utf-8')
bits[4] = alt.encode('utf-8')
nline = b'\t'.join(bits)
f.write(nline)
else:
f.write(line)
f.close()
print("Sites with ambiguous bases in the REF column is:{0}".format(ref_count))
print("Sites with ambiguous bases in the ALT column is:{0}".format(alt_count))
return outfile
def drop_genotypes(self, outfile, verbose=False):
"""
Function to drop the Genotype information from a VCF.
This function uses bcftools -G to perform this operation
Parameters
----------
outfile : str
File where the output VCF will be written.
verbose : bool, default=False
increase the verbosity.
Returns
-------
outfile : str
Path to the vcf.gz file without the GT information.
"""
Arg = namedtuple('Argument', 'option value')
args = [Arg('-o', outfile), Arg('-O', 'z')]
runner = RunProgram(path=self.bcftools_folder,
program='bcftools view -G', args=args, parameters=[self.vcf])
if verbose is True:
print("Command line is: {0}".format(runner.cmd_line))
stdout = runner.run_checkoutput()
return outfile
def drop_info(self, outfile, verbose=False):
"""
Function to remove the INFO annotation from a VCF.
This function uses bcftools annotate to perform this operation
Parameters
----------
outfile : str
File where the output VCF will be written.
verbose : bool, default=False
increase the verbosity.
Returns
-------
outfile : str
Path to the vcf.gz file without the INFO annotation.
"""
Arg = namedtuple('Argument', 'option value')
args = [Arg('-o', outfile), Arg('-O', 'z')]
runner = RunProgram(path=self.bcftools_folder,
program='bcftools annotate --remove INFO',
args=args,
parameters=[self.vcf])
if verbose is True:
print("Command line is: {0}".format(runner.cmd_line))
stdout = runner.run_checkoutput()
return outfile
    def convert_PL2GL(self, outfile, threads=1, verbose=False):
        """
        Function to convert PL fields into GL.
        This function makes use of Bcftools +tag2tag plugin
        Parameters
        ----------
        outfile : str
            File where the output VCF will be written.
        threads : int, default=1
            Number of threads to use.
        verbose : bool, default=False
            increase the verbosity.
        Returns
        -------
        outfile : str
            Path to the vcf.gz file with the PL fields converted.
        """
        Arg = namedtuple('Argument', 'option value')
        # '--' separates bcftools options from plugin options; '--pl-to-gl'
        # selects the conversion, '-r' drops the source PL tag afterwards,
        # and '-Oz' writes bgzipped VCF output.
        params = [self.vcf, '-Oz', '--', '-r', '--pl-to-gl']
        runner = RunProgram(path=self.bcftools_folder,
                            program='bcftools +tag2tag',
                            args=[Arg('--threads', threads), Arg('-o', outfile)],
                            parameters=params)
        if verbose is True:
            print("Command line is: {0}".format(runner.cmd_line))
        stdout = runner.run_checkoutput()
        return outfile
| |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for tensorflow_probability.python.sts.structural_time_series."""
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.sts import Autoregressive
from tensorflow_probability.python.sts import AutoregressiveIntegratedMovingAverage
from tensorflow_probability.python.sts import DynamicLinearRegression
from tensorflow_probability.python.sts import LinearRegression
from tensorflow_probability.python.sts import LocalLevel
from tensorflow_probability.python.sts import LocalLinearTrend
from tensorflow_probability.python.sts import Seasonal
from tensorflow_probability.python.sts import SemiLocalLinearTrend
from tensorflow_probability.python.sts import SmoothSeasonal
from tensorflow_probability.python.sts import SparseLinearRegression
from tensorflow_probability.python.sts import Sum
from tensorflow_probability.python.sts.internal import util as sts_util
class _StructuralTimeSeriesTests(object):
  """Shape- and composition-level tests for structural time series models.

  Concrete subclasses mix this in with `test_util.TestCase` and define the
  class attributes `dtype` and `use_static_shape` read by
  `_build_placeholder`.
  """
  def test_broadcast_batch_shapes(self):
    # Parameters with different (but broadcastable) batch shapes should
    # produce a model/SSM whose batch shape is their broadcast.
    seed = test_util.test_seed(sampler_type='stateless')
    batch_shape = [3, 1, 4]
    partial_batch_shape = [2, 1]
    expected_broadcast_batch_shape = [3, 2, 4]
    # Build a model where parameters have different batch shapes.
    partial_batch_loc = self._build_placeholder(
        np.random.randn(*partial_batch_shape))
    full_batch_loc = self._build_placeholder(
        np.random.randn(*batch_shape))
    partial_scale_prior = tfd.LogNormal(
        loc=partial_batch_loc, scale=tf.ones_like(partial_batch_loc))
    full_scale_prior = tfd.LogNormal(
        loc=full_batch_loc, scale=tf.ones_like(full_batch_loc))
    loc_prior = tfd.Normal(loc=partial_batch_loc,
                           scale=tf.ones_like(partial_batch_loc))
    linear_trend = LocalLinearTrend(level_scale_prior=full_scale_prior,
                                    slope_scale_prior=full_scale_prior,
                                    initial_level_prior=loc_prior,
                                    initial_slope_prior=loc_prior)
    seasonal = Seasonal(num_seasons=3,
                        drift_scale_prior=partial_scale_prior,
                        initial_effect_prior=loc_prior)
    model = Sum([linear_trend, seasonal],
                observation_noise_scale_prior=partial_scale_prior)
    param_samples = [p.prior.sample(seed=seed) for p in model.parameters]
    ssm = model.make_state_space_model(num_timesteps=2,
                                       param_vals=param_samples)
    # Test that the model's batch shape matches the SSM's batch shape,
    # and that they both match the expected broadcast shape.
    self.assertAllEqual(model.batch_shape, ssm.batch_shape)
    (model_batch_shape_tensor_,
     ssm_batch_shape_tensor_) = self.evaluate((model.batch_shape_tensor(),
                                               ssm.batch_shape_tensor()))
    self.assertAllEqual(model_batch_shape_tensor_, ssm_batch_shape_tensor_)
    self.assertAllEqual(model_batch_shape_tensor_,
                        expected_broadcast_batch_shape)
  def test_addition_raises_error_with_no_observed_time_series(self):
    # Adding components needs an observed series (or an existing Sum) from
    # which to derive default priors for the resulting `Sum`.
    c1 = tfp.sts.LocalLevel(level_scale_prior=tfd.Normal(0., 1.),
                            initial_level_prior=tfd.Normal(0., 1.))
    c2 = tfp.sts.LocalLevel(level_scale_prior=tfd.Normal(0., 0.1),
                            initial_level_prior=tfd.Normal(1., 2.))
    with self.assertRaisesRegex(
        ValueError, 'Could not automatically create a `Sum` component'):
      c1 + c2  # pylint: disable=pointless-statement
  def test_adding_two_sums(self):
    observed_time_series = self._build_placeholder([1., 2., 3., 4., 5.])
    s1 = tfp.sts.Sum(
        [tfp.sts.LocalLevel(observed_time_series=observed_time_series)],
        observed_time_series=observed_time_series)
    s2 = tfp.sts.Sum(
        [tfp.sts.LocalLinearTrend(observed_time_series=observed_time_series)],
        observed_time_series=observed_time_series)
    s3 = s1 + s2
    self.assertLen(s3.components, 2)
    seed = test_util.test_seed(sampler_type='stateless')
    def observation_noise_scale_prior_sample(s):
      return s.parameters[0].prior.sample(seed=seed)
    # The merged Sum should share the addends' noise prior and offset.
    self.assertAllEqual(observation_noise_scale_prior_sample(s3),
                        observation_noise_scale_prior_sample(s1))
    self.assertAllEqual(observation_noise_scale_prior_sample(s3),
                        observation_noise_scale_prior_sample(s2))
    self.assertAllEqual(s3.constant_offset, s1.constant_offset)
    self.assertAllEqual(s3.constant_offset, s2.constant_offset)
    # Sums whose shared settings disagree cannot be merged.
    with self.assertRaisesRegex(
        ValueError, 'Cannot add Sum components'):
      s1.copy(observed_time_series=3 * observed_time_series) + s2  # pylint: disable=expression-not-assigned
    with self.assertRaisesRegex(
        ValueError, 'Cannot add Sum components'):
      s1.copy(constant_offset=4.) + s2  # pylint: disable=expression-not-assigned
    with self.assertRaisesRegex(
        ValueError, 'Cannot add Sum components'):
      s1.copy(observation_noise_scale_prior=tfd.Normal(  # pylint: disable=expression-not-assigned
          self._build_placeholder(0.), self._build_placeholder(1.))) + s2
  def _build_placeholder(self, ndarray, dtype=None):
    """Convert a numpy array to a TF placeholder.
    Args:
      ndarray: any object convertible to a numpy array via `np.asarray()`.
      dtype: optional `dtype`; if not specified, defaults to `self.dtype`.
    Returns:
      placeholder: a TensorFlow `placeholder` with default value given by the
      provided `ndarray`, dtype given by `self.dtype`, and shape specified
      statically only if `self.use_static_shape` is `True`.
    """
    if dtype is None:
      dtype = self.dtype
    ndarray = np.asarray(ndarray).astype(dtype)
    return tf1.placeholder_with_default(
        ndarray, shape=ndarray.shape if self.use_static_shape else None)
@test_util.test_all_tf_execution_regimes
class StructuralTimeSeriesTestsStaticShape32(
    _StructuralTimeSeriesTests, test_util.TestCase):
  # float32 inputs with statically-known shapes.
  dtype = np.float32
  use_static_shape = True
@test_util.test_all_tf_execution_regimes
class StructuralTimeSeriesTestsDynamicShape32(
    _StructuralTimeSeriesTests, test_util.TestCase):
  # float32 inputs with shapes only known at run time.
  dtype = np.float32
  use_static_shape = False
@test_util.test_all_tf_execution_regimes
class StructuralTimeSeriesTestsStaticShape64(
    _StructuralTimeSeriesTests, test_util.TestCase):
  # float64 inputs with statically-known shapes.
  dtype = np.float64
  use_static_shape = True
class _StsTestHarness(object):
  """Generic tests applied to every STS component.

  Concrete subclasses define `_build_sts(observed_time_series=None, **kwargs)`
  returning the component under test; each test below exercises that model
  through the shared `StructuralTimeSeries` API.
  """
  def test_state_space_model(self):
    seed = test_util.test_seed(sampler_type='stateless')
    model = self._build_sts()
    dummy_param_vals = [p.prior.sample(seed=seed) for p in model.parameters]
    initial_state_prior = tfd.MultivariateNormalDiag(
        loc=-2. + tf.zeros([model.latent_size]),
        scale_diag=3. * tf.ones([model.latent_size]))
    mask = tf.convert_to_tensor(
        [False, True, True, False, False, False, False, True, False, False],
        dtype=tf.bool)
    # Verify we build the LGSSM without errors.
    ssm = model.make_state_space_model(
        num_timesteps=10,
        param_vals=dummy_param_vals,
        initial_state_prior=initial_state_prior,
        initial_step=1,
        mask=mask)
    # Verify that the child class passes the initial step, prior, and mask
    # arguments through to the SSM.
    self.assertEqual(self.evaluate(ssm.initial_step), 1)
    self.assertEqual(ssm.initial_state_prior, initial_state_prior)
    self.assertAllEqual(ssm.mask, mask)
    # Verify the model has the correct latent size.
    self.assertEqual(
        self.evaluate(
            tf.convert_to_tensor(
                ssm.latent_size_tensor())),
        model.latent_size)
    # Verify that the SSM tracks its parameters.
    seed = test_util.test_seed(sampler_type='stateless')
    observed_time_series = self.evaluate(
        samplers.normal([10, 1], seed=seed))
    ssm_copy = ssm.copy(name='copied_ssm')
    self.assertAllClose(*self.evaluate((
        ssm.log_prob(observed_time_series),
        ssm_copy.log_prob(observed_time_series))))
  def test_log_joint(self):
    seed = test_util.test_seed(sampler_type='stateless')
    model = self._build_sts()
    num_timesteps = 5
    # simple case: single observation, and all params unbatched
    log_joint_fn = model.joint_log_prob(
        observed_time_series=np.float32(
            np.random.standard_normal([num_timesteps, 1])))
    lp_seed1, lp_seed2 = tfp.random.split_seed(seed, n=2)
    seeds = tfp.random.split_seed(lp_seed1, n=len(model.parameters))
    lp = self.evaluate(
        log_joint_fn(*[p.prior.sample(seed=seed) for seed, p in zip(
            seeds, model.parameters)]))
    # An unbatched observation should give a scalar log prob.
    self.assertEqual(tf.TensorShape([]), lp.shape)
    # more complex case: y has sample and batch shapes, some parameters
    # have partial batch shape.
    full_batch_shape = [2, 3]
    partial_batch_shape = [3]
    sample_shape = [4]
    log_joint_fn = model.joint_log_prob(
        observed_time_series=np.float32(
            np.random.standard_normal(sample_shape + full_batch_shape +
                                      [num_timesteps, 1])))
    # We alternate full_batch_shape, partial_batch_shape in sequence so that in
    # a model with only one parameter, that parameter is constructed with full
    # batch shape.
    seeds = tfp.random.split_seed(lp_seed2, n=len(model.parameters))
    batch_shaped_parameters_ = self.evaluate([
        p.prior.sample(sample_shape=full_batch_shape if (i % 2 == 0)
                       else partial_batch_shape, seed=seed)
        for (i, (seed, p)) in enumerate(zip(seeds, model.parameters))])
    lp = self.evaluate(log_joint_fn(*batch_shaped_parameters_))
    self.assertEqual(tf.TensorShape(full_batch_shape), lp.shape)
    # Check that the log joint function also supports parameters passed
    # as kwargs.
    parameters_by_name_ = {
        p.name: v for (p, v) in zip(model.parameters, batch_shaped_parameters_)}
    lp_with_kwargs = self.evaluate(log_joint_fn(**parameters_by_name_))
    self.assertAllClose(lp, lp_with_kwargs)
  def test_constant_series_does_not_induce_constant_prior(self):
    # Heuristic priors derived from a constant series must still have
    # nonzero variance.
    observed_time_series = np.array([1.0, 1.0, 1.0]).astype(np.float32)
    model = self._build_sts(observed_time_series=observed_time_series)
    for parameter in model.parameters:
      param_samples = self.evaluate(
          parameter.prior.sample(
              30, seed=test_util.test_seed(sampler_type='stateless')))
      self.assertAllGreater(tf.math.reduce_std(param_samples), 0.)
  def test_log_joint_with_missing_observations(self):
    # Test that this component accepts MaskedTimeSeries inputs. In most
    # cases, it is sufficient that the component accesses only
    # `empirical_statistics(observed_time_series)`.
    # TODO(b/139483802): De-flake this test when run with --vary_seed.
    seed = test_util.test_seed(hardcoded_seed=123, sampler_type='stateless')
    # Masked entries carry extreme/non-finite values; a finite log prob
    # below proves they were actually ignored.
    observed_time_series = np.array(
        [1.0, 2.0, -1000., 0.4, np.nan, 1000., 4.2, np.inf]).astype(np.float32)
    observation_mask = np.array(
        [False, False, True, False, True, True, False, True]).astype(np.bool_)
    masked_time_series = tfp.sts.MaskedTimeSeries(observed_time_series,
                                                  is_missing=observation_mask)
    model = self._build_sts(observed_time_series=masked_time_series)
    log_joint_fn = model.joint_log_prob(
        observed_time_series=masked_time_series)
    seeds = tfp.random.split_seed(seed, n=len(model.parameters))
    lp = self.evaluate(
        log_joint_fn(*[p.prior.sample(seed=seed) for seed, p in zip(
            seeds, model.parameters)]))
    self.assertEqual(tf.TensorShape([]), lp.shape)
    self.assertTrue(np.isfinite(lp))
  def test_prior_sample(self):
    model = self._build_sts()
    ys, param_samples = model.prior_sample(
        num_timesteps=5, params_sample_shape=[2], trajectories_sample_shape=[3],
        seed=test_util.test_seed(sampler_type='stateless'))
    # Shape is [trajectories, params, timesteps, 1].
    self.assertAllEqual(ys.shape, [3, 2, 5, 1])
    self.assertEqual(len(param_samples), len(model.parameters))
    for i in range(len(param_samples)):
      sampled = param_samples[i]
      param = model.parameters[i]
      self.assertAllEqual(sampled.shape, [
          2,
      ] + param.prior.batch_shape.as_list() + param.prior.event_shape.as_list())
  def test_joint_distribution_name(self):
    model = self._build_sts(name='foo')
    jd = model.joint_distribution(num_timesteps=5)
    self.assertEqual('foo', jd.name)
  def test_joint_distribution_log_prob(self):
    model = self._build_sts(
        # Dummy series to build the model with float64 priors. Working in
        # float64 minimizes numeric inconsistencies between log-prob
        # implementations.
        observed_time_series=np.float64([1., 0., 1., 0.]))
    jd_no_trajectory_shape = model.joint_distribution(num_timesteps=11)
    self.assertLen(jd_no_trajectory_shape.dtype, len(model.parameters) + 1)
    jd = model.joint_distribution(trajectories_shape=[2], num_timesteps=11)
    self.assertLen(jd.dtype, len(model.parameters) + 1)
    # Time series sampled from the JD should have the expected shape.
    samples = self.evaluate(
        jd.sample(seed=test_util.test_seed(sampler_type='stateless')))
    observed_time_series = samples['observed_time_series']
    self.assertAllEqual(tf.shape(observed_time_series),
                        tf.concat([model.batch_shape_tensor(), [2, 11, 1]],
                                  axis=0))
    # The JD's `log_prob` val should match the previous `joint_log_prob` method.
    sampled_params = list(samples.values())[:-1]
    lp0 = model.joint_log_prob(observed_time_series)(*sampled_params)
    lp1 = jd.log_prob(samples)
    self.assertAllClose(lp0, lp1)
    # Passing `observed_time_series` should return the pinned distribution.
    jd_pinned = model.joint_distribution(
        observed_time_series=observed_time_series)
    lp2 = jd_pinned.unnormalized_log_prob(*sampled_params)
    self.assertAllClose(lp0, lp2)
    # The JD should expose the STS bijectors as its default bijectors.
    jd_bijectors = jd._model_unflatten(
        jd.experimental_default_event_space_bijector().bijectors)
    for param in model.parameters:
      self.assertEqual(param.bijector, jd_bijectors[param.name])
  def test_default_priors_follow_batch_shapes(self):
    seed = test_util.test_seed(sampler_type='stateless')
    num_timesteps = 3
    time_series_sample_shape = [4, 2]
    observation_shape_full = time_series_sample_shape + [num_timesteps]
    dummy_observation = np.random.randn(
        *(observation_shape_full)).astype(np.float32)
    model = self._build_sts(observed_time_series=dummy_observation)
    # The model should construct a default parameter prior for *each* observed
    # time series, so the priors will have batch_shape equal to
    # `time_series_sample_shape`.
    for parameter in model.parameters:
      self.assertEqual(parameter.prior.batch_shape, time_series_sample_shape)
    # The initial state prior should also have the appropriate batch shape.
    # To test this, we build the ssm and test that it has a consistent
    # broadcast batch shape.
    seeds = tfp.random.split_seed(seed, n=len(model.parameters))
    param_samples = [p.prior.sample(seed=seed) for seed, p in zip(
        seeds, model.parameters)]
    ssm = model.make_state_space_model(
        num_timesteps=num_timesteps, param_vals=param_samples)
    self.assertEqual(ssm.batch_shape, time_series_sample_shape)
  def test_copy(self):
    model = self._build_sts()
    copy = model.copy()
    # A copy is a distinct object exposing the same parameterization.
    self.assertNotEqual(id(model), id(copy))
    self.assertAllEqual([p.name for p in model.parameters],
                        [p.name for p in copy.parameters])
  def test_add_component(self):
    model = self._build_sts(
        observed_time_series=np.array([1., 2., 3.], np.float32))
    new_component = tfp.sts.LocalLevel(name='LocalLevel2')
    sum_model = model + new_component
    ledom_mus = new_component + model  # `sum_model` backwards.
    self.assertIsInstance(sum_model, tfp.sts.Sum)
    self.assertIsInstance(ledom_mus, tfp.sts.Sum)
    self.assertEqual(sum_model.components[-1], new_component)
    self.assertEqual(ledom_mus.components[0], new_component)
    self.assertEqual(set([p.name for p in sum_model.parameters]),
                     set([p.name for p in ledom_mus.parameters]))
    # If we built a new Sum component (rather than extending an existing one),
    # we should have passed an observed_time_series so that we get reasonable
    # default priors.
    if not isinstance(model, tfp.sts.Sum):
      self.assertIsNotNone(sum_model.init_parameters['observed_time_series'])
      self.assertIsNotNone(ledom_mus.init_parameters['observed_time_series'])
@test_util.test_all_tf_execution_regimes
class AutoregressiveTest(test_util.TestCase, _StsTestHarness):

  def _build_sts(self, observed_time_series=None, **kwargs):
    """Build an order-3 autoregressive model for the shared harness tests."""
    model = Autoregressive(order=3,
                           observed_time_series=observed_time_series,
                           **kwargs)
    return model
@test_util.test_all_tf_execution_regimes
class ARMATest(test_util.TestCase, _StsTestHarness):

  def _build_sts(self, observed_time_series=None, **kwargs):
    """Build an ARMA(3, 1) model with an explicit level-drift prior."""
    drift_loc = 1.
    if observed_time_series is not None:
      observed_time_series = (
          sts_util.canonicalize_observed_time_series_with_mask(
              observed_time_series))
      # Broadcast the drift prior's parameters against the series'
      # batch shape.
      drift_loc = tf.ones_like(observed_time_series.time_series[..., 0, 0])
    return AutoregressiveIntegratedMovingAverage(
        ar_order=3,
        ma_order=1,
        integration_degree=0,
        level_drift_prior=tfd.Normal(loc=drift_loc, scale=drift_loc),
        observed_time_series=observed_time_series,
        **kwargs)
@test_util.test_all_tf_execution_regimes
class ARIMATest(test_util.TestCase, _StsTestHarness):

  def _build_sts(self, observed_time_series=None, **kwargs):
    """Build an ARIMA(1, 2, 2) model for the shared harness tests."""
    model = AutoregressiveIntegratedMovingAverage(
        ar_order=1, ma_order=2, integration_degree=2,
        observed_time_series=observed_time_series,
        **kwargs)
    return model
@test_util.test_all_tf_execution_regimes
class LocalLevelTest(test_util.TestCase, _StsTestHarness):

  def _build_sts(self, observed_time_series=None, **kwargs):
    """Build a LocalLevel model for the shared harness tests."""
    model = LocalLevel(observed_time_series=observed_time_series, **kwargs)
    return model
@test_util.test_all_tf_execution_regimes
class LocalLinearTrendTest(test_util.TestCase, _StsTestHarness):

  def _build_sts(self, observed_time_series=None, **kwargs):
    """Build a LocalLinearTrend model for the shared harness tests."""
    model = LocalLinearTrend(observed_time_series=observed_time_series,
                             **kwargs)
    return model
@test_util.test_all_tf_execution_regimes
class SeasonalTest(test_util.TestCase, _StsTestHarness):

  def _build_sts(self, observed_time_series=None, **kwargs):
    """Build a 7-season model with a single step per season.

    A Seasonal model with `num_steps_per_season > 1` would have deterministic
    dependence between timesteps, so evaluating `log_prob` of an arbitrary
    time series leads to Cholesky decomposition errors unless the model also
    includes an observation noise component (which it would in practice, but
    this test harness attempts to test the component in isolation). The
    `num_steps_per_season=1` case used here does not suffer from this issue.
    """
    return Seasonal(num_seasons=7,
                    num_steps_per_season=1,
                    constrain_mean_effect_to_zero=False,
                    observed_time_series=observed_time_series,
                    **kwargs)
@test_util.test_all_tf_execution_regimes
class SeasonalWithZeroMeanConstraintTest(test_util.TestCase, _StsTestHarness):

  def _build_sts(self, observed_time_series=None, **kwargs):
    """Build a 7-season model whose mean effect is constrained to zero."""
    return Seasonal(num_seasons=7,
                    num_steps_per_season=1,
                    constrain_mean_effect_to_zero=True,
                    observed_time_series=observed_time_series,
                    **kwargs)
@test_util.test_all_tf_execution_regimes
class SeasonalWithMultipleStepsAndNoiseTest(test_util.TestCase,
                                            _StsTestHarness):

  def _build_sts(self, observed_time_series=None, **kwargs):
    """Build a drift-free day-of-week effect wrapped in a noisy Sum."""
    weekly_effect = tfp.sts.Seasonal(num_seasons=7,
                                     num_steps_per_season=24,
                                     allow_drift=False,
                                     observed_time_series=observed_time_series,
                                     name='day_of_week')
    return tfp.sts.Sum(components=[weekly_effect],
                       observed_time_series=observed_time_series,
                       **kwargs)
@test_util.test_all_tf_execution_regimes
class SemiLocalLinearTrendTest(test_util.TestCase, _StsTestHarness):

  def _build_sts(self, observed_time_series=None, **kwargs):
    """Build a SemiLocalLinearTrend model for the shared harness tests."""
    model = SemiLocalLinearTrend(observed_time_series=observed_time_series,
                                 **kwargs)
    return model
@test_util.test_all_tf_execution_regimes
class SmoothSeasonalTest(test_util.TestCase, _StsTestHarness):

  def _build_sts(self, observed_time_series=None, **kwargs):
    """Build a period-42 smooth seasonal model with three harmonics."""
    model = SmoothSeasonal(period=42,
                           frequency_multipliers=[1, 2, 4],
                           observed_time_series=observed_time_series,
                           **kwargs)
    return model
@test_util.test_all_tf_execution_regimes
class SmoothSeasonalWithNoDriftTest(test_util.TestCase, _StsTestHarness):

  def _build_sts(self, observed_time_series=None, **kwargs):
    """Build a drift-free smooth seasonal model wrapped in a Sum.

    The test harness doesn't like models with no parameters, so the
    drift-free component is wrapped in a `Sum`.
    """
    seasonal = SmoothSeasonal(period=42,
                              frequency_multipliers=[1, 2, 4],
                              allow_drift=False,
                              observed_time_series=observed_time_series)
    return tfp.sts.Sum([seasonal],
                       observed_time_series=observed_time_series,
                       **kwargs)
@test_util.test_all_tf_execution_regimes
class SumTest(test_util.TestCase, _StsTestHarness):

  def _build_sts(self, observed_time_series=None, **kwargs):
    """Build a Sum of two local linear trend components."""
    components = [
        LocalLinearTrend(observed_time_series=observed_time_series,
                         name='first_component'),
        LocalLinearTrend(observed_time_series=observed_time_series,
                         name='second_component'),
    ]
    return Sum(components=components,
               observed_time_series=observed_time_series,
               **kwargs)
@test_util.test_all_tf_execution_regimes
class LinearRegressionTest(test_util.TestCase, _StsTestHarness):

  def _build_sts(self, observed_time_series=None, **kwargs):
    """Wrap a LinearRegression component in a Sum for the harness tests."""
    max_timesteps = 100
    num_features = 3
    dtype = np.float32
    weights_prior = tfd.Sample(tfd.Laplace(0., 1.),
                               sample_shape=[num_features])
    # LinearRegression components don't currently take an
    # `observed_time_series` argument, so they can't infer a prior batch
    # shape. This means we have to manually set the batch shape expected by
    # the tests.
    if observed_time_series is not None:
      series_tensor, _ = (
          sts_util.canonicalize_observed_time_series_with_mask(
              observed_time_series))
      batch_shape = tf.shape(series_tensor)[:-2]
      dtype = dtype_util.as_numpy_dtype(series_tensor.dtype)
      weights_prior = tfd.Sample(
          tfd.Laplace(tf.zeros(batch_shape, dtype=dtype), 1.),
          sample_shape=[num_features])
    design = np.random.randn(max_timesteps, num_features).astype(dtype)
    regression = LinearRegression(design_matrix=design,
                                  weights_prior=weights_prior)
    return Sum(components=[regression],
               observed_time_series=observed_time_series,
               **kwargs)
@test_util.test_all_tf_execution_regimes
class SparseLinearRegressionTest(test_util.TestCase, _StsTestHarness):

  def _build_sts(self, observed_time_series=None, **kwargs):
    """Wrap a SparseLinearRegression component in a Sum for the harness."""
    max_timesteps = 100
    num_features = 3
    # SparseLinearRegression components don't currently take an
    # `observed_time_series` argument, so they can't infer a prior batch
    # shape. This means we have to manually set the batch shape expected by
    # the tests.
    batch_shape = None
    dtype = np.float32
    if observed_time_series is not None:
      series_tensor, _ = (
          sts_util.canonicalize_observed_time_series_with_mask(
              observed_time_series))
      batch_shape = tf.shape(series_tensor)[:-2]
      dtype = dtype_util.as_numpy_dtype(series_tensor.dtype)
    design = np.random.randn(max_timesteps, num_features).astype(dtype)
    regression = SparseLinearRegression(design_matrix=design,
                                        weights_batch_shape=batch_shape)
    return Sum(components=[regression],
               observed_time_series=observed_time_series,
               **kwargs)
@test_util.test_all_tf_execution_regimes
class DynamicLinearRegressionTest(test_util.TestCase, _StsTestHarness):

  def _build_sts(self, observed_time_series=None, **kwargs):
    """Build a DynamicLinearRegression over a random design matrix."""
    max_timesteps = 100
    num_features = 3
    # Match the design matrix dtype to the observed series when one is given.
    dtype = np.float32
    if observed_time_series is not None:
      series_tensor, _ = (
          sts_util.canonicalize_observed_time_series_with_mask(
              observed_time_series))
      dtype = dtype_util.as_numpy_dtype(series_tensor.dtype)
    design = np.random.randn(max_timesteps, num_features).astype(dtype)
    return DynamicLinearRegression(design_matrix=design,
                                   observed_time_series=observed_time_series,
                                   **kwargs)
# Discover and run all test cases when executed as a script.
if __name__ == '__main__':
  test_util.main()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# type: ignore
# Compiled with Coconut version 1.2.3-post_dev1 [Colonel]
"""Built-in Coconut utilities."""
# Coconut Header: --------------------------------------------------------
from __future__ import print_function, absolute_import, unicode_literals, division
import sys as _coconut_sys
if _coconut_sys.version_info < (3,):
    # Python 2: keep the original builtins reachable under py_* aliases
    # before they are shadowed by the Python-3-style shims defined below.
    py_chr, py_filter, py_hex, py_input, py_int, py_map, py_object, py_oct, py_open, py_print, py_range, py_str, py_zip, py_filter, py_reversed, py_enumerate = chr, filter, hex, input, int, map, object, oct, open, print, range, str, zip, filter, reversed, enumerate
    py_raw_input, py_xrange = raw_input, xrange
    # Private copies of the builtins that the shims below must delegate to.
    _coconut_NotImplemented, _coconut_raw_input, _coconut_xrange, _coconut_int, _coconut_long, _coconut_print, _coconut_str, _coconut_unicode, _coconut_repr = NotImplemented, raw_input, xrange, int, long, print, str, unicode, repr
    from future_builtins import *
    # Text handling is unicode-first, as in Python 3.
    chr, str = unichr, unicode
    from io import open
    # Replacement base class providing the Python-3-style default __ne__
    # (derived from __eq__) missing on Python 2's object.
    class object(object):
        __slots__ = ()
        def __ne__(self, other):
            eq = self == other
            if eq is _coconut_NotImplemented:
                return eq  # propagate NotImplemented unchanged
            else:
                return not eq
    # Python-3-style range: wraps xrange and adds slicing, equality,
    # hashing, copying, and pickling support.
    class range(object):
        __slots__ = ("_xrange",)
        if hasattr(_coconut_xrange, "__doc__"):
            __doc__ = _coconut_xrange.__doc__
        def __init__(self, *args):
            self._xrange = _coconut_xrange(*args)
        def __iter__(self):
            return _coconut.iter(self._xrange)
        def __reversed__(self):
            return _coconut.reversed(self._xrange)
        def __len__(self):
            return _coconut.len(self._xrange)
        def __contains__(self, elem):
            return elem in self._xrange
        def __getitem__(self, index):
            if _coconut.isinstance(index, _coconut.slice):
                # Compose this range's (start, stop, step) with the slice's,
                # producing a new range without materializing any elements.
                args = _coconut.slice(*self._args)
                start, stop, step, ind_step = (args.start if args.start is not None else 0), args.stop, (args.step if args.step is not None else 1), (index.step if index.step is not None else 1)
                return self.__class__((start if ind_step >= 0 else stop - step) if index.start is None else start + step * index.start if index.start >= 0 else stop + step * index.start, (stop if ind_step >= 0 else start - step) if index.stop is None else start + step * index.stop if index.stop >= 0 else stop + step * index.stop, step if index.step is None else step * index.step)
            else:
                return self._xrange[index]
        def count(self, elem):
            """Count the number of times elem appears in the range."""
            # A range contains no duplicates, so the count is 0 or 1.
            return int(elem in self._xrange)
        def index(self, elem):
            """Find the index of elem in the range."""
            if elem not in self._xrange: raise _coconut.ValueError(_coconut.repr(elem) + " is not in range")
            start, _, step = self._xrange.__reduce_ex__(2)[1]
            return (elem - start) // step
        def __repr__(self):
            # Strip the leading 'x' from "xrange(...)" to read "range(...)".
            return _coconut.repr(self._xrange)[1:]
        @property
        def _args(self):
            # (start, stop, step) as passed to the underlying xrange.
            return self._xrange.__reduce__()[1]
        def __reduce_ex__(self, protocol):
            return (self.__class__, self._xrange.__reduce_ex__(protocol)[1])
        def __reduce__(self):
            return self.__reduce_ex__(_coconut.pickle.DEFAULT_PROTOCOL)
        def __hash__(self):
            return _coconut.hash(self._args)
        def __copy__(self):
            return self.__class__(*self._args)
        def __eq__(self, other):
            return _coconut.isinstance(other, self.__class__) and self._args == other._args
from collections import Sequence as _coconut_Sequence
_coconut_Sequence.register(range)
    # Python-3-style int: isinstance/issubclass checks against this class
    # accept both Python 2 int and long.
    class int(_coconut_int):
        __slots__ = ()
        if hasattr(_coconut_int, "__doc__"):
            __doc__ = _coconut_int.__doc__
        class __metaclass__(type):
            def __instancecheck__(cls, inst):
                return _coconut.isinstance(inst, (_coconut_int, _coconut_long))
            def __subclasscheck__(cls, subcls):
                return _coconut.issubclass(subcls, (_coconut_int, _coconut_long))
    from functools import wraps as _coconut_wraps
    # Python-3-style print: encode each argument to the target stream's
    # encoding (falling back to the default encoding) before printing.
    @_coconut_wraps(_coconut_print)
    def print(*args, **kwargs):
        file = kwargs.get("file", _coconut_sys.stdout)
        if _coconut.hasattr(file, "encoding") and file.encoding is not None:
            return _coconut_print(*(_coconut_unicode(x).encode(file.encoding) for x in args), **kwargs)
        else:
            return _coconut_print(*(_coconut_unicode(x).encode() for x in args), **kwargs)
    # Python-3-style input: read a raw line and decode it to unicode using
    # stdout's encoding when available.
    @_coconut_wraps(_coconut_raw_input)
    def input(*args, **kwargs):
        if _coconut.hasattr(_coconut_sys.stdout, "encoding") and _coconut_sys.stdout.encoding is not None:
            return _coconut_raw_input(*args, **kwargs).decode(_coconut_sys.stdout.encoding)
        else:
            return _coconut_raw_input(*args, **kwargs).decode()
    # Python-3-style repr: drop the leading "u" prefix from unicode reprs.
    @_coconut_wraps(_coconut_repr)
    def repr(obj):
        if isinstance(obj, _coconut_unicode):
            return _coconut_repr(obj)[1:]
        else:
            return _coconut_repr(obj)
    # Compiled code's ascii() delegates to the shimmed repr.
    ascii = repr
    # Disabled in favor of the Python-3-style input() above.
    def raw_input(*args):
        """Coconut uses Python 3 "input" instead of Python 2 "raw_input"."""
        raise _coconut.NameError('Coconut uses Python 3 "input" instead of Python 2 "raw_input"')
def xrange(*args):
"""Coconut uses Python 3 "range" instead of Python 2 "xrange"."""
raise _coconut.NameError('Coconut uses Python 3 "range" instead of Python 2 "xrange"')
if _coconut_sys.version_info < (2, 7):
    # functools.partial objects are not picklable before Python 2.7;
    # register a reconstructor with copy_reg so they round-trip.
    import functools as _coconut_functools, copy_reg as _coconut_copy_reg
    def _coconut_new_partial(func, args, keywords):
        return _coconut_functools.partial(func, *(args if args is not None else ()), **(keywords if keywords is not None else {}))
    _coconut_copy_reg.constructor(_coconut_new_partial)
    def _coconut_reduce_partial(self):
        return (_coconut_new_partial, (self.func, self.args, self.keywords))
    _coconut_copy_reg.pickle(_coconut_functools.partial, _coconut_reduce_partial)
else:
    # Keep the original builtins reachable under py_* names before they
    # are shadowed by the wrappers below.
    # NOTE(review): as written, the py_* aliases are only bound on
    # Python >= 2.7 — confirm that is intended.
    py_chr, py_filter, py_hex, py_input, py_int, py_map, py_object, py_oct, py_open, py_print, py_range, py_str, py_zip, py_filter, py_reversed, py_enumerate = chr, filter, hex, input, int, map, object, oct, open, print, range, str, zip, filter, reversed, enumerate
class _coconut(object):
    # Namespace class that snapshots the builtins and stdlib modules the
    # compiled code relies on, protecting them from the shadowing
    # definitions in this header (map, filter, zip, range, repr, ...).
    import collections, functools, imp, itertools, operator, types, copy, pickle
    if _coconut_sys.version_info >= (2, 7):
        OrderedDict = collections.OrderedDict
    else:
        # OrderedDict does not exist before 2.7; order is simply lost.
        OrderedDict = dict
    if _coconut_sys.version_info < (3, 3):
        # collections.abc was split out of collections in 3.3.
        abc = collections
    else:
        import collections.abc as abc
    IndexError, KeyError, NameError, TypeError, ValueError, classmethod, dict, enumerate, filter, frozenset, getattr, hasattr, hash, int, isinstance, issubclass, iter, len, list, map, min, max, next, object, property, range, reversed, set, slice, str, sum, super, tuple, zip, repr, bytearray = IndexError, KeyError, NameError, TypeError, ValueError, classmethod, dict, enumerate, filter, frozenset, getattr, hasattr, hash, int, isinstance, issubclass, iter, len, list, map, min, max, next, object, property, range, reversed, set, slice, str, sum, super, tuple, zip, staticmethod(repr), bytearray
class MatchError(Exception):
    """Pattern-matching error."""
    # Slots for the failing pattern and the value that did not match it.
    __slots__ = ("pattern", "value")
class _coconut_tail_call(Exception):
__slots__ = ("func", "args", "kwargs")
def __init__(self, func, *args, **kwargs):
self.func, self.args, self.kwargs = func, args, kwargs
def _coconut_tco(func):
    # Decorator implementing tail-call optimization: instead of recursing,
    # a decorated function raises _coconut_tail_call, which this trampoline
    # catches and turns into the next loop iteration.
    @_coconut.functools.wraps(func)
    def tail_call_optimized_func(*args, **kwargs):
        call_func = func
        while True:
            # The "_coconut_inside_tco" marker tells a nested TCO function
            # that it is already running inside a trampoline, so it should
            # call directly instead of starting another loop.
            try:
                del kwargs["_coconut_inside_tco"]
            except _coconut.KeyError:
                pass
            else:
                return call_func(*args, **kwargs)  # pass --no-tco to clean up your traceback
            if _coconut.hasattr(call_func, "_coconut_is_tco"):
                kwargs["_coconut_inside_tco"] = call_func._coconut_is_tco
            try:
                return call_func(*args, **kwargs)  # pass --no-tco to clean up your traceback
            except _coconut_tail_call as tail_call:
                # Unwind and re-enter the loop with the requested call.
                call_func, args, kwargs = tail_call.func, tail_call.args, tail_call.kwargs
    tail_call_optimized_func._coconut_is_tco = True
    return tail_call_optimized_func
def _coconut_igetitem(iterable, index):
    # Generalized indexing/slicing over (possibly lazy) iterables.
    if isinstance(iterable, (_coconut_reversed, _coconut_map, _coconut.filter, _coconut.zip, _coconut_enumerate, _coconut_count, _coconut.abc.Sequence)):
        # These types support __getitem__ directly.
        return iterable[index]
    elif not _coconut.isinstance(index, _coconut.slice):
        if index < 0:
            # A deque with maxlen=-index retains exactly the last -index
            # items, so [0] is the element at the negative index.
            return _coconut.collections.deque(iterable, maxlen=-index)[0]
        else:
            return _coconut.next(_coconut.itertools.islice(iterable, index, index + 1))
    elif index.start is not None and index.start < 0 and (index.stop is None or index.stop < 0) and index.step is None:
        # Slice taken entirely from the tail: buffer only the tail.
        queue = _coconut.collections.deque(iterable, maxlen=-index.start)
        if index.stop is not None:
            queue = _coconut.tuple(queue)[:index.stop - index.start]
        return queue
    elif (index.start is not None and index.start < 0) or (index.stop is not None and index.stop < 0) or (index.step is not None and index.step < 0):
        # Other negative index combinations require full materialization.
        return _coconut.tuple(iterable)[index]
    else:
        return _coconut.itertools.islice(iterable, index.start, index.stop, index.step)
class _coconut_compose(object):
__slots__ = ("funcs",)
def __init__(self, *funcs):
self.funcs = funcs
def __call__(self, *args, **kwargs):
arg = self.funcs[-1](*args, **kwargs)
for f in self.funcs[-2::-1]:
arg = f(arg)
return arg
def __repr__(self):
return "..".join(_coconut.repr(f) for f in self.funcs)
def __reduce__(self):
return (_coconut_compose, self.funcs)
# Runtime implementations of Coconut's pipeline operators (|>, |*>, <|,
# <*|), the short-circuiting boolean operators, and minus.
def _coconut_pipe(x, f): return f(x)
def _coconut_starpipe(xs, f): return f(*xs)
def _coconut_backpipe(f, x): return f(x)
def _coconut_backstarpipe(f, xs): return f(*xs)
def _coconut_bool_and(a, b): return a and b
def _coconut_bool_or(a, b): return a or b
# One argument means unary negation; two means subtraction.
def _coconut_minus(*args): return _coconut.operator.neg(*args) if len(args) < 2 else _coconut.operator.sub(*args)
@_coconut.functools.wraps(_coconut.itertools.tee)
def tee(iterable, n=2):
    # Optimized tee: immutable iterables (tuple/frozenset) can be shared
    # outright, and copyable iterables copied, avoiding itertools.tee's
    # internal buffering; everything else falls back to itertools.tee.
    if n >= 0 and _coconut.isinstance(iterable, (_coconut.tuple, _coconut.frozenset)):
        return (iterable,)*n
    elif n > 0 and (_coconut.hasattr(iterable, "__copy__") or _coconut.isinstance(iterable, _coconut.abc.Sequence)):
        return (iterable,) + _coconut.tuple(_coconut.copy.copy(iterable) for i in range(n - 1))
    else:
        return _coconut.itertools.tee(iterable, n)
class reversed(object):
    # Lazy reversed() wrapper that also supports indexing, len(), hashing,
    # pickling, copying, count/index, and fmap.
    __slots__ = ("_iter",)
    # Fix: the original tested hasattr(_coconut.map, "__doc__") here — a
    # copy/paste from the map wrapper below. The object being probed must
    # be the one whose __doc__ is copied, i.e. the builtin reversed.
    if hasattr(_coconut.reversed, "__doc__"):
        __doc__ = _coconut.reversed.__doc__
    def __new__(cls, iterable):
        # ranges reverse to ranges; lists/tuples (and anything without
        # __reversed__) get this lazy wrapper; everything else defers to
        # the builtin reversed.
        if _coconut.isinstance(iterable, _coconut.range):
            return iterable[::-1]
        elif not _coconut.hasattr(iterable, "__reversed__") or _coconut.isinstance(iterable, (_coconut.list, _coconut.tuple)):
            return _coconut.object.__new__(cls)
        else:
            return _coconut.reversed(iterable)
    def __init__(self, iterable):
        self._iter = iterable
    def __iter__(self):
        return _coconut.reversed(self._iter)
    def __getitem__(self, index):
        # Index from the back of the underlying iterable.
        # NOTE(review): the slice branch tests "if index.stop" (so a stop
        # of 0 behaves like None) while start uses "is not None" — confirm
        # this asymmetry is intended.
        if _coconut.isinstance(index, _coconut.slice):
            return _coconut_igetitem(self._iter, _coconut.slice(-(index.start + 1) if index.start is not None else None, -(index.stop + 1) if index.stop else None, -(index.step if index.step is not None else 1)))
        else:
            return _coconut_igetitem(self._iter, -(index + 1))
    def __reversed__(self):
        return self._iter
    def __len__(self):
        return _coconut.len(self._iter)
    def __repr__(self):
        return "reversed(" + _coconut.repr(self._iter) + ")"
    def __hash__(self):
        return -_coconut.hash(self._iter)
    def __reduce__(self):
        return (self.__class__, (self._iter,))
    def __reduce_ex__(self, _):
        return self.__reduce__()
    def __copy__(self):
        return self.__class__(_coconut.copy.copy(self._iter))
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self._iter == other._iter
    def __contains__(self, elem):
        return elem in self._iter
    def count(self, elem):
        """Count the number of times elem appears in the reversed iterator."""
        return self._iter.count(elem)
    def index(self, elem):
        """Find the index of elem in the reversed iterator."""
        return _coconut.len(self._iter) - self._iter.index(elem) - 1
    def __fmap__(self, func):
        return self.__class__(_coconut_map(func, self._iter))
class map(_coconut.map):
    # Lazy map wrapper that, unlike the builtin, also supports indexing,
    # slicing, len(), reversed(), copying, pickling, and fmap.
    __slots__ = ("_func", "_iters")
    if hasattr(_coconut.map, "__doc__"):
        __doc__ = _coconut.map.__doc__
    def __new__(cls, function, *iterables):
        new_map = _coconut.map.__new__(cls, function, *iterables)
        new_map._func, new_map._iters = function, iterables
        return new_map
    def __getitem__(self, index):
        # Slicing yields a new lazy map over the sliced iterables; a
        # scalar index applies the function to the indexed elements.
        if _coconut.isinstance(index, _coconut.slice):
            return self.__class__(self._func, *(_coconut_igetitem(i, index) for i in self._iters))
        else:
            return self._func(*(_coconut_igetitem(i, index) for i in self._iters))
    def __reversed__(self):
        return self.__class__(self._func, *(_coconut_reversed(i) for i in self._iters))
    def __len__(self):
        # A map is as long as its shortest input iterable.
        return _coconut.min(_coconut.len(i) for i in self._iters)
    def __repr__(self):
        return "map(" + _coconut.repr(self._func) + ", " + ", ".join((_coconut.repr(i) for i in self._iters)) + ")"
    def __reduce__(self):
        return (self.__class__, (self._func,) + self._iters)
    def __reduce_ex__(self, _):
        return self.__reduce__()
    def __copy__(self):
        return self.__class__(self._func, *_coconut_map(_coconut.copy.copy, self._iters))
    def __fmap__(self, func):
        # fmap over a map fuses into one map with composed functions.
        return self.__class__(_coconut_compose(func, self._func), *self._iters)
class parallel_map(map):
    """Multiprocessing implementation of map using concurrent.futures.
    Requires arguments to be pickleable."""
    __slots__ = ()
    def __iter__(self):
        from concurrent.futures import ProcessPoolExecutor
        # Evaluates eagerly in the process pool, then iterates over the
        # materialized tuple of results.
        with ProcessPoolExecutor() as executor:
            return _coconut.iter(_coconut.tuple(executor.map(self._func, *self._iters)))
    def __repr__(self):
        return "parallel_" + _coconut_map.__repr__(self)
class concurrent_map(map):
    """Multithreading implementation of map using concurrent.futures."""
    __slots__ = ()
    def __iter__(self):
        from concurrent.futures import ThreadPoolExecutor
        from multiprocessing import cpu_count  # cpu_count() * 5 is the default Python 3 thread count
        # Evaluates eagerly in the thread pool, then iterates over the
        # materialized tuple of results.
        with ThreadPoolExecutor(cpu_count() * 5) as executor:
            return _coconut.iter(_coconut.tuple(executor.map(self._func, *self._iters)))
    def __repr__(self):
        return "concurrent_" + _coconut_map.__repr__(self)
class filter(_coconut.filter):
    # Lazy filter wrapper adding reversed(), repr, pickling, copying, fmap.
    __slots__ = ("_func", "_iter")
    if hasattr(_coconut.filter, "__doc__"):
        __doc__ = _coconut.filter.__doc__
    def __new__(cls, function, iterable):
        new_filter = _coconut.filter.__new__(cls, function, iterable)
        new_filter._func, new_filter._iter = function, iterable
        return new_filter
    def __reversed__(self):
        return self.__class__(self._func, _coconut_reversed(self._iter))
    def __repr__(self):
        return "filter(" + _coconut.repr(self._func) + ", " + _coconut.repr(self._iter) + ")"
    def __reduce__(self):
        return (self.__class__, (self._func, self._iter))
    def __reduce_ex__(self, _):
        return self.__reduce__()
    def __copy__(self):
        return self.__class__(self._func, _coconut.copy.copy(self._iter))
    def __fmap__(self, func):
        return _coconut_map(func, self)
class zip(_coconut.zip):
    # Lazy zip wrapper adding indexing, slicing, len(), reversed(),
    # pickling, copying, and fmap.
    __slots__ = ("_iters",)
    if hasattr(_coconut.zip, "__doc__"):
        __doc__ = _coconut.zip.__doc__
    def __new__(cls, *iterables):
        new_zip = _coconut.zip.__new__(cls, *iterables)
        new_zip._iters = iterables
        return new_zip
    def __getitem__(self, index):
        # Slicing zips the sliced iterables; a scalar index returns the
        # tuple of elements at that position.
        if _coconut.isinstance(index, _coconut.slice):
            return self.__class__(*(_coconut_igetitem(i, index) for i in self._iters))
        else:
            return _coconut.tuple(_coconut_igetitem(i, index) for i in self._iters)
    def __reversed__(self):
        return self.__class__(*(_coconut_reversed(i) for i in self._iters))
    def __len__(self):
        return _coconut.min(_coconut.len(i) for i in self._iters)
    def __repr__(self):
        return "zip(" + ", ".join((_coconut.repr(i) for i in self._iters)) + ")"
    def __reduce__(self):
        return (self.__class__, self._iters)
    def __reduce_ex__(self, _):
        return self.__reduce__()
    def __copy__(self):
        return self.__class__(*_coconut_map(_coconut.copy.copy, self._iters))
    def __fmap__(self, func):
        return _coconut_map(func, self)
class enumerate(_coconut.enumerate):
    # Lazy enumerate wrapper adding indexing, slicing, len(), pickling,
    # copying, and fmap.
    __slots__ = ("_iter", "_start")
    if hasattr(_coconut.enumerate, "__doc__"):
        __doc__ = _coconut.enumerate.__doc__
    def __new__(cls, iterable, start=0):
        new_enumerate = _coconut.enumerate.__new__(cls, iterable, start)
        new_enumerate._iter, new_enumerate._start = iterable, start
        return new_enumerate
    def __getitem__(self, index):
        # Slicing shifts the start so that the counters still line up with
        # the original positions; a scalar index returns one (count, item).
        if _coconut.isinstance(index, _coconut.slice):
            return self.__class__(_coconut_igetitem(self._iter, index), self._start + (0 if index.start is None else index.start if index.start >= 0 else len(self._iter) + index.start))
        else:
            return (self._start + index, _coconut_igetitem(self._iter, index))
    def __len__(self):
        return _coconut.len(self._iter)
    def __repr__(self):
        return "enumerate(" + _coconut.repr(self._iter) + ", " + _coconut.repr(self._start) + ")"
    def __reduce__(self):
        return (self.__class__, (self._iter, self._start))
    def __reduce_ex__(self, _):
        return self.__reduce__()
    def __copy__(self):
        return self.__class__(_coconut.copy.copy(self._iter), self._start)
    def __fmap__(self, func):
        return _coconut_map(func, self)
class count(object):
    """count(start, step) returns an infinite iterator starting at start and increasing by step."""
    __slots__ = ("_start", "_step")
    def __init__(self, start=0, step=1):
        self._start, self._step = start, step
    def __iter__(self):
        # Fix: iterate over a local cursor instead of mutating self._start.
        # The original advanced the shared attribute in place, so a
        # partially consumed iterator changed the object's __repr__,
        # __hash__, __eq__, and __contains__ results, and a second iter()
        # resumed where the first left off instead of restarting.
        current = self._start
        while True:
            yield current
            current += self._step
    def __contains__(self, elem):
        # NOTE(review): assumes a positive step (elem >= start) — confirm
        # behavior for negative steps.
        return elem >= self._start and (elem - self._start) % self._step == 0
    def __getitem__(self, index):
        if _coconut.isinstance(index, _coconut.slice) and (index.start is None or index.start >= 0) and (index.stop is None or index.stop >= 0):
            if index.stop is None:
                # Open-ended slice of a count is another count.
                return self.__class__(self._start + (index.start if index.start is not None else 0), self._step * (index.step if index.step is not None else 1))
            elif _coconut.isinstance(self._start, _coconut.int) and _coconut.isinstance(self._step, _coconut.int):
                # Integer counts slice to plain ranges.
                return _coconut.range(self._start + self._step * (index.start if index.start is not None else 0), self._start + self._step * index.stop, self._step * (index.step if index.step is not None else 1))
            else:
                return _coconut_map(self.__getitem__, _coconut.range(index.start if index.start is not None else 0, index.stop, index.step if index.step is not None else 1))
        elif index >= 0:
            return self._start + self._step * index
        else:
            raise _coconut.IndexError("count indices must be positive")
    def count(self, elem):
        """Count the number of times elem appears in the count."""
        return int(elem in self)
    def index(self, elem):
        """Find the index of elem in the count."""
        if elem not in self:
            raise _coconut.ValueError(_coconut.repr(elem) + " is not in count")
        return (elem - self._start) // self._step
    def __repr__(self):
        return "count(" + _coconut.str(self._start) + ", " + _coconut.str(self._step) + ")"
    def __hash__(self):
        return _coconut.hash((self._start, self._step))
    def __reduce__(self):
        return (self.__class__, (self._start, self._step))
    def __copy__(self):
        return self.__class__(self._start, self._step)
    def __eq__(self, other):
        return isinstance(other, self.__class__) and self._start == other._start and self._step == other._step
    def __fmap__(self, func):
        return _coconut_map(func, self)
def recursive_iterator(func):
    """Decorates a function by optimizing it for iterator recursion.
    Requires function arguments to be pickleable."""
    # Memoize results keyed by the pickled (args, kwargs) so recursive
    # calls re-tee the cached iterator instead of recomputing it.
    tee_store = {}
    @_coconut.functools.wraps(func)
    def recursive_iterator_func(*args, **kwargs):
        hashable_args_kwargs = _coconut.pickle.dumps((args, kwargs), _coconut.pickle.HIGHEST_PROTOCOL)
        try:
            to_tee = tee_store[hashable_args_kwargs]
        except _coconut.KeyError:
            to_tee = func(*args, **kwargs)
        # Keep one copy for future calls and hand the other back.
        tee_store[hashable_args_kwargs], to_return = _coconut_tee(to_tee)
        return to_return
    return recursive_iterator_func
def addpattern(base_func):
    """Decorator to add a new case to a pattern-matching function, where the new case is checked last."""
    def pattern_adder(func):
        @_coconut.functools.wraps(func)
        @_coconut_tco
        def add_pattern_func(*args, **kwargs):
            # Try the existing cases first; on a pattern-match failure,
            # fall through to the newly added case via a tail call.
            try:
                return base_func(*args, **kwargs)
            except _coconut_MatchError:
                raise _coconut_tail_call(func, *args, **kwargs)
        return add_pattern_func
    return pattern_adder
def prepattern(base_func):
    """Decorator to add a new case to a pattern-matching function, where the new case is checked first."""
    def _prepend_case(new_func):
        # Prepending is appending with the roles swapped: the new function
        # becomes the primary case and base_func its fallback.
        return addpattern(new_func)(base_func)
    return _prepend_case
class _coconut_partial(object):
    # Implements Coconut's $-style partial application which, unlike
    # functools.partial, can leave positional holes ("?") to be filled by
    # later call arguments.
    __slots__ = ("func", "_argdict", "_arglen", "_stargs", "keywords")
    if hasattr(_coconut.functools.partial, "__doc__"):
        __doc__ = _coconut.functools.partial.__doc__
    def __init__(self, func, argdict, arglen, *args, **kwargs):
        # argdict maps positional index -> pre-supplied value; arglen is
        # the number of leading positional slots (filled or holes).
        self.func, self._argdict, self._arglen, self._stargs, self.keywords = func, argdict, arglen, args, kwargs
    def __reduce__(self):
        return (self.__class__, (self.func, self._argdict, self._arglen) + self._stargs, self.keywords)
    def __setstate__(self, keywords):
        self.keywords = keywords
    @property
    def args(self):
        # Holes appear as None in this functools.partial-style view.
        return _coconut.tuple(self._argdict.get(i) for i in _coconut.range(self._arglen)) + self._stargs
    def __call__(self, *args, **kwargs):
        # Interleave the stored arguments with the call arguments,
        # consuming call arguments left to right to fill the holes.
        callargs = []
        argind = 0
        for i in _coconut.range(self._arglen):
            if i in self._argdict:
                callargs.append(self._argdict[i])
            elif argind >= _coconut.len(args):
                raise _coconut.TypeError("expected at least " + _coconut.str(self._arglen - _coconut.len(self._argdict)) + " argument(s) to " + _coconut.repr(self))
            else:
                callargs.append(args[argind])
                argind += 1
        callargs += self._stargs
        callargs += args[argind:]
        kwargs.update(self.keywords)
        return self.func(*callargs, **kwargs)
    def __repr__(self):
        # Render unfilled slots as "?" in func$(...) notation.
        args = []
        for i in _coconut.range(self._arglen):
            if i in self._argdict:
                args.append(_coconut.repr(self._argdict[i]))
            else:
                args.append("?")
        for arg in self._stargs:
            args.append(_coconut.repr(arg))
        return _coconut.repr(self.func) + "$(" + ", ".join(args) + ")"
class datamaker(object):
    # Returns the underlying constructor of a Coconut data type (or any
    # namedtuple-like class), bypassing an overridden __new__.
    __slots__ = ("data_type",)
    def __new__(cls, data_type):
        # namedtuple-like types get this wrapper (which builds via _make);
        # anything else gets a partial over the base-class __new__.
        if _coconut.hasattr(data_type, "_make") and (_coconut.issubclass(data_type, _coconut.tuple) or _coconut.isinstance(data_type, _coconut.tuple)):
            return _coconut.object.__new__(cls)
        else:
            return _coconut.functools.partial(_coconut.super(data_type, data_type).__new__, data_type)
    def __init__(self, data_type):
        self.data_type = data_type
    def __call__(self, *args, **kwargs):
        return self.data_type._make(args, **kwargs)
    def __repr__(self):
        # Fix: the original referenced the undefined global name
        # ``data_type``, so taking repr() of a datamaker always raised
        # NameError; use the stored attribute instead.
        return "datamaker(" + _coconut.repr(self.data_type) + ")"
    def __reduce__(self):
        return (_coconut_datamaker, (self.data_type,))
def consume(iterable, keep_last=0):
    """Fully exhaust iterable and return the last keep_last elements."""
    # With the default keep_last=0 the deque stores nothing at all.
    return _coconut.collections.deque(iterable, maxlen=keep_last)  # fastest way to exhaust an iterator
def fmap(func, obj):
    """Creates a copy of obj with func applied to its contents."""
    # Objects may opt in to custom behavior by defining __fmap__.
    if _coconut.hasattr(obj, "__fmap__"):
        return obj.__fmap__(func)
    args = _coconut_map(func, obj)
    if _coconut.isinstance(obj, _coconut.dict):
        # Iterating a dict yields keys: map over the keys and re-pair
        # them with the original values before rebuilding.
        args = _coconut_zip(args, obj.values())
    if _coconut.isinstance(obj, _coconut.tuple) and _coconut.hasattr(obj, "_make"):
        # namedtuple / Coconut data type: rebuild via _make.
        return obj._make(args)
    if _coconut.isinstance(obj, (_coconut.map, _coconut.range)):
        # Lazy inputs stay lazy.
        return args
    if _coconut.isinstance(obj, _coconut.str):
        return "".join(args)
    return obj.__class__(args)
_coconut_MatchError, _coconut_count, _coconut_enumerate, _coconut_reversed, _coconut_map, _coconut_tee, _coconut_zip, reduce, takewhile, dropwhile = MatchError, count, enumerate, reversed, map, tee, zip, _coconut.functools.reduce, _coconut.itertools.takewhile, _coconut.itertools.dropwhile
| |
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
import re
import pkg_resources
import os
import logging
import struct
import json
try:
    import hashlib
    md5 = hashlib.md5
except ImportError:
    # Fallback for Python < 2.5, where hashlib does not exist and md5 is
    # its own module; md5.new matches hashlib.md5's call signature.
    import md5
    md5 = md5.new
# import codecs
import numpy as np
from numpy import ma
from seabird.exceptions import CNVError
from seabird.utils import load_rule
module_logger = logging.getLogger('seabird.cnv')
class CNV(object):
    """ Main class to parse the .cnv style content

        Input:
            raw_text [String]: The full content of the .cnv file.

        Output:
            This class responds as it was a dictionary of variables,
              and each hash has a Masked Array.

        Ex.:
            f = open("CTD.cnv")
            text = f.read()
            profile = CNV(text)
            profile.keys()  # Return the available variables
            profile['temperature']  # Return the temperature sensor as a
                                    #   masked array
            profile['timeS']  # Return the time in Seconds
            profile.attrs  # Return a dictionary with the file header
    """
    def __init__(self, raw_text, defaults=None):
        module_logger.debug('Initializing CNV class')
        # Clean empty lines first
        self.raw_text = re.sub(r'\n\s*(?=\n)', '', raw_text)
        self.defaults = defaults
        self.attrs = {}
        # ----
        self.rule, self.parsed = load_rule(self.raw_text)
        # NOTE(review): self.parsed is always set by the assignment above,
        # so this guard looks unreachable — confirm load_rule's contract.
        if not hasattr(self, 'parsed'):
            return
        self.get_intro()
        self.get_attrs()
        self.prepare_data()
        self.get_datetime()
        self.get_location()
        # Think well how/where to implement this. It should overwrite
        #   the regular attributes input, but might be necessary to load
        #   the real attributes to respond right.
        #   It definitely should not be here, but inside some function.
        try:
            for k in defaults['attrs']:
                self.attrs[k] = defaults['attrs'][k]
        except (TypeError, KeyError):
            # defaults is None or carries no 'attrs' entry; best-effort.
            pass
        if 'bindata' in self.raw_data().keys():
            self.load_bindata()
        elif 'bottledata' in self.raw_data().keys():
            self.load_bottledata()
        else:
            self.load_data()
        self.products()
        self.check_consistency()

    def keys(self):
        """ Return the available keys in self.data
        """
        return [d.attrs['name'] for d in self.data]

    def __getitem__(self, key):
        """ Return the key array from self.data
        """
        for d in self.data:
            if d.attrs['name'] == key:
                return d
        raise KeyError('%s not found' % key)

    @property
    def attributes(self):
        # Alias kept for backward compatibility with older callers.
        return self.attrs

    def raw_header(self):
        """ Return the raw header block, split by the rule's groups. """
        r = self.rule['header'] + self.rule['sep']
        content_re = re.compile(r, re.VERBOSE)
        return content_re.search(self.raw_text).groupdict()

    def raw_data(self):
        """ Return the raw data block, split by the rule's groups. """
        if ('instrument_type' in self.attrs) and \
                self.attrs['instrument_type'] == 'CTD-bottle':
            return {'bottledata': self.parsed['data']}
        r = self.rule['sep'] + self.rule['data']
        content_re = re.compile(r, re.VERBOSE)
        return content_re.search(self.raw_text).groupdict()

    def get_intro(self):
        """ Parse the intro part of the header
        """
        for k in self.rule['intro'].keys():
            pattern = re.compile(self.rule['intro'][k], re.VERBOSE)
            if pattern.search(self.parsed['intro']):
                self.attrs[k] = pattern.search(
                    self.parsed['intro']
                ).groupdict()['value']
                # Consume the matched text so later rules don't re-match.
                self.parsed['intro'] = pattern.sub(
                    '', self.parsed['intro'], count=1)
        try:
            self.attrs['instrument_type'] = \
                self.rule['attributes']['instrument_type']
        except KeyError:
            # The rule does not pin an instrument type; guess it from the
            # Sea-Bird model number.
            if 'sbe_model' in self.attrs:
                if self.attrs['sbe_model'] in ['9', '17', '19plus',
                                               '19plus V2']:
                    self.attrs['instrument_type'] = 'CTD'
                elif self.attrs['sbe_model'] in ['21', '45']:
                    self.attrs['instrument_type'] = 'TSG'

    def get_attrs(self):
        """ Parse the variable descriptors block of the header. """
        for k in self.rule['descriptors'].keys():
            pattern = re.compile(self.rule['descriptors'][k], re.VERBOSE)
            if pattern.search(self.parsed['descriptors']):
                self.attrs[k] = pattern.search(
                    self.parsed['descriptors']
                ).groupdict()['value']
                self.parsed['descriptors'] = \
                    pattern.sub('', self.parsed['descriptors'], count=1)
        # ----
        # Temporary solution. Failsafe MD5
        try:
            self.attrs['md5'] = md5(
                self.raw_text.encode('utf-8')
            ).hexdigest()
        except UnicodeError:
            # Python 2 byte strings with non-ASCII content cannot be
            # utf-8 encoded directly; go through latin1 first.
            self.attrs['md5'] = md5(
                self.raw_text.decode(
                    'latin1', 'replace'
                ).encode('utf-8')
            ).hexdigest()

    def prepare_data(self):
        """ Initialize self.ids/self.data with one (empty) masked array
            per variable described in the header, carrying id/name/span
            attributes. The actual values are filled by load_*data().
        """
        attrib_text = self.parsed['descriptors']
        self.data = []
        self.ids = []
        # ----
        rule_file = "rules/refnames.json"
        text = pkg_resources.resource_string(__name__, rule_file)
        # Fix: json.loads() lost its long-deprecated "encoding" argument
        # in Python 3.9; the text is already decoded, so it is unneeded.
        refnames = json.loads(text.decode('utf-8'))
        # ---- Parse fields
        if ('attrs' in self.rule) and \
                (self.rule['attrs']['instrument_type'] == 'CTD-bottle'):
            attrib_text = re.search(r"""\n \s+ Bottle \s+ Date \s+ (.*) \s*\r?\n \s+ Position \s+ Time""", self.parsed['header'], re.VERBOSE).group(1)
            pattern = re.compile(r"""(?P<varname>[-|+|\w|\.|/]+)""", re.VERBOSE)
            # The first three columns of a bottle file are fixed.
            self.ids = [0, 1, 2]
            self.data = [ma.array([]), ma.array([]), ma.array([])]
            self.data[0].attrs = {
                'id': 0,
                'name': 'bottle'}
            self.data[1].attrs = {
                'id': 1,
                'name': 'date'}
            self.data[2].attrs = {
                'id': 2,
                'name': 'time'}
            for x in pattern.finditer(str(attrib_text)):
                self.ids.append(len(self.ids))
                self.data.append(ma.array([]))
                try:
                    # Normalize the header name via the reference table.
                    reference = refnames[x.groupdict()['varname']]
                    varname = reference['name']
                    #longname = reference['longname']
                except KeyError:
                    varname = x.groupdict()['varname']
                self.data[-1].attrs = {
                    'id': self.ids[-1],
                    'name': varname,
                    #'longname': x.groupdict()['longname'],
                }
            return
        pattern = re.compile(self.rule['fieldname'], re.VERBOSE)
        for x in pattern.finditer(str(attrib_text)):
            self.ids.append(int(x.groupdict()['id']))
            try:
                # Normalize the header name via the reference table.
                reference = refnames[x.groupdict()['name']]
                name = reference['name']
            except KeyError:
                name = x.groupdict()['name']
            self.data.append(ma.array([]))
            self.data[-1].attrs = {
                'id': (x.groupdict()['id']),
                'name': name,
                'longname': x.groupdict()['longname'],
            }
        attrib_text = pattern.sub('', attrib_text)
        # ---- Load span limits on each list item
        pattern = re.compile(self.rule['fieldspan'], re.VERBOSE)
        for x in pattern.finditer(str(attrib_text)):
            i = self.ids.index(int(x.groupdict()['id']))
            self.data[i].attrs['span'] = [
                x.groupdict()['valuemin'].strip(),
                x.groupdict()['valuemax'].strip()]
        attrib_text = pattern.sub('', attrib_text)

    def load_data(self):
        """
            Sure there is a better way to do it.

            Think about, should I do things using nvalues as expected
              number of rows? Maybe do it free, and on checks, validate it.
              In the case of an incomplete file, I think I should load it
              anyways, and the check alerts me that it is missing data.

            There is a problem here. This atol is just a temporary solution,
              but it's not the proper way to handle it.
        """
        data_rows = re.sub(
            r'(\n\s*)+\n', '\n',
            re.sub(r'\r\n', '\n', self.raw_data()['data'])
        ).split('\n')[:-1]
        # Fix: np.float was removed from NumPy (deprecated in 1.20,
        # removed in 1.24); it was always an alias for the builtin float.
        data = ma.masked_values(
            np.array(
                [CNV.__split_row(d) for d in data_rows], dtype=float),
            float(self.attrs['bad_flag']),
            atol=1e-30)
        # Maybe use np.fromstring(data, sep=" ")
        for i in self.ids:
            # Preserve the attrs dict across the column re-assignment.
            attrs = self.data[i].attrs
            self.data[i] = data[:, i]
            self.data[i].attrs = attrs
        # ma.masked_all(int(self.attrs['nvalues']))

    @staticmethod
    def __split_row(row):
        """
        Splits rows based on position. Seabird cnv files delimit fields after
        11 positions

        :param row: string representation of a row that needs to be split into
            fields
        :return: list of fields as strings
        """
        n = 11  # number of chars per row
        return [row[start:start+n].strip() for start in range(0, len(row), n)]

    def load_bindata(self):
        """ Load the data stored as a binary block of packed floats. """
        content = self.raw_data()['bindata']
        nvars = len(self.ids)
        fmt = nvars*'f'
        linesize = struct.calcsize(fmt)
        output = []
        # FIXME: This does not allow to read the most it can from a
        #   corrupted file, i.e. incomplete file.
        # Fix: floor division is required on Python 3, where "/" returns a
        # float that range() rejects with a TypeError.
        for n in range(len(content) // linesize):
            output.append(struct.unpack_from(fmt, content, n*linesize))
        data = ma.masked_values(
            output,
            float(self.attrs['bad_flag']),
            atol=1e-30)
        for i in self.ids:
            attrs = self.data[i].attrs
            self.data[i] = data[:, i]
            self.data[i].attrs = attrs

    def load_bottledata(self):
        """ Load the data of a bottle (.btl style) record block. """
        content = self.raw_data()['bottledata']
        nvars = len(self.ids)
        for rec in re.finditer(self.rule['data'], content, re.VERBOSE):
            attrs = self.data[0].attrs
            self.data[0] = np.append(self.data[0],
                                     int(rec.groupdict()['bottle']))
            self.data[0].attrs = attrs
            d = datetime.strptime(rec.groupdict()['date'].strip(), '%b %d %Y')
            attrs = self.data[1].attrs
            self.data[1] = np.append(self.data[1], d.date())
            self.data[1].attrs = attrs
            d = datetime.strptime(rec.groupdict()['time'].strip(), '%H:%M:%S')
            attrs = self.data[2].attrs
            self.data[2] = np.append(self.data[2], d.time())
            self.data[2].attrs = attrs
            # The remaining columns follow the fixed bottle/date/time trio.
            for n, v in enumerate(re.findall(r'[-|+|\w|\.]+',
                                             rec.groupdict()['values']),
                                  start=3):
                attrs = self.data[n].attrs
                self.data[n] = np.append(self.data[n], v)
                self.data[n].attrs = attrs

    def products(self):
        """
            To think about, should I really estimate the products,
              or should they be estimated on the fly, on demand?

            To Think About!! :
              I'm not sure what would be the best way to handle,
                timeQ. I actually couldn't find a definition of what
                is that. PyCurrents (Eric) considers the seconds from
                2010-1-1. It's probably a good solution.
              For now, I'll use the just the incremental time. At
                some point I defined the datetime before, so what
                matters now is the increment.
              If I have the timeQ, I must have a NMEA (Time), and
                Wait a minute, the NMEA Time is probably when the
                header is opened, not necessarily when the rossette was
                switched on. I'll just follow Eric for now.
        """
        if ('timeS' not in self.keys()):
            if ('timeJ' in self.keys()):
                j0 = int(self.attrs['datetime'].date().strftime('%j'))
                t0 = self.attrs['datetime'].time()
                t0 = (t0.hour*60+t0.minute)*60+t0.second
                # I need to subtract one day, but I'm not so sure why should I.
                # dref = datetime(self.attrs['datetime'].year,1,1) \
                #         - timedelta(days=1) \
                #         - self.attrs['datetime']
                # dJ0 = datetime(dref.year,1,1)
                timeS = ma.masked_all(
                    self['timeJ'].shape, self['timeJ'].dtype)
                timeS.set_fill_value(float(self.attrs['bad_flag']))
                ind = np.nonzero(~ma.getmaskarray(self['timeJ']))[0]
                try:
                    timeS[ind] = ma.array([
                        timedelta(days=t).total_seconds() - t0
                        for t in self['timeJ'][ind]-j0])
                    # ma.array( [(dref + timedelta(float(d))).total_seconds()
                    #     for d in self['timeJ'][ind]])
                except AttributeError:
                    # timedelta.total_seconds() only exists on Python >= 2.7.
                    D = [timedelta(days=t) for t in self['timeJ'][ind]-j0]
                    # D = [(dref + timedelta(float(d)))
                    #     for d in self['timeJ'][ind]]
                    timeS[ind] = ma.array([
                        d.days * 86400 + d.seconds - t0 for d in D])
            elif ('timeQ' in self.keys()):
                # yref = self.attrs['datetime'].year - \
                #     int(self['timeQ'].min()/86400./365.25
                # dref = datetime(yref,1,1)
                # timeS[ind] = self['timeQ'][ind] - self['timeQ'].min()
                timeS = ma.masked_all(
                    self['timeQ'].shape, self['timeQ'].dtype)
                timeS.set_fill_value(float(self.attrs['bad_flag']))
                ind = np.nonzero(~ma.getmaskarray(self['timeQ']))[0]
                try:
                    dref = (self.attrs['datetime'] -
                            datetime(2000, 1, 1)).total_seconds()
                except AttributeError:
                    # timedelta.total_seconds() only exists on Python >= 2.7.
                    dref = (self.attrs['datetime'] -
                            datetime(2000, 1, 1))
                    dref = dref.days*24*60*60+dref.seconds
                timeS = self['timeQ'] - dref
            else:
                return
            self.data.append(timeS)
            self.data[-1].attrs = {'name': 'timeS'}
            # Fix: record the index of the element just appended; the
            # original pushed len(self.data), which points one past the
            # end of self.data.
            self.ids.append(len(self.data) - 1)

    def get_datetime(self):
        """ Extract the reference date and time

            !!! ATENTION, better move it to a rule in the rules.
        """
        # datetime.strptime('Aug 28 2008 12:33:46','%b %d %Y %H:%M:%S')
        # Needed to include an :21, because some cases has a [bla bla]
        #   after. It's probably not the best solution.
        self.attrs['datetime'] = datetime.strptime(
            self.attrs['start_time'][:20], '%b %d %Y %H:%M:%S')

    def get_location(self):
        """ Extract the station location (Lat, Lon)

            Sometimes the CTD unit station is not connected to the GPS, so
              it's written manually in the headerblob. In that case, I'll
              try to extract it.

            !! ATENTION!!! Might be a good idea to store lat,lon as floats
              with min. and sec. as fractions.

            On some old format files, the notes where stored with single
              * instead of **. One possible solution is if can't load from
              notes, try to load from intro.

            In the rules, it is set to use only . as separator for the
              decimals of the minutes. Might be a good idea to allow \.|\,
              but on that case I would need to substitute , by . for proper
              load as a float.
        """
        if ('LATITUDE' in self.attrs) and \
                (re.search(self.rule['LATITUDE'],
                           self.attrs['LATITUDE'],
                           re.VERBOSE)):
            lat = re.search(self.rule['LATITUDE'],
                            self.attrs['LATITUDE'],
                            re.VERBOSE).groupdict()
        elif ('notes' in self.raw_header().keys()) and \
                re.search(self.rule['LATITUDE'],
                          self.raw_header()['notes'],
                          re.VERBOSE):
            lat = re.search(self.rule['LATITUDE'],
                            self.raw_header()['notes'],
                            re.VERBOSE).groupdict()
        try:
            lat_deg = int(lat['degree'])
            lat_min = float(lat['minute'])
            # self.attrs['lat_deg'] = lat_deg
            # self.attrs['lat_min'] = lat_min
            self.attrs['LATITUDE'] = lat_deg + lat_min/60.
            if lat['hemisphere'] in ['S', 's']:
                self.attrs['LATITUDE'] = -self.attrs['LATITUDE']
        except (NameError, KeyError, TypeError, ValueError):
            # No latitude found (lat unbound) or the groups did not parse
            # as numbers; leave LATITUDE as-is.
            pass
            # self.attrs['LATITUDE'] = None
        if ('LONGITUDE' in self.attrs) and \
                (re.search(self.rule['LONGITUDE'],
                           self.attrs['LONGITUDE'],
                           re.VERBOSE)):
            lon = re.search(self.rule['LONGITUDE'],
                            self.attrs['LONGITUDE'],
                            re.VERBOSE).groupdict()
        elif ('notes' in self.raw_header().keys()) and \
                (re.search(self.rule['LONGITUDE'],
                           self.raw_header()['notes'],
                           re.VERBOSE)):
            lon = re.search(self.rule['LONGITUDE'],
                            self.raw_header()['notes'],
                            re.VERBOSE).groupdict()
        try:
            lon_deg = int(lon['degree'])
            lon_min = float(lon['minute'])
            # self.attrs['lon_deg'] = lon_deg
            # self.attrs['lon_min'] = lon_min
            self.attrs['LONGITUDE'] = lon_deg + lon_min/60.
            if lon['hemisphere'] in ['W', 'w']:
                self.attrs['LONGITUDE'] = \
                    -self.attrs['LONGITUDE']
        except (NameError, KeyError, TypeError, ValueError):
            # No longitude found or the groups did not parse as numbers.
            pass
            # self.attrs['LONGITUDE'] = None

    def as_DataFrame(self):
        """ Return the data as a pandas.DataFrame

            ATENTION, I should improve this.
        """
        try:
            import pandas as pd
        except ImportError:
            module_logger.warning("I'm not able to import pandas")
            return
        output = {}
        for k in self.keys():
            # Replace masked values with NaN for a plain float column.
            tmp = self[k].data
            tmp[self[k].mask] = np.nan
            output[k] = tmp
        output = pd.DataFrame(output)
        output['LATITUDE'] = self.attrs['LATITUDE']
        output['LONGITUDE'] = self.attrs['LONGITUDE']
        return output

    def check_consistency(self):
        """ Some consistency checks

            Check if the dataset is consistent with the info from the
              header.

            Might be a good idea to move these tests outside the
              class.
        """
        if 'nquan' in self.attrs:
            # Check if the number of variables is equal to nquan
            nquan = int(self.attrs['nquan'])
            if nquan != len(self.keys()):
                module_logger.warning(
                    "It was supposed to has %s variables." % (nquan))
        if 'nvalues' in self.attrs:
            # Check if each variable have nvalues
            nvalues = int(self.attrs['nvalues'])
            for k in self.keys():
                if len(self[k]) != nvalues:
                    module_logger.warning(
                        ("\033[91m%s was supposed to has %s values, "
                         "but found only %s.\033[0m") %
                        (k, nvalues, len(self[k])))
class fCNV(CNV):
    """ The same of CNV class, but the input is a filename
          instead of the straight text.

        Input:
            filename [String]: The path/filename to the CTD file.

        Output:
            This class responds as it was a dictionary of variables,
              and each hash has a Masked Array.

        Check out the doc of the class CNV for more details.

        Ex.:
            profile = fCNV("~/data/CTD.cnv")
            profile.keys()  # Return the available variables
            profile.attrs  # Return a dictionary with the file header
              masked array
    """
    def __init__(self, filename, defaultsfile=None):
        """ Read `filename` and delegate parsing to CNV.__init__.

            filename [String]: path to the .cnv file.
            defaultsfile [String]: optional path to a defaults file.
              Despite the old "yaml" note, it is parsed with json.loads.
        """
        module_logger.debug('Initializing fCNV class with file: %s' % filename)
        self.filename = filename

        try:
            # Python 3 requires this.
            with open(filename, "r", encoding="utf-8", errors="replace") as f:
                text = f.read()
        except TypeError:
            # Python 2's open() does not accept encoding/errors kwargs.
            with open(filename, "r") as f:
                text = f.read()

        # If defaultsfile is given, it is parsed as JSON (not YAML).
        if defaultsfile:
            with open(defaultsfile) as f:
                defaults = json.loads(f.read())
        else:
            defaults = None

        try:
            super(fCNV, self).__init__(text, defaults)
        except CNVError as e:
            # Enrich the "no parsing rule" error with the offending file.
            if e.tag == 'noparsingrule':
                e.msg += " File: %s" % self.filename
            raise
        self.name = 'fCNV'
        self.attrs['filename'] = os.path.basename(filename)

    def load_defaults(self, defaultsfile):
        # NOTE(review): placeholder — defaults are currently loaded in
        # __init__; confirm intent before implementing or removing.
        pass
| |
#!/usr/bin/env python3
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
import yaml
import argparse
import logging
import fnmatch
import googleapiclient.discovery
from googleapiclient import http
from pythonjsonlogger import jsonlogger
import google_auth_httplib2
import google.auth
NAME = 'custom-role-manager'
TERRAFORM_TEMPLATES = {
'pre':
'',
'organization':
'''resource "google_organization_iam_custom_role" "{terraform_id}" {{
role_id = "{role_id}"
org_id = "{organization_id}"
title = "{role_title}"
description = "{role_description}"
permissions = {role_permissions}
}}
''',
'project':
'''resource "google_project_iam_custom_role" "{terraform_id}" {{
role_id = "{role_id}"
title = "{role_title}"
description = "{role_description}"
permissions = {role_permissions}
}}
''',
'post':
''
}
TERRAFORM_PRE = False
TERRAFORM_RESOURCES = {}
def process_permission(logger, role, permissions_to_grant, permission_name):
    """ Apply a role's include/exclude filters to one permission.

        Appends permission_name to permissions_to_grant (in place) when
        it matches one of the role's 'include' patterns — or when the
        role has no 'include' list at all — and is not matched by any
        'exclude' pattern.  Patterns wrapped in slashes (/.../) are
        regular expressions matched with re.match; anything else is a
        shell-style glob (fnmatch).

        Returns the (mutated) permissions_to_grant list.
    """
    if 'include' in role:
        permission_candidate = None
        for include_permission in role['include']:
            if include_permission.startswith(
                    '/') and include_permission.endswith('/'):  # Regexp
                if re.match(include_permission[1:-1], permission_name):
                    permission_candidate = permission_name
                    break
            else:
                if fnmatch.fnmatch(permission_name, include_permission):
                    permission_candidate = permission_name
                    break
    else:
        # No include list means every permission is a candidate.
        # (Fixes a bug where this candidate was set but never granted:
        # the exclude/append logic only ran inside the include branch.)
        permission_candidate = permission_name

    if permission_candidate and 'exclude' in role:
        for exclude_permission in role['exclude']:
            if exclude_permission.startswith(
                    '/') and exclude_permission.endswith('/'):  # Regexp
                if re.match(exclude_permission[1:-1], permission_candidate):
                    permission_candidate = None
                    break
            else:
                if fnmatch.fnmatch(permission_candidate, exclude_permission):
                    permission_candidate = None
                    break

    if permission_candidate:
        permissions_to_grant.append(permission_candidate)
    return permissions_to_grant
def process_role(logger, service, role, output_terraform=False):
    """ Resolve one configured role's permissions and apply it.

        Expands the role's 'source' entries (predefined roles/... names
        or //... full resource names) into a concrete permission list
        via process_permission(), then either prints a Terraform
        resource definition (output_terraform=True) or creates/patches
        the custom role through the IAM API.

        logger: logging.Logger used for structured progress messages.
        service: googleapiclient service object for the 'iam' v1 API.
        role: one role mapping from config.yaml; must contain 'source',
            'id' and 'parent' ('organizations/...' or 'projects/...');
            API mode also reads 'title', 'description' and 'stage'.
        output_terraform: when True, emit HCL to stdout and make no
            create/patch API calls.
    """
    global TERRAFORM_PRE, TERRAFORM_TEMPLATES, TERRAFORM_RESOURCES
    if 'source' not in role:
        logger.error('Source not defined for role.', extra={'role': role['id']})
        sys.exit(2)

    # Look up the role under its parent to decide create vs. patch.
    role_exists = True
    role_name = '%s/roles/%s' % (role['parent'], role['id'])
    if role['parent'].startswith('organizations/'):
        role_request = service.organizations().roles().get(name=role_name)
    else:
        role_request = service.projects().roles().get(name=role_name)
    try:
        role_response = role_request.execute()
    except googleapiclient.errors.HttpError as e:
        # NOTE(review): 400 is treated the same as 404 ("role missing")
        # — presumably covering malformed/soft-deleted names; confirm.
        if e.resp.status == 404 or e.resp.status == 400:
            role_exists = False
        else:
            raise e

    # 'source' may be a single string or a list of sources.
    if not isinstance(role['source'], list):
        sources = [role['source']]
    else:
        sources = role['source']
    permissions_to_grant = []
    for source in sources:
        # Predefined role: filter its included permissions.
        if source.startswith('roles/'):
            source_request = service.roles().get(name=source)
            source_response = source_request.execute()
            for p in source_response['includedPermissions']:
                permissions_to_grant = process_permission(
                    logger, role, permissions_to_grant, p)
        # Full resource name: page through all testable permissions.
        if source.startswith('//'):
            next_page_token = None
            while True:
                permissions = service.permissions().queryTestablePermissions(
                    body={
                        'fullResourceName': source,
                        'pageToken': next_page_token
                    }).execute()
                for p in permissions['permissions']:
                    # Skip permissions that are deprecated or not
                    # usable in custom roles; log why they are skipped.
                    if ('stage' not in p or p['stage'] != 'DEPRECATED') and (
                            'customRolesSupportLevel' not in p or
                            p['customRolesSupportLevel'] != 'NOT_SUPPORTED'):
                        permissions_to_grant = process_permission(
                            logger, role, permissions_to_grant, p['name'])
                    else:
                        if 'stage' in p and p['stage'] == 'DEPRECATED':
                            logger.info('Permission %s is deprecated.' %
                                        (p['name']),
                                        extra={
                                            'permission': p['name'],
                                        })
                        if 'customRolesSupportLevel' in p and p[
                                'customRolesSupportLevel'] == 'NOT_SUPPORTED':
                            logger.info(
                                'Permission %s is not supported in custom roles.' %
                                (p['name']),
                                extra={
                                    'permission': p['name'],
                                })
                if 'nextPageToken' in permissions:
                    next_page_token = permissions['nextPageToken']
                else:
                    break

    # Extra permissions listed under 'append' bypass all filtering.
    if 'append' in role:
        for permission in role['append']:
            permissions_to_grant.append(permission)
    logger.info('%d permissions determined for role %s.' %
                (len(permissions_to_grant), role['id']),
                extra={
                    'role': role['id'],
                    'permissions': permissions_to_grant
                })

    if output_terraform:
        # Terraform mode: print the 'pre' template once, then one
        # resource block per role; no API mutations are made.
        if not TERRAFORM_PRE:
            print(TERRAFORM_TEMPLATES['pre'])
            TERRAFORM_PRE = True
        organization_id = ''
        project_id = ''
        # Terraform resource id defaults to the role id.
        terraform_id = role['tfId'] if 'tfId' in role else role['id']
        tf_template = 'project'
        if 'organizations/' in role['parent']:
            organization_id = role['parent'].replace('organizations/', '')
            tf_template = 'organization'
            TERRAFORM_RESOURCES[
                terraform_id] = 'google_organization_iam_custom_role.%s' % (
                    terraform_id)
        else:
            project_id = role['parent'].replace('projects/', '')
            TERRAFORM_RESOURCES[
                terraform_id] = 'google_project_iam_custom_role.%s' % (
                    terraform_id)
        role_id = role['id']
        role_title = role['title'] if 'title' in role else ''
        role_description = role['description'] if 'description' in role else ''
        # HCL uses double-quoted strings: swap Python's single quotes
        # from the list repr, and escape embedded double quotes below.
        role_permissions = str(permissions_to_grant).replace('\'', '"')
        print(TERRAFORM_TEMPLATES[tf_template].format(
            role_id=role_id,
            terraform_id=terraform_id,
            organization_id=organization_id,
            project_id=project_id,
            role_title=role_title.replace('"', '\\"'),
            role_description=role_description.replace('"', '\\"'),
            role_permissions=role_permissions))
    elif not role_exists:
        # Create a brand-new custom role under the configured parent.
        logger.info('Creating role: %s' % (role['id']),
                    extra={
                        'role': role['id'],
                    })
        create_role_request_body = {
            'roleId': role['id'],
            'role': {
                'title': role['title'],
                'description': role['description'],
                'includedPermissions': permissions_to_grant,
                'stage': role['stage'],
            }
        }
        if role['parent'].startswith('organizations/'):
            role_create_request = service.organizations().roles().create(
                parent=role['parent'], body=create_role_request_body)
        else:
            role_create_request = service.projects().roles().create(
                parent=role['parent'], body=create_role_request_body)
        role_create_response = role_create_request.execute()
        logger.warning('Role created: %s' % (role['id']),
                       extra={
                           'role': role['id'],
                           'role_name': role_create_response['name'],
                           'etag': role_create_response['etag']
                       })
    elif not output_terraform:
        # Existing role: patch only when the permission set changed.
        if 'includedPermissions' not in role_response:
            role_response['includedPermissions'] = []
        added_permissions = set(permissions_to_grant) - set(
            role_response['includedPermissions'])
        removed_permissions = set(
            role_response['includedPermissions']) - set(permissions_to_grant)
        if len(added_permissions) > 0 or len(removed_permissions) > 0:
            logger.info('Permissions changed for role: %s' % (role['id']),
                        extra={
                            'role': role['id'],
                            'role_name': role_response['name'],
                            'added_permissions': list(added_permissions),
                            'removed_permissions': list(removed_permissions),
                            'etag': role_response['etag']
                        })
            # The etag guards against concurrent modification races.
            patch_role_request_body = {
                'name': role_response['name'],
                'title': role['title'],
                'description': role['description'],
                'includedPermissions': permissions_to_grant,
                'stage': role['stage'],
                'etag': role_response['etag']
            }
            if role['parent'].startswith('organizations/'):
                role_patch_request = service.organizations().roles().patch(
                    name=role_response['name'], body=patch_role_request_body)
            else:
                role_patch_request = service.projects().roles().patch(
                    name=role_response['name'], body=patch_role_request_body)
            role_patch_response = role_patch_request.execute()
            logger.warning('Role updated: %s' % (role['id']),
                           extra={
                               'role': role['id'],
                               'role_name': role_patch_response['name'],
                               'added_permissions': list(added_permissions),
                               'removed_permissions': list(removed_permissions),
                               'etag': role_patch_response['etag']
                           })
        else:
            logger.info('Permissions unchanged for role: %s' % (role['id']),
                        extra={
                            'role': role['id'],
                            'role_name': role_response['name'],
                            'etag': role_response['etag']
                        })
def setup_logging():
    """ Build and return the module logger with a JSON stream handler.

        The log level is read from the LOG_LEVEL environment variable
        (numeric, e.g. 10 for DEBUG) and defaults to INFO.
    """
    logger = logging.getLogger(NAME)
    # Read the environment once instead of calling os.getenv twice.
    log_level = os.getenv('LOG_LEVEL')
    if log_level:
        logger.setLevel(int(log_level))
    else:
        logger.setLevel(logging.INFO)
    json_handler = logging.StreamHandler()
    formatter = jsonlogger.JsonFormatter()
    json_handler.setFormatter(formatter)
    logger.addHandler(json_handler)
    return logger
# Module-level logger, shared by the Cloud Function entry point and CLI.
logger = setup_logging()
def process_pubsub(event, context):
    """ Cloud Functions Pub/Sub entry point.

        Reads config.yaml from the working directory and processes
        every configured role through the IAM API.  The 'event' and
        'context' arguments are required by the Cloud Functions
        contract but are not inspected.
    """
    logger.info('%s starting to process Pub/Sub message.' % (NAME))
    with open('config.yaml') as f:
        config = yaml.load(f, Loader=yaml.SafeLoader)
    if 'roles' not in config:
        logger.error('Roles are not defined in the configuration!')
        sys.exit(1)
    try:
        # When run as a script, __main__ builds the global service.
        svc = service
    except NameError:
        # Bug fix: deployed as a Cloud Function, the module-level
        # 'service' (created only under __main__) does not exist, so
        # build the authenticated IAM client here.
        credentials, _ = google.auth.default(
            ['https://www.googleapis.com/auth/cloud-platform'])
        branded_http = google_auth_httplib2.AuthorizedHttp(credentials)
        branded_http = http.set_user_agent(
            branded_http, 'google-pso-tool/custom-role-manager/1.0.0')
        svc = googleapiclient.discovery.build('iam', 'v1', http=branded_http)
    for role in config['roles']:
        process_role(logger, svc, role)
if __name__ == '__main__':
    # CLI invocation: parse flags, build an authenticated IAM client,
    # then process every role found in the configuration file.
    parser = argparse.ArgumentParser(
        description=
        'Create custom roles by filtering existing permissions or roles')
    parser.add_argument('--config',
                        type=str,
                        help='Configuration file',
                        default='config.yaml')
    parser.add_argument(
        '--terraform',
        action='store_true',
        help='Output a Terraform compatible custom role definition instead',
        default=False)
    args = parser.parse_args()

    # Application-default credentials with a branded user agent.
    credentials, project_id = google.auth.default(
        ['https://www.googleapis.com/auth/cloud-platform'])
    authed_http = google_auth_httplib2.AuthorizedHttp(credentials)
    authed_http = http.set_user_agent(
        authed_http, 'google-pso-tool/custom-role-manager/1.0.0')
    service = googleapiclient.discovery.build('iam', 'v1', http=authed_http)

    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.SafeLoader)
    if 'roles' not in config:
        logger.error('Roles are not defined in the configuration!')
        sys.exit(1)

    # config.yaml may override the default Terraform templates.
    for k, v in config.get('terraform', {}).items():
        TERRAFORM_TEMPLATES[k] = v

    for role in config['roles']:
        process_role(logger, service, role, args.terraform)

    # In Terraform mode, close with the 'post' template, substituting
    # the map of generated resource addresses.
    if TERRAFORM_PRE:
        entries = ['"%s" = %s' % (k, v)
                   for k, v in TERRAFORM_RESOURCES.items()]
        resources = '{' + ', '.join(entries) + '}' if entries else ''
        print(TERRAFORM_TEMPLATES['post'].format(resources=resources))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.