gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# coding: utf-8
from __future__ import unicode_literals, division, print_function
"""
Created on Jun 1, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Jun 1, 2012"
import unittest
import random
from custodian.custodian import Job, ErrorHandler, Custodian, Validator
import os
import glob
import shutil
class ExampleJob(Job):
    """Toy Job that accumulates the sum of 100 uniform random draws."""

    def __init__(self, jobid, params=None):
        """
        :param jobid: Integer id used only to build the job name.
        :param params: Dict holding "initial" and "total"; typically shared
            with the handler so corrections are visible to later runs.
            FIX: the original used a mutable default argument
            (``params={"initial": 0, "total": 0}``), which made every job
            created without an explicit dict silently share one state dict.
        """
        if params is None:
            params = {"initial": 0, "total": 0}
        self.jobid = jobid
        self.params = params

    def setup(self):
        # Reset counters before each run.
        self.params["initial"] = 0
        self.params["total"] = 0

    def run(self):
        # "total" becomes initial + sum of 100 U(0, 1) draws (expectation ~50).
        sequence = [random.uniform(0, 1) for i in range(100)]
        self.params["total"] = self.params["initial"] + sum(sequence)

    def postprocess(self):
        # Nothing to clean up for this toy job.
        pass

    @property
    def name(self):
        return "ExampleJob{}".format(self.jobid)
class ExampleHandler(ErrorHandler):
    """Handler that treats a running total below 50 as an error and fixes it
    by bumping the starting value for the next run."""

    def __init__(self, params):
        # Shared state dict, also mutated by ExampleJob.
        self.params = params

    def check(self):
        # An "error" is any run whose accumulated total fell short of 50.
        return self.params["total"] < 50

    def correct(self):
        # Raise the starting point so the next run accumulates a larger total.
        bumped = self.params["initial"] + 1
        self.params["initial"] = bumped
        return {"actions": "increment by 1", "errors": "total < 50"}
class ExampleHandler2(ErrorHandler):
    """
    This handler always results in an error. Because `correct` returns
    ``actions=None``, the error is unrecoverable.
    """

    def __init__(self, params):
        self.params = params

    def check(self):
        # Unconditionally flag an error on every check.
        return True

    def correct(self):
        # Record that the error fired; None actions mark it unrecoverable.
        self.has_error = True
        return {"actions": None, "errors": "Unrecoverable error"}
class ExampleHandler2b(ExampleHandler2):
    """
    Always errors like ExampleHandler2, but returns an empty action list and
    disables `raises_runtime_error`, so no RuntimeError is raised.
    """

    # Tell Custodian not to raise when this handler cannot correct.
    raises_runtime_error = False

    def correct(self):
        self.has_error = True
        return {"actions": [], "errors": "Unrecoverable error"}
class ExampleValidator1(Validator):
    # Validator that never flags a problem (check() == False means "valid").
    def check(self):
        return False
class ExampleValidator2(Validator):
    # Validator that always fails validation, forcing Custodian to error out.
    def check(self):
        return True
class CustodianTest(unittest.TestCase):
    """End-to-end tests of the Custodian run loop with the toy jobs/handlers."""

    def setUp(self):
        # Run from the test module's directory so relative paths resolve.
        self.cwd = os.getcwd()
        os.chdir(os.path.abspath(os.path.dirname(__file__)))

    def test_run(self):
        njobs = 100
        params = {"initial": 0, "total": 0}
        c = Custodian([ExampleHandler(params)],
                      [ExampleJob(i, params) for i in range(njobs)],
                      max_errors=njobs)
        output = c.run()
        self.assertEqual(len(output), njobs)
        print(ExampleHandler(params).as_dict())

    def test_run_interrupted(self):
        njobs = 100
        params = {'initial': 0, 'total': 0}
        c = Custodian([ExampleHandler(params)],
                      [ExampleJob(i, params) for i in range(njobs)],
                      max_errors=njobs)
        # Before any job has run, all njobs remain.
        self.assertEqual(c.run_interrupted(), njobs)
        self.assertEqual(c.run_interrupted(), njobs)
        total_done = 1
        while total_done < njobs:
            # FIX: the original indexed with an undefined name `i`
            # (comprehension variables do not leak in Python 3), which
            # raised NameError. Run the next pending job by index instead.
            c.jobs[total_done].run()
            if params['total'] > 50:
                # Job succeeded: one fewer job should remain.
                self.assertEqual(c.run_interrupted(), njobs - total_done)
                total_done += 1

    def test_unrecoverable(self):
        njobs = 100
        params = {"initial": 0, "total": 0}
        h = ExampleHandler2(params)
        c = Custodian([h],
                      [ExampleJob(i, params) for i in range(njobs)],
                      max_errors=njobs)
        # ExampleHandler2 returns actions=None -> unrecoverable -> raises.
        self.assertRaises(RuntimeError, c.run)
        self.assertTrue(h.has_error)

        h = ExampleHandler2b(params)
        c = Custodian([h],
                      [ExampleJob(i, params) for i in range(njobs)],
                      max_errors=njobs)
        # ExampleHandler2b suppresses the RuntimeError but still records it.
        c.run()
        self.assertTrue(h.has_error)

    def test_validators(self):
        njobs = 100
        params = {"initial": 0, "total": 0}
        c = Custodian([ExampleHandler(params)],
                      [ExampleJob(i, params) for i in range(njobs)],
                      [ExampleValidator1()],
                      max_errors=njobs)
        output = c.run()
        self.assertEqual(len(output), njobs)

        njobs = 100
        params = {"initial": 0, "total": 0}
        c = Custodian([ExampleHandler(params)],
                      [ExampleJob(i, params) for i in range(njobs)],
                      [ExampleValidator2()],
                      max_errors=njobs)
        # A failing validator is fatal.
        self.assertRaises(RuntimeError, c.run)

    def test_from_spec(self):
        spec = """jobs:
- jb: custodian.vasp.jobs.VaspJob
  params:
    final: False
    suffix: .relax1
- jb: custodian.vasp.jobs.VaspJob
  params:
    final: True
    suffix: .relax2
    settings_override: {"file": "CONTCAR", "action": {"_file_copy": {"dest": "POSCAR"}}}
jobs_common_params:
  $vasp_cmd: ["mpirun", "-machinefile", "$PBS_NODEFILE", "-np", "24", "/opt/vasp/5.4.1/bin/vasp"]
handlers:
- hdlr: custodian.vasp.handlers.VaspErrorHandler
- hdlr: custodian.vasp.handlers.AliasingErrorHandler
- hdlr: custodian.vasp.handlers.MeshSymmetryErrorHandler
validators:
- vldr: custodian.vasp.validators.VasprunXMLValidator
custodian_params:
  $scratch_dir: $TMPDIR"""
        import yaml
        os.environ["TMPDIR"] = "/tmp/random"
        os.environ["PBS_NODEFILE"] = "whatever"
        # FIX: use safe_load — plain yaml.load without a Loader is deprecated
        # and unsafe; this spec contains only plain YAML data.
        d = yaml.safe_load(spec)
        c = Custodian.from_spec(d)
        # $-prefixed params should have been expanded from the environment.
        self.assertEqual(c.jobs[0].vasp_cmd[2], "whatever")
        self.assertEqual(c.scratch_dir, "/tmp/random")
        self.assertEqual(len(c.jobs), 2)
        self.assertEqual(len(c.handlers), 3)
        self.assertEqual(len(c.validators), 1)

    def tearDown(self):
        # Remove artifacts Custodian leaves behind.
        for f in glob.glob("custodian.*.tar.gz"):
            os.remove(f)
        try:
            os.remove("custodian.json")
        except OSError:
            pass  # Ignore if file cannot be found.
        os.chdir(self.cwd)
class CustodianCheckpointTest(unittest.TestCase):
    """Verifies that Custodian resumes from an existing checkpoint archive."""

    def setUp(self):
        self.cwd = os.getcwd()
        checkpoint_dir = os.path.join(os.path.dirname(__file__), "..", "..",
                                      "test_files", "checkpointing")
        os.chdir(checkpoint_dir)
        # Simulate a partially-completed run: 3 jobs already checkpointed.
        shutil.copy('backup.tar.gz', 'custodian.chk.3.tar.gz')

    def test_checkpoint_loading(self):
        njobs = 5
        params = {"initial": 0, "total": 0}
        c = Custodian([ExampleHandler(params)],
                      [ExampleJob(i, params) for i in range(njobs)],
                      [ExampleValidator1()],
                      max_errors=100, checkpoint=True)
        # Three runs restored from the checkpoint; all five complete overall.
        self.assertEqual(len(c.run_log), 3)
        self.assertEqual(len(c.run()), 5)

    def tearDown(self):
        os.remove("custodian.json")
        os.chdir(self.cwd)
if __name__ == "__main__":
    # Run the whole test suite when this module is executed directly.
    # import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
| |
# -*- coding: utf-8 -*-
#
# pandas documentation build configuration file, created by
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import re
import inspect
import importlib

# pandas.compat supplies Python 2/3 helpers used below (u() and the PY3 flag).
from pandas.compat import u, PY3

# Provide raw_input on Python 3 so the interactive prompt below works on both.
try:
    raw_input  # Python 2
except NameError:
    raw_input = input  # Python 3

# https://github.com/sphinx-doc/sphinx/pull/2325/files
# Workaround for sphinx-build recursion limit overflow:
# pickle.dump(doctree, f, pickle.HIGHEST_PROTOCOL)
# RuntimeError: maximum recursion depth exceeded while pickling an object
#
# Python's default allowed recursion depth is 1000.
sys.setrecursionlimit(5000)

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.append(os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../sphinxext'))

sys.path.extend([
    # numpy standard doc extensions
    os.path.join(os.path.dirname(__file__),
                 '..', '../..',
                 'sphinxext')
])
# -- General configuration -----------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. sphinxext.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.autosummary',
              'sphinx.ext.doctest',
              'sphinx.ext.extlinks',
              'sphinx.ext.todo',
              'numpydoc',
              'ipython_sphinxext.ipython_directive',
              'ipython_sphinxext.ipython_console_highlighting',
              'IPython.sphinxext.ipython_console_highlighting',  # lowercase didn't work
              'sphinx.ext.intersphinx',
              'sphinx.ext.coverage',
              'sphinx.ext.mathjax',
              'sphinx.ext.ifconfig',
              'sphinx.ext.linkcode',
              'nbsphinx',
              ]

exclude_patterns = ['**.ipynb_checkpoints']

with open("index.rst") as f:
    index_rst_lines = f.readlines()

# only include the slow autosummary feature if we're building the API section
# of the docs
# JP: added from sphinxdocs
autosummary_generate = False
# FIX: raw string for the regex ("\s" in a plain string is a deprecated
# invalid escape); also avoid the builtin-shadowing loop variable name `l`.
if any(re.match(r"\s*api\s*", line) for line in index_rst_lines):
    autosummary_generate = True
# Interactively offer to delete doc source files that are not referenced from
# index.rst; skipping them speeds up partial doc builds.
files_to_delete = []
for f in os.listdir(os.path.dirname(__file__)):
    if (not f.endswith(('.ipynb', '.rst')) or
            f.startswith('.') or os.path.basename(f) == 'index.rst'):
        continue

    _file_basename = os.path.splitext(f)[0]
    # FIX: raw string so "\s" is a regex escape rather than a deprecated
    # string escape.
    _regex_to_match = r"\s*{}\s*$".format(_file_basename)
    if not any(re.match(_regex_to_match, line) for line in index_rst_lines):
        files_to_delete.append(f)

if files_to_delete:
    print("I'm about to DELETE the following:\n%s\n" % list(sorted(files_to_delete)))
    sys.stdout.write("WARNING: I'd like to delete those to speed up processing (yes/no)? ")
    if PY3:
        answer = input()
    else:
        answer = raw_input()

    if answer.lower().strip() in ('y', 'yes'):
        for f in files_to_delete:
            # FIX: the original wrapped this in a redundant extra
            # os.path.join call.
            f = os.path.abspath(os.path.join(os.path.dirname(__file__), f))
            try:
                print("Deleting %s" % f)
                os.unlink(f)
            except OSError:
                # FIX: was a bare `except:` which also swallowed
                # KeyboardInterrupt/SystemExit; only ignore filesystem errors.
                print("Error deleting %s" % f)
# Add any paths that contain templates here, relative to this directory.
templates_path = ['../_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u('pandas')
copyright = u('2008-2014, the pandas development team')

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import pandas

# version = '%s r%s' % (pandas.__version__, svn_version())
version = '%s' % (pandas.__version__)

# The full version, including alpha/beta/rc tags.
release = version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
# unused_docs = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []

# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'nature_with_gtoc'

# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
# html_style = 'statsmodels.css'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.

# Add redirect for previously existing API pages
# each item is like `(from_old, to_new)`
# To redirect a class and all its methods, see below
# https://github.com/pandas-dev/pandas/issues/16186
# Old API page -> new API page; a redirect stub is emitted for each pair.
moved_api_pages = [
    ('pandas.core.common.isnull', 'pandas.isna'),
    ('pandas.core.common.notnull', 'pandas.notna'),
    ('pandas.core.reshape.get_dummies', 'pandas.get_dummies'),
    ('pandas.tools.merge.concat', 'pandas.concat'),
    ('pandas.tools.merge.merge', 'pandas.merge'),
    ('pandas.tools.pivot.pivot_table', 'pandas.pivot_table'),
    ('pandas.tseries.tools.to_datetime', 'pandas.to_datetime'),
    ('pandas.io.clipboard.read_clipboard', 'pandas.read_clipboard'),
    ('pandas.io.excel.ExcelFile.parse', 'pandas.ExcelFile.parse'),
    ('pandas.io.excel.read_excel', 'pandas.read_excel'),
    ('pandas.io.gbq.read_gbq', 'pandas.read_gbq'),
    ('pandas.io.html.read_html', 'pandas.read_html'),
    ('pandas.io.json.read_json', 'pandas.read_json'),
    ('pandas.io.parsers.read_csv', 'pandas.read_csv'),
    ('pandas.io.parsers.read_fwf', 'pandas.read_fwf'),
    ('pandas.io.parsers.read_table', 'pandas.read_table'),
    ('pandas.io.pickle.read_pickle', 'pandas.read_pickle'),
    ('pandas.io.pytables.HDFStore.append', 'pandas.HDFStore.append'),
    ('pandas.io.pytables.HDFStore.get', 'pandas.HDFStore.get'),
    ('pandas.io.pytables.HDFStore.put', 'pandas.HDFStore.put'),
    ('pandas.io.pytables.HDFStore.select', 'pandas.HDFStore.select'),
    ('pandas.io.pytables.read_hdf', 'pandas.read_hdf'),
    ('pandas.io.sql.read_sql', 'pandas.read_sql'),
    ('pandas.io.sql.read_frame', 'pandas.read_frame'),
    ('pandas.io.sql.write_frame', 'pandas.write_frame'),
    ('pandas.io.stata.read_stata', 'pandas.read_stata'),
]

# Again, tuples of (from_old, to_new)
moved_classes = [
    ('pandas.tseries.resample.Resampler', 'pandas.core.resample.Resampler'),
    ('pandas.formats.style.Styler', 'pandas.io.formats.style.Styler'),
]

for old, new in moved_classes:
    # the class itself...
    moved_api_pages.append((old, new))

    # Import the new class to enumerate its public methods for redirects.
    mod, classname = new.rsplit('.', 1)
    klass = getattr(importlib.import_module(mod), classname)
    # NOTE: only non-underscore names (plus __iter__/__array__) get redirects.
    methods = [x for x in dir(klass)
               if not x.startswith('_') or x in ('__iter__', '__array__')]

    for method in methods:
        # ... and each of its public methods
        moved_api_pages.append(
            ("{old}.{method}".format(old=old, method=method),
             "{new}.{method}".format(new=new, method=method))
        )

# Every old page renders the api_redirect.html template, which forwards
# the browser using the mapping in html_context['redirects'] below.
html_additional_pages = {
    'generated/' + page[0]: 'api_redirect.html'
    for page in moved_api_pages
}

html_context = {
    'redirects': {old: new for old, new in moved_api_pages}
}
# If false, no module index is generated.
html_use_modindex = True

# If false, no index is generated.
# html_use_index = True

# If true, the index is split into individual pages for each letter.
# html_split_index = False

# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'pandas'

# -- Options for nbsphinx ------------------------------------------------

# Notebooks are rendered even if a cell errored during execution.
nbsphinx_allow_errors = True

# -- Options for LaTeX output --------------------------------------------

# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'pandas.tex',
     u('pandas: powerful Python data analysis toolkit'),
     u('Wes McKinney\n\& PyData Development Team'), 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False

# Additional stuff for the LaTeX preamble.
# latex_preamble = ''

# Documents to append as an appendix to all manuals.
# latex_appendices = []

# If false, no module index is generated.
# latex_use_modindex = True

# Cross-project references to other Sphinx-documented libraries.
intersphinx_mapping = {
    'statsmodels': ('http://www.statsmodels.org/devel/', None),
    'matplotlib': ('http://matplotlib.org/', None),
    'python': ('https://docs.python.org/3/', None),
    'numpy': ('https://docs.scipy.org/doc/numpy/', None),
    'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
    'py': ('https://pylib.readthedocs.io/en/latest/', None)
}
import glob
autosummary_generate = glob.glob("*.rst")

# extlinks alias
extlinks = {'issue': ('https://github.com/pandas-dev/pandas/issues/%s',
                      'GH'),
            'wiki': ('https://github.com/pandas-dev/pandas/wiki/%s',
                     'wiki ')}

# Lines executed before every ipython directive in the docs.
ipython_exec_lines = [
    'import numpy as np',
    'import pandas as pd',
    # This ensures correct rendering on system with console encoding != utf8
    # (windows). It forces pandas to encode its output reprs using utf8
    # wherever the docs are built. The docs' target is the browser, not
    # the console, so this is fine.
    'pd.options.display.encoding="utf8"'
]
# Add custom Documenter to handle attributes/methods of an AccessorProperty
# eg pandas.Series.str and pandas.Series.dt (see GH9322)
import sphinx
from sphinx.util import rpartition
from sphinx.ext.autodoc import Documenter, MethodDocumenter, AttributeDocumenter
from sphinx.ext.autosummary import Autosummary
class AccessorDocumenter(MethodDocumenter):
    """Specialized Documenter subclass for accessors."""

    objtype = 'accessor'
    directivetype = 'method'

    # lower than MethodDocumenter so this is not chosen for normal methods
    priority = 0.6

    def format_signature(self):
        """Suppress the signature entirely.

        Accessors take no arguments, and the inherited implementation emits
        an error/warning for them, so an empty string is returned instead.
        """
        return ''
class AccessorLevelDocumenter(Documenter):
    """
    Specialized Documenter subclass for objects on accessor level (methods,
    attributes).
    """
    # This is the simple straightforward version
    # modname is None, base the last elements (eg 'hour')
    # and path the part before (eg 'Series.dt')
    # def resolve_name(self, modname, parents, path, base):
    #     modname = 'pandas'
    #     mod_cls = path.rstrip('.')
    #     mod_cls = mod_cls.split('.')
    #
    #     return modname, mod_cls + [base]

    def resolve_name(self, modname, parents, path, base):
        # NOTE(review): the nesting below was reconstructed from a
        # whitespace-stripped source, following the upstream numpy/pandas
        # conf.py this file derives from — confirm against project history.
        if modname is None:
            if path:
                mod_cls = path.rstrip('.')
            else:
                mod_cls = None
                # if documenting a class-level object without path,
                # there must be a current class, either from a parent
                # auto directive ...
                mod_cls = self.env.temp_data.get('autodoc:class')
                # ... or from a class directive
                if mod_cls is None:
                    mod_cls = self.env.temp_data.get('py:class')
                # ... if still None, there's no way to know
                if mod_cls is None:
                    return None, []
            # HACK: this is added in comparison to ClassLevelDocumenter
            # mod_cls still exists of class.accessor, so an extra
            # rpartition is needed
            modname, accessor = rpartition(mod_cls, '.')
            modname, cls = rpartition(modname, '.')
            parents = [cls, accessor]
            # if the module name is still missing, get it like above
            if not modname:
                modname = self.env.temp_data.get('autodoc:module')
            if not modname:
                if sphinx.__version__ > '1.3':
                    modname = self.env.ref_context.get('py:module')
                else:
                    modname = self.env.temp_data.get('py:module')
            # ... else, it stays None, which means invalid
        return modname, parents + [base]
class AccessorAttributeDocumenter(AccessorLevelDocumenter, AttributeDocumenter):
    # Documents attributes reached through an accessor (e.g. Series.dt.hour).
    objtype = 'accessorattribute'
    directivetype = 'attribute'

    # lower than AttributeDocumenter so this is not chosen for normal attributes
    priority = 0.6
class AccessorMethodDocumenter(AccessorLevelDocumenter, MethodDocumenter):
    # Documents methods reached through an accessor (e.g. Series.str.upper).
    objtype = 'accessormethod'
    directivetype = 'method'

    # lower than MethodDocumenter so this is not chosen for normal methods
    priority = 0.6
class AccessorCallableDocumenter(AccessorLevelDocumenter, MethodDocumenter):
    """
    This documenter lets us remove .__call__ from the method signature for
    callable accessors like Series.plot
    """
    objtype = 'accessorcallable'
    directivetype = 'method'

    # lower than MethodDocumenter; otherwise the doc build prints warnings
    priority = 0.5

    def format_name(self):
        """Return the formatted name with a trailing '.__call__' removed.

        FIX: the original used ``str.rstrip('.__call__')``, which strips any
        trailing characters from the set {'.', '_', 'c', 'a', 'l'} rather than
        the literal suffix, and could mangle names ending in those characters.
        """
        name = MethodDocumenter.format_name(self)
        suffix = '.__call__'
        if name.endswith(suffix):
            name = name[:-len(suffix)]
        return name
class PandasAutosummary(Autosummary):
    """
    This alternative autosummary class lets us override the table summary for
    Series.plot and DataFrame.plot in the API docs.
    """

    def _replace_pandas_items(self, display_name, sig, summary, real_name):
        # this is a hack: ideally the signature would be extracted from the
        # .__call__ method instead of being hard coded here
        overrides = {
            'DataFrame.plot': ('([x, y, kind, ax, ....])',
                               'DataFrame plotting accessor and method'),
            'Series.plot': ('([kind, ax, figsize, ....])',
                            'Series plotting accessor and method'),
        }
        if display_name in overrides:
            sig, summary = overrides[display_name]
        return (display_name, sig, summary, real_name)

    def get_items(self, names):
        items = Autosummary.get_items(self, names)
        return [self._replace_pandas_items(*item) for item in items]
# based on numpy doc/source/conf.py
def linkcode_resolve(domain, info):
    """
    Determine the URL corresponding to Python object

    :param domain: Sphinx domain; only 'py' is handled, anything else -> None.
    :param info: dict with 'module' (dotted module name) and 'fullname'
        (dotted object path within the module) keys.
    :return: GitHub source URL with a #L start-end anchor, or None when the
        object's source cannot be located.
    """
    if domain != 'py':
        return None

    modname = info['module']
    fullname = info['fullname']

    submod = sys.modules.get(modname)
    if submod is None:
        return None

    obj = submod
    for part in fullname.split('.'):
        try:
            obj = getattr(obj, part)
        except AttributeError:
            # FIX: was a bare except (also caught KeyboardInterrupt etc.);
            # a missing attribute simply means "no link".
            return None

    try:
        fn = inspect.getsourcefile(obj)
    except TypeError:
        # inspect raises TypeError for builtins / C-implemented objects.
        fn = None
    if not fn:
        return None

    try:
        source, lineno = inspect.getsourcelines(obj)
    except (TypeError, OSError):
        # OSError: source not available (e.g. compiled module).
        lineno = None

    if lineno:
        linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
    else:
        linespec = ""

    fn = os.path.relpath(fn, start=os.path.dirname(pandas.__file__))

    # Development versions carry a '+' local-version tag -> link to master.
    if '+' in pandas.__version__:
        return "http://github.com/pandas-dev/pandas/blob/master/pandas/%s%s" % (
            fn, linespec)
    else:
        return "http://github.com/pandas-dev/pandas/blob/v%s/pandas/%s%s" % (
            pandas.__version__, fn, linespec)
# remove the docstring of the flags attribute (inherited from numpy ndarray)
# because these give doc build errors (see GH issue 5331)
def remove_flags_docstring(app, what, name, obj, options, lines):
    """autodoc-process-docstring hook: blank out `.flags` attribute docs."""
    if what != "attribute":
        return
    if not name.endswith(".flags"):
        return
    # Empty the list in place — autodoc keeps a reference to this object.
    del lines[:]
suppress_warnings = [
    # We "overwrite" autosummary with our PandasAutosummary, but
    # still want the regular autosummary setup to run. So we just
    # suppress this warning.
    'app.add_directive'
]
def setup(app):
    # Sphinx entry point: register the docstring hook, the custom accessor
    # documenters, and the overriding autosummary directive.
    app.connect("autodoc-process-docstring", remove_flags_docstring)
    app.add_autodocumenter(AccessorDocumenter)
    app.add_autodocumenter(AccessorAttributeDocumenter)
    app.add_autodocumenter(AccessorMethodDocumenter)
    app.add_autodocumenter(AccessorCallableDocumenter)
    app.add_directive('autosummary', PandasAutosummary)
| |
# http://www.g-loaded.eu/2009/10/30/python-ping/
#!/usr/bin/env python
"""
A pure python ping implementation using raw socket.
Note that ICMP messages can only be sent from processes running as root.
Derived from ping.c distributed in Linux's netkit. That code is
copyright (c) 1989 by The Regents of the University of California.
That code is in turn derived from code written by Mike Muuss of the
US Army Ballistic Research Laboratory in December, 1983 and
placed in the public domain. They have my thanks.
Bugs are naturally mine. I'd be glad to hear about them. There are
certainly word-size dependencies here.
Copyright (c) Matthew Dixon Cowles, <http://www.visi.com/~mdc/>.
Distributable under the terms of the GNU General Public License
version 2. Provided with no warranties of any sort.
Original Version from Matthew Dixon Cowles:
-> ftp://ftp.visi.com/users/mdc/ping.py
Rewrite by Jens Diemer:
-> http://www.python-forum.de/post-69122.html#69122
Rewrite by George Notaras:
-> http://www.g-loaded.eu/2009/10/30/python-ping/
Revision history
~~~~~~~~~~~~~~~~
November 8, 2009
----------------
Improved compatibility with GNU/Linux systems.
Fixes by:
* George Notaras -- http://www.g-loaded.eu
Reported by:
* Chris Hallman -- http://cdhallman.blogspot.com
Changes in this release:
- Re-use time.time() instead of time.clock(). The 2007 implementation
worked only under Microsoft Windows. Failed on GNU/Linux.
time.clock() behaves differently under the two OSes[1].
[1] http://docs.python.org/library/time.html#time.clock
May 30, 2007
------------
little rewrite by Jens Diemer:
- change socket asterisk import to a normal import
- replace time.time() with time.clock()
- delete "return None" (or change to "return" only)
- in checksum() rename "str" to "source_string"
November 22, 1997
-----------------
Initial hack. Doesn't do much, but rather than try to guess
what features I (or others) will want in the future, I've only
put in what I need now.
December 16, 1997
-----------------
For some reason, the checksum bytes are in the wrong order when
this is run under Solaris 2.X for SPARC but it works right under
Linux x86. Since I don't know just what's wrong, I'll swap the
bytes always and then do an htons().
December 4, 2000
----------------
Changed the struct.pack() calls to pack the checksum and ID as
unsigned. My thanks to Jerome Poincheval for the fix.
Last commit info:
~~~~~~~~~~~~~~~~~
$LastChangedDate: $
$Rev: $
$Author: $
"""
import os, sys, socket, struct, select, time
# From /usr/include/linux/icmp.h; your mileage may vary.
ICMP_ECHO_REQUEST = 8 # Seems to be the same on Solaris.
def checksum(source_string):
    """
    Compute the 16-bit one's-complement checksum used by ICMP, mirroring
    in_cksum() from ping.c: sum consecutive bytes as little-endian 16-bit
    words, fold the carries back in, complement, then byte-swap.
    """
    total = 0
    paired_len = (len(source_string) // 2) * 2

    # Sum full byte pairs as 16-bit little-endian words.
    idx = 0
    while idx < paired_len:
        word = ord(source_string[idx + 1]) * 256 + ord(source_string[idx])
        total = (total + word) & 0xffffffff  # Necessary?
        idx += 2

    # Odd trailing byte, if any.
    if paired_len < len(source_string):
        total = (total + ord(source_string[-1])) & 0xffffffff  # Necessary?

    # Fold the 32-bit sum into 16 bits and add the carry back in.
    total = (total >> 16) + (total & 0xffff)
    total += total >> 16

    # One's complement, truncated to 16 bits.
    result = ~total & 0xffff

    # Swap bytes. Bugger me if I know why.
    return (result >> 8) | ((result << 8) & 0xff00)
def receive_one_ping(my_socket, ID, timeout):
    """
    Wait up to *timeout* seconds for an echo reply whose identifier matches
    *ID* on *my_socket*; return the round-trip delay in seconds, or None on
    timeout.
    """
    time_remaining = timeout
    while True:
        select_start = time.time()
        ready = select.select([my_socket], [], [], time_remaining)
        select_elapsed = time.time() - select_start
        if ready[0] == []:  # nothing became readable before the deadline
            return

        received_at = time.time()
        rec_packet, addr = my_socket.recvfrom(1024)

        # The 8-byte ICMP header follows the 20-byte IP header.
        icmp_header = rec_packet[20:28]
        icmp_type, code, cksum, packet_id, sequence = struct.unpack(
            "bbHHh", icmp_header
        )
        if packet_id == ID:
            # Payload starts with the send timestamp packed as a double.
            double_size = struct.calcsize("d")
            sent_at = struct.unpack("d", rec_packet[28:28 + double_size])[0]
            return received_at - sent_at

        # Reply was for someone else; keep waiting with the reduced budget.
        time_remaining = time_remaining - select_elapsed
        if time_remaining <= 0:
            return
def send_one_ping(my_socket, dest_addr, ID):
    """
    Send a single ICMP echo request with identifier *ID* to *dest_addr*.
    The payload begins with the send timestamp so the receiver can compute
    the round trip.
    """
    dest_addr = socket.gethostbyname(dest_addr)

    # Header is type (8), code (8), checksum (16), id (16), sequence (16).
    # Build a dummy header with a zero checksum first.
    dummy_header = struct.pack("bbHHh", ICMP_ECHO_REQUEST, 0, 0, ID, 1)

    double_size = struct.calcsize("d")
    payload = struct.pack("d", time.time()) + (192 - double_size) * "Q"

    # Calculate the checksum on the payload and the dummy header.
    real_checksum = checksum(dummy_header + payload)

    # Rebuild the header with the real checksum (in network byte order) —
    # easier than patching it into the dummy.
    header = struct.pack(
        "bbHHh", ICMP_ECHO_REQUEST, 0, socket.htons(real_checksum), ID, 1
    )
    my_socket.sendto(header + payload, (dest_addr, 1))  # Don't know about the 1
def do_one(dest_addr, timeout):
    """
    Returns either the delay (in seconds) or none on timeout.

    :param dest_addr: hostname or dotted-quad address to ping
    :param timeout: seconds to wait for the echo reply
    """
    icmp = socket.getprotobyname("icmp")
    try:
        # Raw sockets require root privileges.
        my_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW, icmp)
    except socket.error, (errno, msg):  # Python 2-only tuple-unpack syntax
        if errno == 1:
            # Operation not permitted
            msg = msg + (
                " - Note that ICMP messages can only be sent from processes"
                " running as root."
            )
            raise socket.error(msg)
        raise  # raise the original error

    # The ICMP identifier field is 16 bits wide.
    my_ID = os.getpid() & 0xFFFF

    send_one_ping(my_socket, dest_addr, my_ID)
    delay = receive_one_ping(my_socket, my_ID, timeout)

    my_socket.close()
    return delay
def verbose_ping(dest_addr, timeout = 2, count = 4):
    """
    Send >count< ping to >dest_addr< with the given >timeout< and display
    the result.
    """
    for i in xrange(count):  # Python 2 (xrange, print statements)
        print "ping %s..." % dest_addr,
        try:
            delay = do_one(dest_addr, timeout)
        except socket.gaierror, e:  # hostname could not be resolved
            print "failed. (socket error: '%s')" % e[1]
            break

        if delay == None:
            print "failed. (timeout within %ssec.)" % timeout
        else:
            delay = delay * 1000
            print "get ping in %0.4fms" % delay
    # trailing blank line between hosts
    print
if __name__ == '__main__':
    # Demo runs: two reachable hosts, one bogus hostname, one LAN address.
    verbose_ping("heise.de")
    verbose_ping("google.com")
    verbose_ping("a-test-url-taht-is-not-available.com")
    verbose_ping("192.168.1.1")
| |
import os
import copy
import logging
from itertools import chain
import tensorflow as tf
from baseline.tf.embeddings import *
from eight_mile.tf.layers import *
from baseline.version import __version__
from baseline.utils import (
fill_y,
listify,
ls_props,
read_json,
write_json,
MAGIC_VARS,
MEAD_HUB_MODULES
)
from baseline.model import ClassifierModel, register_model
from baseline.tf.tfy import (
TRAIN_FLAG,
reload_embeddings,
new_placeholder_dict,
tf_device_wrapper,
create_session,
BaseLayer,
TensorDef
)
logger = logging.getLogger('baseline')
class ClassifierModelBase(tf.keras.Model, ClassifierModel):
"""Base for all baseline implementations of token-based classifiers
This class provides a loose skeleton around which the baseline models
are built. It is built on the Keras Model base, and fulfills the `ClassifierModel` interface.
To override this class, the use would typically override the `create_layers` function which will
create and attach all sub-layers of this model into the class, and the `call` function which will
give the model some implementation to call on forward.
"""
def __init__(self, name=None):
"""Base
"""
super().__init__(name=name)
self._unserializable = []
    def set_saver(self, saver):
        # Attach the saver object used by save_values() in graph mode.
        self.saver = saver
    def save_values(self, basename):
        """Save tensor files out

        :param basename: Base name of model
        :return:
        """
        if not tf.executing_eagerly():
            # Graph mode: persist session variables via the attached saver
            # (see set_saver); the meta graph is skipped.
            self.saver.save(self.sess, basename, write_meta_graph=False)
        else:
            # Eager mode: use Keras weight checkpointing instead.
            self.save_weights(f"{basename}.wgt")
    def save_md(self, basename):
        """This method saves out a `.state` file containing meta-data from these classes and any info
        registered by a user-defined derived class as a `property`. Also write the `graph` and `saver` and `labels`

        :param basename: Base path; `.state` and `.labels` suffixes are added.
        :return:
        """
        write_json(self._state, '{}.state'.format(basename))
        write_json(self.labels, '{}.labels'.format(basename))
        # Embeddings may carry their own metadata (e.g. vocab info).
        for key, embedding in self.embeddings.items():
            if hasattr(embedding, 'save_md'):
                embedding.save_md('{}-{}-md.json'.format(basename, key))
def _record_state(self, embeddings: Dict[str, BaseLayer], **kwargs):
embeddings_info = {}
for k, v in embeddings.items():
embeddings_info[k] = v.__class__.__name__
blacklist = set(chain(
self._unserializable,
MAGIC_VARS,
embeddings.keys(),
(f'{k}_lengths' for k in embeddings.keys())
))
self._state = {k: v for k, v in kwargs.items() if k not in blacklist}
self._state.update({
'version': __version__,
'module': self.__class__.__module__,
'class': self.__class__.__name__,
'embeddings': embeddings_info,
'hub_modules': MEAD_HUB_MODULES
})
def save(self, basename, **kwargs):
"""Save meta-data and actual data for a model
:param basename: (``str``) The model basename
:param kwargs:
:return: None
"""
self.save_md(basename)
self.save_values(basename)
def create_test_loss(self):
with tf.name_scope("test_loss"):
loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=tf.cast(self.y, "float"))
all_loss = tf.reduce_mean(loss)
return all_loss
def create_loss(self):
"""The loss function is currently provided here, although this is not a great place for it
as it provides a coupling between the model and its loss function. Just here for convenience at the moment.
:return:
"""
with tf.name_scope("loss"):
loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=tf.cast(self.y, "float"))
all_loss = tf.reduce_mean(loss)
return all_loss
def predict_batch(self, batch_dict):
"""This method provides a basic routine to run "inference" or predict outputs based on data.
It runs the `x` tensor in (`BxT`), and turns dropout off, running the network all the way to a softmax
output. You can use this method directly if you have vector input, or you can use the `ClassifierService`
which can convert directly from text durign its `transform`. That method calls this one underneath.
:param batch_dict: (``dict``) Contains any inputs to embeddings for this model
:return: Each outcome as a ``list`` of tuples `(label, probability)`
"""
batch_dict = self.make_input(batch_dict)
if not tf.executing_eagerly():
probs = self.sess.run(self.probs, batch_dict)
else:
probs = tf.nn.softmax(self(batch_dict)).numpy()
return probs
def predict(self, batch_dict, raw=False, dense=False):
"""This method provides a basic routine to run "inference" or predict outputs based on data.
It runs the `x` tensor in (`BxT`), and turns dropout off, running the network all the way to a softmax
output. You can use this method directly if you have vector input, or you can use the `ClassifierService`
which can convert directly from text durign its `transform`. That method calls this one underneath.
:param batch_dict: (``dict``) Contains any inputs to embeddings for this model
:return: Each outcome as a ``list`` of tuples `(label, probability)`
"""
probs = self.predict_batch(batch_dict)
if raw and not dense:
logger.warning("Warning: `raw` parameter is deprecated pass `dense=True` to get back values as a single tensor")
dense = True
if dense:
return probs
results = []
batchsz = probs.shape[0]
for b in range(batchsz):
outcomes = [(self.labels[id_i], prob_i) for id_i, prob_i in enumerate(probs[b])]
results.append(outcomes)
return results
def make_input(self, batch_dict, train=False):
"""Transform a `batch_dict` into a TensorFlow `feed_dict`
:param batch_dict: (``dict``) A dictionary containing all inputs to the embeddings for this model
:param train: (``bool``) Are we training. Defaults to False
:return:
"""
y = batch_dict.get('y', None)
if not tf.executing_eagerly():
batch_for_model = new_placeholder_dict(train)
for k in self.embeddings.keys():
batch_for_model["{}:0".format(k)] = batch_dict[k]
# Allow us to track a length, which is needed for BLSTMs
if self.lengths_key is not None:
batch_for_model[self.lengths] = batch_dict[self.lengths_key]
if y is not None:
batch_for_model[self.y] = fill_y(len(self.labels), y)
else:
SET_TRAIN_FLAG(train)
batch_for_model = {}
for k in self.embeddings.keys():
batch_for_model[k] = batch_dict[k]
# Allow us to track a length, which is needed for BLSTMs
if self.lengths_key is not None:
batch_for_model["lengths"] = batch_dict[self.lengths_key]
return batch_for_model
def get_labels(self) -> List[str]:
"""Get the string labels back
:return: labels
"""
return self.labels
@classmethod
@tf_device_wrapper
def load(cls, basename: str, **kwargs) -> 'ClassifierModelBase':
"""Reload the model from a graph file and a checkpoint
The model that is loaded is independent of the pooling and stacking layers, making this class reusable
by sub-classes.
:param basename: The base directory to load from
:param kwargs: See below
:Keyword Arguments:
* *sess* -- An optional tensorflow session. If not passed, a new session is
created
:return: A restored model
"""
_state = read_json("{}.state".format(basename))
if __version__ != _state['version']:
logger.warning("Loaded model is from baseline version %s, running version is %s", _state['version'], __version__)
if not tf.executing_eagerly():
_state['sess'] = kwargs.pop('sess', create_session())
with _state['sess'].graph.as_default():
embeddings_info = _state.pop('embeddings')
embeddings = reload_embeddings(embeddings_info, basename)
# If there is a kwarg that is the same name as an embedding object that
# is taken to be the input of that layer. This allows for passing in
# subgraphs like from a tf.split (for data parallel) or preprocessing
# graphs that convert text to indices
for k in embeddings_info:
if k in kwargs:
_state[k] = kwargs[k]
labels = read_json("{}.labels".format(basename))
model = cls.create(embeddings, labels, **_state)
model._state = _state
if kwargs.get('init', True):
model.sess.run(tf.compat.v1.global_variables_initializer())
model.saver = tf.compat.v1.train.Saver()
model.saver.restore(model.sess, basename)
else:
embeddings_info = _state.pop('embeddings')
embeddings = reload_embeddings(embeddings_info, basename)
# If there is a kwarg that is the same name as an embedding object that
# is taken to be the input of that layer. This allows for passing in
# subgraphs like from a tf.split (for data parallel) or preprocessing
# graphs that convert text to indices
for k in embeddings_info:
if k in kwargs:
_state[k] = kwargs[k]
# TODO: convert labels into just another vocab and pass number of labels to models.
labels = read_json("{}.labels".format(basename))
model = cls.create(embeddings, labels, **_state)
model._state = _state
model.load_weights(f"{basename}.wgt")
return model
@property
def lengths_key(self) -> str:
return self._lengths_key
@lengths_key.setter
def lengths_key(self, value: str):
self._lengths_key = value
@classmethod
def create(cls, embeddings: Dict[str, BaseLayer], labels: List[str], **kwargs) -> 'ClassifierModelBase':
"""The main method for creating all :class:`ClassifierBasedModel` types.
This method typically instantiates a model with pooling and optional stacking layers.
Many of the arguments provided are reused by each implementation, but some sub-classes need more
information in order to properly initialize. For this reason, the full list of keyword args are passed
to the :method:`pool` and :method:`stacked` methods.
:param embeddings: This is a dictionary of embeddings, mapped to their numerical indices in the lookup table
:param labels: This is a list of the `str` labels
:param kwargs: There are sub-graph specific Keyword Args allowed for e.g. embeddings. See below for known args:
:Keyword Arguments:
* *gpus* -- (``int``) How many GPUs to split training across. If called this function delegates to
another class `ClassifyParallelModel` which creates a parent graph and splits its inputs across each
sub-model, by calling back into this exact method (w/o this argument), once per GPU
* *model_type* -- The string name for the model (defaults to `default`)
* *sess* -- An optional tensorflow session. If not passed, a new session is
created
* *lengths_key* -- (``str``) Specifies which `batch_dict` property should be used to determine the temporal length
if this is not set, it defaults to either `word`, or `x` if `word` is also not a feature
* *finetune* -- Are we doing fine-tuning of word embeddings (defaults to `True`)
* *mxlen* -- The maximum signal (`x` tensor temporal) length (defaults to `100`)
* *dropout* -- This indicates how much dropout should be applied to the model when training.
* *filtsz* -- This is actually a top-level param due to an unfortunate coupling between the pooling layer
and the input, which, for convolution, requires input padding.
:return: A fully-initialized tensorflow classifier
"""
model = cls(name=kwargs.get('name'))
#embeddings_ = {}
#for k, embedding in embeddings.items():
# embeddings_[k] = embedding #.detached_ref()
model.lengths_key = kwargs.get('lengths_key')
if not tf.executing_eagerly():
inputs = {}
if model.lengths_key is not None:
model._unserializable.append(model.lengths_key)
model.lengths = kwargs.get('lengths', tf.compat.v1.placeholder(tf.int32, [None], name="lengths"))
inputs['lengths'] = model.lengths
else:
model.lengths = None
model._record_state(embeddings, **kwargs)
nc = len(labels)
if not tf.executing_eagerly():
model.y = kwargs.get('y', tf.compat.v1.placeholder(tf.int32, [None, nc], name="y"))
for k, embedding in embeddings.items():
x = kwargs.get(k, embedding.create_placeholder(name=k))
inputs[k] = x
model.sess = kwargs.get('sess', create_session())
model.pdrop_value = kwargs.get('dropout', 0.5)
model.labels = labels
model.create_layers(embeddings, **kwargs)
if not tf.executing_eagerly():
model.logits = tf.identity(model(inputs), name="logits")
model.best = tf.argmax(model.logits, 1, name="best")
model.probs = tf.nn.softmax(model.logits, name="probs")
return model
def create_layers(self, embeddings: Dict[str, TensorDef], **kwargs):
"""This method defines the model itself, and must be overloaded by derived classes
This function will update `self` with the layers required to execute the `call()` method
:param embeddings: The input feature indices
:param kwargs:
:return:
"""
class EmbedPoolStackClassifier(ClassifierModelBase):
    """A common base for classifiers following the embed -> pool -> stack -> project idiom.

    A typical classifier can be viewed as a composition of an embedding stack,
    a pooling operation producing a fixed-length tensor, zero or more dense
    layers, and a final projection to the label space.  `create_layers` exposes
    one hook per stage so sub-classes only override the pieces they care about,
    while `call` is fully implemented here.
    """

    def create_layers(self, embeddings: Dict[str, TensorDef], **kwargs):
        # Each stage's input width is the previous stage's output width.
        self.embeddings = self.init_embed(embeddings, **kwargs)
        self.pool_model = self.init_pool(self.embeddings.output_dim, **kwargs)
        self.stack_model = self.init_stacked(self.pool_model.output_dim, **kwargs)
        self.output_layer = self.init_output(**kwargs)

    def init_embed(self, embeddings: Dict[str, TensorDef], **kwargs) -> BaseLayer:
        """Create the embedding stage, with an optional reduction across features.

        :param embeddings: A dictionary of embeddings
        :param kwargs: See below

        :Keyword Arguments:
        * *embeddings_reduction* (defaults to `concat`) An operator to perform on a stack of embeddings
        * *embeddings_name* (``str``) Optional override to Keras default names

        :return: The reduced embedding stack; typically carries an extra hidden dimension
        """
        return EmbeddingsStack(
            embeddings,
            self.pdrop_value,
            reduction=kwargs.get('embeddings_reduction', 'concat'),
            name=kwargs.get('embeddings_name'),
        )

    def init_pool(self, input_dim: int, **kwargs) -> BaseLayer:
        """Hook: produce the pooling operation used in the model.  Sub-classes implement this.

        :param input_dim: The input dimension size
        :param kwargs:
        :return: A pooling operation
        """

    def init_stacked(self, input_dim: int, **kwargs) -> BaseLayer:
        """Produce the (optional) post-pooling stacking operation.

        :param input_dim: The input dimension size
        :param kwargs: See below

        :keyword arguments:
        * *hsz* (``list``), defaults to nothing, in which case this function is pass-through
        * *stacked_name* (``str``) Optional override to stacking name

        :return: A stacking operation (or a pass-through when no `hsz` is given)
        """
        hidden_sizes = listify(kwargs.get('hsz', []))
        if hidden_sizes:
            return DenseStack(input_dim, hidden_sizes, pdrop_value=self.pdrop_value,
                              name=kwargs.get('stacked_name'))
        return PassThru(input_dim)

    def init_output(self, **kwargs):
        """Project from the encoder output to the number of labels (logits, no activation).

        :param kwargs: See below

        :keyword arguments:
        * *output_name* (``str``) Optional override to default Keras layer name

        :return: The label projection layer
        """
        return tf.keras.layers.Dense(len(self.labels), name=kwargs.get('output_name'))

    def call(self, inputs: Dict[str, TensorDef]) -> TensorDef:
        """Forward execution of the model.  Sub-classes typically need not override.

        :param inputs: An input dictionary containing the features and the primary key length
        :return: The logits tensor
        """
        hidden = self.embeddings(inputs)
        # Pooling layers take a (tensor, lengths) pair so RNN poolers can mask
        pooled = self.pool_model((hidden, inputs.get("lengths")))
        return self.output_layer(self.stack_model(pooled))
@register_model(task='classify', name='default')
class ConvModel(EmbedPoolStackClassifier):
    """Default `baseline` classifier: parallel convolutions of varying receptive-field width."""

    def init_pool(self, input_dim: int, **kwargs) -> BaseLayer:
        """Parallel convolutional filtering over several widths, then max-over-time pooling.

        :param input_dim: Embedding output size
        :param kwargs: See below

        :Keyword Arguments:
        * *cmotsz* -- (``int``) The number of convolutional feature maps for each filter
            These are MOT-filtered, leaving this # of units per parallel filter
        * *filtsz* -- (``list``) This is a list of filter widths to use
        * *pool_name* -- (``str``) Optional name to override default Keras layer name

        :return: A pooling layer
        """
        conv = ParallelConv(input_dim, kwargs['cmotsz'], kwargs['filtsz'],
                            name=kwargs.get('pool_name'))
        # Dropout wraps the conv output; WithoutLength discards the lengths input
        return WithoutLength(WithDropout(conv, self.pdrop_value))
@register_model(task='classify', name='lstm')
class LSTMModel(EmbedPoolStackClassifier):
    """A simple single-directional single-layer LSTM. No layer-stacking.
    """
    def __init__(self, name=None):
        super().__init__(name=name)
        self._vdrop = None

    @property
    def vdrop(self):
        # Whether variational dropout is used in the recurrent pooling layer
        return self._vdrop

    @vdrop.setter
    def vdrop(self, value):
        self._vdrop = value

    def init_pool(self, input_dim: int, **kwargs) -> BaseLayer:
        """LSTM with dropout yielding a final-state as output

        :param input_dim: The input word embedding depth
        :param kwargs: See below

        :Keyword Arguments:
        * *rnnsz* -- (``int``) The number of hidden units (defaults to `hsz`)
        * *rnntype/rnn_type* -- (``str``) The RNN type, defaults to `lstm`, other valid values: `blstm`
        * *hsz* -- (``int``) backoff for `rnnsz`, typically a result of stacking params. This keeps things simple so
            its easy to do things like residual connections between LSTM and post-LSTM stacking layers
        * *variational* -- (``bool``) Use variational dropout in the RNN (defaults to False)
        * *layers* -- (``int``) Number of RNN layers (defaults to 1)
        * *pool_name* -- (``str``) Optional name to override default Keras layer name

        :return: A pooling layer
        """
        hsz = kwargs.get('rnnsz', kwargs.get('hsz', 100))
        vdrop = bool(kwargs.get('variational', False))
        # `hsz` may arrive as a list when it comes from stacking config; the RNN
        # width is the first entry.  Use isinstance rather than `type(...) is list`
        # (idiomatic, and also accepts list subclasses).
        if isinstance(hsz, list):
            hsz = hsz[0]
        rnntype = kwargs.get('rnn_type', kwargs.get('rnntype', 'lstm'))
        nlayers = int(kwargs.get('layers', 1))
        name = kwargs.get('pool_name')
        if rnntype == 'blstm':
            return BiLSTMEncoderHidden(None, hsz, nlayers, self.pdrop_value, vdrop, name=name)
        return LSTMEncoderHidden(None, hsz, nlayers, self.pdrop_value, vdrop, name=name)
class NBowModelBase(EmbedPoolStackClassifier):
    """Neural Bag-of-Words Model base class. Defines stacking of fully-connected layers, but leaves pooling to derived
    """
    def init_stacked(self, input_dim: int, **kwargs) -> BaseLayer:
        """Produce a stacking operation that will be used in the model, defaulting to a single layer

        BUG FIX: the original signature was `init_stacked(self, **kwargs)`, which broke the
        positional `input_dim` argument passed by `create_layers`, and it delegated to the
        nonexistent `super().stacked(...)` instead of `super().init_stacked(...)`.

        :param input_dim: The input dimension size
        :param kwargs: See below

        :Keyword Arguments:
        * *hsz* -- (``List[int]``) The number of hidden units (defaults to 100)
        * *stacked_name* -- (``str``) Optional name to override default Keras layer name
        """
        # Default to one hidden layer of width 100 when the config gives none
        kwargs.setdefault('hsz', [100])
        return super().init_stacked(input_dim, **kwargs)
@register_model(task='classify', name='nbow')
class NBowModel(NBowModelBase):
    """Neural Bag-of-Words average pooling (standard) model"""

    def init_pool(self, input_dim: int, **kwargs):
        """Average-pool the input embeddings over time, yielding a `dsz`-wide representation.

        :param input_dim: The word embedding depth
        :param kwargs: See below

        :keyword arguments:
        * *pool_name* -- (``str``) Optional name to override default Keras layer name

        :return: The average pooling layer
        """
        return MeanPool1D(input_dim, name=kwargs.get('pool_name'))
@register_model(task='classify', name='nbowmax')
class NBowMaxModel(NBowModelBase):
    """Max-pooling model for Neural Bag-of-Words. Sometimes does better than avg pooling
    """

    def init_pool(self, input_dim: int, **kwargs) -> BaseLayer:
        """Max-pool the input embeddings over time, yielding a `dsz`-wide representation.

        :param input_dim: The word embedding depth
        :param kwargs: See below

        :keyword arguments:
        * *pool_name* -- (``str``) Optional name to override default Keras layer name

        :return: The max pooling layer
        """
        pooling = tf.keras.layers.GlobalMaxPooling1D(name=kwargs.get('pool_name'))
        # WithoutLength discards the lengths element of the (tensor, lengths) pair
        return WithoutLength(pooling)
@register_model(task='classify', name='fine-tune')
class FineTuneModelClassifier(ClassifierModelBase):
    """Fine-tune based on pre-pooled representations"""

    def init_embed(self, embeddings: Dict[str, TensorDef], **kwargs) -> BaseLayer:
        """Create the embedding stage, with an optional reduction across features.

        :param embeddings: A dictionary of embeddings
        :param kwargs: See below

        :Keyword Arguments:
        * *embeddings_reduction* (defaults to `concat`) An operator to perform on a stack of embeddings
        * *embeddings_name* (``str``) Optional override to Keras default names
        * *embeddings_dropout* (``float``) how much dropout post-reduction (defaults to 0.0)

        :return: The reduced embedding stack; typically carries an extra hidden dimension
        """
        reduction_op = kwargs.get('embeddings_reduction', 'concat')
        dropout_rate = float(kwargs.get('embeddings_dropout', 0.0))
        return EmbeddingsStack(embeddings, dropout_rate, reduction=reduction_op,
                               name=kwargs.get('embeddings_name'))

    def init_stacked(self, input_dim: int, **kwargs) -> BaseLayer:
        """Produce the (optional) stacking operation used in the model.

        :param input_dim: The input dimension size
        :param kwargs: See below

        :keyword arguments:
        * *hsz* (``list``), defaults to nothing, in which case this function is pass-through
        * *stacked_name* (``str``) Optional override to stacking name

        :return: A stacking operation (or a pass-through when no `hsz` is given)
        """
        hidden_sizes = listify(kwargs.get('hsz', []))
        if hidden_sizes:
            return DenseStack(input_dim, hidden_sizes, pdrop_value=self.pdrop_value,
                              name=kwargs.get('stacked_name'))
        return PassThru(input_dim)

    def init_output(self, **kwargs):
        """Project from the encoder output to the number of labels (logits, no activation).

        :param kwargs: See below

        :keyword arguments:
        * *output_name* (``str``) Optional override to default Keras layer name

        :return: The label projection layer
        """
        return tf.keras.layers.Dense(len(self.labels), name=kwargs.get('output_name'))

    def create_layers(self, embeddings, **kwargs):
        # No pooling stage here: the embeddings are assumed to already be pooled
        self.embeddings = self.init_embed(embeddings, **kwargs)
        self.stack_model = self.init_stacked(self.embeddings.output_dim, **kwargs)
        self.output_layer = self.init_output(**kwargs)

    def call(self, inputs):
        pooled = self.embeddings(inputs)
        return self.output_layer(self.stack_model(pooled))
@register_model(task='classify', name='fine-tune-paired')
class FineTunePairedClassifierModel(FineTuneModelClassifier):
    """Fine-tuning model for sentence pairs.

    A pair is encoded as a single utterance (see ``_convert_pair``) and fed
    straight into the fine-tuning model.  For BERT this means the pair becomes
    one utterance accompanied by a token-type vector.  The vectorizer is
    assumed to emit start/end tokens; the start token of the second sentence
    is dropped and the two are concatenated:

        [CLS] this is sentence one [SEP]
        [CLS] this is sentence two [SEP]
    """

    def _convert_pair(self, key, batch_dict, example_dict):
        # When a "<key>_tt" token-type feature exists, pass (tokens, token_types)
        # as a tuple; otherwise pass the tokens alone.
        token_types = batch_dict.get(f"{key}_tt")
        tokens = batch_dict[key]
        if token_types is None:
            example_dict[key] = tokens
        else:
            example_dict[key] = (tokens, token_types)

    def call(self, inputs):
        model_inputs = {}
        for feature in self.embeddings.keys():
            self._convert_pair(feature, inputs, model_inputs)
        stacked = self.stack_model(self.embeddings(model_inputs))
        return self.output_layer(stacked)
@register_model(task='classify', name='fine-tune-dual')
class FineTuneDualModelClassifier(FineTuneModelClassifier):
    """Fine-tune based on pre-pooled representations"""

    def init_embed(self, embeddings: Dict[str, TensorDef], **kwargs) -> BaseLayer:
        """Create a dual-input embedding stage that encodes both inputs with one shared embedding.

        :param embeddings: A dictionary of embeddings; exactly one entry is supported

        :Keyword Arguments:
        * *embeddings_reduction* (defaults to `concat-subtract`) An operator to perform on the embedding pair
        * *embeddings_dropout* (``float``) how much dropout post-reduction (defaults to 0.0)
        * *embeddings_name* (``str``) Optional override to Keras default names

        :return: The reduced dual embedding stack
        """
        reduction = kwargs.get('embeddings_reduction', 'concat-subtract')
        dropout_rate = float(kwargs.get('embeddings_dropout', 0.0))
        if len(embeddings) != 1:
            raise Exception("Currently we only support a single embedding")
        layer_name = kwargs.get('embeddings_name')
        key_name = list(embeddings.keys())[0]
        shared = embeddings[key_name]
        # The same embedding object serves both halves of the pair
        paired = {f"{key_name}[0]": shared, f"{key_name}[1]": shared}
        return EmbeddingsStack(paired, dropout_rate, reduction=reduction, name=layer_name)
@register_model(task='classify', name='composite')
class CompositePoolingModel(EmbedPoolStackClassifier):
    """Fulfills pooling contract by aggregating pooling from a set of sub-models and concatenates each
    """
    def init_pool(self, input_dim: int, **kwargs) -> BaseLayer:
        """Cycle each sub-model and call its pooling hook, then concatenate along the final dimension.

        BUG FIX: the framework (`EmbedPoolStackClassifier.create_layers`) calls `init_pool`,
        but this class only defined a legacy `pool` method, so its pooling was never invoked.
        The logic now lives in `init_pool`, and the sub-model hook called is `init_pool` to
        match the current sub-class API.

        :param input_dim: The number of input units
        :param kwargs: See below

        :Keyword Arguments:
        * *sub* -- (``list``) Names of the sub-model classes whose pooling layers are composed

        :return: A pooled composite output
        """
        # SECURITY NOTE: `eval` resolves class names from configuration; this is only
        # safe with trusted config files.
        sub_classes = [eval(model) for model in kwargs.get('sub')]
        pooled = [sub_class.init_pool(self, input_dim, **kwargs) for sub_class in sub_classes]
        return CompositeModel(pooled)

    def pool(self, dsz, **kwargs):
        """Deprecated alias kept for backward compatibility; delegates to `init_pool`."""
        return self.init_pool(dsz, **kwargs)
| |
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
## ROS message source code generation for C++
##
## Converts ROS .msg files in a package into C++ source code implementations.
import msg_gen as genmsg_cpp
import sys
import os
import traceback
# roslib.msgs contains the utilities for parsing .msg specifications. It is meant to have no rospy-specific knowledge
import roslib.srvs
import roslib.packages
import roslib.gentools
from rospkg import RosPack
try:
from cStringIO import StringIO #Python 2.x
except ImportError:
from io import StringIO #Python 3.x
def write_begin(s, spec, file):
    """
    Write the header-file preamble: an auto-generated banner followed by the opening include guards.

    @param s: The stream to write to
    @type s: stream
    @param spec: The spec
    @type spec: roslib.srvs.SrvSpec
    @param file: The file this service is being generated for
    @type file: str
    """
    guard = '%s_SERVICE_%s_H' % (spec.package.upper(), spec.short_name.upper())
    s.write("/* Auto-generated by genmsg_cpp for file %s */\n" % file)
    s.write('#ifndef %s\n' % guard)
    s.write('#define %s\n' % guard)
def write_end(s, spec):
    """
    Write the end of the header file: the closing include guard.

    @param s: The stream to write to
    @type s: stream
    @param spec: The spec
    @type spec: roslib.srvs.SrvSpec
    """
    guard = '%s_SERVICE_%s_H' % (spec.package.upper(), spec.short_name.upper())
    s.write('#endif // %s\n' % guard)
def write_generic_includes(s):
    """
    Write the includes needed by every generated service header.

    @param s: The stream to write to
    @type s: stream
    """
    # service_traits.h supplies the MD5Sum/DataType trait templates specialized below
    s.write('#include "ros/service_traits.h"\n\n')
def write_trait_char_class(s, class_name, cpp_msg, value):
    """
    Write a trait-class specialization whose static value() members return const char*.

    e.g. write_trait_char_class(s, "MD5Sum", "std_srvs::Empty", "hello") yields:
        template<>
        struct MD5Sum<std_srvs::Empty>
        {
            static const char* value() { return "hello"; }
            static const char* value(const std_srvs::Empty&) { return value(); }
        };

    @param s: The stream to write to
    @type s: stream
    @param class_name: The name of the trait class
    @type class_name: str
    @param cpp_msg: The C++ message declaration, e.g. "std_srvs::Empty"
    @type cpp_msg: str
    @param value: The string value to return from the value() function
    @type value: str
    """
    chunks = (
        'template<>\nstruct %s<%s> {\n' % (class_name, cpp_msg),
        ' static const char* value() \n {\n return "%s";\n }\n\n' % value,
        ' static const char* value(const %s&) { return value(); } \n' % cpp_msg,
        '};\n\n',
    )
    for chunk in chunks:
        s.write(chunk)
def write_traits(s, spec, cpp_name_prefix, rospack=None):
    """
    Write all the service traits for a message.

    @param s: The stream to write to
    @type s: stream
    @param spec: The service spec
    @type spec: roslib.srvs.SrvSpec
    @param cpp_name_prefix: The C++ prefix to prepend when referencing the service, e.g. "std_srvs::"
    @type cpp_name_prefix: str
    @param rospack: Optional RosPack instance reused for dependency resolution
    """
    dep_dict = roslib.gentools.get_dependencies(spec, spec.package, rospack=rospack)
    md5sum = roslib.gentools.compute_md5(dep_dict, rospack=rospack)
    s.write('namespace ros\n{\n')
    s.write('namespace service_traits\n{\n')
    service_cpp = '%s%s' % (cpp_name_prefix, spec.short_name)
    request_with_allocator = '%s%s_<ContainerAllocator> ' % (cpp_name_prefix, spec.request.short_name)
    response_with_allocator = '%s%s_<ContainerAllocator> ' % (cpp_name_prefix, spec.response.short_name)
    # Traits for the service type itself
    write_trait_char_class(s, 'MD5Sum', service_cpp, md5sum)
    write_trait_char_class(s, 'DataType', service_cpp, spec.full_name)
    # Traits for the allocator-templated request and response message types
    for allocated in (request_with_allocator, response_with_allocator):
        genmsg_cpp.write_trait_char_class(s, 'MD5Sum', allocated, md5sum)
        genmsg_cpp.write_trait_char_class(s, 'DataType', allocated, spec.full_name)
    s.write('} // namespace service_traits\n')
    s.write('} // namespace ros\n\n')
def generate(srv_path):
    """
    Generate the C++ header for a single .srv file.

    The header (includes, request/response structs, the service struct, traits
    and serialization) is assembled in memory and written to
    <pkg_dir>/srv_gen/cpp/include/<pkg>/<Srv>.h.

    @param srv_path: the path to the .srv file
    @type srv_path: str
    """
    (package_dir, package) = roslib.packages.get_dir_pkg(srv_path)
    (_, spec) = roslib.srvs.load_from_file(srv_path, package)
    s = StringIO()
    cpp_prefix = '%s::'%(package)
    write_begin(s, spec, srv_path)
    genmsg_cpp.write_generic_includes(s)
    write_generic_includes(s)
    genmsg_cpp.write_includes(s, spec.request)
    s.write('\n')
    genmsg_cpp.write_includes(s, spec.response)
    rospack = RosPack()
    gendeps_dict = roslib.gentools.get_dependencies(spec, spec.package, rospack=rospack)
    md5sum = roslib.gentools.compute_md5(gendeps_dict, rospack=rospack)
    # The request/response structs both carry the full-service MD5 so peers can
    # verify client/server compatibility.
    s.write('namespace %s\n{\n'%(package))
    genmsg_cpp.write_struct(s, spec.request, cpp_prefix, {'ServerMD5Sum': md5sum})
    s.write('\n')
    genmsg_cpp.write_struct(s, spec.response, cpp_prefix, {'ServerMD5Sum': md5sum})
    # The service struct simply bundles the Request and Response types
    s.write('struct %s\n{\n'%(spec.short_name))
    s.write('\n')
    s.write('typedef %s Request;\n'%(spec.request.short_name))
    s.write('typedef %s Response;\n'%(spec.response.short_name))
    s.write('Request request;\n')
    s.write('Response response;\n\n')
    s.write('typedef Request RequestType;\n')
    s.write('typedef Response ResponseType;\n')
    s.write('}; // struct %s\n'%(spec.short_name))
    s.write('} // namespace %s\n\n'%(package))
    genmsg_cpp.write_traits(s, spec.request, cpp_prefix, rospack=rospack)
    s.write('\n')
    genmsg_cpp.write_traits(s, spec.response, cpp_prefix, rospack=rospack)
    genmsg_cpp.write_serialization(s, spec.request, cpp_prefix)
    s.write('\n')
    genmsg_cpp.write_serialization(s, spec.response, cpp_prefix)
    write_traits(s, spec, cpp_prefix, rospack=rospack)
    write_end(s, spec)
    output_dir = '%s/srv_gen/cpp/include/%s'%(package_dir, package)
    if (not os.path.exists(output_dir)):
        # if we're being run concurrently, the above test can report false but os.makedirs can still fail if
        # another copy just created the directory
        try:
            os.makedirs(output_dir)
        except OSError:
            pass
    # BUG FIX: the original leaked the file handle (open without close); `with`
    # guarantees it is closed even if the write raises.
    with open('%s/%s.h'%(output_dir, spec.short_name), 'w') as f:
        f.write(s.getvalue() + "\n")
    s.close()
def generate_services(argv):
    """Generate a C++ header for every .srv path given on the command line (argv[0] is the program name)."""
    for srv_file in argv[1:]:
        generate(srv_file)
if __name__ == "__main__":
roslib.msgs.set_verbose(False)
generate_services(sys.argv)
| |
#!/usr/bin/env python
"""
@file coi-services/mi.idk/egg_generator.py
@author Bill French
@brief Generate egg for a driver. This uses snakefood to build
       a dependency list and includes all files in the driver directory.
"""
__author__ = 'Bill French'
__license__ = 'Apache 2.0'
import re
import os
import sys
import shutil
import subprocess
from os.path import basename, dirname, isdir
from operator import itemgetter
from string import Template
from mi.idk import prompt
from mi.idk.config import Config
from mi.idk.metadata import Metadata
from mi.idk.driver_generator import DriverGenerator
from mi.core.log import get_logger ; log = get_logger()
from mi.idk.exceptions import NotPython
from mi.idk.exceptions import NoRoot
from mi.idk.exceptions import FileNotFound
from mi.idk.exceptions import InvalidParameters
from mi.idk.exceptions import MissingTemplate
from mi.idk.exceptions import ValidationFailure
from mi.idk.exceptions import IDKException
from mi.idk.unit_test import InstrumentDriverTestConfig
from mi.idk.driver_generator import DriverGenerator
from snakefood.util import iter_pyfiles, setup_logging, def_ignores, is_python
from snakefood.depends import output_depends, read_depends
from snakefood.find import find_dependencies
from snakefood.find import find_imports
from snakefood.find import ERROR_IMPORT, ERROR_SYMBOL, ERROR_UNUSED
from snakefood.fallback.collections import defaultdict
from snakefood.roots import *
REPODIR = '/tmp/repoclone/marine-integrations'
class DependencyList:
    """
    Build a list of dependency files for a python module. This uses the snakefood
    module to build out the list. This class does not output __init__.py files by
    default, but passing the include_internal_init option to the constructor can
    change this behavior.
    Usage:
    deplist = DependencyList(target_file, True)
    # All dependency files.
    all_deps = deplist.all_dependencies()
    # Internal dependencies
    int_deps = deplist.internal_dependencies()
    # External dependencies
    extern_deps = deplist.external_dependencies()
    """
    def __init__(self, filename, include_internal_init = False):
        """
        @param filename python source file to analyze
        @param include_internal_init when True, __init__.py files from the
               target's own package root are added to internal_dependencies()
        @raise FileNotFound if filename does not exist
        @raise NotPython if filename is not a python source file
        """
        if not os.path.isfile(filename):
            raise FileNotFound(filename)
        if not is_python(filename):
            raise NotPython(filename)
        self.include_internal_init = include_internal_init
        self.dependency_list = None  # lazily built by fetch_dependencies()
        self.file_roots = None       # package roots of the target file
        self.filename = filename
    def fetch_dependencies(self):
        """
        Fetch all dependencies and follow the target file.
        This was inspired by the snakefood library
        snakefood-1.4-py2.7.egg/snakefood/gendeps.py
        Returns (and caches) a dict mapping (root, relative_path) -> set of
        (root, relative_path) dependency tuples.
        """
        # No need to run this twice
        if self.dependency_list: return self.dependency_list
        log.info("Fetching internal dependecies: %s" % self.filename)
        depends = find_imports(self.filename, 1, 0)
        # Get the list of package roots for our input files and prepend them to the
        # module search path to insure localized imports.
        inroots = find_roots([self.filename], [])
        self.file_roots = inroots
        if not inroots:
            raise NoRoot
        for file in inroots:
            log.debug("Root found: %s" % file)
        sys.path = inroots + sys.path
        #log.debug("Using the following import path to search for modules:")
        #for dn in sys.path:
        #    log.debug("  -- %s" % dn)
        inroots = frozenset(inroots)
        # Find all the dependencies.
        log.debug("Processing file:")
        allfiles = defaultdict(set)
        allerrors = []
        processed_files = set()
        ignorefiles = []
        alldependencies = []
        # Breadth-first walk: start from the target file, keep following any
        # newly discovered dependency files until no new ones appear.
        fiter = iter_pyfiles([self.filename], ignorefiles, False)
        while 1:
            newfiles = set()
            for fn in fiter:
                log.debug("  post-filter: %s" % fn)
                processed_files.add(fn)
                if is_python(fn):
                    files, errors = find_dependencies(fn, 0, 0)
                    log.debug("dependency file count: %d" % len(files))
                    allerrors.extend(errors)
                else:
                    # If the file is not a source file, we don't know how to get the
                    # dependencies of that (without importing, which we want to
                    # avoid).
                    files = []
                # When packages are the source of dependencies, remove the __init__
                # file. This is important because the targets also do not include the
                # __init__ (i.e. when "from <package> import <subpackage>" is seen).
                if basename(fn) == '__init__.py':
                    fn = dirname(fn)
                # no dependency.
                from_ = relfile(fn, ignorefiles)
                if from_ is None:
                    log.debug("from_ empty. Move on")
                    continue
                infrom = from_[0] in inroots
                log.debug( "  from: %s" % from_[0])
                log.debug( "  file: %s" % from_[1])
                # Register the source file even if it has no dependencies.
                allfiles[from_].add((None, None))
                # Add the dependencies.
                for dfn in files:
                    xfn = dfn
                    if basename(xfn) == '__init__.py':
                        xfn = dirname(xfn)
                    to_ = relfile(xfn, ignorefiles)
                    into = to_[0] in inroots
                    log.debug( "  from: %s, to: %s" % (from_[1], to_[1]))
                    if dfn in alldependencies:
                        log.trace("Already added %s to dependency list" % dfn)
                    else:
                        log.debug("Add %s to dependency list" % dfn)
                        allfiles[from_].add(to_)
                        newfiles.add(dfn)
                        alldependencies.append(dfn)
            if not newfiles:
                log.debug("No more new files. all done")
                break
            else:
                # Follow the files discovered on this pass.
                fiter = iter(sorted(newfiles))
        # Output the list of roots found.
        log.debug("Found roots:")
        found_roots = set()
        for key, files in allfiles.iteritems():
            found_roots.add(key[0])
            found_roots.update(map(itemgetter(0),files))
        if None in found_roots:
            found_roots.remove(None)
        for root in sorted(found_roots):
            log.debug("  %s" % root)
        self.dependency_list = allfiles
        return self.dependency_list;
    def internal_dependencies(self):
        """
        Return a sorted list of internal dependencies for self.filename
        (files sharing the target's package root), optionally including
        the package __init__.py files.
        """
        filelist = self.fetch_dependencies()
        initlist = []
        if self.include_internal_init:
            initlist = self._init_set(filelist, self.file_roots[0])
        log.debug( "Internal dependencies:" )
        return sorted(initlist + self._dependency_set(filelist, True, False))
    def external_dependencies(self):
        """
        Return a list of external dependencies for self.filename
        """
        filelist = self.fetch_dependencies()
        log.debug( "External dependencies:" )
        return self._dependency_set(filelist, False, True)
    def all_dependencies(self):
        """
        Return all dependencies for self.filename
        """
        filelist = self.fetch_dependencies()
        log.debug( "All dependencies:" )
        return self._dependency_set(filelist)
    def internal_roots(self):
        """
        Return a list of internal dependency roots for self.filename
        """
        filelist = self.fetch_dependencies()
        log.debug( "Internal roots:" )
        return self._root_set(filelist, True, False)
    def external_roots(self):
        """
        Return a list of external dependency roots for self.filename
        """
        filelist = self.fetch_dependencies()
        log.debug( "External roots:" )
        return self._root_set(filelist, False, True)
    def all_roots(self):
        """
        Return all dependency roots for self.filename
        """
        filelist = self.fetch_dependencies()
        log.debug( "All roots:" )
        return self._root_set(filelist)
    def _dependency_set(self, filelist, internal = None, external = None):
        """
        Return a sorted list of files that are in the dependency list. The
        internal and external flags specify the list contents. Internal set
        returns all files that have the same root as the target file.
        External is the opposite. With neither flag set, everything is kept.
        """
        result = []
        for key, files in filelist.iteritems():
            inroot = key[0] in self.file_roots
            if(not(internal or external) or (internal and inroot) or (external and not inroot)):
                # Directories (packages collapsed from __init__.py) are skipped.
                if not isdir(key[1]):
                    result.append(key[1])
        result = sorted(result)
        for file in result:
            log.debug("  %s" % file)
        return result
    def _root_set(self, filelist, internal = None, external = None):
        """
        Return a sorted list of unique roots seen in the dependency list.
        The internal and external flags specify the list contents. Internal
        set returns all roots equal to the target file's root. External is
        the opposite.
        """
        result = []
        for key, files in filelist.iteritems():
            inroot = key[0] in self.file_roots
            if(not(internal or external) or (internal and inroot) or (external and not inroot)):
                if not key[0] in result:
                    result.append(key[0])
        result = sorted(result)
        for root in result:
            log.debug("  %s" % root)
        return result
    def _init_set(self, filelist, root):
        """
        Return a sorted set of __init__.py files (relative paths) for every
        package directory on the path of each internal file in filelist.
        """
        #log.debug("Build __init__.py list")
        result = []
        for key, files in filelist.iteritems():
            if key[0] == root:
                for initfile in self._get_init_files(key[1], root):
                    if not initfile in result:
                        result.append(initfile)
        result = sorted(result)
        for file in result:
            log.debug("  add init file %s" % file)
        return result
    def _get_init_files(self, pyfile, root):
        """
        recursively step through a python module path and find all the __init__.py files
        """
        result = []
        current_path = dirname(pyfile);
        while current_path:
            initfile = "%s/__init__.py" % current_path
            abspath = "%s/%s" % (root, initfile)
            log.debug( "  -- Does %s exist?" % abspath )
            if os.path.exists(abspath):
                result.append(initfile)
            # Walk one directory level up toward the root.
            current_path = dirname(current_path)
        return result
class DriverFileList:
    """
    Build list of files that are associated to a driver. It uses the DependencyList
    object to get all python files. Then it will look in the target module directory
    for additional files.
    """
    def __init__(self, metadata, basedir, driver_file = None, driver_test_file = None):
        """
        @param metadata IDK Metadata object for the driver being packaged
        @param basedir repository base directory; stripped from result paths
        @param driver_file optional explicit driver source path (default:
               path derived from metadata by DriverGenerator)
        @param driver_test_file optional explicit driver test path
        """
        driver_generator = DriverGenerator(metadata)
        self.basedir = basedir
        if driver_file:
            self.driver_file = driver_file
        else:
            self.driver_file = driver_generator.driver_path()
        if driver_test_file:
            self.driver_test_file = driver_test_file
        else:
            self.driver_test_file = driver_generator.driver_test_path()
        # NOTE(review): the two None assignments below are immediately
        # overwritten and appear redundant.
        self.driver_dependency = None
        self.test_dependency = None
        self.driver_dependency = DependencyList(self.driver_file, include_internal_init=True)
        self.test_dependency = DependencyList(self.driver_test_file, include_internal_init=True)
    def files(self):
        """
        Return repo-relative paths (basedir and leading '/' stripped) of every
        file belonging in the driver egg: non-python extra files plus the
        internal python dependencies of the driver and its tests.
        """
        basep = re.compile(self.basedir)
        rootp = re.compile('^/')
        result = []
        log.debug( "F: %s" % self.driver_file)
        driver_files = []
        driver_files = self.driver_dependency.internal_dependencies()
        test_files = []
        test_files = self._scrub_test_files(self.test_dependency.internal_dependencies())
        extra_files = []
        extra_files = self._extra_files()
        files = extra_files + driver_files + test_files
        for fn in files:
            # NOTE(review): result holds *stripped* paths while fn is
            # unstripped, so this membership test rarely detects a
            # duplicate -- confirm intent.
            if not fn in result:
                f = basep.sub('', fn)
                f = rootp.sub('', f)
                result.append(f)
        log.debug("Result File List: %s", result)
        return result
    def _scrub_test_files(self, filelist):
        """
        The interface directory is built by generate interfaces so it comes up as an internal dependency.
        Remove any file under interface/ from filelist.
        """
        # I don't particularly like this, but these generated interface files need to be removed
        result = []
        p = re.compile('^interface\/')
        for file in filelist:
            if p.findall(file):
                log.debug("  Remove: %s" % file)
            else:
                result.append(file)
        return result
    def _extra_files(self):
        """
        Return all non-python files found under the driver's directory tree.
        """
        result = []
        p = re.compile('\.(py|pyc)$')
        for root, dirs, names in os.walk(dirname(self.driver_file)):
            for filename in names:
                # Ignore python files
                if not p.search(filename):
                    result.append("%s/%s" % (root, filename))
        # Add the resources directory __init__.py file, too. Without a .py file,
        # it can get forgotten by dependencies
        #result.append(os.path.join(dirname(self.driver_file), "resource/__init__.py"))
        return result
class EggGenerator:
    """
    Generate driver egg: stage the driver's files into a versioned build
    directory, generate setup.py / main.py from templates, and run
    'python setup.py bdist_egg'.
    """
    def __init__(self, metadata, repo_dir=REPODIR):
        """
        @brief Constructor
        @param metadata IDK Metadata object
        @param repo_dir base directory of the cloned driver repository
        @raise InvalidParameters when required IDK configuration is missing
        """
        self.metadata = metadata
        self._bdir = None
        self._repodir = repo_dir
        if not self._tmp_dir():
            raise InvalidParameters("missing tmp_dir configuration")
        # NOTE(review): this second check calls _tmp_dir() again, but the
        # error message mentions working_repo -- it likely should check a
        # different config key. Confirm.
        if not self._tmp_dir():
            raise InvalidParameters("missing working_repo configuration")
        self.generator = DriverGenerator(self.metadata)
        # Import the driver's test module for its import-time side effects;
        # presumably this populates InstrumentDriverTestConfig -- confirm.
        test_import = __import__(self._test_module())
    def _test_module(self):
        """Dotted name of the driver's test module."""
        return self.generator.test_modulename()
    def _driver_module(self):
        """Driver module name as registered in InstrumentDriverTestConfig."""
        test_config = InstrumentDriverTestConfig()
        return test_config.driver_module
    def _driver_class(self):
        """Driver class name as registered in InstrumentDriverTestConfig."""
        test_config = InstrumentDriverTestConfig()
        return test_config.driver_class
    def _repo_dir(self):
        """Base directory of the driver source repository."""
        return self._repodir
    def _res_dir(self):
        """'res' directory inside the staged build tree."""
        return os.path.join(self._versioned_dir(), 'res')
    def _res_config_dir(self):
        """'res/config' directory inside the staged build tree."""
        return os.path.join(self._res_dir(), 'config' )
    def _tmp_dir(self):
        """Scratch directory for egg builds, from IDK config."""
        return Config().get('tmp_dir')
    def _setup_path(self):
        """Path of the generated setup.py."""
        return os.path.join(self._build_dir(), 'setup.py' )
    def _setup_template_path(self):
        """Path of the setup.py template shipped with the IDK."""
        return os.path.join(Config().template_dir(), 'setup.tmpl' )
    def _main_path(self):
        """Path of the generated mi/main.py entry point."""
        return os.path.join(self._versioned_dir(), 'mi/main.py' )
    def _main_template_path(self):
        """Path of the main.py template shipped with the IDK."""
        return os.path.join(Config().template_dir(), 'main.tmpl' )
    def _build_name(self):
        """Egg name: make_model_name_version, dots in version -> underscores."""
        return "%s_%s_%s_%s" % (
            self.metadata.driver_make,
            self.metadata.driver_model,
            self.metadata.driver_name,
            self.metadata.version.replace('.', '_'),
        )
    def _build_dir(self):
        """Lazily compute and cache the build directory path."""
        if self._bdir:
            return self._bdir
        self._bdir = self._generate_build_dir()
        log.info( "egg build dir: %s" % self._bdir)
        return self._bdir
    def _generate_build_dir(self):
        """Compute the build dir path, removing any stale previous build."""
        build_dir = os.path.join(self._tmp_dir(), self._build_name())
        # clean out an old build if it exists
        if os.path.exists(build_dir):
            shutil.rmtree(build_dir)
        return build_dir
    def _versioned_dir(self):
        """Directory driver files are staged into (currently == build dir)."""
        return self._build_dir()
        #return os.path.join(self._build_dir(),
        #                    self._build_name())
    def _stage_files(self, files):
        """
        Copy files from the original directory into two levels of versioned
        directories within a staging directory, and replace the mi namespace
        with the versioned driver name.mi to account for the new directory
        (only the lower versioned dir is included in the egg)
        @param files - a list of files to copy into the staging directory
        """
        # NOTE(review): error-level logging of the full file list looks like
        # leftover debugging -- confirm before lowering the level.
        log.error(repr(files))
        # make two levels of versioned file directories, i.e.
        #     driverA_0_1 (= build_dir)
        #        driverA_0_1 (= versioned_dir)
        # then copy driverA files into the bottom versioned dir
        if not os.path.exists(self._build_dir()):
            os.makedirs(self._build_dir())
        if not os.path.exists(self._versioned_dir()):
            os.makedirs(self._versioned_dir())
        for file in files:
            if file not in ['res/config/__init__.py', 'res/__init__.py']:
                dest = os.path.join(self._versioned_dir(), file)
                destdir = dirname(dest)
                source = os.path.join(self._repo_dir(), file)
                # this one goes elsewhere so the InstrumentDict can find it
                if basename(file) == 'strings.yml':
                    dest = os.path.join(self._res_dir(), basename(file))
                    destdir = dirname(dest)
                log.debug(" Copy %s => %s" % (source, dest))
                # make sure the destination directory exists, if it doesn't make it
                if not os.path.exists(destdir):
                    os.makedirs(destdir)
                shutil.copy(source, dest)
                # replace mi in the copied files with the versioned driver module.mi
                # this is necessary because the top namespace in the versioned files starts
                # with the versioned driver name directory, not mi
                #driver_file = open(dest, "r")
                #contents = driver_file.read()
                #driver_file.close()
                #new_contents = re.sub(r'(^import |^from |\'|= )mi\.|res/config/mi-logging|\'mi\'',
                #                      self._mi_replace,
                #                      contents,
                #                      count=0,
                #                      flags=re.MULTILINE)
                #driver_file = open(dest, "w")
                #driver_file.write(new_contents)
                #driver_file.close()
        # need to add mi-logging.yml special because it is not in cloned repo, only in local repository
        milog = "mi-logging.yml"
        dest = os.path.join(self._res_config_dir(), milog)
        destdir = dirname(dest)
        source = os.path.join(Config().base_dir(), "res/config/" + milog)
        log.debug(" Copy %s => %s" % (source, dest))
        # make sure the destination directory exists, if it doesn't make it
        if not os.path.exists(destdir):
            os.makedirs(destdir)
        shutil.copy(source, dest)
        # we need to make sure an init file is in the versioned dir and
        # resource directories so that find_packages() will look in here
        init_file_list = [os.path.join(self._versioned_dir(), "__init__.py"),
                          os.path.join(self._versioned_dir(), "res", "__init__.py"),
                          os.path.join(self._versioned_dir(), "res", "config", "__init__.py")]
        for file in init_file_list:
            self._create_file(file)
    @staticmethod
    def _create_file(file):
        """
        Create an empty file if it isn't there already.
        """
        if not os.path.exists(file):
            init_file = open(file, "w")
            init_file.close()
    def _mi_replace(self, matchobj):
        """
        This function is used in regex sub to replace mi with the versioned
        driver name followed by mi
        @param matchobj - the match object from re.sub
        """
        if matchobj.group(0) == 'res/config/mi-logging':
            return self._build_name() + '/' + matchobj.group(0)
        elif matchobj.group(0) == '\'mi\'':
            return '\'' + self._build_name() + '.mi\''
        else:
            return matchobj.group(1) + self._build_name() + '.mi.'
    def _get_template(self, template_file):
        """
        @brief return a string.Template object constructed with the contents of template_file
        @param template_file path to a file that containes a template
        @retval string.Template object
        @raise MissingTemplate when template_file cannot be read
        """
        try:
            infile = open(template_file)
            tmpl_str = infile.read()
            return Template(tmpl_str)
        except IOError:
            raise MissingTemplate(msg="Missing: %s" % template_file)
    def _generate_setup_file(self):
        """Render the setup.py template into the build directory."""
        if not os.path.exists(self._build_dir()):
            os.makedirs(self._build_dir())
        if not os.path.exists(self._build_dir()):
            raise IDKException("failed to create build dir: %s" % self._build_dir())
        setup_file = self._setup_path()
        setup_template = self._get_template(self._setup_template_path())
        log.debug("Create setup.py file: %s", setup_file )
        log.debug("setup.py template file: %s", self._setup_template_path())
        log.debug("setup.py template date: %s", self._setup_template_data())
        log.debug("setup.py template: %s", setup_template)
        ofile = open(setup_file, 'w')
        code = setup_template.substitute(self._setup_template_data())
        log.debug("CODE: %s", code)
        ofile.write(code)
        ofile.close()
    def _setup_template_data(self):
        """Substitution values shared by the setup.py and main.py templates."""
        return {
            'name': self._build_name(),
            'version': self.metadata.version,
            'description': 'ooi core driver',
            'author': self.metadata.author,
            'email': self.metadata.email,
            'url': 'http://www.oceanobservatories.org',
            'driver_module': self._driver_module(),
            'driver_class': self._driver_class(),
            'driver_path': self.metadata.relative_driver_path(),
            'short_name': self.metadata.relative_driver_path().replace('/', '_')
        }
    def _generate_main_file(self):
        """Render the mi/main.py template into the staged build tree."""
        if not os.path.exists(self._versioned_dir()):
            os.makedirs(self._versioned_dir())
        main_file= self._main_path()
        main_template = self._get_template(self._main_template_path())
        log.debug("Create mi/main.py file: %s" % main_file )
        log.debug("main.py template file: %s" % self._main_template_path())
        log.debug("main.py template date: %s" % self._setup_template_data())
        ofile = open(main_file, 'w')
        code = main_template.substitute(self._setup_template_data())
        ofile.write(code)
        ofile.close()
    def _verify_ready(self):
        """
        verify that we have all the information and are in the correct state to build the egg
        """
        self._verify_python()
        self._verify_metadata()
        self._verify_version()
    def _verify_metadata(self):
        """
        Ensure we have all the metadata we need to build the egg
        (currently a placeholder).
        """
        pass
    def _verify_version(self, version = None):
        """
        Ensure we have a good version number and that it has not already been packaged and published
        @param version version string to check; defaults to metadata.version
        @raise ValidationFailure when missing or not in x.x.x form
        """
        if version == None:
            version = self.metadata.version
        if not version:
            raise ValidationFailure("Driver version required in metadata")
        p = re.compile("^\d+\.\d+\.\d+$")
        if not p.findall("%s" % version):
            raise ValidationFailure("Version format incorrect '%s', should be x.x.x" % version)
    def _verify_python(self):
        """
        Ensure we build with the correct python version
        @raise ValidationFailure unless running under python 2.7
        """
        if sys.version_info < (2, 7) or sys.version_info >= (2, 8):
            raise ValidationFailure("Egg generation required version 2.7 of python")
    def _build_egg(self, files):
        """
        Stage files, generate setup/main, run bdist_egg, and strip .pyc files
        from the resulting egg.
        @param files list of repo-relative files to package
        @retval path of the egg file, or None when verification fails
        """
        try:
            self._verify_ready()
            self._stage_files(files)
            self._generate_setup_file()
            self._generate_main_file()
            cmd = "cd %s; python setup.py bdist_egg" % self._build_dir()
            log.info("CMD: %s" % cmd)
            os.system(cmd)
            egg_file = "%s/dist/%s-%s-py2.7.egg" % (self._build_dir(),
                                                    self.metadata.relative_driver_path().replace('/', '_'),
                                                    self.metadata.version)
            # Remove all pyc files from the egg.  There was a jira case that suggested
            # including the compiled py files caused the drivers to run slower.
            # https://jira.oceanobservatories.org/tasks/browse/OOIION-1167
            cmd = "zip %s -d \*.pyc" % egg_file
            log.info("CMD: %s" % cmd)
            os.system(cmd)
        except ValidationFailure, e:
            log.error("Failed egg verification: %s" % e )
            return None
        log.debug("Egg file created: %s" % egg_file)
        return egg_file
    def save(self):
        """Build the egg for self.metadata's driver; return the egg path."""
        driver_file = self.metadata.driver_dir() + '/' + DriverGenerator(self.metadata).driver_filename()
        driver_test_file = self.metadata.driver_dir() + '/test/' + DriverGenerator(self.metadata).driver_test_filename()
        filelist = DriverFileList(self.metadata, self._repo_dir(), driver_file, driver_test_file)
        return self._build_egg(filelist.files())
if __name__ == '__main__':
    # Library module; intentionally no command-line entry point.
    pass
| |
import sys
import rbtree
from PySide.QtGui import *
from PySide.QtCore import *
class BaseNode:
    # Forward declaration only: replaced by the QGraphicsItem-based
    # BaseNode defined later in this file.
    pass
class BaseLink:
    # Forward declaration only: replaced by the QGraphicsLineItem-based
    # BaseLink defined later in this file.
    pass
def rbtree_node_depth_adjust(tree):
    """Shrink each node's .depth by the count of red nodes on its root path.

    A node whose .color is False counts as red (it is also drawn red in
    BaseNode.paint), and a red node counts toward its own adjustment.
    """
    if tree.empty():
        return
    sentinel = tree.head
    def walk(node, reds_above):
        if node is sentinel:
            return
        reds = reds_above + (1 if node.color is False else 0)
        node.depth -= reds
        walk(node.l, reds)
        walk(node.r, reds)
    walk(tree.root(), 0)
def create_node_cnt_for_tree(subtree, head):
    """Annotate every node with .cnt = size of its subtree; return that size.

    The sentinel *head* also gets .cnt = 0 as a side effect.
    """
    if subtree is head:
        subtree.cnt = 0
        return 0
    left_total = create_node_cnt_for_tree(subtree.l, head)
    right_total = create_node_cnt_for_tree(subtree.r, head)
    subtree.cnt = left_total + right_total + 1
    return subtree.cnt
def create_node_depth_for_tree(subtree, init_value, head):
    """Assign .depth top-down: *init_value* at *subtree*, +1 per level."""
    if subtree is head:
        return
    subtree.depth = init_value
    for child in (subtree.l, subtree.r):
        create_node_depth_for_tree(child, init_value + 1, head)
def create_x_pos_for_tree(tree):
    """Assign .x = iteration position (0, 1, 2, ...) to every node in *tree*."""
    if tree.empty():
        return
    for position, node in enumerate(tree):
        node.x = position
def get_x_y_for_node(node, head):
    """Return the (x, depth) layout coordinates of *node*.

    *head* is accepted for call-site symmetry but is not used.
    """
    return (node.x, node.depth)
app = QApplication(sys.argv)
class BaseScene(QGraphicsScene):
    """Scene that lays out and displays a red-black tree."""
    def add_item_from_tree(self, tree):
        """Populate the scene from *tree*: one BaseNode item per tree node,
        one BaseLink item per parent/child edge, positioned by in-order x
        and red-adjusted depth (30px grid)."""
        if tree.empty():
            return
        head = tree.head
        def create_all_nodes(subtree):
            # Recursively wrap every tree node in a BaseNode graphics item.
            if subtree is head:
                return
            subtree.item = BaseNode()
            subtree.item.node = subtree
            subtree.item.setToolTip(str(subtree.data))
            self.addItem(subtree.item)
            create_all_nodes(subtree.l)
            create_all_nodes(subtree.r)
        create_all_nodes(tree.root())
        def create_all_links(subtree):
            # Recursively add one BaseLink per parent/child pair.
            if subtree is head:
                return
            if subtree.l is not head:
                link = BaseLink(subtree.item, subtree.l.item)
                self.addItem(link)
                # print 'l add'
            if subtree.r is not head:
                link = BaseLink(subtree.item, subtree.r.item)
                self.addItem(link)
                # print 'r add'
            create_all_links(subtree.l)
            create_all_links(subtree.r)
        #pos
        #create_node_cnt_for_tree(tree.root(), tree.head)
        create_node_depth_for_tree(tree.root(), 0, tree.head)
        rbtree_node_depth_adjust(tree)
        create_x_pos_for_tree(tree)
        create_all_links(tree.root())
        for node in tree:
            # node.item.prepareGeometryChange()
            node.item.setPos(node.x * 30, node.depth * 30)
        # NOTE(review): create_all_links already ran above; this second call
        # adds a duplicate set of link items -- confirm whether intentional.
        create_all_links(tree.root())
    def keyPressEvent(self, event):
        # Placeholder: F5 is reserved (e.g. for a future refresh/re-layout).
        if event.key() == Qt.Key_F5:
            pass
class BaseView(QGraphicsView):
    """Graphics view with antialiasing, rubber-band selection and wheel zoom."""
    def __init__(self):
        QGraphicsView.__init__(self)
        self.setDragMode(QGraphicsView.RubberBandDrag)
        self.setRenderHint(QPainter.Antialiasing)
    def wheelEvent(self, event):
        # Wheel-up zooms in, wheel-down zooms out.
        factor = 1.1 if event.delta() > 0 else 0.9
        self.scale(factor, factor)
class BaseNode(QGraphicsItem):
    """Circular, draggable graphics item representing one tree node.

    Expects .node to be assigned after construction (see
    BaseScene.add_item_from_tree); node.color False is drawn red,
    anything else black.
    """
    def __init__(self, *args, **kwargs):
        QGraphicsItem.__init__(self, *args, **kwargs)
        # self.setZValue()
        self.links = []  # BaseLink items attached to this node
        self.setFlags(QGraphicsItem.ItemIsSelectable | QGraphicsItem.ItemIsMovable | QGraphicsItem.ItemSendsGeometryChanges)
    def add_link(self, link):
        self.links.append(link)
    def remove_link(self, link):
        self.links.remove(link)
    def setPos(self, *args, **kwargs):
        # Keep attached links anchored when the node is repositioned in code.
        QGraphicsItem.setPos(self, *args, **kwargs)
        for l in self.links:
            l.trackNodes()
    def boundingRect(self):
        # 20x20 circle centered on the origin, padded by half the pen width.
        penWidth = 1.0
        return QRectF(-10 - penWidth / 2, -10 - penWidth / 2,
                      20 + penWidth, 20 + penWidth)
    def itemChange(self, change, value):
        # print 'changed', change
        # Interactive (mouse-drag) moves arrive here because of
        # ItemSendsGeometryChanges, so links follow those too.
        if change == QGraphicsItem.ItemPositionChange :
            # print self.data
            for l in self.links:
                # print 'call track'
                l.trackNodes()
        # value is the new position.
        return QGraphicsItem.itemChange(self, change, value)
    def paint(self, painter, option, widget):
        #painter.drawRoundedRect(-10, -10, 20, 20, 5, 5)
        painter.setBrush(Qt.red if self.node.color is False else Qt.black)
        painter.drawEllipse(-10, -10, 20, 20)
        painter.setPen(Qt.white)
        painter.drawText(self.boundingRect(), Qt.AlignCenter, str(self.node.data))
        #self.setZValue(-1)
    def __del__(self):
        # NOTE(review): 'del l' only unbinds the loop variable; it neither
        # destroys the link nor detaches it from anything -- confirm intent.
        for l in self.links:
            del l
class BaseLink(QGraphicsLineItem):
    """Line item that keeps itself attached to two BaseNode endpoints."""
    def __init__(self, start, end, *args, **kwargs):
        QGraphicsLineItem.__init__(self)
        self.start = start
        self.end = end
        # Register with both endpoints so node moves update this line.
        for endpoint in (self.start, self.end):
            endpoint.add_link(self)
        #self.setFlags(QGraphicsItem.ItemIsSelectable);
        # Draw links behind the node circles.
        self.setZValue(-1)
        self.setPen(QPen(QColor(Qt.blue), 1.0))
        self.trackNodes()
    def trackNodes(self):
        """Re-anchor the line segment to the endpoints' current positions."""
        self.setLine(QLineF(self.start.pos(), self.end.pos()))
    def __del__(self):
        # Deregister from both endpoints on garbage collection.
        self.start.remove_link(self)
        self.end.remove_link(self)
# --- module-level script: build the view, render the tree, save a PNG ---
view = BaseView()
scene = BaseScene()
view.setScene(scene)
view.show()
# Build a red-black tree with 20 sequential keys and lay it out on screen.
tree = rbtree.Rbtree()
tree.insert_data_list(range(20))
tree.tree_shape()
scene.add_item_from_tree(tree)
scene.setBackgroundBrush(Qt.white)
view.setBackgroundBrush(Qt.white)
# Render the scene into an off-screen image and save it as scene.png.
rect = scene.itemsBoundingRect()
img = QImage(rect.width(), rect.height(), QImage.Format_RGB32)
p = QPainter(img)
scene.render(p)
p.end()
print scene.sceneRect()
img.save('scene.png')
# tree.root().item.setPos(0, 400)
# for node in tree:
#     print node.data, node.x, node.depth
# node1 = BaseNode()
# node1.setPos(0, 0)
#
# node2 = BaseNode()
# node2.setPos(100, 200)
#
# scene.addItem(node1)
# scene.addItem(node2)
#
# link = BaseLink(node1, node2)
# scene.addItem(link)
# #link.setZValue(1)
#
# print node1.zValue(), node2.zValue(), link.zValue()
# node1 = QGraphicsEllipseItem(0, 0, 100, 400)
# #node1.setFlags(QGraphicsItem.ItemIsSelectable)
# node1.setFlags(QGraphicsItem.ItemIsSelectable | QGraphicsItem.ItemIsMovable | QGraphicsItem.ItemSendsGeometryChanges)
# node2 = QGraphicsEllipseItem(0, 0, 100, 400)
# node2.setFlags(QGraphicsItem.ItemIsSelectable | QGraphicsItem.ItemIsMovable | QGraphicsItem.ItemSendsGeometryChanges)
# scene.addItem(node1)
# scene.addItem(node2)
# scene.addEllipse(0, 0, 100, 400)
# scene.addEllipse(0, 0, 100, 400)
# scene.addEllipse(0, 0, 100, 400)
# Hand control to the Qt event loop until the window is closed.
app.exec_()
#sys.exit()
| |
import threading
import Queue
from wsgiref.simple_server import make_server
from functools import partial
from pprint import pprint
from requests_debug import debug as requests_debug
import requests
import time
from testfixtures import compare
from contextlib import contextmanager
import logging
logging.basicConfig(level=logging.DEBUG)
def client_thread_target(results_q, thread_id, url):
    # Thread body: issue two GETs tagged with this thread's id, then push
    # (thread_id, checkpoint_id, recorded items) onto results_q so the main
    # thread can assert each worker only recorded its own requests.
    for n in xrange(2):
        requests.get(
            url,
            params={"thread_id": thread_id, "n": n}
        )
    results_q.put(
        (thread_id, requests_debug.checkpoint_id(), requests_debug.items())
    )
def client_thread(results_q, thread_id, url):
    """Build (but do not start) a thread running client_thread_target."""
    worker = partial(client_thread_target, results_q, thread_id, url)
    return threading.Thread(target=worker)
def server_timeout_thread(timeout, http_server):
    """Watchdog body: stop *http_server* once *timeout* seconds have passed."""
    time.sleep(timeout)
    stop_server(http_server)
@contextmanager
def start_server():
    """Run a throwaway WSGI server on a random local port; stop it on exit.

    The app answers any /error* path with a redirect back to itself (so
    clients loop until they give up), /404* paths with 404, and everything
    else with a 200 "ok." body. A watchdog thread also stops the server
    after 3 seconds in case the context body never finishes.
    """
    def app(environ, start_response):
        path = environ.get('PATH_INFO', '')
        if "error" in path:
            # Redirect to the same path: an intentional redirect loop.
            start_response("302 Moved Temporarily", [
                ("Location", environ['PATH_INFO'])])
            return []
        if "404" in path:
            start_response("404 Not Found", [])
            return []
        start_response("200 OK", [])
        return ["ok."]
    http_server = make_server('127.0.0.1', 0, app)
    watchdog = threading.Thread(
        target=partial(server_timeout_thread, 3, http_server))
    watchdog.start()
    serve = threading.Thread(target=http_server.serve_forever)
    serve.start()
    yield http_server
    stop_server(http_server)
def stop_server(http_server):
    """Ask the WSGI server's serve_forever loop to exit."""
    http_server.shutdown()
def server_port(http_server):
    """Return the TCP port the server is actually bound to."""
    _host, port = http_server.server_address
    return port
def test_exception():
    # A request that dies with an exception (here: a redirect loop on the
    # /error/ endpoint) must still be recorded, with status None because no
    # final response ever arrived.
    requests_debug.install_hook()
    with start_server() as http_server:
        url = make_url(
            server_port(http_server),
            "error/")
        try:
            requests.get(url)
        except requests.TooManyRedirects, e:
            # Expected: the server redirects /error/ back to itself forever.
            stop_server(http_server)
        compare(
            normalize_items(requests_debug.items()),
            [{'checkpoint_id': requests_debug.checkpoint_id(),
              'method': 'get',
              'status': None,
              'url': url}])
def test_uninstall_hook():
    """After uninstall_hook(), requests must no longer be recorded."""
    def assert_items(items_cb):
        # Issue one GET against a live server, then compare the recorded
        # items with whatever items_cb builds from the request URL.
        with start_server() as http_server:
            url = make_url(server_port(http_server),
                           "test.py")
            requests.get(url)
            compare(
                normalize_items(requests_debug.items()),
                items_cb(url)
            )
    # install the hook
    requests_debug.install_hook()
    # with the hook installed, the request shows up in the debug items
    assert_items(lambda url: [
        {'method': 'get',
         'checkpoint_id': requests_debug.checkpoint_id(),
         'status': 200,
         'url': url}
    ])
    # uninstall the hook
    requests_debug.uninstall_hook()
    # once uninstalled, nothing is recorded
    assert_items(lambda url: [])
def make_url(port, path):
    """Build a localhost URL for *path* on the given *port*."""
    return "http://localhost:{0}/{1}".format(port, path)
def normalize_items(items):
    """Project each recorded item down to the four fields the tests compare."""
    keys = ('method', 'checkpoint_id', 'status', 'url')
    return [{k: item[k] for k in keys} for item in items]
def test_threading():
    """
    Assert that the thread locals actually work correctly by making requests
    from three concurrent client threads: each thread must only see its own
    two requests, recorded under its own checkpoint id (thread 2 hits the
    404 endpoint, the others get 200s).
    """
    with start_server() as http_server:
        requests_debug.install_hook()
        make_url_ = partial(make_url, server_port(http_server))
        results_q = Queue.Queue()
        client_threads = [
            client_thread(results_q, 0, make_url_("test.py")),
            client_thread(results_q, 1, make_url_("test.py")),
            client_thread(results_q, 2, make_url_("404")),
        ]
        # collect the per-thread results into a list; sorted by
        # thread id once all threads have reported
        results = []
        for client in client_threads:
            client.start()
        for client in client_threads:
            # we may not get the result for the client
            # we're on but we need to collect that many
            # values, so this is a quick way to do that.
            # this may timeout and return None if a request
            # takes longer than 2 seconds (it shouldn't)
            results.append(results_q.get(True, 2))
        results.sort(key=lambda x: x[0])
        def normalize(results):
            # Strip the recorded items down to comparable dicts.
            return [
                (thread_id, checkpoint_id, normalize_items(items))
                for thread_id, checkpoint_id, items in results
            ]
        compare(normalize(results), [
            (0, results[0][1], [
                {'method': 'get',
                 'checkpoint_id': results[0][1],
                 'status': 200,
                 'url': make_url_("test.py?thread_id=0&n=0")},
                {'method': 'get',
                 'checkpoint_id': results[0][1],
                 'status': 200,
                 'url': make_url_("test.py?thread_id=0&n=1")},
            ]),
            (1, results[1][1], [
                {'method': 'get',
                 'checkpoint_id': results[1][1],
                 'status': 200,
                 'url': make_url_("test.py?thread_id=1&n=0")},
                {'method': 'get',
                 'checkpoint_id': results[1][1],
                 'status': 200,
                 'url': make_url_("test.py?thread_id=1&n=1")},
            ]),
            (2, results[2][1], [
                {'method': 'get',
                 'checkpoint_id': results[2][1],
                 'status': 404,
                 'url': make_url_("404?thread_id=2&n=0")},
                {'method': 'get',
                 'checkpoint_id': results[2][1],
                 'status': 404,
                 'url': make_url_("404?thread_id=2&n=1")},
            ])])
if __name__ == '__main__':
    # Run the concurrency test directly when invoked as a script.
    test_threading()
| |
"""Provides 'odometry', which loads and parses odometry benchmark data."""
import datetime as dt
import glob
import os
from collections import namedtuple
import numpy as np
import pykitti.utils as utils
__author__ = "Lee Clement"
__email__ = "lee.clement@robotics.utias.utoronto.ca"
# Python 2.x has no FileNotFoundError builtin; alias it to IOError there.
# On Python 3.x the name already exists, so the except branch never runs.
try:
    FileNotFoundError
except NameError:
    FileNotFoundError = IOError
class odometry:
"""Load and parse odometry benchmark data into a usable format."""
    def __init__(self, base_path, sequence, **kwargs):
        """Set the path.

        :param base_path: dataset root containing ``sequences/`` and ``poses/``.
        :param sequence: sequence name, e.g. ``'04'``.
        :keyword frames: optional iterable of frame indices to subselect.
        :keyword imtype: image file extension (default ``'png'``).
        """
        self.sequence = sequence
        self.sequence_path = os.path.join(base_path, 'sequences', sequence)
        self.pose_path = os.path.join(base_path, 'poses')
        self.frames = kwargs.get('frames', None)
        # Default image file extension is 'png'
        self.imtype = kwargs.get('imtype', 'png')
        # Find all the data files
        self._get_file_lists()
        # Pre-load data that isn't returned as a generator
        self._load_calib()
        self._load_timestamps()
        self._load_poses()
    def __len__(self):
        """Return the number of frames loaded (one timestamp per frame)."""
        return len(self.timestamps)
    @property
    def cam0(self):
        """Generator to read image files for cam0 (monochrome left)."""
        return utils.yield_images(self.cam0_files, mode='L')
    def get_cam0(self, idx):
        """Read image file for cam0 (monochrome left) at the specified index."""
        return utils.load_image(self.cam0_files[idx], mode='L')
    @property
    def cam1(self):
        """Generator to read image files for cam1 (monochrome right)."""
        return utils.yield_images(self.cam1_files, mode='L')
    def get_cam1(self, idx):
        """Read image file for cam1 (monochrome right) at the specified index."""
        return utils.load_image(self.cam1_files[idx], mode='L')
    @property
    def cam2(self):
        """Generator to read image files for cam2 (RGB left)."""
        return utils.yield_images(self.cam2_files, mode='RGB')
    def get_cam2(self, idx):
        """Read image file for cam2 (RGB left) at the specified index."""
        return utils.load_image(self.cam2_files[idx], mode='RGB')
    @property
    def cam3(self):
        """Generator to read image files for cam3 (RGB right)."""
        return utils.yield_images(self.cam3_files, mode='RGB')
    def get_cam3(self, idx):
        """Read image file for cam3 (RGB right) at the specified index."""
        return utils.load_image(self.cam3_files[idx], mode='RGB')
    @property
    def gray(self):
        """Generator to read monochrome stereo pairs (cam0, cam1) from file."""
        return zip(self.cam0, self.cam1)
    def get_gray(self, idx):
        """Read monochrome stereo pair at the specified index."""
        return (self.get_cam0(idx), self.get_cam1(idx))
    @property
    def rgb(self):
        """Generator to read RGB stereo pairs (cam2, cam3) from file."""
        return zip(self.cam2, self.cam3)
    def get_rgb(self, idx):
        """Read RGB stereo pair at the specified index."""
        return (self.get_cam2(idx), self.get_cam3(idx))
    @property
    def velo(self):
        """Generator to read velodyne [x,y,z,reflectance] scan data from binary files."""
        # Return a generator yielding Velodyne scans.
        # Each scan is a Nx4 array of [x,y,z,reflectance]
        return utils.yield_velo_scans(self.velo_files)
    def get_velo(self, idx):
        """Read velodyne [x,y,z,reflectance] scan at the specified index."""
        return utils.load_velo_scan(self.velo_files[idx])
def _get_file_lists(self):
"""Find and list data files for each sensor."""
self.cam0_files = sorted(glob.glob(
os.path.join(self.sequence_path, 'image_0',
'*.{}'.format(self.imtype))))
self.cam1_files = sorted(glob.glob(
os.path.join(self.sequence_path, 'image_1',
'*.{}'.format(self.imtype))))
self.cam2_files = sorted(glob.glob(
os.path.join(self.sequence_path, 'image_2',
'*.{}'.format(self.imtype))))
self.cam3_files = sorted(glob.glob(
os.path.join(self.sequence_path, 'image_3',
'*.{}'.format(self.imtype))))
self.velo_files = sorted(glob.glob(
os.path.join(self.sequence_path, 'velodyne',
'*.bin')))
# Subselect the chosen range of frames, if any
if self.frames is not None:
self.cam0_files = utils.subselect_files(
self.cam0_files, self.frames)
self.cam1_files = utils.subselect_files(
self.cam1_files, self.frames)
self.cam2_files = utils.subselect_files(
self.cam2_files, self.frames)
self.cam3_files = utils.subselect_files(
self.cam3_files, self.frames)
self.velo_files = utils.subselect_files(
self.velo_files, self.frames)
def _load_calib(self):
    """Load and compute intrinsic and extrinsic calibration parameters.

    Populates ``self.calib`` (a read-only namedtuple) with:
      - ``P_rect_N0``: 3x4 rectified projection matrices for cameras 0-3
      - ``T_camN_velo``: 4x4 velodyne -> rectified camera N transforms
      - ``K_camN``: 3x3 camera intrinsic matrices
      - ``b_gray`` / ``b_rgb``: stereo baselines in meters
    """
    # We'll build the calibration parameters as a dictionary, then
    # convert it to a namedtuple to prevent it from being modified later
    data = {}
    # Load the calibration file
    calib_filepath = os.path.join(self.sequence_path, 'calib.txt')
    filedata = utils.read_calib_file(calib_filepath)
    # Create 3x4 projection matrices
    P_rect_00 = np.reshape(filedata['P0'], (3, 4))
    P_rect_10 = np.reshape(filedata['P1'], (3, 4))
    P_rect_20 = np.reshape(filedata['P2'], (3, 4))
    P_rect_30 = np.reshape(filedata['P3'], (3, 4))
    data['P_rect_00'] = P_rect_00
    data['P_rect_10'] = P_rect_10
    data['P_rect_20'] = P_rect_20
    data['P_rect_30'] = P_rect_30
    # Compute the rectified extrinsics from cam0 to camN.
    # Each is a pure x-translation recovered from the projection
    # matrix: tx = P[0, 3] / fx.
    T1 = np.eye(4)
    T1[0, 3] = P_rect_10[0, 3] / P_rect_10[0, 0]
    T2 = np.eye(4)
    T2[0, 3] = P_rect_20[0, 3] / P_rect_20[0, 0]
    T3 = np.eye(4)
    T3[0, 3] = P_rect_30[0, 3] / P_rect_30[0, 0]
    # Compute the velodyne to rectified camera coordinate transforms.
    # 'Tr' is 3x4; append [0, 0, 0, 1] to make it homogeneous 4x4.
    data['T_cam0_velo'] = np.reshape(filedata['Tr'], (3, 4))
    data['T_cam0_velo'] = np.vstack([data['T_cam0_velo'], [0, 0, 0, 1]])
    data['T_cam1_velo'] = T1.dot(data['T_cam0_velo'])
    data['T_cam2_velo'] = T2.dot(data['T_cam0_velo'])
    data['T_cam3_velo'] = T3.dot(data['T_cam0_velo'])
    # Compute the camera intrinsics (upper-left 3x3 of each projection)
    data['K_cam0'] = P_rect_00[0:3, 0:3]
    data['K_cam1'] = P_rect_10[0:3, 0:3]
    data['K_cam2'] = P_rect_20[0:3, 0:3]
    data['K_cam3'] = P_rect_30[0:3, 0:3]
    # Compute the stereo baselines in meters by projecting the origin of
    # each camera frame into the velodyne frame and computing the distances
    # between them
    p_cam = np.array([0, 0, 0, 1])
    p_velo0 = np.linalg.inv(data['T_cam0_velo']).dot(p_cam)
    p_velo1 = np.linalg.inv(data['T_cam1_velo']).dot(p_cam)
    p_velo2 = np.linalg.inv(data['T_cam2_velo']).dot(p_cam)
    p_velo3 = np.linalg.inv(data['T_cam3_velo']).dot(p_cam)
    data['b_gray'] = np.linalg.norm(p_velo1 - p_velo0) # gray baseline
    data['b_rgb'] = np.linalg.norm(p_velo3 - p_velo2) # rgb baseline
    # Freeze into a namedtuple; field order follows dict insertion order.
    self.calib = namedtuple('CalibData', data.keys())(*data.values())
def _load_timestamps(self):
"""Load timestamps from file."""
timestamp_file = os.path.join(self.sequence_path, 'times.txt')
# Read and parse the timestamps
self.timestamps = []
with open(timestamp_file, 'r') as f:
for line in f.readlines():
t = dt.timedelta(seconds=float(line))
self.timestamps.append(t)
# Subselect the chosen range of frames, if any
if self.frames is not None:
self.timestamps = [self.timestamps[i] for i in self.frames]
def _load_poses(self):
"""Load ground truth poses (T_w_cam0) from file."""
pose_file = os.path.join(self.pose_path, self.sequence + '.txt')
# Read and parse the poses
poses = []
try:
with open(pose_file, 'r') as f:
lines = f.readlines()
if self.frames is not None:
lines = [lines[i] for i in self.frames]
for line in lines:
T_w_cam0 = np.fromstring(line, dtype=float, sep=' ')
T_w_cam0 = T_w_cam0.reshape(3, 4)
T_w_cam0 = np.vstack((T_w_cam0, [0, 0, 0, 1]))
poses.append(T_w_cam0)
except FileNotFoundError:
print('Ground truth poses are not available for sequence ' +
self.sequence + '.')
self.poses = poses
| |
from __future__ import annotations
from typing import Any
import numpy as np
# Representative seed values covering every type accepted as a seed
# (None, big int, int64 array, array-like, SeedSequence, each BitGenerator).
SEED_NONE = None
SEED_INT = 4579435749574957634658964293569
SEED_ARR: np.ndarray[Any, np.dtype[np.int64]] = np.array([1, 2, 3, 4], dtype=np.int64)
SEED_ARRLIKE: list[int] = [1, 2, 3, 4]
SEED_SEED_SEQ: np.random.SeedSequence = np.random.SeedSequence(0)
SEED_MT19937: np.random.MT19937 = np.random.MT19937(0)
SEED_PCG64: np.random.PCG64 = np.random.PCG64(0)
SEED_PHILOX: np.random.Philox = np.random.Philox(0)
SEED_SFC64: np.random.SFC64 = np.random.SFC64(0)
# default rng
# default_rng accepts every seed flavor defined above.
np.random.default_rng()
np.random.default_rng(SEED_NONE)
np.random.default_rng(SEED_INT)
np.random.default_rng(SEED_ARR)
np.random.default_rng(SEED_ARRLIKE)
np.random.default_rng(SEED_SEED_SEQ)
np.random.default_rng(SEED_MT19937)
np.random.default_rng(SEED_PCG64)
np.random.default_rng(SEED_PHILOX)
np.random.default_rng(SEED_SFC64)
# Seed Sequence
np.random.SeedSequence(SEED_NONE)
np.random.SeedSequence(SEED_INT)
np.random.SeedSequence(SEED_ARR)
np.random.SeedSequence(SEED_ARRLIKE)
# Bit Generators
# Each bit generator accepts None, int, array, array-like, or SeedSequence.
np.random.MT19937(SEED_NONE)
np.random.MT19937(SEED_INT)
np.random.MT19937(SEED_ARR)
np.random.MT19937(SEED_ARRLIKE)
np.random.MT19937(SEED_SEED_SEQ)
np.random.PCG64(SEED_NONE)
np.random.PCG64(SEED_INT)
np.random.PCG64(SEED_ARR)
np.random.PCG64(SEED_ARRLIKE)
np.random.PCG64(SEED_SEED_SEQ)
np.random.Philox(SEED_NONE)
np.random.Philox(SEED_INT)
np.random.Philox(SEED_ARR)
np.random.Philox(SEED_ARRLIKE)
np.random.Philox(SEED_SEED_SEQ)
np.random.SFC64(SEED_NONE)
np.random.SFC64(SEED_INT)
np.random.SFC64(SEED_ARR)
np.random.SFC64(SEED_ARRLIKE)
np.random.SFC64(SEED_SEED_SEQ)
# SeedSequence methods: spawning children and generating uint32/uint64 state.
seed_seq: np.random.bit_generator.SeedSequence = np.random.SeedSequence(SEED_NONE)
seed_seq.spawn(10)
seed_seq.generate_state(3)
seed_seq.generate_state(3, "u4")
seed_seq.generate_state(3, "uint32")
seed_seq.generate_state(3, "u8")
seed_seq.generate_state(3, "uint64")
seed_seq.generate_state(3, np.uint32)
seed_seq.generate_state(3, np.uint64)
# Shared Generator instance and the scalar/array/array-like parameter
# fixtures used by the distribution calls below.
def_gen: np.random.Generator = np.random.default_rng()
D_arr_0p1: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.1])
D_arr_0p5: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.5])
D_arr_0p9: np.ndarray[Any, np.dtype[np.float64]] = np.array([0.9])
D_arr_1p5: np.ndarray[Any, np.dtype[np.float64]] = np.array([1.5])
I_arr_10: np.ndarray[Any, np.dtype[np.int_]] = np.array([10], dtype=np.int_)
I_arr_20: np.ndarray[Any, np.dtype[np.int_]] = np.array([20], dtype=np.int_)
D_arr_like_0p1: list[float] = [0.1]
D_arr_like_0p5: list[float] = [0.5]
D_arr_like_0p9: list[float] = [0.9]
D_arr_like_1p5: list[float] = [1.5]
I_arr_like_10: list[int] = [10]
I_arr_like_20: list[int] = [20]
D_2D_like: list[list[float]] = [[1, 2], [2, 3], [3, 4], [4, 5.1]]
D_2D: np.ndarray[Any, np.dtype[np.float64]] = np.array(D_2D_like)
# Pre-allocated output arrays for methods that accept ``out=``.
S_out: np.ndarray[Any, np.dtype[np.float32]] = np.empty(1, dtype=np.float32)
D_out: np.ndarray[Any, np.dtype[np.float64]] = np.empty(1)
# Generator.standard_normal: dtype, size, and out-array combinations.
def_gen.standard_normal()
def_gen.standard_normal(dtype=np.float32)
def_gen.standard_normal(dtype="float32")
def_gen.standard_normal(dtype="double")
def_gen.standard_normal(dtype=np.float64)
def_gen.standard_normal(size=None)
def_gen.standard_normal(size=1)
def_gen.standard_normal(size=1, dtype=np.float32)
def_gen.standard_normal(size=1, dtype="f4")
def_gen.standard_normal(size=1, dtype="float32", out=S_out)
def_gen.standard_normal(dtype=np.float32, out=S_out)
def_gen.standard_normal(size=1, dtype=np.float64)
def_gen.standard_normal(size=1, dtype="float64")
def_gen.standard_normal(size=1, dtype="f8")
def_gen.standard_normal(out=D_out)
def_gen.standard_normal(size=1, dtype="float64")
def_gen.standard_normal(size=1, dtype="float64", out=D_out)
# Generator.random: same dtype/size/out matrix as standard_normal.
def_gen.random()
def_gen.random(dtype=np.float32)
def_gen.random(dtype="float32")
def_gen.random(dtype="double")
def_gen.random(dtype=np.float64)
def_gen.random(size=None)
def_gen.random(size=1)
def_gen.random(size=1, dtype=np.float32)
def_gen.random(size=1, dtype="f4")
def_gen.random(size=1, dtype="float32", out=S_out)
def_gen.random(dtype=np.float32, out=S_out)
def_gen.random(size=1, dtype=np.float64)
def_gen.random(size=1, dtype="float64")
def_gen.random(size=1, dtype="f8")
def_gen.random(out=D_out)
def_gen.random(size=1, dtype="float64")
def_gen.random(size=1, dtype="float64", out=D_out)
# Generator.standard_cauchy: size only (no dtype/out parameters).
def_gen.standard_cauchy()
def_gen.standard_cauchy(size=None)
def_gen.standard_cauchy(size=1)
# Generator.standard_exponential: adds the ``method`` ("zig"/"inv") parameter.
def_gen.standard_exponential()
def_gen.standard_exponential(method="inv")
def_gen.standard_exponential(dtype=np.float32)
def_gen.standard_exponential(dtype="float32")
def_gen.standard_exponential(dtype="double")
def_gen.standard_exponential(dtype=np.float64)
def_gen.standard_exponential(size=None)
def_gen.standard_exponential(size=None, method="inv")
def_gen.standard_exponential(size=1, method="inv")
def_gen.standard_exponential(size=1, dtype=np.float32)
def_gen.standard_exponential(size=1, dtype="f4", method="inv")
def_gen.standard_exponential(size=1, dtype="float32", out=S_out)
def_gen.standard_exponential(dtype=np.float32, out=S_out)
def_gen.standard_exponential(size=1, dtype=np.float64, method="inv")
def_gen.standard_exponential(size=1, dtype="float64")
def_gen.standard_exponential(size=1, dtype="f8")
def_gen.standard_exponential(out=D_out)
def_gen.standard_exponential(size=1, dtype="float64")
def_gen.standard_exponential(size=1, dtype="float64", out=D_out)
# One-parameter distributions: each is exercised with a scalar, an ndarray,
# and an array-like parameter, with and without ``size``.
def_gen.zipf(1.5)
def_gen.zipf(1.5, size=None)
def_gen.zipf(1.5, size=1)
def_gen.zipf(D_arr_1p5)
def_gen.zipf(D_arr_1p5, size=1)
def_gen.zipf(D_arr_like_1p5)
def_gen.zipf(D_arr_like_1p5, size=1)
def_gen.weibull(0.5)
def_gen.weibull(0.5, size=None)
def_gen.weibull(0.5, size=1)
def_gen.weibull(D_arr_0p5)
def_gen.weibull(D_arr_0p5, size=1)
def_gen.weibull(D_arr_like_0p5)
def_gen.weibull(D_arr_like_0p5, size=1)
def_gen.standard_t(0.5)
def_gen.standard_t(0.5, size=None)
def_gen.standard_t(0.5, size=1)
def_gen.standard_t(D_arr_0p5)
def_gen.standard_t(D_arr_0p5, size=1)
def_gen.standard_t(D_arr_like_0p5)
def_gen.standard_t(D_arr_like_0p5, size=1)
def_gen.poisson(0.5)
def_gen.poisson(0.5, size=None)
def_gen.poisson(0.5, size=1)
def_gen.poisson(D_arr_0p5)
def_gen.poisson(D_arr_0p5, size=1)
def_gen.poisson(D_arr_like_0p5)
def_gen.poisson(D_arr_like_0p5, size=1)
def_gen.power(0.5)
def_gen.power(0.5, size=None)
def_gen.power(0.5, size=1)
def_gen.power(D_arr_0p5)
def_gen.power(D_arr_0p5, size=1)
def_gen.power(D_arr_like_0p5)
def_gen.power(D_arr_like_0p5, size=1)
def_gen.pareto(0.5)
def_gen.pareto(0.5, size=None)
def_gen.pareto(0.5, size=1)
def_gen.pareto(D_arr_0p5)
def_gen.pareto(D_arr_0p5, size=1)
def_gen.pareto(D_arr_like_0p5)
def_gen.pareto(D_arr_like_0p5, size=1)
def_gen.chisquare(0.5)
def_gen.chisquare(0.5, size=None)
def_gen.chisquare(0.5, size=1)
def_gen.chisquare(D_arr_0p5)
def_gen.chisquare(D_arr_0p5, size=1)
def_gen.chisquare(D_arr_like_0p5)
def_gen.chisquare(D_arr_like_0p5, size=1)
def_gen.exponential(0.5)
def_gen.exponential(0.5, size=None)
def_gen.exponential(0.5, size=1)
def_gen.exponential(D_arr_0p5)
def_gen.exponential(D_arr_0p5, size=1)
def_gen.exponential(D_arr_like_0p5)
def_gen.exponential(D_arr_like_0p5, size=1)
def_gen.geometric(0.5)
def_gen.geometric(0.5, size=None)
def_gen.geometric(0.5, size=1)
def_gen.geometric(D_arr_0p5)
def_gen.geometric(D_arr_0p5, size=1)
def_gen.geometric(D_arr_like_0p5)
def_gen.geometric(D_arr_like_0p5, size=1)
def_gen.logseries(0.5)
def_gen.logseries(0.5, size=None)
def_gen.logseries(0.5, size=1)
def_gen.logseries(D_arr_0p5)
def_gen.logseries(D_arr_0p5, size=1)
def_gen.logseries(D_arr_like_0p5)
def_gen.logseries(D_arr_like_0p5, size=1)
def_gen.rayleigh(0.5)
def_gen.rayleigh(0.5, size=None)
def_gen.rayleigh(0.5, size=1)
def_gen.rayleigh(D_arr_0p5)
def_gen.rayleigh(D_arr_0p5, size=1)
def_gen.rayleigh(D_arr_like_0p5)
def_gen.rayleigh(D_arr_like_0p5, size=1)
# standard_gamma additionally supports ``dtype`` and ``out``.
def_gen.standard_gamma(0.5)
def_gen.standard_gamma(0.5, size=None)
def_gen.standard_gamma(0.5, dtype="float32")
def_gen.standard_gamma(0.5, size=None, dtype="float32")
def_gen.standard_gamma(0.5, size=1)
def_gen.standard_gamma(D_arr_0p5)
def_gen.standard_gamma(D_arr_0p5, dtype="f4")
def_gen.standard_gamma(0.5, size=1, dtype="float32", out=S_out)
def_gen.standard_gamma(D_arr_0p5, dtype=np.float32, out=S_out)
def_gen.standard_gamma(D_arr_0p5, size=1)
def_gen.standard_gamma(D_arr_like_0p5)
def_gen.standard_gamma(D_arr_like_0p5, size=1)
def_gen.standard_gamma(0.5, out=D_out)
def_gen.standard_gamma(D_arr_like_0p5, out=D_out)
def_gen.standard_gamma(D_arr_like_0p5, size=1)
def_gen.standard_gamma(D_arr_like_0p5, size=1, out=D_out, dtype=np.float64)
# Two-parameter distributions: every scalar/ndarray/array-like pairing of
# the two parameters, with and without ``size``.
def_gen.vonmises(0.5, 0.5)
def_gen.vonmises(0.5, 0.5, size=None)
def_gen.vonmises(0.5, 0.5, size=1)
def_gen.vonmises(D_arr_0p5, 0.5)
def_gen.vonmises(0.5, D_arr_0p5)
def_gen.vonmises(D_arr_0p5, 0.5, size=1)
def_gen.vonmises(0.5, D_arr_0p5, size=1)
def_gen.vonmises(D_arr_like_0p5, 0.5)
def_gen.vonmises(0.5, D_arr_like_0p5)
def_gen.vonmises(D_arr_0p5, D_arr_0p5)
def_gen.vonmises(D_arr_like_0p5, D_arr_like_0p5)
def_gen.vonmises(D_arr_0p5, D_arr_0p5, size=1)
def_gen.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1)
def_gen.wald(0.5, 0.5)
def_gen.wald(0.5, 0.5, size=None)
def_gen.wald(0.5, 0.5, size=1)
def_gen.wald(D_arr_0p5, 0.5)
def_gen.wald(0.5, D_arr_0p5)
def_gen.wald(D_arr_0p5, 0.5, size=1)
def_gen.wald(0.5, D_arr_0p5, size=1)
def_gen.wald(D_arr_like_0p5, 0.5)
def_gen.wald(0.5, D_arr_like_0p5)
def_gen.wald(D_arr_0p5, D_arr_0p5)
def_gen.wald(D_arr_like_0p5, D_arr_like_0p5)
def_gen.wald(D_arr_0p5, D_arr_0p5, size=1)
def_gen.wald(D_arr_like_0p5, D_arr_like_0p5, size=1)
def_gen.uniform(0.5, 0.5)
def_gen.uniform(0.5, 0.5, size=None)
def_gen.uniform(0.5, 0.5, size=1)
def_gen.uniform(D_arr_0p5, 0.5)
def_gen.uniform(0.5, D_arr_0p5)
def_gen.uniform(D_arr_0p5, 0.5, size=1)
def_gen.uniform(0.5, D_arr_0p5, size=1)
def_gen.uniform(D_arr_like_0p5, 0.5)
def_gen.uniform(0.5, D_arr_like_0p5)
def_gen.uniform(D_arr_0p5, D_arr_0p5)
def_gen.uniform(D_arr_like_0p5, D_arr_like_0p5)
def_gen.uniform(D_arr_0p5, D_arr_0p5, size=1)
def_gen.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1)
def_gen.beta(0.5, 0.5)
def_gen.beta(0.5, 0.5, size=None)
def_gen.beta(0.5, 0.5, size=1)
def_gen.beta(D_arr_0p5, 0.5)
def_gen.beta(0.5, D_arr_0p5)
def_gen.beta(D_arr_0p5, 0.5, size=1)
def_gen.beta(0.5, D_arr_0p5, size=1)
def_gen.beta(D_arr_like_0p5, 0.5)
def_gen.beta(0.5, D_arr_like_0p5)
def_gen.beta(D_arr_0p5, D_arr_0p5)
def_gen.beta(D_arr_like_0p5, D_arr_like_0p5)
def_gen.beta(D_arr_0p5, D_arr_0p5, size=1)
def_gen.beta(D_arr_like_0p5, D_arr_like_0p5, size=1)
def_gen.f(0.5, 0.5)
def_gen.f(0.5, 0.5, size=None)
def_gen.f(0.5, 0.5, size=1)
def_gen.f(D_arr_0p5, 0.5)
def_gen.f(0.5, D_arr_0p5)
def_gen.f(D_arr_0p5, 0.5, size=1)
def_gen.f(0.5, D_arr_0p5, size=1)
def_gen.f(D_arr_like_0p5, 0.5)
def_gen.f(0.5, D_arr_like_0p5)
def_gen.f(D_arr_0p5, D_arr_0p5)
def_gen.f(D_arr_like_0p5, D_arr_like_0p5)
def_gen.f(D_arr_0p5, D_arr_0p5, size=1)
def_gen.f(D_arr_like_0p5, D_arr_like_0p5, size=1)
def_gen.gamma(0.5, 0.5)
def_gen.gamma(0.5, 0.5, size=None)
def_gen.gamma(0.5, 0.5, size=1)
def_gen.gamma(D_arr_0p5, 0.5)
def_gen.gamma(0.5, D_arr_0p5)
def_gen.gamma(D_arr_0p5, 0.5, size=1)
def_gen.gamma(0.5, D_arr_0p5, size=1)
def_gen.gamma(D_arr_like_0p5, 0.5)
def_gen.gamma(0.5, D_arr_like_0p5)
def_gen.gamma(D_arr_0p5, D_arr_0p5)
def_gen.gamma(D_arr_like_0p5, D_arr_like_0p5)
def_gen.gamma(D_arr_0p5, D_arr_0p5, size=1)
def_gen.gamma(D_arr_like_0p5, D_arr_like_0p5, size=1)
def_gen.gumbel(0.5, 0.5)
def_gen.gumbel(0.5, 0.5, size=None)
def_gen.gumbel(0.5, 0.5, size=1)
def_gen.gumbel(D_arr_0p5, 0.5)
def_gen.gumbel(0.5, D_arr_0p5)
def_gen.gumbel(D_arr_0p5, 0.5, size=1)
def_gen.gumbel(0.5, D_arr_0p5, size=1)
def_gen.gumbel(D_arr_like_0p5, 0.5)
def_gen.gumbel(0.5, D_arr_like_0p5)
def_gen.gumbel(D_arr_0p5, D_arr_0p5)
def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5)
def_gen.gumbel(D_arr_0p5, D_arr_0p5, size=1)
def_gen.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1)
def_gen.laplace(0.5, 0.5)
def_gen.laplace(0.5, 0.5, size=None)
def_gen.laplace(0.5, 0.5, size=1)
def_gen.laplace(D_arr_0p5, 0.5)
def_gen.laplace(0.5, D_arr_0p5)
def_gen.laplace(D_arr_0p5, 0.5, size=1)
def_gen.laplace(0.5, D_arr_0p5, size=1)
def_gen.laplace(D_arr_like_0p5, 0.5)
def_gen.laplace(0.5, D_arr_like_0p5)
def_gen.laplace(D_arr_0p5, D_arr_0p5)
def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5)
def_gen.laplace(D_arr_0p5, D_arr_0p5, size=1)
def_gen.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1)
def_gen.logistic(0.5, 0.5)
def_gen.logistic(0.5, 0.5, size=None)
def_gen.logistic(0.5, 0.5, size=1)
def_gen.logistic(D_arr_0p5, 0.5)
def_gen.logistic(0.5, D_arr_0p5)
def_gen.logistic(D_arr_0p5, 0.5, size=1)
def_gen.logistic(0.5, D_arr_0p5, size=1)
def_gen.logistic(D_arr_like_0p5, 0.5)
def_gen.logistic(0.5, D_arr_like_0p5)
def_gen.logistic(D_arr_0p5, D_arr_0p5)
def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5)
def_gen.logistic(D_arr_0p5, D_arr_0p5, size=1)
def_gen.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1)
def_gen.lognormal(0.5, 0.5)
def_gen.lognormal(0.5, 0.5, size=None)
def_gen.lognormal(0.5, 0.5, size=1)
def_gen.lognormal(D_arr_0p5, 0.5)
def_gen.lognormal(0.5, D_arr_0p5)
def_gen.lognormal(D_arr_0p5, 0.5, size=1)
def_gen.lognormal(0.5, D_arr_0p5, size=1)
def_gen.lognormal(D_arr_like_0p5, 0.5)
def_gen.lognormal(0.5, D_arr_like_0p5)
def_gen.lognormal(D_arr_0p5, D_arr_0p5)
def_gen.lognormal(D_arr_like_0p5, D_arr_like_0p5)
def_gen.lognormal(D_arr_0p5, D_arr_0p5, size=1)
def_gen.lognormal(D_arr_like_0p5, D_arr_like_0p5, size=1)
def_gen.noncentral_chisquare(0.5, 0.5)
def_gen.noncentral_chisquare(0.5, 0.5, size=None)
def_gen.noncentral_chisquare(0.5, 0.5, size=1)
def_gen.noncentral_chisquare(D_arr_0p5, 0.5)
def_gen.noncentral_chisquare(0.5, D_arr_0p5)
def_gen.noncentral_chisquare(D_arr_0p5, 0.5, size=1)
def_gen.noncentral_chisquare(0.5, D_arr_0p5, size=1)
def_gen.noncentral_chisquare(D_arr_like_0p5, 0.5)
def_gen.noncentral_chisquare(0.5, D_arr_like_0p5)
def_gen.noncentral_chisquare(D_arr_0p5, D_arr_0p5)
def_gen.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5)
def_gen.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1)
def_gen.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1)
def_gen.normal(0.5, 0.5)
def_gen.normal(0.5, 0.5, size=None)
def_gen.normal(0.5, 0.5, size=1)
def_gen.normal(D_arr_0p5, 0.5)
def_gen.normal(0.5, D_arr_0p5)
def_gen.normal(D_arr_0p5, 0.5, size=1)
def_gen.normal(0.5, D_arr_0p5, size=1)
def_gen.normal(D_arr_like_0p5, 0.5)
def_gen.normal(0.5, D_arr_like_0p5)
def_gen.normal(D_arr_0p5, D_arr_0p5)
def_gen.normal(D_arr_like_0p5, D_arr_like_0p5)
def_gen.normal(D_arr_0p5, D_arr_0p5, size=1)
def_gen.normal(D_arr_like_0p5, D_arr_like_0p5, size=1)
# Three-parameter distributions: mixed scalar/ndarray/array-like arguments.
def_gen.triangular(0.1, 0.5, 0.9)
def_gen.triangular(0.1, 0.5, 0.9, size=None)
def_gen.triangular(0.1, 0.5, 0.9, size=1)
def_gen.triangular(D_arr_0p1, 0.5, 0.9)
def_gen.triangular(0.1, D_arr_0p5, 0.9)
def_gen.triangular(D_arr_0p1, 0.5, D_arr_like_0p9, size=1)
def_gen.triangular(0.1, D_arr_0p5, 0.9, size=1)
def_gen.triangular(D_arr_like_0p1, 0.5, D_arr_0p9)
def_gen.triangular(0.5, D_arr_like_0p5, 0.9)
def_gen.triangular(D_arr_0p1, D_arr_0p5, 0.9)
def_gen.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9)
def_gen.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1)
def_gen.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1)
def_gen.noncentral_f(0.1, 0.5, 0.9)
def_gen.noncentral_f(0.1, 0.5, 0.9, size=None)
def_gen.noncentral_f(0.1, 0.5, 0.9, size=1)
def_gen.noncentral_f(D_arr_0p1, 0.5, 0.9)
def_gen.noncentral_f(0.1, D_arr_0p5, 0.9)
def_gen.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1)
def_gen.noncentral_f(0.1, D_arr_0p5, 0.9, size=1)
def_gen.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9)
def_gen.noncentral_f(0.5, D_arr_like_0p5, 0.9)
def_gen.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9)
def_gen.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9)
def_gen.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1)
def_gen.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1)
# Discrete distributions with integer parameters: scalar, ndarray, and
# array-like argument combinations.
def_gen.binomial(10, 0.5)
def_gen.binomial(10, 0.5, size=None)
def_gen.binomial(10, 0.5, size=1)
def_gen.binomial(I_arr_10, 0.5)
def_gen.binomial(10, D_arr_0p5)
def_gen.binomial(I_arr_10, 0.5, size=1)
def_gen.binomial(10, D_arr_0p5, size=1)
def_gen.binomial(I_arr_like_10, 0.5)
def_gen.binomial(10, D_arr_like_0p5)
def_gen.binomial(I_arr_10, D_arr_0p5)
def_gen.binomial(I_arr_like_10, D_arr_like_0p5)
def_gen.binomial(I_arr_10, D_arr_0p5, size=1)
def_gen.binomial(I_arr_like_10, D_arr_like_0p5, size=1)
def_gen.negative_binomial(10, 0.5)
def_gen.negative_binomial(10, 0.5, size=None)
def_gen.negative_binomial(10, 0.5, size=1)
def_gen.negative_binomial(I_arr_10, 0.5)
def_gen.negative_binomial(10, D_arr_0p5)
def_gen.negative_binomial(I_arr_10, 0.5, size=1)
def_gen.negative_binomial(10, D_arr_0p5, size=1)
def_gen.negative_binomial(I_arr_like_10, 0.5)
def_gen.negative_binomial(10, D_arr_like_0p5)
def_gen.negative_binomial(I_arr_10, D_arr_0p5)
def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5)
def_gen.negative_binomial(I_arr_10, D_arr_0p5, size=1)
def_gen.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1)
def_gen.hypergeometric(20, 20, 10)
def_gen.hypergeometric(20, 20, 10, size=None)
def_gen.hypergeometric(20, 20, 10, size=1)
def_gen.hypergeometric(I_arr_20, 20, 10)
def_gen.hypergeometric(20, I_arr_20, 10)
def_gen.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1)
def_gen.hypergeometric(20, I_arr_20, 10, size=1)
def_gen.hypergeometric(I_arr_like_20, 20, I_arr_10)
def_gen.hypergeometric(20, I_arr_like_20, 10)
def_gen.hypergeometric(I_arr_20, I_arr_20, 10)
def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, 10)
def_gen.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1)
def_gen.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1)
# Generator.integers: dtype coverage. For each dtype, low/high are exercised
# as scalars, ndarrays, and array-likes, in both half-open (default) and
# closed (``endpoint=True``) forms, with the dtype spelled as a short code,
# a full name string, and a numpy scalar type.
I_int64_100: np.ndarray[Any, np.dtype[np.int64]] = np.array([100], dtype=np.int64)
def_gen.integers(0, 100)
def_gen.integers(100)
def_gen.integers([100])
def_gen.integers(0, [100])
# bool
I_bool_low: np.ndarray[Any, np.dtype[np.bool_]] = np.array([0], dtype=np.bool_)
I_bool_low_like: list[int] = [0]
I_bool_high_open: np.ndarray[Any, np.dtype[np.bool_]] = np.array([1], dtype=np.bool_)
I_bool_high_closed: np.ndarray[Any, np.dtype[np.bool_]] = np.array([1], dtype=np.bool_)
def_gen.integers(2, dtype=bool)
def_gen.integers(0, 2, dtype=bool)
def_gen.integers(1, dtype=bool, endpoint=True)
def_gen.integers(0, 1, dtype=bool, endpoint=True)
def_gen.integers(I_bool_low_like, 1, dtype=bool, endpoint=True)
def_gen.integers(I_bool_high_open, dtype=bool)
def_gen.integers(I_bool_low, I_bool_high_open, dtype=bool)
def_gen.integers(0, I_bool_high_open, dtype=bool)
def_gen.integers(I_bool_high_closed, dtype=bool, endpoint=True)
def_gen.integers(I_bool_low, I_bool_high_closed, dtype=bool, endpoint=True)
def_gen.integers(0, I_bool_high_closed, dtype=bool, endpoint=True)
def_gen.integers(2, dtype=np.bool_)
def_gen.integers(0, 2, dtype=np.bool_)
def_gen.integers(1, dtype=np.bool_, endpoint=True)
def_gen.integers(0, 1, dtype=np.bool_, endpoint=True)
def_gen.integers(I_bool_low_like, 1, dtype=np.bool_, endpoint=True)
def_gen.integers(I_bool_high_open, dtype=np.bool_)
def_gen.integers(I_bool_low, I_bool_high_open, dtype=np.bool_)
def_gen.integers(0, I_bool_high_open, dtype=np.bool_)
def_gen.integers(I_bool_high_closed, dtype=np.bool_, endpoint=True)
def_gen.integers(I_bool_low, I_bool_high_closed, dtype=np.bool_, endpoint=True)
def_gen.integers(0, I_bool_high_closed, dtype=np.bool_, endpoint=True)
# uint8
I_u1_low: np.ndarray[Any, np.dtype[np.uint8]] = np.array([0], dtype=np.uint8)
I_u1_low_like: list[int] = [0]
I_u1_high_open: np.ndarray[Any, np.dtype[np.uint8]] = np.array([255], dtype=np.uint8)
I_u1_high_closed: np.ndarray[Any, np.dtype[np.uint8]] = np.array([255], dtype=np.uint8)
def_gen.integers(256, dtype="u1")
def_gen.integers(0, 256, dtype="u1")
def_gen.integers(255, dtype="u1", endpoint=True)
def_gen.integers(0, 255, dtype="u1", endpoint=True)
def_gen.integers(I_u1_low_like, 255, dtype="u1", endpoint=True)
def_gen.integers(I_u1_high_open, dtype="u1")
def_gen.integers(I_u1_low, I_u1_high_open, dtype="u1")
def_gen.integers(0, I_u1_high_open, dtype="u1")
def_gen.integers(I_u1_high_closed, dtype="u1", endpoint=True)
def_gen.integers(I_u1_low, I_u1_high_closed, dtype="u1", endpoint=True)
def_gen.integers(0, I_u1_high_closed, dtype="u1", endpoint=True)
def_gen.integers(256, dtype="uint8")
def_gen.integers(0, 256, dtype="uint8")
def_gen.integers(255, dtype="uint8", endpoint=True)
def_gen.integers(0, 255, dtype="uint8", endpoint=True)
def_gen.integers(I_u1_low_like, 255, dtype="uint8", endpoint=True)
def_gen.integers(I_u1_high_open, dtype="uint8")
def_gen.integers(I_u1_low, I_u1_high_open, dtype="uint8")
def_gen.integers(0, I_u1_high_open, dtype="uint8")
def_gen.integers(I_u1_high_closed, dtype="uint8", endpoint=True)
def_gen.integers(I_u1_low, I_u1_high_closed, dtype="uint8", endpoint=True)
def_gen.integers(0, I_u1_high_closed, dtype="uint8", endpoint=True)
def_gen.integers(256, dtype=np.uint8)
def_gen.integers(0, 256, dtype=np.uint8)
def_gen.integers(255, dtype=np.uint8, endpoint=True)
def_gen.integers(0, 255, dtype=np.uint8, endpoint=True)
def_gen.integers(I_u1_low_like, 255, dtype=np.uint8, endpoint=True)
def_gen.integers(I_u1_high_open, dtype=np.uint8)
def_gen.integers(I_u1_low, I_u1_high_open, dtype=np.uint8)
def_gen.integers(0, I_u1_high_open, dtype=np.uint8)
def_gen.integers(I_u1_high_closed, dtype=np.uint8, endpoint=True)
def_gen.integers(I_u1_low, I_u1_high_closed, dtype=np.uint8, endpoint=True)
def_gen.integers(0, I_u1_high_closed, dtype=np.uint8, endpoint=True)
# uint16
I_u2_low: np.ndarray[Any, np.dtype[np.uint16]] = np.array([0], dtype=np.uint16)
I_u2_low_like: list[int] = [0]
I_u2_high_open: np.ndarray[Any, np.dtype[np.uint16]] = np.array([65535], dtype=np.uint16)
I_u2_high_closed: np.ndarray[Any, np.dtype[np.uint16]] = np.array([65535], dtype=np.uint16)
def_gen.integers(65536, dtype="u2")
def_gen.integers(0, 65536, dtype="u2")
def_gen.integers(65535, dtype="u2", endpoint=True)
def_gen.integers(0, 65535, dtype="u2", endpoint=True)
def_gen.integers(I_u2_low_like, 65535, dtype="u2", endpoint=True)
def_gen.integers(I_u2_high_open, dtype="u2")
def_gen.integers(I_u2_low, I_u2_high_open, dtype="u2")
def_gen.integers(0, I_u2_high_open, dtype="u2")
def_gen.integers(I_u2_high_closed, dtype="u2", endpoint=True)
def_gen.integers(I_u2_low, I_u2_high_closed, dtype="u2", endpoint=True)
def_gen.integers(0, I_u2_high_closed, dtype="u2", endpoint=True)
def_gen.integers(65536, dtype="uint16")
def_gen.integers(0, 65536, dtype="uint16")
def_gen.integers(65535, dtype="uint16", endpoint=True)
def_gen.integers(0, 65535, dtype="uint16", endpoint=True)
def_gen.integers(I_u2_low_like, 65535, dtype="uint16", endpoint=True)
def_gen.integers(I_u2_high_open, dtype="uint16")
def_gen.integers(I_u2_low, I_u2_high_open, dtype="uint16")
def_gen.integers(0, I_u2_high_open, dtype="uint16")
def_gen.integers(I_u2_high_closed, dtype="uint16", endpoint=True)
def_gen.integers(I_u2_low, I_u2_high_closed, dtype="uint16", endpoint=True)
def_gen.integers(0, I_u2_high_closed, dtype="uint16", endpoint=True)
def_gen.integers(65536, dtype=np.uint16)
def_gen.integers(0, 65536, dtype=np.uint16)
def_gen.integers(65535, dtype=np.uint16, endpoint=True)
def_gen.integers(0, 65535, dtype=np.uint16, endpoint=True)
def_gen.integers(I_u2_low_like, 65535, dtype=np.uint16, endpoint=True)
def_gen.integers(I_u2_high_open, dtype=np.uint16)
def_gen.integers(I_u2_low, I_u2_high_open, dtype=np.uint16)
def_gen.integers(0, I_u2_high_open, dtype=np.uint16)
def_gen.integers(I_u2_high_closed, dtype=np.uint16, endpoint=True)
def_gen.integers(I_u2_low, I_u2_high_closed, dtype=np.uint16, endpoint=True)
def_gen.integers(0, I_u2_high_closed, dtype=np.uint16, endpoint=True)
# uint32
I_u4_low: np.ndarray[Any, np.dtype[np.uint32]] = np.array([0], dtype=np.uint32)
I_u4_low_like: list[int] = [0]
I_u4_high_open: np.ndarray[Any, np.dtype[np.uint32]] = np.array([4294967295], dtype=np.uint32)
I_u4_high_closed: np.ndarray[Any, np.dtype[np.uint32]] = np.array([4294967295], dtype=np.uint32)
def_gen.integers(4294967296, dtype="u4")
def_gen.integers(0, 4294967296, dtype="u4")
def_gen.integers(4294967295, dtype="u4", endpoint=True)
def_gen.integers(0, 4294967295, dtype="u4", endpoint=True)
def_gen.integers(I_u4_low_like, 4294967295, dtype="u4", endpoint=True)
def_gen.integers(I_u4_high_open, dtype="u4")
def_gen.integers(I_u4_low, I_u4_high_open, dtype="u4")
def_gen.integers(0, I_u4_high_open, dtype="u4")
def_gen.integers(I_u4_high_closed, dtype="u4", endpoint=True)
def_gen.integers(I_u4_low, I_u4_high_closed, dtype="u4", endpoint=True)
def_gen.integers(0, I_u4_high_closed, dtype="u4", endpoint=True)
def_gen.integers(4294967296, dtype="uint32")
def_gen.integers(0, 4294967296, dtype="uint32")
def_gen.integers(4294967295, dtype="uint32", endpoint=True)
def_gen.integers(0, 4294967295, dtype="uint32", endpoint=True)
def_gen.integers(I_u4_low_like, 4294967295, dtype="uint32", endpoint=True)
def_gen.integers(I_u4_high_open, dtype="uint32")
def_gen.integers(I_u4_low, I_u4_high_open, dtype="uint32")
def_gen.integers(0, I_u4_high_open, dtype="uint32")
def_gen.integers(I_u4_high_closed, dtype="uint32", endpoint=True)
def_gen.integers(I_u4_low, I_u4_high_closed, dtype="uint32", endpoint=True)
def_gen.integers(0, I_u4_high_closed, dtype="uint32", endpoint=True)
def_gen.integers(4294967296, dtype=np.uint32)
def_gen.integers(0, 4294967296, dtype=np.uint32)
def_gen.integers(4294967295, dtype=np.uint32, endpoint=True)
def_gen.integers(0, 4294967295, dtype=np.uint32, endpoint=True)
def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint32, endpoint=True)
def_gen.integers(I_u4_high_open, dtype=np.uint32)
def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint32)
def_gen.integers(0, I_u4_high_open, dtype=np.uint32)
def_gen.integers(I_u4_high_closed, dtype=np.uint32, endpoint=True)
def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint32, endpoint=True)
def_gen.integers(0, I_u4_high_closed, dtype=np.uint32, endpoint=True)
# uint64
I_u8_low: np.ndarray[Any, np.dtype[np.uint64]] = np.array([0], dtype=np.uint64)
I_u8_low_like: list[int] = [0]
I_u8_high_open: np.ndarray[Any, np.dtype[np.uint64]] = np.array([18446744073709551615], dtype=np.uint64)
I_u8_high_closed: np.ndarray[Any, np.dtype[np.uint64]] = np.array([18446744073709551615], dtype=np.uint64)
def_gen.integers(18446744073709551616, dtype="u8")
def_gen.integers(0, 18446744073709551616, dtype="u8")
def_gen.integers(18446744073709551615, dtype="u8", endpoint=True)
def_gen.integers(0, 18446744073709551615, dtype="u8", endpoint=True)
def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="u8", endpoint=True)
def_gen.integers(I_u8_high_open, dtype="u8")
def_gen.integers(I_u8_low, I_u8_high_open, dtype="u8")
def_gen.integers(0, I_u8_high_open, dtype="u8")
def_gen.integers(I_u8_high_closed, dtype="u8", endpoint=True)
def_gen.integers(I_u8_low, I_u8_high_closed, dtype="u8", endpoint=True)
def_gen.integers(0, I_u8_high_closed, dtype="u8", endpoint=True)
def_gen.integers(18446744073709551616, dtype="uint64")
def_gen.integers(0, 18446744073709551616, dtype="uint64")
def_gen.integers(18446744073709551615, dtype="uint64", endpoint=True)
def_gen.integers(0, 18446744073709551615, dtype="uint64", endpoint=True)
def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="uint64", endpoint=True)
def_gen.integers(I_u8_high_open, dtype="uint64")
def_gen.integers(I_u8_low, I_u8_high_open, dtype="uint64")
def_gen.integers(0, I_u8_high_open, dtype="uint64")
def_gen.integers(I_u8_high_closed, dtype="uint64", endpoint=True)
def_gen.integers(I_u8_low, I_u8_high_closed, dtype="uint64", endpoint=True)
def_gen.integers(0, I_u8_high_closed, dtype="uint64", endpoint=True)
def_gen.integers(18446744073709551616, dtype=np.uint64)
def_gen.integers(0, 18446744073709551616, dtype=np.uint64)
def_gen.integers(18446744073709551615, dtype=np.uint64, endpoint=True)
def_gen.integers(0, 18446744073709551615, dtype=np.uint64, endpoint=True)
def_gen.integers(I_u8_low_like, 18446744073709551615, dtype=np.uint64, endpoint=True)
def_gen.integers(I_u8_high_open, dtype=np.uint64)
def_gen.integers(I_u8_low, I_u8_high_open, dtype=np.uint64)
def_gen.integers(0, I_u8_high_open, dtype=np.uint64)
def_gen.integers(I_u8_high_closed, dtype=np.uint64, endpoint=True)
def_gen.integers(I_u8_low, I_u8_high_closed, dtype=np.uint64, endpoint=True)
def_gen.integers(0, I_u8_high_closed, dtype=np.uint64, endpoint=True)
# int8
I_i1_low: np.ndarray[Any, np.dtype[np.int8]] = np.array([-128], dtype=np.int8)
I_i1_low_like: list[int] = [-128]
I_i1_high_open: np.ndarray[Any, np.dtype[np.int8]] = np.array([127], dtype=np.int8)
I_i1_high_closed: np.ndarray[Any, np.dtype[np.int8]] = np.array([127], dtype=np.int8)
def_gen.integers(128, dtype="i1")
def_gen.integers(-128, 128, dtype="i1")
def_gen.integers(127, dtype="i1", endpoint=True)
def_gen.integers(-128, 127, dtype="i1", endpoint=True)
def_gen.integers(I_i1_low_like, 127, dtype="i1", endpoint=True)
def_gen.integers(I_i1_high_open, dtype="i1")
def_gen.integers(I_i1_low, I_i1_high_open, dtype="i1")
def_gen.integers(-128, I_i1_high_open, dtype="i1")
def_gen.integers(I_i1_high_closed, dtype="i1", endpoint=True)
def_gen.integers(I_i1_low, I_i1_high_closed, dtype="i1", endpoint=True)
def_gen.integers(-128, I_i1_high_closed, dtype="i1", endpoint=True)
def_gen.integers(128, dtype="int8")
def_gen.integers(-128, 128, dtype="int8")
def_gen.integers(127, dtype="int8", endpoint=True)
def_gen.integers(-128, 127, dtype="int8", endpoint=True)
def_gen.integers(I_i1_low_like, 127, dtype="int8", endpoint=True)
def_gen.integers(I_i1_high_open, dtype="int8")
def_gen.integers(I_i1_low, I_i1_high_open, dtype="int8")
def_gen.integers(-128, I_i1_high_open, dtype="int8")
def_gen.integers(I_i1_high_closed, dtype="int8", endpoint=True)
def_gen.integers(I_i1_low, I_i1_high_closed, dtype="int8", endpoint=True)
def_gen.integers(-128, I_i1_high_closed, dtype="int8", endpoint=True)
def_gen.integers(128, dtype=np.int8)
def_gen.integers(-128, 128, dtype=np.int8)
def_gen.integers(127, dtype=np.int8, endpoint=True)
def_gen.integers(-128, 127, dtype=np.int8, endpoint=True)
def_gen.integers(I_i1_low_like, 127, dtype=np.int8, endpoint=True)
def_gen.integers(I_i1_high_open, dtype=np.int8)
def_gen.integers(I_i1_low, I_i1_high_open, dtype=np.int8)
def_gen.integers(-128, I_i1_high_open, dtype=np.int8)
def_gen.integers(I_i1_high_closed, dtype=np.int8, endpoint=True)
def_gen.integers(I_i1_low, I_i1_high_closed, dtype=np.int8, endpoint=True)
def_gen.integers(-128, I_i1_high_closed, dtype=np.int8, endpoint=True)
I_i2_low: np.ndarray[Any, np.dtype[np.int16]] = np.array([-32768], dtype=np.int16)
I_i2_low_like: list[int] = [-32768]
I_i2_high_open: np.ndarray[Any, np.dtype[np.int16]] = np.array([32767], dtype=np.int16)
I_i2_high_closed: np.ndarray[Any, np.dtype[np.int16]] = np.array([32767], dtype=np.int16)
def_gen.integers(32768, dtype="i2")
def_gen.integers(-32768, 32768, dtype="i2")
def_gen.integers(32767, dtype="i2", endpoint=True)
def_gen.integers(-32768, 32767, dtype="i2", endpoint=True)
def_gen.integers(I_i2_low_like, 32767, dtype="i2", endpoint=True)
def_gen.integers(I_i2_high_open, dtype="i2")
def_gen.integers(I_i2_low, I_i2_high_open, dtype="i2")
def_gen.integers(-32768, I_i2_high_open, dtype="i2")
def_gen.integers(I_i2_high_closed, dtype="i2", endpoint=True)
def_gen.integers(I_i2_low, I_i2_high_closed, dtype="i2", endpoint=True)
def_gen.integers(-32768, I_i2_high_closed, dtype="i2", endpoint=True)
def_gen.integers(32768, dtype="int16")
def_gen.integers(-32768, 32768, dtype="int16")
def_gen.integers(32767, dtype="int16", endpoint=True)
def_gen.integers(-32768, 32767, dtype="int16", endpoint=True)
def_gen.integers(I_i2_low_like, 32767, dtype="int16", endpoint=True)
def_gen.integers(I_i2_high_open, dtype="int16")
def_gen.integers(I_i2_low, I_i2_high_open, dtype="int16")
def_gen.integers(-32768, I_i2_high_open, dtype="int16")
def_gen.integers(I_i2_high_closed, dtype="int16", endpoint=True)
def_gen.integers(I_i2_low, I_i2_high_closed, dtype="int16", endpoint=True)
def_gen.integers(-32768, I_i2_high_closed, dtype="int16", endpoint=True)
def_gen.integers(32768, dtype=np.int16)
def_gen.integers(-32768, 32768, dtype=np.int16)
def_gen.integers(32767, dtype=np.int16, endpoint=True)
def_gen.integers(-32768, 32767, dtype=np.int16, endpoint=True)
def_gen.integers(I_i2_low_like, 32767, dtype=np.int16, endpoint=True)
def_gen.integers(I_i2_high_open, dtype=np.int16)
def_gen.integers(I_i2_low, I_i2_high_open, dtype=np.int16)
def_gen.integers(-32768, I_i2_high_open, dtype=np.int16)
def_gen.integers(I_i2_high_closed, dtype=np.int16, endpoint=True)
def_gen.integers(I_i2_low, I_i2_high_closed, dtype=np.int16, endpoint=True)
def_gen.integers(-32768, I_i2_high_closed, dtype=np.int16, endpoint=True)
I_i4_low: np.ndarray[Any, np.dtype[np.int32]] = np.array([-2147483648], dtype=np.int32)
I_i4_low_like: list[int] = [-2147483648]
I_i4_high_open: np.ndarray[Any, np.dtype[np.int32]] = np.array([2147483647], dtype=np.int32)
I_i4_high_closed: np.ndarray[Any, np.dtype[np.int32]] = np.array([2147483647], dtype=np.int32)
def_gen.integers(2147483648, dtype="i4")
def_gen.integers(-2147483648, 2147483648, dtype="i4")
def_gen.integers(2147483647, dtype="i4", endpoint=True)
def_gen.integers(-2147483648, 2147483647, dtype="i4", endpoint=True)
def_gen.integers(I_i4_low_like, 2147483647, dtype="i4", endpoint=True)
def_gen.integers(I_i4_high_open, dtype="i4")
def_gen.integers(I_i4_low, I_i4_high_open, dtype="i4")
def_gen.integers(-2147483648, I_i4_high_open, dtype="i4")
def_gen.integers(I_i4_high_closed, dtype="i4", endpoint=True)
def_gen.integers(I_i4_low, I_i4_high_closed, dtype="i4", endpoint=True)
def_gen.integers(-2147483648, I_i4_high_closed, dtype="i4", endpoint=True)
def_gen.integers(2147483648, dtype="int32")
def_gen.integers(-2147483648, 2147483648, dtype="int32")
def_gen.integers(2147483647, dtype="int32", endpoint=True)
def_gen.integers(-2147483648, 2147483647, dtype="int32", endpoint=True)
def_gen.integers(I_i4_low_like, 2147483647, dtype="int32", endpoint=True)
def_gen.integers(I_i4_high_open, dtype="int32")
def_gen.integers(I_i4_low, I_i4_high_open, dtype="int32")
def_gen.integers(-2147483648, I_i4_high_open, dtype="int32")
def_gen.integers(I_i4_high_closed, dtype="int32", endpoint=True)
def_gen.integers(I_i4_low, I_i4_high_closed, dtype="int32", endpoint=True)
def_gen.integers(-2147483648, I_i4_high_closed, dtype="int32", endpoint=True)
def_gen.integers(2147483648, dtype=np.int32)
def_gen.integers(-2147483648, 2147483648, dtype=np.int32)
def_gen.integers(2147483647, dtype=np.int32, endpoint=True)
def_gen.integers(-2147483648, 2147483647, dtype=np.int32, endpoint=True)
def_gen.integers(I_i4_low_like, 2147483647, dtype=np.int32, endpoint=True)
def_gen.integers(I_i4_high_open, dtype=np.int32)
def_gen.integers(I_i4_low, I_i4_high_open, dtype=np.int32)
def_gen.integers(-2147483648, I_i4_high_open, dtype=np.int32)
def_gen.integers(I_i4_high_closed, dtype=np.int32, endpoint=True)
def_gen.integers(I_i4_low, I_i4_high_closed, dtype=np.int32, endpoint=True)
def_gen.integers(-2147483648, I_i4_high_closed, dtype=np.int32, endpoint=True)
I_i8_low: np.ndarray[Any, np.dtype[np.int64]] = np.array([-9223372036854775808], dtype=np.int64)
I_i8_low_like: list[int] = [-9223372036854775808]
I_i8_high_open: np.ndarray[Any, np.dtype[np.int64]] = np.array([9223372036854775807], dtype=np.int64)
I_i8_high_closed: np.ndarray[Any, np.dtype[np.int64]] = np.array([9223372036854775807], dtype=np.int64)
def_gen.integers(9223372036854775808, dtype="i8")
def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="i8")
def_gen.integers(9223372036854775807, dtype="i8", endpoint=True)
def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="i8", endpoint=True)
def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="i8", endpoint=True)
def_gen.integers(I_i8_high_open, dtype="i8")
def_gen.integers(I_i8_low, I_i8_high_open, dtype="i8")
def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="i8")
def_gen.integers(I_i8_high_closed, dtype="i8", endpoint=True)
def_gen.integers(I_i8_low, I_i8_high_closed, dtype="i8", endpoint=True)
def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="i8", endpoint=True)
def_gen.integers(9223372036854775808, dtype="int64")
def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="int64")
def_gen.integers(9223372036854775807, dtype="int64", endpoint=True)
def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="int64", endpoint=True)
def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="int64", endpoint=True)
def_gen.integers(I_i8_high_open, dtype="int64")
def_gen.integers(I_i8_low, I_i8_high_open, dtype="int64")
def_gen.integers(-9223372036854775808, I_i8_high_open, dtype="int64")
def_gen.integers(I_i8_high_closed, dtype="int64", endpoint=True)
def_gen.integers(I_i8_low, I_i8_high_closed, dtype="int64", endpoint=True)
def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="int64", endpoint=True)
def_gen.integers(9223372036854775808, dtype=np.int64)
def_gen.integers(-9223372036854775808, 9223372036854775808, dtype=np.int64)
def_gen.integers(9223372036854775807, dtype=np.int64, endpoint=True)
def_gen.integers(-9223372036854775808, 9223372036854775807, dtype=np.int64, endpoint=True)
def_gen.integers(I_i8_low_like, 9223372036854775807, dtype=np.int64, endpoint=True)
def_gen.integers(I_i8_high_open, dtype=np.int64)
def_gen.integers(I_i8_low, I_i8_high_open, dtype=np.int64)
def_gen.integers(-9223372036854775808, I_i8_high_open, dtype=np.int64)
def_gen.integers(I_i8_high_closed, dtype=np.int64, endpoint=True)
def_gen.integers(I_i8_low, I_i8_high_closed, dtype=np.int64, endpoint=True)
def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype=np.int64, endpoint=True)
# Typing exercises for the remaining Generator surface: bytes, choice,
# dirichlet, multinomial, multivariate_hypergeometric, multivariate_normal,
# permutation/permuted/shuffle, and the dunder/state protocol.
def_gen.bit_generator
def_gen.bytes(2)
# choice: int population vs sequence population, with/without size, replace, p.
def_gen.choice(5)
def_gen.choice(5, 3)
def_gen.choice(5, 3, replace=True)
def_gen.choice(5, 3, p=[1 / 5] * 5)
def_gen.choice(5, 3, p=[1 / 5] * 5, replace=False)
def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"])
def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3)
def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4)
def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True)
def_gen.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4]))
def_gen.dirichlet([0.5, 0.5])
def_gen.dirichlet(np.array([0.5, 0.5]))
def_gen.dirichlet(np.array([0.5, 0.5]), size=3)
def_gen.multinomial(20, [1 / 6.0] * 6)
def_gen.multinomial(20, np.array([0.5, 0.5]))
def_gen.multinomial(20, [1 / 6.0] * 6, size=2)
def_gen.multinomial([[10], [20]], [1 / 6.0] * 6, size=(2, 2))
def_gen.multinomial(np.array([[10], [20]]), np.array([0.5, 0.5]), size=(2, 2))
def_gen.multivariate_hypergeometric([3, 5, 7], 2)
def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2)
def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=4)
def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, size=(4, 7))
def_gen.multivariate_hypergeometric([3, 5, 7], 2, method="count")
def_gen.multivariate_hypergeometric(np.array([3, 5, 7]), 2, method="marginals")
def_gen.multivariate_normal([0.0], [[1.0]])
def_gen.multivariate_normal([0.0], np.array([[1.0]]))
def_gen.multivariate_normal(np.array([0.0]), [[1.0]])
def_gen.multivariate_normal([0.0], np.array([[1.0]]))
def_gen.permutation(10)
def_gen.permutation([1, 2, 3, 4])
def_gen.permutation(np.array([1, 2, 3, 4]))
def_gen.permutation(D_2D, axis=1)
# permuted: with and without an explicit out= target array.
def_gen.permuted(D_2D)
def_gen.permuted(D_2D_like)
def_gen.permuted(D_2D, axis=1)
def_gen.permuted(D_2D, out=D_2D)
def_gen.permuted(D_2D_like, out=D_2D)
def_gen.permuted(D_2D_like, out=D_2D)
def_gen.permuted(D_2D, axis=1, out=D_2D)
def_gen.shuffle(np.arange(10))
def_gen.shuffle([1, 2, 3, 4, 5])
def_gen.shuffle(D_2D, axis=1)
# Dunder protocol and pickling-state round trip.
def_gen.__str__()
def_gen.__repr__()
def_gen_state: dict[str, Any]
def_gen_state = def_gen.__getstate__()
def_gen.__setstate__(def_gen_state)
# RandomState
# Typing exercises for the legacy np.random.RandomState API: each distribution
# method is called with scalar, ndarray, and array-like parameters, with and
# without an explicit size argument.
random_st: np.random.RandomState = np.random.RandomState()
random_st.standard_normal()
random_st.standard_normal(size=None)
random_st.standard_normal(size=1)
random_st.random()
random_st.random(size=None)
random_st.random(size=1)
random_st.standard_cauchy()
random_st.standard_cauchy(size=None)
random_st.standard_cauchy(size=1)
random_st.standard_exponential()
random_st.standard_exponential(size=None)
random_st.standard_exponential(size=1)
# One-parameter distributions.
random_st.zipf(1.5)
random_st.zipf(1.5, size=None)
random_st.zipf(1.5, size=1)
random_st.zipf(D_arr_1p5)
random_st.zipf(D_arr_1p5, size=1)
random_st.zipf(D_arr_like_1p5)
random_st.zipf(D_arr_like_1p5, size=1)
random_st.weibull(0.5)
random_st.weibull(0.5, size=None)
random_st.weibull(0.5, size=1)
random_st.weibull(D_arr_0p5)
random_st.weibull(D_arr_0p5, size=1)
random_st.weibull(D_arr_like_0p5)
random_st.weibull(D_arr_like_0p5, size=1)
random_st.standard_t(0.5)
random_st.standard_t(0.5, size=None)
random_st.standard_t(0.5, size=1)
random_st.standard_t(D_arr_0p5)
random_st.standard_t(D_arr_0p5, size=1)
random_st.standard_t(D_arr_like_0p5)
random_st.standard_t(D_arr_like_0p5, size=1)
random_st.poisson(0.5)
random_st.poisson(0.5, size=None)
random_st.poisson(0.5, size=1)
random_st.poisson(D_arr_0p5)
random_st.poisson(D_arr_0p5, size=1)
random_st.poisson(D_arr_like_0p5)
random_st.poisson(D_arr_like_0p5, size=1)
random_st.power(0.5)
random_st.power(0.5, size=None)
random_st.power(0.5, size=1)
random_st.power(D_arr_0p5)
random_st.power(D_arr_0p5, size=1)
random_st.power(D_arr_like_0p5)
random_st.power(D_arr_like_0p5, size=1)
random_st.pareto(0.5)
random_st.pareto(0.5, size=None)
random_st.pareto(0.5, size=1)
random_st.pareto(D_arr_0p5)
random_st.pareto(D_arr_0p5, size=1)
random_st.pareto(D_arr_like_0p5)
random_st.pareto(D_arr_like_0p5, size=1)
random_st.chisquare(0.5)
random_st.chisquare(0.5, size=None)
random_st.chisquare(0.5, size=1)
random_st.chisquare(D_arr_0p5)
random_st.chisquare(D_arr_0p5, size=1)
random_st.chisquare(D_arr_like_0p5)
random_st.chisquare(D_arr_like_0p5, size=1)
random_st.exponential(0.5)
random_st.exponential(0.5, size=None)
random_st.exponential(0.5, size=1)
random_st.exponential(D_arr_0p5)
random_st.exponential(D_arr_0p5, size=1)
random_st.exponential(D_arr_like_0p5)
random_st.exponential(D_arr_like_0p5, size=1)
random_st.geometric(0.5)
random_st.geometric(0.5, size=None)
random_st.geometric(0.5, size=1)
random_st.geometric(D_arr_0p5)
random_st.geometric(D_arr_0p5, size=1)
random_st.geometric(D_arr_like_0p5)
random_st.geometric(D_arr_like_0p5, size=1)
random_st.logseries(0.5)
random_st.logseries(0.5, size=None)
random_st.logseries(0.5, size=1)
random_st.logseries(D_arr_0p5)
random_st.logseries(D_arr_0p5, size=1)
random_st.logseries(D_arr_like_0p5)
random_st.logseries(D_arr_like_0p5, size=1)
random_st.rayleigh(0.5)
random_st.rayleigh(0.5, size=None)
random_st.rayleigh(0.5, size=1)
random_st.rayleigh(D_arr_0p5)
random_st.rayleigh(D_arr_0p5, size=1)
random_st.rayleigh(D_arr_like_0p5)
random_st.rayleigh(D_arr_like_0p5, size=1)
random_st.standard_gamma(0.5)
random_st.standard_gamma(0.5, size=None)
random_st.standard_gamma(0.5, size=1)
random_st.standard_gamma(D_arr_0p5)
random_st.standard_gamma(D_arr_0p5, size=1)
random_st.standard_gamma(D_arr_like_0p5)
random_st.standard_gamma(D_arr_like_0p5, size=1)
random_st.standard_gamma(D_arr_like_0p5, size=1)
# Two-parameter distributions: scalar/array/array-like in each position.
random_st.vonmises(0.5, 0.5)
random_st.vonmises(0.5, 0.5, size=None)
random_st.vonmises(0.5, 0.5, size=1)
random_st.vonmises(D_arr_0p5, 0.5)
random_st.vonmises(0.5, D_arr_0p5)
random_st.vonmises(D_arr_0p5, 0.5, size=1)
random_st.vonmises(0.5, D_arr_0p5, size=1)
random_st.vonmises(D_arr_like_0p5, 0.5)
random_st.vonmises(0.5, D_arr_like_0p5)
random_st.vonmises(D_arr_0p5, D_arr_0p5)
random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5)
random_st.vonmises(D_arr_0p5, D_arr_0p5, size=1)
random_st.vonmises(D_arr_like_0p5, D_arr_like_0p5, size=1)
random_st.wald(0.5, 0.5)
random_st.wald(0.5, 0.5, size=None)
random_st.wald(0.5, 0.5, size=1)
random_st.wald(D_arr_0p5, 0.5)
random_st.wald(0.5, D_arr_0p5)
random_st.wald(D_arr_0p5, 0.5, size=1)
random_st.wald(0.5, D_arr_0p5, size=1)
random_st.wald(D_arr_like_0p5, 0.5)
random_st.wald(0.5, D_arr_like_0p5)
random_st.wald(D_arr_0p5, D_arr_0p5)
random_st.wald(D_arr_like_0p5, D_arr_like_0p5)
random_st.wald(D_arr_0p5, D_arr_0p5, size=1)
random_st.wald(D_arr_like_0p5, D_arr_like_0p5, size=1)
random_st.uniform(0.5, 0.5)
random_st.uniform(0.5, 0.5, size=None)
random_st.uniform(0.5, 0.5, size=1)
random_st.uniform(D_arr_0p5, 0.5)
random_st.uniform(0.5, D_arr_0p5)
random_st.uniform(D_arr_0p5, 0.5, size=1)
random_st.uniform(0.5, D_arr_0p5, size=1)
random_st.uniform(D_arr_like_0p5, 0.5)
random_st.uniform(0.5, D_arr_like_0p5)
random_st.uniform(D_arr_0p5, D_arr_0p5)
random_st.uniform(D_arr_like_0p5, D_arr_like_0p5)
random_st.uniform(D_arr_0p5, D_arr_0p5, size=1)
random_st.uniform(D_arr_like_0p5, D_arr_like_0p5, size=1)
random_st.beta(0.5, 0.5)
random_st.beta(0.5, 0.5, size=None)
random_st.beta(0.5, 0.5, size=1)
random_st.beta(D_arr_0p5, 0.5)
random_st.beta(0.5, D_arr_0p5)
random_st.beta(D_arr_0p5, 0.5, size=1)
random_st.beta(0.5, D_arr_0p5, size=1)
random_st.beta(D_arr_like_0p5, 0.5)
random_st.beta(0.5, D_arr_like_0p5)
random_st.beta(D_arr_0p5, D_arr_0p5)
random_st.beta(D_arr_like_0p5, D_arr_like_0p5)
random_st.beta(D_arr_0p5, D_arr_0p5, size=1)
random_st.beta(D_arr_like_0p5, D_arr_like_0p5, size=1)
random_st.f(0.5, 0.5)
random_st.f(0.5, 0.5, size=None)
random_st.f(0.5, 0.5, size=1)
random_st.f(D_arr_0p5, 0.5)
random_st.f(0.5, D_arr_0p5)
random_st.f(D_arr_0p5, 0.5, size=1)
random_st.f(0.5, D_arr_0p5, size=1)
random_st.f(D_arr_like_0p5, 0.5)
random_st.f(0.5, D_arr_like_0p5)
random_st.f(D_arr_0p5, D_arr_0p5)
random_st.f(D_arr_like_0p5, D_arr_like_0p5)
random_st.f(D_arr_0p5, D_arr_0p5, size=1)
random_st.f(D_arr_like_0p5, D_arr_like_0p5, size=1)
random_st.gamma(0.5, 0.5)
random_st.gamma(0.5, 0.5, size=None)
random_st.gamma(0.5, 0.5, size=1)
random_st.gamma(D_arr_0p5, 0.5)
random_st.gamma(0.5, D_arr_0p5)
random_st.gamma(D_arr_0p5, 0.5, size=1)
random_st.gamma(0.5, D_arr_0p5, size=1)
random_st.gamma(D_arr_like_0p5, 0.5)
random_st.gamma(0.5, D_arr_like_0p5)
random_st.gamma(D_arr_0p5, D_arr_0p5)
random_st.gamma(D_arr_like_0p5, D_arr_like_0p5)
random_st.gamma(D_arr_0p5, D_arr_0p5, size=1)
random_st.gamma(D_arr_like_0p5, D_arr_like_0p5, size=1)
random_st.gumbel(0.5, 0.5)
random_st.gumbel(0.5, 0.5, size=None)
random_st.gumbel(0.5, 0.5, size=1)
random_st.gumbel(D_arr_0p5, 0.5)
random_st.gumbel(0.5, D_arr_0p5)
random_st.gumbel(D_arr_0p5, 0.5, size=1)
random_st.gumbel(0.5, D_arr_0p5, size=1)
random_st.gumbel(D_arr_like_0p5, 0.5)
random_st.gumbel(0.5, D_arr_like_0p5)
random_st.gumbel(D_arr_0p5, D_arr_0p5)
random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5)
random_st.gumbel(D_arr_0p5, D_arr_0p5, size=1)
random_st.gumbel(D_arr_like_0p5, D_arr_like_0p5, size=1)
random_st.laplace(0.5, 0.5)
random_st.laplace(0.5, 0.5, size=None)
random_st.laplace(0.5, 0.5, size=1)
random_st.laplace(D_arr_0p5, 0.5)
random_st.laplace(0.5, D_arr_0p5)
random_st.laplace(D_arr_0p5, 0.5, size=1)
random_st.laplace(0.5, D_arr_0p5, size=1)
random_st.laplace(D_arr_like_0p5, 0.5)
random_st.laplace(0.5, D_arr_like_0p5)
random_st.laplace(D_arr_0p5, D_arr_0p5)
random_st.laplace(D_arr_like_0p5, D_arr_like_0p5)
random_st.laplace(D_arr_0p5, D_arr_0p5, size=1)
random_st.laplace(D_arr_like_0p5, D_arr_like_0p5, size=1)
random_st.logistic(0.5, 0.5)
random_st.logistic(0.5, 0.5, size=None)
random_st.logistic(0.5, 0.5, size=1)
random_st.logistic(D_arr_0p5, 0.5)
random_st.logistic(0.5, D_arr_0p5)
random_st.logistic(D_arr_0p5, 0.5, size=1)
random_st.logistic(0.5, D_arr_0p5, size=1)
random_st.logistic(D_arr_like_0p5, 0.5)
random_st.logistic(0.5, D_arr_like_0p5)
random_st.logistic(D_arr_0p5, D_arr_0p5)
random_st.logistic(D_arr_like_0p5, D_arr_like_0p5)
random_st.logistic(D_arr_0p5, D_arr_0p5, size=1)
random_st.logistic(D_arr_like_0p5, D_arr_like_0p5, size=1)
random_st.lognormal(0.5, 0.5)
random_st.lognormal(0.5, 0.5, size=None)
random_st.lognormal(0.5, 0.5, size=1)
random_st.lognormal(D_arr_0p5, 0.5)
random_st.lognormal(0.5, D_arr_0p5)
random_st.lognormal(D_arr_0p5, 0.5, size=1)
random_st.lognormal(0.5, D_arr_0p5, size=1)
random_st.lognormal(D_arr_like_0p5, 0.5)
random_st.lognormal(0.5, D_arr_like_0p5)
random_st.lognormal(D_arr_0p5, D_arr_0p5)
random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5)
random_st.lognormal(D_arr_0p5, D_arr_0p5, size=1)
random_st.lognormal(D_arr_like_0p5, D_arr_like_0p5, size=1)
random_st.noncentral_chisquare(0.5, 0.5)
random_st.noncentral_chisquare(0.5, 0.5, size=None)
random_st.noncentral_chisquare(0.5, 0.5, size=1)
random_st.noncentral_chisquare(D_arr_0p5, 0.5)
random_st.noncentral_chisquare(0.5, D_arr_0p5)
random_st.noncentral_chisquare(D_arr_0p5, 0.5, size=1)
random_st.noncentral_chisquare(0.5, D_arr_0p5, size=1)
random_st.noncentral_chisquare(D_arr_like_0p5, 0.5)
random_st.noncentral_chisquare(0.5, D_arr_like_0p5)
random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5)
random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5)
random_st.noncentral_chisquare(D_arr_0p5, D_arr_0p5, size=1)
random_st.noncentral_chisquare(D_arr_like_0p5, D_arr_like_0p5, size=1)
random_st.normal(0.5, 0.5)
random_st.normal(0.5, 0.5, size=None)
random_st.normal(0.5, 0.5, size=1)
random_st.normal(D_arr_0p5, 0.5)
random_st.normal(0.5, D_arr_0p5)
random_st.normal(D_arr_0p5, 0.5, size=1)
random_st.normal(0.5, D_arr_0p5, size=1)
random_st.normal(D_arr_like_0p5, 0.5)
random_st.normal(0.5, D_arr_like_0p5)
random_st.normal(D_arr_0p5, D_arr_0p5)
random_st.normal(D_arr_like_0p5, D_arr_like_0p5)
random_st.normal(D_arr_0p5, D_arr_0p5, size=1)
random_st.normal(D_arr_like_0p5, D_arr_like_0p5, size=1)
# Three-parameter distributions.
random_st.triangular(0.1, 0.5, 0.9)
random_st.triangular(0.1, 0.5, 0.9, size=None)
random_st.triangular(0.1, 0.5, 0.9, size=1)
random_st.triangular(D_arr_0p1, 0.5, 0.9)
random_st.triangular(0.1, D_arr_0p5, 0.9)
random_st.triangular(D_arr_0p1, 0.5, D_arr_like_0p9, size=1)
random_st.triangular(0.1, D_arr_0p5, 0.9, size=1)
random_st.triangular(D_arr_like_0p1, 0.5, D_arr_0p9)
random_st.triangular(0.5, D_arr_like_0p5, 0.9)
random_st.triangular(D_arr_0p1, D_arr_0p5, 0.9)
random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, 0.9)
random_st.triangular(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1)
random_st.triangular(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1)
random_st.noncentral_f(0.1, 0.5, 0.9)
random_st.noncentral_f(0.1, 0.5, 0.9, size=None)
random_st.noncentral_f(0.1, 0.5, 0.9, size=1)
random_st.noncentral_f(D_arr_0p1, 0.5, 0.9)
random_st.noncentral_f(0.1, D_arr_0p5, 0.9)
random_st.noncentral_f(D_arr_0p1, 0.5, D_arr_like_0p9, size=1)
random_st.noncentral_f(0.1, D_arr_0p5, 0.9, size=1)
random_st.noncentral_f(D_arr_like_0p1, 0.5, D_arr_0p9)
random_st.noncentral_f(0.5, D_arr_like_0p5, 0.9)
random_st.noncentral_f(D_arr_0p1, D_arr_0p5, 0.9)
random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, 0.9)
random_st.noncentral_f(D_arr_0p1, D_arr_0p5, D_arr_0p9, size=1)
random_st.noncentral_f(D_arr_like_0p1, D_arr_like_0p5, D_arr_like_0p9, size=1)
# Integer-valued distributions (int and float parameters mixed).
random_st.binomial(10, 0.5)
random_st.binomial(10, 0.5, size=None)
random_st.binomial(10, 0.5, size=1)
random_st.binomial(I_arr_10, 0.5)
random_st.binomial(10, D_arr_0p5)
random_st.binomial(I_arr_10, 0.5, size=1)
random_st.binomial(10, D_arr_0p5, size=1)
random_st.binomial(I_arr_like_10, 0.5)
random_st.binomial(10, D_arr_like_0p5)
random_st.binomial(I_arr_10, D_arr_0p5)
random_st.binomial(I_arr_like_10, D_arr_like_0p5)
random_st.binomial(I_arr_10, D_arr_0p5, size=1)
random_st.binomial(I_arr_like_10, D_arr_like_0p5, size=1)
random_st.negative_binomial(10, 0.5)
random_st.negative_binomial(10, 0.5, size=None)
random_st.negative_binomial(10, 0.5, size=1)
random_st.negative_binomial(I_arr_10, 0.5)
random_st.negative_binomial(10, D_arr_0p5)
random_st.negative_binomial(I_arr_10, 0.5, size=1)
random_st.negative_binomial(10, D_arr_0p5, size=1)
random_st.negative_binomial(I_arr_like_10, 0.5)
random_st.negative_binomial(10, D_arr_like_0p5)
random_st.negative_binomial(I_arr_10, D_arr_0p5)
random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5)
random_st.negative_binomial(I_arr_10, D_arr_0p5, size=1)
random_st.negative_binomial(I_arr_like_10, D_arr_like_0p5, size=1)
random_st.hypergeometric(20, 20, 10)
random_st.hypergeometric(20, 20, 10, size=None)
random_st.hypergeometric(20, 20, 10, size=1)
random_st.hypergeometric(I_arr_20, 20, 10)
random_st.hypergeometric(20, I_arr_20, 10)
random_st.hypergeometric(I_arr_20, 20, I_arr_like_10, size=1)
random_st.hypergeometric(20, I_arr_20, 10, size=1)
random_st.hypergeometric(I_arr_like_20, 20, I_arr_10)
random_st.hypergeometric(20, I_arr_like_20, 10)
random_st.hypergeometric(I_arr_20, I_arr_20, 10)
random_st.hypergeometric(I_arr_like_20, I_arr_like_20, 10)
random_st.hypergeometric(I_arr_20, I_arr_20, I_arr_10, size=1)
random_st.hypergeometric(I_arr_like_20, I_arr_like_20, I_arr_like_10, size=1)
# Typing exercises for RandomState.randint: scalar and array bounds, plus every
# supported dtype spelled as a bool, character code, name string, and np scalar
# type (bounds chosen at each dtype's full value range).
random_st.randint(0, 100)
random_st.randint(100)
random_st.randint([100])
random_st.randint(0, [100])
random_st.randint(2, dtype=bool)
random_st.randint(0, 2, dtype=bool)
random_st.randint(I_bool_high_open, dtype=bool)
random_st.randint(I_bool_low, I_bool_high_open, dtype=bool)
random_st.randint(0, I_bool_high_open, dtype=bool)
random_st.randint(2, dtype=np.bool_)
random_st.randint(0, 2, dtype=np.bool_)
random_st.randint(I_bool_high_open, dtype=np.bool_)
random_st.randint(I_bool_low, I_bool_high_open, dtype=np.bool_)
random_st.randint(0, I_bool_high_open, dtype=np.bool_)
# uint8
random_st.randint(256, dtype="u1")
random_st.randint(0, 256, dtype="u1")
random_st.randint(I_u1_high_open, dtype="u1")
random_st.randint(I_u1_low, I_u1_high_open, dtype="u1")
random_st.randint(0, I_u1_high_open, dtype="u1")
random_st.randint(256, dtype="uint8")
random_st.randint(0, 256, dtype="uint8")
random_st.randint(I_u1_high_open, dtype="uint8")
random_st.randint(I_u1_low, I_u1_high_open, dtype="uint8")
random_st.randint(0, I_u1_high_open, dtype="uint8")
random_st.randint(256, dtype=np.uint8)
random_st.randint(0, 256, dtype=np.uint8)
random_st.randint(I_u1_high_open, dtype=np.uint8)
random_st.randint(I_u1_low, I_u1_high_open, dtype=np.uint8)
random_st.randint(0, I_u1_high_open, dtype=np.uint8)
# uint16
random_st.randint(65536, dtype="u2")
random_st.randint(0, 65536, dtype="u2")
random_st.randint(I_u2_high_open, dtype="u2")
random_st.randint(I_u2_low, I_u2_high_open, dtype="u2")
random_st.randint(0, I_u2_high_open, dtype="u2")
random_st.randint(65536, dtype="uint16")
random_st.randint(0, 65536, dtype="uint16")
random_st.randint(I_u2_high_open, dtype="uint16")
random_st.randint(I_u2_low, I_u2_high_open, dtype="uint16")
random_st.randint(0, I_u2_high_open, dtype="uint16")
random_st.randint(65536, dtype=np.uint16)
random_st.randint(0, 65536, dtype=np.uint16)
random_st.randint(I_u2_high_open, dtype=np.uint16)
random_st.randint(I_u2_low, I_u2_high_open, dtype=np.uint16)
random_st.randint(0, I_u2_high_open, dtype=np.uint16)
# uint32
random_st.randint(4294967296, dtype="u4")
random_st.randint(0, 4294967296, dtype="u4")
random_st.randint(I_u4_high_open, dtype="u4")
random_st.randint(I_u4_low, I_u4_high_open, dtype="u4")
random_st.randint(0, I_u4_high_open, dtype="u4")
random_st.randint(4294967296, dtype="uint32")
random_st.randint(0, 4294967296, dtype="uint32")
random_st.randint(I_u4_high_open, dtype="uint32")
random_st.randint(I_u4_low, I_u4_high_open, dtype="uint32")
random_st.randint(0, I_u4_high_open, dtype="uint32")
random_st.randint(4294967296, dtype=np.uint32)
random_st.randint(0, 4294967296, dtype=np.uint32)
random_st.randint(I_u4_high_open, dtype=np.uint32)
random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint32)
random_st.randint(0, I_u4_high_open, dtype=np.uint32)
# uint64
random_st.randint(18446744073709551616, dtype="u8")
random_st.randint(0, 18446744073709551616, dtype="u8")
random_st.randint(I_u8_high_open, dtype="u8")
random_st.randint(I_u8_low, I_u8_high_open, dtype="u8")
random_st.randint(0, I_u8_high_open, dtype="u8")
random_st.randint(18446744073709551616, dtype="uint64")
random_st.randint(0, 18446744073709551616, dtype="uint64")
random_st.randint(I_u8_high_open, dtype="uint64")
random_st.randint(I_u8_low, I_u8_high_open, dtype="uint64")
random_st.randint(0, I_u8_high_open, dtype="uint64")
random_st.randint(18446744073709551616, dtype=np.uint64)
random_st.randint(0, 18446744073709551616, dtype=np.uint64)
random_st.randint(I_u8_high_open, dtype=np.uint64)
random_st.randint(I_u8_low, I_u8_high_open, dtype=np.uint64)
random_st.randint(0, I_u8_high_open, dtype=np.uint64)
# int8
random_st.randint(128, dtype="i1")
random_st.randint(-128, 128, dtype="i1")
random_st.randint(I_i1_high_open, dtype="i1")
random_st.randint(I_i1_low, I_i1_high_open, dtype="i1")
random_st.randint(-128, I_i1_high_open, dtype="i1")
random_st.randint(128, dtype="int8")
random_st.randint(-128, 128, dtype="int8")
random_st.randint(I_i1_high_open, dtype="int8")
random_st.randint(I_i1_low, I_i1_high_open, dtype="int8")
random_st.randint(-128, I_i1_high_open, dtype="int8")
random_st.randint(128, dtype=np.int8)
random_st.randint(-128, 128, dtype=np.int8)
random_st.randint(I_i1_high_open, dtype=np.int8)
random_st.randint(I_i1_low, I_i1_high_open, dtype=np.int8)
random_st.randint(-128, I_i1_high_open, dtype=np.int8)
# int16
random_st.randint(32768, dtype="i2")
random_st.randint(-32768, 32768, dtype="i2")
random_st.randint(I_i2_high_open, dtype="i2")
random_st.randint(I_i2_low, I_i2_high_open, dtype="i2")
random_st.randint(-32768, I_i2_high_open, dtype="i2")
random_st.randint(32768, dtype="int16")
random_st.randint(-32768, 32768, dtype="int16")
random_st.randint(I_i2_high_open, dtype="int16")
random_st.randint(I_i2_low, I_i2_high_open, dtype="int16")
random_st.randint(-32768, I_i2_high_open, dtype="int16")
random_st.randint(32768, dtype=np.int16)
random_st.randint(-32768, 32768, dtype=np.int16)
random_st.randint(I_i2_high_open, dtype=np.int16)
random_st.randint(I_i2_low, I_i2_high_open, dtype=np.int16)
random_st.randint(-32768, I_i2_high_open, dtype=np.int16)
# int32
random_st.randint(2147483648, dtype="i4")
random_st.randint(-2147483648, 2147483648, dtype="i4")
random_st.randint(I_i4_high_open, dtype="i4")
random_st.randint(I_i4_low, I_i4_high_open, dtype="i4")
random_st.randint(-2147483648, I_i4_high_open, dtype="i4")
random_st.randint(2147483648, dtype="int32")
random_st.randint(-2147483648, 2147483648, dtype="int32")
random_st.randint(I_i4_high_open, dtype="int32")
random_st.randint(I_i4_low, I_i4_high_open, dtype="int32")
random_st.randint(-2147483648, I_i4_high_open, dtype="int32")
random_st.randint(2147483648, dtype=np.int32)
random_st.randint(-2147483648, 2147483648, dtype=np.int32)
random_st.randint(I_i4_high_open, dtype=np.int32)
random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int32)
random_st.randint(-2147483648, I_i4_high_open, dtype=np.int32)
# int64
random_st.randint(9223372036854775808, dtype="i8")
random_st.randint(-9223372036854775808, 9223372036854775808, dtype="i8")
random_st.randint(I_i8_high_open, dtype="i8")
random_st.randint(I_i8_low, I_i8_high_open, dtype="i8")
random_st.randint(-9223372036854775808, I_i8_high_open, dtype="i8")
random_st.randint(9223372036854775808, dtype="int64")
random_st.randint(-9223372036854775808, 9223372036854775808, dtype="int64")
random_st.randint(I_i8_high_open, dtype="int64")
random_st.randint(I_i8_low, I_i8_high_open, dtype="int64")
random_st.randint(-9223372036854775808, I_i8_high_open, dtype="int64")
random_st.randint(9223372036854775808, dtype=np.int64)
random_st.randint(-9223372036854775808, 9223372036854775808, dtype=np.int64)
random_st.randint(I_i8_high_open, dtype=np.int64)
random_st.randint(I_i8_low, I_i8_high_open, dtype=np.int64)
random_st.randint(-9223372036854775808, I_i8_high_open, dtype=np.int64)
# Remaining RandomState surface: bit generator access, choice/dirichlet/
# multinomial/multivariate_normal, permutation/shuffle, construction, state
# (de)serialization, seeding, and the legacy rand/randn/random_sample helpers.
bg: np.random.BitGenerator = random_st._bit_generator
random_st.bytes(2)
random_st.choice(5)
random_st.choice(5, 3)
random_st.choice(5, 3, replace=True)
random_st.choice(5, 3, p=[1 / 5] * 5)
random_st.choice(5, 3, p=[1 / 5] * 5, replace=False)
random_st.choice(["pooh", "rabbit", "piglet", "Christopher"])
random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3)
random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, p=[1 / 4] * 4)
random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=True)
random_st.choice(["pooh", "rabbit", "piglet", "Christopher"], 3, replace=False, p=np.array([1 / 8, 1 / 8, 1 / 2, 1 / 4]))
random_st.dirichlet([0.5, 0.5])
random_st.dirichlet(np.array([0.5, 0.5]))
random_st.dirichlet(np.array([0.5, 0.5]), size=3)
random_st.multinomial(20, [1 / 6.0] * 6)
random_st.multinomial(20, np.array([0.5, 0.5]))
random_st.multinomial(20, [1 / 6.0] * 6, size=2)
random_st.multivariate_normal([0.0], [[1.0]])
random_st.multivariate_normal([0.0], np.array([[1.0]]))
random_st.multivariate_normal(np.array([0.0]), [[1.0]])
random_st.multivariate_normal([0.0], np.array([[1.0]]))
random_st.permutation(10)
random_st.permutation([1, 2, 3, 4])
random_st.permutation(np.array([1, 2, 3, 4]))
random_st.permutation(D_2D)
random_st.shuffle(np.arange(10))
random_st.shuffle([1, 2, 3, 4, 5])
random_st.shuffle(D_2D)
# Constructor overloads: SeedSequence/BitGenerator seed, int seed, seed array.
np.random.RandomState(SEED_PCG64)
np.random.RandomState(0)
np.random.RandomState([0, 1, 2])
random_st.__str__()
random_st.__repr__()
# Pickle-protocol and legacy get/set_state round trips.
random_st_state = random_st.__getstate__()
random_st.__setstate__(random_st_state)
random_st.seed()
random_st.seed(1)
random_st.seed([0, 1])
random_st_get_state = random_st.get_state()
random_st_get_state_legacy = random_st.get_state(legacy=True)
random_st.set_state(random_st_get_state)
# Legacy convenience samplers.
random_st.rand()
random_st.rand(1)
random_st.rand(1, 2)
random_st.randn()
random_st.randn(1)
random_st.randn(1, 2)
random_st.random_sample()
random_st.random_sample(1)
random_st.random_sample(size=(1, 2))
# tomaxint is deprecated legacy API; kept here to exercise its typing.
random_st.tomaxint()
random_st.tomaxint(1)
random_st.tomaxint((1,))
# --- end of np.random typing exercises; dtype hashing tests follow ---
from __future__ import division, absolute_import, print_function
import sys
import operator
import pytest
import ctypes
import gc
import numpy as np
from numpy.core._rational_tests import rational
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT)
from numpy.compat import pickle
from itertools import permutations
def assert_dtype_equal(a, b):
    """Assert that two dtypes compare equal and hash to the same value."""
    assert_equal(a, b)
    hash_msg = "two equivalent types do not hash to the same value !"
    assert_equal(hash(a), hash(b), hash_msg)
def assert_dtype_not_equal(a, b):
    """Assert that two dtypes differ and do not share a hash value."""
    assert_(a != b)
    hash_msg = "two different types hash to the same value !"
    assert_(hash(a) != hash(b), hash_msg)
class TestBuiltin(object):
    """Hash, equality and construction checks for builtin scalar dtypes."""
    @pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
                                   np.unicode])
    def test_run(self, t):
        """Only test hash runs at all."""
        dt = np.dtype(t)
        hash(dt)
    @pytest.mark.parametrize('t', [int, float])
    def test_dtype(self, t):
        # Make sure equivalent byte order char hash the same (e.g. < and = on
        # little endian)
        dt = np.dtype(t)
        dt2 = dt.newbyteorder("<")
        dt3 = dt.newbyteorder(">")
        # Exactly one of dt2/dt3 matches the native order; that one must
        # hash equal to dt even though its byteorder character differs.
        if dt == dt2:
            assert_(dt.byteorder != dt2.byteorder, "bogus test")
            assert_dtype_equal(dt, dt2)
        else:
            assert_(dt.byteorder != dt3.byteorder, "bogus test")
            assert_dtype_equal(dt, dt3)
    def test_equivalent_dtype_hashing(self):
        # Make sure equivalent dtypes with different type num hash equal
        uintp = np.dtype(np.uintp)
        # uintp aliases uint32 or ulonglong depending on pointer size.
        if uintp.itemsize == 4:
            left = uintp
            right = np.dtype(np.uint32)
        else:
            left = uintp
            right = np.dtype(np.ulonglong)
        assert_(left == right)
        assert_(hash(left) == hash(right))
    def test_invalid_types(self):
        # Make sure invalid type strings raise an error
        assert_raises(TypeError, np.dtype, 'O3')
        assert_raises(TypeError, np.dtype, 'O5')
        assert_raises(TypeError, np.dtype, 'O7')
        assert_raises(TypeError, np.dtype, 'b3')
        assert_raises(TypeError, np.dtype, 'h4')
        assert_raises(TypeError, np.dtype, 'I5')
        assert_raises(TypeError, np.dtype, 'e3')
        assert_raises(TypeError, np.dtype, 'f5')
        # 'g' (long double), 'l'/'L' (long) and 'q'/'Q' (long long) have
        # platform-dependent sizes, so pick an invalid size accordingly.
        if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16:
            assert_raises(TypeError, np.dtype, 'g12')
        elif np.dtype('g').itemsize == 12:
            assert_raises(TypeError, np.dtype, 'g16')
        if np.dtype('l').itemsize == 8:
            assert_raises(TypeError, np.dtype, 'l4')
            assert_raises(TypeError, np.dtype, 'L4')
        else:
            assert_raises(TypeError, np.dtype, 'l8')
            assert_raises(TypeError, np.dtype, 'L8')
        if np.dtype('q').itemsize == 8:
            assert_raises(TypeError, np.dtype, 'q4')
            assert_raises(TypeError, np.dtype, 'Q4')
        else:
            assert_raises(TypeError, np.dtype, 'q8')
            assert_raises(TypeError, np.dtype, 'Q8')
    @pytest.mark.parametrize(
        'value',
        ['m8', 'M8', 'datetime64', 'timedelta64',
         'i4, (2,3)f8, f4', 'a3, 3u8, (3,4)a10',
         '>f', '<f', '=f', '|f',
        ])
    def test_dtype_bytes_str_equivalence(self, value):
        # A bytes spec must construct the same dtype as its str equivalent.
        bytes_value = value.encode('ascii')
        from_bytes = np.dtype(bytes_value)
        from_str = np.dtype(value)
        assert_dtype_equal(from_bytes, from_str)
    def test_dtype_from_bytes(self):
        # Empty bytes object
        assert_raises(TypeError, np.dtype, b'')
        # Byte order indicator, but no type
        assert_raises(TypeError, np.dtype, b'|')
        # Single character with ordinal < NPY_NTYPES returns
        # type by index into _builtin_descrs
        assert_dtype_equal(np.dtype(bytes([0])), np.dtype('bool'))
        assert_dtype_equal(np.dtype(bytes([17])), np.dtype(object))
        # Single character where value is a valid type code
        assert_dtype_equal(np.dtype(b'f'), np.dtype('float32'))
        # Bytes with non-ascii values raise errors
        assert_raises(TypeError, np.dtype, b'\xff')
        assert_raises(TypeError, np.dtype, b's\xff')
    def test_bad_param(self):
        # Can't give a size that's too small
        assert_raises(ValueError, np.dtype,
                      {'names':['f0', 'f1'],
                       'formats':['i4', 'i1'],
                       'offsets':[0, 4],
                       'itemsize':4})
        # If alignment is enabled, the alignment (4) must divide the itemsize
        assert_raises(ValueError, np.dtype,
                      {'names':['f0', 'f1'],
                       'formats':['i4', 'i1'],
                       'offsets':[0, 4],
                       'itemsize':9}, align=True)
        # If alignment is enabled, the individual fields must be aligned
        assert_raises(ValueError, np.dtype,
                      {'names':['f0', 'f1'],
                       'formats':['i1', 'f4'],
                       'offsets':[0, 2]}, align=True)
    def test_field_order_equality(self):
        # Same fields at the same offsets but listed in a different order
        # must compare unequal.
        x = np.dtype({'names': ['A', 'B'],
                      'formats': ['i4', 'f4'],
                      'offsets': [0, 4]})
        y = np.dtype({'names': ['B', 'A'],
                      'formats': ['f4', 'i4'],
                      'offsets': [4, 0]})
        assert_equal(x == y, False)
class TestRecord(object):
    """Construction, hashing and layout behaviour of structured dtypes."""
    def test_equivalent_record(self):
        """Test whether equivalent record dtypes hash the same."""
        a = np.dtype([('yo', int)])
        b = np.dtype([('yo', int)])
        assert_dtype_equal(a, b)
    def test_different_names(self):
        # In theory, they may hash the same (collision) ?
        a = np.dtype([('yo', int)])
        b = np.dtype([('ye', int)])
        assert_dtype_not_equal(a, b)
    def test_different_titles(self):
        # In theory, they may hash the same (collision) ?
        a = np.dtype({'names': ['r', 'b'],
                      'formats': ['u1', 'u1'],
                      'titles': ['Red pixel', 'Blue pixel']})
        b = np.dtype({'names': ['r', 'b'],
                      'formats': ['u1', 'u1'],
                      'titles': ['RRed pixel', 'Blue pixel']})
        assert_dtype_not_equal(a, b)
    @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
    def test_refcount_dictionary_setting(self):
        # Constructing a dtype from a dict must not leak references to the
        # dict's values.
        names = ["name1"]
        formats = ["f8"]
        titles = ["t1"]
        offsets = [0]
        d = dict(names=names, formats=formats, titles=titles, offsets=offsets)
        refcounts = {k: sys.getrefcount(i) for k, i in d.items()}
        np.dtype(d)
        refcounts_new = {k: sys.getrefcount(i) for k, i in d.items()}
        assert refcounts == refcounts_new
    def test_mutate(self):
        # Mutating a dtype should reset the cached hash value
        a = np.dtype([('yo', int)])
        b = np.dtype([('yo', int)])
        c = np.dtype([('ye', int)])
        assert_dtype_equal(a, b)
        assert_dtype_not_equal(a, c)
        a.names = ['ye']
        assert_dtype_equal(a, c)
        assert_dtype_not_equal(a, b)
        # Restoring pickled state must refresh the cached hash as well.
        state = b.__reduce__()[2]
        a.__setstate__(state)
        assert_dtype_equal(a, b)
        assert_dtype_not_equal(a, c)
    def test_not_lists(self):
        """Test if an appropriate exception is raised when passing bad values to
        the dtype constructor.
        """
        assert_raises(TypeError, np.dtype,
                      dict(names={'A', 'B'}, formats=['f8', 'i4']))
        assert_raises(TypeError, np.dtype,
                      dict(names=['A', 'B'], formats={'f8', 'i4'}))
    def test_aligned_size(self):
        # Check that structured dtypes get padded to an aligned size
        dt = np.dtype('i4, i1', align=True)
        assert_equal(dt.itemsize, 8)
        dt = np.dtype([('f0', 'i4'), ('f1', 'i1')], align=True)
        assert_equal(dt.itemsize, 8)
        dt = np.dtype({'names':['f0', 'f1'],
                       'formats':['i4', 'u1'],
                       'offsets':[0, 4]}, align=True)
        assert_equal(dt.itemsize, 8)
        dt = np.dtype({'f0': ('i4', 0), 'f1':('u1', 4)}, align=True)
        assert_equal(dt.itemsize, 8)
        # Nesting should preserve that alignment
        dt1 = np.dtype([('f0', 'i4'),
                        ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
                        ('f2', 'i1')], align=True)
        assert_equal(dt1.itemsize, 20)
        dt2 = np.dtype({'names':['f0', 'f1', 'f2'],
                        'formats':['i4',
                                   [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],
                                   'i1'],
                        'offsets':[0, 4, 16]}, align=True)
        assert_equal(dt2.itemsize, 20)
        dt3 = np.dtype({'f0': ('i4', 0),
                        'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
                        'f2': ('i1', 16)}, align=True)
        assert_equal(dt3.itemsize, 20)
        assert_equal(dt1, dt2)
        assert_equal(dt2, dt3)
        # Nesting should preserve packing
        dt1 = np.dtype([('f0', 'i4'),
                        ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
                        ('f2', 'i1')], align=False)
        assert_equal(dt1.itemsize, 11)
        dt2 = np.dtype({'names':['f0', 'f1', 'f2'],
                        'formats':['i4',
                                   [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],
                                   'i1'],
                        'offsets':[0, 4, 10]}, align=False)
        assert_equal(dt2.itemsize, 11)
        dt3 = np.dtype({'f0': ('i4', 0),
                        'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
                        'f2': ('i1', 10)}, align=False)
        assert_equal(dt3.itemsize, 11)
        assert_equal(dt1, dt2)
        assert_equal(dt2, dt3)
        # Array of subtype should preserve alignment
        dt1 = np.dtype([('a', '|i1'),
                        ('b', [('f0', '<i2'),
                               ('f1', '<f4')], 2)], align=True)
        assert_equal(dt1.descr, [('a', '|i1'), ('', '|V3'),
                                 ('b', [('f0', '<i2'), ('', '|V2'),
                                        ('f1', '<f4')], (2,))])
    def test_union_struct(self):
        # Should be able to create union dtypes
        dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],
                       'offsets':[0, 0, 2]}, align=True)
        assert_equal(dt.itemsize, 4)
        a = np.array([3], dtype='<u4').view(dt)
        a['f1'] = 10
        a['f2'] = 36
        # f0 overlays f1 (low half) and f2 (high half) of the same 4 bytes.
        assert_equal(a['f0'], 10 + 36*256*256)
        # Should be able to specify fields out of order
        dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],
                       'offsets':[4, 0, 2]}, align=True)
        assert_equal(dt.itemsize, 8)
        # field name should not matter: assignment is by position
        dt2 = np.dtype({'names':['f2', 'f0', 'f1'],
                        'formats':['<u4', '<u2', '<u2'],
                        'offsets':[4, 0, 2]}, align=True)
        vals = [(0, 1, 2), (3, -1, 4)]
        vals2 = [(0, 1, 2), (3, -1, 4)]
        a = np.array(vals, dt)
        b = np.array(vals2, dt2)
        assert_equal(a.astype(dt2), b)
        assert_equal(b.astype(dt), a)
        assert_equal(a.view(dt2), b)
        assert_equal(b.view(dt), a)
        # Should not be able to overlap objects with other types
        assert_raises(TypeError, np.dtype,
                      {'names':['f0', 'f1'],
                       'formats':['O', 'i1'],
                       'offsets':[0, 2]})
        assert_raises(TypeError, np.dtype,
                      {'names':['f0', 'f1'],
                       'formats':['i4', 'O'],
                       'offsets':[0, 3]})
        assert_raises(TypeError, np.dtype,
                      {'names':['f0', 'f1'],
                       'formats':[[('a', 'O')], 'i1'],
                       'offsets':[0, 2]})
        assert_raises(TypeError, np.dtype,
                      {'names':['f0', 'f1'],
                       'formats':['i4', [('a', 'O')]],
                       'offsets':[0, 3]})
        # Out of order should still be ok, however
        dt = np.dtype({'names':['f0', 'f1'],
                       'formats':['i1', 'O'],
                       'offsets':[np.dtype('intp').itemsize, 0]})
    def test_comma_datetime(self):
        # Comma-separated specs may mix datetime/timedelta with other types.
        dt = np.dtype('M8[D],datetime64[Y],i8')
        assert_equal(dt, np.dtype([('f0', 'M8[D]'),
                                   ('f1', 'datetime64[Y]'),
                                   ('f2', 'i8')]))
    def test_from_dictproxy(self):
        # Tests for PR #5920
        dt = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'f4']})
        assert_dtype_equal(dt, np.dtype(dt.fields))
        dt2 = np.dtype((np.void, dt.fields))
        assert_equal(dt2.fields, dt.fields)
    def test_from_dict_with_zero_width_field(self):
        # Regression test for #6430 / #2196
        dt = np.dtype([('val1', np.float32, (0,)), ('val2', int)])
        dt2 = np.dtype({'names': ['val1', 'val2'],
                        'formats': [(np.float32, (0,)), int]})
        assert_dtype_equal(dt, dt2)
        # The zero-width field contributes nothing to the itemsize.
        assert_equal(dt.fields['val1'][0].itemsize, 0)
        assert_equal(dt.itemsize, dt.fields['val2'][0].itemsize)
    def test_bool_commastring(self):
        d = np.dtype('?,?,?')  # raises?
        assert_equal(len(d.names), 3)
        for n in d.names:
            assert_equal(d.fields[n][0], np.dtype('?'))
    def test_nonint_offsets(self):
        # gh-8059
        def make_dtype(off):
            return np.dtype({'names': ['A'], 'formats': ['i4'],
                             'offsets': [off]})
        assert_raises(TypeError, make_dtype, 'ASD')
        assert_raises(OverflowError, make_dtype, 2**70)
        assert_raises(TypeError, make_dtype, 2.3)
        assert_raises(ValueError, make_dtype, -10)
        # no errors here:
        dt = make_dtype(np.uint32(0))
        np.zeros(1, dtype=dt)[0].item()
    def test_fields_by_index(self):
        # Integer indexing (including negative and np integer scalars)
        # selects fields positionally; floats and out-of-range fail.
        dt = np.dtype([('a', np.int8), ('b', np.float32, 3)])
        assert_dtype_equal(dt[0], np.dtype(np.int8))
        assert_dtype_equal(dt[1], np.dtype((np.float32, 3)))
        assert_dtype_equal(dt[-1], dt[1])
        assert_dtype_equal(dt[-2], dt[0])
        assert_raises(IndexError, lambda: dt[-3])
        assert_raises(TypeError, operator.getitem, dt, 3.0)
        assert_equal(dt[1], dt[np.int8(1)])
    @pytest.mark.parametrize('align_flag',[False, True])
    def test_multifield_index(self, align_flag):
        # indexing with a list produces subfields
        # the align flag should be preserved
        dt = np.dtype([
            (('title', 'col1'), '<U20'), ('A', '<f8'), ('B', '<f8')
        ], align=align_flag)
        dt_sub = dt[['B', 'col1']]
        assert_equal(
            dt_sub,
            np.dtype({
                'names': ['B', 'col1'],
                'formats': ['<f8', '<U20'],
                'offsets': [88, 0],
                'titles': [None, 'title'],
                'itemsize': 96
            })
        )
        assert_equal(dt_sub.isalignedstruct, align_flag)
        dt_sub = dt[['B']]
        assert_equal(
            dt_sub,
            np.dtype({
                'names': ['B'],
                'formats': ['<f8'],
                'offsets': [88],
                'itemsize': 96
            })
        )
        assert_equal(dt_sub.isalignedstruct, align_flag)
        dt_sub = dt[[]]
        assert_equal(
            dt_sub,
            np.dtype({
                'names': [],
                'formats': [],
                'offsets': [],
                'itemsize': 96
            })
        )
        assert_equal(dt_sub.isalignedstruct, align_flag)
        # Only lists of field names are valid multi-field indices.
        assert_raises(TypeError, operator.getitem, dt, ())
        assert_raises(TypeError, operator.getitem, dt, [1, 2, 3])
        assert_raises(TypeError, operator.getitem, dt, ['col1', 2])
        assert_raises(KeyError, operator.getitem, dt, ['fake'])
        assert_raises(KeyError, operator.getitem, dt, ['title'])
        assert_raises(ValueError, operator.getitem, dt, ['col1', 'col1'])
    def test_partial_dict(self):
        # 'names' is missing
        assert_raises(ValueError, np.dtype,
                      {'formats': ['i4', 'i4'], 'f0': ('i4', 0), 'f1':('i4', 4)})
    def test_fieldless_views(self):
        # A dtype with no fields but a nonzero itemsize can be created,
        # but views between fieldless dtypes of different sizes must fail.
        a = np.zeros(2, dtype={'names':[], 'formats':[], 'offsets':[],
                               'itemsize':8})
        assert_raises(ValueError, a.view, np.dtype([]))
        d = np.dtype((np.dtype([]), 10))
        assert_equal(d.shape, (10,))
        assert_equal(d.itemsize, 0)
        assert_equal(d.base, np.dtype([]))
        arr = np.fromiter((() for i in range(10)), [])
        assert_equal(arr.dtype, np.dtype([]))
        assert_raises(ValueError, np.frombuffer, b'', dtype=[])
        assert_equal(np.frombuffer(b'', dtype=[], count=2),
                     np.empty(2, dtype=[]))
        assert_raises(ValueError, np.dtype, ([], 'f8'))
        assert_raises(ValueError, np.zeros(1, dtype='i4').view, [])
        assert_equal(np.zeros(2, dtype=[]) == np.zeros(2, dtype=[]),
                     np.ones(2, dtype=bool))
        assert_equal(np.zeros((1, 2), dtype=[]) == a,
                     np.ones((1, 2), dtype=bool))
class TestSubarray(object):
    """Construction, shape handling and hashing of subarray dtypes."""
    def test_single_subarray(self):
        # A bare int shape and a 1-tuple shape build the same dtype, and
        # both normalize the shape to a tuple.
        a = np.dtype((int, (2)))
        b = np.dtype((int, (2,)))
        assert_dtype_equal(a, b)
        assert_equal(type(a.subdtype[1]), tuple)
        assert_equal(type(b.subdtype[1]), tuple)
    def test_equivalent_record(self):
        """Test whether equivalent subarray dtypes hash the same."""
        a = np.dtype((int, (2, 3)))
        b = np.dtype((int, (2, 3)))
        assert_dtype_equal(a, b)
    def test_nonequivalent_record(self):
        """Test whether different subarray dtypes hash differently."""
        a = np.dtype((int, (2, 3)))
        b = np.dtype((int, (3, 2)))
        assert_dtype_not_equal(a, b)
        a = np.dtype((int, (2, 3)))
        b = np.dtype((int, (2, 2)))
        assert_dtype_not_equal(a, b)
        a = np.dtype((int, (1, 2, 3)))
        b = np.dtype((int, (1, 2)))
        assert_dtype_not_equal(a, b)
    def test_shape_equal(self):
        """Test some data types that are equal"""
        assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', tuple())))
        # FutureWarning during deprecation period; after it is passed this
        # should instead check that "(1)f8" == "1f8" == ("f8", 1).
        with pytest.warns(FutureWarning):
            assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', 1)))
        assert_dtype_equal(np.dtype((int, 2)), np.dtype((int, (2,))))
        assert_dtype_equal(np.dtype(('<f4', (3, 2))), np.dtype(('<f4', (3, 2))))
        d = ([('a', 'f4', (1, 2)), ('b', 'f8', (3, 1))], (3, 2))
        assert_dtype_equal(np.dtype(d), np.dtype(d))
    def test_shape_simple(self):
        """Test some simple cases that shouldn't be equal"""
        assert_dtype_not_equal(np.dtype('f8'), np.dtype(('f8', (1,))))
        assert_dtype_not_equal(np.dtype(('f8', (1,))), np.dtype(('f8', (1, 1))))
        assert_dtype_not_equal(np.dtype(('f4', (3, 2))), np.dtype(('f4', (2, 3))))
    def test_shape_monster(self):
        """Test some more complicated cases that shouldn't be equal"""
        assert_dtype_not_equal(
            np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
            np.dtype(([('a', 'f4', (1, 2)), ('b', 'f8', (1, 3))], (2, 2))))
        assert_dtype_not_equal(
            np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
            np.dtype(([('a', 'f4', (2, 1)), ('b', 'i8', (1, 3))], (2, 2))))
        assert_dtype_not_equal(
            np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
            np.dtype(([('e', 'f8', (1, 3)), ('d', 'f4', (2, 1))], (2, 2))))
        assert_dtype_not_equal(
            np.dtype(([('a', [('a', 'i4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
            np.dtype(([('a', [('a', 'u4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))))
    def test_shape_sequence(self):
        # Any sequence of integers should work as shape, but the result
        # should be a tuple (immutable) of base type integers.
        a = np.array([1, 2, 3], dtype=np.int16)
        l = [1, 2, 3]
        # Array gets converted
        dt = np.dtype([('a', 'f4', a)])
        assert_(isinstance(dt['a'].shape, tuple))
        assert_(isinstance(dt['a'].shape[0], int))
        # List gets converted
        dt = np.dtype([('a', 'f4', l)])
        assert_(isinstance(dt['a'].shape, tuple))
        # Objects implementing __index__ are accepted as shape entries.
        class IntLike(object):
            def __index__(self):
                return 3
            def __int__(self):
                # (a PyNumber_Check fails without __int__)
                return 3
        dt = np.dtype([('a', 'f4', IntLike())])
        assert_(isinstance(dt['a'].shape, tuple))
        assert_(isinstance(dt['a'].shape[0], int))
        dt = np.dtype([('a', 'f4', (IntLike(),))])
        assert_(isinstance(dt['a'].shape, tuple))
        assert_(isinstance(dt['a'].shape[0], int))
    def test_shape_matches_ndim(self):
        # ndim of the field dtype tracks the length of the declared shape;
        # an omitted or empty shape yields a 0-d field.
        dt = np.dtype([('a', 'f4', ())])
        assert_equal(dt['a'].shape, ())
        assert_equal(dt['a'].ndim, 0)
        dt = np.dtype([('a', 'f4')])
        assert_equal(dt['a'].shape, ())
        assert_equal(dt['a'].ndim, 0)
        dt = np.dtype([('a', 'f4', 4)])
        assert_equal(dt['a'].shape, (4,))
        assert_equal(dt['a'].ndim, 1)
        dt = np.dtype([('a', 'f4', (1, 2, 3))])
        assert_equal(dt['a'].shape, (1, 2, 3))
        assert_equal(dt['a'].ndim, 3)
    def test_shape_invalid(self):
        # Check that the shape is valid.
        max_int = np.iinfo(np.intc).max
        max_intp = np.iinfo(np.intp).max
        # Too large values (the datatype is part of this)
        assert_raises(ValueError, np.dtype, [('a', 'f4', max_int // 4 + 1)])
        assert_raises(ValueError, np.dtype, [('a', 'f4', max_int + 1)])
        assert_raises(ValueError, np.dtype, [('a', 'f4', (max_int, 2))])
        # Takes a different code path (fails earlier:
        assert_raises(ValueError, np.dtype, [('a', 'f4', max_intp + 1)])
        # Negative values
        assert_raises(ValueError, np.dtype, [('a', 'f4', -1)])
        assert_raises(ValueError, np.dtype, [('a', 'f4', (-1, -1))])
    def test_alignment(self):
        #Check that subarrays are aligned
        t1 = np.dtype('(1,)i4', align=True)
        t2 = np.dtype('2i4', align=True)
        assert_equal(t1.alignment, t2.alignment)
def iter_struct_object_dtypes():
    """
    Iterates over a few complex dtypes and object pattern which
    fill the array with a given object (defaults to a singleton).
    Yields
    ------
    dtype : dtype
    pattern : tuple
        Structured tuple for use with `np.array`.
    count : int
        Number of objects stored in the dtype.
    singleton : object
        A singleton object. The returned pattern is constructed so that
        all objects inside the datatype are set to the singleton.
    """
    obj = object()
    # Plain object subarray: 2x3 = 6 references to obj per element.
    dt = np.dtype([('b', 'O', (2, 3))])
    p = ([[obj] * 3] * 2,)
    yield pytest.param(dt, p, 6, obj, id="<subarray>")
    # Same subarray preceded by a scalar field.
    dt = np.dtype([('a', 'i4'), ('b', 'O', (2, 3))])
    p = (0, [[obj] * 3] * 2)
    yield pytest.param(dt, p, 6, obj, id="<subarray in field>")
    # Structured subarray with one object field: still 6 references.
    dt = np.dtype([('a', 'i4'),
                   ('b', [('ba', 'O'), ('bb', 'i1')], (2, 3))])
    p = (0, [[(obj, 0)] * 3] * 2)
    yield pytest.param(dt, p, 6, obj, id="<structured subarray 1>")
    # Two object fields per subarray element: 12 references.
    dt = np.dtype([('a', 'i4'),
                   ('b', [('ba', 'O'), ('bb', 'O')], (2, 3))])
    p = (0, [[(obj, obj)] * 3] * 2)
    yield pytest.param(dt, p, 12, obj, id="<structured subarray 2>")
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
class TestStructuredObjectRefcounting:
    """These tests cover various uses of complicated structured types which
    include objects and thus require reference counting.
    """
    @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
                             iter_struct_object_dtypes())
    @pytest.mark.parametrize(["creation_func", "creation_obj"], [
        pytest.param(np.empty, None,
             # None is probably used for too many things
             marks=pytest.mark.skip("unreliable due to python's behaviour")),
        (np.ones, 1),
        (np.zeros, 0)])
    def test_structured_object_create_delete(self, dt, pat, count, singleton,
                                             creation_func, creation_obj):
        """Structured object reference counting in creation and deletion"""
        # The test assumes that 0, 1, and None are singletons.
        gc.collect()
        before = sys.getrefcount(creation_obj)
        # Creating 3 elements should add count references per element ...
        arr = creation_func(3, dt)
        now = sys.getrefcount(creation_obj)
        assert now - before == count * 3
        # ... and deleting the array must release all of them again.
        del arr
        now = sys.getrefcount(creation_obj)
        assert now == before
    @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
                             iter_struct_object_dtypes())
    def test_structured_object_item_setting(self, dt, pat, count, singleton):
        """Structured object reference counting for simple item setting"""
        one = 1
        gc.collect()
        before = sys.getrefcount(singleton)
        arr = np.array([pat] * 3, dt)
        assert sys.getrefcount(singleton) - before == count * 3
        # Fill with `1` and check that it was replaced correctly:
        before2 = sys.getrefcount(one)
        arr[...] = one
        after2 = sys.getrefcount(one)
        assert after2 - before2 == count * 3
        del arr
        gc.collect()
        # Both the replacement value and the original singleton are back
        # at their initial refcounts once the array is gone.
        assert sys.getrefcount(one) == before2
        assert sys.getrefcount(singleton) == before
    @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
                             iter_struct_object_dtypes())
    @pytest.mark.parametrize(
        ['shape', 'index', 'items_changed'],
        [((3,), ([0, 2],), 2),
         ((3, 2), ([0, 2], slice(None)), 4),
         ((3, 2), ([0, 2], [1]), 2),
         ((3,), ([True, False, True]), 2)])
    def test_structured_object_indexing(self, shape, index, items_changed,
                                        dt, pat, count, singleton):
        """Structured object reference counting for advanced indexing."""
        zero = 0
        one = 1
        arr = np.zeros(shape, dt)
        gc.collect()
        before_zero = sys.getrefcount(zero)
        before_one = sys.getrefcount(one)
        # Test item getting:
        part = arr[index]
        after_zero = sys.getrefcount(zero)
        assert after_zero - before_zero == count * items_changed
        del part
        # Test item setting:
        arr[index] = one
        gc.collect()
        after_zero = sys.getrefcount(zero)
        after_one = sys.getrefcount(one)
        # Each replaced slot drops a reference to zero and gains one to one.
        assert before_zero - after_zero == count * items_changed
        assert after_one - before_one == count * items_changed
    @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
                             iter_struct_object_dtypes())
    def test_structured_object_take_and_repeat(self, dt, pat, count, singleton):
        """Structured object reference counting for specialized functions.
        The older functions such as take and repeat use different code paths
        then item setting (when writing this).
        """
        indices = [0, 1]
        arr = np.array([pat] * 3, dt)
        gc.collect()
        before = sys.getrefcount(singleton)
        res = arr.take(indices)
        after = sys.getrefcount(singleton)
        assert after - before == count * 2
        new = res.repeat(10)
        gc.collect()
        after_repeat = sys.getrefcount(singleton)
        assert after_repeat - after == count * 2 * 10
class TestStructuredDtypeSparseFields(object):
    """Tests subarray fields which contain sparse dtypes so that
    not all memory is used by the dtype work. Such dtype's should
    leave the underlying memory unchanged.
    """
    # Full layout: two f4 fields per subarray element.
    dtype = np.dtype([('a', {'names':['aa', 'ab'], 'formats':['f', 'f'],
                             'offsets':[0, 4]}, (2, 3))])
    # Sparse view of the same layout exposing only the second field ('ab');
    # bytes belonging to 'aa' are not addressed by this dtype.
    sparse_dtype = np.dtype([('a', {'names':['ab'], 'formats':['f'],
                                    'offsets':[4]}, (2, 3))])
    @pytest.mark.xfail(reason="inaccessible data is changed see gh-12686.")
    @pytest.mark.valgrind_error(reason="reads from uninitialized buffers.")
    def test_sparse_field_assignment(self):
        # Assigning through the sparse view must not touch the 'aa' bytes.
        arr = np.zeros(3, self.dtype)
        sparse_arr = arr.view(self.sparse_dtype)
        sparse_arr[...] = np.finfo(np.float32).max
        # dtype is reduced when accessing the field, so shape is (3, 2, 3):
        assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3)))
    def test_sparse_field_assignment_fancy(self):
        # Fancy assignment goes to the copyswap function for comlex types:
        arr = np.zeros(3, self.dtype)
        sparse_arr = arr.view(self.sparse_dtype)
        sparse_arr[[0, 1, 2]] = np.finfo(np.float32).max
        # dtype is reduced when accessing the field, so shape is (3, 2, 3):
        assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3)))
class TestMonsterType(object):
    """Test deeply nested subtypes."""
    def test1(self):
        # Small structured dtype with titles, reused as a field below.
        simple1 = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
                            'titles': ['Red pixel', 'Blue pixel']})
        def make_record(base):
            # Record whose last field is a (3, 2) subarray of ``base``.
            return np.dtype([('yo', int), ('ye', simple1),
                             ('yi', np.dtype((base, (3, 2))))])
        # Two independently built copies must hash/compare equal.
        a = make_record(int)
        b = make_record(int)
        assert_dtype_equal(a, b)
        # Nest the record itself as the subarray base and compare again.
        c = make_record(a)
        d = make_record(a)
        assert_dtype_equal(c, d)
class TestMetadata(object):
    """Behaviour of the dtype ``metadata`` attribute."""
    def test_no_metadata(self):
        # A plain dtype carries no metadata by default.
        dt = np.dtype(int)
        assert_(dt.metadata is None)
    def test_metadata_takes_dict(self):
        # A dict passed via ``metadata=`` is stored and retrievable.
        dt = np.dtype(int, metadata={'datum': 1})
        assert_(dt.metadata == {'datum': 1})
    def test_metadata_rejects_nondict(self):
        # Anything other than a dict (including None) raises TypeError.
        for bad_metadata in ('datum', 1, None):
            assert_raises(TypeError, np.dtype, int, metadata=bad_metadata)
    def test_nested_metadata(self):
        # Metadata survives when the dtype is embedded as a field.
        dt = np.dtype([('a', np.dtype(int, metadata={'datum': 1}))])
        assert_(dt['a'].metadata == {'datum': 1})
    def test_base_metadata_copied(self):
        # Metadata is taken over from the base dtype of a (void, base) spec.
        dt = np.dtype((np.void, np.dtype('i4,i4', metadata={'datum': 1})))
        assert_(dt.metadata == {'datum': 1})
class TestString(object):
    """Exact str()/repr() output for a variety of dtypes, plus a few
    string-related construction regressions."""
    def test_complex_dtype_str(self):
        # Deeply nested structured dtype: str() uses the list-of-tuples form.
        dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
                                ('rtile', '>f4', (64, 36))], (3,)),
                       ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
                                   ('bright', '>f4', (8, 36))])])
        assert_equal(str(dt),
                     "[('top', [('tiles', ('>f4', (64, 64)), (1,)), "
                     "('rtile', '>f4', (64, 36))], (3,)), "
                     "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
                     "('bright', '>f4', (8, 36))])]")
        # If the sticky aligned flag is set to True, it makes the
        # str() function use a dict representation with an 'aligned' flag
        dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
                                ('rtile', '>f4', (64, 36))],
                               (3,)),
                       ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
                                   ('bright', '>f4', (8, 36))])],
                               align=True)
        assert_equal(str(dt),
                    "{'names':['top','bottom'], "
                     "'formats':[([('tiles', ('>f4', (64, 64)), (1,)), "
                                  "('rtile', '>f4', (64, 36))], (3,)),"
                                 "[('bleft', ('>f4', (8, 64)), (1,)), "
                                  "('bright', '>f4', (8, 36))]], "
                     "'offsets':[0,76800], "
                     "'itemsize':80000, "
                     "'aligned':True}")
        # The str() output must round-trip through eval() to the same dtype.
        assert_equal(np.dtype(eval(str(dt))), dt)
        dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],
                       'offsets': [0, 1, 2],
                       'titles': ['Red pixel', 'Green pixel', 'Blue pixel']})
        assert_equal(str(dt),
                    "[(('Red pixel', 'r'), 'u1'), "
                    "(('Green pixel', 'g'), 'u1'), "
                    "(('Blue pixel', 'b'), 'u1')]")
        # Overlapping fields force the dict representation.
        dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],
                       'formats': ['<u4', 'u1', 'u1', 'u1'],
                       'offsets': [0, 0, 1, 2],
                       'titles': ['Color', 'Red pixel',
                                  'Green pixel', 'Blue pixel']})
        assert_equal(str(dt),
                    "{'names':['rgba','r','g','b'],"
                    " 'formats':['<u4','u1','u1','u1'],"
                    " 'offsets':[0,0,1,2],"
                    " 'titles':['Color','Red pixel',"
                              "'Green pixel','Blue pixel'],"
                    " 'itemsize':4}")
        # Non-contiguous offsets also force the dict representation.
        dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
                       'offsets': [0, 2],
                       'titles': ['Red pixel', 'Blue pixel']})
        assert_equal(str(dt),
                    "{'names':['r','b'],"
                    " 'formats':['u1','u1'],"
                    " 'offsets':[0,2],"
                    " 'titles':['Red pixel','Blue pixel'],"
                    " 'itemsize':3}")
        # Datetime/timedelta fields keep their unit in the string form.
        dt = np.dtype([('a', '<m8[D]'), ('b', '<M8[us]')])
        assert_equal(str(dt),
                    "[('a', '<m8[D]'), ('b', '<M8[us]')]")
    def test_repr_structured(self):
        dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
                                ('rtile', '>f4', (64, 36))], (3,)),
                       ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
                                   ('bright', '>f4', (8, 36))])])
        assert_equal(repr(dt),
                     "dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), "
                     "('rtile', '>f4', (64, 36))], (3,)), "
                     "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
                     "('bright', '>f4', (8, 36))])])")
        # repr() of an aligned dtype carries an explicit align=True.
        dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],
                       'offsets': [0, 1, 2],
                       'titles': ['Red pixel', 'Green pixel', 'Blue pixel']},
                       align=True)
        assert_equal(repr(dt),
                    "dtype([(('Red pixel', 'r'), 'u1'), "
                    "(('Green pixel', 'g'), 'u1'), "
                    "(('Blue pixel', 'b'), 'u1')], align=True)")
    def test_repr_structured_not_packed(self):
        # Unpacked (overlapping or padded) layouts use the dict form in repr.
        dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],
                       'formats': ['<u4', 'u1', 'u1', 'u1'],
                       'offsets': [0, 0, 1, 2],
                       'titles': ['Color', 'Red pixel',
                                  'Green pixel', 'Blue pixel']}, align=True)
        assert_equal(repr(dt),
                    "dtype({'names':['rgba','r','g','b'],"
                    " 'formats':['<u4','u1','u1','u1'],"
                    " 'offsets':[0,0,1,2],"
                    " 'titles':['Color','Red pixel',"
                              "'Green pixel','Blue pixel'],"
                    " 'itemsize':4}, align=True)")
        dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
                       'offsets': [0, 2],
                       'titles': ['Red pixel', 'Blue pixel'],
                       'itemsize': 4})
        assert_equal(repr(dt),
                    "dtype({'names':['r','b'], "
                    "'formats':['u1','u1'], "
                    "'offsets':[0,2], "
                    "'titles':['Red pixel','Blue pixel'], "
                    "'itemsize':4})")
    def test_repr_structured_datetime(self):
        dt = np.dtype([('a', '<M8[D]'), ('b', '<m8[us]')])
        assert_equal(repr(dt),
                    "dtype([('a', '<M8[D]'), ('b', '<m8[us]')])")
    def test_repr_str_subarray(self):
        dt = np.dtype(('<i2', (1,)))
        assert_equal(repr(dt), "dtype(('<i2', (1,)))")
        assert_equal(str(dt), "('<i2', (1,))")
    @pytest.mark.skipif(sys.version_info[0] >= 3, reason="Python 2 only")
    def test_dtype_str_with_long_in_shape(self):
        # Pull request #376, should not error
        np.dtype('(1L,)i4')
    def test_base_dtype_with_object_type(self):
        # Issue gh-2798, should not error.
        np.array(['a'], dtype="O").astype(("O", [("name", "O")]))
    def test_empty_string_to_object(self):
        # Pull request #4722
        np.array(["", ""]).astype(object)
    def test_void_subclass_unsized(self):
        # Unsized void subclass: itemsize 0, named after the subclass.
        dt = np.dtype(np.record)
        assert_equal(repr(dt), "dtype('V')")
        assert_equal(str(dt), '|V0')
        assert_equal(dt.name, 'record')
    def test_void_subclass_sized(self):
        dt = np.dtype((np.record, 2))
        assert_equal(repr(dt), "dtype('V2')")
        assert_equal(str(dt), '|V2')
        assert_equal(dt.name, 'record16')
    def test_void_subclass_fields(self):
        dt = np.dtype((np.record, [('a', '<u2')]))
        assert_equal(repr(dt), "dtype((numpy.record, [('a', '<u2')]))")
        assert_equal(str(dt), "(numpy.record, [('a', '<u2')])")
        assert_equal(dt.name, 'record16')
class TestDtypeAttributeDeletion(object):
    """Deleting any dtype attribute must raise AttributeError."""
    def test_dtype_non_writable_attributes_deletion(self):
        dt = np.dtype(np.double)
        # Read-only attributes cannot be deleted.
        read_only = ("subdtype", "descr", "str", "name", "base", "shape",
                     "isbuiltin", "isnative", "isalignedstruct", "fields",
                     "metadata", "hasobject")
        for attr_name in read_only:
            assert_raises(AttributeError, delattr, dt, attr_name)
    def test_dtype_writable_attributes_deletion(self):
        # Even the writable attribute ``names`` refuses deletion.
        dt = np.dtype(np.double)
        for attr_name in ("names",):
            assert_raises(AttributeError, delattr, dt, attr_name)
class TestDtypeAttributes(object):
    """Miscellaneous dtype attribute checks."""
    def test_descr_has_trailing_void(self):
        # gh-6359: round-tripping through .descr must keep the padded
        # itemsize (the trailing void is part of the description).
        padded = np.dtype({'names': ['A', 'B'],
                           'formats': ['f4', 'f4'],
                           'offsets': [0, 8],
                           'itemsize': 16})
        rebuilt = np.dtype(padded.descr)
        assert_equal(rebuilt.itemsize, 16)
    def test_name_dtype_subclass(self):
        # Ticket #4357: a void subclass reports its own class name.
        class user_def_subcls(np.void):
            pass
        assert_equal(np.dtype(user_def_subcls).name, 'user_def_subcls')
class TestPickling(object):
    """Round-trip dtypes through every available pickle protocol."""
    def check_pickling(self, dtype):
        # Pickle/unpickle under each protocol and verify the result is
        # equal, has the same descr/metadata, and is usable for arrays.
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            pickled = pickle.loads(pickle.dumps(dtype, proto))
            assert_equal(pickled, dtype)
            assert_equal(pickled.descr, dtype.descr)
            if dtype.metadata is not None:
                assert_equal(pickled.metadata, dtype.metadata)
            # Check the reconstructed dtype is functional
            x = np.zeros(3, dtype=dtype)
            y = np.zeros(3, dtype=pickled)
            assert_equal(x, y)
            assert_equal(x[0], y[0])
    @pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
                                   np.unicode, bool])
    def test_builtin(self, t):
        self.check_pickling(np.dtype(t))
    def test_structured(self):
        dt = np.dtype(([('a', '>f4', (2, 1)), ('b', '<f8', (1, 3))], (2, 2)))
        self.check_pickling(dt)
    def test_structured_aligned(self):
        dt = np.dtype('i4, i1', align=True)
        self.check_pickling(dt)
    def test_structured_unaligned(self):
        dt = np.dtype('i4, i1', align=False)
        self.check_pickling(dt)
    def test_structured_padded(self):
        # Explicit itemsize larger than the fields (trailing padding).
        dt = np.dtype({
            'names': ['A', 'B'],
            'formats': ['f4', 'f4'],
            'offsets': [0, 8],
            'itemsize': 16})
        self.check_pickling(dt)
    def test_structured_titles(self):
        dt = np.dtype({'names': ['r', 'b'],
                       'formats': ['u1', 'u1'],
                       'titles': ['Red pixel', 'Blue pixel']})
        self.check_pickling(dt)
    @pytest.mark.parametrize('base', ['m8', 'M8'])
    @pytest.mark.parametrize('unit', ['', 'Y', 'M', 'W', 'D', 'h', 'm', 's',
                                      'ms', 'us', 'ns', 'ps', 'fs', 'as'])
    def test_datetime(self, base, unit):
        # Unit-less and every supported unit, plus a multiplied unit (7x).
        dt = np.dtype('%s[%s]' % (base, unit) if unit else base)
        self.check_pickling(dt)
        if unit:
            dt = np.dtype('%s[7%s]' % (base, unit))
            self.check_pickling(dt)
    def test_metadata(self):
        dt = np.dtype(int, metadata={'datum': 1})
        self.check_pickling(dt)
def test_rational_dtype():
    """User-defined rational dtype: overflow on narrowing, and detection."""
    # test for bug gh-5719: casting to a too-small integer must overflow.
    cast = np.array([1111], dtype=rational).astype
    assert_raises(OverflowError, cast, 'int8')
    # test that dtype detection finds user-defined types
    value = rational(1)
    assert_equal(np.array([value, value]).dtype, np.dtype(rational))
def test_dtypes_are_true():
    """gh-6294: every dtype object must be truthy."""
    samples = (np.dtype('f8'),
               np.dtype('i8'),
               np.dtype([('a', 'i8'), ('b', 'f4')]))
    for dt in samples:
        assert bool(dt)
def test_invalid_dtype_string():
    """gh-10440: malformed dtype strings must raise TypeError."""
    for bad in ('f8,i8,[f8,i8]', u'Fl\xfcgel'):
        assert_raises(TypeError, np.dtype, bad)
class TestFromDTypeAttribute(object):
def test_simple(self):
class dt:
dtype = "f8"
assert np.dtype(dt) == np.float64
assert np.dtype(dt()) == np.float64
def test_recursion(self):
class dt:
pass
dt.dtype = dt
with pytest.raises(RecursionError):
np.dtype(dt)
dt_instance = dt()
dt_instance.dtype = dt
with pytest.raises(RecursionError):
np.dtype(dt_instance)
def test_void_subtype(self):
class dt(np.void):
# This code path is fully untested before, so it is unclear
# what this should be useful for. Note that if np.void is used
# numpy will think we are deallocating a base type [1.17, 2019-02].
dtype = np.dtype("f,f")
pass
np.dtype(dt)
np.dtype(dt(1))
def test_void_subtype_recursion(self):
class dt(np.void):
pass
dt.dtype = dt
with pytest.raises(RecursionError):
np.dtype(dt)
with pytest.raises(RecursionError):
np.dtype(dt(1))
class TestFromCTypes(object):
    """Conversion of ctypes types and instances to equivalent numpy dtypes.

    Expected dtypes below encode exact field offsets/itemsizes mirroring
    the C layout rules that ctypes applies (_pack_, alignment, unions).
    """

    @staticmethod
    def check(ctype, dtype):
        # Both the ctypes *type* and an *instance* of it must map to dtype.
        dtype = np.dtype(dtype)
        assert_equal(np.dtype(ctype), dtype)
        assert_equal(np.dtype(ctype()), dtype)
    def test_array(self):
        # N * ctype produces a subarray dtype; nesting nests the subarray.
        c8 = ctypes.c_uint8
        self.check( 3 * c8, (np.uint8, (3,)))
        self.check( 1 * c8, (np.uint8, (1,)))
        self.check( 0 * c8, (np.uint8, (0,)))
        self.check(1 * (3 * c8), ((np.uint8, (3,)), (1,)))
        self.check(3 * (1 * c8), ((np.uint8, (1,)), (3,)))
    def test_padded_structure(self):
        # Default ctypes layout pads 'b' to its 2-byte alignment.
        class PaddedStruct(ctypes.Structure):
            _fields_ = [
                ('a', ctypes.c_uint8),
                ('b', ctypes.c_uint16)
            ]
        expected = np.dtype([
            ('a', np.uint8),
            ('b', np.uint16)
        ], align=True)
        self.check(PaddedStruct, expected)
    def test_bit_fields(self):
        # numpy has no bit-field dtype, so conversion must be rejected.
        class BitfieldStruct(ctypes.Structure):
            _fields_ = [
                ('a', ctypes.c_uint8, 7),
                ('b', ctypes.c_uint8, 1)
            ]
        assert_raises(TypeError, np.dtype, BitfieldStruct)
        assert_raises(TypeError, np.dtype, BitfieldStruct())
    def test_pointer(self):
        # Typed pointers have no dtype equivalent and must be rejected.
        p_uint8 = ctypes.POINTER(ctypes.c_uint8)
        assert_raises(TypeError, np.dtype, p_uint8)
    def test_void_pointer(self):
        # void* maps to the pointer-sized unsigned integer.
        self.check(ctypes.c_void_p, np.uintp)
    def test_union(self):
        # Union members all live at offset 0; itemsize is the widest member.
        class Union(ctypes.Union):
            _fields_ = [
                ('a', ctypes.c_uint8),
                ('b', ctypes.c_uint16),
            ]
        expected = np.dtype(dict(
            names=['a', 'b'],
            formats=[np.uint8, np.uint16],
            offsets=[0, 0],
            itemsize=2
        ))
        self.check(Union, expected)
    def test_union_with_struct_packed(self):
        # Only the inner struct is packed; the union itself is not.
        class Struct(ctypes.Structure):
            _pack_ = 1
            _fields_ = [
                ('one', ctypes.c_uint8),
                ('two', ctypes.c_uint32)
            ]
        class Union(ctypes.Union):
            _fields_ = [
                ('a', ctypes.c_uint8),
                ('b', ctypes.c_uint16),
                ('c', ctypes.c_uint32),
                ('d', Struct),
            ]
        expected = np.dtype(dict(
            names=['a', 'b', 'c', 'd'],
            formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]],
            offsets=[0, 0, 0, 0],
            itemsize=ctypes.sizeof(Union)
        ))
        self.check(Union, expected)
    def test_union_packed(self):
        # Both the struct member and the union carry _pack_ = 1.
        class Struct(ctypes.Structure):
            _fields_ = [
                ('one', ctypes.c_uint8),
                ('two', ctypes.c_uint32)
            ]
            _pack_ = 1
        class Union(ctypes.Union):
            _pack_ = 1
            _fields_ = [
                ('a', ctypes.c_uint8),
                ('b', ctypes.c_uint16),
                ('c', ctypes.c_uint32),
                ('d', Struct),
            ]
        expected = np.dtype(dict(
            names=['a', 'b', 'c', 'd'],
            formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]],
            offsets=[0, 0, 0, 0],
            itemsize=ctypes.sizeof(Union)
        ))
        self.check(Union, expected)
    def test_packed_structure(self):
        # _pack_ = 1 removes the padding, so align=False layout matches.
        class PackedStructure(ctypes.Structure):
            _pack_ = 1
            _fields_ = [
                ('a', ctypes.c_uint8),
                ('b', ctypes.c_uint16)
            ]
        expected = np.dtype([
            ('a', np.uint8),
            ('b', np.uint16)
        ])
        self.check(PackedStructure, expected)
    def test_large_packed_structure(self):
        # _pack_ = 2 caps member alignment at 2 bytes; offsets are explicit.
        class PackedStructure(ctypes.Structure):
            _pack_ = 2
            _fields_ = [
                ('a', ctypes.c_uint8),
                ('b', ctypes.c_uint16),
                ('c', ctypes.c_uint8),
                ('d', ctypes.c_uint16),
                ('e', ctypes.c_uint32),
                ('f', ctypes.c_uint32),
                ('g', ctypes.c_uint8)
            ]
        expected = np.dtype(dict(
            formats=[np.uint8, np.uint16, np.uint8, np.uint16, np.uint32, np.uint32, np.uint8 ],
            offsets=[0, 2, 4, 6, 8, 12, 16],
            names=['a', 'b', 'c', 'd', 'e', 'f', 'g'],
            itemsize=18))
        self.check(PackedStructure, expected)
    def test_big_endian_structure_packed(self):
        # Byte order of members must be preserved ('>u4').
        class BigEndStruct(ctypes.BigEndianStructure):
            _fields_ = [
                ('one', ctypes.c_uint8),
                ('two', ctypes.c_uint32)
            ]
            _pack_ = 1
        expected = np.dtype([('one', 'u1'), ('two', '>u4')])
        self.check(BigEndStruct, expected)
    def test_little_endian_structure_packed(self):
        class LittleEndStruct(ctypes.LittleEndianStructure):
            _fields_ = [
                ('one', ctypes.c_uint8),
                ('two', ctypes.c_uint32)
            ]
            _pack_ = 1
        expected = np.dtype([('one', 'u1'), ('two', '<u4')])
        self.check(LittleEndStruct, expected)
    def test_little_endian_structure(self):
        class PaddedStruct(ctypes.LittleEndianStructure):
            _fields_ = [
                ('a', ctypes.c_uint8),
                ('b', ctypes.c_uint16)
            ]
        expected = np.dtype([
            ('a', '<B'),
            ('b', '<H')
        ], align=True)
        self.check(PaddedStruct, expected)
    def test_big_endian_structure(self):
        class PaddedStruct(ctypes.BigEndianStructure):
            _fields_ = [
                ('a', ctypes.c_uint8),
                ('b', ctypes.c_uint16)
            ]
        expected = np.dtype([
            ('a', '>B'),
            ('b', '>H')
        ], align=True)
        self.check(PaddedStruct, expected)
    def test_simple_endian_types(self):
        # __ctype_le__/__ctype_be__ variants; endianness is moot for 1 byte.
        self.check(ctypes.c_uint16.__ctype_le__, np.dtype('<u2'))
        self.check(ctypes.c_uint16.__ctype_be__, np.dtype('>u2'))
        self.check(ctypes.c_uint8.__ctype_le__, np.dtype('u1'))
        self.check(ctypes.c_uint8.__ctype_be__, np.dtype('u1'))
    # NOTE(review): ``permutations`` returns a one-shot iterator; it is
    # consumed once, by the parametrize decorator below, at collection time.
    all_types = set(np.typecodes['All'])
    all_pairs = permutations(all_types, 2)
    @pytest.mark.parametrize("pair", all_pairs)
    def test_pairs(self, pair):
        """
        Check that np.dtype('x,y') matches [np.dtype('x'), np.dtype('y')]
        Example: np.dtype('d,I') -> dtype([('f0', '<f8'), ('f1', '<u4')])
        """
        # gh-5645: check that np.dtype('i,L') can be used
        pair_type = np.dtype('{},{}'.format(*pair))
        expected = np.dtype([('f0', pair[0]), ('f1', pair[1])])
        assert_equal(pair_type, expected)
| |
# -*- coding: utf-8 -*-
# Copyright 2011, Florent Lamiraux, Thomas Moulard, JRL, CNRS/AIST
from __future__ import print_function
import sys
import pinocchio
from dynamic_graph import plug
from dynamic_graph.sot.core.derivator import Derivator_of_Vector
from dynamic_graph.sot.core.op_point_modifier import OpPointModifier
from dynamic_graph.sot.core.robot_simu import RobotSimu
from dynamic_graph.tools import addTrace
from dynamic_graph.tracer_real_time import TracerRealTime
# Compatibility shim: expose an ``ABC`` base class on both Python 2 and
# Python 3 (``abc.ABC`` only exists on Python 3; on Python 2 we emulate it
# by setting ``__metaclass__ = ABCMeta`` on a plain class).
if sys.version_info.major == 2:
    from abc import ABCMeta, abstractmethod
    class ABC:
        __metaclass__ = ABCMeta
else:
    from abc import ABC, abstractmethod
class AbstractRobot(ABC):
    """
    This class instantiates all the entities required to get a consistent
    representation of a robot, mainly:
    - device : to integrate velocities into angular control,
    - dynamic: to compute forward geometry and kinematics,
    - zmpFromForces: to compute ZMP force foot force sensors,
    - stabilizer: to stabilize balanced motions
    Operational points are stored into 'OperationalPoints' list. Some of them
    are also accessible directly as attributes:
    - leftWrist,
    - rightWrist,
    - leftAnkle,
    - rightAnkle,
    - Gaze.
    Operational points are mapped to the actual joints in the robot model
    via 'OperationalPointsMap' dictionary.
    This attribute *must* be defined in the subclasses
    Other attributes require to be defined:
    - halfSitting: half-sitting position is the robot initial pose.
    This attribute *must* be defined in subclasses.
    - dynamic: The robot dynamic model.
    - device: The device that integrates the dynamic equation, namely
    the real robot or
    a simulator
    - dimension: The configuration size.
    """
    def _initialize(self):
        """Reset every configuration attribute to its default value."""
        self.OperationalPoints = []
        """
        Operational points are specific interesting points of the robot
        used to control it.
        When an operational point is defined, signals corresponding to the
        point position and jacobian are created.
        For instance if creating an operational point for the left-wrist,
        the associated signals will be called "left-wrist" and
        "Jleft-wrist" for respectively the position and the jacobian.
        """
        self.AdditionalFrames = []
        """
        Additional frames are frames which are defined w.r.t an operational point
        and provides an interesting transformation.
        It can be used, for instance, to store the sensor location.
        The contained elements must be triplets matching:
        - additional frame name,
        - transformation w.r.t to the operational point,
        - operational point file.
        """
        self.frames = dict()
        """
        Additional frames defined by using OpPointModifier.
        """
        # FIXME: the following options are /not/ independent.
        # zmp requires acceleration which requires velocity.
        """
        Enable velocity computation.
        """
        self.enableVelocityDerivator = False
        """
        Enable acceleration computation.
        """
        self.enableAccelerationDerivator = False
        """
        Enable ZMP computation
        """
        self.enableZmpComputation = False
        """
        Tracer used to log data.
        """
        self.tracer = None
        """
        How much data will be logged.
        """
        self.tracerSize = 2**20
        """
        Automatically recomputed signals through the use
        of device.after.
        This list is maintained in order to clean the
        signal list device.after before exiting.
        """
        self.autoRecomputedSignals = []
        """
        Which signals should be traced.
        """
        self.tracedSignals = {
            'dynamic': ["com", "zmp", "angularmomentum", "position", "velocity", "acceleration"],
            'device': ['zmp', 'control', 'state']
        }
    def help(self):
        # NOTE(review): this prints AbstractHumanoidRobot.__doc__, not
        # type(self).__doc__ — confirm this is intended for non-humanoid
        # subclasses of AbstractRobot.
        print(AbstractHumanoidRobot.__doc__)
    def _removeMimicJoints(self, urdfFile=None, urdfString=None):
        """ Parse the URDF, extract the mimic joints and call removeJoints. """
        # get mimic joints
        import xml.etree.ElementTree as ET
        if urdfFile is not None:
            assert urdfString is None, "One and only one of input argument should be provided"
            root = ET.parse(urdfFile)
        else:
            assert urdfString is not None, "One and only one of input argument should be provided"
            root = ET.fromstring(urdfString)
        mimicJoints = list()
        # A joint is a mimic joint when it carries a <mimic> child element.
        for e in root.iter('joint'):
            if 'name' in e.attrib:
                name = e.attrib['name']
                for c in e:
                    if hasattr(c, 'tag') and c.tag == 'mimic':
                        mimicJoints.append(name)
        self.removeJoints(mimicJoints)
    def removeJoints(self, joints):
        """
        - param joints: a list of joint names to be removed from the self.pinocchioModel
        """
        jointIds = list()
        for j in joints:
            if self.pinocchioModel.existJointName(j):
                jointIds.append(self.pinocchioModel.getJointId(j))
        if len(jointIds) > 0:
            # Rebuild a reduced model with the listed joints locked at the
            # neutral configuration, and refresh the matching Data object.
            q = pinocchio.neutral(self.pinocchioModel)
            self.pinocchioModel = pinocchio.buildReducedModel(self.pinocchioModel, jointIds, q)
            self.pinocchioData = pinocchio.Data(self.pinocchioModel)
    def loadModelFromString(self, urdfString, rootJointType=pinocchio.JointModelFreeFlyer, removeMimicJoints=True):
        """ Load a URDF model contained in a string
        - param rootJointType: the root joint type. None for no root joint.
        - param removeMimicJoints: if True, all the mimic joints found in the model are removed.
        """
        if rootJointType is None:
            self.pinocchioModel = pinocchio.buildModelFromXML(urdfString)
        else:
            self.pinocchioModel = pinocchio.buildModelFromXML(urdfString, rootJointType())
        self.pinocchioData = pinocchio.Data(self.pinocchioModel)
        if removeMimicJoints:
            self._removeMimicJoints(urdfString=urdfString)
    def loadModelFromUrdf(self,
                          urdfPath,
                          urdfDir=None,
                          rootJointType=pinocchio.JointModelFreeFlyer,
                          removeMimicJoints=True):
        """
        Load a model using the pinocchio urdf parser. This parser looks
        for urdf files in which kinematics and dynamics information
        have been added.
        - param urdfPath: a path to the URDF file, possibly using the
          "package://<pkg>/<relpath>" scheme resolved through rospkg.
        - param urdfDir: A list of directories. If None, will use ROS_PACKAGE_PATH.
        - param rootJointType: the root joint type. None for no root joint.
        - param removeMimicJoints: if True, mimic joints are removed after load.
        """
        if urdfPath.startswith("package://"):
            from os import path
            n1 = 10 # len("package://")
            n2 = urdfPath.index(path.sep, n1)
            pkg = urdfPath[n1:n2]
            relpath = urdfPath[n2 + 1:]
            import rospkg
            rospack = rospkg.RosPack()
            abspkg = rospack.get_path(pkg)
            urdfFile = path.join(abspkg, relpath)
        else:
            urdfFile = urdfPath
        if urdfDir is None:
            import os
            # NOTE(review): urdfDir is computed but not used below —
            # presumably kept for API compatibility; confirm.
            urdfDir = os.environ["ROS_PACKAGE_PATH"].split(':')
        if rootJointType is None:
            self.pinocchioModel = pinocchio.buildModelFromUrdf(urdfFile)
        else:
            self.pinocchioModel = pinocchio.buildModelFromUrdf(urdfFile, rootJointType())
        self.pinocchioData = pinocchio.Data(self.pinocchioModel)
        if removeMimicJoints:
            self._removeMimicJoints(urdfFile=urdfFile)
    def initializeOpPoints(self):
        """Create op-point signals for every entry of OperationalPoints."""
        for op in self.OperationalPoints:
            self.dynamic.createOpPoint(op, self.OperationalPointsMap[op])
    def createFrame(self, frameName, transformation, operationalPoint):
        """Create an OpPointModifier frame attached to an operational point.

        - param frameName: name of the new entity.
        - param transformation: constant transformation w.r.t. the op-point.
        - param operationalPoint: name of the op-point signal to attach to.
        Returns the OpPointModifier entity, with position/jacobian recomputed.
        """
        frame = OpPointModifier(frameName)
        frame.setTransformation(transformation)
        plug(self.dynamic.signal(operationalPoint), frame.positionIN)
        plug(self.dynamic.signal("J{0}".format(operationalPoint)), frame.jacobianIN)
        frame.position.recompute(frame.position.time + 1)
        frame.jacobian.recompute(frame.jacobian.time + 1)
        return frame
    def setJointValueInConfig(self, q, jointNames, jointValues):
        """
        q: configuration to update
        jointNames: list of existing joint names in self.pinocchioModel
        jointValues: corresponding joint values.
        """
        model = self.pinocchioModel
        for jn, jv in zip(jointNames, jointValues):
            assert model.existJointName(jn)
            joint = model.joints[model.getJointId(jn)]
            # idx_q is the joint's offset in the configuration vector.
            q[joint.idx_q] = jv
    @abstractmethod
    def defineHalfSitting(self, q):
        """
        Define half sitting configuration using the pinocchio Model (i.e.
        with quaternions and not with euler angles).
        method setJointValueInConfig may be usefull to implement this function.
        """
        pass
    def initializeRobot(self):
        """
        If the robot model is correctly loaded, this method will then
        initialize the operational points, set the position to
        half-sitting with null velocity/acceleration.
        To finish, different tasks are initialized:
        - the center of mass task used to keep the robot stability
        - one task per operational point to ease robot control
        """
        if not hasattr(self, 'dynamic'):
            raise RuntimeError("Dynamic robot model must be initialized first")
        if not hasattr(self, 'device') or self.device is None:
            # No device supplied by the caller: create a simulator device.
            # raise RuntimeError("A device is already defined.")
            self.device = RobotSimu(self.name + '_device')
        self.device.resize(self.dynamic.getDimension())
        """
        Robot timestep
        """
        self.timeStep = self.device.getTimeStep()
        # Compute half sitting configuration
        import numpy
        """
        Half sitting configuration.
        """
        self.halfSitting = pinocchio.neutral(self.pinocchioModel)
        self.defineHalfSitting(self.halfSitting)
        self.halfSitting = numpy.array(self.halfSitting[:3].tolist() + [0., 0., 0.] # Replace quaternion by RPY.
                                       + self.halfSitting[7:].tolist())
        assert self.halfSitting.shape[0] == self.dynamic.getDimension()
        # Set the device limits.
        def get(s):
            # Force one recomputation so the signal holds a valid value.
            s.recompute(0)
            return s.value
        def opposite(v):
            return [-x for x in v]
        self.dynamic.add_signals()
        self.device.setPositionBounds(get(self.dynamic.lowerJl), get(self.dynamic.upperJl))
        self.device.setVelocityBounds(-get(self.dynamic.upperVl), get(self.dynamic.upperVl))
        self.device.setTorqueBounds(-get(self.dynamic.upperTl), get(self.dynamic.upperTl))
        # Freeflyer reference frame should be the same as global
        # frame so that operational point positions correspond to
        # position in freeflyer frame.
        self.device.set(self.halfSitting)
        plug(self.device.state, self.dynamic.position)
        if self.enableVelocityDerivator:
            self.velocityDerivator = Derivator_of_Vector('velocityDerivator')
            self.velocityDerivator.dt.value = self.timeStep
            plug(self.device.state, self.velocityDerivator.sin)
            plug(self.velocityDerivator.sout, self.dynamic.velocity)
        else:
            self.dynamic.velocity.value = numpy.zeros([
                self.dimension,
            ])
        # NOTE(review): enabling the acceleration derivator presumes the
        # velocity derivator is enabled too (self.velocityDerivator is read
        # below) — matches the FIXME in _initialize; confirm callers obey it.
        if self.enableAccelerationDerivator:
            self.accelerationDerivator = \
                Derivator_of_Vector('accelerationDerivator')
            self.accelerationDerivator.dt.value = self.timeStep
            plug(self.velocityDerivator.sout, self.accelerationDerivator.sin)
            plug(self.accelerationDerivator.sout, self.dynamic.acceleration)
        else:
            self.dynamic.acceleration.value = numpy.zeros([
                self.dimension,
            ])
    def addTrace(self, entityName, signalName):
        """Register entity.signal for tracing and auto-recomputation."""
        if self.tracer:
            self.autoRecomputedSignals.append('{0}.{1}'.format(entityName, signalName))
            addTrace(self, self.tracer, entityName, signalName)
    def initializeTracer(self):
        """Lazily create the real-time tracer and open its output files."""
        if not self.tracer:
            self.tracer = TracerRealTime('trace')
            self.tracer.setBufferSize(self.tracerSize)
            self.tracer.open('/tmp/', 'dg_', '.dat')
            # Recompute trace.triger at each iteration to enable tracing.
            self.device.after.addSignal('{0}.triger'.format(self.tracer.name))
    def traceDefaultSignals(self):
        """Trace the standard dynamic, frame and device signals."""
        # Geometry / operational points
        for s in self.OperationalPoints + self.tracedSignals['dynamic']:
            self.addTrace(self.dynamic.name, s)
        # Geometry / frames
        for (frameName, _, _) in self.AdditionalFrames:
            for s in ['position', 'jacobian']:
                self.addTrace(self.frames[frameName].name, s)
        # Device
        for s in self.tracedSignals['device']:
            self.addTrace(self.device.name, s)
        if type(self.device) != RobotSimu:
            # Real devices additionally expose the measured robot state.
            self.addTrace(self.device.name, 'robotState')
        # Misc
        if self.enableVelocityDerivator:
            self.addTrace(self.velocityDerivator.name, 'sout')
        if self.enableAccelerationDerivator:
            self.addTrace(self.accelerationDerivator.name, 'sout')
    def __init__(self, name, tracer=None):
        """Store the robot name and optionally attach an existing tracer."""
        self._initialize()
        self.name = name
        # Initialize tracer if necessary.
        if tracer:
            self.tracer = tracer
    def __del__(self):
        # Flush and close the tracer when the robot object is collected.
        if self.tracer:
            self.stopTracer()
    def startTracer(self):
        """
        Start the tracer, if one has been initialized.
        """
        if self.tracer:
            self.tracer.start()
    def stopTracer(self):
        """
        Stop and destroy tracer.
        """
        if self.tracer:
            self.tracer.dump()
            self.tracer.stop()
            self.tracer.close()
            self.tracer.clear()
            for s in self.autoRecomputedSignals:
                self.device.after.rmSignal(s)
            self.tracer = None
    def reset(self, posture=None):
        """
        Restart the control from another position.
        This method has not been extensively tested and
        should be used carefully.
        In particular, tasks should be removed from the
        solver before attempting a reset.
        """
        if not posture:
            posture = self.halfSitting
        self.device.set(posture)
        # Recompute the main signals one step ahead of the device clock.
        self.dynamic.com.recompute(self.device.state.time + 1)
        self.dynamic.Jcom.recompute(self.device.state.time + 1)
        for op in self.OperationalPoints:
            self.dynamic.signal(self.OperationalPointsMap[op]).recompute(self.device.state.time + 1)
            self.dynamic.signal('J' + self.OperationalPointsMap[op]).recompute(self.device.state.time + 1)
class AbstractHumanoidRobot(AbstractRobot):
    """An AbstractRobot with the standard humanoid operational points
    (wrists, ankles and gaze) registered by default."""

    def __init__(self, name, tracer=None):
        AbstractRobot.__init__(self, name, tracer)

    def _initialize(self):
        AbstractRobot._initialize(self)
        # += on a list extends it in place, like .extend().
        self.OperationalPoints += [
            'left-wrist',
            'right-wrist',
            'left-ankle',
            'right-ankle',
            'gaze',
        ]
| |
import time
import cmd
import os
import datetime
import subprocess
from .report_generator import SessionReportGenerator
from .print_colour import Printer
from .session import Session
class TestSessionRecorder(cmd.Cmd):
    """Interactive recorder for exploratory test sessions.

    Sessions are captured from an interactive command line, stored as
    files under SESSION_DIR, and can be rendered to HTML reports under
    REPORTS_DIR.
    """

    # Constants
    # File System
    SESSION_DIR = os.path.expanduser("~") + '/sessions'
    REPORTS_DIR = os.path.expanduser("~") + '/reports'
    # Prompts
    DEFAULT_PROMPT = '>> '
    # Global Values
    prompt = DEFAULT_PROMPT
    # BUG FIX: previously these fragments were separate expression
    # statements, so only the first line was ever assigned to ``intro``
    # (and the fragment boundary lacked a space). Parenthesised implicit
    # concatenation restores the full banner.
    intro = ('Test Session Recorder is simply utility that allows a '
             'tester to capture and log test session data easily in an '
             'interactive command line format. Sessions can be viewed '
             'later or outputted to an HTML report')
    session = None
    # Detect the terminal width; fall back to 80 columns when there is no
    # controlling terminal (e.g. running under a pipe or CI).
    try:
        rows, columns = subprocess.check_output(
            ['stty', 'size']).decode('utf-8').split()
        columns = int(columns)
    except (ValueError, OSError, subprocess.SubprocessError):
        # Narrowed from a bare ``except:`` so SystemExit and
        # KeyboardInterrupt are no longer swallowed.
        rows = 0
        columns = 80

    def preloop(self):
        """Ensure the session storage directory exists before the loop."""
        if not os.path.exists(os.path.join(self.SESSION_DIR)):
            os.makedirs(self.SESSION_DIR)

    def do_new(self, session_name):
        """new [session_name]
        Create a new test session as session_name"""
        if not session_name:
            print('Test session must have a name or title')
        elif self.check_for_session(session_name):
            print('This test session already exists. Please try again with a'
                  ' different title')
        else:
            self.new_session(session_name)

    def do_open(self, session_name):
        """open [session_name]
        Open test session_name or start a new session if
        session_name does not exist"""
        if not session_name:
            print('Please specify a test session to open')
        else:
            if self.check_for_session(session_name):
                TestSessionRecorder.print_header(
                    'Session Opened - ' + session_name)
                self.show_session(
                    Session.get_session_data(
                        session_name, self.SESSION_DIR))
                self.session = Session(session_name, self.SESSION_DIR)
                self.prompt = Session.SESSION_PROMPT
            else:
                # Session does not exist so start a new one
                self.new_session(session_name)

    def complete_open(self, text, line, begidx, endidx):
        """Tab-completion for ``open``: existing session names."""
        return self.autocomplete_sessions(text, line, begidx, endidx)

    def do_show(self, session_name):
        """show [session_name]
        Show contents of test session_name"""
        self.show_session(Session.get_session_data(
            session_name, self.SESSION_DIR))

    def complete_show(self, text, line, begidx, endidx):
        """Tab-completion for ``show``: existing session names."""
        return self.autocomplete_sessions(text, line, begidx, endidx)

    def do_list(self, line):
        """list
        List all test sessions"""
        all_sessions = os.listdir(self.SESSION_DIR)
        if len(all_sessions) > 0:
            TestSessionRecorder.print_header('Test Sessions')
            for session in all_sessions:
                print(session, end='')
                create_time = time.ctime(os.path.getmtime(
                    os.path.join(self.SESSION_DIR, session)))
                # Right-align the timestamp to the terminal width.
                print(' '*(self.columns-len(session)-len(create_time))
                      + create_time)
        else:
            print('There are no recorded sessions')

    def do_report(self, report_args):
        """report [report_args]
        Generate an HTML report for [session_name] -f [optional_filename]"""
        args = report_args.split('-f')
        # str.split always returns at least one element; the first branch
        # is purely defensive and kept for backward compatibility.
        if len(args) == 0:
            print('Please enter a valid session name')
        elif len(args) >= 1:
            session_name = args[0].strip()
            if self.check_for_session(session_name):
                generator = SessionReportGenerator(self.REPORTS_DIR,
                                                   'test_session')
                session_data = Session.get_session_data(
                    session_name, self.SESSION_DIR)
                log = session_data[Session.LOG_KEY]
                test_log = []
                bug_log = []
                # Split the combined log into plain entries and bug entries.
                for entry in log:
                    if entry['bug']:
                        bug_log.append(entry['date'] + ' ' + entry['entry'])
                    else:
                        test_log.append(entry['date'] + ' ' + entry['entry'])
                session_data[Session.BUG_KEY] = bug_log
                session_data[Session.LOG_KEY] = test_log
                if len(args) == 2:
                    filename = args[1].strip()
                    result = generator.generate_report(
                        session_name, filename, **session_data)
                else:
                    result = generator.generate_report(
                        session_name, **session_data)
                if result:
                    # Typo fix: was "sucessfully".
                    print('Report successfully generated')
                else:
                    print('Report failed to generate')
            else:
                print('Session name not found')
        else:
            print('Invalid report arguments')

    def complete_report(self, text, line, begidx, endidx):
        """Tab-completion for ``report``: existing session names."""
        return self.autocomplete_sessions(text, line, begidx, endidx)

    def precmd(self, line):
        """Timestamp and route input through the active session, if any."""
        if self.session:
            timestamp = datetime.datetime.now().strftime('[%Y-%m-%d %H:%M:%S]')
            result = self.session.process_session_cmd(timestamp, line)
            console_text = result[Session.TEXT_KEY]
            if console_text:
                print(console_text)
            return result[Session.CMD_KEY]
        else:
            return line

    def default(self, line):
        """Unknown input: inside a session, only honour the quit token."""
        if self.session:
            self.prompt = Session.SESSION_PROMPT
            if Session.SESSION_QUIT in line:
                self.quit_session()
        else:
            print('Please enter a valid command')

    def do_delete(self, session_name):
        """delete [session_name]
        Permanently delete test session_name"""
        if not session_name:
            print('Please enter a valid session name')
        elif self.check_for_session(session_name):
            print('Are you sure you want to delete {} ? (y/N)'.format(
                session_name))
            confirmation = input()
            self.delete_session(session_name, confirmation)
        else:
            print('There is no test session with that name')

    def delete_session(self, session_name, choice):
        """Remove session_name from disk when choice confirms (y/yes)."""
        if choice.lower() in ['y', 'yes']:
            os.remove(os.path.join(self.SESSION_DIR, session_name))
            print(session_name + ' successfully deleted')

    def complete_delete(self, text, line, begidx, endidx):
        """Tab-completion for ``delete``: existing session names."""
        return self.autocomplete_sessions(text, line, begidx, endidx)

    def do_quit(self, line):
        """quit
        Quit the application or current test session"""
        return True

    def check_for_session(self, session_name):
        """Return True if a session file with this name exists."""
        all_sessions = os.listdir(self.SESSION_DIR)
        return session_name in all_sessions

    def show_session(self, session_data):
        """Pretty-print a session's charter, log and debrief."""
        TestSessionRecorder.print_header('Test Session Contents', True)
        mission = session_data[Session.MISSION_KEY]
        timebox = session_data[Session.TIMEBOX_KEY]
        test_areas = session_data[Session.AREAS_KEY]
        debrief = session_data[Session.DEBRIEF_KEY]
        if mission is not None:
            Printer.print('Test Mission: ', end='')
            print(mission)
        if timebox is not None:
            Printer.print('Timebox: ', end='')
            print(timebox)
        if len(test_areas) != 0:
            Printer.print('Test Areas:')
            for area in test_areas:
                print('- ' + area)
        TestSessionRecorder.print_bar()
        TestSessionRecorder.print_header('Test Session Log', True)
        log_entries = session_data[Session.LOG_KEY]
        if len(log_entries) > 0:
            for entry in log_entries:
                if entry['bug']:
                    # Bug entries are highlighted as warnings.
                    Printer.print(
                        entry['date'] + ' (BUG)' + entry['entry'],
                        Printer.WARNING)
                else:
                    print(entry['date'] + ' ' + entry['entry'])
        TestSessionRecorder.print_bar()
        if debrief is not None:
            Printer.print('Debrief: ', end='')
            print(debrief)
        Printer.print('Duration: ', end='')
        print(session_data[Session.DURATION_KEY])

    def new_session(self, session_name):
        """Start recording a brand-new session."""
        print('Session Started: ' + session_name)
        TestSessionRecorder.print_bar()
        self.prompt = Session.SESSION_PROMPT
        self.session = Session(session_name, self.SESSION_DIR)

    def quit_session(self):
        """Close the active session and report its duration."""
        self.prompt = self.DEFAULT_PROMPT
        duration = self.session.get_duration()
        print('Session Duration: ' + str(duration))
        print('Session saved.')
        self.session = None

    def autocomplete_sessions(self, text, line, begidx, endidx):
        """Return session names matching the current completion prefix."""
        all_sessions = os.listdir(self.SESSION_DIR)
        if not text:
            completions = all_sessions[:]
        else:
            completions = [f for f in all_sessions if f.startswith(text)]
        return completions

    @classmethod
    def print_header(cls, header_text, center=False):
        """Print a coloured header (optionally centred) plus a rule."""
        if center:
            Printer.print(' '*(int(TestSessionRecorder.columns/2)-int(
                (len(header_text)/2))) + header_text, Printer.BLUE)
        else:
            Printer.print(header_text, Printer.BLUE)
        TestSessionRecorder.print_bar()

    @classmethod
    def print_bar(cls):
        """Print a horizontal rule spanning the terminal width."""
        print('='*TestSessionRecorder.columns)
if __name__ == '__main__':
    # NOTE(review): print_header() prints the banner and returns None, so
    # cmdloop() receives intro=None and will additionally print cls.intro —
    # presumably intentional (banner followed by description); confirm.
    TestSessionRecorder().cmdloop(
        TestSessionRecorder.print_header('Test Session Recorder', True))
| |
#
# tested on | Windows native | Linux cross-compilation
# ------------------------+-------------------+---------------------------
# MSVS C++ 2010 Express | WORKS | n/a
# Mingw-w64 | WORKS | WORKS
# Mingw-w32 | WORKS | WORKS
# MinGW | WORKS | untested
#
#####
# Notes about MSVS C++ :
#
# - MSVC2010-Express compiles to 32bits only.
#
#####
# Notes about Mingw-w64 and Mingw-w32 under Windows :
#
# - both can be installed using the official installer :
# http://mingw-w64.sourceforge.net/download.php#mingw-builds
#
# - if you want to compile both 32bits and 64bits, don't forget to
# run the installer twice to install them both.
#
# - install them into a path that does not contain spaces
# ( example : "C:/Mingw-w32", "C:/Mingw-w64" )
#
# - if you want to compile faster using the "-j" option, don't forget
# to install the appropriate version of the Pywin32 python extension
# available from : http://sourceforge.net/projects/pywin32/files/
#
# - before running scons, you must add into the environment path
# the path to the "/bin" directory of the Mingw version you want
# to use :
#
# set PATH=C:/Mingw-w32/bin;%PATH%
#
# - then, scons should be able to detect gcc.
# - Mingw-w32 only compiles 32bits.
# - Mingw-w64 only compiles 64bits.
#
# - it is possible to add them both at the same time into the PATH env,
# if you also define the MINGW32_PREFIX and MINGW64_PREFIX environment
# variables.
# For instance, you could store that set of commands into a .bat script
# that you would run just before scons :
#
# set PATH=C:\mingw-w32\bin;%PATH%
# set PATH=C:\mingw-w64\bin;%PATH%
# set MINGW32_PREFIX=C:\mingw-w32\bin\
# set MINGW64_PREFIX=C:\mingw-w64\bin\
#
#####
# Notes about Mingw, Mingw-w64 and Mingw-w32 under Linux :
#
# - default toolchain prefixes are :
# "i586-mingw32msvc-" for MinGW
# "i686-w64-mingw32-" for Mingw-w32
# "x86_64-w64-mingw32-" for Mingw-w64
#
# - if both MinGW and Mingw-w32 are installed on your system
# Mingw-w32 should take the priority over MinGW.
#
# - it is possible to manually override prefixes by defining
# the MINGW32_PREFIX and MINGW64_PREFIX environment variables.
#
#####
# Notes about Mingw under Windows :
#
# - this is the MinGW version from http://mingw.org/
# - install it into a path that does not contain spaces
# ( example : "C:/MinGW" )
# - several DirectX headers might be missing. You can copy them into
# the C:/MinGW/include" directory from this page :
# https://code.google.com/p/mingw-lib/source/browse/trunk/working/avcodec_to_widget_5/directx_include/
# - before running scons, add the path to the "/bin" directory :
# set PATH=C:/MinGW/bin;%PATH%
# - scons should be able to detect gcc.
#
#####
# TODO :
#
# - finish cleaning up this script to remove all the remains of previous hacks and workarounds
# - make it work with the Windows7 SDK that is supposed to enable 64bits compilation for MSVC2010-Express
# - confirm it works well with other Visual Studio versions.
# - update the wiki about the pywin32 extension required for the "-j" option under Windows.
# - update the wiki to document MINGW32_PREFIX and MINGW64_PREFIX
#
import os
import sys
def is_active():
    """This platform module is always available for selection."""
    return True
def get_name():
    """Human-readable platform name used by the build system."""
    return "Windows"
def can_build():
    # Detect whether a Windows build is possible, either natively (MSVC or
    # a MinGW flavour on the PATH) or by cross-compilation from posix.
    # The order of the os.system() probes is load-bearing: short-circuit
    # evaluation stops at the first toolchain that answers.
    if (os.name=="nt"):
        #building natively on windows!
        if (os.getenv("VSINSTALLDIR")):
            # MSVC environment (VSINSTALLDIR) is set: use Visual Studio.
            return True
        else:
            print("\nMSVC not detected, attempting Mingw.")
            mingw32 = ""
            mingw64 = ""
            if ( os.getenv("MINGW32_PREFIX") ) :
                mingw32 = os.getenv("MINGW32_PREFIX")
            if ( os.getenv("MINGW64_PREFIX") ) :
                mingw64 = os.getenv("MINGW64_PREFIX")
            # Probe gcc bare, then with each prefix; output discarded.
            test = "gcc --version > NUL 2>&1"
            if os.system(test)!= 0 and os.system(mingw32+test)!=0 and os.system(mingw64+test)!=0 :
                print("- could not detect gcc.")
                print("Please, make sure a path to a Mingw /bin directory is accessible into the environment PATH.\n")
                return False
            else:
                print("- gcc detected.")
                return True
    if (os.name=="posix"):
        # Cross-compiling from a posix host: default toolchain prefixes.
        mingw = "i586-mingw32msvc-"
        mingw64 = "x86_64-w64-mingw32-"
        mingw32 = "i686-w64-mingw32-"
        if (os.getenv("MINGW32_PREFIX")):
            mingw32=os.getenv("MINGW32_PREFIX")
            mingw = mingw32
        if (os.getenv("MINGW64_PREFIX")):
            mingw64=os.getenv("MINGW64_PREFIX")
        # NOTE(review): "&>" is a bash-ism; under a plain /bin/sh this does
        # not redirect stderr as intended — confirm on non-bash hosts.
        test = "gcc --version &>/dev/null"
        if (os.system(mingw+test) == 0 or os.system(mingw64+test) == 0 or os.system(mingw32+test) == 0):
            return True
    return False
def get_opts():
    """Return the SCons option tuples for the 32/64-bit MinGW prefixes."""
    prefix32 = ""
    prefix64 = ""
    if os.name == "posix":
        legacy = "i586-mingw32msvc-"
        prefix32 = "i686-w64-mingw32-"
        prefix64 = "x86_64-w64-mingw32-"
        # Fall back to the legacy MinGW prefix when Mingw-w32 is absent.
        if os.system(prefix32 + "gcc --version &>/dev/null") != 0:
            prefix32 = legacy
    env32 = os.getenv("MINGW32_PREFIX")
    if env32:
        prefix32 = env32
    env64 = os.getenv("MINGW64_PREFIX")
    if env64:
        prefix64 = env64
    return [
        ('mingw_prefix', 'Mingw Prefix', prefix32),
        ('mingw_prefix_64', 'Mingw Prefix 64 bits', prefix64),
    ]
def get_flags():
    """Platform-forced build flags: use the bundled freetype and openssl."""
    return [
        ('freetype', 'builtin'),  # use builtin freetype
        ('openssl', 'builtin'),  # use builtin openssl
    ]
def build_res_file(target, source, env):
    """SCons action: compile Windows .rc resource files with windres.

    - param target/source: parallel lists of SCons nodes (str() yields the
      file path).
    - param env: construction environment; env["bits"] selects the 32- or
      64-bit toolchain prefix.
    Returns 0 on success, 1 on any failure (SCons action convention).
    """
    if env["bits"] == "32":
        prefix = env['mingw_prefix']
    else:
        prefix = env['mingw_prefix_64']
    cmdbase = prefix + 'windres --include-dir . '
    import subprocess
    for src, tgt in zip(source, target):
        cmd = cmdbase + '-i ' + str(src) + ' -o ' + str(tgt)
        try:
            out = subprocess.Popen(cmd, shell=True,
                                   stderr=subprocess.PIPE).communicate()
            # windres reports problems on stderr; any output means failure.
            if len(out[1]):
                return 1
        except Exception:
            # Narrowed from a bare ``except:`` so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            return 1
    return 0
def configure(env):
    """Configure the SCons environment for the Windows platform.

    Two toolchains are supported:
    - MSVC, when building on Windows inside a Visual Studio environment
      (``VSINSTALLDIR`` set);
    - MinGW otherwise (native on Windows, or cross-compiling from posix).

    Fix: the three diagnostic prints in ``mySubProcess`` used Python-2
    ``print`` statements while the rest of this file uses the function
    form; converted to ``print(...)`` (valid on Python 2 and 3).
    """
    env.Append(CPPPATH=['#platform/windows'])
    env['is_mingw'] = False
    if (os.name == "nt" and os.getenv("VSINSTALLDIR") != None):
        # build using visual studio
        env['ENV']['TMP'] = os.environ['TMP']
        env.Append(CPPPATH=['#platform/windows/include'])
        env.Append(LIBPATH=['#platform/windows/lib'])

        if (env["freetype"] != "no"):
            env.Append(CCFLAGS=['/DFREETYPE_ENABLED'])
            env.Append(CPPPATH=['#tools/freetype'])
            env.Append(CPPPATH=['#tools/freetype/freetype/include'])

        if (env["target"] == "release"):
            env.Append(CCFLAGS=['/O2'])
            env.Append(LINKFLAGS=['/SUBSYSTEM:WINDOWS'])
            env.Append(LINKFLAGS=['/ENTRY:mainCRTStartup'])
        elif (env["target"] == "release_debug"):
            env.Append(CCFLAGS=['/O2', '/DDEBUG_ENABLED'])
            env.Append(LINKFLAGS=['/SUBSYSTEM:CONSOLE'])
        elif (env["target"] == "debug_release"):
            env.Append(CCFLAGS=['/Z7', '/Od'])
            env.Append(LINKFLAGS=['/DEBUG'])
            env.Append(LINKFLAGS=['/SUBSYSTEM:WINDOWS'])
            env.Append(LINKFLAGS=['/ENTRY:mainCRTStartup'])
        elif (env["target"] == "debug"):
            env.Append(CCFLAGS=['/Z7', '/DDEBUG_ENABLED', '/DDEBUG_MEMORY_ENABLED', '/DD3D_DEBUG_INFO', '/Od'])
            env.Append(LINKFLAGS=['/SUBSYSTEM:CONSOLE'])
            env.Append(LINKFLAGS=['/DEBUG'])

        env.Append(CCFLAGS=['/MT', '/Gd', '/GR', '/nologo'])
        env.Append(CXXFLAGS=['/TP'])
        env.Append(CPPFLAGS=['/DMSVC', '/GR', ])
        env.Append(CCFLAGS=['/I' + os.getenv("WindowsSdkDir") + "/Include"])
        env.Append(CCFLAGS=['/DWINDOWS_ENABLED'])
        env.Append(CCFLAGS=['/DRTAUDIO_ENABLED'])
        env.Append(CCFLAGS=['/DWIN32'])
        env.Append(CCFLAGS=['/DTYPED_METHOD_BIND'])
        env.Append(CCFLAGS=['/DGLES2_ENABLED'])
        env.Append(CCFLAGS=['/DGLEW_ENABLED'])
        LIBS = ['winmm', 'opengl32', 'dsound', 'kernel32', 'ole32', 'oleaut32', 'user32', 'gdi32', 'IPHLPAPI', 'Shlwapi', 'wsock32', 'shell32', 'advapi32', 'dinput8', 'dxguid']
        env.Append(LINKFLAGS=[p + env["LIBSUFFIX"] for p in LIBS])
        env.Append(LIBPATH=[os.getenv("WindowsSdkDir") + "/Lib"])

        if (os.getenv("DXSDK_DIR")):
            DIRECTX_PATH = os.getenv("DXSDK_DIR")
        else:
            DIRECTX_PATH = "C:/Program Files/Microsoft DirectX SDK (March 2009)"
        if (os.getenv("VCINSTALLDIR")):
            VC_PATH = os.getenv("VCINSTALLDIR")
        else:
            VC_PATH = ""

        env.Append(CCFLAGS=["/I" + p for p in os.getenv("INCLUDE").split(";")])
        env.Append(LIBPATH=[p for p in os.getenv("LIB").split(";")])
        env.Append(CCFLAGS=["/I" + DIRECTX_PATH + "/Include"])
        env.Append(LIBPATH=[DIRECTX_PATH + "/Lib/x86"])
        env['ENV'] = os.environ
        env["x86_opt_vc"] = True
    else:
        # Workaround for MinGW. See:
        # http://www.scons.org/wiki/LongCmdLinesOnWin32
        if (os.name == "nt"):
            import subprocess

            def mySubProcess(cmdline, env):
                # print("SPAWNED : " + cmdline)
                startupinfo = subprocess.STARTUPINFO()
                startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
                proc = subprocess.Popen(cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE, startupinfo=startupinfo, shell=False, env=env)
                data, err = proc.communicate()
                rv = proc.wait()
                if rv:
                    print("=====")
                    print(err)
                    print("=====")
                return rv

            def mySpawn(sh, escape, cmd, args, env):
                newargs = ' '.join(args[1:])
                cmdline = cmd + " " + newargs
                rv = 0
                if len(cmdline) > 32000 and cmd.endswith("ar"):
                    # Feed the archiver its members one at a time to stay
                    # under the Windows command-line length limit.
                    cmdline = cmd + " " + args[1] + " " + args[2] + " "
                    for i in range(3, len(args)):
                        rv = mySubProcess(cmdline + args[i], env)
                        if rv:
                            break
                else:
                    rv = mySubProcess(cmdline, env)
                return rv

            env['SPAWN'] = mySpawn

        # build using mingw
        if (os.name == "nt"):
            env['ENV']['TMP'] = os.environ['TMP']  # way to go scons, you can be so stupid sometimes
        else:
            env["PROGSUFFIX"] = env["PROGSUFFIX"] + ".exe"  # for linux cross-compilation

        mingw_prefix = ""
        if (env["bits"] == "default"):
            env["bits"] = "32"

        if (env["bits"] == "32"):
            env.Append(LINKFLAGS=['-static'])
            env.Append(LINKFLAGS=['-static-libgcc'])
            env.Append(LINKFLAGS=['-static-libstdc++'])
            mingw_prefix = env["mingw_prefix"]
        else:
            env.Append(LINKFLAGS=['-static'])
            mingw_prefix = env["mingw_prefix_64"]

        nulstr = ""
        if (os.name == "posix"):
            nulstr = ">/dev/null"
        else:
            nulstr = ">nul"

        # if os.system(mingw_prefix+"gcc --version"+nulstr)!=0:
        #     #not really super consistent but..
        #     print("Can't find Windows compiler: "+mingw_prefix)
        #     sys.exit(255)

        if (env["target"] == "release"):
            env.Append(CCFLAGS=['-O3', '-ffast-math', '-fomit-frame-pointer', '-msse2'])
            env.Append(LINKFLAGS=['-Wl,--subsystem,windows'])
        elif (env["target"] == "release_debug"):
            env.Append(CCFLAGS=['-O2', '-DDEBUG_ENABLED'])
        elif (env["target"] == "debug"):
            env.Append(CCFLAGS=['-g', '-Wall', '-DDEBUG_ENABLED', '-DDEBUG_MEMORY_ENABLED'])

        if (env["freetype"] != "no"):
            env.Append(CCFLAGS=['-DFREETYPE_ENABLED'])
            env.Append(CPPPATH=['#tools/freetype'])
            env.Append(CPPPATH=['#tools/freetype/freetype/include'])

        env["CC"] = mingw_prefix + "gcc"
        env['AS'] = mingw_prefix + "as"
        env['CXX'] = mingw_prefix + "g++"
        env['AR'] = mingw_prefix + "ar"
        env['RANLIB'] = mingw_prefix + "ranlib"
        env['LD'] = mingw_prefix + "g++"
        env["x86_opt_gcc"] = True

        # env['CC'] = "winegcc"
        # env['CXX'] = "wineg++"

        env.Append(CCFLAGS=['-DWINDOWS_ENABLED', '-mwindows'])
        env.Append(CPPFLAGS=['-DRTAUDIO_ENABLED'])
        env.Append(CCFLAGS=['-DGLES2_ENABLED', '-DGLEW_ENABLED'])
        env.Append(LIBS=['mingw32', 'opengl32', 'dsound', 'ole32', 'd3d9', 'winmm', 'gdi32', 'iphlpapi', 'shlwapi', 'wsock32', 'kernel32', 'oleaut32', 'dinput8', 'dxguid'])

        # if (env["bits"]=="32"):
        #     env.Append(LIBS=['gcc_s'])
        #     #--with-arch=i686
        #     env.Append(CPPFLAGS=['-march=i686'])
        #     env.Append(LINKFLAGS=['-march=i686'])

        # 'd3dx9d'
        env.Append(CPPFLAGS=['-DMINGW_ENABLED'])
        env.Append(LINKFLAGS=['-g'])

        # resrc
        env['is_mingw'] = True
        env.Append(BUILDERS={'RES': env.Builder(action=build_res_file, suffix='.o', src_suffix='.rc')})

    import methods
    env.Append(BUILDERS={'GLSL120': env.Builder(action=methods.build_legacygl_headers, suffix='glsl.h', src_suffix='.glsl')})
    env.Append(BUILDERS={'GLSL': env.Builder(action=methods.build_glsl_headers, suffix='glsl.h', src_suffix='.glsl')})
    env.Append(BUILDERS={'HLSL9': env.Builder(action=methods.build_hlsl_dx9_headers, suffix='hlsl.h', src_suffix='.hlsl')})
    env.Append(BUILDERS={'GLSL120GLES': env.Builder(action=methods.build_gles2_headers, suffix='glsl.h', src_suffix='.glsl')})
| |
# This file is part of the MapProxy project.
# Copyright (C) 2016 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import errno
import hashlib
import os
import shutil
import struct
from mapproxy.image import ImageSource
from mapproxy.cache.base import TileCacheBase, tile_buffer
from mapproxy.util.fs import ensure_directory, write_atomic
from mapproxy.util.lock import FileLock
from mapproxy.compat import BytesIO
import logging
log = logging.getLogger(__name__)
class CompactCacheV1(TileCacheBase):
    """Tile cache backed by ArcGIS compact cache (v1) bundle files."""

    # The bundle format stores no per-tile timestamps.
    supports_timestamp = False

    def __init__(self, cache_dir):
        self.lock_cache_id = 'compactcache-' + hashlib.md5(cache_dir.encode('utf-8')).hexdigest()
        self.cache_dir = cache_dir

    def _get_bundle(self, tile_coord):
        """Return the Bundle responsible for *tile_coord* (x, y, z)."""
        x, y, z = tile_coord
        # Bundle origin: tile coordinate rounded down to the bundle grid.
        col = x // BUNDLEX_GRID_WIDTH * BUNDLEX_GRID_WIDTH
        row = y // BUNDLEX_GRID_HEIGHT * BUNDLEX_GRID_HEIGHT
        level_dir = os.path.join(self.cache_dir, 'L%02d' % z)
        name = 'R%04xC%04x' % (row, col)
        return Bundle(os.path.join(level_dir, name), offset=(col, row))

    def is_cached(self, tile):
        if tile.coord is None or tile.source:
            return True
        return self._get_bundle(tile.coord).is_cached(tile)

    def store_tile(self, tile):
        if tile.stored:
            return True
        return self._get_bundle(tile.coord).store_tile(tile)

    def load_tile(self, tile, with_metadata=False):
        if tile.source or tile.coord is None:
            return True
        return self._get_bundle(tile.coord).load_tile(tile)

    def remove_tile(self, tile):
        if tile.coord is None:
            return True
        return self._get_bundle(tile.coord).remove_tile(tile)

    def load_tile_metadata(self, tile):
        # No stored timestamps; report "unknown" for existing tiles.
        if self.load_tile(tile):
            tile.timestamp = -1

    def remove_level_tiles_before(self, level, timestamp):
        # Only wholesale removal of a complete level is supported.
        if timestamp != 0:
            return False
        shutil.rmtree(os.path.join(self.cache_dir, 'L%02d' % level),
                      ignore_errors=True)
        return True
# File extensions of the two files that make up one bundle:
# the tile data file (.bundle) and its fixed-size offset index (.bundlx).
BUNDLE_EXT = '.bundle'
BUNDLEX_EXT = '.bundlx'
class Bundle(object):
    """One compact-cache bundle: a tile data file plus its offset index."""

    def __init__(self, base_filename, offset):
        # base_filename has no extension; BUNDLE_EXT/BUNDLEX_EXT are appended.
        self.base_filename = base_filename
        self.lock_filename = base_filename + '.lck'
        self.offset = offset

    def _rel_tile_coord(self, tile_coord):
        """Tile position relative to this bundle's origin."""
        return (tile_coord[0] % BUNDLEX_GRID_WIDTH,
                tile_coord[1] % BUNDLEX_GRID_HEIGHT)

    def is_cached(self, tile):
        if tile.source or tile.coord is None:
            return True
        index = BundleIndex(self.base_filename + BUNDLEX_EXT)
        rel_x, rel_y = self._rel_tile_coord(tile.coord)
        tile_pos = index.tile_offset(rel_x, rel_y)
        if tile_pos == 0:
            # No index entry -> tile was never stored.
            return False
        data_file = BundleData(self.base_filename + BUNDLE_EXT, self.offset)
        return data_file.read_size(tile_pos) != 0

    def store_tile(self, tile):
        if tile.stored:
            return True
        with tile_buffer(tile) as buf:
            content = buf.read()
            with FileLock(self.lock_filename):
                data_file = BundleData(self.base_filename + BUNDLE_EXT, self.offset)
                index = BundleIndex(self.base_filename + BUNDLEX_EXT)
                rel_x, rel_y = self._rel_tile_coord(tile.coord)
                prev = index.tile_offset(rel_x, rel_y)
                new_offset, size = data_file.append_tile(content, prev_offset=prev)
                index.update_tile_offset(rel_x, rel_y, offset=new_offset, size=size)
        return True

    def load_tile(self, tile, with_metadata=False):
        if tile.source or tile.coord is None:
            return True
        index = BundleIndex(self.base_filename + BUNDLEX_EXT)
        rel_x, rel_y = self._rel_tile_coord(tile.coord)
        tile_pos = index.tile_offset(rel_x, rel_y)
        if tile_pos == 0:
            return False
        data_file = BundleData(self.base_filename + BUNDLE_EXT, self.offset)
        content = data_file.read_tile(tile_pos)
        if not content:
            return False
        tile.source = ImageSource(BytesIO(content))
        return True

    def remove_tile(self, tile):
        if tile.coord is None:
            return True
        with FileLock(self.lock_filename):
            index = BundleIndex(self.base_filename + BUNDLEX_EXT)
            rel_x, rel_y = self._rel_tile_coord(tile.coord)
            index.remove_tile_offset(rel_x, rel_y)
        return True
# A bundle covers a 128x128 grid of tiles.
BUNDLEX_GRID_WIDTH = 128
BUNDLEX_GRID_HEIGHT = 128
# The .bundlx index layout: 16-byte header, one 5-byte little-endian
# offset entry per tile, then a 16-byte footer.
BUNDLEX_HEADER_SIZE = 16
BUNDLEX_HEADER = b'\x03\x00\x00\x00\x10\x00\x00\x00\x00\x40\x00\x00\x05\x00\x00\x00'
BUNDLEX_FOOTER_SIZE = 16
BUNDLEX_FOOTER = b'\x00\x00\x00\x00\x10\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00'
class BundleIndex(object):
    """Reader/writer for the .bundlx offset index of one bundle."""

    def __init__(self, filename):
        self.filename = filename
        # defer initialization to update/remove calls to avoid
        # index creation on is_cached (prevents new files in read-only caches)
        self._initialized = False

    def _init_index(self):
        """Create the index file with default entries if it is missing."""
        self._initialized = True
        if os.path.exists(self.filename):
            return
        ensure_directory(self.filename)
        buf = BytesIO()
        buf.write(BUNDLEX_HEADER)
        # One 5-byte entry per tile, pointing at the default slot
        # positions directly after the bundle header.
        for i in range(BUNDLEX_GRID_WIDTH * BUNDLEX_GRID_HEIGHT):
            buf.write(struct.pack('<Q', i * 4 + BUNDLE_HEADER_SIZE)[:5])
        buf.write(BUNDLEX_FOOTER)
        write_atomic(self.filename, buf.getvalue())

    def _tile_offset(self, x, y):
        # Entries are 5 bytes each, stored column-major.
        return BUNDLEX_HEADER_SIZE + (x * BUNDLEX_GRID_HEIGHT + y) * 5

    def tile_offset(self, x, y):
        """Return the data-file offset for tile (x, y), 0 when unknown."""
        pos = self._tile_offset(x, y)
        try:
            with open(self.filename, 'rb') as f:
                f.seek(pos)
                # Pad the 5-byte entry to 8 bytes for '<Q'.
                return struct.unpack('<Q', f.read(5) + b'\x00\x00\x00')[0]
        except IOError as ex:
            if ex.errno == errno.ENOENT:
                # missing index file -> missing tile
                return 0
            raise

    def update_tile_offset(self, x, y, offset, size):
        self._init_index()
        packed = struct.pack('<Q', offset)[:5]
        with open(self.filename, 'r+b') as f:
            f.seek(self._tile_offset(x, y), os.SEEK_SET)
            f.write(packed)

    def remove_tile_offset(self, x, y):
        self._init_index()
        with open(self.filename, 'r+b') as f:
            f.seek(self._tile_offset(x, y))
            f.write(b'\x00' * 5)
# The bundle file has a header with 15 little-endian long values (60 bytes).
# NOTE: the fixed values might be some flags for image options (format, aliasing)
# all files available for testing had the same values however.
BUNDLE_HEADER_SIZE = 60
# Template header; indices 11-14 are patched per bundle in BundleData._init_bundle.
BUNDLE_HEADER = [
    3        , # 0, fixed
    16384    , # 1, max. num of tiles 128*128 = 16384
    16       , # 2, size of largest tile
    5        , # 3, fixed
    0        , # 4, num of tiles in bundle (*4)
    0        , # 5, fixed
    60+65536 , # 6, bundle size
    0        , # 7, fixed
    40       , # 8 fixed
    0        , # 9, fixed
    16       , # 10, fixed
    0        , # 11, y0
    127      , # 12, y1
    0        , # 13, x0
    127      , # 14, x1
]
BUNDLE_HEADER_STRUCT_FORMAT = '<lllllllllllllll'
class BundleData(object):
    # Reader/writer for the .bundle tile data file of one bundle.

    def __init__(self, filename, tile_offsets):
        # tile_offsets: (column, row) origin of this bundle in the level grid.
        self.filename = filename
        self.tile_offsets = tile_offsets
        if not os.path.exists(self.filename):
            self._init_bundle()

    def _init_bundle(self):
        """Create a fresh bundle file: 60-byte header plus one zeroed
        4-byte size slot for each of the 128*128 tiles."""
        ensure_directory(self.filename)
        header = list(BUNDLE_HEADER)
        # Patch the tile-coordinate extent of this bundle into the header
        # (x0/y0 = bundle origin, x1/y1 = origin + 127).
        header[13], header[11] = self.tile_offsets
        header[14], header[12] = header[13]+127, header[11]+127
        write_atomic(self.filename,
            struct.pack(BUNDLE_HEADER_STRUCT_FORMAT, *header) +
            # zero-size entry for each tile
            (b'\x00' * (BUNDLEX_GRID_HEIGHT * BUNDLEX_GRID_WIDTH * 4)))

    def read_size(self, offset):
        """Return the stored tile size (4-byte LE prefix) at *offset*."""
        with open(self.filename, 'rb') as f:
            f.seek(offset)
            return struct.unpack('<L', f.read(4))[0]

    def read_tile(self, offset):
        """Return tile bytes stored at *offset*, or False for empty slots."""
        with open(self.filename, 'rb') as f:
            f.seek(offset)
            size = struct.unpack('<L', f.read(4))[0]
            if size <= 0:
                return False
            return f.read(size)

    def append_tile(self, data, prev_offset):
        """Append *data* at the end of the file and return (offset, size).

        Tiles are never overwritten in place; a re-stored tile leaves its
        old bytes unreferenced and only updates the header counters.
        """
        size = len(data)
        is_new_tile = True
        with open(self.filename, 'r+b') as f:
            # A non-zero previous size at prev_offset means this replaces
            # an existing tile, so the tile counter must not grow.
            if prev_offset:
                f.seek(prev_offset, os.SEEK_SET)
                if f.tell() == prev_offset:
                    if struct.unpack('<L', f.read(4))[0] > 0:
                        is_new_tile = False
            f.seek(0, os.SEEK_END)
            offset = f.tell()
            # NOTE(review): for an empty file only 16 header bytes are
            # written here although BUNDLE_HEADER_SIZE is 60; _init_bundle
            # normally creates the file first, so this path looks
            # unreachable/suspect — confirm before relying on it.
            if offset == 0:
                f.write(b'\x00' * 16) # header
                offset = 16
            f.write(struct.pack('<L', size))
            f.write(data)
            # update header: largest tile, total bundle size, tile count.
            f.seek(0, os.SEEK_SET)
            header = list(struct.unpack(BUNDLE_HEADER_STRUCT_FORMAT, f.read(60)))
            header[2] = max(header[2], size)
            header[6] += size + 4
            if is_new_tile:
                header[4] += 4
            f.seek(0, os.SEEK_SET)
            f.write(struct.pack(BUNDLE_HEADER_STRUCT_FORMAT, *header))
        return offset, size
| |
# package org.apache.helix.manager.zk
#from org.apache.helix.manager.zk import *
#from java.io import File
#from java.util import List
#from java.util import Map
#from java.util import Set
#from java.util.concurrent import ConcurrentHashMap
#from java.util.concurrent import CopyOnWriteArraySet
#from org.I0Itec.zkclient import IZkChildListener
#from org.I0Itec.zkclient import IZkDataListener
#from org.I0Itec.zkclient import IZkStateListener
#from org.I0Itec.zkclient.exception import ZkNoNodeException
#from org.apache.log4j import Logger
#from org.apache.zookeeper.Watcher.Event import EventType
#from org.apache.zookeeper.Watcher.Event import KeeperState
#from org.apache.zookeeper.data import Stat
from org.apache.helix.AccessOption import AccessOption
from org.apache.helix.BaseDataAccessor import BaseDataAccessor
from org.apache.helix.manager.zk.ZkCacheEventThread import ZkCacheEvent
from org.apache.helix.store.HelixPropertyListener import HelixPropertyListener
from org.apache.helix.store.zk.ZNode import ZNode
# Parameterized type: <T>
# NOTE(review): the block below is machine-generated java2python output for
# org.apache.helix.manager.zk.ZkCallbackCache and is NOT valid Python.
# Java constructs survive untranslated: generic base classes (Cache<T>),
# '&&', 'switch'/'case', 'synchronized', 'final:' (from Java 'finally'),
# anonymous inner classes and mangled for-each loops. It is preserved
# verbatim as a transcription of the Java source; it cannot import or run.
# Parameterized type: <T>
class ZkCallbackCache(Cache<T>, IZkChildListener, IZkDataListener, IZkStateListener):
    """
    Java modifiers:
        private static
    Type:
        Logger
    """
    LOG = Logger.getLogger(ZkCallbackCache.class)

    """
    Parameters:
        BaseDataAccessor<T> accessor
        String chrootPath
        List<String> paths
        ZkCacheEventThread eventThread
    """
    def __init__(self, accessor, chrootPath, paths, eventThread):
        super()
        self._accessor = accessor
        self._chrootPath = chrootPath
        self._listener = ConcurrentHashMap<String, Set<HelixPropertyListener>>()
        self._eventThread = eventThread
        # NOTE(review): garbled translation of a Java for-each loop.
        if paths != None && not paths.isEmpty():
            for # String
            path = None
            in paths) updateRecursive(path)

    def update(self, path, data, stat):
        """
        Returns void
        Parameters:
            path: Stringdata: Tstat: Stat
        @Override
        """
        # String
        parentPath = File(path).getParent()
        # String
        childName = File(path).getName()
        addToParentChildSet(parentPath, childName)
        # ZNode
        znode = _cache.get(path)
        if znode == None:
            _cache.put(path, ZNode(path, data, stat))
            fireEvents(path, EventType.NodeCreated)
        else:
            # Stat
            oldStat = znode.getStat()
            znode.setData(data)
            znode.setStat(stat)
            # A changed czxid means the node was deleted and re-created.
            if oldStat.getCzxid() != stat.getCzxid():
                fireEvents(path, EventType.NodeDeleted)
                fireEvents(path, EventType.NodeCreated)
            else:
                if oldStat.getVersion() != stat.getVersion():
                    fireEvents(path, EventType.NodeDataChanged)

    def updateRecursive(self, path):
        """
        Returns void
        Parameters:
            path: String
        @Override
        """
        if path == None:
            return
        try:
            _lock.writeLock().lock()
            try:
                _accessor.subscribeDataChanges(path, self)
                # Stat
                stat = Stat()
                # T
                readData = _accessor.get(path, stat, AccessOption.THROW_EXCEPTION_IFNOTEXIST)
                update(path, readData, stat)
            except ZkNoNodeException, e:
                # ZNode
                znode = _cache.get(path)
                # List<String>
                childNames = _accessor.subscribeChildChanges(path, self)
                if childNames != None && not childNames.isEmpty():
                    for # String
                    childName = None
                    in childNames) if not znode.hasChild(childName):
                        # String
                        childPath = path + "/" + childName
                        znode.addChild(childName)
                        updateRecursive(childPath)
        # NOTE(review): 'final:' is a mistranslation of Java 'finally'.
        final:
            _lock.writeLock().unlock()

    def handleChildChange(self, parentPath, currentChilds):
        """
        Returns void
        Parameters:
            parentPath: StringcurrentChilds: List<String>
        @Override
        Throws:
            Exception
        """
        if currentChilds == None:
            return
        updateRecursive(parentPath)

    def handleDataChange(self, dataPath, data):
        """
        Returns void
        Parameters:
            dataPath: Stringdata: Object
        @Override
        Throws:
            Exception
        """
        try:
            _lock.writeLock().lock()
            # Stat
            stat = Stat()
            # Object
            readData = _accessor.get(dataPath, stat, AccessOption.THROW_EXCEPTION_IFNOTEXIST)
            # ZNode
            znode = _cache.get(dataPath)
            if znode != None:
                # Stat
                oldStat = znode.getStat()
                znode.setData(readData)
                znode.setStat(stat)
                if oldStat.getCzxid() != stat.getCzxid():
                    fireEvents(dataPath, EventType.NodeDeleted)
                    fireEvents(dataPath, EventType.NodeCreated)
                else:
                    if oldStat.getVersion() != stat.getVersion():
                        fireEvents(dataPath, EventType.NodeDataChanged)
            # NOTE(review): empty else branch lost its Java body in translation.
            else:
        final:
            _lock.writeLock().unlock()

    def handleDataDeleted(self, dataPath):
        """
        Returns void
        Parameters:
            dataPath: String
        @Override
        Throws:
            Exception
        """
        try:
            _lock.writeLock().lock()
            _accessor.unsubscribeDataChanges(dataPath, self)
            _accessor.unsubscribeChildChanges(dataPath, self)
            # String
            parentPath = File(dataPath).getParent()
            # String
            name = File(dataPath).getName()
            removeFromParentChildSet(parentPath, name)
            _cache.remove(dataPath)
            fireEvents(dataPath, EventType.NodeDeleted)
        final:
            _lock.writeLock().unlock()

    def handleStateChanged(self, state):
        """
        Returns void
        Parameters:
            state: KeeperState
        @Override
        Throws:
            Exception
        """

    def handleNewSession(self):
        """
        Returns void
        @Override
        Throws:
            Exception
        """

    def subscribe(self, path, listener):
        """
        Returns void
        Parameters:
            path: Stringlistener: HelixPropertyListener
        """
        # NOTE(review): Java 'synchronized' block left untranslated.
        synchronized (_listener) # Set<HelixPropertyListener>
        listeners = _listener.get(path)
        if listeners == None:
            listeners = CopyOnWriteArraySet<HelixPropertyListener>()
            _listener.put(path, listeners)
        listeners.add(listener)

    def unsubscribe(self, path, childListener):
        """
        Returns void
        Parameters:
            path: StringchildListener: HelixPropertyListener
        """
        synchronized (_listener) # Set<HelixPropertyListener>
        listeners = _listener.get(path)
        if listeners != None:
            listeners.remove(childListener)

    def fireEvents(self, path, type):
        """
        Returns void
        Parameters:
            path: Stringtype: EventType
        Java modifiers:
            private
        """
        # String
        tmpPath = path
        # String
        clientPath = (java2python_runtime.ternary(_chrootPath == None, path, (java2python_runtime.ternary((_chrootPath == path), "/", path.substring(_chrootPath.length())))))
        # Walk up the path hierarchy, notifying listeners at each level.
        while (tmpPath != None:
            # Set<HelixPropertyListener>
            listeners = _listener.get(tmpPath)
            if listeners != None && not listeners.isEmpty():
                for # HelixPropertyListener
                listener = None
                in listeners) try:
                    # NOTE(review): Java switch over EventType with anonymous
                    # ZkCacheEvent inner classes, left untranslated.
                    switch(type) {
                    case NodeDataChanged:
                        _eventThread.send(ZkCacheEvent("dataChange on " + path + " send to " + listener) {
                            def run(self):
                                """
                                Returns void
                                @Override
                                Throws:
                                    Exception
                                """
                                listener.onDataChange(clientPath)
                        })
                        break
                    case NodeCreated:
                        _eventThread.send(ZkCacheEvent("dataCreate on " + path + " send to " + listener) {
                            def run(self):
                                """
                                Returns void
                                @Override
                                Throws:
                                    Exception
                                """
                                listener.onDataCreate(clientPath)
                        })
                        break
                    case NodeDeleted:
                        _eventThread.send(ZkCacheEvent("dataDelete on " + path + " send to " + listener) {
                            def run(self):
                                """
                                Returns void
                                @Override
                                Throws:
                                    Exception
                                """
                                listener.onDataDelete(clientPath)
                        })
                        break
                    default:
                        break
                    }
                except Exception, e:
                    LOG.error("Exception in handle events."+ str(e))
            tmpPath = File(tmpPath).getParent()
| |
#!/usr/bin/python
# create_glider_netcdf.py - A command line script for generating NetCDF files
# from a subset of glider binary data files.
#
# By: Michael Lindemuth <mlindemu@usf.edu>
# University of South Florida
# College of Marine Science
# Ocean Technology Group
import argparse
from glider_binary_data_reader import (
GliderBDReader,
MergedGliderBDReader
)
from glider_binary_data_reader.methods import parse_glider_filename
from glider_netcdf_writer import (
open_glider_netcdf,
GLIDER_UV_DATATYPE_KEYS
)
import sys
import os
import json
from datetime import datetime
import numpy as np
from glider_utils.yo import find_yo_extrema
from glider_utils.yo.filters import default_filter
from glider_utils.gps import interpolate_gps
def create_reader(flight_path, science_path):
    """Create a glider binary data reader for the given file paths.

    Returns a GliderBDReader when only one of flight/science is given,
    or a MergedGliderBDReader combining both.

    Raises ValueError when neither path is given. (The original code
    fell through to an unbound local variable and crashed with a
    NameError in that case.)
    """
    if flight_path is None and science_path is None:
        raise ValueError('Must provide a flight path, science path, or both')

    if science_path is None:
        return GliderBDReader([flight_path])
    if flight_path is None:
        return GliderBDReader([science_path])

    flight_reader = GliderBDReader([flight_path])
    science_reader = GliderBDReader([science_path])
    return MergedGliderBDReader(flight_reader, science_reader)
def find_profiles(flight_path, science_path, time_name, depth_name):
    """Locate individual profiles (dives/climbs) in the depth time series."""
    samples = [
        [line[time_name], line[depth_name]]
        for line in create_reader(flight_path, science_path)
        if depth_name in line
    ]
    samples = np.array(samples)
    extrema = find_yo_extrema(samples[:, 0], samples[:, 1])
    return default_filter(extrema)
def get_file_set_gps(flight_path, science_path, time_name, gps_prefix):
    """Collect the GPS track for a file set, interpolating missing fixes.

    Returns an array of rows [timestamp, lat, lon].
    """
    lat_key = gps_prefix + 'lat-lat'
    lon_key = gps_prefix + 'lon-lon'
    track = []
    for line in create_reader(flight_path, science_path):
        if lat_key in line:
            track.append([line[time_name], line[lat_key], line[lon_key]])
        else:
            # No fix on this line; NaNs are filled by interpolation below.
            track.append([line[time_name], np.nan, np.nan])
    track = np.array(track)
    track[:, 1], track[:, 2] = interpolate_gps(
        track[:, 0], track[:, 1], track[:, 2]
    )
    return track
def fill_gps(line, interp_gps, time_name, gps_prefix):
    """Fill missing GPS lat/lon in *line* from the interpolated track.

    Lines that already carry a fix are returned unchanged.
    """
    lat_key = gps_prefix + 'lat-lat'
    lon_key = gps_prefix + 'lon-lon'
    if lat_key not in line:
        # Look up the track row matching this line's timestamp.
        row_match = interp_gps[:, 0] == line[time_name]
        line[lat_key] = interp_gps[row_match, 1][0]
        line[lon_key] = interp_gps[row_match, 2][0]
    return line
def init_netcdf(file_path, attrs, segment_id, profile_id):
    """Create a fresh NetCDF file and write its deployment metadata."""
    # Remove any stale output so we always start from a clean dataset.
    if os.path.isfile(file_path):
        os.remove(file_path)

    with open_glider_netcdf(file_path, 'w') as glider_nc:
        glider_nc.set_global_attributes(attrs['global'])
        glider_nc.set_trajectory_id(
            attrs['deployment']['glider'],
            attrs['deployment']['trajectory_date']
        )
        glider_nc.set_platform(attrs['deployment']['platform'])
        glider_nc.set_instruments(attrs['instruments'])
        glider_nc.set_segment_id(segment_id)
        glider_nc.set_profile_id(profile_id)
def find_segment_id(flight_path, science_path):
    """Derive the segment ID from the flight (preferred) or science filename."""
    source_file = flight_path if flight_path is not None else science_path
    return parse_glider_filename(source_file)['segment']
def fill_uv_variables(dst_glider_nc, uv_values):
    """Write previously collected UV scalar values into a NetCDF writer."""
    for name in uv_values:
        dst_glider_nc.set_scalar(name, uv_values[name])
def backfill_uv_variables(src_glider_nc, empty_uv_processed_paths):
    """Copy UV scalars from *src_glider_nc* into earlier profile files
    that were written before any UV data was available.

    Returns the collected UV values for reuse on later profiles.
    """
    uv_values = {
        name: src_glider_nc.get_scalar(name)
        for name in GLIDER_UV_DATATYPE_KEYS
    }
    for path in empty_uv_processed_paths:
        with open_glider_netcdf(path, 'a') as dst:
            fill_uv_variables(dst, uv_values)
    return uv_values
def create_arg_parser():
    """Build the command line argument parser for this script.

    Fix: help text typo "nameing" -> "naming".
    """
    parser = argparse.ArgumentParser(
        description='Parses a set of glider binary data files to a '
                    'single NetCDF file according to configurations '
                    'for institution, deployment, glider, and datatypes.'
    )
    parser.add_argument(
        'glider_name',
        help='Name of glider that generated given binary files.'
    )
    parser.add_argument(
        'glider_config_path',
        help='Path to configuration files for institution.'
    )
    parser.add_argument(
        'output_path',
        help='Path to file for NetCDF output.'
    )
    parser.add_argument(
        '-m', '--mode',
        help="Set the mode for the file naming convention (rt or delayed?)",
        default="delayed"
    )
    # nargs=1 means a provided value arrives as a one-element list.
    parser.add_argument(
        '--segment_id', nargs=1,
        help='Set the segment ID',
        default=None
    )
    parser.add_argument(
        '-t', '--time',
        help="Set time parameter to use for profile recognition",
        default="timestamp"
    )
    parser.add_argument(
        '-d', '--depth',
        help="Set depth parameter to use for profile recognition",
        default="m_depth-m"
    )
    parser.add_argument(
        '-g', '--gps_prefix',
        help="Set prefix for gps parameters to use for location estimation",
        default="m_gps_"
    )
    parser.add_argument(
        '-f', '--flight',
        help="Flight data file to process",
        default=None
    )
    parser.add_argument(
        '-s', '--science',
        help="Science data file to process",
        default=None
    )
    return parser
def read_attrs(glider_config_path, glider_name):
    """Load global, deployment and instrument attribute configs (JSON).

    Deployment-level global attributes override institution defaults.
    """

    def load_json(*path_parts):
        with open(os.path.join(glider_config_path, *path_parts), 'r') as fp:
            return json.load(fp)

    attrs = {
        'global': load_json('global_attributes.json'),
        'deployment': load_json(glider_name, 'deployment.json'),
        'instruments': load_json(glider_name, 'instruments.json'),
    }
    attrs['global'].update(attrs['deployment']['global_attributes'])
    return attrs
def process_dataset(args, attrs):
    """Process one glider segment into per-profile NetCDF files.

    Walks the merged flight/science data stream, opens a new NetCDF file
    at each profile boundary, backfills GPS fixes and UV scalar values.

    Fixes: Python-2-only syntax replaced with forms valid on Python 2.6+
    and Python 3 (``except ... as``, ``print(...)``, ``next(reader)``);
    a duplicated ``profile = profiles[...]`` statement removed.
    """
    flight_path = args.flight
    science_path = args.science

    # Find profile breaks
    profiles = find_profiles(flight_path, science_path, args.time, args.depth)

    # Interpolate GPS
    interp_gps = get_file_set_gps(
        flight_path, science_path, args.time, args.gps_prefix
    )

    # Create NetCDF Files for Each Profile
    profile_id = 0
    profile_end = 0
    file_path = None
    uv_values = None
    empty_uv_processed_paths = []
    reader = create_reader(flight_path, science_path)
    for line in reader:
        if profile_end < line['timestamp']:
            # Open new NetCDF
            begin_time = datetime.fromtimestamp(line['timestamp'])
            filename = "%s_%s_%s.nc" % (
                args.glider_name,
                begin_time.isoformat(),
                args.mode
            )
            file_path = os.path.join(
                args.output_path,
                filename
            )

            # NOTE: Store 1 based profile id
            init_netcdf(file_path, attrs, args.segment_id, profile_id + 1)
            profile = profiles[profiles[:, 2] == profile_id]
            profile_end = max(profile[:, 0])

        with open_glider_netcdf(file_path, 'a') as glider_nc:
            while line['timestamp'] <= profile_end:
                line = fill_gps(line, interp_gps, args.time, args.gps_prefix)
                glider_nc.stream_dict_insert(line)
                try:
                    # next() works on Python 2 and 3, unlike .next()
                    line = next(reader)
                except StopIteration:
                    break

            # Handle UV Variables
            if glider_nc.contains('time_uv'):
                uv_values = backfill_uv_variables(
                    glider_nc, empty_uv_processed_paths
                )
            elif uv_values is not None:
                fill_uv_variables(glider_nc, uv_values)
                del empty_uv_processed_paths[:]
            else:
                empty_uv_processed_paths.append(file_path)

            glider_nc.update_profile_vars()
            try:
                glider_nc.calculate_salinity()
                glider_nc.calculate_density()
            except Exception as ex:
                # Best effort: derived variables are optional.
                print("(%s)- %s" % (file_path, ex))

        profile_id += 1
def main():
    """Entry point: validate arguments, then run the dataset processor."""
    args = create_arg_parser().parse_args()

    # At least one input file is required.
    if args.flight is None and args.science is None:
        raise ValueError('Must specify flight, science or both paths')

    # When both files are given, they must belong to the same segment.
    if args.flight is not None and args.science is not None:
        flight_prefix = os.path.split(args.flight)[1].rsplit('.')[0]
        science_prefix = os.path.split(args.science)[1].rsplit('.')[0]
        if flight_prefix != science_prefix:
            raise ValueError('Flight and science file names must match')

    # Fill in segment ID
    if args.segment_id is None:
        args.segment_id = find_segment_id(args.flight, args.science)

    attrs = read_attrs(args.glider_config_path, args.glider_name)
    process_dataset(args, attrs)

    return 0
# Script entry point: process exit status comes from main()'s return value.
if __name__ == '__main__':
    sys.exit(main())
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._factories_operations import build_configure_factory_repo_request, build_create_or_update_request, build_delete_request, build_get_data_plane_access_request, build_get_git_hub_access_token_request, build_get_request, build_list_by_resource_group_request, build_list_request, build_update_request
# Generic type of the deserialized response object.
T = TypeVar('T')
# Optional callback applied to (pipeline response, deserialized object, headers).
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class FactoriesOperations:
"""FactoriesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.datafactory.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Pipeline client used to send requests to the service.
        self._client = client
        # Request body serializer / response deserializer.
        self._serialize = serializer
        self._deserialize = deserializer
        # Service client configuration (subscription id, credentials, ...).
        self._config = config
    @distributed_trace
    def list(
        self,
        **kwargs: Any
    ) -> AsyncIterable["_models.FactoryListResponse"]:
        """Lists factories under the specified subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either FactoryListResponse or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.datafactory.models.FactoryListResponse]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.FactoryListResponse"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # Build the first request from the operation template, or follow
            # the service-provided continuation link on later pages.
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                # Paging continuation requests are always plain GETs.
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page; return (continuation token, page items).
            deserialized = self._deserialize("FactoryListResponse", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch one page; map non-200 status codes to typed errors.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DataFactory/factories'}  # type: ignore
@distributed_trace_async
async def configure_factory_repo(
self,
location_id: str,
factory_repo_update: "_models.FactoryRepoUpdate",
**kwargs: Any
) -> "_models.Factory":
"""Updates a factory's repo information.
:param location_id: The location identifier.
:type location_id: str
:param factory_repo_update: Update factory repo request definition.
:type factory_repo_update: ~azure.mgmt.datafactory.models.FactoryRepoUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Factory, or the result of cls(response)
:rtype: ~azure.mgmt.datafactory.models.Factory
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Factory"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(factory_repo_update, 'FactoryRepoUpdate')
request = build_configure_factory_repo_request(
subscription_id=self._config.subscription_id,
location_id=location_id,
content_type=content_type,
json=_json,
template_url=self.configure_factory_repo.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Factory', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
configure_factory_repo.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DataFactory/locations/{locationId}/configureFactoryRepo'} # type: ignore
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.FactoryListResponse"]:
"""Lists factories.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either FactoryListResponse or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.datafactory.models.FactoryListResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FactoryListResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("FactoryListResponse", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories'} # type: ignore
@distributed_trace_async
async def create_or_update(
self,
resource_group_name: str,
factory_name: str,
factory: "_models.Factory",
if_match: Optional[str] = None,
**kwargs: Any
) -> "_models.Factory":
"""Creates or updates a factory.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param factory_name: The factory name.
:type factory_name: str
:param factory: Factory resource definition.
:type factory: ~azure.mgmt.datafactory.models.Factory
:param if_match: ETag of the factory entity. Should only be specified for update, for which it
should match existing entity or can be * for unconditional update.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Factory, or the result of cls(response)
:rtype: ~azure.mgmt.datafactory.models.Factory
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Factory"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(factory, 'Factory')
request = build_create_or_update_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
factory_name=factory_name,
content_type=content_type,
json=_json,
if_match=if_match,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Factory', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}'} # type: ignore
@distributed_trace_async
async def update(
self,
resource_group_name: str,
factory_name: str,
factory_update_parameters: "_models.FactoryUpdateParameters",
**kwargs: Any
) -> "_models.Factory":
"""Updates a factory.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param factory_name: The factory name.
:type factory_name: str
:param factory_update_parameters: The parameters for updating a factory.
:type factory_update_parameters: ~azure.mgmt.datafactory.models.FactoryUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Factory, or the result of cls(response)
:rtype: ~azure.mgmt.datafactory.models.Factory
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Factory"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(factory_update_parameters, 'FactoryUpdateParameters')
request = build_update_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
factory_name=factory_name,
content_type=content_type,
json=_json,
template_url=self.update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Factory', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
factory_name: str,
if_none_match: Optional[str] = None,
**kwargs: Any
) -> Optional["_models.Factory"]:
"""Gets a factory.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param factory_name: The factory name.
:type factory_name: str
:param if_none_match: ETag of the factory entity. Should only be specified for get. If the ETag
matches the existing entity tag, or if * was provided, then no content will be returned.
:type if_none_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Factory, or the result of cls(response)
:rtype: ~azure.mgmt.datafactory.models.Factory or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.Factory"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
factory_name=factory_name,
if_none_match=if_none_match,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 304]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Factory', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}'} # type: ignore
@distributed_trace_async
async def delete(
self,
resource_group_name: str,
factory_name: str,
**kwargs: Any
) -> None:
"""Deletes a factory.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param factory_name: The factory name.
:type factory_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
factory_name=factory_name,
template_url=self.delete.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}'} # type: ignore
@distributed_trace_async
async def get_git_hub_access_token(
self,
resource_group_name: str,
factory_name: str,
git_hub_access_token_request: "_models.GitHubAccessTokenRequest",
**kwargs: Any
) -> "_models.GitHubAccessTokenResponse":
"""Get GitHub Access Token.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param factory_name: The factory name.
:type factory_name: str
:param git_hub_access_token_request: Get GitHub access token request definition.
:type git_hub_access_token_request: ~azure.mgmt.datafactory.models.GitHubAccessTokenRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GitHubAccessTokenResponse, or the result of cls(response)
:rtype: ~azure.mgmt.datafactory.models.GitHubAccessTokenResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GitHubAccessTokenResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(git_hub_access_token_request, 'GitHubAccessTokenRequest')
request = build_get_git_hub_access_token_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
factory_name=factory_name,
content_type=content_type,
json=_json,
template_url=self.get_git_hub_access_token.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('GitHubAccessTokenResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_git_hub_access_token.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/getGitHubAccessToken'} # type: ignore
@distributed_trace_async
async def get_data_plane_access(
self,
resource_group_name: str,
factory_name: str,
policy: "_models.UserAccessPolicy",
**kwargs: Any
) -> "_models.AccessPolicyResponse":
"""Get Data Plane access.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param factory_name: The factory name.
:type factory_name: str
:param policy: Data Plane user access policy definition.
:type policy: ~azure.mgmt.datafactory.models.UserAccessPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AccessPolicyResponse, or the result of cls(response)
:rtype: ~azure.mgmt.datafactory.models.AccessPolicyResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AccessPolicyResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(policy, 'UserAccessPolicy')
request = build_get_data_plane_access_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
factory_name=factory_name,
content_type=content_type,
json=_json,
template_url=self.get_data_plane_access.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('AccessPolicyResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_data_plane_access.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataFactory/factories/{factoryName}/getDataPlaneAccess'} # type: ignore
| |
"""
Test cases for the template loaders
Note: This test requires setuptools!
"""
from django.conf import settings
if __name__ == '__main__':
settings.configure()
import os.path
import sys
import types
import unittest
try:
import pkg_resources
except ImportError:
pkg_resources = None
from django.template import TemplateDoesNotExist, Context
from django.template.loaders.eggs import Loader as EggLoader
from django.template import loader
from django.test import TestCase, override_settings
from django.utils import six
from django.utils._os import upath
from django.utils.six import StringIO
# Mock classes and objects for pkg_resources functions.
class MockLoader(object):
    """Stand-in PEP 302 loader; exists only so pkg_resources can map it to a provider."""
    pass
def create_egg(name, resources):
    """
    Create and register a mock egg module exposing the given resources.

    name: The name of the module.
    resources: A dictionary of resources. Keys are the names and values the data.

    The module is inserted into ``sys.modules`` (so pkg_resources can discover
    it) and is also returned so callers can keep a reference to it.
    """
    egg = types.ModuleType(name)
    egg.__loader__ = MockLoader()
    egg.__path__ = ['/some/bogus/path/']
    egg.__file__ = '/some/bogus/path/__init__.pyc'
    egg._resources = resources
    sys.modules[name] = egg
    # Fix: previously returned None, so assignments like
    # ``self.empty_egg = create_egg(...)`` in setUp stored None.
    return egg
@unittest.skipUnless(pkg_resources, 'setuptools is not installed')
class EggLoaderTest(TestCase):
    """Exercise the egg template loader against in-memory mock eggs."""
    def setUp(self):
        # Defined here b/c at module scope we may not have pkg_resources
        class MockProvider(pkg_resources.NullProvider):
            # Minimal resource provider backed by the egg's ``_resources`` dict.
            def __init__(self, module):
                pkg_resources.NullProvider.__init__(self, module)
                self.module = module
            def _has(self, path):
                return path in self.module._resources
            def _isdir(self, path):
                # All mock resources are plain files, never directories.
                return False
            def get_resource_stream(self, manager, resource_name):
                return self.module._resources[resource_name]
            def _get(self, path):
                return self.module._resources[path].read()
            def _fn(self, base, resource_name):
                return resource_name
        # Route every module loaded via MockLoader through MockProvider.
        pkg_resources._provider_factories[MockLoader] = MockProvider
        self.empty_egg = create_egg("egg_empty", {})
        self.egg_1 = create_egg("egg_1", {
            os.path.normcase('templates/y.html'): StringIO("y"),
            os.path.normcase('templates/x.txt'): StringIO("x"),
        })
    @override_settings(INSTALLED_APPS=['egg_empty'])
    def test_empty(self):
        "Loading any template on an empty egg should fail"
        egg_loader = EggLoader()
        self.assertRaises(TemplateDoesNotExist, egg_loader.load_template_source, "not-existing.html")
    @override_settings(INSTALLED_APPS=['egg_1'])
    def test_non_existing(self):
        "Template loading fails if the template is not in the egg"
        egg_loader = EggLoader()
        self.assertRaises(TemplateDoesNotExist, egg_loader.load_template_source, "not-existing.html")
    @override_settings(INSTALLED_APPS=['egg_1'])
    def test_existing(self):
        "A template can be loaded from an egg"
        egg_loader = EggLoader()
        contents, template_name = egg_loader.load_template_source("y.html")
        self.assertEqual(contents, "y")
        self.assertEqual(template_name, "egg:egg_1:templates/y.html")
    def test_not_installed(self):
        "Loading an existent template from an egg not included in any app should fail"
        egg_loader = EggLoader()
        self.assertRaises(TemplateDoesNotExist, egg_loader.load_template_source, "y.html")
@override_settings(
    TEMPLATE_LOADERS=(
        ('django.template.loaders.cached.Loader', (
            'django.template.loaders.filesystem.Loader',
        )),
    )
)
class CachedLoader(TestCase):
    """Behavior of the cached template loader (cache keys and negative caching)."""
    def test_templatedir_caching(self):
        "Check that the template directories form part of the template cache key. Refs #13573"
        # Retrieve a template specifying a template directory to check
        t1, name = loader.find_template('test.html', (os.path.join(os.path.dirname(upath(__file__)), 'templates', 'first'),))
        # Now retrieve the same template name, but from a different directory
        t2, name = loader.find_template('test.html', (os.path.join(os.path.dirname(upath(__file__)), 'templates', 'second'),))
        # The two templates should not have the same content
        self.assertNotEqual(t1.render(Context({})), t2.render(Context({})))
    def test_missing_template_is_cached(self):
        "#19949 -- Check that the missing template is cached."
        template_loader = loader.find_template_loader(settings.TEMPLATE_LOADERS[0])
        # Empty cache, which may be filled from previous tests.
        template_loader.reset()
        # Check that 'missing.html' isn't already in cache before 'missing.html' is loaded
        self.assertRaises(KeyError, lambda: template_loader.template_cache["missing.html"])
        # Try to load it, it should fail
        self.assertRaises(TemplateDoesNotExist, template_loader.load_template, "missing.html")
        # Verify that the fact that the missing template, which hasn't been found, has actually
        # been cached:
        self.assertEqual(template_loader.template_cache.get("missing.html"),
                         TemplateDoesNotExist,
                         "Cached template loader doesn't cache file lookup misses. It should.")
@override_settings(
    TEMPLATE_DIRS=(
        os.path.join(os.path.dirname(upath(__file__)), 'templates'),
    )
)
class RenderToStringTest(TestCase):
    """Behavior of loader.render_to_string with various context arguments."""
    def test_basic(self):
        # No context: the template variable renders empty.
        self.assertEqual(loader.render_to_string('test_context.html'), 'obj:')
    def test_basic_context(self):
        self.assertEqual(loader.render_to_string('test_context.html',
                                                 {'obj': 'test'}), 'obj:test')
    def test_existing_context_kept_clean(self):
        # A passed-in context_instance must not be permanently polluted by
        # the dictionary pushed for this render.
        context = Context({'obj': 'before'})
        output = loader.render_to_string('test_context.html', {'obj': 'after'},
                                         context_instance=context)
        self.assertEqual(output, 'obj:after')
        self.assertEqual(context['obj'], 'before')
    def test_empty_list(self):
        six.assertRaisesRegex(self, TemplateDoesNotExist,
                              'No template names provided$',
                              loader.render_to_string, [])
    def test_select_templates_from_empty_list(self):
        six.assertRaisesRegex(self, TemplateDoesNotExist,
                              'No template names provided$',
                              loader.select_template, [])
class TemplateDirsOverrideTest(unittest.TestCase):
    """The ``dirs`` argument is honoured whether given as a tuple or a list."""
    dirs_tuple = (os.path.join(os.path.dirname(upath(__file__)), 'other_templates'),)
    dirs_list = list(dirs_tuple)
    dirs_iter = (dirs_tuple, dirs_list)
    def test_render_to_string(self):
        for candidate_dirs in self.dirs_iter:
            rendered = loader.render_to_string('test_dirs.html', dirs=candidate_dirs)
            self.assertEqual(rendered, 'spam eggs\n')
    def test_get_template(self):
        for candidate_dirs in self.dirs_iter:
            template = loader.get_template('test_dirs.html', dirs=candidate_dirs)
            self.assertEqual(template.render(Context({})), 'spam eggs\n')
    def test_select_template(self):
        for candidate_dirs in self.dirs_iter:
            template = loader.select_template(['test_dirs.html'], dirs=candidate_dirs)
            self.assertEqual(template.render(Context({})), 'spam eggs\n')
@override_settings(
    TEMPLATE_LOADERS=(
        ('django.template.loaders.cached.Loader', (
            'django.template.loaders.filesystem.Loader',
            'django.template.loaders.app_directories.Loader',
        )),
    )
)
class PriorityCacheLoader(TestCase):
    """Loader ordering inside the cached loader wrapper."""
    def test_basic(self):
        """
        Check that the order of template loader works. Refs #21460.
        """
        # filesystem.Loader is listed first, so its copy of the template wins.
        t1, name = loader.find_template('priority/foo.html')
        self.assertEqual(t1.render(Context({})), 'priority\n')
@override_settings(
    TEMPLATE_LOADERS=('django.template.loaders.filesystem.Loader',
                      'django.template.loaders.app_directories.Loader',),
)
class PriorityLoader(TestCase):
    """Loader ordering without the cached loader wrapper."""
    def test_basic(self):
        """
        Check that the order of template loader works. Refs #21460.
        """
        # filesystem.Loader is listed first, so its copy of the template wins.
        t1, name = loader.find_template('priority/foo.html')
        self.assertEqual(t1.render(Context({})), 'priority\n')
| |
#!/usr/bin/python
# NOTE: this is a Python 2 script (print statements, Queue, raw_input).
#============================ adjust path =====================================
import sys
import os
if __name__ == "__main__":
    # When run as a script, make the SDK importable from the repository
    # checkout layout (../../libs and ../../external_libs).
    here = sys.path[0]
    sys.path.insert(0, os.path.join(here, '..', '..','libs'))
    sys.path.insert(0, os.path.join(here, '..', '..','external_libs'))
#============================ verify installation =============================
from SmartMeshSDK.utils import SmsdkInstallVerifier
# Abort early with a readable message if the Python/pyserial environment is
# not suitable for the SmartMesh SDK.
(goodToGo,reason) = SmsdkInstallVerifier.verifyComponents(
    [
        SmsdkInstallVerifier.PYTHON,
        SmsdkInstallVerifier.PYSERIAL,
    ]
)
if not goodToGo:
    print "Your installation does not allow this application to run:\n"
    print reason
    raw_input("Press any button to exit")
    sys.exit(1)
#============================ imports =========================================
import threading
import Queue
import datetime
import copy
import webbrowser
from SmartMeshSDK.utils import AppUtils, \
                               FormatUtils
from SmartMeshSDK.ApiDefinition import IpMgrDefinition
from SmartMeshSDK.IpMgrConnectorMux import IpMgrSubscribe
from SmartMeshSDK.ApiException import APIError
from SmartMeshSDK.protocols.oap import OAPDispatcher, \
                                       OAPClient, \
                                       OAPMessage, \
                                       OAPNotif
from SmartMeshSDK.protocols.xivelyConnector import xivelyConnector
from dustUI import dustWindow, \
                   dustFrameConnection, \
                   dustFrameForm, \
                   dustFrameMoteList, \
                   dustFrameText
#============================ logging =========================================
# local
import logging
class NullHandler(logging.Handler):
    """Logging handler that silently discards every record."""
    def emit(self, record):
        # Intentionally a no-op. NOTE(review): on Python >= 2.7 the stdlib
        # ``logging.NullHandler`` could be used instead.
        pass
log = logging.getLogger('App')
log.setLevel(logging.ERROR)
# Attach a do-nothing handler so 'App' records are dropped unless the global
# logging configuration below routes them somewhere.
log.addHandler(NullHandler())
# global
AppUtils.configureLogging()
#============================ defines =========================================
# Default Xively API key pre-filled in the GUI form (empty: user must enter one).
DFLT_API_KEY = ''
# GUI refresh period.
GUI_UPDATEPERIOD = 500 # ms
# Maximum number of pending samples before publish() starts dropping.
MAX_QUEUE_SIZE = 10
# Column names of the per-mote table shown in the GUI.
COL_NUMDATARX = 'data received'
COL_NUMDATAPUB = 'published'
COL_NUMDATAPUBOK = 'published OK'
COL_CLR = 'clear'
COL_URL = 'see data online'
#============================ body ============================================
##
# \addtogroup Xively
# \{
#
class xivelyConnectorThread(threading.Thread):
    '''
    \brief A singleton which publishes data to Xively.
    '''
    #======================== singleton pattern ===============================
    _instance = None
    _init = False
    # sentinel pushed onto the queue to ask the worker thread to terminate
    CLOSE_MESSAGE = 'close'
    def __new__(cls, *args, **kwargs):
        if not cls._instance:
            cls._instance = super(xivelyConnectorThread, cls).__new__(cls, *args, **kwargs)
        return cls._instance
    def __init__(self):
        # don't re-initialize an instance (needed because singleton)
        if self._init:
            return
        self._init = True
        # variables
        # bounded queue of (mac, datastream, value) tuples awaiting publication
        self.queue = Queue.Queue(maxsize=MAX_QUEUE_SIZE)
        # created lazily in run() once an API key is available
        self.publisher = None
        # initialize parent class
        threading.Thread.__init__(self)
        # give this thread a name
        self.name = "xivelyConnectorThread"
        # start myself
        self.start()
    #======================== public ==========================================
    def publish(self,mac,datastream,value):
        '''
        \brief Queue one sample for publication; best-effort, drops the
            sample (with a console warning) if the queue is full.
        '''
        try:
            self.queue.put_nowait((mac,datastream,value))
        except Queue.Full:
            print "Queue is full"
    def getProductId(self):
        # None until the publisher has been created by the worker thread.
        returnVal = None
        if self.publisher:
            returnVal = self.publisher.getProductId()
        return returnVal
    def close(self):
        # Processed after any samples already queued; run() then returns.
        self.queue.put(self.CLOSE_MESSAGE)
    #======================== private =========================================
    def run(self):
        while True:
            # blocking wait for the next sample (or the close sentinel)
            elem = self.queue.get()
            if elem==self.CLOSE_MESSAGE:
                if self.publisher:
                    self.publisher.close()
                return
            (mac,datastream,value) = elem
            AppData().incrementMoteCounter(mac,COL_NUMDATAPUB)
            # Lazily create the Xively publisher once the user has entered
            # an API key in the GUI.
            if self.publisher==None:
                apiKey = AppData().getApiKey()
                if apiKey:
                    self.publisher = xivelyConnector.xivelyConnector(
                        apiKey = apiKey,
                        productName = 'SmartMesh IP Starter Kit',
                        productDesc = 'Manager {0}'.format(
                            FormatUtils.formatMacString(AppData().getManager()),
                        ),
                    )
            # Still no publisher: silently drop the sample (counter already
            # incremented above, so the GUI shows the attempt).
            if self.publisher==None:
                continue
            try:
                # publish
                self.publisher.publish(
                    mac = mac,
                    datastream = datastream,
                    value = value,
                )
                # log
                output = []
                output += ['pushed following data to Xively:']
                output += ['- mac: {0}'.format(
                        FormatUtils.formatMacString(mac),
                    )
                ]
                output += ['- datastream: {0}'.format(datastream)]
                output += ['- value: {0}'.format(value)]
                output = '\n'.join(output)
                log.debug(output)
            except Exception as err:
                output = []
                output += ['===============']
                output += ['{0}: Exception when publishing'.format(self.name)]
                output += ['- mac: {0}'.format(FormatUtils.formatMacString(mac))]
                output += ['- datastream: {0}'.format(datastream)]
                output += ['{0}'.format(type(err))]
                output += ['{0}'.format(err)]
                output += ['']
                output = '\n'.join(output)
                log.error(output)
                print output
            else:
                # success path: bump the "published OK" counter
                AppData().incrementMoteCounter(mac,COL_NUMDATAPUBOK)
class AppData(object):
    '''
    \brief Singleton storing the application state: the Xively API key, the
        manager's MAC address and the per-mote publication counters.
    '''
    #======================== singleton pattern ===============================
    _instance = None
    _init = False
    def __new__(cls, *args, **kwargs):
        # create the single shared instance on first call only
        if cls._instance is None:
            cls._instance = super(AppData, cls).__new__(cls, *args, **kwargs)
        return cls._instance
    def __init__(self):
        # singleton: state is initialized exactly once, on first construction
        if self._init:
            return
        self._init = True
        # all state below is guarded by dataLock
        self.dataLock = threading.RLock()
        self.apiKey = ''
        self.motedata = {}
        self.manager = None
    #======================== public ==========================================
    def resetData(self):
        '''
        \brief Forget all motes and the manager (e.g. on disconnect).
        '''
        with self.dataLock:
            self.motedata = {}
            self.manager = None
    #===== apiKey
    def setApiKey(self, apiKey):
        assert type(apiKey)==str
        with self.dataLock:
            self.apiKey = apiKey
    def getApiKey(self):
        with self.dataLock:
            return self.apiKey
    #===== manager
    def setManager(self, mac):
        formatted = self._formatMac(mac)
        with self.dataLock:
            # the manager MAC may only be set once per session
            assert self.manager==None
            self.manager = formatted
    def getManager(self):
        with self.dataLock:
            return self.manager
    #===== mote
    def addMote(self, mac):
        formatted = self._formatMac(mac)
        with self.dataLock:
            if formatted not in self.motedata:
                # a newly discovered mote starts with every counter at zero
                self.motedata[formatted] = dict(
                    (counter, 0)
                    for counter in (COL_NUMDATARX, COL_NUMDATAPUB, COL_NUMDATAPUBOK)
                )
    def deleteMote(self, mac):
        formatted = self._formatMac(mac)
        with self.dataLock:
            if formatted in self.motedata:
                del self.motedata[formatted]
    def getMoteData(self):
        # deep copy so callers can iterate without holding the lock
        with self.dataLock:
            return copy.deepcopy(self.motedata)
    def incrementMoteCounter(self, mac, counterName):
        formatted = self._formatMac(mac)
        assert counterName in [COL_NUMDATARX,COL_NUMDATAPUB,COL_NUMDATAPUBOK]
        with self.dataLock:
            # make sure the mote exists before touching its counters
            self.addMote(formatted)
            self.motedata[formatted][counterName] += 1
    def clearMoteCounters(self, mac):
        formatted = self._formatMac(mac)
        with self.dataLock:
            for counterName in (COL_NUMDATARX, COL_NUMDATAPUB, COL_NUMDATAPUBOK):
                self.motedata[formatted][counterName] = 0
    #======================== private =========================================
    def _formatMac(self, mac):
        # normalize any 8-element tuple/list MAC into a hashable tuple
        assert type(mac) in [tuple,list]
        assert len(mac)==8
        return tuple(mac)
class notifClient(object):
'''
\brief Class which subscribes to and receives notifications from the
manager.
'''
def __init__(self, connector, disconnectedCallback):
# store params
self.connector = connector
self.disconnectedCallback = disconnectedCallback
self.oap_clients = {}
# variables
# subscriber
self.subscriber = IpMgrSubscribe.IpMgrSubscribe(self.connector)
self.subscriber.start()
self.subscriber.subscribe(
notifTypes = [
IpMgrSubscribe.IpMgrSubscribe.NOTIFEVENT,
],
fun = self._eventHandler,
isRlbl = True,
)
self.subscriber.subscribe(
notifTypes = [
IpMgrSubscribe.IpMgrSubscribe.ERROR,
IpMgrSubscribe.IpMgrSubscribe.FINISH,
],
fun = self._errorHandler,
isRlbl = True,
)
self.subscriber.subscribe(
notifTypes = [
IpMgrSubscribe.IpMgrSubscribe.NOTIFDATA,
],
fun = self._notifDataHandler,
isRlbl = False,
)
# OAP dispatcher
self.oap_dispatch = OAPDispatcher.OAPDispatcher()
self.oap_dispatch.register_notif_handler(self._handle_oap_notif)
#======================== public ==========================================
def disconnect(self):
self.connector.disconnect()
xivelyConnectorThread().close()
#======================== private =========================================
#===== notifications from manager
def _eventHandler(self,notifName,notifParams):
if notifName in [IpMgrSubscribe.IpMgrSubscribe.EVENTMOTEOPERATIONAL]:
AppData().addMote(notifParams.macAddress)
if notifName in [IpMgrSubscribe.IpMgrSubscribe.EVENTMOTELOST]:
AppData().deleteMote(notifParams.macAddress)
def _errorHandler(self,notifName,notifParams):
self.disconnectedCallback()
def _notifDataHandler(self,notifName,notifParams):
assert notifName==IpMgrSubscribe.IpMgrSubscribe.NOTIFDATA
mac = tuple(notifParams.macAddress)
AppData().incrementMoteCounter(mac,COL_NUMDATARX)
self.oap_dispatch.dispatch_pkt(notifName, notifParams)
def _handle_oap_notif(self,mac,notif):
# convert MAC to tuple
mac = tuple(mac)
if isinstance(notif,OAPNotif.OAPTempSample):
# this is a temperature notification
value = float(notif.samples[0])/100.0 # /100 since unit in 100th of C
xivelyConnectorThread().publish(
mac = mac,
datastream = 'temperature',
value = value,
)
if mac not in self.oap_clients:
publisher = xivelyConnectorThread().publisher
if publisher:
try:
# create datastream
publisher.publish(
mac = mac,
datastream = 'led',
value = 0,
)
# subscribe
publisher.subscribe(
mac = mac,
datastream = 'led',
callback = self._led_cb,
)
# create OAP client
self.oap_clients[mac] = OAPClient.OAPClient(
mac,
self._sendDataToConnector,
self.oap_dispatch,
)
except Exception as err:
output = []
output += ['===============']
output += ['{0}: Exception when creating and subscribing to datastream']
output += ['- mac: {0}'.format(FormatUtils.formatMacString(mac))]
output += ['{0}'.format(type(err))]
output += ['{0}'.format(err)]
output += ['']
output = '\n'.join(output)
log.error(output)
print output
#===== notifications from Xively
def _led_cb(self,mac,datastream,value):
# all non-0 values turn LED on
if value==0:
value = 0
else:
value = 1
# send through OAP
self.oap_clients[mac].send(
OAPMessage.CmdType.PUT, # command
[3,2], # address
data_tags=[OAPMessage.TLVByte(t=0,v=value)], # parameters
cb=None, # callback
)
def _sendDataToConnector(self,mac,priority,srcPort,dstPort,options,data):
self.connector.dn_sendData(
mac,
priority,
srcPort,
dstPort,
options,
data
)
class xivelyGui(object):
    """Tk GUI: manager connection frame, Xively API key entry and a live
    per-mote counter list, refreshed periodically from AppData."""

    def __init__(self):

        # local variables
        self.guiLock = threading.Lock()
        self.notifClientHandler = None
        self.macs = []          # MACs already shown in the mote list
        self.oldData = {}       # last values pushed to the GUI, per mac

        # create window
        self.window = dustWindow.dustWindow(
            'Xively Publisher',
            self._windowCb_close,
        )

        # add a connection frame
        self.connectionFrame = dustFrameConnection.dustFrameConnection(
            self.window,
            self.guiLock,
            self._connectionFrameCb_connected,
            frameName="manager connection",
            row=0,column=0,
        )
        self.connectionFrame.apiLoaded(IpMgrDefinition.IpMgrDefinition())
        self.connectionFrame.show()

        # add a form frame
        self.apiKeyFrame = dustFrameForm.dustFrameForm(
            self.window,
            self.guiLock,
            self._apiKeyButtonCb,
            "Xively API key",
            row=1,column=0
        )
        self.apiKeyFrame.show()
        self.apiKeyFrame.setVal(DFLT_API_KEY)

        # add a mote list frame: three counter labels and two action buttons
        columnnames = [
            {
                'name': COL_NUMDATARX,
                'type': dustFrameMoteList.dustFrameMoteList.LABEL,
            },
            {
                'name': COL_NUMDATAPUB,
                'type': dustFrameMoteList.dustFrameMoteList.LABEL,
            },
            {
                'name': COL_NUMDATAPUBOK,
                'type': dustFrameMoteList.dustFrameMoteList.LABEL,
            },
            {
                'name': COL_CLR,
                'type': dustFrameMoteList.dustFrameMoteList.ACTION,
            },
            {
                'name': COL_URL,
                'type': dustFrameMoteList.dustFrameMoteList.ACTION,
            },
        ]
        self.moteListFrame = dustFrameMoteList.dustFrameMoteList(
            self.window,
            self.guiLock,
            columnnames,
            row=2,column=0,
        )
        self.moteListFrame.show()

        # add a tooltip frame
        self.toolTipFrame = dustFrameText.dustFrameText(
            self.window,
            self.guiLock,
            frameName="tooltip",
            row=5,column=0,
        )
        self.toolTipFrame.show()

    #======================== public ==========================================

    def start(self):
        """Schedule the periodic mote-list refresh and enter the Tk mainloop
        (never returns)."""

        # start update
        self.moteListFrame.after(GUI_UPDATEPERIOD,self._updateMoteList)
        # NOTE: the string below is a no-op expression, not a docstring;
        # it describes the mainloop() call that follows.
        '''
        This command instructs the GUI to start executing and reacting to
        user interactions. It never returns and should therefore be the last
        command called.
        '''
        try:
            self.window.mainloop()
        except SystemExit:
            sys.exit()

    #======================== private =========================================

    def _windowCb_close(self):
        # window closed: tear down the notification client, if connected
        if self.notifClientHandler:
            self.notifClientHandler.disconnect()

    def _connectionFrameCb_connected(self,connector):
        '''
        \brief Called when the connectionFrame has connected.
        '''

        # store the connector
        self.connector = connector

        # update MAC address of manager and operational motes
        self._updateMacAddresses()

        # create a notification client
        self.notifClientHandler = notifClient(
            self.connector,
            self._connectionFrameCb_disconnected,
        )

    def _connectionFrameCb_disconnected(self):
        '''
        \brief Called when the connectionFrame has disconnected.
        '''

        # update the GUI
        self.connectionFrame.updateGuiDisconnected()

        # reset the data
        AppData().resetData()

        # delete the connector
        if self.connector:
            self.connector.disconnect()
        self.connector = None

    def _apiKeyButtonCb(self,apiKey):
        # store the key and lock the form so it cannot be changed again
        AppData().setApiKey(str(apiKey))
        self.apiKeyFrame.disable()

    def _moteListFrame_clear(self,mac,button):
        # "clear" action button: zero the mote's counters
        AppData().clearMoteCounters(mac)

    def _moteListFrame_Url(self,mac,button):
        """Open the Xively device page for this mote in a browser."""

        # get FeedId
        productId = xivelyConnectorThread().getProductId()
        if not productId:
            self.toolTipFrame.write('Cannot open live data. Not connected to Xively.')
            return

        # format URL
        url = "https://xively.com/manage/{0}/devices/{1}".format(
            productId,
            FormatUtils.formatMacString(mac),
        )

        # open browser
        webbrowser.open(
            url = url,
            new = 2, # 2==Open new tab if possible
        )

    #======================== helpers =========================================

    def _updateMoteList(self):
        """Periodic GUI refresh: add new motes, push changed counters only."""

        # get latest data
        newData = AppData().getMoteData()

        # update GUI
        for (mac,moteData) in newData.items():
            if mac not in self.macs:
                # add the mote
                self.macs += [mac]
                moteData[COL_CLR] = {
                    'text': 'clear',
                    'callback': self._moteListFrame_clear,
                }
                moteData[COL_URL] = {
                    'text': 'open browser',
                    'callback': self._moteListFrame_Url,
                }
                self.moteListFrame.addMote(mac,moteData)
                self.oldData[mac] = {
                    COL_NUMDATARX: 0,
                    COL_NUMDATAPUB: 0,
                    COL_NUMDATAPUBOK: 0,
                }
            else:
                # update the mote, only touching cells whose value changed
                for columnname,columnval in moteData.items():
                    if columnname in [COL_NUMDATARX,
                                      COL_NUMDATAPUB,
                                      COL_NUMDATAPUBOK]:
                        if self.oldData[mac][columnname]!=columnval:
                            self.oldData[mac][columnname] = columnval
                            self.moteListFrame.update(mac,columnname,columnval)

        # schedule next update
        self.moteListFrame.after(GUI_UPDATEPERIOD,self._updateMoteList)

    def _updateMacAddresses(self):
        """Walk the manager's mote table, recording the AP (manager) MAC and
        every operational mote (state 4)."""
        currentMac = (0,0,0,0,0,0,0,0) # start getMoteConfig() iteration with the 0 MAC address
        continueAsking = True
        while continueAsking:
            try:
                res = self.connector.dn_getMoteConfig(currentMac,True)
            except APIError:
                # end of the mote table
                continueAsking = False
            else:
                if res.isAP:
                    # I found the managerMac
                    AppData().setManager(res.macAddress)
                if ((not res.isAP) and (res.state in [4,])):
                    # I found an operational mote
                    AppData().addMote(res.macAddress)
                currentMac = res.macAddress
#============================ main ============================================
def main():
    """Application entry point: build the GUI and hand over to its mainloop."""
    gui = xivelyGui()
    gui.start()

if __name__ == '__main__':
    main()
##
# end of Xively
# \}
#
| |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Represents a lexographic range of namespaces."""
# Public API of this module.
__all__ = [
    'NAMESPACE_CHARACTERS',
    'MAX_NAMESPACE_LENGTH',
    'MAX_NAMESPACE',
    'MIN_NAMESPACE',
    'NAMESPACE_BATCH_SIZE',
    'NamespaceRange',
    'get_namespace_keys',
    ]
import itertools
import string
from google.appengine.api import datastore
from google.appengine.ext import db
from google.appengine.ext.db import metadata
# All characters allowed in a namespace name, sorted so their order matches
# lexicographic namespace order.
# NOTE(review): string.lowercase/string.uppercase are Python 2 only
# (ascii_lowercase/ascii_uppercase on Python 3) -- confirm target runtime.
NAMESPACE_CHARACTERS = ''.join(sorted(string.digits +
                                      string.lowercase +
                                      string.uppercase +
                                      '._-'))
# Maximum number of characters in a namespace name.
MAX_NAMESPACE_LENGTH = 100
# The lexicographically smallest namespace (the default, empty namespace).
MIN_NAMESPACE = ''
# Number of namespace keys fetched per datastore query batch.
NAMESPACE_BATCH_SIZE = 50
def _setup_constants(alphabet=NAMESPACE_CHARACTERS,
                     max_length=MAX_NAMESPACE_LENGTH,
                     batch_size=NAMESPACE_BATCH_SIZE):
  """Calculate derived constant values. Only useful for testing."""
  global NAMESPACE_CHARACTERS
  global MAX_NAMESPACE_LENGTH
  global MAX_NAMESPACE
  global _LEX_DISTANCE
  global NAMESPACE_BATCH_SIZE

  NAMESPACE_CHARACTERS = alphabet
  MAX_NAMESPACE_LENGTH = max_length
  MAX_NAMESPACE = NAMESPACE_CHARACTERS[-1] * MAX_NAMESPACE_LENGTH
  NAMESPACE_BATCH_SIZE = batch_size

  # _LEX_DISTANCE[k] is the number of namespaces of length <= k+1, i.e.
  # d[0] = 1 and d[k] = d[k-1] * len(alphabet) + 1.
  distances = [1]
  while len(distances) < MAX_NAMESPACE_LENGTH:
    distances.append(distances[-1] * len(NAMESPACE_CHARACTERS) + 1)
  _LEX_DISTANCE = distances

_setup_constants()
def _ord_to_namespace(n, _max_length=None):
  """Convert a namespace ordinal to a namespace string.

  Converts an int, representing the sequence number of a namespace ordered
  lexographically, into a namespace string.

  >>> _ord_to_namespace(0)
  ''
  >>> _ord_to_namespace(1)
  '-'
  >>> _ord_to_namespace(2)
  '--'
  >>> _ord_to_namespace(3)
  '---'

  Args:
    n: A number representing the lexographical ordering of a namespace.
    _max_length: The maximum namespace length.

  Returns:
    A string representing the nth namespace in lexographical order.
  """
  if _max_length is None:
    _max_length = MAX_NAMESPACE_LENGTH

  length = _LEX_DISTANCE[_max_length - 1]
  if n == 0:
    return ''
  n -= 1
  # Use explicit floor division: int '/' is floor division only on Python 2;
  # '//' keeps the arithmetic identical on both (the file already uses '//'
  # in NamespaceRange.split_range).
  return (NAMESPACE_CHARACTERS[n // length] +
          _ord_to_namespace(n % length, _max_length - 1))
def _namespace_to_ord(namespace):
  """Converts a namespace string into an int representing its lexographic order.

  With the default alphabet the first namespace character is '-', so these
  are the inverses of _ord_to_namespace (examples previously showed wrong
  inputs/outputs):

  >>> _namespace_to_ord('')
  0
  >>> _namespace_to_ord('-')
  1
  >>> _namespace_to_ord('--')
  2

  Args:
    namespace: A namespace string.

  Returns:
    An int representing the lexographical order of the given namespace string.
  """
  n = 0
  for i, c in enumerate(namespace):
    # each character contributes its index scaled by the count of
    # namespaces that fit in the remaining positions, plus 1 for the
    # namespace that is the current prefix itself
    n += (_LEX_DISTANCE[MAX_NAMESPACE_LENGTH - i- 1] *
          NAMESPACE_CHARACTERS.index(c)
          + 1)
  return n
def _key_for_namespace(namespace, app):
  """Return the __namespace__ key for a namespace.

  Args:
    namespace: The namespace whose key is requested.
    app: The id of the application that the key belongs to.

  Returns:
    A db.Key representing the namespace.
  """
  # The empty (default) namespace is keyed by a reserved numeric id rather
  # than by name.
  key_name = namespace if namespace else metadata.Namespace.EMPTY_NAMESPACE_ID
  return db.Key.from_path(metadata.Namespace.KIND_NAME, key_name, _app=app)
class NamespaceRange(object):
  """An inclusive lexographical range of namespaces.

  This class is immutable.
  """

  def __init__(self,
               namespace_start=None,
               namespace_end=None,
               _app=None):
    """Initializes a NamespaceRange instance.

    Args:
      namespace_start: A string representing the start of the namespace range.
          namespace_start is included in the range. If namespace_start is None
          then the lexographically first namespace is used.
      namespace_end: A string representing the end of the namespace range.
          namespace_end is included in the range and must be >= namespace_start.
          If namespace_end is None then the lexographically last namespace is
          used.

    Raises:
      ValueError: if namespace_start > namespace_end.
    """
    if namespace_start is None:
      namespace_start = MIN_NAMESPACE
    if namespace_end is None:
      namespace_end = MAX_NAMESPACE
    if namespace_start > namespace_end:
      raise ValueError('namespace_start (%r) > namespace_end (%r)' % (
          namespace_start, namespace_end))
    self.__namespace_start = namespace_start
    self.__namespace_end = namespace_end
    self.__app = _app

  @property
  def app(self):
    # Application id the range queries against (None = current app).
    return self.__app

  @property
  def namespace_start(self):
    # First namespace in the range (inclusive).
    return self.__namespace_start

  @property
  def namespace_end(self):
    # Last namespace in the range (inclusive).
    return self.__namespace_end

  @property
  def is_single_namespace(self):
    """True if the namespace range only includes a single namespace."""
    return self.namespace_start == self.namespace_end

  def split_range(self):
    """Splits the NamespaceRange into two nearly equal-sized ranges.

    Returns:
      If this NamespaceRange contains a single namespace then a list containing
      this NamespaceRange is returned. Otherwise a two-element list containing
      two NamespaceRanges whose total range is identical to this
      NamespaceRange's is returned.
    """
    if self.is_single_namespace:
      return [self]

    # Work in ordinal space so the midpoint is lexographically balanced.
    mid_point = (_namespace_to_ord(self.namespace_start) +
                 _namespace_to_ord(self.namespace_end)) // 2

    return [NamespaceRange(self.namespace_start,
                           _ord_to_namespace(mid_point),
                           _app=self.app),
            NamespaceRange(_ord_to_namespace(mid_point+1),
                           self.namespace_end,
                           _app=self.app)]

  def __copy__(self):
    return self.__class__(self.__namespace_start,
                          self.__namespace_end,
                          self.__app)

  def __eq__(self, o):
    # NOTE(review): app is excluded from equality but included in __hash__,
    # so "equal" ranges for different apps hash differently -- confirm
    # whether app should participate in equality.
    return (self.namespace_start == o.namespace_start and
            self.namespace_end == o.namespace_end)

  def __hash__(self):
    return hash((self.namespace_start, self.namespace_end, self.app))

  def __repr__(self):
    if self.app is None:
      return 'NamespaceRange(namespace_start=%r, namespace_end=%r)' % (
          self.namespace_start, self.namespace_end)
    else:
      return 'NamespaceRange(namespace_start=%r, namespace_end=%r, _app=%r)' % (
          self.namespace_start, self.namespace_end, self.app)

  def with_start_after(self, after_namespace):
    """Returns a copy of this NamespaceName with a new namespace_start.

    Args:
      after_namespace: A namespace string.

    Returns:
      A NamespaceRange object whose namespace_start is the lexographically next
      namespace after the given namespace string.

    Raises:
      ValueError: if the NamespaceRange includes only a single namespace.
    """
    namespace_start = _ord_to_namespace(_namespace_to_ord(after_namespace) + 1)
    return NamespaceRange(namespace_start, self.namespace_end, _app=self.app)

  def make_datastore_query(self, cursor=None):
    """Returns a datastore.Query that generates all namespaces in the range.

    Args:
      cursor: start cursor for the query.

    Returns:
      A datastore.Query instance that generates db.Keys for each namespace in
      the NamespaceRange.
    """
    filters = {}
    filters['__key__ >= '] = _key_for_namespace(
        self.namespace_start, self.app)
    filters['__key__ <= '] = _key_for_namespace(
        self.namespace_end, self.app)

    return datastore.Query('__namespace__',
                           filters=filters,
                           keys_only=True,
                           cursor=cursor,
                           _app=self.app)

  def normalized_start(self):
    """Returns a NamespaceRange with leading non-existant namespaces removed.

    Returns:
      A copy of this NamespaceRange whose namespace_start is adjusted to exclude
      the portion of the range that contains no actual namespaces in the
      datastore. None is returned if the NamespaceRange contains no actual
      namespaces in the datastore.
    """
    namespaces_after_key = list(self.make_datastore_query().Run(limit=1))

    if not namespaces_after_key:
      return None

    namespace_after_key = namespaces_after_key[0].name() or ''
    return NamespaceRange(namespace_after_key,
                          self.namespace_end,
                          _app=self.app)

  def to_json_object(self):
    """Returns a dict representation that can be serialized to JSON."""
    obj_dict = dict(namespace_start=self.namespace_start,
                    namespace_end=self.namespace_end)
    if self.app is not None:
      obj_dict['app'] = self.app
    return obj_dict

  @classmethod
  def from_json_object(cls, json):
    """Returns a NamespaceRange from an object deserialized from JSON."""
    return cls(json['namespace_start'],
               json['namespace_end'],
               _app=json.get('app'))

  # NOTE(review): the can_query default below is evaluated once at import
  # time, so the 50-query budget is shared across every call to split() in
  # this process -- confirm this is intended. Also `.next` is Python 2 only
  # (next(...) on Python 3).
  @classmethod
  def split(cls,
            n,
            contiguous,
            can_query=itertools.chain(itertools.repeat(True, 50),
                                      itertools.repeat(False)).next,
            _app=None):
    """Splits the complete NamespaceRange into n equally-sized NamespaceRanges.

    Args:
      n: The maximum number of NamespaceRanges to return. Fewer than n
        namespaces may be returned.
      contiguous: If True then the returned NamespaceRanges will cover the
        entire space of possible namespaces (i.e. from MIN_NAMESPACE to
        MAX_NAMESPACE) without gaps. If False then the returned
        NamespaceRanges may exclude namespaces that don't appear in the
        datastore.
      can_query: A function that returns True if split() can query the datastore
        to generate more fair namespace range splits, and False otherwise.
        If not set then split() is allowed to make 50 datastore queries.

    Returns:
      A list of at most n NamespaceRanges representing a near-equal distribution
      of actual existant datastore namespaces. The returned list will be sorted
      lexographically.

    Raises:
      ValueError: if n is < 1.
    """
    if n < 1:
      raise ValueError('n must be >= 1')

    ranges = None
    if can_query():
      if not contiguous:
        ns_keys = get_namespace_keys(_app, n + 1)
        if not ns_keys:
          return []
        else:
          if len(ns_keys) <= n:
            # Fewer actual namespaces than requested ranges: one
            # single-namespace range per existing namespace.
            ns_range = []
            for ns_key in ns_keys:
              ns_range.append(NamespaceRange(ns_key.name() or '',
                                             ns_key.name() or '',
                                             _app=_app))
            return sorted(ns_range,
                          key=lambda ns_range: ns_range.namespace_start)
          ranges = [NamespaceRange(ns_keys[0].name() or '', _app=_app)]
      else:
        ns_range = NamespaceRange(_app=_app).normalized_start()
        if ns_range is None:
          return [NamespaceRange(_app=_app)]
        ranges = [ns_range]
    else:
      ranges = [NamespaceRange(_app=_app)]

    # Repeatedly bisect the widest-open ranges until we have n of them.
    singles = []
    while ranges and (len(ranges) + len(singles)) < n:
      namespace_range = ranges.pop(0)
      if namespace_range.is_single_namespace:
        singles.append(namespace_range)
      else:
        left, right = namespace_range.split_range()
        if can_query():
          right = right.normalized_start()
        if right is not None:
          ranges.append(right)
        ranges.append(left)
    ns_ranges = sorted(singles + ranges,
                       key=lambda ns_range: ns_range.namespace_start)

    if contiguous:
      if not ns_ranges:
        # NOTE(review): presumably only reachable if every namespace was
        # deleted between queries -- returns the full range as a fallback.
        return [NamespaceRange(_app=_app)]

      # Stretch each range to the next one's start so the union covers
      # [MIN_NAMESPACE, MAX_NAMESPACE] without gaps.
      continuous_ns_ranges = []
      for i in range(len(ns_ranges)):
        if i == 0:
          namespace_start = MIN_NAMESPACE
        else:
          namespace_start = ns_ranges[i].namespace_start

        if i == len(ns_ranges) - 1:
          namespace_end = MAX_NAMESPACE
        else:
          namespace_end = _ord_to_namespace(
              _namespace_to_ord(ns_ranges[i+1].namespace_start) - 1)

        continuous_ns_ranges.append(NamespaceRange(namespace_start,
                                                   namespace_end,
                                                   _app=_app))
      return continuous_ns_ranges
    else:
      return ns_ranges

  def __iter__(self):
    """Iterate over all the namespaces within this range."""
    cursor = None
    while True:
      query = self.make_datastore_query(cursor=cursor)
      count = 0
      for ns_key in query.Run(limit=NAMESPACE_BATCH_SIZE):
        count += 1
        yield ns_key.name() or ''
      # A short batch means the range is exhausted.
      if count < NAMESPACE_BATCH_SIZE:
        break
      cursor = query.GetCursor()
def get_namespace_keys(app, limit):
  """Return up to `limit` __namespace__ keys for the given app."""
  query = datastore.Query('__namespace__', keys_only=True, _app=app)
  return [key for key in query.Run(limit=limit, batch_size=limit)]
| |
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods for operations related to the management of volumes,
and storage repositories
"""
import re
import string
from eventlet import greenthread
from oslo_config import cfg
from oslo_log import log as logging
from nova import exception
from nova.i18n import _, _LE, _LW
# Module config options, registered under the [xenserver] group.
xenapi_volume_utils_opts = [
    cfg.IntOpt('introduce_vdi_retry_wait',
               default=20,
               help='Number of seconds to wait for an SR to settle '
                    'if the VDI does not exist when first introduced'),
]

CONF = cfg.CONF
CONF.register_opts(xenapi_volume_utils_opts, 'xenserver')

LOG = logging.getLogger(__name__)
def parse_sr_info(connection_data, description=''):
    """Derive the (uuid, label, params) triple used to introduce an SR.

    NOTE: pops 'name_label' out of connection_data (mutates the caller's
    dict), matching the original behavior.
    """
    label = connection_data.pop('name_label',
                                'tempSR-%s' % connection_data.get('volume_id'))

    if 'sr_uuid' in connection_data:
        uuid = connection_data['sr_uuid']
        params = {}
    else:
        params = _parse_volume_info(connection_data)
        # This magic label sounds a lot like 'False Disc' in leet-speak
        uuid = "FA15E-D15C-" + str(params['id'])

    for key in connection_data.get('introduce_sr_keys', {}):
        params[key] = connection_data[key]
    params['name_description'] = connection_data.get('name_description',
                                                     description)
    return (uuid, label, params)
def _parse_volume_info(connection_data):
    """Parse device_path and mountpoint as they can be used by XenAPI.

    In particular, the mountpoint (e.g. /dev/sdc) must be translated
    into a numeric literal.
    """
    volume_id = connection_data['volume_id']
    target_portal = connection_data['target_portal']
    target_host = _get_target_host(target_portal)
    target_port = _get_target_port(target_portal)
    target_iqn = connection_data['target_iqn']

    LOG.debug('(vol_id,host,port,iqn): '
              '(%(vol_id)s,%(host)s,%(port)s,%(iqn)s)',
              {"vol_id": volume_id,
               "host": target_host,
               "port": target_port,
               "iqn": target_iqn})

    if volume_id is None or target_host is None or target_iqn is None:
        raise exception.StorageError(
            reason=_('Unable to obtain target information %s') %
            connection_data)

    volume_info = {
        'id': volume_id,
        'target': target_host,
        'port': target_port,
        'targetIQN': target_iqn,
    }
    # optional CHAP credentials
    if connection_data.get('auth_method') == 'CHAP':
        volume_info['chapuser'] = connection_data['auth_username']
        volume_info['chappassword'] = connection_data['auth_password']
    return volume_info
def _get_target_host(iscsi_string):
"""Retrieve target host."""
if iscsi_string:
host = iscsi_string.split(':')[0]
if len(host) > 0:
return host
return CONF.xenserver.target_host
def _get_target_port(iscsi_string):
"""Retrieve target port."""
if iscsi_string and ':' in iscsi_string:
return iscsi_string.split(':')[1]
return CONF.xenserver.target_port
def introduce_sr(session, sr_uuid, label, params):
    """Introduce an SR to XenAPI, create and plug a PBD for it, then rescan.

    Returns the new SR reference.
    """
    LOG.debug('Introducing SR %s', label)
    kind, description = _handle_sr_params(params)
    sr_ref = session.call_xenapi('SR.introduce', sr_uuid, label, description,
                                 kind, '', False, params)
    LOG.debug('Creating PBD for SR')
    pbd_ref = _create_pbd(session, sr_ref, params)
    LOG.debug('Plugging SR')
    session.call_xenapi("PBD.plug", pbd_ref)
    session.call_xenapi("SR.scan", sr_ref)
    return sr_ref
def _handle_sr_params(params):
if 'id' in params:
del params['id']
sr_type = params.pop('sr_type', 'iscsi')
sr_desc = params.pop('name_description', '')
return sr_type, sr_desc
def _create_pbd(session, sr_ref, params):
pbd_rec = {}
pbd_rec['host'] = session.host_ref
pbd_rec['SR'] = sr_ref
pbd_rec['device_config'] = params
pbd_ref = session.call_xenapi("PBD.create", pbd_rec)
return pbd_ref
def introduce_vdi(session, sr_ref, vdi_uuid=None, target_lun=None):
    """Introduce VDI in the host.

    Looks the VDI up by uuid or LUN id (or takes the SR's first VDI when
    neither is given), retrying once after a rescan if the SR has not
    settled yet, and introduces it to XenAPI unless already managed.

    Raises exception.StorageError if the VDI cannot be found or introduced.
    """
    try:
        vdi_ref = _get_vdi_ref(session, sr_ref, vdi_uuid, target_lun)
        if vdi_ref is None:
            # VDI not visible yet: give the SR time to settle, then rescan
            # and try once more.
            greenthread.sleep(CONF.xenserver.introduce_vdi_retry_wait)
            session.call_xenapi("SR.scan", sr_ref)
            vdi_ref = _get_vdi_ref(session, sr_ref, vdi_uuid, target_lun)
    except session.XenAPI.Failure:
        LOG.exception(_LE('Unable to introduce VDI on SR'))
        raise exception.StorageError(
            reason=_('Unable to introduce VDI on SR %s') % sr_ref)

    if not vdi_ref:
        raise exception.StorageError(
            reason=_('VDI not found on SR %(sr)s (vdi_uuid '
                     '%(vdi_uuid)s, target_lun %(target_lun)s)') %
            {'sr': sr_ref, 'vdi_uuid': vdi_uuid,
             'target_lun': target_lun})

    try:
        vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
        LOG.debug(vdi_rec)
    except session.XenAPI.Failure:
        LOG.exception(_LE('Unable to get record of VDI'))
        raise exception.StorageError(
            reason=_('Unable to get record of VDI %s on') % vdi_ref)

    if vdi_rec['managed']:
        # We do not need to introduce the vdi
        return vdi_ref

    try:
        return session.call_xenapi("VDI.introduce",
                                   vdi_rec['uuid'],
                                   vdi_rec['name_label'],
                                   vdi_rec['name_description'],
                                   vdi_rec['SR'],
                                   vdi_rec['type'],
                                   vdi_rec['sharable'],
                                   vdi_rec['read_only'],
                                   vdi_rec['other_config'],
                                   vdi_rec['location'],
                                   vdi_rec['xenstore_data'],
                                   vdi_rec['sm_config'])
    except session.XenAPI.Failure:
        LOG.exception(_LE('Unable to introduce VDI for SR'))
        raise exception.StorageError(
            reason=_('Unable to introduce VDI for SR %s') % sr_ref)
def _get_vdi_ref(session, sr_ref, vdi_uuid, target_lun):
    """Locate a VDI reference by uuid, by LUN id, or take the SR's first VDI.

    Returns None when a target_lun is given but no VDI on the SR matches it.
    """
    if vdi_uuid:
        # Use lazy %-style log arguments (matches the rest of this module)
        # instead of eager string interpolation.
        LOG.debug("vdi_uuid: %s", vdi_uuid)
        return session.call_xenapi("VDI.get_by_uuid", vdi_uuid)
    elif target_lun:
        vdi_refs = session.call_xenapi("SR.get_VDIs", sr_ref)
        for curr_ref in vdi_refs:
            curr_rec = session.call_xenapi("VDI.get_record", curr_ref)
            if ('sm_config' in curr_rec and
                    'LUNid' in curr_rec['sm_config'] and
                    curr_rec['sm_config']['LUNid'] == str(target_lun)):
                return curr_ref
    else:
        return (session.call_xenapi("SR.get_VDIs", sr_ref))[0]

    return None
def purge_sr(session, sr_ref):
    """Forget the SR, unless any of its VDIs is still attached to a VBD."""
    # Make sure no VBDs are referencing the SR VDIs
    vdi_refs = session.call_xenapi("SR.get_VDIs", sr_ref)
    if any(session.call_xenapi("VDI.get_VBDs", vdi_ref)
           for vdi_ref in vdi_refs):
        LOG.warning(_LW('Cannot purge SR with referenced VDIs'))
        return
    forget_sr(session, sr_ref)
def forget_sr(session, sr_ref):
    """Forgets the storage repository without destroying the VDIs within."""
    LOG.debug('Forgetting SR...')
    # PBDs must be unplugged before XenAPI accepts SR.forget.
    _unplug_pbds(session, sr_ref)
    session.call_xenapi("SR.forget", sr_ref)
def _unplug_pbds(session, sr_ref):
    """Best-effort unplug of every PBD attached to the SR.

    XenAPI failures are logged and ignored so the caller can still try to
    forget the SR.
    """
    try:
        pbds = session.call_xenapi("SR.get_PBDs", sr_ref)
    except session.XenAPI.Failure as exc:
        LOG.warning(_LW('Ignoring exception %(exc)s when getting PBDs'
                        ' for %(sr_ref)s'), {'exc': exc, 'sr_ref': sr_ref})
        return

    for pbd in pbds:
        try:
            session.call_xenapi("PBD.unplug", pbd)
        except session.XenAPI.Failure as exc:
            LOG.warning(_LW('Ignoring exception %(exc)s when unplugging'
                            ' PBD %(pbd)s'), {'exc': exc, 'pbd': pbd})
def get_device_number(mountpoint):
    """Translate a mountpoint (e.g. /dev/sdc) into its device number.

    Raises exception.StorageError for unrecognised mountpoint names.
    """
    device_number = _mountpoint_to_number(mountpoint)
    if device_number >= 0:
        return device_number
    raise exception.StorageError(
        reason=_('Unable to obtain target information %s') %
        mountpoint)
def _mountpoint_to_number(mountpoint):
"""Translate a mountpoint like /dev/sdc into a numeric."""
if mountpoint.startswith('/dev/'):
mountpoint = mountpoint[5:]
if re.match('^[hs]d[a-p]$', mountpoint):
return (ord(mountpoint[2:3]) - ord('a'))
elif re.match('^x?vd[a-p]$', mountpoint):
return (ord(mountpoint[-1]) - ord('a'))
elif re.match('^[0-9]+$', mountpoint):
return string.atoi(mountpoint, 10)
else:
LOG.warning(_LW('Mountpoint cannot be translated: %s'), mountpoint)
return -1
def find_sr_by_uuid(session, sr_uuid):
    """Return the storage repository given a uuid.

    Returns None when XenAPI reports the uuid as invalid (SR not present);
    any other XenAPI failure propagates to the caller.
    """
    try:
        return session.call_xenapi("SR.get_by_uuid", sr_uuid)
    except session.XenAPI.Failure as exc:
        if exc.details[0] == 'UUID_INVALID':
            return None
        raise
def find_sr_from_vbd(session, vbd_ref):
    """Find the SR reference from the VBD reference."""
    try:
        vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
        return session.call_xenapi("VDI.get_SR", vdi_ref)
    except session.XenAPI.Failure:
        LOG.exception(_LE('Unable to find SR from VBD'))
        raise exception.StorageError(
            reason=_('Unable to find SR from VBD %s') % vbd_ref)
def find_sr_from_vdi(session, vdi_ref):
    """Find the SR reference from the VDI reference."""
    try:
        return session.call_xenapi("VDI.get_SR", vdi_ref)
    except session.XenAPI.Failure:
        LOG.exception(_LE('Unable to find SR from VDI'))
        raise exception.StorageError(
            reason=_('Unable to find SR from VDI %s') % vdi_ref)
def find_vbd_by_number(session, vm_ref, dev_number):
    """Get the VBD reference from the device number.

    Returns None (implicitly) when no VBD with that userdevice exists or
    every lookup fails; callers must handle that case.
    """
    vbd_refs = session.VM.get_VBDs(vm_ref)
    requested_device = str(dev_number)
    if vbd_refs:
        for vbd_ref in vbd_refs:
            try:
                user_device = session.VBD.get_userdevice(vbd_ref)
                if user_device == requested_device:
                    return vbd_ref
            except session.XenAPI.Failure:
                # best-effort: skip VBDs we cannot inspect
                msg = "Error looking up VBD %s for %s" % (vbd_ref, vm_ref)
                LOG.debug(msg, exc_info=True)
def is_booted_from_volume(session, vm_ref):
    """Determine if the root device is a volume."""
    # device 0 is the root disk; its other_config carries the 'osvol' marker
    vbd_ref = find_vbd_by_number(session, vm_ref, 0)
    other_config = session.VBD.get_other_config(vbd_ref)
    return bool(other_config.get('osvol', False))
| |
import numpy.testing as npt
import numpy as np
import pytest
from scipy import stats
from .common_tests import (check_normalization, check_moment, check_mean_expect,
check_var_expect, check_skew_expect,
check_kurt_expect, check_entropy,
check_private_entropy, check_edge_support,
check_named_args, check_random_state_property,
check_pickling, check_rvs_broadcast, check_freezing)
from scipy.stats._distr_params import distdiscrete
# Append a sample rv_discrete instance (values=(xk, pk)) so every check in
# this module also runs against a generic user-defined distribution.
vals = ([1, 2, 3, 4], [0.1, 0.2, 0.3, 0.4])
distdiscrete += [[stats.rv_discrete(values=vals), ()]]
def cases_test_discrete_basic():
    """Yield (distname, shapes, first_case): first_case is True only the
    first time a given distribution name appears in distdiscrete."""
    emitted = set()
    for name, shapes in distdiscrete:
        yield name, shapes, name not in emitted
        emitted.add(name)
@pytest.mark.parametrize('distname,arg,first_case', cases_test_discrete_basic())
def test_discrete_basic(distname, arg, first_case):
    """Battery of basic consistency checks for one discrete distribution
    (cdf/ppf round trips, pmf/cdf agreement, chi-square goodness of fit,
    plus once-per-distribution API checks)."""
    try:
        distfn = getattr(stats, distname)
    except TypeError:
        # the entry is an rv_discrete instance, not a distribution name
        distfn = distname
        distname = 'sample distribution'
    np.random.seed(9765456)
    rvs = distfn.rvs(size=2000, *arg)
    supp = np.unique(rvs)
    # (removed an unused `m, v = distfn.stats(*arg)` assignment)
    check_cdf_ppf(distfn, arg, supp, distname + ' cdf_ppf')

    check_pmf_cdf(distfn, arg, distname)
    check_oth(distfn, arg, supp, distname + ' oth')
    check_edge_support(distfn, arg)

    alpha = 0.01
    check_discrete_chisquare(distfn, arg, rvs, alpha,
                             distname + ' chisquare')

    if first_case:
        locscale_defaults = (0,)
        meths = [distfn.pmf, distfn.logpmf, distfn.cdf, distfn.logcdf,
                 distfn.logsf]
        # make sure arguments are within support
        spec_k = {'randint': 11, 'hypergeom': 4, 'bernoulli': 0, }
        k = spec_k.get(distname, 1)
        check_named_args(distfn, k, arg, locscale_defaults, meths)
        if distname != 'sample distribution':
            check_scale_docstring(distfn)
        check_random_state_property(distfn, arg)
        check_pickling(distfn, arg)
        check_freezing(distfn, arg)

        # Entropy
        check_entropy(distfn, arg, distname)
        if distfn.__class__._entropy != stats.rv_discrete._entropy:
            check_private_entropy(distfn, arg, stats.rv_discrete)
@pytest.mark.parametrize('distname,arg', distdiscrete)
def test_moments(distname, arg):
    """Check stats() output against generic moment/expectation computations."""
    try:
        distfn = getattr(stats, distname)
    except TypeError:
        # the entry is an rv_discrete instance, not a distribution name
        distfn = distname
        distname = 'sample distribution'
    m, v, s, k = distfn.stats(*arg, moments='mvsk')
    check_normalization(distfn, arg, distname)

    # compare `stats` and `moment` methods
    check_moment(distfn, arg, m, v, distname)
    check_mean_expect(distfn, arg, m, distname)
    check_var_expect(distfn, arg, m, v, distname)
    check_skew_expect(distfn, arg, m, v, s, distname)
    # NOTE(review): zipf/yulesimon presumably skipped because their kurtosis
    # is undefined or inaccurate for these shapes -- confirm.
    if distname not in ['zipf', 'yulesimon']:
        check_kurt_expect(distfn, arg, m, v, k, distname)

    # frozen distr moments
    check_moment_frozen(distfn, arg, m, 1)
    check_moment_frozen(distfn, arg, v+m*m, 2)
@pytest.mark.parametrize('dist,shape_args', distdiscrete)
def test_rvs_broadcast(dist, shape_args):
    """Check rvs() broadcasting of shape parameters and loc."""
    # If shape_only is True, it means the _rvs method of the
    # distribution uses more than one random number to generate a random
    # variate.  That means the result of using rvs with broadcasting or
    # with a nontrivial size will not necessarily be the same as using the
    # numpy.vectorize'd version of rvs(), so we can only compare the shapes
    # of the results, not the values.
    # Whether or not a distribution is in the following list is an
    # implementation detail of the distribution, not a requirement.  If
    # the implementation the rvs() method of a distribution changes, this
    # test might also have to be changed.
    shape_only = dist in ['betabinom', 'skellam', 'yulesimon', 'dlaplace']

    try:
        distfunc = getattr(stats, dist)
    except TypeError:
        distfunc = dist
        dist = 'rv_discrete(values=(%r, %r))' % (dist.xk, dist.pk)
    loc = np.zeros(2)
    nargs = distfunc.numargs
    allargs = []
    bshape = []
    # Generate shape parameter arguments with mutually-broadcastable shapes
    # (k+3 along one axis, 1 elsewhere)...
    for k in range(nargs):
        shp = (k + 3,) + (1,)*(k + 1)
        param_val = shape_args[k]
        allargs.append(np.full(shp, param_val))
        bshape.insert(0, shp[0])
    allargs.append(loc)
    bshape.append(loc.size)
    # bshape holds the expected shape when loc, scale, and the shape
    # parameters are all broadcast together.
    check_rvs_broadcast(distfunc, dist, allargs, bshape, shape_only, [np.int_])
@pytest.mark.parametrize('dist,args', distdiscrete)
def test_ppf_with_loc(dist, args):
    """ppf at q=0 and q=1 must map to the (shifted) support edges."""
    try:
        distfn = getattr(stats, dist)
    except TypeError:
        # the entry is an rv_discrete instance, not a distribution name
        distfn = dist
    #check with a negative, no and positive relocation.
    np.random.seed(1942349)
    re_locs = [np.random.randint(-10, -1), 0, np.random.randint(1, 10)]
    _a, _b = distfn.support(*args)
    for loc in re_locs:
        # ppf(0) is one below the lower support bound by convention
        npt.assert_array_equal(
            [_a-1+loc, _b+loc],
            [distfn.ppf(0.0, *args, loc=loc), distfn.ppf(1.0, *args, loc=loc)]
            )
def check_cdf_ppf(distfn, arg, supp, msg):
    """Check that ppf round-trips the (step-function) cdf on `supp`."""
    # cdf is a step function, and ppf(q) = min{k : cdf(k) >= q, k integer}
    cdf_vals = distfn.cdf(supp, *arg)
    npt.assert_array_equal(distfn.ppf(cdf_vals, *arg),
                           supp, msg + '-roundtrip')
    npt.assert_array_equal(distfn.ppf(cdf_vals - 1e-8, *arg),
                           supp, msg + '-roundtrip')
    if not hasattr(distfn, 'xk'):
        _a, _b = distfn.support(*arg)
        interior = supp[supp < _b]
        npt.assert_array_equal(distfn.ppf(distfn.cdf(interior, *arg) + 1e-8, *arg),
                               interior + distfn.inc, msg + ' ppf-cdf-next')
    # -1e-8 could cause an error if pmf < 1e-8
def check_pmf_cdf(distfn, arg, distname):
    """Check that cdf increments agree with the cumulative sum of the pmf."""
    if hasattr(distfn, 'xk'):
        grid = distfn.xk
    else:
        start = int(distfn.ppf(0.01, *arg) - 1)
        grid = list(range(start, start + 10))
    cdf_vals = distfn.cdf(grid, *arg)
    pmf_cum = distfn.pmf(grid, *arg).cumsum()
    # skellam is computed via ncx2 and is less accurate
    tol = 1e-5 if distname == 'skellam' else 1e-10
    npt.assert_allclose(cdf_vals - cdf_vals[0], pmf_cum - pmf_cum[0],
                        atol=tol, rtol=tol)
def check_moment_frozen(distfn, arg, m, k):
    """Check that the k-th moment of the frozen distribution equals m."""
    frozen = distfn(*arg)
    npt.assert_allclose(frozen.moment(k), m, atol=1e-10, rtol=1e-10)
def check_oth(distfn, arg, supp, msg):
    """Check sf/isf consistency with cdf/ppf and the survival median."""
    # sf must be the complement of cdf on the support
    complement = 1. - distfn.cdf(supp, *arg)
    npt.assert_allclose(distfn.sf(supp, *arg), complement,
                        atol=1e-10, rtol=1e-10)
    # isf(q) must agree with ppf(1 - q)
    probs = np.linspace(0.01, 0.99, 20)
    npt.assert_allclose(distfn.isf(probs, *arg), distfn.ppf(1. - probs, *arg),
                        atol=1e-10, rtol=1e-10)
    # the survival-function median must split the mass in half
    med = distfn.isf(0.5, *arg)
    npt.assert_(distfn.sf(med - 1, *arg) > 0.5)
    npt.assert_(distfn.cdf(med + 1, *arg) > 0.5)
def check_discrete_chisquare(distfn, arg, rvs, alpha, msg):
    """Perform chisquare test for random sample of a discrete distribution
    Parameters
    ----------
    distfn : distribution instance
        the discrete distribution being tested
    arg : sequence
        parameters of distribution
    rvs : array_like
        random sample drawn from ``distfn`` to test
    alpha : float
        significance level, threshold for p-value
    msg : string
        distribution name, used in the assertion message
    Returns
    -------
    result : bool
        0 if test passes, 1 if test fails
    """
    wsupp = 0.05
    # construct intervals with minimum mass `wsupp`.
    # intervals are left-half-open as in a cdf difference
    _a, _b = distfn.support(*arg)
    # clip an unbounded support to a finite range for the histogram below
    lo = int(max(_a, -1000))
    high = int(min(_b, 1000)) + 1
    distsupport = range(lo, high)
    last = 0
    distsupp = [lo]
    distmass = []
    for ii in distsupport:
        current = distfn.cdf(ii, *arg)
        # close the current interval once it holds at least wsupp mass
        # (the 1e-14 slack guards against floating-point shortfall)
        if current - last >= wsupp - 1e-14:
            distsupp.append(ii)
            distmass.append(current - last)
            last = current
            # stop once nearly all of the mass has been covered
            if current > (1 - wsupp):
                break
    # sweep any residual mass into one final interval up to the upper bound
    if distsupp[-1] < _b:
        distsupp.append(_b)
        distmass.append(1 - last)
    distsupp = np.array(distsupp)
    distmass = np.array(distmass)
    # convert intervals to right-half-open as required by histogram
    histsupp = distsupp + 1e-8
    histsupp[0] = _a
    # find sample frequencies and perform chisquare test
    freq, hsupp = np.histogram(rvs, histsupp)
    chis, pval = stats.chisquare(np.array(freq), len(rvs)*distmass)
    npt.assert_(pval > alpha,
                'chisquare - test for %s at arg = %s with pval = %s' %
                (msg, str(arg), str(pval)))
def check_scale_docstring(distfn):
    """Discrete distributions take no `scale`; the docstring must not mention it."""
    doc = distfn.__doc__
    if doc is not None:
        # Docstrings can be stripped if interpreter is run with -OO
        npt.assert_('scale' not in doc)
| |
"""Forms for Contractors and related entities.
Contractor
Call
Pitch
Assignment
"""
from bootstrap3_datetime.widgets import DateTimePicker
from django import forms
from django.db.models import Q
from django.forms import Textarea, TextInput, Select
from editorial.models import (
ContractorProfile,
TalentEditorProfile,
ContractorSubscription,
OrganizationContractorAffiliation,
Story,
Facet,
Call,
Pitch,
Assignment,
)
class ContractorProfileForm(forms.ModelForm):
    """Handles creation and editing of a contractor's profile."""
    class Meta:
        model = ContractorProfile
        # Only profile fields the contractor edits directly are exposed.
        fields = [
            'resume',
            'address',
            'availability',
            'current_location',
            'gear',
            'portfolio_link1',
            'portfolio_link2',
            'portfolio_link3',
            'public',
        ]
        # Bootstrap-styled widgets with placeholder text for each field.
        widgets = {
            'address': Textarea(
                attrs={'class': 'form-control', 'rows': 3, 'placeholder': 'Address'}),
            'availability': TextInput(
                attrs={'class': 'form-control', 'placeholder': 'Availability'}),
            'current_location': TextInput(
                attrs={'class': 'form-control', 'placeholder': 'Current Location'}),
            'gear': Textarea(
                attrs={'class': 'form-control', 'rows': 2, 'placeholder': 'Gear'}),
            'portfolio_link1': TextInput(
                attrs={'class': 'form-control', 'placeholder': 'Portfolio Link 1'}),
            'portfolio_link2': TextInput(
                attrs={'class': 'form-control', 'placeholder': 'Portfolio Link 2'}),
            'portfolio_link3': TextInput(
                attrs={'class': 'form-control', 'placeholder': 'Portfolio Link 3'}),
        }
class OrganizationContractorAffiliationForm(forms.ModelForm):
    """Handles creation and editing of the details of a contractor's
    relationship with a specific organization.
    """
    # NOTE: a stale commented-out __init__ copy-pasted from CallForm
    # (it even called super(CallForm, ...)) was removed here.
    # Only contractors who have opted into a public profile are selectable.
    contractor = forms.ModelChoiceField(
        queryset=ContractorProfile.objects.filter(public=True),
        widget=forms.Select(attrs={'class': 'c-select', 'id': 'affiliation-contractor'}),
        required=True,
    )
    class Meta:
        model = OrganizationContractorAffiliation
        fields = [
            'contractor',
            'w9_on_file',
            'rates',
            'strengths',
            'conflicts',
            'editor_notes',
            'talent_pool',
            'status',
        ]
        # Bootstrap-styled widgets; fields without an entry use defaults.
        widgets = {
            'rates': TextInput(attrs={'class': 'form-control', 'placeholder': 'Rates'}),
            'strengths': TextInput(
                attrs={'class': 'form-control', 'placeholder': 'Strengths'}),
            'conflicts': TextInput(
                attrs={'class': 'form-control', 'placeholder': 'Conflicts'}),
            'editor_notes': Textarea(
                attrs={'class': 'form-control', 'rows': 12, 'placeholder': 'Editor Notes'}),
        }
class ContractorSubscriptionForm(forms.ModelForm):
    """ Form to edit a contractor subscription."""
    class Meta:
        model = ContractorSubscription
        # Only the 'standard' flag is editable through this form.
        fields = ['standard']
class CallForm(forms.ModelForm):
    """Handles creation and editing of a call."""
    def __init__(self, *args, **kwargs):
        # 'organization' is supplied by the view and must be popped before
        # the base constructor runs (ModelForm does not accept it).  The
        # value itself is currently unused in this form.
        org = kwargs.pop("organization")
        super(CallForm, self).__init__(*args, **kwargs)
        # set empty label
        self.fields['status'].empty_label = 'Call status'
    # Optional expiry, edited through a bootstrap datetime picker.
    # NOTE(review): the widget id 'story-embargo-picker' looks copied from
    # the story form — confirm it should not be a call-specific id.
    expiration_date = forms.DateTimeField(
        required=False,
        widget=DateTimePicker(
            options={'format': 'YYYY-MM-DD HH:mm'},
            attrs={'id': 'story-embargo-picker'})
    )
    class Meta:
        model = Call
        fields = [
            'name',
            'text',
            'expiration_date',
            'is_active',
            'urgent',
            'timeframe',
            'status',
        ]
        widgets = {
            'name': TextInput(attrs={'class': 'form-control', 'placeholder': 'Name'}),
            'text': Textarea(attrs={'class': 'form-control', 'placeholder': 'Text'}),
            'timeframe': TextInput(
                attrs={'class': 'form-control', 'placeholder': 'Timeframe'}),
            'status': Select(attrs={'class': 'c-select', 'id': 'call-status'}),
        }
class PitchForm(forms.ModelForm):
    """Handles creation and editing of a pitch."""
    # Pitches can only be sent to talent editors with public profiles.
    recipient = forms.ModelChoiceField(
        queryset=TalentEditorProfile.objects.filter(public=True),
        widget=forms.Select(attrs={'class': 'c-select', 'id': 'pitch-recipient'}),
        required=True,
    )
    class Meta:
        model = Pitch
        fields = [
            'name',
            'text',
            'status',
            'exclusive',
            'recipient',
        ]
        widgets = {
            'name': TextInput(attrs={'class': 'form-control', 'placeholder': 'Name'}),
            'text': Textarea(attrs={'class': 'form-control', 'placeholder': 'Text'}),
            'status': Select(attrs={'class': 'c-select', 'id': 'pitch-status'}),
        }
class AssignmentForm(forms.ModelForm):
    """Handles creation and editing of a assignment."""
    def __init__(self, *args, **kwargs):
        # 'organization' is supplied by the view and must be popped before
        # the base constructor runs (ModelForm does not accept it).
        org = kwargs.pop("organization")
        super(AssignmentForm, self).__init__(*args, **kwargs)
        # limit to stories or facets owned by an organization or that an org is a collaborator on
        self.fields['story'].queryset = Story.objects.filter(
            Q(organization=org) | Q(collaborate_with=org))
        self.fields['facet'].queryset = Facet.objects.filter(Q(organization=org))
        # set empty labels
        self.fields['contractor'].empty_label = "Select a contractor"
        self.fields['story'].empty_label = 'Select a story'
        self.fields['facet'].empty_label = 'Select a facet'
    # Only contractors with public profiles can receive assignments.
    contractor = forms.ModelChoiceField(
        queryset=ContractorProfile.objects.filter(public=True),
        widget=forms.Select(attrs={'class': 'c-select', 'id': 'assignment-contractor'}),
        required=True,
    )
    class Meta:
        model = Assignment
        fields = [
            'name',
            'text',
            'rate',
            'contractor',
            'complete',
            'story',
            'facet',
        ]
        widgets = {
            'name': TextInput(attrs={'class': 'form-control', 'placeholder': 'Name'}),
            'text': Textarea(attrs={'class': 'form-control', 'placeholder': 'Text'}),
            'rate': TextInput(attrs={'class': 'form-control', 'placeholder': 'Rate'}),
            'story': Select(attrs={'class': 'c-select', 'id': 'assignment-story'}),
            'facet': Select(attrs={'class': 'c-select', 'id': 'assignment-facet'}),
        }
| |
# -*- coding: utf-8 -*-
#$URL: https://rst2pdf.googlecode.com/svn/tags/0.93/rst2pdf/genpdftext.py $
#$Date: 2012-12-14 13:41:35 -0300 (Fri, 14 Dec 2012) $
#$Revision: 2621 $
# See LICENSE.txt for licensing terms
import os
from xml.sax.saxutils import escape
from log import log, nodeid
from basenodehandler import NodeHandler
import docutils.nodes
from urlparse import urljoin, urlparse
from reportlab.lib.units import cm
from opt_imports import Paragraph
from image import MyImage, missing
from flowables import MySpacer
class FontHandler(NodeHandler):
    """Base handler that wraps node text in a <font> tag chosen by
    the subclass attribute ``fontstyle``."""
    def get_pre_post(self, client, node, replaceEnt):
        opening = self.get_font_prefix(client, node, replaceEnt)
        return opening, '</font>'
    def get_font_prefix(self, client, node, replaceEnt):
        # Resolve the handler's style name to a <font ...> start tag.
        return client.styleToFont(self.fontstyle)
class HandleText(NodeHandler, docutils.nodes.Text):
    """Render plain text nodes, escaping entities when requested."""
    def gather_elements(self, client, node, style):
        # A bare text node becomes a single paragraph flowable.
        return [Paragraph(client.gather_pdftext(node), style)]
    def get_text(self, client, node, replaceEnt):
        content = node.astext()
        if replaceEnt:
            content = escape(content)
        return content
class HandleStrong(NodeHandler, docutils.nodes.strong):
    # Strong emphasis renders as bold.
    pre = "<b>"
    post = "</b>"
class HandleEmphasis(NodeHandler, docutils.nodes.emphasis):
    # Emphasis renders as italic.
    pre = "<i>"
    post = "</i>"
class HandleLiteral(NodeHandler, docutils.nodes.literal):
    """Render inline literal nodes in the 'literal' font, optionally
    preventing hyphenation/line breaks."""
    def get_pre_post(self, client, node, replaceEnt):
        # The first docutils class, if present, selects the font style;
        # otherwise fall back to the 'literal' style.
        if node['classes']:
            pre = client.styleToFont(node['classes'][0])
        else:
            pre = client.styleToFont('literal')
        post = "</font>"
        if not client.styles['literal'].hyphenation:
            # Keep the literal on one line when hyphenation is disabled.
            pre = '<nobr>' + pre
            post += '</nobr>'
        return pre, post
    def get_text(self, client, node, replaceEnt):
        # BUGFIX: removed a dead `text = node.astext()` assignment that was
        # immediately overwritten by the escaped version below.
        # Literal text is always escaped (replaceEnt is intentionally
        # ignored) and spaces are replaced so spacing is preserved
        # (the replacement looks like a non-breaking space — TODO confirm).
        text = escape(node.astext())
        text = text.replace(' ', ' ')
        return text
class HandleSuper(NodeHandler, docutils.nodes.superscript):
    # Superscript markup.
    pre = '<super>'
    post = "</super>"
class HandleSub(NodeHandler, docutils.nodes.subscript):
    # Subscript markup.
    pre = '<sub>'
    post = "</sub>"
class HandleTitleReference(FontHandler, docutils.nodes.title_reference):
    # Title references use the dedicated 'title_reference' font style.
    fontstyle = 'title_reference'
class HandleReference(NodeHandler, docutils.nodes.reference):
    """Render hyperlinks: external URIs, intra-document fragments and
    internal reference ids."""
    def get_pre_post(self, client, node, replaceEnt):
        pre, post = '', ''
        uri = node.get('refuri')
        if uri:
            # Issue 366: links to "#" make no sense in a PDF
            if uri =="#":
                return "", ""
            if uri.startswith ('#'):
                # keep the fragment as-is (don't join with baseurl);
                # a plain <a> tag is still emitted below
                pass
            elif client.baseurl: # Need to join the uri with the base url
                uri = urljoin(client.baseurl, uri)
            # URIs with a scheme may be shown inline next to the text
            # instead of as a link, when inlinelinks is enabled.
            if urlparse(uri)[0] and client.inlinelinks:
                # external inline reference
                if uri in [node.astext(),"mailto:"+node.astext()]:
                    # No point on repeating it
                    post = u''
                elif uri.startswith('http://') or uri.startswith('ftp://'):
                    post = u' (%s)' % uri
                elif uri.startswith('mailto:'):
                    #No point on showing "mailto:"
                    post = u' (%s)' % uri[7:]
            else:
                # A plain old link
                pre += u'<a href="%s" color="%s">' %\
                    (uri, client.styles.linkColor)
                post = '</a>' + post
        else:
            # No refuri: fall back to an internal reference id, if any.
            uri = node.get('refid')
            if uri:
                pre += u'<a href="#%s" color="%s">' %\
                    (uri, client.styles.linkColor)
                post = '</a>' + post
        return pre, post
class HandleOptions(HandleText, docutils.nodes.option_string, docutils.nodes.option_argument):
    # Option strings/arguments render exactly like plain text.
    pass
class HandleSysMessage(HandleText, docutils.nodes.system_message, docutils.nodes.problematic):
    # Inline system messages/problematics would be shown in red...
    pre = '<font color="red">'
    post = "</font>"
    def gather_elements(self, client, node, style):
        # ...but as block elements they are currently suppressed entirely.
        # FIXME show the error in the document, red, whatever
        # log.warning("Problematic node %s", node.astext())
        return []
class HandleGenerated(HandleText, docutils.nodes.generated):
    # Generated nodes (e.g. section numbers) render as plain text.
    pass
    # def get_text(self, client, node, replaceEnt):
    #     if 'sectnum' in node['classes']:
    #         # This is the child of a title with a section number
    #         # Send the section number up to the title node
    #         node.parent['_sectnum'] = node.astext()
    #     return node.astext()
class HandleImage(NodeHandler, docutils.nodes.image):
    """Render docutils image nodes, as block flowables (gather_elements)
    or inline <img> markup (get_text)."""
    def gather_elements(self, client, node, style):
        # FIXME: handle alt
        # If the image is wrapped in a reference, the flowable links there.
        target = None
        if isinstance(node.parent, docutils.nodes.reference):
            target = node.parent.get('refuri', None)
        # The first docutils class, if any, overrides the default style.
        st_name = 'image'
        if node.get('classes'):
            st_name = node.get('classes')[0]
        style=client.styles[st_name]
        # Remote URIs are used as-is; everything else is joined to basedir.
        uri = str(node.get("uri"))
        if uri.split("://")[0].lower() not in ('http','ftp','https'):
            imgname = os.path.join(client.basedir,uri)
        else:
            imgname = uri
        try:
            w, h, kind = MyImage.size_for_node(node, client=client)
        except ValueError:
            # Broken image: use the placeholder with arbitrary dimensions.
            imgname=missing
            w, h, kind = 100, 100, 'direct'
        node.elements = [
            MyImage(filename=imgname, height=h, width=w,
                    kind=kind, client=client, target=target)]
        alignment = node.get('align', '').upper()
        if not alignment:
            # There is no JUSTIFY for flowables, of course, so 4:LEFT
            # (dict values are all non-empty, so a second fallback to
            # CENTER that used to follow here was unreachable and removed)
            alignment = {0:'LEFT', 1:'CENTER', 2:'RIGHT', 4:'LEFT'}[style.alignment]
        node.elements[0].image.hAlign = alignment
        node.elements[0].spaceBefore = style.spaceBefore
        node.elements[0].spaceAfter = style.spaceAfter
        # Image flowables don't support valign (makes no sense for them?)
        # elif alignment in ('TOP','MIDDLE','BOTTOM'):
        #    i.vAlign = alignment
        return node.elements
    def get_text(self, client, node, replaceEnt):
        # First see if the image file exists, or else,
        # use image-missing.png
        imgname = os.path.join(client.basedir,str(node.get("uri")))
        try:
            w, h, kind = MyImage.size_for_node(node, client=client)
        except ValueError:
            # Broken image, return arbitrary stuff
            imgname=missing
            w, h, kind = 100, 100, 'direct'
        alignment=node.get('align', 'CENTER').lower()
        if alignment in ('top', 'middle', 'bottom'):
            align='valign="%s"'%alignment
        else:
            align=''
        # TODO: inline images don't support SVG, vectors and PDF,
        # which may be surprising. So, work on converting them
        # previous to passing to reportlab.
        # Try to rasterize using the backend
        # BUGFIX: a redundant second size_for_node() call was removed here;
        # it re-raised ValueError for broken images, defeating the
        # fallback established in the except clause above.
        uri=MyImage.raster(imgname, client)
        return '<img src="%s" width="%f" height="%f" %s/>'%\
            (uri, w, h, align)
class HandleFootRef(NodeHandler, docutils.nodes.footnote_reference,docutils.nodes.citation_reference):
    """Render footnote/citation references as superscript links."""
    def get_text(self, client, node, replaceEnt):
        # TODO: when used in Sphinx, all footnotes are autonumbered
        # Emit an anchor for each id not yet registered, so backlinks work.
        anchors=''
        for i in node.get('ids'):
            if i not in client.targets:
                anchors+='<a name="%s"/>' % i
                client.targets.append(i)
        # Link to the refid if present, otherwise fall back to the text.
        return u'%s<super><a href="%s" color="%s">%s</a></super>'%\
            (anchors, '#' + node.get('refid',node.astext()),
                client.styles.linkColor, node.astext())
class HandleTarget(NodeHandler, docutils.nodes.target):
    """Render hyperlink targets, registering anchors with the client."""
    def gather_elements(self, client, node, style):
        # Block-level target: defer the anchor until the next flowable.
        if 'refid' in node:
            client.pending_targets.append(node['refid'])
        return client.gather_elements(node, style)
    def get_text(self, client, node, replaceEnt):
        text = client.gather_pdftext(node)
        if replaceEnt:
            text = escape(text)
        return text
    def get_pre_post(self, client, node, replaceEnt):
        # Inline target: emit an anchor for the first id, once only.
        pre = ''
        if node['ids'][0] not in client.targets:
            pre = u'<a name="%s"/>' % node['ids'][0]
            client.targets.append(node['ids'][0])
        return pre, ''
class HandleInline(NodeHandler, docutils.nodes.inline):
    """Wrap inline nodes in the tags derived from their first class."""
    def get_pre_post(self, client, node, replaceEnt):
        tags = client.styleToTags(node['classes'][0])
        # Fall back to empty pre/post when the style yields no tags.
        if not tags:
            return '', ''
        return tags
| |
from sympy import (
Symbol, Dummy, gamma, I, oo, nan, zoo, factorial, sqrt, Rational,
multigamma, log, polygamma, digamma, trigamma, EulerGamma, pi, uppergamma, S, expand_func,
loggamma, sin, cos, O, lowergamma, exp, erf, erfc, exp_polar, harmonic,
zeta, conjugate, Ei, im, re, tanh, Abs)
from sympy.core.expr import unchanged
from sympy.core.function import ArgumentIndexError
from sympy.utilities.pytest import raises
from sympy.utilities.randtest import (test_derivative_numerically as td,
random_complex_number as randcplx,
verify_numerically as tn)
# Generic symbols shared by the tests below.
x = Symbol('x')
y = Symbol('y')
n = Symbol('n', integer=True)
w = Symbol('w', real=True)
def test_gamma():
    """Special values, half-integer values, expansion and assumptions of gamma."""
    assert gamma(nan) is nan
    assert gamma(oo) is oo
    assert gamma(-100) is zoo
    assert gamma(0) is zoo
    assert gamma(-100.0) is zoo
    assert gamma(1) == 1
    assert gamma(2) == 1
    assert gamma(3) == 2
    assert gamma(102) == factorial(101)
    assert gamma(S.Half) == sqrt(pi)
    assert gamma(Rational(3, 2)) == sqrt(pi)*S.Half
    assert gamma(Rational(5, 2)) == sqrt(pi)*Rational(3, 4)
    assert gamma(Rational(7, 2)) == sqrt(pi)*Rational(15, 8)
    assert gamma(Rational(-1, 2)) == -2*sqrt(pi)
    assert gamma(Rational(-3, 2)) == sqrt(pi)*Rational(4, 3)
    assert gamma(Rational(-5, 2)) == sqrt(pi)*Rational(-8, 15)
    assert gamma(Rational(-15, 2)) == sqrt(pi)*Rational(256, 2027025)
    assert gamma(Rational(
        -11, 8)).expand(func=True) == Rational(64, 33)*gamma(Rational(5, 8))
    assert gamma(Rational(
        -10, 3)).expand(func=True) == Rational(81, 280)*gamma(Rational(2, 3))
    assert gamma(Rational(
        14, 3)).expand(func=True) == Rational(880, 81)*gamma(Rational(2, 3))
    assert gamma(Rational(
        17, 7)).expand(func=True) == Rational(30, 49)*gamma(Rational(3, 7))
    assert gamma(Rational(
        19, 8)).expand(func=True) == Rational(33, 64)*gamma(Rational(3, 8))
    assert gamma(x).diff(x) == gamma(x)*polygamma(0, x)
    assert gamma(x - 1).expand(func=True) == gamma(x)/(x - 1)
    assert gamma(x + 2).expand(func=True, mul=False) == x*(x + 1)*gamma(x)
    assert conjugate(gamma(x)) == gamma(conjugate(x))
    assert expand_func(gamma(x + Rational(3, 2))) == \
        (x + S.Half)*gamma(x + S.Half)
    assert expand_func(gamma(x - S.Half)) == \
        gamma(S.Half + x)/(x - S.Half)
    # Test a bug:
    assert expand_func(gamma(x + Rational(3, 4))) == gamma(x + Rational(3, 4))
    # XXX: Not sure about these tests. I can fix them by defining e.g.
    # exp_polar.is_integer but I'm not sure if that makes sense.
    assert gamma(3*exp_polar(I*pi)/4).is_nonnegative is False
    assert gamma(3*exp_polar(I*pi)/4).is_extended_nonpositive is True
    # NOTE: the local rebinding of `y` below deliberately shadows the
    # module-level symbol for the realness checks.
    y = Symbol('y', nonpositive=True, integer=True)
    assert gamma(y).is_real == False
    y = Symbol('y', positive=True, noninteger=True)
    assert gamma(y).is_real == True
    assert gamma(-1.0, evaluate=False).is_real == False
    assert gamma(0, evaluate=False).is_real == False
    assert gamma(-2, evaluate=False).is_real == False
def test_gamma_rewrite():
    """gamma(n) rewrites to factorial(n - 1)."""
    assert gamma(n).rewrite(factorial) == factorial(n - 1)
def test_gamma_series():
    """Series expansions of gamma around 0 and around the pole at -1."""
    assert gamma(x + 1).series(x, 0, 3) == \
        1 - EulerGamma*x + x**2*(EulerGamma**2/2 + pi**2/12) + O(x**3)
    assert gamma(x).series(x, -1, 3) == \
        -1/(x + 1) + EulerGamma - 1 + (x + 1)*(-1 - pi**2/12 - EulerGamma**2/2 + \
       EulerGamma) + (x + 1)**2*(-1 - pi**2/12 - EulerGamma**2/2 + EulerGamma**3/6 - \
       polygamma(2, 1)/6 + EulerGamma*pi**2/12 + EulerGamma) + O((x + 1)**3, (x, -1))
def tn_branch(s, func):
    """Numerically verify func's branch jump across the negative real axis."""
    from sympy import I, pi, exp_polar
    from random import uniform
    c = uniform(1, 5)
    # Jump expressed exactly via polar arguments just above/below the cut...
    polar_jump = func(s, c*exp_polar(I*pi)) - func(s, c*exp_polar(-I*pi))
    eps = 1e-15
    # ...and the same jump probed with a tiny imaginary perturbation.
    probed_jump = func(s + eps, -c + eps*I) - func(s + eps, -c - eps*I)
    return abs(polar_jump.n() - probed_jump.n()).n() < 1e-10
def test_lowergamma():
    """Derivatives, special values, branching and rewrites of lowergamma."""
    from sympy import meijerg, exp_polar, I, expint
    assert lowergamma(x, 0) == 0
    assert lowergamma(x, y).diff(y) == y**(x - 1)*exp(-y)
    assert td(lowergamma(randcplx(), y), y)
    assert td(lowergamma(x, randcplx()), x)
    assert lowergamma(x, y).diff(x) == \
        gamma(x)*digamma(x) - uppergamma(x, y)*log(y) \
        - meijerg([], [1, 1], [0, 0, x], [], y)
    assert lowergamma(S.Half, x) == sqrt(pi)*erf(sqrt(x))
    assert not lowergamma(S.Half - 3, x).has(lowergamma)
    assert not lowergamma(S.Half + 3, x).has(lowergamma)
    assert lowergamma(S.Half, x, evaluate=False).has(lowergamma)
    assert tn(lowergamma(S.Half + 3, x, evaluate=False),
              lowergamma(S.Half + 3, x), x)
    assert tn(lowergamma(S.Half - 3, x, evaluate=False),
              lowergamma(S.Half - 3, x), x)
    # Branch behaviour across the negative real axis.
    assert tn_branch(-3, lowergamma)
    assert tn_branch(-4, lowergamma)
    assert tn_branch(Rational(1, 3), lowergamma)
    assert tn_branch(pi, lowergamma)
    assert lowergamma(3, exp_polar(4*pi*I)*x) == lowergamma(3, x)
    assert lowergamma(y, exp_polar(5*pi*I)*x) == \
        exp(4*I*pi*y)*lowergamma(y, x*exp_polar(pi*I))
    assert lowergamma(-2, exp_polar(5*pi*I)*x) == \
        lowergamma(-2, x*exp_polar(I*pi)) + 2*pi*I
    assert conjugate(lowergamma(x, y)) == lowergamma(conjugate(x), conjugate(y))
    assert conjugate(lowergamma(x, 0)) == 0
    assert unchanged(conjugate, lowergamma(x, -oo))
    # Rewrites in terms of expint and uppergamma.
    assert lowergamma(
        x, y).rewrite(expint) == -y**x*expint(-x + 1, y) + gamma(x)
    k = Symbol('k', integer=True)
    assert lowergamma(
        k, y).rewrite(expint) == -y**k*expint(-k + 1, y) + gamma(k)
    k = Symbol('k', integer=True, positive=False)
    assert lowergamma(k, y).rewrite(expint) == lowergamma(k, y)
    assert lowergamma(x, y).rewrite(uppergamma) == gamma(x) - uppergamma(x, y)
    assert lowergamma(70, 6) == factorial(69) - 69035724522603011058660187038367026272747334489677105069435923032634389419656200387949342530805432320 * exp(-6)
    assert (lowergamma(S(77) / 2, 6) - lowergamma(S(77) / 2, 6, evaluate=False)).evalf() < 1e-16
    assert (lowergamma(-S(77) / 2, 6) - lowergamma(-S(77) / 2, 6, evaluate=False)).evalf() < 1e-16
def test_uppergamma():
    """Derivatives, special values, branching and rewrites of uppergamma."""
    from sympy import meijerg, exp_polar, I, expint
    assert uppergamma(4, 0) == 6
    assert uppergamma(x, y).diff(y) == -y**(x - 1)*exp(-y)
    assert td(uppergamma(randcplx(), y), y)
    assert uppergamma(x, y).diff(x) == \
        uppergamma(x, y)*log(y) + meijerg([], [1, 1], [0, 0, x], [], y)
    assert td(uppergamma(x, randcplx()), x)
    p = Symbol('p', positive=True)
    assert uppergamma(0, p) == -Ei(-p)
    assert uppergamma(p, 0) == gamma(p)
    assert uppergamma(S.Half, x) == sqrt(pi)*erfc(sqrt(x))
    assert not uppergamma(S.Half - 3, x).has(uppergamma)
    assert not uppergamma(S.Half + 3, x).has(uppergamma)
    assert uppergamma(S.Half, x, evaluate=False).has(uppergamma)
    assert tn(uppergamma(S.Half + 3, x, evaluate=False),
              uppergamma(S.Half + 3, x), x)
    assert tn(uppergamma(S.Half - 3, x, evaluate=False),
              uppergamma(S.Half - 3, x), x)
    assert unchanged(uppergamma, x, -oo)
    assert unchanged(uppergamma, x, 0)
    # Branch behaviour across the negative real axis.
    assert tn_branch(-3, uppergamma)
    assert tn_branch(-4, uppergamma)
    assert tn_branch(Rational(1, 3), uppergamma)
    assert tn_branch(pi, uppergamma)
    assert uppergamma(3, exp_polar(4*pi*I)*x) == uppergamma(3, x)
    assert uppergamma(y, exp_polar(5*pi*I)*x) == \
        exp(4*I*pi*y)*uppergamma(y, x*exp_polar(pi*I)) + \
        gamma(y)*(1 - exp(4*pi*I*y))
    assert uppergamma(-2, exp_polar(5*pi*I)*x) == \
        uppergamma(-2, x*exp_polar(I*pi)) - 2*pi*I
    assert uppergamma(-2, x) == expint(3, x)/x**2
    assert conjugate(uppergamma(x, y)) == uppergamma(conjugate(x), conjugate(y))
    assert unchanged(conjugate, uppergamma(x, -oo))
    # Rewrites in terms of expint and lowergamma.
    assert uppergamma(x, y).rewrite(expint) == y**x*expint(-x + 1, y)
    assert uppergamma(x, y).rewrite(lowergamma) == gamma(x) - lowergamma(x, y)
    assert uppergamma(70, 6) == 69035724522603011058660187038367026272747334489677105069435923032634389419656200387949342530805432320*exp(-6)
    assert (uppergamma(S(77) / 2, 6) - uppergamma(S(77) / 2, 6, evaluate=False)).evalf() < 1e-16
    assert (uppergamma(-S(77) / 2, 6) - uppergamma(-S(77) / 2, 6, evaluate=False)).evalf() < 1e-16
def test_polygamma():
    """Special values, rewrites, branching and assumptions of polygamma."""
    from sympy import I
    assert polygamma(n, nan) is nan
    assert polygamma(0, oo) is oo
    assert polygamma(0, -oo) is oo
    assert polygamma(0, I*oo) is oo
    assert polygamma(0, -I*oo) is oo
    assert polygamma(1, oo) == 0
    assert polygamma(5, oo) == 0
    # NOTE(review): the next assert is duplicated.
    assert polygamma(0, -9) is zoo
    assert polygamma(0, -9) is zoo
    assert polygamma(0, -1) is zoo
    assert polygamma(0, 0) is zoo
    assert polygamma(0, 1) == -EulerGamma
    assert polygamma(0, 7) == Rational(49, 20) - EulerGamma
    assert polygamma(1, 1) == pi**2/6
    assert polygamma(1, 2) == pi**2/6 - 1
    assert polygamma(1, 3) == pi**2/6 - Rational(5, 4)
    assert polygamma(3, 1) == pi**4 / 15
    assert polygamma(3, 5) == 6*(Rational(-22369, 20736) + pi**4/90)
    assert polygamma(5, 1) == 8 * pi**6 / 63
    # Helper: check that digamma at rational m/n evaluates to a closed form
    # that matches a numerical evaluation.
    def t(m, n):
        x = S(m)/n
        r = polygamma(0, x)
        if r.has(polygamma):
            return False
        return abs(polygamma(0, x.n()).n() - r.n()).n() < 1e-10
    assert t(1, 2)
    assert t(3, 2)
    assert t(-1, 2)
    assert t(1, 4)
    assert t(-3, 4)
    assert t(1, 3)
    assert t(4, 3)
    assert t(3, 4)
    assert t(2, 3)
    assert t(123, 5)
    # Rewrites in terms of zeta are only valid for positive integer order.
    assert polygamma(0, x).rewrite(zeta) == polygamma(0, x)
    assert polygamma(1, x).rewrite(zeta) == zeta(2, x)
    assert polygamma(2, x).rewrite(zeta) == -2*zeta(3, x)
    assert polygamma(I, 2).rewrite(zeta) == polygamma(I, 2)
    n1 = Symbol('n1')
    n2 = Symbol('n2', real=True)
    n3 = Symbol('n3', integer=True)
    n4 = Symbol('n4', positive=True)
    n5 = Symbol('n5', positive=True, integer=True)
    assert polygamma(n1, x).rewrite(zeta) == polygamma(n1, x)
    assert polygamma(n2, x).rewrite(zeta) == polygamma(n2, x)
    assert polygamma(n3, x).rewrite(zeta) == polygamma(n3, x)
    assert polygamma(n4, x).rewrite(zeta) == polygamma(n4, x)
    assert polygamma(n5, x).rewrite(zeta) == (-1)**(n5 + 1) * factorial(n5) * zeta(n5 + 1, x)
    assert polygamma(3, 7*x).diff(x) == 7*polygamma(4, 7*x)
    assert polygamma(0, x).rewrite(harmonic) == harmonic(x - 1) - EulerGamma
    assert polygamma(2, x).rewrite(harmonic) == 2*harmonic(x - 1, 3) - 2*zeta(3)
    ni = Symbol("n", integer=True)
    assert polygamma(ni, x).rewrite(harmonic) == (-1)**(ni + 1)*(-harmonic(x - 1, ni + 1)
                                                                + zeta(ni + 1))*factorial(ni)
    # Polygamma of non-negative integer order is unbranched:
    from sympy import exp_polar
    k = Symbol('n', integer=True, nonnegative=True)
    assert polygamma(k, exp_polar(2*I*pi)*x) == polygamma(k, x)
    # but negative integers are branched!
    k = Symbol('n', integer=True)
    assert polygamma(k, exp_polar(2*I*pi)*x).args == (k, exp_polar(2*I*pi)*x)
    # Polygamma of order -1 is loggamma:
    assert polygamma(-1, x) == loggamma(x)
    # But smaller orders are iterated integrals and don't have a special name
    assert polygamma(-2, x).func is polygamma
    # Test a bug
    assert polygamma(0, -x).expand(func=True) == polygamma(0, -x)
    # Sign assumptions for concrete orders and arguments.
    assert polygamma(2, 2.5).is_positive == False
    assert polygamma(2, -2.5).is_positive == False
    assert polygamma(3, 2.5).is_positive == True
    assert polygamma(3, -2.5).is_positive is True
    assert polygamma(-2, -2.5).is_positive is None
    assert polygamma(-3, -2.5).is_positive is None
    assert polygamma(2, 2.5).is_negative == True
    assert polygamma(3, 2.5).is_negative == False
    assert polygamma(3, -2.5).is_negative == False
    assert polygamma(2, -2.5).is_negative is True
    assert polygamma(-2, -2.5).is_negative is None
    assert polygamma(-3, -2.5).is_negative is None
    assert polygamma(I, 2).is_positive is None
    assert polygamma(I, 3).is_negative is None
    # issue 17350
    assert polygamma(pi, 3).evalf() == polygamma(pi, 3)
    assert (I*polygamma(I, pi)).as_real_imag() == \
           (-im(polygamma(I, pi)), re(polygamma(I, pi)))
    assert (tanh(polygamma(I, 1))).rewrite(exp) == \
           (exp(polygamma(I, 1)) - exp(-polygamma(I, 1)))/(exp(polygamma(I, 1)) + exp(-polygamma(I, 1)))
    assert (I / polygamma(I, 4)).rewrite(exp) == \
           I*sqrt(re(polygamma(I, 4))**2 + im(polygamma(I, 4))**2)\
           /((re(polygamma(I, 4)) + I*im(polygamma(I, 4)))*Abs(polygamma(I, 4)))
    assert unchanged(polygamma, 2.3, 1.0)
    # issue 12569
    assert unchanged(im, polygamma(0, I))
    assert polygamma(Symbol('a', positive=True), Symbol('b', positive=True)).is_real is True
    assert polygamma(0, I).is_real is None
def test_polygamma_expand_func():
    """Recurrence/duplication expansions of polygamma via expand(func=True)."""
    assert polygamma(0, x).expand(func=True) == polygamma(0, x)
    assert polygamma(0, 2*x).expand(func=True) == \
        polygamma(0, x)/2 + polygamma(0, S.Half + x)/2 + log(2)
    assert polygamma(1, 2*x).expand(func=True) == \
        polygamma(1, x)/4 + polygamma(1, S.Half + x)/4
    assert polygamma(2, x).expand(func=True) == \
        polygamma(2, x)
    assert polygamma(0, -1 + x).expand(func=True) == \
        polygamma(0, x) - 1/(x - 1)
    assert polygamma(0, 1 + x).expand(func=True) == \
        1/x + polygamma(0, x )
    assert polygamma(0, 2 + x).expand(func=True) == \
        1/x + 1/(1 + x) + polygamma(0, x)
    assert polygamma(0, 3 + x).expand(func=True) == \
        polygamma(0, x) + 1/x + 1/(1 + x) + 1/(2 + x)
    assert polygamma(0, 4 + x).expand(func=True) == \
        polygamma(0, x) + 1/x + 1/(1 + x) + 1/(2 + x) + 1/(3 + x)
    assert polygamma(1, 1 + x).expand(func=True) == \
        polygamma(1, x) - 1/x**2
    assert polygamma(1, 2 + x).expand(func=True, multinomial=False) == \
        polygamma(1, x) - 1/x**2 - 1/(1 + x)**2
    assert polygamma(1, 3 + x).expand(func=True, multinomial=False) == \
        polygamma(1, x) - 1/x**2 - 1/(1 + x)**2 - 1/(2 + x)**2
    assert polygamma(1, 4 + x).expand(func=True, multinomial=False) == \
        polygamma(1, x) - 1/x**2 - 1/(1 + x)**2 - \
        1/(2 + x)**2 - 1/(3 + x)**2
    # Non-integer shifts should be left alone.
    assert polygamma(0, x + y).expand(func=True) == \
        polygamma(0, x + y)
    assert polygamma(1, x + y).expand(func=True) == \
        polygamma(1, x + y)
    assert polygamma(1, 3 + 4*x + y).expand(func=True, multinomial=False) == \
        polygamma(1, y + 4*x) - 1/(y + 4*x)**2 - \
        1/(1 + y + 4*x)**2 - 1/(2 + y + 4*x)**2
    assert polygamma(3, 3 + 4*x + y).expand(func=True, multinomial=False) == \
        polygamma(3, y + 4*x) - 6/(y + 4*x)**4 - \
        6/(1 + y + 4*x)**4 - 6/(2 + y + 4*x)**4
    assert polygamma(3, 4*x + y + 1).expand(func=True, multinomial=False) == \
        polygamma(3, y + 4*x) - 6/(y + 4*x)**4
    e = polygamma(3, 4*x + y + Rational(3, 2))
    assert e.expand(func=True) == e
    e = polygamma(3, x + y + Rational(3, 4))
    assert e.expand(func=True, basic=False) == e
def test_digamma():
    """Special values, rational-argument evaluation and rewrites of digamma."""
    from sympy import I
    assert digamma(nan) == nan
    assert digamma(oo) == oo
    assert digamma(-oo) == oo
    assert digamma(I*oo) == oo
    assert digamma(-I*oo) == oo
    # NOTE(review): the next assert is duplicated.
    assert digamma(-9) == zoo
    assert digamma(-9) == zoo
    assert digamma(-1) == zoo
    assert digamma(0) == zoo
    assert digamma(1) == -EulerGamma
    assert digamma(7) == Rational(49, 20) - EulerGamma
    # Helper: check that digamma at rational m/n evaluates to a closed form
    # matching a numerical evaluation.
    def t(m, n):
        x = S(m)/n
        r = digamma(x)
        if r.has(digamma):
            return False
        return abs(digamma(x.n()).n() - r.n()).n() < 1e-10
    assert t(1, 2)
    assert t(3, 2)
    assert t(-1, 2)
    assert t(1, 4)
    assert t(-3, 4)
    assert t(1, 3)
    assert t(4, 3)
    assert t(3, 4)
    assert t(2, 3)
    assert t(123, 5)
    assert digamma(x).rewrite(zeta) == polygamma(0, x)
    assert digamma(x).rewrite(harmonic) == harmonic(x - 1) - EulerGamma
    assert digamma(I).is_real is None
    assert digamma(x,evaluate=False).fdiff() == polygamma(1, x)
    assert digamma(x,evaluate=False).is_real is None
    assert digamma(x,evaluate=False).is_positive is None
    assert digamma(x,evaluate=False).is_negative is None
    assert digamma(x,evaluate=False).rewrite(polygamma) == polygamma(0, x)
def test_digamma_expand_func():
    """Recurrence/duplication expansions of digamma via expand(func=True)."""
    assert digamma(x).expand(func=True) == polygamma(0, x)
    assert digamma(2*x).expand(func=True) == \
        polygamma(0, x)/2 + polygamma(0, Rational(1, 2) + x)/2 + log(2)
    assert digamma(-1 + x).expand(func=True) == \
        polygamma(0, x) - 1/(x - 1)
    assert digamma(1 + x).expand(func=True) == \
        1/x + polygamma(0, x )
    assert digamma(2 + x).expand(func=True) == \
        1/x + 1/(1 + x) + polygamma(0, x)
    assert digamma(3 + x).expand(func=True) == \
        polygamma(0, x) + 1/x + 1/(1 + x) + 1/(2 + x)
    assert digamma(4 + x).expand(func=True) == \
        polygamma(0, x) + 1/x + 1/(1 + x) + 1/(2 + x) + 1/(3 + x)
    # Non-integer shifts should be left alone.
    assert digamma(x + y).expand(func=True) == \
        polygamma(0, x + y)
def test_trigamma():
    """Special values and rewrites of trigamma."""
    assert trigamma(nan) == nan
    assert trigamma(oo) == 0
    assert trigamma(1) == pi**2/6
    assert trigamma(2) == pi**2/6 - 1
    assert trigamma(3) == pi**2/6 - Rational(5, 4)
    assert trigamma(x, evaluate=False).rewrite(zeta) == zeta(2, x)
    assert trigamma(x, evaluate=False).rewrite(harmonic) == \
        trigamma(x).rewrite(polygamma).rewrite(harmonic)
    assert trigamma(x,evaluate=False).fdiff() == polygamma(2, x)
    assert trigamma(x,evaluate=False).is_real is None
    assert trigamma(x,evaluate=False).is_positive is None
    assert trigamma(x,evaluate=False).is_negative is None
    assert trigamma(x,evaluate=False).rewrite(polygamma) == polygamma(1, x)
def test_trigamma_expand_func():
    """expand(func=True) peels integer shifts off trigamma into polygamma(1, .)."""
    # Duplication formula.
    assert trigamma(2*x).expand(func=True) == \
        polygamma(1, x)/4 + polygamma(1, Rational(1, 2) + x)/4
    assert trigamma(1 + x).expand(func=True) == polygamma(1, x) - 1/x**2
    # Larger shifts, expanded with multinomial=False to keep denominators
    # in their factored form.
    shifted = [
        (2 + x, polygamma(1, x) - 1/x**2 - 1/(1 + x)**2),
        (3 + x, polygamma(1, x) - 1/x**2 - 1/(1 + x)**2 - 1/(2 + x)**2),
        (4 + x, polygamma(1, x) - 1/x**2 - 1/(1 + x)**2 -
            1/(2 + x)**2 - 1/(3 + x)**2),
        (3 + 4*x + y, polygamma(1, y + 4*x) - 1/(y + 4*x)**2 -
            1/(1 + y + 4*x)**2 - 1/(2 + y + 4*x)**2),
    ]
    for arg, expected in shifted:
        assert trigamma(arg).expand(func=True, multinomial=False) == expected
    assert trigamma(x + y).expand(func=True) == polygamma(1, x + y)
def test_loggamma():
    """Values, expansions, series, conjugation and realness of loggamma."""
    # Argument validation.
    raises(TypeError, lambda: loggamma(2, 3))
    raises(ArgumentIndexError, lambda: loggamma(x).fdiff(2))
    # Poles at nonpositive integers; exact values at small positive integers.
    assert loggamma(-1) is oo
    assert loggamma(-2) is oo
    assert loggamma(0) is oo
    assert loggamma(1) == 0
    assert loggamma(2) == 0
    assert loggamma(3) == log(2)
    assert loggamma(4) == log(6)
    # Symbolic positive-integer arguments reduce to log(gamma(n)).
    n = Symbol("n", integer=True, positive=True)
    assert loggamma(n) == log(gamma(n))
    assert loggamma(-n) is oo
    assert loggamma(n/2) == log(2**(-n + 1)*sqrt(pi)*gamma(n)/gamma(n/2 + S.Half))
    from sympy import I
    # Behavior at infinities and nan.
    assert loggamma(oo) is oo
    assert loggamma(-oo) is zoo
    assert loggamma(I*oo) is zoo
    assert loggamma(-I*oo) is zoo
    assert loggamma(zoo) is zoo
    assert loggamma(nan) is nan
    # expand_func on rational arguments: recurrence peels integer steps off,
    # each case checked symbolically and numerically.
    L = loggamma(Rational(16, 3))
    E = -5*log(3) + loggamma(Rational(1, 3)) + log(4) + log(7) + log(10) + log(13)
    assert expand_func(L).doit() == E
    assert L.n() == E.n()
    L = loggamma(Rational(19, 4))
    E = -4*log(4) + loggamma(Rational(3, 4)) + log(3) + log(7) + log(11) + log(15)
    assert expand_func(L).doit() == E
    assert L.n() == E.n()
    L = loggamma(Rational(23, 7))
    E = -3*log(7) + log(2) + loggamma(Rational(2, 7)) + log(9) + log(16)
    assert expand_func(L).doit() == E
    assert L.n() == E.n()
    # Negative rational arguments pick up imaginary multiples of pi.
    L = loggamma(Rational(19, 4) - 7)
    E = -log(9) - log(5) + loggamma(Rational(3, 4)) + 3*log(4) - 3*I*pi
    assert expand_func(L).doit() == E
    assert L.n() == E.n()
    L = loggamma(Rational(23, 7) - 6)
    E = -log(19) - log(12) - log(5) + loggamma(Rational(2, 7)) + 3*log(7) - 3*I*pi
    assert expand_func(L).doit() == E
    assert L.n() == E.n()
    # Derivative and series expansions.
    assert loggamma(x).diff(x) == polygamma(0, x)
    s1 = loggamma(1/(x + sin(x)) + cos(x)).nseries(x, n=4)
    s2 = (-log(2*x) - 1)/(2*x) - log(x/pi)/2 + (4 - log(2*x))*x/24 + O(x**2) + \
        log(x)*x**2/2
    assert (s1 - s2).expand(force=True).removeO() == 0
    # Stirling-type expansion of loggamma(1/x) around x = 0.
    s1 = loggamma(1/x).series(x)
    s2 = (1/x - S.Half)*log(1/x) - 1/x + log(2*pi)/2 + \
        x/12 - x**3/360 + x**5/1260 + O(x**7)
    assert ((s1 - s2).expand(force=True)).removeO() == 0
    assert loggamma(x).rewrite('intractable') == log(gamma(x))
    # Series around the pole at 0 must agree with the intractable rewrite.
    s1 = loggamma(x).series(x)
    assert s1 == -log(x) - EulerGamma*x + pi**2*x**2/12 + x**3*polygamma(2, 1)/6 + \
        pi**4*x**4/360 + x**5*polygamma(4, 1)/120 + O(x**6)
    assert s1 == loggamma(x).rewrite('intractable').series(x)
    # Conjugation commutes with loggamma (away from the branch cut).
    assert conjugate(loggamma(x)) == loggamma(conjugate(x))
    assert conjugate(loggamma(0)) is oo
    assert conjugate(loggamma(1)) == loggamma(conjugate(1))
    assert conjugate(loggamma(-oo)) == conjugate(zoo)
    # is_real under various assumptions: real iff the argument is positive.
    assert loggamma(Symbol('v', positive=True)).is_real is True
    assert loggamma(Symbol('v', zero=True)).is_real is False
    assert loggamma(Symbol('v', negative=True)).is_real is False
    assert loggamma(Symbol('v', nonpositive=True)).is_real is False
    assert loggamma(Symbol('v', nonnegative=True)).is_real is None
    assert loggamma(Symbol('v', imaginary=True)).is_real is None
    assert loggamma(Symbol('v', real=True)).is_real is None
    assert loggamma(Symbol('v')).is_real is None
    assert loggamma(S.Half).is_real is True
    assert loggamma(0).is_real is False
    assert loggamma(Rational(-1, 2)).is_real is False
    assert loggamma(I).is_real is None
    assert loggamma(2 + 3*I).is_real is None
    # _eval_nseries must return the expected order term for each requested n.
    def tN(N, M):
        assert loggamma(1/x)._eval_nseries(x, n=N).getn() == M
    tN(0, 0)
    tN(1, 1)
    tN(2, 3)
    tN(3, 3)
    tN(4, 5)
    tN(5, 5)
def test_polygamma_expansion():
    """Asymptotic expansions of polygamma(m, 1/x); Abramowitz & Stegun pp. 259-260."""
    assert polygamma(0, 1/x).nseries(x, n=3) == -log(x) - x/2 - x**2/12 + O(x**4)
    assert polygamma(1, 1/x).series(x, n=5) == x + x**2/2 + x**3/6 + O(x**5)
    expected_3 = 2*x**3 + 3*x**4 + 2*x**5 - x**7 + 4*x**9/3 + O(x**11)
    assert polygamma(3, 1/x).nseries(x, n=11) == expected_3
def test_issue_8657():
    """gamma(z).is_real under different assumptions (issue 8657):
    real unless z may be a nonpositive integer (pole)."""
    neg_int = Symbol('n', negative=True, integer=True)
    any_int = Symbol('m', integer=True)
    positive = Symbol('o', positive=True)
    neg_noninteger = Symbol('p', negative=True, integer=False)
    assert gamma(neg_int).is_real is False
    assert gamma(any_int).is_real is None
    assert gamma(positive).is_real is True
    assert gamma(neg_noninteger).is_real is True
    # w is an unrestricted module-level symbol.
    assert gamma(w).is_real is None
def test_issue_8524():
    """gamma(z).is_positive under different assumptions (issue 8524)."""
    positive = Symbol('x', positive=True)
    negative = Symbol('y', negative=True)
    nonpositive = Symbol('z', positive=False)
    nonnegative = Symbol('p', negative=False)
    integer = Symbol('q', integer=True)
    noninteger = Symbol('r', integer=False)
    even_negative = Symbol('e', even=True, negative=True)
    assert gamma(positive).is_positive is True
    # Indeterminate whenever the argument may be at/near a pole or on the
    # sign-alternating negative axis.
    for sym in (negative, nonpositive, nonnegative, integer, noninteger):
        assert gamma(sym).is_positive is None
    # Between consecutive negative integers the sign alternates and is known.
    assert gamma(even_negative + S.Half).is_positive is True
    assert gamma(even_negative - S.Half).is_positive is False
def test_issue_14450():
    """evalf of incomplete gammas with a symbolic argument must stay unevaluated."""
    upper = uppergamma(Rational(3, 8), x)
    lower = lowergamma(x, Rational(3, 8))
    assert upper.evalf() == upper
    assert lower.evalf() == lower
    # Reference values from Wolfram Alpha.
    assert abs(uppergamma(Rational(3, 8), 2).evalf() - 0.07105675881) < 1e-9
    assert abs(lowergamma(Rational(3, 8), 2).evalf() - 2.2993794256) < 1e-9
def test_issue_14528():
    """gamma at a nonpositive-integer symbol stays unevaluated (it is a pole)."""
    k = Symbol('k', integer=True, nonpositive=True)
    expr = gamma(k)
    assert isinstance(expr, gamma)
def test_multigamma():
    """Definition, conjugation, values, derivative, expansion and realness of
    the multivariate gamma function multigamma(x, p)."""
    from sympy import Product
    p = Symbol('p')
    _k = Dummy('_k')
    # Defining product representation (dummy_eq ignores the dummy index name).
    assert multigamma(x, p).dummy_eq(pi**(p*(p - 1)/4)*\
        Product(gamma(x + (1 - _k)/2), (_k, 1, p)))
    # Conjugation with an unrestricted order p conjugates p as well...
    assert conjugate(multigamma(x, p)).dummy_eq(pi**((conjugate(p) - 1)*\
        conjugate(p)/4)*Product(gamma(conjugate(x) + (1-conjugate(_k))/2), (_k, 1, p)))
    assert conjugate(multigamma(x, 1)) == gamma(conjugate(x))
    # ...while a positive (hence real) p stays untouched.
    p = Symbol('p', positive=True)
    assert conjugate(multigamma(x, p)).dummy_eq(pi**((p - 1)*p/4)*\
        Product(gamma(conjugate(x) + (1-conjugate(_k))/2), (_k, 1, p)))
    # Special and integer values; multigamma(x, 1) == gamma(x).
    assert multigamma(nan, 1) is nan
    assert multigamma(oo, 1).doit() is oo
    assert multigamma(1, 1) == 1
    assert multigamma(2, 1) == 1
    assert multigamma(3, 1) == 2
    assert multigamma(102, 1) == factorial(101)
    assert multigamma(S.Half, 1) == sqrt(pi)
    assert multigamma(1, 2) == pi
    assert multigamma(2, 2) == pi/2
    assert multigamma(1, 3) is zoo
    assert multigamma(2, 3) == pi**2/2
    assert multigamma(3, 3) == 3*pi**2/2
    # Derivatives in x.
    assert multigamma(x, 1).diff(x) == gamma(x)*polygamma(0, x)
    assert multigamma(x, 2).diff(x) == sqrt(pi)*gamma(x)*gamma(x - S.Half)*\
        polygamma(0, x) + sqrt(pi)*gamma(x)*gamma(x - S.Half)*polygamma(0, x - S.Half)
    # expand(func=True) reduces shifted arguments to gammas at x.
    assert multigamma(x - 1, 1).expand(func=True) == gamma(x)/(x - 1)
    assert multigamma(x + 2, 1).expand(func=True, mul=False) == x*(x + 1)*\
        gamma(x)
    assert multigamma(x - 1, 2).expand(func=True) == sqrt(pi)*gamma(x)*\
        gamma(x + S.Half)/(x**3 - 3*x**2 + x*Rational(11, 4) - Rational(3, 4))
    assert multigamma(x - 1, 3).expand(func=True) == pi**Rational(3, 2)*gamma(x)**2*\
        gamma(x + S.Half)/(x**5 - 6*x**4 + 55*x**3/4 - 15*x**2 + x*Rational(31, 4) - Rational(3, 2))
    # Rewrite in terms of factorials (n is a module-level symbol).
    assert multigamma(n, 1).rewrite(factorial) == factorial(n - 1)
    assert multigamma(n, 2).rewrite(factorial) == sqrt(pi)*\
        factorial(n - Rational(3, 2))*factorial(n - 1)
    assert multigamma(n, 3).rewrite(factorial) == pi**Rational(3, 2)*\
        factorial(n - 2)*factorial(n - Rational(3, 2))*factorial(n - 1)
    # is_real: False when some gamma factor hits a pole, True otherwise.
    assert multigamma(Rational(-1, 2), 3, evaluate=False).is_real == False
    assert multigamma(S.Half, 3, evaluate=False).is_real == False
    assert multigamma(0, 1, evaluate=False).is_real == False
    assert multigamma(1, 3, evaluate=False).is_real == False
    assert multigamma(-1.0, 3, evaluate=False).is_real == False
    assert multigamma(0.7, 3, evaluate=False).is_real == True
    assert multigamma(3, 3, evaluate=False).is_real == True
def test_gamma_as_leading_term():
    """Leading behavior of gamma near x = 0: a simple pole 1/x when the
    argument vanishes, the constant 1 when the argument tends to 1."""
    # Argument -> 0: simple pole.
    assert gamma(x).as_leading_term(x) == 1/x
    assert gamma(sin(x)).as_leading_term(x) == 1/x
    # Argument -> 1 (or 2): gamma tends to 1.
    assert gamma(2 + x).as_leading_term(x) == S(1)
    assert gamma(cos(x)).as_leading_term(x) == S(1)
| |
"""
xgboost: eXtreme Gradient Boosting library
Authors: Tianqi Chen, Bing Xu
"""
from __future__ import absolute_import
import os
import sys
import ctypes
import collections
import numpy as np
import scipy.sparse
# Public API of this wrapper module.
__all__ = ['DMatrix', 'CVPack', 'Booster', 'aggcv', 'cv', 'mknfold', 'train']
# Py2/Py3 compatibility: canonical text type(s) used in isinstance checks below.
if sys.version_info[0] == 3:
    string_types = str,
else:
    # 'basestring' exists only on Python 2 (covers both str and unicode).
    string_types = basestring,
def load_xglib():
    """
    Locate and load the native xgboost wrapper library and declare the
    return types of the C functions used by this module.

    Returns
    -------
    ctypes.CDLL
        Library handle with `restype` annotations applied.
    """
    # The shared library is expected to live next to this Python file
    # (or in the Windows build output directory relative to it).
    dll_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
    if os.name == 'nt':
        dll_path = os.path.join(dll_path, '../windows/x64/Release/xgboost_wrapper.dll')
    else:
        dll_path = os.path.join(dll_path, 'libxgboostwrapper.so')
    # load the xgboost wrapper library
    lib = ctypes.cdll.LoadLibrary(dll_path)
    # ctypes defaults every return type to c_int; pointer/handle returns must
    # be declared explicitly or they are truncated on 64-bit platforms.
    # DMatrix functions
    lib.XGDMatrixCreateFromFile.restype = ctypes.c_void_p
    lib.XGDMatrixCreateFromCSR.restype = ctypes.c_void_p
    lib.XGDMatrixCreateFromCSC.restype = ctypes.c_void_p
    lib.XGDMatrixCreateFromMat.restype = ctypes.c_void_p
    lib.XGDMatrixSliceDMatrix.restype = ctypes.c_void_p
    lib.XGDMatrixGetFloatInfo.restype = ctypes.POINTER(ctypes.c_float)
    lib.XGDMatrixGetUIntInfo.restype = ctypes.POINTER(ctypes.c_uint)
    lib.XGDMatrixNumRow.restype = ctypes.c_ulong
    # Booster functions
    lib.XGBoosterCreate.restype = ctypes.c_void_p
    lib.XGBoosterPredict.restype = ctypes.POINTER(ctypes.c_float)
    lib.XGBoosterEvalOneIter.restype = ctypes.c_char_p
    lib.XGBoosterDumpModel.restype = ctypes.POINTER(ctypes.c_char_p)
    return lib
# load the XGBoost library globally
xglib = load_xglib()
def ctypes2numpy(cptr, length, dtype):
    """
    Copy a ctypes pointer's buffer into a new 1-D numpy array.

    Parameters
    ----------
    cptr : ctypes.POINTER(ctypes.c_float) or ctypes.POINTER(ctypes.c_uint)
        Pointer to the first element of the native buffer.
    length : int
        Number of elements to copy.
    dtype : numpy dtype
        Target dtype; must correspond to the pointee type of `cptr`.

    Returns
    -------
    numpy.ndarray
        A `length`-element copy of the buffer.

    Raises
    ------
    RuntimeError
        If the pointer type does not match `dtype`, or the copy fails.
    """
    # Map the requested dtype to the ctypes pointer type it requires.
    # Bug fix: the old code only ever accepted POINTER(c_float), which made
    # every uint conversion (e.g. DMatrix.get_uint_info) raise spuriously.
    dtype = np.dtype(dtype)
    ctype_map = {
        np.dtype(np.float32): ctypes.c_float,
        np.dtype(np.uint32): ctypes.c_uint,
    }
    expected = ctype_map.get(dtype)
    if expected is None or not isinstance(cptr, ctypes.POINTER(expected)):
        raise RuntimeError('expected {} pointer'.format(dtype.name))
    res = np.zeros(length, dtype=dtype)
    # res.strides[0] is the element size in bytes for a 1-D contiguous array.
    if not ctypes.memmove(res.ctypes.data, cptr, length * res.strides[0]):
        raise RuntimeError('memmove failed')
    return res
def c_str(string):
    """Encode a Python string as a UTF-8 NUL-terminated C char pointer."""
    encoded = string.encode('utf-8')
    return ctypes.c_char_p(encoded)
def c_array(ctype, values):
    """Build a fixed-size ctypes array of `ctype` initialized from `values`."""
    array_type = ctype * len(values)
    return array_type(*values)
class DMatrix(object):
    """
    Data matrix used in XGBoost.

    Thin wrapper around a native DMatrix handle allocated through the
    xgboost C wrapper library (`xglib`); the handle is freed in __del__.
    """

    def __init__(self, data, label=None, missing=0.0, weight=None):
        """
        Parameters
        ----------
        data : string/numpy array/scipy.sparse
            Data source, string type is the path of svmlight format txt file or xgb buffer.
        label : list or numpy 1-D array (optional)
            Label of the training data.
        missing : float
            Value in the data which is to be treated as a missing value.
        weight : list or numpy 1-D array (optional)
            Weight for each instance.
        """
        # force into void_p, mac need to pass things in as void_p
        if data is None:
            # Empty shell: slice() builds one of these and assigns the
            # handle itself afterwards.
            self.handle = None
            return
        if isinstance(data, string_types):
            self.handle = ctypes.c_void_p(
                xglib.XGDMatrixCreateFromFile(c_str(data), 0))
        elif isinstance(data, scipy.sparse.csr_matrix):
            self._init_from_csr(data)
        elif isinstance(data, scipy.sparse.csc_matrix):
            self._init_from_csc(data)
        elif isinstance(data, np.ndarray) and len(data.shape) == 2:
            self._init_from_npy2d(data, missing)
        else:
            # Last resort: anything scipy can coerce into CSR form.
            try:
                csr = scipy.sparse.csr_matrix(data)
                self._init_from_csr(csr)
            except Exception:
                # Was a bare 'except:', which would also have swallowed
                # KeyboardInterrupt/SystemExit.
                raise TypeError('cannot initialize DMatrix from {}'.format(
                    type(data).__name__))
        if label is not None:
            self.set_label(label)
        if weight is not None:
            self.set_weight(weight)

    def _init_from_csr(self, csr):
        """Initialize the native handle from a scipy CSR matrix."""
        if len(csr.indices) != len(csr.data):
            raise ValueError('length mismatch: {} vs {}'.format(
                len(csr.indices), len(csr.data)))
        self.handle = ctypes.c_void_p(xglib.XGDMatrixCreateFromCSR(
            c_array(ctypes.c_ulong, csr.indptr),
            c_array(ctypes.c_uint, csr.indices),
            c_array(ctypes.c_float, csr.data),
            len(csr.indptr), len(csr.data)))

    def _init_from_csc(self, csc):
        """Initialize the native handle from a scipy CSC matrix."""
        if len(csc.indices) != len(csc.data):
            raise ValueError('length mismatch: {} vs {}'.format(
                len(csc.indices), len(csc.data)))
        self.handle = ctypes.c_void_p(xglib.XGDMatrixCreateFromCSC(
            c_array(ctypes.c_ulong, csc.indptr),
            c_array(ctypes.c_uint, csc.indices),
            c_array(ctypes.c_float, csc.data),
            len(csc.indptr), len(csc.data)))

    def _init_from_npy2d(self, mat, missing):
        """Initialize the native handle from a 2-D numpy array."""
        # Flatten to a contiguous float32 buffer; the C API reads row-major.
        data = np.array(mat.reshape(mat.size), dtype=np.float32)
        self.handle = ctypes.c_void_p(xglib.XGDMatrixCreateFromMat(
            data.ctypes.data_as(ctypes.POINTER(ctypes.c_float)),
            mat.shape[0], mat.shape[1], ctypes.c_float(missing)))

    def __del__(self):
        # Guard against partially constructed objects (failed __init__) and
        # the DMatrix(None) shell: neither owns a valid native handle.
        if getattr(self, 'handle', None) is not None:
            xglib.XGDMatrixFree(self.handle)
            self.handle = None  # avoid a double free if called again

    def get_float_info(self, field):
        """Return the named float meta info (e.g. 'label') as a numpy array."""
        length = ctypes.c_ulong()
        ret = xglib.XGDMatrixGetFloatInfo(self.handle, c_str(field),
                                          ctypes.byref(length))
        return ctypes2numpy(ret, length.value, np.float32)

    def get_uint_info(self, field):
        """Return the named unsigned-int meta info as a numpy array."""
        length = ctypes.c_ulong()
        ret = xglib.XGDMatrixGetUIntInfo(self.handle, c_str(field),
                                         ctypes.byref(length))
        return ctypes2numpy(ret, length.value, np.uint32)

    def set_float_info(self, field, data):
        """Set the named float meta info from an iterable of floats."""
        xglib.XGDMatrixSetFloatInfo(self.handle, c_str(field),
                                    c_array(ctypes.c_float, data), len(data))

    def set_uint_info(self, field, data):
        """Set the named unsigned-int meta info from an iterable of ints."""
        xglib.XGDMatrixSetUIntInfo(self.handle, c_str(field),
                                   c_array(ctypes.c_uint, data), len(data))

    def save_binary(self, fname, silent=True):
        """
        Save DMatrix to an XGBoost buffer.
        Parameters
        ----------
        fname : string
            Name of the output buffer file.
        silent : bool (optional; default: True)
            If set, the output is suppressed.
        """
        xglib.XGDMatrixSaveBinary(self.handle, c_str(fname), int(silent))

    def set_label(self, label):
        """
        Set the label of the DMatrix.
        Parameters
        ----------
        label : list or numpy 1-D array
            Label for each instance.
        """
        self.set_float_info('label', label)

    def set_weight(self, weight):
        """
        Set the weight of each instance.
        Parameters
        ----------
        weight : list or numpy 1-D array
            Weight for each instance.
        """
        self.set_float_info('weight', weight)

    def set_base_margin(self, margin):
        """
        Set the base margin the booster starts from.
        This can be used to supply the prediction of an existing model as the
        starting point. Note the raw margin is needed, not the transformed
        prediction (e.g. for logistic regression, pass the value before the
        logistic transformation). See also example/demo.py.
        """
        self.set_float_info('base_margin', margin)

    def set_group(self, group):
        """
        Set group sizes of DMatrix (used for ranking).
        Parameters
        ----------
        group : list of int
            Size of each query group, in order.
        """
        xglib.XGDMatrixSetGroup(self.handle,
                                c_array(ctypes.c_uint, group), len(group))

    def get_label(self):
        """
        Get the label of the DMatrix.
        Returns
        -------
        label : numpy array
        """
        return self.get_float_info('label')

    def get_weight(self):
        """
        Get the per-instance weight of the DMatrix.
        Returns
        -------
        weight : numpy array
        """
        return self.get_float_info('weight')

    def get_base_margin(self):
        """
        Get the base margin of the DMatrix.
        Returns
        -------
        base_margin : numpy array
        """
        return self.get_float_info('base_margin')

    def num_row(self):
        """
        Get the number of rows in the DMatrix.
        Returns
        -------
        number of rows : int
        """
        return xglib.XGDMatrixNumRow(self.handle)

    def slice(self, rindex):
        """
        Slice the DMatrix and return a new DMatrix that only contains `rindex`.
        Parameters
        ----------
        rindex : list
            List of indices to be selected.
        Returns
        -------
        res : DMatrix
            A new DMatrix containing only selected indices.
        """
        res = DMatrix(None)
        res.handle = ctypes.c_void_p(xglib.XGDMatrixSliceDMatrix(
            self.handle, c_array(ctypes.c_int, rindex), len(rindex)))
        return res
class Booster(object):
    """
    Learner class wrapping a native booster handle.
    """

    def __init__(self, params=None, cache=(), model_file=None):
        """
        Parameters
        ----------
        params : dict
            Parameters for boosters.
        cache : list
            List of DMatrix items whose prediction buffers are cached.
        model_file : string
            Path to a model file to load (optional).
        """
        for d in cache:
            if not isinstance(d, DMatrix):
                raise TypeError('invalid cache item: {}'.format(type(d).__name__))
        dmats = c_array(ctypes.c_void_p, [d.handle for d in cache])
        self.handle = ctypes.c_void_p(xglib.XGBoosterCreate(dmats, len(cache)))
        # Fix the seed first so user-supplied params can override it.
        self.set_param({'seed': 0})
        self.set_param(params or {})
        if model_file is not None:
            self.load_model(model_file)

    def __del__(self):
        # Guard against partially constructed objects without a native handle.
        if getattr(self, 'handle', None) is not None:
            xglib.XGBoosterFree(self.handle)
            self.handle = None  # avoid a double free if called again

    def set_param(self, params, pv=None):
        """
        Set booster parameter(s).
        Parameters
        ----------
        params : dict, iterable of (key, value) pairs, or string
            Parameters to set, or a single parameter name when `pv` is given.
        pv : optional
            Value of the single parameter named by `params`.
        """
        # collections.Mapping moved to collections.abc in Python 3.3 and the
        # old alias was removed in 3.10; fall back to collections on Python 2.
        mapping_type = getattr(collections, 'abc', collections).Mapping
        if isinstance(params, mapping_type):
            params = params.items()
        elif isinstance(params, string_types) and pv is not None:
            params = [(params, pv)]
        for k, v in params:
            xglib.XGBoosterSetParam(self.handle, c_str(k), c_str(str(v)))

    def update(self, dtrain, it, fobj=None):
        """
        Update (one iteration).
        Parameters
        ----------
        dtrain : DMatrix
            Training data.
        it : int
            Current iteration number.
        fobj : function
            Customized objective function returning (grad, hess).
        """
        if not isinstance(dtrain, DMatrix):
            raise TypeError('invalid training matrix: {}'.format(type(dtrain).__name__))
        if fobj is None:
            xglib.XGBoosterUpdateOneIter(self.handle, it, dtrain.handle)
        else:
            # Custom objective: compute gradients on current predictions and
            # take one boosting step with them.
            pred = self.predict(dtrain)
            grad, hess = fobj(pred, dtrain)
            self.boost(dtrain, grad, hess)

    def boost(self, dtrain, grad, hess):
        """
        Boost one iteration with externally supplied gradient statistics.
        Parameters
        ----------
        dtrain : DMatrix
            The training DMatrix.
        grad : list
            The first order of gradient.
        hess : list
            The second order of gradient.
        """
        if len(grad) != len(hess):
            raise ValueError('grad / hess length mismatch: {} / {}'.format(
                len(grad), len(hess)))
        if not isinstance(dtrain, DMatrix):
            raise TypeError('invalid training matrix: {}'.format(type(dtrain).__name__))
        xglib.XGBoosterBoostOneIter(self.handle, dtrain.handle,
                                    c_array(ctypes.c_float, grad),
                                    c_array(ctypes.c_float, hess),
                                    len(grad))

    def eval_set(self, evals, it=0, feval=None):
        """
        Evaluate by a metric.
        Parameters
        ----------
        evals : list of tuples (DMatrix, string)
            List of items to be evaluated.
        it : int
            Current iteration.
        feval : function
            Custom evaluation function returning (name, value).
        Returns
        -------
        evaluation result string
        """
        if feval is None:
            for d in evals:
                if not isinstance(d[0], DMatrix):
                    raise TypeError('expected DMatrix, got {}'.format(type(d[0]).__name__))
                if not isinstance(d[1], string_types):
                    raise TypeError('expected string, got {}'.format(type(d[1]).__name__))
            dmats = c_array(ctypes.c_void_p, [d[0].handle for d in evals])
            evnames = c_array(ctypes.c_char_p, [c_str(d[1]) for d in evals])
            return xglib.XGBoosterEvalOneIter(self.handle, it, dmats, evnames, len(evals))
        else:
            # Build the result line ourselves from the custom metric.
            res = '[%d]' % it
            for dm, evname in evals:
                name, val = feval(self.predict(dm), dm)
                res += '\t%s-%s:%f' % (evname, name, val)
            return res

    def eval(self, mat, name='eval', it=0):
        """Evaluate a single DMatrix; convenience wrapper over eval_set()."""
        return self.eval_set([(mat, name)], it)

    def predict(self, data, output_margin=False, ntree_limit=0, pred_leaf=False):
        """
        Predict with data.
        Parameters
        ----------
        data : DMatrix
            The dmatrix storing the input.
        output_margin : bool
            Whether to output the raw untransformed margin value.
        ntree_limit : int
            Limit number of trees in the prediction; defaults to 0 (use all trees).
        pred_leaf : bool
            When this option is on, the output will be a matrix of (nsample, ntrees)
            with each record indicating the predicted leaf index of each sample in each tree.
            Note that the leaf index of a tree is unique per tree, so you may find leaf 1
            in both tree 1 and tree 0.
        Returns
        -------
        prediction : numpy array
        """
        # Option bits understood by the C API.
        option_mask = 0x00
        if output_margin:
            option_mask |= 0x01
        if pred_leaf:
            option_mask |= 0x02
        length = ctypes.c_ulong()
        preds = xglib.XGBoosterPredict(self.handle, data.handle,
                                       option_mask, ntree_limit, ctypes.byref(length))
        preds = ctypes2numpy(preds, length.value, np.float32)
        if pred_leaf:
            preds = preds.astype(np.int32)
        nrow = data.num_row()
        if preds.size != nrow and preds.size % nrow == 0:
            # Integer division: '/' yields a float on Python 3, which is not
            # a valid reshape dimension.
            preds = preds.reshape(nrow, preds.size // nrow)
        return preds

    def save_model(self, fname):
        """
        Save the model to a file.
        Parameters
        ----------
        fname : string
            Output file name.
        """
        xglib.XGBoosterSaveModel(self.handle, c_str(fname))

    def load_model(self, fname):
        """
        Load the model from a file.
        Parameters
        ----------
        fname : string
            Input file name.
        """
        xglib.XGBoosterLoadModel(self.handle, c_str(fname))

    def dump_model(self, fo, fmap='', with_stats=False):
        """
        Dump model into a text file.
        Parameters
        ----------
        fo : string or file-like
            Output file name, or an already-open writable object.
        fmap : string, optional
            Name of the file containing feature map names.
        with_stats : bool (optional)
            Controls whether the split statistics are output.
        """
        if isinstance(fo, string_types):
            fo = open(fo, 'w')
            need_close = True
        else:
            need_close = False
        try:
            ret = self.get_dump(fmap, with_stats)
            for i, tree_dump in enumerate(ret):
                fo.write('booster[{}]:\n'.format(i))
                fo.write(tree_dump)
        finally:
            # Close only files we opened ourselves, even if a write failed.
            if need_close:
                fo.close()

    def get_dump(self, fmap='', with_stats=False):
        """
        Return the model dump as a list of strings (one per tree).
        """
        length = ctypes.c_ulong()
        sarr = xglib.XGBoosterDumpModel(self.handle, c_str(fmap),
                                        int(with_stats), ctypes.byref(length))
        return [str(sarr[i]) for i in range(length.value)]

    def get_fscore(self, fmap=''):
        """
        Get feature importance of each feature, measured as the number of
        times the feature is used in a split across all trees.
        """
        trees = self.get_dump(fmap)
        fscore = {}
        for tree in trees:
            for line in tree.split('\n'):
                arr = line.split('[')
                if len(arr) == 1:
                    # Leaf line: no '[feature<threshold]' split condition.
                    continue
                fid = arr[1].split(']')[0].split('<')[0]
                fscore[fid] = fscore.get(fid, 0) + 1
        # NOTE: the old version also wrote every tree dump to stdout here;
        # that debug leftover has been removed.
        return fscore
def train(params, dtrain, num_boost_round=10, evals=(), obj=None, feval=None):
    """
    Train a booster with given parameters.
    Parameters
    ----------
    params : dict
        Booster params.
    dtrain : DMatrix
        Data to be trained.
    num_boost_round: int
        Number of boosting iterations.
    evals : list of pairs (DMatrix, string)
        List of items to be evaluated during training, this allows user to watch
        performance on the validation set.
    obj : function
        Customized objective function.
    feval : function
        Customized evaluation function.
    Returns
    -------
    booster : a trained booster model
    """
    evals = list(evals)
    # Cache all matrices (train + evaluation sets) in the booster so that
    # per-iteration prediction buffers are reused.
    bst = Booster(params, [dtrain] + [d[0] for d in evals])
    for i in range(num_boost_round):
        bst.update(dtrain, i, obj)
        if len(evals) != 0:
            bst_eval_set = bst.eval_set(evals, i, feval)
            # The C API returns bytes on Python 3; custom feval returns str.
            if isinstance(bst_eval_set, string_types):
                sys.stderr.write(bst_eval_set + '\n')
            else:
                sys.stderr.write(bst_eval_set.decode() + '\n')
    return bst
class CVPack(object):
    """One cross-validation fold: its train/test split and its own booster."""
    def __init__(self, dtrain, dtest, param):
        # Keep both matrices referenced for the booster's lifetime, since the
        # booster caches their prediction buffers.
        self.dtrain = dtrain
        self.dtest = dtest
        self.watchlist = [(dtrain, 'train'), (dtest, 'test')]
        self.bst = Booster(param, [dtrain, dtest])
    def update(self, r, fobj):
        # Run boosting iteration `r` on this fold's training data.
        self.bst.update(self.dtrain, r, fobj)
    def eval(self, r, feval):
        # Evaluate iteration `r` on both the train and the test set.
        return self.bst.eval_set(self.watchlist, r, feval)
def mknfold(dall, nfold, param, seed, evals=(), fpreproc=None):
    """
    Make an n-fold list of CVPack from random indices.

    Parameters
    ----------
    dall : DMatrix
        The full data set to split.
    nfold : int
        Number of folds.
    param : dict
        Booster parameters used for every fold.
    seed : int
        Seed passed to numpy's RNG; fold assignment is deterministic given it.
    evals : iterable of strings
        Extra 'eval_metric' entries appended to each fold's parameter list.
    fpreproc : function, optional
        Preprocessing callback (dtrain, dtest, param) -> (dtrain, dtest, param).

    Returns
    -------
    list of CVPack, one per fold.
    """
    evals = list(evals)
    np.random.seed(seed)
    randidx = np.random.permutation(dall.num_row())
    # Integer division: on Python 3 '/' yields a float, and float slice
    # bounds below would raise a TypeError.
    kstep = len(randidx) // nfold
    # NOTE: the trailing len(randidx) % nfold rows are left out of all folds.
    idset = [randidx[(i * kstep): min(len(randidx), (i + 1) * kstep)]
             for i in range(nfold)]
    ret = []
    for k in range(nfold):
        # Train on every fold except k; test on fold k.
        dtrain = dall.slice(np.concatenate([idset[i] for i in range(nfold) if k != i]))
        dtest = dall.slice(idset[k])
        # run preprocessing on the data set if needed
        if fpreproc is not None:
            dtrain, dtest, tparam = fpreproc(dtrain, dtest, param.copy())
        else:
            tparam = param
        plst = list(tparam.items()) + [('eval_metric', itm) for itm in evals]
        ret.append(CVPack(dtrain, dtest, plst))
    return ret
def aggcv(rlist, show_stdv=True):
    """
    Aggregate cross-validation results.

    Each entry of `rlist` is one fold's evaluation line of the form
    '[iter]\\tname:value\\t...'; the result is a single line carrying the
    per-metric mean (and optionally the standard deviation) across folds.
    """
    metric_values = {}
    ret = rlist[0].split()[0]
    for line in rlist:
        tokens = line.split()
        # Every fold must report the same iteration tag.
        assert ret == tokens[0]
        for token in tokens[1:]:
            if not isinstance(token, string_types):
                token = token.decode()
            key, val = token.split(':')
            metric_values.setdefault(key, []).append(float(val))
    for key in sorted(metric_values):
        samples = np.array(metric_values[key])
        if not isinstance(ret, string_types):
            ret = ret.decode()
        if show_stdv:
            ret += '\tcv-%s:%f+%f' % (key, np.mean(samples), np.std(samples))
        else:
            ret += '\tcv-%s:%f' % (key, np.mean(samples))
    return ret
def cv(params, dtrain, num_boost_round=10, nfold=3, metrics=(),
       obj=None, feval=None, fpreproc=None, show_stdv=True, seed=0):
    """
    Cross-validation with given parameters.
    Parameters
    ----------
    params : dict
        Booster params.
    dtrain : DMatrix
        Data to be trained.
    num_boost_round : int
        Number of boosting iterations.
    nfold : int
        Number of folds in CV.
    metrics : list of strings
        Evaluation metrics to be watched in CV.
    obj : function
        Custom objective function.
    feval : function
        Custom evaluation function.
    fpreproc : function
        Preprocessing function that takes (dtrain, dtest, param) and returns
        transformed versions of those.
    show_stdv : bool
        Whether to display the standard deviation.
    seed : int
        Seed used to generate the folds (passed to numpy.random.seed).
    Returns
    -------
    evaluation history : list(string)
    """
    results = []
    cvfolds = mknfold(dtrain, nfold, params, seed, metrics, fpreproc)
    for i in range(num_boost_round):
        # One boosting step per fold, then aggregate the fold evaluations
        # into a single per-iteration summary line.
        for f in cvfolds:
            f.update(i, obj)
        res = aggcv([f.eval(i, feval) for f in cvfolds], show_stdv)
        sys.stderr.write(res + '\n')
        results.append(res)
    return results
| |
#!/usr/bin/env python
#
# Azure Linux extension
#
# Copyright (c) Microsoft Corporation
# All rights reserved.
# MIT License
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the ""Software""), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above
# copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software. THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os
import re
import socket
import time
from Utils.misc_helpers import append_string_to_file
# Shell command templates for managing the omsagent bundle, the LAD
# workspace, and the fluentd out_mdsd plugin. Filled in via str.format().
# op is either '--upgrade' or '--remove'
omsagent_universal_sh_cmd_template = 'sh omsagent-*.universal.x64.sh {op}'
# args is either '-w LAD' or '-x LAD' or '-l'
omsagent_lad_workspace_cmd_template = 'sh /opt/microsoft/omsagent/bin/omsadmin.sh {args}'
# Presence of this directory indicates the LAD workspace is already onboarded.
omsagent_lad_dir = '/etc/opt/microsoft/omsagent/LAD/'
# args is either 'install fluent-plugin-mdsd-*.gem' or 'uninstall fluent-plugin-mdsd -a'
fluentd_ruby_gem_cmd_template = '/opt/microsoft/omsagent/ruby/bin/fluent-gem {args}'
def setup_omsagent_for_lad(run_command):
    """
    Install omsagent by executing the universal shell bundle. Also onboard omsagent for LAD.
    Also install the out_mdsd fluentd plugin.
    :param run_command: External command execution function (e.g., RunGetOutput)
    :rtype: int, str
    :return: 2-tuple of status code and message. Status 0 means success;
             1/2/3 identify which setup step failed (install/onboard/plugin).
    """
    # 1. Install omsagent. It's a noop if it's already installed.
    cmd_exit_code, cmd_output = run_command(omsagent_universal_sh_cmd_template.format(op='--upgrade'))
    if cmd_exit_code != 0:
        return 1, 'setup_omsagent_for_lad(): omsagent universal installer shell execution failed. ' \
                  'Output: {0}'.format(cmd_output)
    # 1.1. Modify configure_syslog.sh to work around on a SLES 11 anomaly: No "syslog-ng" service, but "syslog"
    #      even though syslog-ng is installed, causing configure_syslog.sh to fail. Strange is that even though
    #      the configure_syslog.sh fails, it seems syslog collection works, so it's not really a bug, though
    #      it's just not very clean.
    # Exit status of this sed is deliberately ignored (best-effort patch).
    run_command(r'sed -i "s/RestartService syslog-ng\\s*$/RestartService syslog-ng || RestartService syslog/g" /opt/microsoft/omsagent/bin/configure_syslog.sh')
    # 2. Onboard to LAD workspace. Should be a noop if it's already done.
    # The directory check is the "already onboarded" marker.
    if not os.path.isdir(omsagent_lad_dir):
        cmd_exit_code, cmd_output = run_command(omsagent_lad_workspace_cmd_template.format(args='-w LAD'))
        if cmd_exit_code != 0:
            return 2, 'setup_omsagent_for_lad(): LAD workspace onboarding failed. Output: {0}'.format(cmd_output)
    # 3. Install fluentd out_mdsd plugin (uninstall existing ones first)
    # Uninstall result is ignored: there may be nothing to uninstall.
    run_command(fluentd_ruby_gem_cmd_template.format(args='uninstall fluent-plugin-mdsd -a'))
    cmd_exit_code, cmd_output = run_command(fluentd_ruby_gem_cmd_template.format(args='install fluent-plugin-mdsd-*.gem'))
    if cmd_exit_code != 0:
        return 3, 'setup_omsagent_for_lad(): fluentd out_mdsd plugin install failed. Output: {0}'.format(cmd_output)
    # All succeeded
    return 0, 'setup_omsagent_for_lad() succeeded'
# Template for omsagent's service_control script; {op} is start/stop/restart.
omsagent_control_cmd_template = '/opt/microsoft/omsagent/bin/service_control {op} LAD'


def control_omsagent(op, run_command):
    """
    Start/stop/restart the omsagent service via its service_control script.
    :param op: Operation type. Must be 'start', 'stop', or 'restart'
    :param run_command: External command execution function (e.g., RunGetOutput)
    :rtype: int, str
    :return: 2-tuple of status code (0 on success, 1 on failure) and message
    """
    exit_code, output = run_command(omsagent_control_cmd_template.format(op=op))
    if exit_code == 0:
        return 0, 'control_omsagent({0}) succeeded'.format(op)
    return 1, 'control_omsagent({0}) failed. Output: {1}'.format(op, output)
def tear_down_omsagent_for_lad(run_command, remove_omsagent):
    """
    Remove omsagent by executing the universal shell bundle. Remove LAD workspace before that.
    Don't remove omsagent if OMSAgentForLinux extension is installed (i.e., if any other omsagent workspace exists).
    :param run_command: External command execution function (e.g., RunGetOutput)
    :param remove_omsagent: A boolean indicating whether to remove omsagent bundle or not.
    :rtype: int, str
    :return: 2-tuple of status code (always 0; teardown is best-effort) and
             a message concatenating any step failures.
    """
    # NOTE(review): the messages below say 'remove_omsagent_for_lad()' while
    # the function is named tear_down_omsagent_for_lad - presumably a rename
    # leftover; confirm before changing log-scraping consumers.
    return_msg = ''
    # 1. Unconfigure syslog. Ignore failure (just collect failure output).
    cmd_exit_code, cmd_output = unconfigure_syslog(run_command)
    if cmd_exit_code != 0:
        return_msg += 'remove_omsagent_for_lad(): unconfigure_syslog() failed. ' \
                      'Exit code={0}, Output={1}'.format(cmd_exit_code, cmd_output)
    # 2. Remove LAD workspace. Ignore failure.
    cmd_exit_code, cmd_output = run_command(omsagent_lad_workspace_cmd_template.format(args='-x LAD'))
    if cmd_exit_code != 0:
        return_msg += 'remove_omsagent_for_lad(): LAD workspace removal failed. ' \
                      'Exit code={0}, Output={1}'.format(cmd_exit_code, cmd_output)
    if remove_omsagent:
        # 3. Uninstall omsagent when specified. Do this only if there's no other omsagent workspace.
        # '-l' lists remaining workspaces; 'no workspace' means LAD was the last one.
        cmd_exit_code, cmd_output = run_command(omsagent_lad_workspace_cmd_template.format(args='-l'))
        if cmd_output.strip().lower() == 'no workspace':
            cmd_exit_code, cmd_output = run_command(omsagent_universal_sh_cmd_template.format(op='--remove'))
            if cmd_exit_code != 0:
                return_msg += 'remove_omsagent_for_lad(): remove-omsagent failed. ' \
                              'Exit code={0}, Output={1}'.format(cmd_exit_code, cmd_output)
        else:
            return_msg += 'remove_omsagent_for_lad(): omsagent workspace listing failed. ' \
                          'Exit code={0}, Output={1}'.format(cmd_exit_code, cmd_output)
    # Done
    return 0, return_msg if return_msg else 'remove_omsagent_for_lad() succeeded'
# Well-known syslog daemon configuration locations, used to detect which
# syslog implementation (and which flavor of it) is installed.
rsyslog_top_conf_path = '/etc/rsyslog.conf'
rsyslog_d_path = '/etc/rsyslog.d/'
rsyslog_d_omsagent_conf_path = '/etc/rsyslog.d/95-omsagent.conf'  # hard-coded by omsagent
syslog_ng_conf_path = '/etc/syslog-ng/syslog-ng.conf'


def is_rsyslog_installed():
    """Return True iff rsyslog is installed (its top-level conf file exists).
    :rtype: bool
    """
    return os.path.exists(rsyslog_top_conf_path)


def is_new_rsyslog_installed():
    """Return True iff a newer rsyslog with an /etc/rsyslog.d/ drop-in
    directory is installed.
    :rtype: bool
    """
    return os.path.exists(rsyslog_d_path)


def is_syslog_ng_installed():
    """Return True iff syslog-ng is installed (its conf file exists).
    :rtype: bool
    """
    return os.path.exists(syslog_ng_conf_path)
def get_syslog_ng_src_name():
    """
    Retrieve the syslog-ng source name from syslog-ng.conf.

    Some syslog-ng distributions use a different source name ("s_src" vs
    "src"); restarting syslog-ng with a non-existent source name fails, so
    the actual name must be read back from the installed config file.

    :rtype: str
    :return: The source name found in syslog-ng.conf, or 'src' when it can't be determined.
    """
    default_src_name = 'src'
    try:
        with open(syslog_ng_conf_path, 'r') as conf_file:
            match = re.search(r'\n\s*source\s+([^\s]+)\s*{', conf_file.read())
        return match.group(1) if match else default_src_name
    except Exception:
        # Any failure (missing file, unreadable file) falls back to the default.
        return default_src_name
def get_fluentd_syslog_src_port():
    """
    Returns a TCP/UDP port number that'll be supplied to the fluentd syslog src plugin (for it to listen to for
    syslog events from rsyslog/syslog-ng). Ports from 25224 to 25423 will be tried for bind() and the first available
    one will be returned. 25224 is the default port number that's picked by omsagent.
    This is definitely not 100% correct with potential races. The correct solution would be to let fluentd syslog
    src plugin bind to 0 and write the resulting bound port number to a file, so that we can get the port number
    from the file. However, the current fluentd in_syslog.rb doesn't write to a file, so that method won't
    work. And yet we still want to minimize possibility of binding to an already-in-use port, so here's a workaround.
    :rtype: int
    :return: A successfully bound (& closed) TCP/UDP port number. -1 if all failed.
    """
    # NOTE(review): only a TCP bind is probed; presumably the matching UDP port
    # is assumed free as well — confirm against fluentd in_syslog's protocol.
    for port in range(25224, 25424):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.bind(('', port))
            return port
        except Exception:
            pass  # Port in use (or bind failed); try the next one.
        finally:
            # BUG FIX: always release the socket. The original code never
            # closed the socket when bind() raised, leaking one fd per
            # busy port probed.
            s.close()
    return -1
# Command template for omsagent's syslog configuration helper script (LAD workspace).
omsagent_config_syslog_sh_cmd_template = 'sh /opt/microsoft/omsagent/bin/configure_syslog.sh {op} LAD {port}'


def run_omsagent_config_syslog_sh(run_command, op, port=''):
    """
    Execute omsagent's configure_syslog.sh helper script for the LAD workspace.

    :param run_command: External command execution function (e.g., RunGetOutput)
    :param op: Type of operation. Must be one of 'configure', 'unconfigure', and 'restart'
    :param port: TCP/UDP port number to supply as fluentd in_syslog plugin listen port
    :rtype: int, str
    :return: 2-tuple of the process exit code and the resulting output string (basically run_command's return values)
    """
    cmd = omsagent_config_syslog_sh_cmd_template.format(op=op, port=port)
    return run_command(cmd)
# Path of the fluentd in_syslog plugin config file for the LAD omsagent workspace.
fluentd_syslog_src_cfg_path = '/etc/opt/microsoft/omsagent/LAD/conf/omsagent.d/syslog.conf'
# Placeholder token in generated syslog configs; replaced with the actual port number.
syslog_port_pattern_marker = '%SYSLOG_PORT%'
def configure_syslog(run_command, port, in_syslog_cfg, rsyslog_cfg, syslog_ng_cfg):
    """
    Configure rsyslog/syslog-ng and fluentd's in_syslog with the given TCP port.
    rsyslog/syslog-ng config is done by omsagent's configure_syslog.sh. We also try to unconfigure first,
    to avoid duplicate entries in the related config files.
    :param run_command: External command execution function (e.g., RunGetOutput)
    :param port: TCP/UDP port number to be used for rsyslog/syslog-ng and fluentd's in_syslog
    :param in_syslog_cfg: Fluentd's in_syslog config string. Should be overwritten to omsagent.d/syslog.conf
    :param rsyslog_cfg: rsyslog config that's generated by LAD syslog configurator, that should be appended to
                        /etc/rsyslog.d/95-omsagent.conf or /etc/rsyslog.conf
    :param syslog_ng_cfg: syslog-ng config that's generated by LAD syslog configurator, that should be appended to
                          /etc/syslog-ng/syslog-ng.conf
    :rtype: int, str
    :return: 2-tuple of the process exit code and the resulting output string (run_command's return values)
    """
    # No syslog daemon at all: nothing to configure (not an error).
    if not is_rsyslog_installed() and not is_syslog_ng_installed():
        return 0, 'configure_syslog(): Nothing to do: Neither rsyslog nor syslog-ng is installed on the system'
    # 1. Unconfigure existing syslog instance (if any) to avoid duplicates
    #    Continue even if this step fails (not critical)
    cmd_exit_code, cmd_output = unconfigure_syslog(run_command)
    # Remember any unconfigure failure so it can be surfaced in the final success message.
    extra_msg = ''
    if cmd_exit_code != 0:
        extra_msg = 'configure_syslog(): configure_syslog.sh unconfigure failed (still proceeding): ' + cmd_output
    # 2. Configure new syslog instance with port number.
    #    Ordering is very tricky. This must be done before modifying /etc/syslog-ng/syslog-ng.conf
    #    or /etc/rsyslog.d/95-omsagent.conf below!
    cmd_exit_code, cmd_output = run_omsagent_config_syslog_sh(run_command, 'configure', port)
    if cmd_exit_code != 0:
        return 2, 'configure_syslog(): configure_syslog.sh configure failed: ' + cmd_output
    # 2.5. Replace '%SYSLOG_PORT%' in all passed syslog configs with the obtained port number
    in_syslog_cfg = in_syslog_cfg.replace(syslog_port_pattern_marker, str(port))
    rsyslog_cfg = rsyslog_cfg.replace(syslog_port_pattern_marker, str(port))
    syslog_ng_cfg = syslog_ng_cfg.replace(syslog_port_pattern_marker, str(port))
    # 3. Configure fluentd in_syslog plugin (write the fluentd plugin config file)
    try:
        with open(fluentd_syslog_src_cfg_path, 'w') as f:
            f.write(in_syslog_cfg)
    except Exception as e:
        return 3, 'configure_syslog(): Writing to omsagent.d/syslog.conf failed: {0}'.format(e)
    # 4. Update (add facilities/levels) rsyslog or syslog-ng config.
    #    Exactly one destination is chosen: syslog-ng wins over rsyslog, and
    #    the rsyslog.d drop-in wins over the top-level rsyslog.conf.
    try:
        if is_syslog_ng_installed():
            append_string_to_file(syslog_ng_cfg, syslog_ng_conf_path)
        elif is_new_rsyslog_installed():
            append_string_to_file(rsyslog_cfg, rsyslog_d_omsagent_conf_path)
        else:  # old rsyslog, so append to rsyslog_top_conf_path
            append_string_to_file(rsyslog_cfg, rsyslog_top_conf_path)
    except Exception as e:
        return 4, 'configure_syslog(): Adding facilities/levels to rsyslog/syslog-ng conf failed: {0}'.format(e)
    # 5. Restart syslog so the new config takes effect
    cmd_exit_code, cmd_output = restart_syslog(run_command)
    if cmd_exit_code != 0:
        return 5, 'configure_syslog(): Failed at restarting syslog (rsyslog or syslog-ng). ' \
                  'Exit code={0}, Output={1}'.format(cmd_exit_code, cmd_output)
    # All succeeded
    return 0, 'configure_syslog(): Succeeded. Extra message: {0}'.format(extra_msg if extra_msg else 'None')
# Path of the fluentd in_tail plugin config file for the LAD omsagent workspace.
fluentd_tail_src_cfg_path = '/etc/opt/microsoft/omsagent/LAD/conf/omsagent.d/tail.conf'


def configure_filelog(in_tail_cfg):
    """
    Configure fluentd's in_tail plugin for LAD file logging by overwriting
    the omsagent.d/tail.conf file with the given config.

    :param in_tail_cfg: Fluentd's in_tail plugin cfg for LAD filelog setting (obtained from LadConfigAll obj)
    :rtype: int, str
    :return: A 2-tuple of process exit code and output
    """
    try:
        with open(fluentd_tail_src_cfg_path, 'w') as f:
            f.write(in_tail_cfg)
    except Exception as e:
        # FIX: include the exception detail in the message (e was previously
        # caught but never reported), consistent with the other configure_*
        # helpers in this module.
        return 1, 'configure_filelog(): Failed writing fluentd in_tail config file: {0}'.format(e)
    return 0, 'configure_filelog(): Succeeded writing fluentd in_tail config file'
# Path of the fluentd out_mdsd plugin config file for the LAD omsagent workspace.
fluentd_out_mdsd_cfg_path = '/etc/opt/microsoft/omsagent/LAD/conf/omsagent.d/z_out_mdsd.conf'


def configure_out_mdsd(out_mdsd_cfg):
    """
    Configure fluentd's out_mdsd plugin for LAD by overwriting the
    omsagent.d/z_out_mdsd.conf file with the given config.

    :param out_mdsd_cfg: Fluentd's out_mdsd plugin cfg for the entire LAD setting (obtained from LadConfigAll obj)
    :rtype: int, str
    :return: A 2-tuple of process exit code and output
    """
    # FIX: the previous comment here said "tail.conf" (copy-paste from
    # configure_filelog); this function writes z_out_mdsd.conf.
    try:
        with open(fluentd_out_mdsd_cfg_path, 'w') as f:
            f.write(out_mdsd_cfg)
    except Exception as e:
        # FIX: include the exception detail in the message (e was previously
        # caught but never reported).
        return 1, 'configure_out_mdsd(): Failed writing fluentd out_mdsd config file: {0}'.format(e)
    return 0, 'configure_out_mdsd(): Succeeded writing fluentd out_mdsd config file'
def unconfigure_syslog(run_command):
    """
    Undo the LAD syslog setup: run omsagent's configure_syslog.sh to
    unconfigure rsyslog/syslog-ng, then delete fluentd's in_syslog conf file.

    :param run_command: External command execution function (e.g., RunGetOutput)
    :rtype: int, str
    :return: 2-tuple of the process exit code and the resulting output string (run_command's return values)
    """
    # Nothing to do when fluentd's in_syslog was never configured.
    if not os.path.isfile(fluentd_syslog_src_cfg_path):
        return 0, "unconfigure_syslog(): Nothing to unconfigure: omsagent fluentd's in_syslog is not configured"
    # The listen port to unconfigure is recorded only in fluentd's in_syslog
    # config file, so read it back from there.
    try:
        with open(fluentd_syslog_src_cfg_path) as cfg_file:
            in_syslog_cfg = cfg_file.read()
    except Exception as e:
        return 1, "unconfigure_syslog(): Failed reading fluentd's in_syslog config: {0}".format(e)
    port_match = re.search(r'port\s+(\d+)', in_syslog_cfg)
    if not port_match:
        return 2, 'unconfigure_syslog(): Invalid fluentd in_syslog config: port number setting not found'
    # Hand the extracted port to omsagent's helper script for unconfiguration.
    exit_code, output = run_omsagent_config_syslog_sh(run_command, 'unconfigure', int(port_match.group(1)))
    if exit_code != 0:
        return 3, 'unconfigure_syslog(): configure_syslog.sh failed: ' + output
    # Drop fluentd's in_syslog conf file so a later configure starts clean.
    try:
        os.remove(fluentd_syslog_src_cfg_path)
    except Exception as e:
        return 4, 'unconfigure_syslog(): Removing omsagent.d/syslog.conf failed: {0}'.format(e)
    # All succeeded
    return 0, 'unconfigure_syslog(): Succeeded'
def restart_syslog(run_command):
    """
    Restart the installed syslog daemon (rsyslog or syslog-ng) so that any
    newly written config takes effect.

    :param run_command: External command execution function (e.g., RunGetOutput)
    :rtype: int, str
    :return: 2-tuple of the process exit code and the resulting output string (run_command's return values)
    """
    # The port argument is irrelevant for 'restart', so the default ('') is used.
    return run_omsagent_config_syslog_sh(run_command, 'restart')
def restart_omiserver(run_command):
    """
    Restart the OMI server. omiserver crashes sometimes and is not yet
    restarted automatically, so LAD restarts it explicitly.

    :param run_command: External command execution function (e.g., RunGetOutput)
    :rtype: int, str
    :return: 2-tuple of the process exit code and the resulting output string (run_command's return values)
    """
    restart_cmd = '/opt/omi/bin/service_control restart'
    return run_command(restart_cmd)
def setup_omsagent(configurator, run_command, logger_log, logger_error):
    """
    Set up omsagent. Install necessary components, configure them as needed, and start the agent.
    :param configurator: A LadConfigAll object that's obtained from a valid LAD JSON settings config.
                         This is needed to retrieve the syslog (rsyslog/syslog-ng) and the fluentd configs.
    :param run_command: External command executor (e.g., RunGetOutput)
    :param logger_log: Logger for normal logging messages (e.g., hutil.log)
    :param logger_error: Logger for error logging messages (e.g., hutil.error)
    :return: Pair of status code and message. 0 status code for success. Non-zero status code
             for a failure and the associated failure message.
    """
    # Remember whether OMI (not omsagent) needs to be freshly installed.
    # This is needed later to determine whether to reconfigure the omiserver.conf or not for security purpose.
    need_fresh_install_omi = not os.path.exists('/opt/omi/bin/omiserver')

    logger_log("Begin omsagent setup.")

    # 1. Install omsagent, onboard to LAD workspace, and install fluentd out_mdsd plugin.
    # We now try to install/setup all the time. If it's already installed, any additional install is a no-op.
    is_omsagent_setup_correctly = False
    max_tries = 5  # Try up to 5 times to install omsagent
    for trial_num in range(1, max_tries + 1):
        cmd_exit_code, cmd_output = setup_omsagent_for_lad(run_command)
        if cmd_exit_code == 0:  # Successfully set up
            is_omsagent_setup_correctly = True
            break
        logger_error("omsagent setup failed (trial #" + str(trial_num) + ").")
        if trial_num < max_tries:
            logger_error("Retrying in 30 seconds...")
            time.sleep(30)
    if not is_omsagent_setup_correctly:
        logger_error("omsagent setup failed " + str(max_tries) + " times. Giving up...")
        return 1, "omsagent setup failed {0} times. " \
                  "Last exit code={1}, Output={2}".format(max_tries, cmd_exit_code, cmd_output)

    # Issue #265. OMI httpsport shouldn't be reconfigured when LAD is re-enabled or just upgraded.
    # In other words, OMI httpsport config should be updated only on a fresh OMI install.
    if need_fresh_install_omi:
        # Check if OMI is configured to listen to any non-zero port and reconfigure if so.
        # BUG FIX: the exit code must be compared with '==', not 'is'. Identity
        # comparison against an int literal relies on CPython small-int caching
        # and raises SyntaxWarning on modern Pythons.
        omi_listens_to_nonzero_port = run_command(r"grep '^\s*httpsport\s*=' /etc/opt/omi/conf/omiserver.conf "
                                                  r"| grep -v '^\s*httpsport\s*=\s*0\s*$'")[0] == 0
        if omi_listens_to_nonzero_port:
            # Rewrite httpsport to 0 (disable the HTTPS listener) via omiconfigeditor.
            run_command("/opt/omi/bin/omiconfigeditor httpsport -s 0 < /etc/opt/omi/conf/omiserver.conf "
                        "> /etc/opt/omi/conf/omiserver.conf_temp")
            run_command("mv /etc/opt/omi/conf/omiserver.conf_temp /etc/opt/omi/conf/omiserver.conf")

    # 2. Configure all fluentd plugins (in_syslog, in_tail, out_mdsd)
    # 2.1. First get a free TCP/UDP port for fluentd in_syslog plugin.
    port = get_fluentd_syslog_src_port()
    if port < 0:
        return 3, 'setup_omsagent(): Failed at getting a free TCP/UDP port for fluentd in_syslog'
    # 2.2. Configure syslog
    cmd_exit_code, cmd_output = configure_syslog(run_command, port,
                                                 configurator.get_fluentd_syslog_src_config(),
                                                 configurator.get_rsyslog_config(),
                                                 configurator.get_syslog_ng_config())
    if cmd_exit_code != 0:
        return 4, 'setup_omsagent(): Failed at configuring in_syslog. Exit code={0}, Output={1}'.format(cmd_exit_code,
                                                                                                        cmd_output)
    # 2.3. Configure filelog
    cmd_exit_code, cmd_output = configure_filelog(configurator.get_fluentd_tail_src_config())
    if cmd_exit_code != 0:
        return 5, 'setup_omsagent(): Failed at configuring in_tail. Exit code={0}, Output={1}'.format(cmd_exit_code,
                                                                                                      cmd_output)
    # 2.4. Configure out_mdsd
    cmd_exit_code, cmd_output = configure_out_mdsd(configurator.get_fluentd_out_mdsd_config())
    if cmd_exit_code != 0:
        return 6, 'setup_omsagent(): Failed at configuring out_mdsd. Exit code={0}, Output={1}'.format(cmd_exit_code,
                                                                                                       cmd_output)
    # 3. Restart omsagent so the new fluentd configs take effect
    cmd_exit_code, cmd_output = control_omsagent('restart', run_command)
    if cmd_exit_code != 0:
        return 8, 'setup_omsagent(): Failed at restarting omsagent (fluentd). ' \
                  'Exit code={0}, Output={1}'.format(cmd_exit_code, cmd_output)

    # All done...
    return 0, "setup_omsagent(): Succeeded"
| |
"""Weight Boosting.
This module contains weight boosting estimators for both classification and
regression.
The module structure is the following:
- The `BaseWeightBoosting` base class implements a common ``fit`` method
for all the estimators in the module. Regression and classification
only differ from each other in the loss function that is optimized.
- :class:`~sklearn.ensemble.AdaBoostClassifier` implements adaptive boosting
(AdaBoost-SAMME) for classification problems.
- :class:`~sklearn.ensemble.AdaBoostRegressor` implements adaptive boosting
(AdaBoost.R2) for regression problems.
"""
# Authors: Noel Dawe <noel@dawe.me>
# Gilles Louppe <g.louppe@gmail.com>
# Hamzeh Alsalhi <ha258@cornell.edu>
# Arnaud Joly <arnaud.v.joly@gmail.com>
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numbers
import numpy as np
import warnings
from scipy.special import xlogy
from ._base import BaseEnsemble
from ..base import ClassifierMixin, RegressorMixin, is_classifier, is_regressor
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..utils import check_random_state, _safe_indexing
from ..utils import check_scalar
from ..utils.extmath import softmax
from ..utils.extmath import stable_cumsum
from ..metrics import accuracy_score, r2_score
from ..utils.validation import check_is_fitted
from ..utils.validation import _check_sample_weight
from ..utils.validation import has_fit_parameter
from ..utils.validation import _num_samples
__all__ = [
"AdaBoostClassifier",
"AdaBoostRegressor",
]
class BaseWeightBoosting(BaseEnsemble, metaclass=ABCMeta):
    """Base class for AdaBoost estimators.

    Warning: This class should not be used directly. Use derived classes
    instead.
    """

    @abstractmethod
    def __init__(
        self,
        base_estimator=None,
        *,
        n_estimators=50,
        estimator_params=tuple(),
        learning_rate=1.0,
        random_state=None,
    ):
        # Per scikit-learn convention, __init__ only stores parameters;
        # validation is deferred to fit().
        super().__init__(
            base_estimator=base_estimator,
            n_estimators=n_estimators,
            estimator_params=estimator_params,
        )

        self.learning_rate = learning_rate
        self.random_state = random_state

    def _check_X(self, X):
        # Only called to validate X in non-fit methods, therefore reset=False
        return self._validate_data(
            X,
            accept_sparse=["csr", "csc"],
            ensure_2d=True,
            allow_nd=True,
            dtype=None,
            reset=False,
        )

    def fit(self, X, y, sample_weight=None):
        """Build a boosted classifier/regressor from the training set (X, y).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. COO, DOK, and LIL are converted to CSR.

        y : array-like of shape (n_samples,)
            The target values (class labels in classification, real numbers in
            regression).

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, the sample weights are initialized to
            1 / n_samples.

        Returns
        -------
        self : object
            Fitted estimator.
        """
        # Validate scalar parameters
        check_scalar(
            self.n_estimators,
            "n_estimators",
            target_type=numbers.Integral,
            min_val=1,
            include_boundaries="left",
        )

        check_scalar(
            self.learning_rate,
            "learning_rate",
            target_type=numbers.Real,
            min_val=0,
            include_boundaries="neither",
        )

        X, y = self._validate_data(
            X,
            y,
            accept_sparse=["csr", "csc"],
            ensure_2d=True,
            allow_nd=True,
            dtype=None,
            y_numeric=is_regressor(self),
        )

        sample_weight = _check_sample_weight(
            sample_weight, X, np.float64, copy=True, only_non_negative=True
        )
        # Normalize the (possibly user-supplied) weights to a distribution.
        sample_weight /= sample_weight.sum()

        # Check parameters
        self._validate_estimator()

        # Clear any previous fit results
        self.estimators_ = []
        self.estimator_weights_ = np.zeros(self.n_estimators, dtype=np.float64)
        self.estimator_errors_ = np.ones(self.n_estimators, dtype=np.float64)

        # Initialization of the random number instance that will be used to
        # generate a seed at each iteration
        random_state = check_random_state(self.random_state)

        for iboost in range(self.n_estimators):
            # Boosting step: delegated to the subclass-specific _boost().
            sample_weight, estimator_weight, estimator_error = self._boost(
                iboost, X, y, sample_weight, random_state
            )

            # Early termination
            if sample_weight is None:
                break
            self.estimator_weights_[iboost] = estimator_weight
            self.estimator_errors_[iboost] = estimator_error

            # Stop if error is zero
            if estimator_error == 0:
                break

            sample_weight_sum = np.sum(sample_weight)

            if not np.isfinite(sample_weight_sum):
                warnings.warn(
                    "Sample weights have reached infinite values,"
                    f" at iteration {iboost}, causing overflow. "
                    "Iterations stopped. Try lowering the learning rate.",
                    stacklevel=2,
                )
                break

            # Stop if the sum of sample weights has become non-positive
            if sample_weight_sum <= 0:
                break

            if iboost < self.n_estimators - 1:
                # Normalize (not needed after the last round: no further fit)
                sample_weight /= sample_weight_sum

        return self

    @abstractmethod
    def _boost(self, iboost, X, y, sample_weight, random_state):
        """Implement a single boost.

        Warning: This method needs to be overridden by subclasses.

        Parameters
        ----------
        iboost : int
            The index of the current boost iteration.

        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. COO, DOK, and LIL are converted to CSR.

        y : array-like of shape (n_samples,)
            The target values (class labels).

        sample_weight : array-like of shape (n_samples,)
            The current sample weights.

        random_state : RandomState
            The current random number generator

        Returns
        -------
        sample_weight : array-like of shape (n_samples,) or None
            The reweighted sample weights.
            If None then boosting has terminated early.

        estimator_weight : float
            The weight for the current boost.
            If None then boosting has terminated early.

        error : float
            The classification error for the current boost.
            If None then boosting has terminated early.
        """
        pass

    def staged_score(self, X, y, sample_weight=None):
        """Return staged scores for X, y.

        This generator method yields the ensemble score after each iteration of
        boosting and therefore allows monitoring, such as to determine the
        score on a test set after each boost.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. COO, DOK, and LIL are converted to CSR.

        y : array-like of shape (n_samples,)
            Labels for X.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights.

        Yields
        ------
        z : float
        """
        X = self._check_X(X)

        # Score metric depends on the task: accuracy for classifiers,
        # R^2 for regressors.
        for y_pred in self.staged_predict(X):
            if is_classifier(self):
                yield accuracy_score(y, y_pred, sample_weight=sample_weight)
            else:
                yield r2_score(y, y_pred, sample_weight=sample_weight)

    @property
    def feature_importances_(self):
        """The impurity-based feature importances.

        The higher, the more important the feature.
        The importance of a feature is computed as the (normalized)
        total reduction of the criterion brought by that feature. It is also
        known as the Gini importance.

        Warning: impurity-based feature importances can be misleading for
        high cardinality features (many unique values). See
        :func:`sklearn.inspection.permutation_importance` as an alternative.

        Returns
        -------
        feature_importances_ : ndarray of shape (n_features,)
            The feature importances.
        """
        if self.estimators_ is None or len(self.estimators_) == 0:
            raise ValueError(
                "Estimator not fitted, call `fit` before `feature_importances_`."
            )

        try:
            # Weighted average of each fitted estimator's importances,
            # normalized by the total estimator weight.
            norm = self.estimator_weights_.sum()
            return (
                sum(
                    weight * clf.feature_importances_
                    for weight, clf in zip(self.estimator_weights_, self.estimators_)
                )
                / norm
            )
        except AttributeError as e:
            raise AttributeError(
                "Unable to compute feature importances "
                "since base_estimator does not have a "
                "feature_importances_ attribute"
            ) from e
def _samme_proba(estimator, n_classes, X):
    """Compute one estimator's SAMME.R class-probability contribution.

    Calculate algorithm 4, step 2, equation c) of Zhu et al [1].

    References
    ----------
    .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
    """
    proba = estimator.predict_proba(X)

    # Clip away zeros (and any negative entries, which may occur with
    # negative sample weights) so that taking the log below is well defined.
    eps = np.finfo(proba.dtype).eps
    np.clip(proba, eps, None, out=proba)
    log_proba = np.log(proba)

    # Center each row of log-probabilities by its mean, then scale.
    centered = log_proba - (1.0 / n_classes) * log_proba.sum(axis=1)[:, np.newaxis]
    return (n_classes - 1) * centered
class AdaBoostClassifier(ClassifierMixin, BaseWeightBoosting):
"""An AdaBoost classifier.
An AdaBoost [1] classifier is a meta-estimator that begins by fitting a
classifier on the original dataset and then fits additional copies of the
classifier on the same dataset but where the weights of incorrectly
classified instances are adjusted such that subsequent classifiers focus
more on difficult cases.
This class implements the algorithm known as AdaBoost-SAMME [2].
Read more in the :ref:`User Guide <adaboost>`.
.. versionadded:: 0.14
Parameters
----------
base_estimator : object, default=None
The base estimator from which the boosted ensemble is built.
Support for sample weighting is required, as well as proper
``classes_`` and ``n_classes_`` attributes. If ``None``, then
the base estimator is :class:`~sklearn.tree.DecisionTreeClassifier`
initialized with `max_depth=1`.
n_estimators : int, default=50
The maximum number of estimators at which boosting is terminated.
In case of perfect fit, the learning procedure is stopped early.
Values must be in the range `[1, inf)`.
learning_rate : float, default=1.0
Weight applied to each classifier at each boosting iteration. A higher
learning rate increases the contribution of each classifier. There is
a trade-off between the `learning_rate` and `n_estimators` parameters.
Values must be in the range `(0.0, inf)`.
algorithm : {'SAMME', 'SAMME.R'}, default='SAMME.R'
If 'SAMME.R' then use the SAMME.R real boosting algorithm.
``base_estimator`` must support calculation of class probabilities.
If 'SAMME' then use the SAMME discrete boosting algorithm.
The SAMME.R algorithm typically converges faster than SAMME,
achieving a lower test error with fewer boosting iterations.
random_state : int, RandomState instance or None, default=None
Controls the random seed given at each `base_estimator` at each
boosting iteration.
Thus, it is only used when `base_estimator` exposes a `random_state`.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Attributes
----------
base_estimator_ : estimator
The base estimator from which the ensemble is grown.
estimators_ : list of classifiers
The collection of fitted sub-estimators.
classes_ : ndarray of shape (n_classes,)
The classes labels.
n_classes_ : int
The number of classes.
estimator_weights_ : ndarray of floats
Weights for each estimator in the boosted ensemble.
estimator_errors_ : ndarray of floats
Classification error for each estimator in the boosted
ensemble.
feature_importances_ : ndarray of shape (n_features,)
The impurity-based feature importances if supported by the
``base_estimator`` (when based on decision trees).
Warning: impurity-based feature importances can be misleading for
high cardinality features (many unique values). See
:func:`sklearn.inspection.permutation_importance` as an alternative.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
AdaBoostRegressor : An AdaBoost regressor that begins by fitting a
regressor on the original dataset and then fits additional copies of
the regressor on the same dataset but where the weights of instances
are adjusted according to the error of the current prediction.
GradientBoostingClassifier : GB builds an additive model in a forward
stage-wise fashion. Regression trees are fit on the negative gradient
of the binomial or multinomial deviance loss function. Binary
classification is a special case where only a single regression tree is
induced.
sklearn.tree.DecisionTreeClassifier : A non-parametric supervised learning
method used for classification.
Creates a model that predicts the value of a target variable by
learning simple decision rules inferred from the data features.
References
----------
.. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
on-Line Learning and an Application to Boosting", 1995.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
Examples
--------
>>> from sklearn.ensemble import AdaBoostClassifier
>>> from sklearn.datasets import make_classification
>>> X, y = make_classification(n_samples=1000, n_features=4,
... n_informative=2, n_redundant=0,
... random_state=0, shuffle=False)
>>> clf = AdaBoostClassifier(n_estimators=100, random_state=0)
>>> clf.fit(X, y)
AdaBoostClassifier(n_estimators=100, random_state=0)
>>> clf.predict([[0, 0, 0, 0]])
array([1])
>>> clf.score(X, y)
0.983...
"""
    def __init__(
        self,
        base_estimator=None,
        *,
        n_estimators=50,
        learning_rate=1.0,
        algorithm="SAMME.R",
        random_state=None,
    ):
        # Per scikit-learn convention, __init__ only stores parameters
        # unmodified (see the class docstring for their meaning); all
        # validation happens in fit().
        super().__init__(
            base_estimator=base_estimator,
            n_estimators=n_estimators,
            learning_rate=learning_rate,
            random_state=random_state,
        )

        self.algorithm = algorithm
def fit(self, X, y, sample_weight=None):
"""Build a boosted classifier from the training set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
y : array-like of shape (n_samples,)
The target values (class labels).
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, the sample weights are initialized to
``1 / n_samples``.
Returns
-------
self : object
Fitted estimator.
"""
# Check that algorithm is supported
if self.algorithm not in ("SAMME", "SAMME.R"):
raise ValueError(
"Algorithm must be 'SAMME' or 'SAMME.R'."
f" Got {self.algorithm!r} instead."
)
# Fit
return super().fit(X, y, sample_weight)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super()._validate_estimator(default=DecisionTreeClassifier(max_depth=1))
# SAMME-R requires predict_proba-enabled base estimators
if self.algorithm == "SAMME.R":
if not hasattr(self.base_estimator_, "predict_proba"):
raise TypeError(
"AdaBoostClassifier with algorithm='SAMME.R' requires "
"that the weak learner supports the calculation of class "
"probabilities with a predict_proba method.\n"
"Please change the base estimator or set "
"algorithm='SAMME' instead."
)
if not has_fit_parameter(self.base_estimator_, "sample_weight"):
raise ValueError(
"%s doesn't support sample_weight."
% self.base_estimator_.__class__.__name__
)
def _boost(self, iboost, X, y, sample_weight, random_state):
"""Implement a single boost.
Perform a single boost according to the real multi-class SAMME.R
algorithm or to the discrete SAMME algorithm and return the updated
sample weights.
Parameters
----------
iboost : int
The index of the current boost iteration.
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples.
y : array-like of shape (n_samples,)
The target values (class labels).
sample_weight : array-like of shape (n_samples,)
The current sample weights.
random_state : RandomState instance
The RandomState instance used if the base estimator accepts a
`random_state` attribute.
Returns
-------
sample_weight : array-like of shape (n_samples,) or None
The reweighted sample weights.
If None then boosting has terminated early.
estimator_weight : float
The weight for the current boost.
If None then boosting has terminated early.
estimator_error : float
The classification error for the current boost.
If None then boosting has terminated early.
"""
if self.algorithm == "SAMME.R":
return self._boost_real(iboost, X, y, sample_weight, random_state)
else: # elif self.algorithm == "SAMME":
return self._boost_discrete(iboost, X, y, sample_weight, random_state)
    def _boost_real(self, iboost, X, y, sample_weight, random_state):
        """Implement a single boost using the SAMME.R real algorithm."""
        estimator = self._make_estimator(random_state=random_state)

        estimator.fit(X, y, sample_weight=sample_weight)

        y_predict_proba = estimator.predict_proba(X)

        if iboost == 0:
            # First round: record the class labels as seen by the weak learner.
            self.classes_ = getattr(estimator, "classes_", None)
            self.n_classes_ = len(self.classes_)

        # Hard predictions = argmax over the predicted class probabilities.
        y_predict = self.classes_.take(np.argmax(y_predict_proba, axis=1), axis=0)

        # Instances incorrectly classified
        incorrect = y_predict != y

        # Error fraction, weighted by the current sample weights
        estimator_error = np.mean(np.average(incorrect, weights=sample_weight, axis=0))

        # Stop if classification is perfect
        if estimator_error <= 0:
            return sample_weight, 1.0, 0.0

        # Construct y coding as described in Zhu et al [2]:
        #
        #    y_k = 1 if c == k else -1 / (K - 1)
        #
        # where K == n_classes_ and c, k in [0, K) are indices along the second
        # axis of the y coding with c being the index corresponding to the true
        # class label.
        n_classes = self.n_classes_
        classes = self.classes_
        y_codes = np.array([-1.0 / (n_classes - 1), 1.0])
        y_coding = y_codes.take(classes == y[:, np.newaxis])

        # Displace zero probabilities so the log is defined.
        # Also fix negative elements which may occur with
        # negative sample weights.
        proba = y_predict_proba  # alias for readability
        np.clip(proba, np.finfo(proba.dtype).eps, None, out=proba)

        # Boost weight using multi-class AdaBoost SAMME.R alg
        # (xlogy(a, b) = a * log(b), handling a == 0 gracefully)
        estimator_weight = (
            -1.0
            * self.learning_rate
            * ((n_classes - 1.0) / n_classes)
            * xlogy(y_coding, y_predict_proba).sum(axis=1)
        )

        # Only boost the weights if it will fit again
        if not iboost == self.n_estimators - 1:
            # Only boost positive weights (the mask also lets negative
            # estimator_weight entries through, per the expression below)
            sample_weight *= np.exp(
                estimator_weight * ((sample_weight > 0) | (estimator_weight < 0))
            )

        return sample_weight, 1.0, estimator_error
    def _boost_discrete(self, iboost, X, y, sample_weight, random_state):
        """Implement a single boost using the SAMME discrete algorithm.

        Fits one new estimator on the weighted data and returns the tuple
        ``(sample_weight, estimator_weight, estimator_error)``, or
        ``(None, None, None)`` when the new estimator is no better than
        random guessing and boosting must terminate early.
        """
        estimator = self._make_estimator(random_state=random_state)
        estimator.fit(X, y, sample_weight=sample_weight)
        y_predict = estimator.predict(X)
        # First boost: cache the label set from the fitted estimator.
        if iboost == 0:
            self.classes_ = getattr(estimator, "classes_", None)
            self.n_classes_ = len(self.classes_)
        # Instances incorrectly classified
        incorrect = y_predict != y
        # Error fraction
        estimator_error = np.mean(np.average(incorrect, weights=sample_weight, axis=0))
        # Stop if classification is perfect
        if estimator_error <= 0:
            return sample_weight, 1.0, 0.0
        n_classes = self.n_classes_
        # Stop if the error is at least as bad as random guessing
        if estimator_error >= 1.0 - (1.0 / n_classes):
            # Drop the useless estimator that _make_estimator just appended.
            self.estimators_.pop(-1)
            if len(self.estimators_) == 0:
                raise ValueError(
                    "BaseClassifier in AdaBoostClassifier "
                    "ensemble is worse than random, ensemble "
                    "can not be fit."
                )
            return None, None, None
        # Boost weight using multi-class AdaBoost SAMME alg
        estimator_weight = self.learning_rate * (
            np.log((1.0 - estimator_error) / estimator_error) + np.log(n_classes - 1.0)
        )
        # Only boost the weights if I will fit again
        if not iboost == self.n_estimators - 1:
            # Only boost positive weights
            sample_weight = np.exp(
                np.log(sample_weight)
                + estimator_weight * incorrect * (sample_weight > 0)
            )
        return sample_weight, estimator_weight, estimator_error
def predict(self, X):
"""Predict classes for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Returns
-------
y : ndarray of shape (n_samples,)
The predicted classes.
"""
pred = self.decision_function(X)
if self.n_classes_ == 2:
return self.classes_.take(pred > 0, axis=0)
return self.classes_.take(np.argmax(pred, axis=1), axis=0)
def staged_predict(self, X):
"""Return staged predictions for X.
The predicted class of an input sample is computed as the weighted mean
prediction of the classifiers in the ensemble.
This generator method yields the ensemble prediction after each
iteration of boosting and therefore allows monitoring, such as to
determine the prediction on a test set after each boost.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Yields
------
y : generator of ndarray of shape (n_samples,)
The predicted classes.
"""
X = self._check_X(X)
n_classes = self.n_classes_
classes = self.classes_
if n_classes == 2:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(pred > 0, axis=0))
else:
for pred in self.staged_decision_function(X):
yield np.array(classes.take(np.argmax(pred, axis=1), axis=0))
    def decision_function(self, X):
        """Compute the decision function of ``X``.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. COO, DOK, and LIL are converted to CSR.

        Returns
        -------
        score : ndarray of shape of (n_samples, k)
            The decision function of the input samples. The order of
            outputs is the same of that of the :term:`classes_` attribute.
            Binary classification is a special case with ``k == 1``,
            otherwise ``k==n_classes``. For binary classification,
            values closer to -1 or 1 mean more like the first or second
            class in ``classes_``, respectively.
        """
        check_is_fitted(self)
        X = self._check_X(X)

        n_classes = self.n_classes_
        classes = self.classes_[:, np.newaxis]

        if self.algorithm == "SAMME.R":
            # The weights are all 1. for SAMME.R
            pred = sum(
                _samme_proba(estimator, n_classes, X) for estimator in self.estimators_
            )
        else:  # self.algorithm == "SAMME"
            # Weighted vote: each estimator contributes its weight `w` to the
            # class it predicts (one-hot comparison against `classes`).
            pred = sum(
                (estimator.predict(X) == classes).T * w
                for estimator, w in zip(self.estimators_, self.estimator_weights_)
            )

        # Normalize by the total estimator weight.
        pred /= self.estimator_weights_.sum()
        if n_classes == 2:
            # Collapse the two symmetric columns into one signed score.
            pred[:, 0] *= -1
            return pred.sum(axis=1)
        return pred
    def staged_decision_function(self, X):
        """Compute decision function of ``X`` for each boosting iteration.

        This method allows monitoring (i.e. determine error on testing set)
        after each boosting iteration.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. COO, DOK, and LIL are converted to CSR.

        Yields
        ------
        score : generator of ndarray of shape (n_samples, k)
            The decision function of the input samples. The order of
            outputs is the same of that of the :term:`classes_` attribute.
            Binary classification is a special case with ``k == 1``,
            otherwise ``k==n_classes``. For binary classification,
            values closer to -1 or 1 mean more like the first or second
            class in ``classes_``, respectively.
        """
        check_is_fitted(self)
        X = self._check_X(X)

        n_classes = self.n_classes_
        classes = self.classes_[:, np.newaxis]
        pred = None
        # Running sum of estimator weights, used to normalize each stage.
        norm = 0.0

        for weight, estimator in zip(self.estimator_weights_, self.estimators_):
            norm += weight

            if self.algorithm == "SAMME.R":
                # The weights are all 1. for SAMME.R
                current_pred = _samme_proba(estimator, n_classes, X)
            else:  # elif self.algorithm == "SAMME":
                current_pred = estimator.predict(X)
                current_pred = (current_pred == classes).T * weight

            # Accumulate the (weighted) per-class scores across estimators.
            if pred is None:
                pred = current_pred
            else:
                pred += current_pred

            if n_classes == 2:
                # Copy before sign-flipping so `pred` keeps accumulating.
                tmp_pred = np.copy(pred)
                tmp_pred[:, 0] *= -1
                yield (tmp_pred / norm).sum(axis=1)
            else:
                yield pred / norm
@staticmethod
def _compute_proba_from_decision(decision, n_classes):
"""Compute probabilities from the decision function.
This is based eq. (4) of [1] where:
p(y=c|X) = exp((1 / K-1) f_c(X)) / sum_k(exp((1 / K-1) f_k(X)))
= softmax((1 / K-1) * f(X))
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost",
2009.
"""
if n_classes == 2:
decision = np.vstack([-decision, decision]).T / 2
else:
decision /= n_classes - 1
return softmax(decision, copy=False)
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample is computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Returns
-------
p : ndarray of shape (n_samples, n_classes)
The class probabilities of the input samples. The order of
outputs is the same of that of the :term:`classes_` attribute.
"""
check_is_fitted(self)
n_classes = self.n_classes_
if n_classes == 1:
return np.ones((_num_samples(X), 1))
decision = self.decision_function(X)
return self._compute_proba_from_decision(decision, n_classes)
def staged_predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample is computed as
the weighted mean predicted class probabilities of the classifiers
in the ensemble.
This generator method yields the ensemble predicted class probabilities
after each iteration of boosting and therefore allows monitoring, such
as to determine the predicted class probabilities on a test set after
each boost.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Yields
------
p : generator of ndarray of shape (n_samples,)
The class probabilities of the input samples. The order of
outputs is the same of that of the :term:`classes_` attribute.
"""
n_classes = self.n_classes_
for decision in self.staged_decision_function(X):
yield self._compute_proba_from_decision(decision, n_classes)
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the weighted mean predicted class log-probabilities of the classifiers
in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Sparse matrix can be CSC, CSR, COO,
DOK, or LIL. COO, DOK, and LIL are converted to CSR.
Returns
-------
p : ndarray of shape (n_samples, n_classes)
The class probabilities of the input samples. The order of
outputs is the same of that of the :term:`classes_` attribute.
"""
return np.log(self.predict_proba(X))
class AdaBoostRegressor(RegressorMixin, BaseWeightBoosting):
    """An AdaBoost regressor.

    An AdaBoost [1] regressor is a meta-estimator that begins by fitting a
    regressor on the original dataset and then fits additional copies of the
    regressor on the same dataset but where the weights of instances are
    adjusted according to the error of the current prediction. As such,
    subsequent regressors focus more on difficult cases.

    This class implements the algorithm known as AdaBoost.R2 [2].

    Read more in the :ref:`User Guide <adaboost>`.

    .. versionadded:: 0.14

    Parameters
    ----------
    base_estimator : object, default=None
        The base estimator from which the boosted ensemble is built.
        If ``None``, then the base estimator is
        :class:`~sklearn.tree.DecisionTreeRegressor` initialized with
        `max_depth=3`.

    n_estimators : int, default=50
        The maximum number of estimators at which boosting is terminated.
        In case of perfect fit, the learning procedure is stopped early.
        Values must be in the range `[1, inf)`.

    learning_rate : float, default=1.0
        Weight applied to each regressor at each boosting iteration. A higher
        learning rate increases the contribution of each regressor. There is
        a trade-off between the `learning_rate` and `n_estimators` parameters.
        Values must be in the range `(0.0, inf)`.

    loss : {'linear', 'square', 'exponential'}, default='linear'
        The loss function to use when updating the weights after each
        boosting iteration.

    random_state : int, RandomState instance or None, default=None
        Controls the random seed given at each `base_estimator` at each
        boosting iteration.
        Thus, it is only used when `base_estimator` exposes a `random_state`.
        In addition, it controls the bootstrap of the weights used to train the
        `base_estimator` at each boosting iteration.
        Pass an int for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Attributes
    ----------
    base_estimator_ : estimator
        The base estimator from which the ensemble is grown.

    estimators_ : list of regressors
        The collection of fitted sub-estimators.

    estimator_weights_ : ndarray of floats
        Weights for each estimator in the boosted ensemble.

    estimator_errors_ : ndarray of floats
        Regression error for each estimator in the boosted ensemble.

    feature_importances_ : ndarray of shape (n_features,)
        The impurity-based feature importances if supported by the
        ``base_estimator`` (when based on decision trees).

        Warning: impurity-based feature importances can be misleading for
        high cardinality features (many unique values). See
        :func:`sklearn.inspection.permutation_importance` as an alternative.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    See Also
    --------
    AdaBoostClassifier : An AdaBoost classifier.
    GradientBoostingRegressor : Gradient Boosting Regression Tree.
    sklearn.tree.DecisionTreeRegressor : A decision tree regressor.

    References
    ----------
    .. [1] Y. Freund, R. Schapire, "A Decision-Theoretic Generalization of
           on-Line Learning and an Application to Boosting", 1995.

    .. [2] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.

    Examples
    --------
    >>> from sklearn.ensemble import AdaBoostRegressor
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(n_features=4, n_informative=2,
    ...                        random_state=0, shuffle=False)
    >>> regr = AdaBoostRegressor(random_state=0, n_estimators=100)
    >>> regr.fit(X, y)
    AdaBoostRegressor(n_estimators=100, random_state=0)
    >>> regr.predict([[0, 0, 0, 0]])
    array([4.7972...])
    >>> regr.score(X, y)
    0.9771...
    """

    def __init__(
        self,
        base_estimator=None,
        *,
        n_estimators=50,
        learning_rate=1.0,
        loss="linear",
        random_state=None,
    ):
        super().__init__(
            base_estimator=base_estimator,
            n_estimators=n_estimators,
            learning_rate=learning_rate,
            random_state=random_state,
        )

        self.loss = loss
        # NOTE(review): looks redundant — random_state is already passed to
        # super().__init__ above; confirm against BaseWeightBoosting.
        self.random_state = random_state

    def fit(self, X, y, sample_weight=None):
        """Build a boosted regressor from the training set (X, y).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. COO, DOK, and LIL are converted to CSR.

        y : array-like of shape (n_samples,)
            The target values (real numbers).

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, the sample weights are initialized to
            1 / n_samples.

        Returns
        -------
        self : object
            Fitted AdaBoostRegressor estimator.

        Raises
        ------
        ValueError
            If ``loss`` is not one of 'linear', 'square' or 'exponential'.
        """
        # Check loss
        if self.loss not in ("linear", "square", "exponential"):
            raise ValueError(
                "loss must be 'linear', 'square', or 'exponential'."
                f" Got {self.loss!r} instead."
            )

        # Fit
        return super().fit(X, y, sample_weight)

    def _validate_estimator(self):
        """Check the estimator and set the base_estimator_ attribute."""
        super()._validate_estimator(default=DecisionTreeRegressor(max_depth=3))

    def _boost(self, iboost, X, y, sample_weight, random_state):
        """Implement a single boost for regression

        Perform a single boost according to the AdaBoost.R2 algorithm and
        return the updated sample weights.

        Parameters
        ----------
        iboost : int
            The index of the current boost iteration.

        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples.

        y : array-like of shape (n_samples,)
            The target values (class labels in classification, real numbers in
            regression).

        sample_weight : array-like of shape (n_samples,)
            The current sample weights.

        random_state : RandomState
            The RandomState instance used if the base estimator accepts a
            `random_state` attribute.
            Controls also the bootstrap of the weights used to train the weak
            learner.

        Returns
        -------
        sample_weight : array-like of shape (n_samples,) or None
            The reweighted sample weights.
            If None then boosting has terminated early.

        estimator_weight : float
            The weight for the current boost.
            If None then boosting has terminated early.

        estimator_error : float
            The regression error for the current boost.
            If None then boosting has terminated early.
        """
        estimator = self._make_estimator(random_state=random_state)

        # Weighted sampling of the training set with replacement
        bootstrap_idx = random_state.choice(
            np.arange(_num_samples(X)),
            size=_num_samples(X),
            replace=True,
            p=sample_weight,
        )

        # Fit on the bootstrapped sample and obtain a prediction
        # for all samples in the training set
        X_ = _safe_indexing(X, bootstrap_idx)
        y_ = _safe_indexing(y, bootstrap_idx)
        estimator.fit(X_, y_)
        y_predict = estimator.predict(X)

        error_vect = np.abs(y_predict - y)
        # Zero-weight samples are ignored for the error computation.
        sample_mask = sample_weight > 0
        masked_sample_weight = sample_weight[sample_mask]
        masked_error_vector = error_vect[sample_mask]

        # Normalize errors to [0, 1] before applying the chosen loss.
        error_max = masked_error_vector.max()
        if error_max != 0:
            masked_error_vector /= error_max

        if self.loss == "square":
            masked_error_vector **= 2
        elif self.loss == "exponential":
            masked_error_vector = 1.0 - np.exp(-masked_error_vector)

        # Calculate the average loss
        estimator_error = (masked_sample_weight * masked_error_vector).sum()

        if estimator_error <= 0:
            # Stop if fit is perfect
            return sample_weight, 1.0, 0.0

        elif estimator_error >= 0.5:
            # Discard current estimator only if it isn't the only one
            if len(self.estimators_) > 1:
                self.estimators_.pop(-1)
            return None, None, None

        beta = estimator_error / (1.0 - estimator_error)

        # Boost weight using AdaBoost.R2 alg
        estimator_weight = self.learning_rate * np.log(1.0 / beta)

        if not iboost == self.n_estimators - 1:
            # Low-error samples get their weight shrunk the most.
            sample_weight[sample_mask] *= np.power(
                beta, (1.0 - masked_error_vector) * self.learning_rate
            )

        return sample_weight, estimator_weight, estimator_error

    def _get_median_predict(self, X, limit):
        # Weighted-median aggregation over the first `limit` estimators.
        # Evaluate predictions of all estimators
        predictions = np.array([est.predict(X) for est in self.estimators_[:limit]]).T

        # Sort the predictions
        sorted_idx = np.argsort(predictions, axis=1)

        # Find index of median prediction for each sample
        weight_cdf = stable_cumsum(self.estimator_weights_[sorted_idx], axis=1)
        median_or_above = weight_cdf >= 0.5 * weight_cdf[:, -1][:, np.newaxis]
        median_idx = median_or_above.argmax(axis=1)

        median_estimators = sorted_idx[np.arange(_num_samples(X)), median_idx]

        # Return median predictions
        return predictions[np.arange(_num_samples(X)), median_estimators]

    def predict(self, X):
        """Predict regression value for X.

        The predicted regression value of an input sample is computed
        as the weighted median prediction of the regressors in the ensemble.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Sparse matrix can be CSC, CSR, COO,
            DOK, or LIL. COO, DOK, and LIL are converted to CSR.

        Returns
        -------
        y : ndarray of shape (n_samples,)
            The predicted regression values.
        """
        check_is_fitted(self)
        X = self._check_X(X)

        return self._get_median_predict(X, len(self.estimators_))

    def staged_predict(self, X):
        """Return staged predictions for X.

        The predicted regression value of an input sample is computed
        as the weighted median prediction of the regressors in the ensemble.

        This generator method yields the ensemble prediction after each
        iteration of boosting and therefore allows monitoring, such as to
        determine the prediction on a test set after each boost.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples.

        Yields
        ------
        y : generator of ndarray of shape (n_samples,)
            The predicted regression values.
        """
        check_is_fitted(self)
        X = self._check_X(X)

        for i, _ in enumerate(self.estimators_, 1):
            yield self._get_median_predict(X, limit=i)
| |
from __future__ import absolute_import
import difflib
import os
from salt.ext import six
from salt.utils.odict import OrderedDict
try:
    from salt.exceptions import InvalidConfigError
except ImportError:
    from salt.exceptions import SaltException

    # Fallback for older salt releases that do not ship InvalidConfigError.
    class InvalidConfigError(SaltException):
        '''
        Not yet defined by this version of salt
        '''
class EtcHostsRuntimeException(Exception):
    '''Raised when generating or writing the managed hosts file fails.'''
    pass
# the system hosts file to load/save
HOSTS_FILE = '/etc/hosts'
# file where we keep the previous /etc/hosts file
CAASP_HOSTS_FILE = '/etc/caasp/hosts'
PREFACE = '''
#
# This file is automatically generated/managed by CaaSP/Salt
# Please add any custom entries in {file}
# Any other modification will be lost...
#
'''
ADMIN_EXPR = 'G@roles:admin'
MASTER_EXPR = 'G@roles:kube-master'
WORKER_EXPR = 'G@roles:kube-minion'
OTHER_EXPR = 'not ( P@roles:(admin|ca) or P@roles:kube-(master|minion) )'
PILLAR_INTERNAL_INFRA = 'internal_infra_domain'
PILLAR_EXTERNAL_FQDN = 'api:server:external_fqdn'
# minimal set of entries that will be written in /etc/hosts
MINIMAL_ETC_HOSTS = '''
127.0.0.1 localhost
# special IPv6 addresses
::1 localhost ipv6-localhost ipv6-loopback
fe00::0 ipv6-localnet
ff00::0 ipv6-mcastprefix
ff02::1 ipv6-allnodes
ff02::2 ipv6-allrouters
ff02::3 ipv6-allhosts
'''
def __virtual__():
    '''Expose this module to salt under the ``caasp_hosts`` name.'''
    return "caasp_hosts"
def _concat(lst1, lst2):
    '''Merge two name lists: drop duplicates and empty strings, and return
    the result sorted so the output is deterministic.'''
    merged = set(lst1)
    merged.update(lst2)
    return sorted(entry for entry in merged if entry)
def _load_lines(filename):
    '''Read *filename* and return its lines, stripped of whitespace and
    newlines, with any trailing empty lines removed.'''
    __utils__['caasp_log.debug']('hosts: loading %s', filename)
    with open(filename, 'r') as f:
        lines = [x.strip().replace('\n', '') for x in f.readlines()]
    # remove any trailing empty lines; the `lines and` guard prevents an
    # IndexError on lines[-1] when the file is completely empty
    while lines and not lines[-1]:
        del lines[-1]
    __utils__['caasp_log.debug']('hosts: %d lines loaded from %s', len(lines), filename)
    return lines
def _write_lines(dst, contents):
    '''Write every entry of *contents* to *dst*, one per line.'''
    newline = six.text_type(os.linesep)
    with open(dst, 'w+') as ofile:
        for entry in contents:
            ofile.write(entry + newline)
        # note: /etc/hosts needs to end with a newline so
        # that some utils that read it do not break
        ofile.write(newline)
def _load_hosts(hosts, lines, marker_start=None, marker_end=None):
    '''Parse hosts-file *lines* into the *hosts* mapping (ip -> name list).

    Blank lines and comments are ignored, and so is everything between a
    line starting with *marker_start* and one starting with *marker_end*.
    Returns the (mutated) *hosts* mapping.
    '''
    inside_skipped_block = False
    for raw in lines:
        entry = str(raw).strip()
        if not entry:
            continue
        if marker_start and entry.startswith(marker_start):
            __utils__['caasp_log.debug']('hosts: start of skipped block')
            inside_skipped_block = True
            continue
        if marker_end and entry.startswith(marker_end):
            __utils__['caasp_log.debug']('hosts: end of skipped block')
            inside_skipped_block = False
            continue
        if entry.startswith('#') or inside_skipped_block:
            continue
        # drop any trailing inline comment
        if '#' in entry:
            entry = entry[:entry.index('#')].strip()
        fields = entry.split()
        address = fields.pop(0)
        hosts.setdefault(address, []).extend(fields)
    return hosts
def _load_hosts_file(hosts, filename, marker_start=None, marker_end=None):
    '''Load *filename* and merge its entries into the *hosts* mapping.'''
    file_lines = _load_lines(filename)
    return _load_hosts(hosts, file_lines,
                       marker_start=marker_start,
                       marker_end=marker_end)
# add a (list of) name(s) to a (maybe existing) IP
# it will remove duplicates, sort names, etc...
def _add_names(hosts, ips, names):
    '''Associate *names* with every address in *ips*; both arguments may be
    a single value or a list.  Duplicate names are removed and the stored
    list is kept sorted (via _concat).'''
    names_list = names if isinstance(names, list) else [names]
    for address in (ips if isinstance(ips, list) else [ips]):
        __utils__['caasp_log.debug']('hosts: adding %s -> %s', address, names_list)
        existing = hosts[address] if address in hosts else []
        hosts[address] = _concat(existing, names_list)
def _add_names_for(hosts, nodes_dict, infra_domain):
    '''For every node in *nodes_dict* ({minion_id: interfaces}), register the
    minion id and its nodename (plus their FQDNs in *infra_domain*) against
    the node's primary IP.'''
    for node_id, ifaces in nodes_dict.items():
        primary_ip = __salt__['caasp_net.get_primary_ip'](host=node_id, ifaces=ifaces)
        if not primary_ip:
            continue
        _add_names(hosts, primary_ip, [node_id, node_id + '.' + infra_domain])
        nodename = __salt__['caasp_net.get_nodename'](host=node_id)
        if nodename:
            _add_names(hosts, primary_ip, [nodename, nodename + '.' + infra_domain])
# note regarding node removals:
# we need the "node_(addition|removal)_in_progress" nodes here, otherwise
# - nodes being removed will be immediately banned from the cluster (with a message like:
# 'rejected connection from <NODE> (error tls: <NODE-IP> does not match any of DNSNames [...]')
# and the cluster will become unhealthy
# - nodes being added will not be able to join (with some similar TLS verification error)
# doing another /etc/hosts update just for one stale entry seems like overkill,
# so the /etc/hosts cleanup will have to be delayed until some later moment...
def managed(name=HOSTS_FILE,
            admin_nodes=None,
            master_nodes=None,
            worker_nodes=None,
            other_nodes=None,
            caasp_hosts_file=CAASP_HOSTS_FILE,
            append=None,
            marker_start=None,
            marker_end=None,
            **kwargs):
    '''
    Generate a /etc/hosts file.

    name
        The hosts file to load/save.

    admin_nodes
        The list of admin nodes.

    master_nodes
        The list of master nodes.

    worker_nodes
        The list of worker nodes.

    other_nodes
        The list of other nodes.

    caasp_hosts_file
        File where the user's custom /etc/hosts entries are preserved.

    append
        Optional mapping of extra ``{ip: names}`` entries to add.

    Returns the unified diff of the applied changes, or an empty list when
    nothing changed.  Raises EtcHostsRuntimeException on failure.

    .. code-block:: yaml

        /etc/hosts:
          caasp_hosts.managed
    '''
    # `append` was previously a mutable default argument ({}); use None as
    # the sentinel and create a fresh dict per call instead.
    if append is None:
        append = {}

    this_roles = __salt__['grains.get']('roles', [])
    infra_domain = __salt__['caasp_pillar.get'](PILLAR_INTERNAL_INFRA, 'infra.caasp.local')
    assert infra_domain

    def fqdn(name):
        # qualify a short name with the internal infrastructure domain
        return name + '.' + infra_domain

    # get the previous /etc/hosts file and save it on /etc/caasp/hosts
    # note that this must be done only once, in the first run of the
    # salt state
    orig_etc_hosts = name or __salt__['config.option']('hosts.file')
    if orig_etc_hosts is None:
        raise InvalidConfigError('Could not obtain current hosts file name')

    # Load the current /etc/hosts file (for calculating differences later on)
    orig_etc_hosts_contents = []
    if os.path.exists(orig_etc_hosts):
        orig_etc_hosts_contents = _load_lines(orig_etc_hosts)

    hosts = OrderedDict()
    _load_hosts(hosts,
                MINIMAL_ETC_HOSTS.splitlines(),
                marker_start=marker_start,
                marker_end=marker_end)

    # copy the /etc/hosts to caasp_hosts_file the first time we run this
    if caasp_hosts_file:
        if not os.path.exists(caasp_hosts_file):
            __utils__['caasp_log.info']('hosts: saving %s in %s', orig_etc_hosts, caasp_hosts_file)
            _write_lines(caasp_hosts_file, orig_etc_hosts_contents)
            # TODO remove this file if something goes wrong...

        try:
            # remove any previous [marker_start, marker_end] block
            __salt__['file.blockreplace'](caasp_hosts_file,
                                          marker_start,
                                          marker_end,
                                          content='',
                                          backup=False)
        except Exception as e:
            __utils__['caasp_log.warn']('could not remove old blocks in {}: {}'.format(caasp_hosts_file, e))

        assert os.path.exists(caasp_hosts_file)
        __utils__['caasp_log.info']('hosts: loading entries in "%s" file', caasp_hosts_file)
        if not os.path.isfile(caasp_hosts_file):
            raise EtcHostsRuntimeException(
                '{} cannot be loaded: it is not a file'.format(caasp_hosts_file))

        _load_hosts_file(hosts,
                         caasp_hosts_file,
                         marker_start=marker_start,
                         marker_end=marker_end)

        __utils__['caasp_log.debug']('hosts: custom /etc/hosts entries:')
        for k, v in hosts.items():
            __utils__['caasp_log.debug']('hosts: %s %s', k, v)

    # get the admin, masters and workers
    def get_with_expr(expr):
        return __salt__['caasp_nodes.get_with_expr'](expr, grain='network.interfaces')

    # add all the entries
    try:
        _add_names_for(hosts, admin_nodes or get_with_expr(ADMIN_EXPR), infra_domain)
        _add_names_for(hosts, master_nodes or get_with_expr(MASTER_EXPR), infra_domain)
        _add_names_for(hosts, worker_nodes or get_with_expr(WORKER_EXPR), infra_domain)
        _add_names_for(hosts, other_nodes or get_with_expr(OTHER_EXPR), infra_domain)
    except Exception as e:
        raise EtcHostsRuntimeException(
            'Could not add entries for roles in /etc/hosts: {}'.format(e))

    try:
        for ip, names in append.items():
            _add_names(hosts, ip, names)

        # add some extra names for the API servers and admin nodes
        if "kube-master" in this_roles or "admin" in this_roles:
            external_fqdn_name = __salt__['caasp_pillar.get'](PILLAR_EXTERNAL_FQDN)
            if not __salt__['caasp_filters.is_ip'](external_fqdn_name):
                _add_names(hosts, '127.0.0.1', external_fqdn_name)

        # set the ldap server at the Admin node
        if "admin" in this_roles:
            _add_names(hosts, '127.0.0.1', fqdn('ldap'))

        # try to make Salt happy by adding an ipv6 entry
        # for the local host (not really used for anything else)
        this_hostname = __salt__['grains.get']('localhost', '')
        _add_names(hosts, ['127.0.0.1', '::1'],
                   [this_hostname, fqdn(this_hostname)])

        __utils__['caasp_log.debug']('hosts: adding entry for the API server at 127.0.0.1')
        _add_names(hosts, '127.0.0.1', ['api', fqdn('api')])
    except Exception as e:
        raise EtcHostsRuntimeException(
            'Could not add special entries in /etc/hosts: {}'.format(e))

    # (over)write the /etc/hosts
    try:
        preface = PREFACE.format(file=caasp_hosts_file).splitlines()

        new_etc_hosts_contents = []
        for ip, names in hosts.items():
            names.sort()
            line = '{0} {1}'.format(ip, ' '.join(names))
            new_etc_hosts_contents.append(line.strip().replace('\n', ''))

        # sorted output keeps the generated file deterministic across runs
        new_etc_hosts_contents.sort()
        new_etc_hosts_contents = preface + new_etc_hosts_contents

        __utils__['caasp_log.info']('hosts: writting new content to %s', orig_etc_hosts)
        _write_lines(orig_etc_hosts, new_etc_hosts_contents)
    except Exception as e:
        raise EtcHostsRuntimeException(
            'Could not write {} file: {}'.format(orig_etc_hosts, e))

    if new_etc_hosts_contents != orig_etc_hosts_contents:
        # calculate the changes
        diff = difflib.unified_diff(orig_etc_hosts_contents,
                                    new_etc_hosts_contents,
                                    lineterm='')
        return list(diff)
    else:
        return []
| |
#!/usr/bin/env python2.7
# Copyright 2013 Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from optparse import OptionParser
import os
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
import re
import sys
import urlparse
import pprint
from datetime import datetime, timedelta
import json
sys.path.insert(0, SCRIPT_PATH + '/../tornado')
import tornado.ioloop
import tornado.web
sys.path.insert(0, SCRIPT_PATH + '/../lib')
from LogAccessor import LogAccessor, LogAccessorException
ALL_LEVELS = ["INFO", "DEBUG", "WARN", "ERROR", "FATAL"]
def err(line):
    """Write *line* to stderr, pretty-printing non-string values."""
    text = line if isinstance(line, basestring) else pprint.pformat(line)
    sys.stderr.write(text + "\n")
def summarize(results):
    """Aggregate parsed log lines into per-level and per-fingerprint counts.

    Returns a dict with 'level' (level -> count), 'fp' (fingerprint ->
    {fp, count, level, norm_text}) and 'regex' (always empty) summaries.
    """
    by_level = dict((lvl, 0) for lvl in ALL_LEVELS)
    by_fingerprint = {}

    for record in results:
        by_level[record['level']] += 1

        fp = record['fp']
        entry = by_fingerprint.get(fp)
        if entry is None:
            entry = {'fp': fp, 'count': 0,
                     'level': record['level'],
                     'norm_text': record['norm_text']}
            by_fingerprint[fp] = entry
        entry['count'] += 1

    return {'level': by_level,
            'fp': by_fingerprint,
            'regex': {}}
class HBLogHandlersParent(tornado.web.RequestHandler):
    """Shared plumbing for the log endpoints: URL-argument parsing and the
    seek/filter generator used by both the stream and summary handlers."""

    def parse_url_args(self):
        """Parse the query string into ``self.url_args`` and populate
        ``self.sampling_rate`` and ``self.logs_glob``."""
        url_args = urlparse.parse_qs(self.request.query)
        # parse_qs wraps each value in a one-element list; split it on commas
        for key, val in url_args.items():
            url_args[key] = url_args[key][0].split(',')

        # dict.has_key() is Python-2-only; `in` behaves identically and is
        # forward-compatible with Python 3.
        if "sampling-rate" in url_args and \
                url_args["sampling-rate"][0] != "None":
            self.sampling_rate = float(url_args["sampling-rate"][0])
        else:
            self.sampling_rate = None

        self.logs_glob = url_args['glob'][0]

        # make sure the filter keys always exist so later code can iterate them
        for i in ['fp', 'fp-exclude', 're', 're-exclude']:
            if i not in url_args:
                url_args[i] = []

        err("%s INFO %s %s %s" %
            (
                datetime.now(),
                self.request.uri,
                self.sampling_rate,
                self.logs_glob,
            ))
        self.url_args = url_args

    def fetch_and_filter(self, log_accessor):
        """Seek *log_accessor* to the requested offset or start time, then
        yield only the lines that pass the level/fingerprint/regex filters."""
        if "universal-offset" in self.url_args:
            filename, byte_offset = \
                self.url_args["universal-offset"][0].split(':')
            universal_offset = {'byte_offset': int(byte_offset),
                                'filename': filename}
            if self.settings['verbose']:
                err("seeking to %s ..." % universal_offset)
            log_accessor.seek_offset(universal_offset)
        else:
            start_time = self.url_args["start"][0]
            end_time = self.url_args["end"][0]
            seek_time_str = str(start_time).split('.')[0]
            if self.settings['verbose']:
                err("seeking to %s ..." % seek_time_str)
            log_accessor.seek_time(seek_time_str)

        if self.settings['verbose']:
            err("--------------- seeked to --------------")
            err(log_accessor.look_one_rec_ahead())
            err("----------------------------------------")

        previous_line = None  # timestamp and level for unrecognized lines
                              # will be attributed from the previous line
                              # in the SingleFileLogAccessor library class
        for line in log_accessor:
            if 'unrecognized_line' in line.keys() and line['unrecognized_line']:
                if not previous_line:
                    if self.settings['verbose']:
                        err("Got unrecognized line "
                            "before any recognized line in %s" %
                            log_accessor.get_universal_offset())
            elif "universal-offset" not in self.url_args and \
                    line['ts'] > end_time:
                if self.settings['verbose']:
                    err("----- reached end-time at --------------")
                    err(line)
                    err("----------------------------------------")
                # `return` ends a generator cleanly; `raise StopIteration`
                # here becomes a RuntimeError under PEP 479 (Python 3.7+).
                return
            # for unrecognized lines don't stop the iteration
            if line['level'] in self.url_args['levels-list']:
                if self.url_args['fp'] == []:
                    # no fingerprint whitelist: apply exclusion + regex rules
                    if not any([True for fpex in self.url_args['fp-exclude'] if
                                line['fp'].startswith(fpex)]):
                        take_it = False
                        if self.url_args['re'] == []:
                            take_it = True
                        for r in self.url_args['re']:
                            if re.search(r, line['text'], re.IGNORECASE):
                                take_it = True
                        for r in self.url_args['re-exclude']:
                            if re.search(r, line['text'], re.IGNORECASE):
                                take_it = False
                        if take_it:
                            yield line
                elif any([True for fp in self.url_args['fp'] if
                          line['fp'].startswith(fp)]):
                    # fingerprint whitelist given and this line matches it
                    yield line
            previous_line = line
class MainHandler(HBLogHandlersParent):
    """Landing page: lists the example endpoints as HTML links."""

    def get(self):
        self.set_header("Content-Type", "text/html")
        href_example_list = ["/log/stream",
                             "/log/summary"]

        chunks = ["<pre>\n", "Examples:\n"]
        for href in href_example_list:
            chunks.append("<a href=\"%s\">%s</a>\n" % (href, href))
        chunks.append("</pre>\n")

        for chunk in chunks:
            self.write(chunk)
class LogStream(HBLogHandlersParent):
    """Stream matching log lines to the client as newline-delimited JSON
    packages, terminated by an exit-status package carrying the resume
    offset."""

    def get(self):
        self.set_header("Content-Type", "text/plain")
        self.parse_url_args()
        if self.settings['verbose']:
            err("basedir %s" % self.settings["basedir"])

        # dict.has_key() is Python-2-only; `in` behaves identically.
        if "universal-offset" in self.url_args:
            max_klines = 3  # tail -f is different and needs to fail faster
        else:
            max_klines = 20000

        log_accessor = LogAccessor(self.logs_glob, max_klines=max_klines,
                                   sampling_rate=self.sampling_rate,
                                   verbose=self.settings['verbose'],
                                   debug=self.settings['debug'],
                                   )
        for line in self.fetch_and_filter(log_accessor):
            line_pkg = {'pkg-cls': 'log-accessor-line', 'pkg-obj': line}
            self.write("%s\n" % json.dumps(line_pkg))
        log_accessor.close_all_files()

        # final package: success marker plus the offset a tail-style client
        # can use to resume streaming from where this response stopped
        line_pkg = {'pkg-cls': 'exit-status',
                    'pkg-obj':
                        {'status': 'success',
                         'universal-offset': log_accessor.get_universal_offset()}
                    }
        self.write("%s\n" % json.dumps(line_pkg))
class LogSummary(HBLogHandlersParent):
    """Returns one aggregated summary package for the matching log lines."""

    def get(self):
        self.set_header("Content-Type", "text/plain")
        self.parse_url_args()
        log_accessor = LogAccessor(self.logs_glob, max_klines=20000,
                                   sampling_rate=self.sampling_rate,
                                   verbose=self.settings['verbose'],
                                   debug=self.settings['debug'],
                                   )
        results = []
        try:
            for line in self.fetch_and_filter(log_accessor):
                results.append(line)
        finally:
            # Always release the open log file handles, even if filtering
            # raises (previously an exception leaked every open file).
            log_accessor.close_all_files()
        summary = summarize(results)
        line_pkg = {'pkg-cls': 'log-accessor-line', 'pkg-obj': summary}
        self.write("%s\n" % json.dumps(line_pkg))
        line_pkg = {'pkg-cls': 'exit-status',
                    'pkg-obj': {'status': 'success'}
                    }
        self.write("%s\n" % json.dumps(line_pkg))
if __name__ == "__main__":
    # Command-line options double as tornado application settings below.
    usage = "%prog: [options]"
    parser = OptionParser(usage=usage)
    parser.add_option("--basedir", default="/tmp/hblog/test_logs",
                      help="Base dir for all log dirs (def: %default)")
    parser.add_option("--verbose", "-v", action="store_true", default=False,
                      help="Verbose logging")
    parser.add_option("--debug", "-d", action="store_true", default=False,
                      help="Very verbose logging")
    options, _ = parser.parse_args()
    options = vars(options)  # convert object to dict
    # --debug implies --verbose.
    if options['debug']:
        options['verbose'] = True
    application = tornado.web.Application([
        (r"/", MainHandler),
        (r"/log/stream", LogStream),
        (r"/log/summary", LogSummary)
    ],
        **options)
    # NOTE(review): port 6957 is hard-coded and binds all interfaces.
    application.listen(6957, '0.0.0.0')
    tornado.ioloop.IOLoop.instance().start()
| |
from hs_travelportapp.home.models import Airline, UCode
from datetime import datetime
# strptime pattern for the ISO-8601-style timestamps in the API responses
# (callers slice off any trailing fraction/offset before parsing).
PARSE_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S'
# Display formats used in the JSON payload, e.g. "01 Jan" and "13:45".
FORMATTING_DATE_FORMAT = '%d %b'
FORMATTING_TIME_FORMAT = '%H:%M'
# Process-wide memo caches for DB lookups, keyed by carrier/IATA/UCode code.
AIRLINE_CACHE = {}
AIRPORT_CACHE = {}
RAILS_CACHE = {}
class ResultParser(object):
def __init__(self, resp_obj):
self.resp_obj = resp_obj
self.resp_json = {
'air': {},
'rail': {},
}
self._airports = {}
self._airlines = {}
self._rails = {}
self._segment_cxrs = {}
self._air_matrix = {}
self._via = {}
def get_json(self, key_prefix=''):
try:
resp_type = str(type(self.resp_obj))
if 'LowFareSearchRsp' in resp_type:
# Errors, Warnings, Info messages
response_messages = self.resp_obj.get_ResponseMessage()
self._init_response_messages(self.resp_json, response_messages)
# Flights
response_flights = self.resp_obj.get_FlightDetailsList()
if response_flights:
response_flights = response_flights.get_FlightDetails()
self._init_flights(self.resp_json, response_flights)
# Segments
response_segments = self.resp_obj.get_AirSegmentList()
if response_segments:
response_segments = response_segments.get_AirSegment()
self._init_segments(self.resp_json, response_segments)
# Air Solutions
response_solutions = self.resp_obj.get_AirPricingSolution()
self._init_solutions(self.resp_json, response_solutions)
# Airports and Airlines
self.resp_json['air']['airports'] = self._airports
self.resp_json['air']['airlines'] = self._airlines
self.resp_json['air']['matrix'] = self._air_matrix
self.resp_json['air']['via'] = self._via
elif 'RailAvailabilitySearchRsp' in str(type(self.resp_obj)):
# Errors, Warnings, Info messages
response_messages = self.resp_obj.get_ResponseMessage()
self._init_response_messages(self.resp_json, response_messages, 'rail')
if 'LowFareSearchRsp' in resp_type or \
'RailAvailabilitySearchRsp' in resp_type:
# Rail segments
response_rail_segments = self.resp_obj.get_RailSegmentList()
if response_rail_segments:
response_rail_segments = response_rail_segments.get_RailSegment()
self._init_rail_segments(self.resp_json, response_rail_segments, key_prefix)
# Rail journeys
response_rail_journeys = self.resp_obj.get_RailJourneyList()
if response_rail_journeys:
response_rail_journeys = response_rail_journeys.get_RailJourney()
self._init_rail_journeys(self.resp_json, response_rail_journeys, key_prefix)
# Rail solutions
response_rail_solutions = self.resp_obj.get_RailPricingSolution()
if response_rail_solutions:
self._init_rail_solutions(self.resp_json, response_rail_solutions, key_prefix)
# Rails
self.resp_json['rail']['coords'] = self._rails
except Exception, ex:
print 'response_to_json :: get_json :: Unexpected exception :: %s', ex
return self.resp_json
def _fill_rails(self, codes):
if not isinstance(codes, list):
codes = [codes]
for i in range(len(codes)-1, -1, -1):
code = codes[i]
if not code or code in self._rails:
codes.pop(i)
elif code in RAILS_CACHE:
self._rails[code] = RAILS_CACHE[code]
codes.pop(i)
if (not codes):
return
try:
rails = UCode.objects.filter(ucode__in=codes)
for r in rails:
ucode = r.ucode.strip()
RAILS_CACHE[ucode] = self._rails[ucode] = {'lat':str(r.latitude), 'lng':str(r.longitude)}
except Exception, ex:
print codes, "not in known ucodes", ex
def _fill_airlines(self, codes):
if not isinstance(codes, list):
codes = [codes]
for i in range(len(codes)-1, -1, -1):
code = codes[i]
if not code or code in self._airlines:
codes.pop(i)
elif code in AIRLINE_CACHE:
self._airlines[code] = AIRLINE_CACHE[code]
codes.pop(i)
if (not codes):
return
try:
airlines = Airline.objects.filter(code__in=codes)
for airline in airlines:
code = airline.code.strip()
AIRLINE_CACHE[code] = self._airlines[code] = airline.name
except Exception, ex:
print codes, "not in known airlines", ex
def _fill_airports(self, codes):
if not isinstance(codes, list):
codes = [codes]
for i in range(len(codes)-1, -1, -1):
code = codes[i].strip()
if not code or code in self._airports:
codes.pop(i)
elif code in AIRPORT_CACHE:
self._airports[code] = AIRPORT_CACHE[code]
codes.pop(i)
if (not codes):
return
try:
codes_copy = list(codes)
airports = UCode.objects.filter(st_type=1, iata_code__in=codes)
for arpt in airports:
arpt_obj = {
"s": "%s" % arpt.city,
"l": "%s, %s, %s" % (arpt.name, arpt.city, arpt.country),
"lng": str(arpt.longitude),
"lat": str(arpt.latitude)
}
code = arpt.iata_code.strip()
AIRPORT_CACHE[code] = self._airports[code] = arpt_obj
codes_copy.remove(code)
for c in codes_copy:
AIRPORT_CACHE[c] = self._airports[c] = {"s": c, "l": c}
except Exception, ex:
print codes, "not in known airports", ex
def _init_rail_solutions(self, resp_json, response_rail_solutions, key_prefix):
results = []
journeys = resp_json['rail']['journeys']
segments = resp_json['rail']['segments']
oc_names = []
for j in response_rail_solutions:
journey_refs = j.get_RailJourneyRef()
jref_ids = []
for jr in journey_refs:
key = '%s%s' % (key_prefix, jr.get_Key())
for s in journeys[key]['seg_ref']:
oc_n = segments[s]['oc_name']
if oc_n and oc_n not in oc_names:
oc_names.append(oc_n)
jref_ids.append(key)
price = j.get_TotalPrice()
results.append({
'journeys': jref_ids,
'price': price,
'raw_p': float(price[3:]),
'oc_names': oc_names
})
resp_json['rail']['results'] = results
def _init_rail_journeys(self, resp_json, response_rail_journeys, key_prefix):
journeys = {}
for j in response_rail_journeys:
dep_time = datetime.strptime(j.get_DepartureTime()[0:19], PARSE_DATE_FORMAT)
arr_time = datetime.strptime(j.get_ArrivalTime()[0:19], PARSE_DATE_FORMAT)
seg_refs = j.get_RailSegmentRef()
seg_keys = []
for sr in seg_refs:
seg_keys.append('%s%s' % (key_prefix, sr.get_Key()))
origin_code = j.get_Origin()
destination_code = j.get_Destination()
journeys['%s%s' % (key_prefix, j.get_Key())] = {
'o': '%s%s' % (j.get_OriginStationName(), (' (%s)' % origin_code) if origin_code else ''),
'd': '%s%s' % (j.get_DestinationStationName(), (' (%s)' % destination_code) if destination_code else ''),
'dd':dep_time.strftime(FORMATTING_DATE_FORMAT),
'dt':dep_time.strftime(FORMATTING_TIME_FORMAT),
'ad':arr_time.strftime(FORMATTING_DATE_FORMAT),
'at':arr_time.strftime(FORMATTING_TIME_FORMAT),
'leg': 0 if j.get_JourneyDirection() == 'Outward' else 1,
'seg_ref': seg_keys,
'pc': j.get_ProviderCode(),
'sc': j.get_SupplierCode()
}
resp_json['rail']['journeys'] = journeys
def _init_rail_segments(self, resp_json, response_segments, key_prefix):
segments = {}
ucodes = []
for s in response_segments:
dep_time = datetime.strptime(s.get_DepartureTime()[0:19], PARSE_DATE_FORMAT)
arr_time = datetime.strptime(s.get_ArrivalTime()[0:19], PARSE_DATE_FORMAT)
op_company = s.get_OperatingCompany()
origin_code = s.get_Origin()
destination_code = s.get_Destination()
origin_ucode = s.get_RailLocOrigin()
destination_ucode = s.get_RailLocDestination()
segments['%s%s' % (key_prefix, s.get_Key())] = {
'oc_name': op_company.get_Name() if op_company is not None else '',
'oc_code': op_company.get_Code() if op_company is not None else '',
'tno': s.get_TrainNumber(),
'o': '%s%s' % (s.get_OriginStationName(), (' (%s)' % origin_code) if origin_code else ''),
'd': '%s%s' % (s.get_DestinationStationName(), (' (%s)' % destination_code) if destination_code else ''),
'o_ucode': origin_ucode,
'd_ucode': destination_ucode,
'dd':dep_time.strftime(FORMATTING_DATE_FORMAT),
'dt':dep_time.strftime(FORMATTING_TIME_FORMAT),
'ad':arr_time.strftime(FORMATTING_DATE_FORMAT),
'at':arr_time.strftime(FORMATTING_TIME_FORMAT),
}
if origin_ucode not in ucodes:
ucodes.append(origin_ucode)
if destination_ucode not in ucodes:
ucodes.append(destination_ucode)
resp_json['rail']['segments'] = segments
self._fill_rails(ucodes)
def _set_via(self, outbound_flights, inbound_flights, s_key):
# set vias
via_obj = {'o':'', 'd':'', 'via':{'out': [], 'in': []}}
outbound_fl_len = len(outbound_flights)
inbound_fl_len = len(inbound_flights)
via_obj['o'] = outbound_flights[0]['o']
via_obj['d'] = outbound_flights[outbound_fl_len-1]['d']
for i in range(0, outbound_fl_len-1):
via_obj['via']['out'].append(outbound_flights[i]['d'])
for i in range(0, inbound_fl_len-1):
via_obj['via']['in'].append(inbound_flights[i]['d'])
if len(via_obj['via']['out']) == len(via_obj['via']['in']):
same_via = True
for via in via_obj['via']['in']:
if via not in via_obj['via']['out']:
same_via = False
else:
same_via = False
if same_via:
via_obj['via']['in'] = []
self._via[s_key] = via_obj
def _init_solutions(self, resp_json, response_solutions):
solutions = []
segments_list = []
flights_list = []
if 'segments' in resp_json['air']:
segments_list = resp_json['air']['segments']
if 'flights' in resp_json['air']:
flights_list = resp_json['air']['flights']
lc_outbound_routing_opt = []
lc_inbound_routing_opt = []
for s in response_solutions:
s_key = s.get_Key()
sol_segments = s.get_AirSegmentRef()
api = s.get_AirPricingInfo()[0]
binfo = api.get_BookingInfo()
tmp_segments_class = {}
for bi in binfo:
tmp_segments_class[bi.get_SegmentRef()] = bi.get_BookingCode()
provider_code = api.get_ProviderCode()
if not provider_code == 'ACH': # GDS
segments = []
cur_seg = None
flights_per_dir = {'inbound_flights':[], 'outbound_flights':[]}
#via_obj = {'o':'', 'd':'', 'via':{'out': [], 'in': []}}
for ss in sol_segments:
key = ss.get_Key()
segments.append({'key': key, 'class': tmp_segments_class[key] })
cur_seg = segments_list[key]
f_list = []
# set inbound and outbound flights
for f_ref in cur_seg['f_ref']:
f_list.extend([{'o': flights_list[f_ref]['o'], 'd': flights_list[f_ref]['d']}])
if cur_seg['leg'] == 0:
flights_per_dir['outbound_flights'].extend(f_list)
else:
flights_per_dir['inbound_flights'].extend(f_list)
# update matrix
segment_cxr = self._segment_cxrs[key]
if segment_cxr not in self._air_matrix:
self._air_matrix[segment_cxr] = []
if s_key not in self._air_matrix[segment_cxr]:
self._air_matrix[segment_cxr].append(s_key)
self._set_via(flights_per_dir['outbound_flights'], flights_per_dir['inbound_flights'], s_key)
cxr = api.get_PlatingCarrier()
solutions.append({
'key': s_key,
'segments': segments,
'price': s.get_TotalPrice(),
'cxr': [cxr]
})
else: # lowcosts
cxr = api.get_SupplierCode()
connections = s.get_Connection()
connecting_segments = []
for sc in connections:
connecting_segments.append(sc.get_SegmentIndex)
seg_len = len(sol_segments)
curr_routing_segments = []
for i in range(0, seg_len):
ss = sol_segments[i]
key = ss.get_Key()
leg = segments_list[key]['leg']
curr_routing_segments.append({'key': key, 'class': tmp_segments_class[key] })
if i not in connecting_segments:
price = s.get_TotalPrice()
raw_price = float(price[3:])
rtg_option = {'segments': curr_routing_segments, 'price': price, 'raw_p': raw_price, 'cxr': cxr}
if leg == 0:
lc_outbound_routing_opt.append(rtg_option)
else:
lc_inbound_routing_opt.append(rtg_option)
curr_routing_segments = []
sol_key = 0
lc_solutions = []
# Restrict the low cost combinations to top 100 cheapest
lc_outbound_routing_opt = sorted(lc_outbound_routing_opt, key=lambda x: x['raw_p'])[:10]
lc_inbound_routing_opt = sorted(lc_inbound_routing_opt, key=lambda x: x['raw_p'])[:10]
for oro in lc_outbound_routing_opt:
for s in oro['segments']:
cur_seg = segments_list[s['key']]
f_list = []
for f_ref in cur_seg['f_ref']:
f_list.extend([{'o': flights_list[f_ref]['o'], 'd': flights_list[f_ref]['d']}])
oro['flights'] = f_list
for iro in lc_inbound_routing_opt:
for s in iro['segments']:
cur_seg = segments_list[s['key']]
f_list = []
for f_ref in cur_seg['f_ref']:
f_list.extend([{'o': flights_list[f_ref]['o'], 'd': flights_list[f_ref]['d']}])
iro['flights'] = f_list
for lc_outb_rtg_opt in lc_outbound_routing_opt:
for lc_inb_rtg_opt in lc_inbound_routing_opt:
segments = []
segments.extend(lc_outb_rtg_opt['segments'])
segments.extend(lc_inb_rtg_opt['segments'])
sol_key = sol_key + 1
key = 'LC_%d' % sol_key
cxrs = []
if lc_outb_rtg_opt['cxr'] not in cxrs:
cxrs.append(lc_outb_rtg_opt['cxr'])
if lc_inb_rtg_opt['cxr'] not in cxrs:
cxrs.append(lc_inb_rtg_opt['cxr'])
raw_price = lc_outb_rtg_opt['raw_p'] + lc_inb_rtg_opt['raw_p']
currency = lc_outb_rtg_opt['price'][0:3]
self._set_via(lc_outb_rtg_opt['flights'], lc_inb_rtg_opt['flights'], key)
lc_solutions.append({
'key': key,
'segments': segments,
'price': '%s%.2f' % (currency, raw_price),
'raw_price': raw_price,
'cxr': cxrs
})
for c in cxrs:
if c not in self._air_matrix:
self._air_matrix[c] = []
if key not in self._air_matrix[c]:
self._air_matrix[c].append(key)
solutions.extend(sorted(lc_solutions, key=lambda x: x['raw_price']))
resp_json['air']['results'] = solutions
def _init_segments(self, resp_json, response_segments):
segments = {}
airports = []
airlines = []
for s in response_segments:
dep_time = datetime.strptime(s.get_DepartureTime()[0:19], PARSE_DATE_FORMAT)
arr_time = datetime.strptime(s.get_ArrivalTime()[0:19], PARSE_DATE_FORMAT)
seg_flights = s.get_FlightDetailsRef()
flight_refs = []
for sf in seg_flights:
flight_refs.append(sf.get_Key())
seg_ai = s.get_AirAvailInfo()
seg_providers = []
for sai in seg_ai:
seg_providers.append(sai.get_ProviderCode())
orig = s.get_Origin()
dest = s.get_Destination()
cxr = s.get_Carrier()
key = s.get_Key()
segments[key] = {
'cxr': cxr,
'fno': s.get_FlightNumber(),
'o': orig,
'd': dest,
'dd':dep_time.strftime(FORMATTING_DATE_FORMAT),
'dt':dep_time.strftime(FORMATTING_TIME_FORMAT),
'ad':arr_time.strftime(FORMATTING_DATE_FORMAT),
'at':arr_time.strftime(FORMATTING_TIME_FORMAT),
'f_ref': flight_refs,
'pc': seg_providers,
'leg': s.get_Group()
}
self._segment_cxrs[key] = cxr
if orig not in airports:
airports.append(orig)
if dest not in airports:
airports.append(dest)
if cxr not in airlines:
airlines.append(cxr)
self._fill_airlines(airlines)
self._fill_airports(airports)
resp_json['air']['segments'] = segments
def _init_flights(self, resp_json, response_flights):
flights = {}
airports = []
for f in response_flights:
dep_time = datetime.strptime(f.get_DepartureTime()[0:19], PARSE_DATE_FORMAT)
arr_time = datetime.strptime(f.get_ArrivalTime()[0:19], PARSE_DATE_FORMAT)
orig = f.get_Origin()
dest = f.get_Destination()
travel_time = f.get_TravelTime()
hours = travel_time // 60
min = travel_time % 60
flights[f.get_Key()] = {
'o': orig,
'd': dest,
'dd':dep_time.strftime(FORMATTING_DATE_FORMAT),
'dt':dep_time.strftime(FORMATTING_TIME_FORMAT),
'ad':arr_time.strftime(FORMATTING_DATE_FORMAT),
'at':arr_time.strftime(FORMATTING_TIME_FORMAT),
'ft':f.get_FlightTime(),
'o_ter': f.get_OriginTerminal(),
'd_ter': f.get_DestinationTerminal(),
'time': '%sh %sm' % (hours, min)
}
if orig not in airports:
airports.append(orig)
if dest not in airports:
airports.append(dest)
self._fill_airports(airports)
resp_json['air']['flights'] = flights
def _init_response_messages(self, resp_json, response_messages, section='air'):
errors = []
warnings = []
infos = []
for rm in response_messages:
t = rm.get_Type()
msg = {'pc': rm.get_ProviderCode(),
'sc': rm.get_SupplierCode(),
'c': rm.get_Code(),
'msg': rm.get_valueOf_()
}
if t == 'Error':
errors.append(msg)
elif t == 'Warning':
warnings.append(msg)
else:
infos.append(msg)
if errors:
resp_json[section]['errors'] = errors
if warnings:
resp_json[section]['warnings'] = warnings
if infos:
resp_json[section]['infos'] = infos
| |
from utils.database import dbutils
import json
import requests
import polyline
import cPickle as pickle
import os
def get_routes(location_pairs, routes_path, get_time=False):
    """
    Gets the driving route from Open Street Routing Machine for an array
    of pairs of coordinates. Decodes the returned polyline routes into an
    array of lat/lon tuples. Saves the resulting paths as a pickle.

    Args:
        location_pairs (dictionary): The set of location pairs to query for;
            values are 'lon,lat;lon,lat' strings in OSRM coordinate order
        routes_path (string): The file path for the routes pickle
        get_time (bool): whether or not to save the duration for route
    Returns:
        array: The routes between all of the supplied pairs of locations
    """
    # TODO: Change this to being walking directions. Use google maps routing.
    routes = {}
    if os.path.isfile(routes_path):
        # Seed from the cached pickle so only new pairs hit the network.
        with open(routes_path, 'rb') as routes_file:
            routes = pickle.load(routes_file)
    for location_pair_key in location_pairs:
        if location_pair_key not in routes:
            location = location_pairs[location_pair_key]
            url = 'http://router.project-osrm.org/route/v1/driving/%s' % \
                location
            response = requests.get(url)
            route = response.json()['routes'][0]
            if get_time:
                routes[location_pair_key] = {
                    'duration': route['duration'],
                    'distance': route['distance']
                }
            else:
                routes[location_pair_key] = polyline.decode(route['geometry'])
    # Persist the (possibly extended) cache; `with` closes the handle that
    # the previous bare open() leaked.
    with open(routes_path, 'wb') as routes_file:
        pickle.dump(routes, routes_file)
    return routes
def get_tower_pairs(query, routes_path='tower_routes.p'):
    """
    Gets the pairs of sequential towers that exist in a set of CDR records.
    Only adds transitions between towers that exist in one user's records.

    Args:
        query (string): The POSTGRES query to retrieve the set of CDR records
        routes_path (string): The file path for the routes pickle.
            New, defaulted parameter: the previous code called get_routes()
            without its required routes_path argument, which always raised
            TypeError.
    Returns:
        array: The routes between all of the pairs of towers
    """
    conn = dbutils.connect()
    cursor = conn.cursor()
    cursor.execute(query)
    records = cursor.fetchall()
    prev_user = None
    prev_tower = None
    prev_lat = None
    prev_lon = None
    tower_pairs = {}
    for record in records:
        user, lon, lat, hour, minute, tower = record
        # Only pair consecutive, distinct towers for the same user.
        if prev_user == user and prev_tower is not None and prev_tower != tower:
            # Key doubles as the OSRM 'lon,lat;lon,lat' location string.
            key = '{0},{1};{2},{3}'.format(prev_lon, prev_lat, lon, lat)
            tower_pairs[key] = key
        prev_user = user
        prev_tower = tower
        prev_lon = lon
        prev_lat = lat
    return get_routes(tower_pairs, routes_path)
def museum_main(routes_path):
    """
    Calculates routes between every pair of museums that are visited in a row

    Args:
        routes_path (string): path for output museum routes pickle
    """
    # TODO: Finish this so that it creates paths. Need to complete walking
    # directions implementation for routes first
    conn = dbutils.connect()
    cursor = conn.cursor()
    museum_location_query = """
        SELECT latitude, longitude, string
        FROM optourism.firenze_card_locations;
    """
    cursor.execute(museum_location_query)
    museums = cursor.fetchall()
    museum_pairs = {}
    for origin in museums:
        origin_lat, origin_lon, origin_code = origin
        for dest in museums:
            dest_lat, dest_lon, dest_code = dest
            pair_key = '{0}{1}'.format(origin_code, dest_code)
            flipped_key = '{1}{0}'.format(origin_code, dest_code)
            # Skip self-pairs and pairs already captured in reverse order.
            if dest_code == origin_code or flipped_key in museum_pairs:
                continue
            location = '{0},{1};{2},{3}'.format(origin_lon, origin_lat,
                                                dest_lon, dest_lat)
            museum_pairs[pair_key] = location
    return get_routes(museum_pairs, routes_path, get_time=True)
def cdr_main(routes_path, output_path):
    """
    Retrieves a set of CDR records for users with notable paths and
    interpolates these paths with routes between their tower locations
    equally spaced over the time gap.
    Creates a JSON data file to feed into the deck.gl paths visualization.

    Args:
        routes_path (string): The file path for the routes pickle
        output_path (string): The file path for the output json
    """
    conn = dbutils.connect()
    cursor = conn.cursor()
    routes_query = """
        SELECT
            paths.cust_id,
            paths.lon,
            paths.lat,
            date_part('hour', paths.date_time_m) AS hour,
            date_part('minute', paths.date_time_m) AS minute,
            paths.tower_id
        FROM optourism.foreigners_path_records_joined AS paths
        JOIN optourism.foreigners_features AS features
        ON features.cust_id = paths.cust_id
        AND (
            date_part('day', paths.date_time_m) = 27
            OR
            date_part('day', paths.date_time_m) = 28
        )
        AND date_part('month', paths.date_time_m) = 7
        AND features.days_active < 15
        ORDER BY cust_id ASC, hour ASC, minute ASC;
    """
    cursor.execute(routes_query)
    records = cursor.fetchall()
    clean_records = []
    data = None
    prev_user = None
    prev_time = None
    prev_tower = None
    prev_lat = None
    prev_lon = None
    id_counter = 0
    # `with` closes the handle that the previous bare open() leaked.
    with open(routes_path, 'rb') as routes_file:
        routes = pickle.load(routes_file)
    for record in records:
        user, lon, lat, hour, minute, tower = record
        timestamp = hour * 60 + minute  # minutes since midnight
        if prev_user is not None and prev_user != user:
            # Flush the previous user's completed path.
            data['endTime'] = prev_time
            clean_records.append(data)
        if prev_user is None or prev_user != user:
            data = {
                'color': id_counter,
                'startTime': timestamp,
                'segments': []
            }
            id_counter += 1
        elif prev_tower is not None and prev_tower != tower:
            # Interpolate route points evenly over the time gap between
            # the two tower observations.
            key = '{0},{1};{2},{3}'.format(prev_lon, prev_lat, lon, lat)
            if key in routes:
                route = routes[key]
                delta = (timestamp - prev_time) / (len(route) + 1)
                for i in range(len(route)):
                    stop_lat, stop_lon = route[i]
                    segment = [stop_lon, stop_lat, prev_time + delta * (i + 1)]
                    data['segments'].append(segment)
        data['segments'].append([lon, lat, timestamp])
        prev_user = user
        prev_time = timestamp
        prev_tower = tower
        prev_lat = lat
        prev_lon = lon
    # BUG FIX: the final user's path was never appended (the loop only
    # flushes on a user change), silently dropping the last user.
    if data is not None:
        data['endTime'] = prev_time
        clean_records.append(data)
    with open(output_path, 'w') as outfile:
        json.dump(clean_records, outfile)
if __name__ == '__main__':
    # All inputs/outputs live under ./output next to this script.
    curr_dir = os.path.dirname(os.path.abspath(__file__))
    pickle_path = os.path.join(curr_dir, 'output', 'tower_routes.p')
    cdr_output_path = os.path.join(curr_dir, 'output', 'tower_routes.json')
    cdr_main(pickle_path, cdr_output_path)
    museum_pickle_path = os.path.join(curr_dir, 'output', 'museum_routes.p')
    museum_main(museum_pickle_path)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Fast-Fourier Transform ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops as _ops
from tensorflow.python.ops import manip_ops
from tensorflow.python.framework import tensor_util as _tensor_util
from tensorflow.python.ops import array_ops as _array_ops
from tensorflow.python.ops import gen_spectral_ops
from tensorflow.python.ops import math_ops as _math_ops
from tensorflow.python.util.tf_export import tf_export
def _infer_fft_length_for_rfft(input_tensor, fft_rank):
  """Infers the `fft_length` argument for a `rank` RFFT from `input_tensor`."""
  # Static shape of the inner fft_rank dimensions, if known.
  inner_shape = input_tensor.get_shape()[-fft_rank:]
  if inner_shape.is_fully_defined():
    # Fully known ahead of time: emit a constant fft_length.
    return _ops.convert_to_tensor(inner_shape.as_list(), _dtypes.int32)
  # Some dim is unknown: derive the length from the runtime shape instead.
  return _array_ops.shape(input_tensor)[-fft_rank:]
def _infer_fft_length_for_irfft(input_tensor, fft_rank):
  """Infers the `fft_length` argument for a `rank` IRFFT from `input_tensor`."""
  inner_shape = input_tensor.get_shape()[-fft_rank:]
  if inner_shape.is_fully_defined():
    # Static path: the innermost time-domain length is recovered from the
    # number of complex bins as 2 * (bins - 1), clamped at zero.
    length = inner_shape.as_list()
    if length:
      length[-1] = max(0, 2 * (length[-1] - 1))
    return _ops.convert_to_tensor(length, _dtypes.int32)
  # Dynamic path: same arithmetic, expressed with tensor ops.
  length = _array_ops.unstack(_array_ops.shape(input_tensor)[-fft_rank:])
  length[-1] = _math_ops.maximum(0, 2 * (length[-1] - 1))
  return _array_ops.stack(length)
def _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length, is_reverse=False):
  """Pads `input_tensor` to `fft_length` on its inner-most `fft_rank` dims."""
  fft_shape = _tensor_util.constant_value_as_shape(fft_length)

  # Edge case: skip padding empty tensors.
  if (input_tensor.shape.ndims is not None and
      any(dim.value == 0 for dim in input_tensor.shape.dims)):
    return input_tensor

  # If we know the shapes ahead of time, we can either skip or pre-compute the
  # appropriate paddings. Otherwise, fall back to computing paddings in
  # TensorFlow.
  if fft_shape.is_fully_defined() and input_tensor.shape.ndims is not None:
    # Slice the last FFT-rank dimensions from input_tensor's shape.
    input_fft_shape = input_tensor.shape[-fft_shape.ndims:]

    if input_fft_shape.is_fully_defined():
      # In reverse, we only pad the inner-most dimension to fft_length / 2 + 1.
      if is_reverse:
        fft_shape = fft_shape[:-1].concatenate(
            fft_shape.dims[-1].value // 2 + 1)

      # Right-pad each inner dimension up to its target size (never negative).
      paddings = [[0, max(fft_dim.value - input_dim.value, 0)]
                  for fft_dim, input_dim in zip(
                      fft_shape.dims, input_fft_shape.dims)]
      if any(pad > 0 for _, pad in paddings):
        # Batch (outer) dimensions are left untouched.
        outer_paddings = [[0, 0]] * max((input_tensor.shape.ndims -
                                         fft_shape.ndims), 0)
        return _array_ops.pad(input_tensor, outer_paddings + paddings)
    return input_tensor

  # If we can't determine the paddings ahead of time, then we have to pad. If
  # the paddings end up as zero, tf.pad has a special-case that does no work.
  input_rank = _array_ops.rank(input_tensor)
  input_fft_shape = _array_ops.shape(input_tensor)[-fft_rank:]
  outer_dims = _math_ops.maximum(0, input_rank - fft_rank)
  outer_paddings = _array_ops.zeros([outer_dims], fft_length.dtype)
  # In reverse, we only pad the inner-most dimension to fft_length / 2 + 1.
  if is_reverse:
    fft_length = _array_ops.concat([fft_length[:-1],
                                    fft_length[-1:] // 2 + 1], 0)
  fft_paddings = _math_ops.maximum(0, fft_length - input_fft_shape)
  paddings = _array_ops.concat([outer_paddings, fft_paddings], 0)
  # tf.pad expects [[before, after], ...]; we only ever pad "after".
  paddings = _array_ops.stack([_array_ops.zeros_like(paddings), paddings],
                              axis=1)
  return _array_ops.pad(input_tensor, paddings)
def _rfft_wrapper(fft_fn, fft_rank, default_name):
  """Wrapper around gen_spectral_ops.rfft* that infers fft_length argument."""

  def _rfft(input_tensor, fft_length=None, name=None):
    """Wrapper around gen_spectral_ops.rfft* that infers fft_length argument."""
    with _ops.name_scope(name, default_name,
                         [input_tensor, fft_length]) as name:
      input_tensor = _ops.convert_to_tensor(input_tensor, _dtypes.float32)
      input_tensor.shape.with_rank_at_least(fft_rank)
      # Infer fft_length from the input when the caller did not provide one.
      if fft_length is not None:
        fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)
      else:
        fft_length = _infer_fft_length_for_rfft(input_tensor, fft_rank)
      padded = _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length)
      return fft_fn(padded, fft_length, name)

  # Surface the generated op's documentation on the wrapper.
  _rfft.__doc__ = fft_fn.__doc__
  return _rfft
def _irfft_wrapper(ifft_fn, fft_rank, default_name):
  """Wrapper around gen_spectral_ops.irfft* that infers fft_length argument."""

  def _irfft(input_tensor, fft_length=None, name=None):
    """Wrapper irfft* that infers fft_length argument."""
    with _ops.name_scope(name, default_name,
                         [input_tensor, fft_length]) as name:
      input_tensor = _ops.convert_to_tensor(input_tensor, _dtypes.complex64)
      input_tensor.shape.with_rank_at_least(fft_rank)
      # Infer the time-domain length when the caller did not provide one.
      if fft_length is not None:
        fft_length = _ops.convert_to_tensor(fft_length, _dtypes.int32)
      else:
        fft_length = _infer_fft_length_for_irfft(input_tensor, fft_rank)
      # For IRFFT the inner dimension is padded only to fft_length // 2 + 1.
      padded = _maybe_pad_for_rfft(input_tensor, fft_rank, fft_length,
                                   is_reverse=True)
      return ifft_fn(padded, fft_length, name)

  # Surface the generated op's documentation on the wrapper.
  _irfft.__doc__ = ifft_fn.__doc__
  return _irfft
# FFT/IFFT 1/2/3D are exported via
# third_party/tensorflow/core/api_def/python_api/
fft = gen_spectral_ops.fft
ifft = gen_spectral_ops.ifft
fft2d = gen_spectral_ops.fft2d
ifft2d = gen_spectral_ops.ifft2d
fft3d = gen_spectral_ops.fft3d
ifft3d = gen_spectral_ops.ifft3d
# Real-input FFTs (and inverses) wrap the generated ops so that fft_length
# may be omitted and inferred from the input's inner dimensions; each is
# exported under both the signal.* and legacy v1 spectral.* names.
rfft = _rfft_wrapper(gen_spectral_ops.rfft, 1, "rfft")
tf_export("signal.rfft", v1=["signal.rfft", "spectral.rfft"])(rfft)
irfft = _irfft_wrapper(gen_spectral_ops.irfft, 1, "irfft")
tf_export("signal.irfft", v1=["signal.irfft", "spectral.irfft"])(irfft)
rfft2d = _rfft_wrapper(gen_spectral_ops.rfft2d, 2, "rfft2d")
tf_export("signal.rfft2d", v1=["signal.rfft2d", "spectral.rfft2d"])(rfft2d)
irfft2d = _irfft_wrapper(gen_spectral_ops.irfft2d, 2, "irfft2d")
tf_export("signal.irfft2d", v1=["signal.irfft2d", "spectral.irfft2d"])(irfft2d)
rfft3d = _rfft_wrapper(gen_spectral_ops.rfft3d, 3, "rfft3d")
tf_export("signal.rfft3d", v1=["signal.rfft3d", "spectral.rfft3d"])(rfft3d)
irfft3d = _irfft_wrapper(gen_spectral_ops.irfft3d, 3, "irfft3d")
tf_export("signal.irfft3d", v1=["signal.irfft3d", "spectral.irfft3d"])(irfft3d)
def _fft_size_for_grad(grad, rank):
  """Number of elements over the inner `rank` FFT dimensions of `grad`."""
  inner_dims = _array_ops.shape(grad)[-rank:]
  return _math_ops.reduce_prod(inner_dims)
@_ops.RegisterGradient("FFT")
def _fft_grad(_, grad):
  """Gradient for FFT: IFFT of the upstream gradient, scaled by FFT size."""
  size = _math_ops.cast(_fft_size_for_grad(grad, 1), grad.dtype)
  return ifft(grad) * size
@_ops.RegisterGradient("IFFT")
def _ifft_grad(_, grad):
  """Gradient for IFFT: FFT of the upstream gradient, scaled by 1/size."""
  rsize = _math_ops.cast(
      1. / _math_ops.cast(_fft_size_for_grad(grad, 1), grad.dtype.real_dtype),
      grad.dtype)
  return fft(grad) * rsize
@_ops.RegisterGradient("FFT2D")
def _fft2d_grad(_, grad):
  """Gradient for FFT2D: IFFT2D of the upstream gradient, scaled by size."""
  size = _math_ops.cast(_fft_size_for_grad(grad, 2), grad.dtype)
  return ifft2d(grad) * size
@_ops.RegisterGradient("IFFT2D")
def _ifft2d_grad(_, grad):
  """Gradient for IFFT2D: FFT2D of the upstream gradient, scaled by 1/size."""
  rsize = _math_ops.cast(
      1. / _math_ops.cast(_fft_size_for_grad(grad, 2), grad.dtype.real_dtype),
      grad.dtype)
  return fft2d(grad) * rsize
@_ops.RegisterGradient("FFT3D")
def _fft3d_grad(_, grad):
  """Gradient for FFT3D: IFFT3D of the upstream gradient, scaled by size."""
  size = _math_ops.cast(_fft_size_for_grad(grad, 3), grad.dtype)
  return ifft3d(grad) * size
@_ops.RegisterGradient("IFFT3D")
def _ifft3d_grad(_, grad):
  """Gradient for IFFT3D: FFT3D of the upstream gradient, scaled by 1/size."""
  rsize = _math_ops.cast(
      1. / _math_ops.cast(_fft_size_for_grad(grad, 3), grad.dtype.real_dtype),
      grad.dtype)
  return fft3d(grad) * rsize
def _rfft_grad_helper(rank, irfft_fn):
  """Returns a gradient function for an RFFT of the provided rank."""
  # Can't happen because we don't register a gradient for RFFT3D.
  assert rank in (1, 2), "Gradient for RFFT3D is not implemented."

  def _grad(op, grad):
    """A gradient function for RFFT with the provided `rank` and `irfft_fn`."""
    # fft_length is the second input of the forward RFFT op.
    fft_length = op.inputs[1]
    input_shape = _array_ops.shape(op.inputs[0])
    # 1 when fft_length is even, 0 when odd (as a complex scalar); the
    # Nyquist-bin (ym) correction below only applies for even lengths.
    is_even = _math_ops.cast(1 - (fft_length[-1] % 2), _dtypes.complex64)

    def _tile_for_broadcasting(matrix, t):
      # Reshape `matrix` to rank(t) by prepending size-1 dims, then tile it
      # across t's batch (outer) dimensions so it can be batch-matmuled.
      expanded = _array_ops.reshape(
          matrix,
          _array_ops.concat([
              _array_ops.ones([_array_ops.rank(t) - 2], _dtypes.int32),
              _array_ops.shape(matrix)
          ], 0))
      return _array_ops.tile(
          expanded, _array_ops.concat([_array_ops.shape(t)[:-2], [1, 1]], 0))

    def _mask_matrix(length):
      """Computes t_n = exp(sqrt(-1) * pi * n^2 / line_len)."""
      # TODO(rjryan): Speed up computation of twiddle factors using the
      # following recurrence relation and cache them across invocations of RFFT.
      #
      # t_n = exp(sqrt(-1) * pi * n^2 / line_len)
      # for n = 0, 1,..., line_len-1.
      # For n > 2, use t_n = t_{n-1}^2 / t_{n-2} * t_1^2
      a = _array_ops.tile(
          _array_ops.expand_dims(_math_ops.range(length), 0), (length, 1))
      b = _array_ops.transpose(a, [1, 0])
      # NOTE(review): this builds exp(-2j*pi*a*b/length), a DFT-style
      # matrix of outer products a*b rather than the n^2 form in the
      # docstring above — behavior kept as-is; confirm against the TODO.
      return _math_ops.exp(
          -2j * np.pi * _math_ops.cast(a * b, _dtypes.complex64) /
          _math_ops.cast(length, _dtypes.complex64))

    def _ymask(length):
      """A sequence of [1+0j, -1+0j, 1+0j, -1+0j, ...] with length `length`."""
      return _math_ops.cast(1 - 2 * (_math_ops.range(length) % 2),
                            _dtypes.complex64)

    # y0/ym are the DC and (for even lengths) Nyquist bins of the upstream
    # gradient; they are not double-counted by Hermitian symmetry.
    y0 = grad[..., 0:1]
    if rank == 1:
      ym = grad[..., -1:]
      extra_terms = y0 + is_even * ym * _ymask(input_shape[-1])
    elif rank == 2:
      # Create a mask matrix for y0 and ym.
      base_mask = _mask_matrix(input_shape[-2])

      # Tile base_mask to match y0 in shape so that we can batch-matmul the
      # inner 2 dimensions.
      tiled_mask = _tile_for_broadcasting(base_mask, y0)

      y0_term = _math_ops.matmul(tiled_mask, _math_ops.conj(y0))
      extra_terms = y0_term

      ym = grad[..., -1:]
      ym_term = _math_ops.matmul(tiled_mask, _math_ops.conj(ym))

      inner_dim = input_shape[-1]
      ym_term = _array_ops.tile(
          ym_term,
          _array_ops.concat([
              _array_ops.ones([_array_ops.rank(grad) - 1], _dtypes.int32),
              [inner_dim]
          ], 0)) * _ymask(inner_dim)

      extra_terms += is_even * ym_term

    # The gradient of RFFT is the IRFFT of the incoming gradient times a scaling
    # factor, plus some additional terms to make up for the components dropped
    # due to Hermitian symmetry.
    input_size = _math_ops.cast(
        _fft_size_for_grad(op.inputs[0], rank), _dtypes.float32)
    the_irfft = irfft_fn(grad, fft_length)
    # None for the gradient w.r.t. the fft_length input (not differentiable).
    return 0.5 * (the_irfft * input_size + _math_ops.real(extra_terms)), None

  return _grad
def _irfft_grad_helper(rank, rfft_fn):
    """Returns a gradient function for an IRFFT of the provided rank."""
    # Can't happen because we don't register a gradient for IRFFT3D.
    assert rank in (1, 2), "Gradient for IRFFT3D is not implemented."
    def _grad(op, grad):
        """A gradient function for IRFFT with the provided `rank` and `rfft_fn`."""
        # Generate a simple mask like [1.0, 2.0, ..., 2.0, 1.0] for even-length FFTs
        # and [1.0, 2.0, ..., 2.0] for odd-length FFTs. To reduce extra ops in the
        # graph we special-case the situation where the FFT length and last
        # dimension of the input are known at graph construction time.
        fft_length = op.inputs[1]
        is_odd = _math_ops.mod(fft_length[-1], 2)
        input_last_dimension = _array_ops.shape(op.inputs[0])[-1]
        mask = _array_ops.concat(
            [[1.0], 2.0 * _array_ops.ones([input_last_dimension - 2 + is_odd]),
             _array_ops.ones([1 - is_odd])], 0)
        # 1 / N, where N is the product of the transformed dimensions.
        rsize = _math_ops.reciprocal(_math_ops.cast(
            _fft_size_for_grad(grad, rank), _dtypes.float32))
        # The gradient of IRFFT is the RFFT of the incoming gradient times a scaling
        # factor and a mask. The mask scales the gradient for the Hermitian
        # symmetric components of the RFFT by a factor of two, since these
        # components are de-duplicated in the RFFT.
        the_rfft = rfft_fn(grad, fft_length)
        return the_rfft * _math_ops.cast(rsize * mask, _dtypes.complex64), None
    return _grad
@tf_export("signal.fftshift")
def fftshift(x, axes=None, name=None):
    """Shift the zero-frequency component to the center of the spectrum.

    Swaps half-spaces along every axis listed in `axes` (all axes when
    `axes` is None). Note that ``y[0]`` is the Nyquist component only if
    ``len(x)`` is even.

    @compatibility(numpy)
    Equivalent to numpy.fft.fftshift.
    https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.fftshift.html
    @end_compatibility

    For example:

    ```python
    x = tf.signal.fftshift([ 0., 1., 2., 3., 4., -5., -4., -3., -2., -1.])
    x.numpy() # array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.])
    ```

    Args:
      x: `Tensor`, input tensor.
      axes: `int` or shape `tuple`, optional Axes over which to shift. Default is
        None, which shifts all axes.
      name: An optional name for the operation.

    Returns:
      A `Tensor`, The shifted tensor.
    """
    with _ops.name_scope(name, "fftshift") as name:
        tensor = _ops.convert_to_tensor(x)
        if axes is None:
            # Shift every axis by half its (statically known) length.
            axes = tuple(range(tensor.shape.ndims))
            offsets = [int(dim // 2) for dim in tensor.shape]
        elif isinstance(axes, int):
            offsets = int(tensor.shape[axes] // 2)
        else:
            offsets = [int(tensor.shape[ax] // 2) for ax in axes]
        return manip_ops.roll(tensor, offsets, axes, name)
@tf_export("signal.ifftshift")
def ifftshift(x, axes=None, name=None):
    """The inverse of fftshift.

    Identical to `fftshift` for even-length `x`; the two differ by one
    sample for odd-length `x`.

    @compatibility(numpy)
    Equivalent to numpy.fft.ifftshift.
    https://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.ifftshift.html
    @end_compatibility

    For example:

    ```python
    x = tf.signal.ifftshift([[ 0., 1., 2.],[ 3., 4., -4.],[-3., -2., -1.]])
    x.numpy() # array([[ 4., -4., 3.],[-2., -1., -3.],[ 1., 2., 0.]])
    ```

    Args:
      x: `Tensor`, input tensor.
      axes: `int` or shape `tuple` Axes over which to calculate. Defaults to None,
        which shifts all axes.
      name: An optional name for the operation.

    Returns:
      A `Tensor`, The shifted tensor.
    """
    with _ops.name_scope(name, "ifftshift") as name:
        tensor = _ops.convert_to_tensor(x)
        if axes is None:
            # Undo fftshift: roll each axis back by half its length.
            axes = tuple(range(tensor.shape.ndims))
            offsets = [-int(dim // 2) for dim in tensor.shape]
        elif isinstance(axes, int):
            offsets = -int(tensor.shape[axes] // 2)
        else:
            offsets = [-int(tensor.shape[ax] // 2) for ax in axes]
        return manip_ops.roll(tensor, offsets, axes, name)
# Register RFFT/IRFFT gradients for ranks 1 and 2 only; rank 3 deliberately
# has no registered gradient (the helpers assert rank in (1, 2)).
_ops.RegisterGradient("RFFT")(_rfft_grad_helper(1, irfft))
_ops.RegisterGradient("IRFFT")(_irfft_grad_helper(1, rfft))
_ops.RegisterGradient("RFFT2D")(_rfft_grad_helper(2, irfft2d))
_ops.RegisterGradient("IRFFT2D")(_irfft_grad_helper(2, rfft2d))
| |
from copy import deepcopy
from datetime import datetime as dt, timedelta
from functools import update_wrapper, wraps
from inspect import isclass
import re
from singledispatch import singledispatch
from .const import DEFAULT_LIMIT
def cacheable_generator(obj_type):
    """Caching decorator for API generator methods.

    When the decorated generator method has a fresh cache entry for the given
    argument combination, the cached objects are yielded without calling the
    underlying method. Otherwise the underlying method is called and its
    objects are yielded as they arrive; once (and only once) the generator is
    fully exhausted, the collected objects are cached.

    Cache expiration is keyed on ``obj_type`` and defaults to
    traw.const.DEFAULT_CACHE_TIMEOUT (300 seconds). It can be tuned per object
    type from the client:

    .. code-block:: python

        client.change_cache_timeout(models.Run, 30)

    which lowers the cache timeout for models.Run objects from 300 seconds
    to 30 seconds.
    """
    def _cacheable_generator(func):
        """ """
        cache = func.cache = {}

        @wraps(func)
        def cacheable_func(inst, *args, **kwargs):
            key = str(args) + str(kwargs)
            entry = cache.get(key)
            if entry is not None and entry['expires'] >= dt.now():
                # Fresh cache hit: replay the stored objects.
                for cached_val in entry['value']:
                    yield cached_val
                return
            collected = []
            timeout = inst.cache_timeouts[inst][obj_type]
            expiry = dt.now() + timedelta(seconds=timeout)
            for val in func(inst, *args, **kwargs):
                collected.append(val)
                yield val
            # Reached only when the underlying generator is exhausted, so a
            # partially consumed run is never cached.
            cache[key] = {'value': collected, 'expires': expiry}
        return cacheable_func
    return _cacheable_generator
def cacheable(obj_type):
    """Caching decorator for API methods that return a single object.

    When the decorated method has a fresh cache entry for the given argument
    combination, the cached object is returned without calling the underlying
    method. Otherwise the underlying method is called and its result is
    cached and returned.

    Cache expiration is keyed on ``obj_type`` and defaults to
    traw.const.DEFAULT_CACHE_TIMEOUT (300 seconds). It can be tuned per object
    type from the client:

    .. code-block:: python

        client.change_cache_timeout(models.Run, 30)

    which lowers the cache timeout for models.Run objects from 300 seconds
    to 30 seconds.
    """
    def cacheable_func(func):
        """ """
        cache = func.cache = {}

        @wraps(func)
        def _cacheable_func(inst, *args, **kwargs):
            key = str(args) + str(kwargs)
            entry = cache.get(key)
            if entry is None or entry['expires'] < dt.now():
                timeout = inst.cache_timeouts[inst][obj_type]
                entry = {
                    'value': func(inst, *args, **kwargs),
                    'expires': dt.now() + timedelta(seconds=timeout),
                }
                cache[key] = entry
            return entry['value']
        return _cacheable_func
    return cacheable_func
def clear_cache(method):
    """API method decorator for API methods that POST to the TestRail API.

    When TRAW adds/closes/deletes/updates objects through the TestRail API,
    any cached objects of the same type become stale. To ensure TRAW never
    returns a cached object that has been modified server-side,
    ``clear_cache`` empties the cache of the given API method after a
    successful call.

    For instance, if ``traw.api.project_by_id`` is decorated with
    ``@cacheable``, then any API method that POSTs changes for
    models.Project objects should be decorated with
    ``@clear_cache(project_by_id)``:

    .. code-block:: python

        class API(object):
            @cacheable
            def foo_by_id(self, foo_id):
                # ...
                return foo

            @cacheable_generator
            def foos(self):
                # ...
                yield from foo_list

            @clear_cache(foo_by_id)
            @clear_cache(foos)
            def add_foo(self, new_foo):
                # ...
                return new_foo_response

    The method cache is only cleared when the call succeeds; exceptions
    raised by the actual TestRail API call leave the cache untouched.
    """
    def _decorator(func):
        @wraps(func)
        def _wrapped(*args, **kwargs):
            result = func(*args, **kwargs)
            # Reached only on success, so a failed call keeps the cache.
            method.cache.clear()
            return result
        return _wrapped
    return _decorator
def dispatchmethod(func):
    """``functools.singledispatch`` adapted for instance methods.

    Dispatches on the type of the first positional argument after ``self``
    (falling back to ``self`` itself when no arguments are given), with the
    same basic interface that ``singledispatch`` provides.
    """
    # This implementation builds on the following gist:
    # https://gist.github.com/adamnew123456/9218f99ba35da225ca11
    dispatcher = singledispatch(func)

    def register(type):  # pylint: disable=redefined-builtin
        def _register(impl):
            return dispatcher.register(type)(impl)
        return _register

    def dispatch(type):  # pylint: disable=redefined-builtin
        return dispatcher.dispatch(type)

    def wrapper(inst, *args, **kwargs):
        # Dispatch on the first real argument; classes dispatch as themselves.
        target = args[0] if args else inst
        klass = target if isclass(target) else target.__class__
        return dispatch(klass)(inst, *args, **kwargs)

    wrapper.register = register
    wrapper.dispatch = dispatch
    wrapper.registry = dispatcher.registry
    wrapper._clear_cache = dispatcher._clear_cache  # pylint: disable=protected-access
    update_wrapper(wrapper, func)
    return wrapper
# Pre-compiled patterns for each duration segment (e.g. "3h", "15m").
# Compiling once at import time avoids re-building five regexes per call.
_DURATION_PATTERNS = {
    'weeks': re.compile(r'\d+w'),
    'days': re.compile(r'\d+d'),
    'hours': re.compile(r'\d+h'),
    'minutes': re.compile(r'\d+m'),
    'seconds': re.compile(r'\d+s'),
}


def duration_to_timedelta(duration):
    """Convert a duration string such as ``"1w2d3h4m5s"`` to a ``timedelta``.

    Any segment may be omitted; segments that are absent (or any unrecognized
    text) contribute zero, so ``duration_to_timedelta("")`` is ``timedelta(0)``.

    Args:
        duration (str): duration spec using ``w``/``d``/``h``/``m``/``s`` units.

    Returns:
        datetime.timedelta: the summed duration.
    """
    def timespan(segment):
        # A match looks like "12w"; strip the unit suffix and parse the digits.
        return int(segment.group(0)[:-1]) if segment else 0

    return timedelta(**{unit: timespan(pattern.search(duration))
                        for unit, pattern in _DURATION_PATTERNS.items()})
def paginate(func):
    """Decorator that transparently pages a limit/offset generator API.

    Repeatedly calls ``func`` with an increasing ``offset`` (and a ``limit``
    capped at one page) until either the caller-supplied ``limit`` is reached
    or the API returns fewer objects than a full page (``DEFAULT_LIMIT``),
    which signals that no more objects are available.
    """
    @wraps(func)
    def paginated_func(*args, **kwargs):
        limit = kwargs.get('limit', None)
        offset = 0
        keep_paging = True
        while keep_paging:
            new_kwargs = deepcopy(kwargs)
            new_kwargs['offset'] = offset
            if limit:
                # Request at most one page, and no more than remain.
                new_kwargs['limit'] = min([limit - offset, DEFAULT_LIMIT])
            obj_count = 0  # stays 0 when the API yields nothing
            for obj_count, obj in enumerate(func(*args, **new_kwargs), 1):
                yield obj
                if limit and obj_count + offset >= limit:
                    break
            offset = offset + obj_count
            if limit and offset >= limit:
                keep_paging = False
            elif limit is None and DEFAULT_LIMIT > obj_count:
                # If obj_count is less than the paging size (DEFAULT_LIMIT),
                # it indicates that there are no more objects for the API
                # to return
                keep_paging = False
    return paginated_func
| |
"""Switch platform for Hyperion."""
import functools
from typing import Any, Callable, Dict, Optional
from hyperion import client
from hyperion.const import (
KEY_COMPONENT,
KEY_COMPONENTID_ALL,
KEY_COMPONENTID_BLACKBORDER,
KEY_COMPONENTID_BOBLIGHTSERVER,
KEY_COMPONENTID_FORWARDER,
KEY_COMPONENTID_GRABBER,
KEY_COMPONENTID_LEDDEVICE,
KEY_COMPONENTID_SMOOTHING,
KEY_COMPONENTID_V4L,
KEY_COMPONENTS,
KEY_COMPONENTSTATE,
KEY_ENABLED,
KEY_NAME,
KEY_STATE,
KEY_UPDATE,
)
from homeassistant.components.switch import SwitchEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util import slugify
from . import get_hyperion_unique_id, listen_for_instance_updates
from .const import (
COMPONENT_TO_NAME,
CONF_INSTANCE_CLIENTS,
DOMAIN,
NAME_SUFFIX_HYPERION_COMPONENT_SWITCH,
SIGNAL_ENTITY_REMOVE,
TYPE_HYPERION_COMPONENT_SWITCH_BASE,
)
# Hyperion components exposed as switch entities, in presentation order.
# KEY_COMPONENTID_ALL toggles every component at once.
COMPONENT_SWITCHES = [
    KEY_COMPONENTID_ALL,
    KEY_COMPONENTID_SMOOTHING,
    KEY_COMPONENTID_BLACKBORDER,
    KEY_COMPONENTID_FORWARDER,
    KEY_COMPONENTID_BOBLIGHTSERVER,
    KEY_COMPONENTID_GRABBER,
    KEY_COMPONENTID_LEDDEVICE,
    KEY_COMPONENTID_V4L,
]
async def async_setup_entry(
    hass: HomeAssistantType, config_entry: ConfigEntry, async_add_entities: Callable
) -> bool:
    """Set up a Hyperion platform from config entry."""
    entry_data = hass.data[DOMAIN][config_entry.entry_id]
    server_id = config_entry.unique_id

    def component_to_switch_type(component: str) -> str:
        """Convert a component to a switch type string."""
        return slugify(
            f"{TYPE_HYPERION_COMPONENT_SWITCH_BASE} {COMPONENT_TO_NAME[component]}"
        )

    def component_to_unique_id(component: str, instance_num: int) -> str:
        """Convert a component to a unique_id."""
        assert server_id
        return get_hyperion_unique_id(
            server_id, instance_num, component_to_switch_type(component)
        )

    def component_to_switch_name(component: str, instance_name: str) -> str:
        """Convert a component to a switch name."""
        return (
            f"{instance_name} "
            f"{NAME_SUFFIX_HYPERION_COMPONENT_SWITCH} "
            f"{COMPONENT_TO_NAME.get(component, component.capitalize())}"
        )

    @callback
    def instance_add(instance_num: int, instance_name: str) -> None:
        """Add entities for a new Hyperion instance."""
        assert server_id
        # One switch entity per controllable component on this instance.
        switches = []
        for component in COMPONENT_SWITCHES:
            switches.append(
                HyperionComponentSwitch(
                    component_to_unique_id(component, instance_num),
                    component_to_switch_name(component, instance_name),
                    component,
                    entry_data[CONF_INSTANCE_CLIENTS][instance_num],
                ),
            )
        async_add_entities(switches)

    @callback
    def instance_remove(instance_num: int) -> None:
        """Remove entities for an old Hyperion instance."""
        assert server_id
        # Signal each switch entity (by unique_id) to remove itself.
        for component in COMPONENT_SWITCHES:
            async_dispatcher_send(
                hass,
                SIGNAL_ENTITY_REMOVE.format(
                    component_to_unique_id(component, instance_num),
                ),
            )

    listen_for_instance_updates(hass, config_entry, instance_add, instance_remove)
    return True
class HyperionComponentSwitch(SwitchEntity):
    """ComponentBinarySwitch switch class."""

    def __init__(
        self,
        unique_id: str,
        name: str,
        component_name: str,
        hyperion_client: client.HyperionClient,
    ) -> None:
        """Initialize the switch."""
        self._unique_id = unique_id
        self._name = name
        self._component_name = component_name
        self._client = hyperion_client
        # Re-render this entity whenever the server pushes a components update.
        self._client_callbacks = {
            f"{KEY_COMPONENTS}-{KEY_UPDATE}": self._update_components
        }

    @property
    def should_poll(self) -> bool:
        """Return whether or not this entity should be polled."""
        # State arrives via client callbacks, so polling is unnecessary.
        return False

    @property
    def entity_registry_enabled_default(self) -> bool:
        """Whether or not the entity is enabled by default."""
        # These component controls are for advanced users and are disabled by default.
        return False

    @property
    def unique_id(self) -> str:
        """Return a unique id for this instance."""
        return self._unique_id

    @property
    def name(self) -> str:
        """Return the name of the switch."""
        return self._name

    @property
    def is_on(self) -> bool:
        """Return true if the switch is on."""
        for component in self._client.components:
            if component[KEY_NAME] == self._component_name:
                # NOTE(review): setdefault mutates the client's component dict
                # when KEY_ENABLED is absent — presumably intentional; confirm.
                return bool(component.setdefault(KEY_ENABLED, False))
        return False

    @property
    def available(self) -> bool:
        """Return server availability."""
        return bool(self._client.has_loaded_state)

    async def _async_send_set_component(self, value: bool) -> None:
        """Send a component control request."""
        await self._client.async_send_set_component(
            **{
                KEY_COMPONENTSTATE: {
                    KEY_COMPONENT: self._component_name,
                    KEY_STATE: value,
                }
            }
        )

    # pylint: disable=unused-argument
    async def async_turn_on(self, **kwargs: Any) -> None:
        """Turn on the switch."""
        await self._async_send_set_component(True)

    # pylint: disable=unused-argument
    async def async_turn_off(self, **kwargs: Any) -> None:
        """Turn off the switch."""
        await self._async_send_set_component(False)

    @callback
    def _update_components(self, _: Optional[Dict[str, Any]] = None) -> None:
        """Update Hyperion components."""
        self.async_write_ha_state()

    async def async_added_to_hass(self) -> None:
        """Register callbacks when entity added to hass."""
        assert self.hass
        # Listen for the per-unique_id removal signal sent by instance_remove.
        self.async_on_remove(
            async_dispatcher_connect(
                self.hass,
                SIGNAL_ENTITY_REMOVE.format(self._unique_id),
                functools.partial(self.async_remove, force_remove=True),
            )
        )
        self._client.add_callbacks(self._client_callbacks)

    async def async_will_remove_from_hass(self) -> None:
        """Cleanup prior to hass removal."""
        self._client.remove_callbacks(self._client_callbacks)
| |
from datetime import datetime, timedelta
from typing import Any, Dict, Optional
import pytest
from graphql.error import GraphQLError
from graphql.language import ValueNode
from graphql.pyutils import inspect
from graphql.type import (
GraphQLArgument,
GraphQLField,
GraphQLInputField,
GraphQLInputObjectType,
GraphQLInt,
GraphQLList,
GraphQLObjectType,
GraphQLScalarType,
GraphQLSchema,
)
from graphql.utilities import value_from_ast_untyped
from gql import Client, gql
def serialize_datetime(value: Any) -> str:
    """Serialize a ``datetime`` to its ISO-8601 string form."""
    if isinstance(value, datetime):
        return value.isoformat()
    raise GraphQLError("Cannot serialize datetime value: " + inspect(value))
def parse_datetime_value(value: Any) -> datetime:
    """Parse an ISO-8601 string into a ``datetime``.

    Raises:
        GraphQLError: if ``value`` is not a string or cannot be parsed.
    """
    if not isinstance(value, str):
        raise GraphQLError("Cannot parse datetime value: " + inspect(value))
    try:
        # Note: a more solid custom scalar should use dateutil.parser.isoparse
        # Not using it here in the test to avoid adding another dependency
        return datetime.fromisoformat(value)
    except Exception as exc:
        # Fix: message made consistent with the non-string branch (a stray
        # space before the colon), and the original parsing error is kept
        # as the cause for easier debugging.
        raise GraphQLError("Cannot parse datetime value: " + inspect(value)) from exc
def parse_datetime_literal(
    value_node: ValueNode, variables: Optional[Dict[str, Any]] = None
) -> datetime:
    """Parse a GraphQL literal AST node into a ``datetime``."""
    ast_value = value_from_ast_untyped(value_node, variables)
    if isinstance(ast_value, str):
        return parse_datetime_value(ast_value)
    raise GraphQLError("Cannot parse literal datetime value: " + inspect(ast_value))
# Custom GraphQL scalar mapping Python ``datetime`` objects to/from
# ISO-8601 strings in results, variables, and inline literals.
DatetimeScalar = GraphQLScalarType(
    name="Datetime",
    serialize=serialize_datetime,
    parse_value=parse_datetime_value,
    parse_literal=parse_datetime_literal,
)
def resolve_shift_days(root, _info, time, days):
    """Resolver: return ``time`` advanced by ``days`` days."""
    shift = timedelta(days=days)
    return time + shift
def resolve_latest(root, _info, times):
    """Resolver: return the most recent datetime among ``times``."""
    return max(times)
def resolve_seconds(root, _info, interval):
    """Resolver: return the length of ``interval`` in whole-float seconds."""
    print(f"interval={interval!r}")
    span = interval["end"] - interval["start"]
    return span.total_seconds()
# Input object with two Datetime fields, used to exercise custom scalar
# serialization inside nested input values.
IntervalInputType = GraphQLInputObjectType(
    "IntervalInput",
    fields={
        "start": GraphQLInputField(DatetimeScalar),
        "end": GraphQLInputField(DatetimeScalar),
    },
)
# Root query type exposing three fields that exercise the custom scalar:
# shiftDays (Datetime in/out), latest (list input), seconds (input object).
queryType = GraphQLObjectType(
    name="RootQueryType",
    fields={
        "shiftDays": GraphQLField(
            DatetimeScalar,
            args={
                "time": GraphQLArgument(DatetimeScalar),
                "days": GraphQLArgument(GraphQLInt),
            },
            resolve=resolve_shift_days,
        ),
        "latest": GraphQLField(
            DatetimeScalar,
            args={"times": GraphQLArgument(GraphQLList(DatetimeScalar))},
            resolve=resolve_latest,
        ),
        "seconds": GraphQLField(
            GraphQLInt,
            args={"interval": GraphQLArgument(IntervalInputType)},
            resolve=resolve_seconds,
        ),
    },
)

schema = GraphQLSchema(query=queryType)
@pytest.mark.skipif(
    not hasattr(datetime, "fromisoformat"), reason="fromisoformat is new in Python 3.7+"
)
def test_shift_days():
    """Datetime variables are serialized out and the Datetime result parsed back."""
    client = Client(schema=schema, parse_results=True, serialize_variables=True)

    now = datetime.fromisoformat("2021-11-12T11:58:13.461161")

    query = gql("query shift5days($time: Datetime) {shiftDays(time: $time, days: 5)}")

    variable_values = {
        "time": now,
    }

    result = client.execute(query, variable_values=variable_values)

    print(result)
    assert result["shiftDays"] == datetime.fromisoformat("2021-11-17T11:58:13.461161")
@pytest.mark.skipif(
    not hasattr(datetime, "fromisoformat"), reason="fromisoformat is new in Python 3.7+"
)
def test_shift_days_serialized_manually_in_query():
    """An inline ISO string literal parses via parse_literal; result is parsed back."""
    client = Client(schema=schema)

    query = gql(
        """{
        shiftDays(time: "2021-11-12T11:58:13.461161", days: 5)
    }"""
    )

    result = client.execute(query, parse_result=True)

    print(result)
    assert result["shiftDays"] == datetime.fromisoformat("2021-11-17T11:58:13.461161")
@pytest.mark.skipif(
    not hasattr(datetime, "fromisoformat"), reason="fromisoformat is new in Python 3.7+"
)
def test_shift_days_serialized_manually_in_variables():
    """A pre-serialized string variable passes through without serialize_variables."""
    client = Client(schema=schema, parse_results=True)

    query = gql("query shift5days($time: Datetime) {shiftDays(time: $time, days: 5)}")

    variable_values = {
        "time": "2021-11-12T11:58:13.461161",
    }

    result = client.execute(query, variable_values=variable_values)

    print(result)
    assert result["shiftDays"] == datetime.fromisoformat("2021-11-17T11:58:13.461161")
@pytest.mark.skipif(
    not hasattr(datetime, "fromisoformat"), reason="fromisoformat is new in Python 3.7+"
)
def test_latest():
    """A list of Datetime variables is serialized element-wise (per-call opt-in)."""
    client = Client(schema=schema, parse_results=True)

    now = datetime.fromisoformat("2021-11-12T11:58:13.461161")
    in_five_days = datetime.fromisoformat("2021-11-17T11:58:13.461161")

    query = gql("query latest($times: [Datetime!]!) {latest(times: $times)}")

    variable_values = {
        "times": [now, in_five_days],
    }

    result = client.execute(
        query, variable_values=variable_values, serialize_variables=True
    )

    print(result)
    assert result["latest"] == in_five_days
@pytest.mark.skipif(
    not hasattr(datetime, "fromisoformat"), reason="fromisoformat is new in Python 3.7+"
)
def test_seconds():
    """Datetime fields nested in an input-object variable are serialized too."""
    client = Client(schema=schema)

    now = datetime.fromisoformat("2021-11-12T11:58:13.461161")
    in_five_days = datetime.fromisoformat("2021-11-17T11:58:13.461161")

    query = gql(
        "query seconds($interval: IntervalInput) {seconds(interval: $interval)}"
    )

    variable_values = {"interval": {"start": now, "end": in_five_days}}

    result = client.execute(
        query, variable_values=variable_values, serialize_variables=True
    )

    print(result)
    # 5 days == 432000 seconds.
    assert result["seconds"] == 432000
| |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module's main purpose is to act as a script to create new versions
of erfa.c when ERFA is updated (or this generator is enhanced).
`Jinja2 <http://jinja.pocoo.org/>`_ must be installed for this
module/script to function.
Note that this does *not* currently automate the process of creating structs
or dtypes for those structs. They should be added manually in the template file.
"""
from __future__ import absolute_import, division, print_function
# note that we do *not* use unicode_literals here, because that makes the
# generated code's strings have u'' in them on py 2.x
import re
import os.path
from astropy.utils.compat.odict import OrderedDict
# Maps C argument types found in ERFA signatures to the numpy dtype
# expressions emitted into the generated wrappers. The struct dtypes
# (dt_eraASTROM / dt_eraLDBODY) are defined in the template file; char
# buffers are mapped to fixed-width 16-byte strings.
ctype_to_dtype = {'double'     : "numpy.double",
                  'int'        : "numpy.intc",
                  'eraASTROM'  : "dt_eraASTROM",
                  'eraLDBODY'  : "dt_eraLDBODY",
                  'char'       : "numpy.dtype('S16')",
                  'const char' : "numpy.dtype('S16')",
                  }
# Regex matching the repr of a multi-dimensional numpy sub-dtype, e.g.
# "numpy.dtype([('fi0', '<anything>', (<shape>))])", built by escaping the
# literal template and then re-opening wildcard/group holes in it.
NDIMS_REX = re.compile(re.escape("numpy.dtype([('fi0', '.*', <(.*)>)])").replace(r'\.\*', '.*').replace(r'\<', '(').replace(r'\>', ')'))
class FunctionDoc(object):
    """Parses an ERFA function's doc comment into input/output argument docs.

    The ``input``/``output`` properties lazily scan the "Given:" /
    "Returned:" sections; arguments under "Given and returned:" appear in
    both lists (they are in/out).
    """

    def __init__(self, doc):
        # Strip the C comment decorations so the section regexes below match.
        self.doc = doc.replace("**", "  ").replace("/*\n", "").replace("*/", "")
        self.__input = None
        self.__output = None
        self.__ret_info = None

    @property
    def input(self):
        """Argument docs from the "Given:" and "Given and returned:" sections."""
        if self.__input is None:
            self.__input = []
            result = re.search("Given([^\n]*):\n(.+?)  \n", self.doc, re.DOTALL)
            if result is not None:
                __input = result.group(2)
                for i in __input.split("\n"):
                    arg_doc = ArgumentDoc(i)
                    if arg_doc.name is not None:
                        self.__input.append(arg_doc)
            result = re.search("Given and returned([^\n]*):\n(.+?)  \n", self.doc, re.DOTALL)
            if result is not None:
                __input = result.group(2)
                for i in __input.split("\n"):
                    arg_doc = ArgumentDoc(i)
                    if arg_doc.name is not None:
                        self.__input.append(arg_doc)
        return self.__input

    @property
    def output(self):
        """Argument docs from the "Returned:" and "Given and returned:" sections."""
        if self.__output is None:
            self.__output = []
            result = re.search("Returned([^\n]*):\n(.+?)  \n", self.doc, re.DOTALL)
            if result is not None:
                __output = result.group(2)
                for i in __output.split("\n"):
                    arg_doc = ArgumentDoc(i)
                    if arg_doc.name is not None:
                        self.__output.append(arg_doc)
            result = re.search("Given and returned([^\n]*):\n(.+?)  \n", self.doc, re.DOTALL)
            if result is not None:
                __output = result.group(2)
                for i in __output.split("\n"):
                    arg_doc = ArgumentDoc(i)
                    if arg_doc.name is not None:
                        self.__output.append(arg_doc)
        return self.__output

    @property
    def ret_info(self):
        """ReturnDoc for the "Returned (function value):" section, or ''."""
        if self.__ret_info is None:
            ret_info = []
            result = re.search("Returned \\(function value\\)([^\n]*):\n(.+?)  \n", self.doc, re.DOTALL)
            if result is not None:
                ret_info.append(ReturnDoc(result.group(2)))

            if len(ret_info) == 0:
                self.__ret_info = ''
            elif len(ret_info) == 1:
                self.__ret_info = ret_info[0]
            else:
                raise ValueError("Multiple C return sections found in this doc:\n" + self.doc)

        return self.__ret_info

    def __repr__(self):
        return self.doc.replace("  \n", "\n")
class ArgumentDoc(object):
    """One row of an ERFA doc argument table ("   name   type   description").

    Exposes ``name``, ``type`` and ``doc``; all three are ``None`` when the
    given line does not look like an argument row.
    """

    # Indented row: name, type, then free-text description.
    _ROW = "^ +([^ ]+)[ ]+([^ ]+)[ ]+(.+)"

    def __init__(self, doc):
        match = re.search(self._ROW, doc)
        if match is None:
            self.name = self.type = self.doc = None
        else:
            self.name, self.type, self.doc = match.groups()

    def __repr__(self):
        return " {0:15} {1:15} {2}".format(self.name, self.type, self.doc)
class Argument(object):
    """A single C argument parsed from an ERFA function signature.

    Parses a declaration fragment such as ``"double *p"`` or
    ``"double d[2][3]"`` into its C type, name, pointer-ness and array shape,
    and classifies it as in/out/inout by consulting the function's parsed doc.

    Parameters
    ----------
    definition : str
        The raw C declaration text for this argument.
    doc : FunctionDoc
        Parsed documentation of the enclosing function; consulted lazily by
        ``inout_state``.
    """

    def __init__(self, definition, doc):
        # Fix: keep the raw declaration text — __repr__ references
        # ``self.definition`` but it was never stored, so repr() raised
        # AttributeError.
        self.definition = definition
        self.doc = doc
        self.__inout_state = None
        # Split "<ctype> <ptr/name/array>" at the last space.
        self.ctype, ptr_name_arr = definition.strip().rsplit(" ", 1)
        if "*" == ptr_name_arr[0]:
            self.is_ptr = True
            name_arr = ptr_name_arr[1:]
        else:
            self.is_ptr = False
            name_arr = ptr_name_arr
        if "[]" in ptr_name_arr:
            # An unsized array argument is passed as a pointer.
            self.is_ptr = True
            name_arr = name_arr[:-2]
        if "[" in name_arr:
            # Sized array: extract the name and the tuple of dimensions.
            self.name, arr = name_arr.split("[", 1)
            self.shape = tuple([int(size) for size in arr[:-1].split("][")])
        else:
            self.name = name_arr
            self.shape = ()

    @property
    def inout_state(self):
        """'in', 'out', 'inout' or '' depending on where the doc lists this name."""
        if self.__inout_state is None:
            self.__inout_state = ''
            for i in self.doc.input:
                if self.name in i.name.split(','):
                    self.__inout_state = 'in'
            for o in self.doc.output:
                if self.name in o.name.split(','):
                    if self.__inout_state == 'in':
                        self.__inout_state = 'inout'
                    else:
                        self.__inout_state = 'out'
        return self.__inout_state

    @property
    def ctype_ptr(self):
        """The C type as passed: pointer form for pointers and arrays."""
        if (self.is_ptr) | (len(self.shape) > 0):
            return self.ctype + " *"
        else:
            return self.ctype

    @property
    def name_in_broadcast(self):
        """Broadcast-input indexing expression used in the generated code."""
        if len(self.shape) > 0:
            return "{0}_in[...{1}]".format(self.name, ",0" * len(self.shape))
        else:
            return "{0}_in".format(self.name)

    @property
    def name_out_broadcast(self):
        """Broadcast-output indexing expression used in the generated code."""
        if len(self.shape) > 0:
            return "{0}_out[...{1}]".format(self.name, ",0" * len(self.shape))
        else:
            return "{0}_out".format(self.name)

    @property
    def dtype(self):
        """numpy dtype expression for this argument's C type."""
        return ctype_to_dtype[self.ctype]

    @property
    def ndim(self):
        """Number of fixed array dimensions."""
        return len(self.shape)

    @property
    def cshape(self):
        """C array-suffix string, e.g. '[2][3]' (empty for scalars)."""
        return ''.join(['[{0}]'.format(s) for s in self.shape])

    @property
    def name_for_call(self):
        """Spelling used when passing this argument to the C function."""
        if self.is_ptr:
            return '_' + self.name
        else:
            return '*_' + self.name

    def __repr__(self):
        return "Argument('{0}', name='{1}', ctype='{2}', inout_state='{3}')".format(self.definition, self.name, self.ctype, self.inout_state)
class ReturnDoc(object):
    """Parsed documentation for a C function's return value.

    When the description word begins with 'status', the status-code table
    that follows the first ':' is parsed into ``statuscodes`` (continuation
    lines are appended to the most recent code's message); otherwise
    ``statuscodes`` is ``None``.
    """

    def __init__(self, doc):
        self.doc = doc
        self.infoline = doc.split('\n')[0].strip()
        fields = self.infoline.split()
        self.type = fields[0]
        self.descr = fields[1]
        if not self.descr.startswith('status'):
            self.statuscodes = None
            return
        self.statuscodes = codes = {}
        current = None
        for raw_line in doc[doc.index(':') + 1:].split('\n'):
            stripped = raw_line.strip()
            if not stripped:
                continue
            if ' = ' in stripped:
                current, message = stripped.split(' = ')
                if current != 'else':
                    current = int(current)
                codes[current] = message
            elif current is not None:
                # Continuation line of the previous code's message.
                codes[current] += stripped

    def __repr__(self):
        return "Return value, type={0:15}, {1}, {2}".format(self.type, self.descr, self.doc)
class Return(object):
    """Represents the C return value of an ERFA function.

    Mirrors the ``Argument`` interface (name/shape/dtype/inout_state) so the
    templates can treat the return value as just another output.
    """

    def __init__(self, ctype, doc):
        self.name = 'c_retval'
        self.name_out_broadcast = self.name + "_out"
        # An int return is an ERFA status code; anything else is a real value.
        self.inout_state = 'stat' if ctype == 'int' else 'ret'
        self.ctype = ctype
        self.ctype_ptr = ctype
        self.shape = ()
        self.doc = doc

    def __repr__(self):
        return "Return(name='{0}', ctype='{1}', inout_state='{2}')".format(
            self.name, self.ctype, self.inout_state)

    @property
    def dtype(self):
        """numpy dtype expression for this C type."""
        return ctype_to_dtype[self.ctype]

    @property
    def nd_dtype(self):
        """True if the return dtype is multi-dimensional, like double[3][3]."""
        return "'fi0'" in self.dtype

    @property
    def doc_info(self):
        """The ReturnDoc parsed from the function's documentation."""
        return self.doc.ret_info
class Function(object):
    """
    A class representing a C function.

    Parameters
    ----------
    name : str
        The name of the function
    source_path : str
        Either a directory, which means look for the function in a
        stand-alone file (like for the standard ERFA distribution), or a
        file, which means look for the function in that file (as for the
        astropy-packaged single-file erfa.c).
    match_line : str, optional
        If given, searching of the source file will skip until it finds
        a line matching this string, and start from there.
    """

    def __init__(self, name, source_path, match_line=None):
        self.name = name
        # e.g. "eraAb" -> pyname "ab", filename "ab.c"
        self.pyname = name.split('era')[-1].lower()
        self.filename = self.pyname + ".c"
        if os.path.isdir(source_path):
            self.filepath = os.path.join(os.path.normpath(source_path), self.filename)
        else:
            self.filepath = source_path

        with open(self.filepath) as f:
            if match_line:
                # Skip ahead to the first line starting with match_line, then
                # read the remainder of the file from there.
                line = f.readline()
                while line != '':
                    if line.startswith(match_line):
                        filecontents = '\n' + line + f.read()
                        break
                    line = f.readline()
                else:
                    msg = ('Could not find the match_line "{0}" in '
                           'the source file "{1}"')
                    raise ValueError(msg.format(match_line, self.filepath))
            else:
                filecontents = f.read()

        # Capture the function's signature (group 1) and the doc comment
        # that follows it (group 2).
        pattern = "\n([^\n]+{0} ?\([^)]+\)).+?(/\*.+?\*/)".format(name)
        p = re.compile(pattern, flags=re.DOTALL | re.MULTILINE)

        search = p.search(filecontents)
        self.cfunc = " ".join(search.group(1).split())
        self.doc = FunctionDoc(search.group(2))

        # One Argument per comma-separated declaration in the parameter list.
        self.args = []
        for arg in re.search("\(([^)]+)\)", self.cfunc).group(1).split(', '):
            self.args.append(Argument(arg, self.doc))
        # The declared return type; non-void returns become a Return pseudo-arg.
        self.ret = re.search("^(.*){0}".format(name), self.cfunc).group(1).strip()
        if self.ret != 'void':
            self.args.append(Return(self.ret, self.doc))

    def args_by_inout(self, inout_filter, prop=None, join=None):
        """
        Gives all of the arguments and/or returned values, depending on whether
        they are inputs, outputs, etc.

        The value for `inout_filter` should be a string containing anything
        that arguments' `inout_state` attribute produces. Currently, that can be:

          * "in" : input
          * "out" : output
          * "inout" : something that's could be input or output (e.g. a struct)
          * "ret" : the return value of the C function
          * "stat" : the return value of the C function if it is a status code

        It can also be a "|"-separated string giving inout states to OR
        together.
        """
        result = []
        for arg in self.args:
            if arg.inout_state in inout_filter.split('|'):
                if prop is None:
                    result.append(arg)
                else:
                    result.append(getattr(arg, prop))
        if join is not None:
            return join.join(result)
        else:
            return result

    def __repr__(self):
        return "Function(name='{0}', pyname='{1}', filename='{2}', filepath='{3}')".format(self.name, self.pyname, self.filename, self.filepath)
class Constant(object):
    """An ERFA constant, with the ``ERFA_`` prefix stripped from name and value."""

    def __init__(self, name, value, doc):
        self.name = name.replace("ERFA_", "")
        self.value = value.replace("ERFA_", "")
        self.doc = doc
def main(srcdir, outfn, templateloc, verbose=True):
    """Generate the ERFA wrapper sources from the C headers and templates.

    Parameters
    ----------
    srcdir : str
        Either a directory containing ``erfa.h``/``erfa.c``, or the path to a
        single ``erfa.c`` file (with ``erfa.h`` next to it).
    outfn : str or None
        Output filename for the generated python module; the generated C file
        gets the same name with a ``.c`` extension.  If None, nothing is
        written and the rendered text is only returned.
    templateloc : str
        Directory containing ``core.c.templ`` and ``core.py.templ``.
    verbose : bool
        If True, progress is printed to stdout.

    Returns
    -------
    tuple
        ``(erfa_c, erfa_py, funcs)`` — the rendered C source, the rendered
        python source, and the list of parsed ``Function`` objects.
    """
    from jinja2 import Environment, FileSystemLoader
    # print_ is a no-op when not verbose, so call sites stay unconditional.
    if verbose:
        print_ = lambda *args, **kwargs: print(*args, **kwargs)
    else:
        print_ = lambda *args, **kwargs: None
    #Prepare the jinja2 templating environment
    env = Environment(loader=FileSystemLoader(templateloc))
    # Small string filters exposed to the templates.
    def prefix(a_list, pre):
        return [pre+'{0}'.format(an_element) for an_element in a_list]
    def postfix(a_list, post):
        return ['{0}'.format(an_element)+post for an_element in a_list]
    def surround(a_list, pre, post):
        return [pre+'{0}'.format(an_element)+post for an_element in a_list]
    env.filters['prefix'] = prefix
    env.filters['postfix'] = postfix
    env.filters['surround'] = surround
    erfa_c_in = env.get_template('core.c.templ')
    erfa_py_in = env.get_template('core.py.templ')
    #Extract all the ERFA function names from erfa.h
    if os.path.isdir(srcdir):
        erfahfn = os.path.join(srcdir, 'erfa.h')
        multifilserc = True
    else:
        erfahfn = os.path.join(os.path.split(srcdir)[0], 'erfa.h')
        multifilserc = False
    with open(erfahfn, "r") as f:
        erfa_h = f.read()
    funcs = OrderedDict()
    # Headers are organized as "/* Section/Subsection */" blocks separated by
    # blank lines; only the "Astronomy" section is wrapped.
    section_subsection_functions = re.findall('/\* (\w*)/(\w*) \*/\n(.*?)\n\n',
                                              erfa_h, flags=re.DOTALL|re.MULTILINE)
    for section, subsection, functions in section_subsection_functions:
        print_("{0}.{1}".format(section, subsection))
        if section == "Astronomy":
            func_names = re.findall(' (\w+)\(.*?\);', functions, flags=re.DOTALL)
            for name in func_names:
                print_("{0}.{1}.{2}...".format(section, subsection, name))
                if multifilserc:
                    # easy because it just looks in the file itself
                    funcs[name] = Function(name, srcdir)
                else:
                    # Have to tell it to look for a declaration matching
                    # the start of the header declaration, otherwise it
                    # might find a *call* of the function instead of the
                    # definition
                    for line in functions.split('\n'):
                        if name in line:
                            # [:-1] is to remove trailing semicolon, and
                            # splitting on '(' is because the header and
                            # C files don't necessarily have to match
                            # argument names and line-breaking or
                            # whitespace
                            match_line = line[:-1].split('(')[0]
                            funcs[name] = Function(name, srcdir, match_line)
                            break
                    else:
                        raise ValueError("A name for a C file wasn't "
                                         "found in the string that "
                                         "spawned it. This should be "
                                         "impossible!")
    funcs = list(funcs.values())
    #Extract all the ERFA constants from erfam.h
    erfamhfn = os.path.join(srcdir, 'erfam.h')
    with open(erfamhfn, 'r') as f:
        erfa_m_h = f.read()
    constants = []
    # Constants are grouped in blank-line-separated chunks; the chunk's
    # leading "/* ... */" comment becomes the doc for all #defines in it.
    for chunk in erfa_m_h.split("\n\n"):
        result = re.findall("#define (ERFA_\w+?) (.+?)$", chunk, flags=re.DOTALL|re.MULTILINE)
        if result:
            doc = re.findall("/\* (.+?) \*/\n", chunk, flags=re.DOTALL)
            for (name, value) in result:
                constants.append(Constant(name, value, doc))
    print_("Rendering template")
    erfa_c = erfa_c_in.render(funcs=funcs)
    erfa_py = erfa_py_in.render(funcs=funcs, constants=constants)
    if outfn is not None:
        # The C output shares the stem of the python output file.
        outfn_c = os.path.splitext(outfn)[0] + ".c"
        print_("Saving to", outfn, 'and', outfn_c)
        with open(outfn, "w") as f:
            f.write(erfa_py)
        with open(outfn_c, "w") as f:
            f.write(erfa_c)
    print_("Done!")
    return erfa_c, erfa_py, funcs
# Default locations, relative to this generator script.
DEFAULT_ERFA_LOC = os.path.join(os.path.split(__file__)[0],
                                '../../cextern/erfa')
DEFAULT_TEMPLATE_LOC = os.path.split(__file__)[0]

if __name__ == '__main__':
    from argparse import ArgumentParser

    ap = ArgumentParser()
    ap.add_argument('srcdir', default=DEFAULT_ERFA_LOC, nargs='?',
                    help='Directory where the ERFA c and header files '
                         'can be found or to a single erfa.c file '
                         '(which must be in the same directory as '
                         'erfa.h). Defaults to the builtin astropy '
                         'erfa: "{0}"'.format(DEFAULT_ERFA_LOC))
    ap.add_argument('-o', '--output', default='core.py',
                    help='The output filename. This is the name for only the '
                         'pure-python output, the C part will have the '
                         'same name but with a ".c" extension.')
    ap.add_argument('-t', '--template-loc',
                    default=DEFAULT_TEMPLATE_LOC,
                    help='the location where the "core.c.templ" '
                         'template can be found.')
    ap.add_argument('-q', '--quiet', action='store_false', dest='verbose',
                    help='Suppress output normally printed to stdout.')
    args = ap.parse_args()
    # Bug fix: args.verbose was parsed (via -q/--quiet) but never forwarded
    # to main(), so the quiet flag previously had no effect.
    main(args.srcdir, args.output, args.template_loc, verbose=args.verbose)
| |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in tensor_array_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import tensor_array_ops
# Mark the TensorArray bookkeeping ops as non-differentiable; gradient flow
# for reads/writes/pack/unpack/concat/split is handled by the
# TensorArray*Grad registrations later in this file.
ops.NoGradient("TensorArray")
ops.NoGradient("TensorArrayGrad")
ops.NoGradient("TensorArraySize")
ops.NoGradient("TensorArrayClose")
def _GetGradSource(op_or_tensor):
  """Identify which call to tf.gradients created this gradient op or tensor.

  TensorArray gradients accumulate into a companion (accumulator)
  TensorArray.  If several independent gradient computations run in the same
  session, they must not share one accumulator, or values would be double
  counted and the gradient flow corrupted.  To keep them apart, the name
  component introduced by each tf.gradients call (the first token starting
  with "gradients") is used as a unique label, and each call gets its own
  accumulator TensorArray keyed on that label.

  Args:
    op_or_tensor: `Tensor` or `Operation` which is an input to a
      TensorArray*Grad call.

  Returns:
    A python string, the unique label associated with this particular
    gradients calculation.

  Raises:
    ValueError: If not called within a gradients calculation.
  """
  tokens = op_or_tensor.name.split("/")
  for depth, token in enumerate(tokens):
    if token.startswith("gradients"):
      # Keep everything up to and including the first gradients scope.
      return "/".join(tokens[:depth + 1])
  raise ValueError(
      "Expected op/tensor name to start with gradients (excluding scope)"
      ", got: %s" % op_or_tensor.name)
@ops.RegisterGradient("TensorArrayRead")
def _TensorArrayReadGrad(op, grad):
  """Gradient for TensorArrayRead.

  Args:
    op: Forward TensorArrayRead op.
    grad: Gradient `Tensor` to TensorArrayRead.

  Returns:
    A flow `Tensor`, which can be used in control dependencies to
    force the write of `grad` to the gradient `TensorArray`.
  """
  # Note: the forward flow dependency in the call to grad() is necessary for
  # the case of dynamic sized TensorArrays. When creating the gradient
  # TensorArray, the final size of the forward array must be known.
  # For this we need to wait until it has been created by depending on
  # the input flow of the original op.
  handle = op.inputs[0]
  index = op.inputs[1]
  flow = op.inputs[2]
  dtype = op.get_attr("dtype")
  # The source label keeps accumulators from distinct tf.gradients calls
  # separate (see _GetGradSource).
  grad_source = _GetGradSource(grad)
  g = tensor_array_ops.TensorArray(dtype=dtype, handle=handle).grad(
      source=grad_source, flow=flow)
  # A forward read at `index` becomes a backward write of `grad` at `index`.
  w_g = g.write(index, grad)
  # Forward inputs were (handle, index, flow); only flow carries a gradient.
  return [None, None, w_g.flow]
@ops.RegisterGradient("TensorArrayWrite")
def _TensorArrayWriteGrad(op, flow):
  """Gradient for TensorArrayWrite.

  Args:
    op: Forward TensorArrayWrite op.
    flow: Gradient `Tensor` flow to TensorArrayWrite.

  Returns:
    A grad `Tensor`, the gradient created in an upstream ReadGrad or PackGrad.
  """
  # handle is the output store_handle of TensorArrayReadGrad or
  # the handle output of TensorArrayWriteGrad. we must use this one.
  handle = op.inputs[0]
  index = op.inputs[1]
  grad_array = tensor_array_ops.TensorArray(
      dtype=op.get_attr("T"), handle=handle).grad(
          source=_GetGradSource(flow), flow=flow)
  # A forward write at `index` becomes a backward read at `index`.
  # Forward inputs were (handle, index, value, flow).
  return [None, None, grad_array.read(index), flow]
@ops.RegisterGradient("TensorArrayPack")
def _TensorArrayPackGrad(op, grad):
  """Gradient for TensorArrayPack.

  Args:
    op: Forward TensorArrayPack op.
    grad: Gradient `Tensor` to TensorArrayPack.

  Returns:
    A flow `Tensor`, which can be used in control dependencies to
    force the write of `grad` to the gradient `TensorArray`.
  """
  # Note: the forward flow dependency in the call to grad() is necessary for
  # the case of dynamic sized TensorArrays. When creating the gradient
  # TensorArray, the final size of the forward array must be known.
  # For this we need to wait until it has been created by depending on
  # the input flow of the original op.
  handle = op.inputs[0]
  flow = op.inputs[1]
  dtype = op.get_attr("dtype")
  grad_source = _GetGradSource(grad)
  g = tensor_array_ops.TensorArray(dtype=dtype, handle=handle).grad(
      source=grad_source, flow=flow)
  # A forward pack becomes a backward unpack of the packed gradient.
  u_g = g.unpack(grad)
  # Forward inputs were (handle, flow); only flow carries a gradient.
  return [None, u_g.flow]
@ops.RegisterGradient("TensorArrayUnpack")
def _TensorArrayUnpackGrad(op, flow):
  """Gradient for TensorArrayUnpack.

  Args:
    op: Forward TensorArrayUnpack op.
    flow: Gradient `Tensor` flow to TensorArrayUnpack.

  Returns:
    A grad `Tensor`, the gradient created in upstream ReadGrads or PackGrad.
  """
  grad_array = tensor_array_ops.TensorArray(
      dtype=op.get_attr("T"), handle=op.inputs[0]).grad(
          source=_GetGradSource(flow), flow=flow)
  # A forward unpack becomes a backward pack of the accumulated gradients.
  # Forward inputs were (handle, value, flow).
  return [None, grad_array.pack(), flow]
@ops.RegisterGradient("TensorArrayConcat")
def _TensorArrayConcatGrad(op, grad, unused_lengths_grad):
  """Gradient for TensorArrayConcat.

  Args:
    op: Forward TensorArrayConcat op.
    grad: Gradient `Tensor` to TensorArrayConcat.
    unused_lengths_grad: Gradient for the second (lengths) output; unused.

  Returns:
    A flow `Tensor`, which can be used in control dependencies to
    force the write of `grad` to the gradient `TensorArray`.
  """
  # Note: the forward flow dependency in the call to grad() is necessary for
  # the case of dynamic sized TensorArrays. When creating the gradient
  # TensorArray, the final size of the forward array must be known.
  # For this we need to wait until it has been created by depending on
  # the input flow of the original op.
  handle = op.inputs[0]
  flow = op.inputs[1]
  # The forward op's lengths output tells us how to split the gradient back
  # into per-element pieces.
  lengths = op.outputs[1]
  dtype = op.get_attr("dtype")
  grad_source = _GetGradSource(grad)
  g = tensor_array_ops.TensorArray(dtype=dtype, handle=handle).grad(
      source=grad_source, flow=flow)
  # A forward concat becomes a backward split along the recorded lengths.
  u_g = g.split(grad, lengths=lengths)
  # handle, flow_in
  return [None, u_g.flow]
@ops.RegisterGradient("TensorArraySplit")
def _TensorArraySplitGrad(op, flow):
  """Gradient for TensorArraySplit.

  Args:
    op: Forward TensorArraySplit op.
    flow: Gradient `Tensor` flow to TensorArraySplit.

  Returns:
    A grad `Tensor`, the gradient created in upstream ReadGrads or PackGrad.
  """
  grad_array = tensor_array_ops.TensorArray(
      dtype=op.get_attr("T"), handle=op.inputs[0]).grad(
          source=_GetGradSource(flow), flow=flow)
  # A forward split becomes a backward concat of the accumulated gradients.
  # handle, value, lengths, flow_in
  return [None, grad_array.concat(), None, flow]
# pylint: enable=protected-access
| |
# flake8: NOQA E501
import inspect
import os
import sys
from os.path import dirname, relpath
import cihai
# Get the project root dir (the parent dir of the docs cwd) and make it
# importable so autodoc can find the package.
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
sys.path.insert(0, project_root)
# Local Sphinx extensions live in docs/_ext.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "_ext")))
# package data: load title/version/author/URL metadata from __about__.py
# without importing the cihai package itself.
about = {}
with open("../cihai/__about__.py") as fp:
    exec(fp.read(), about)
# Sphinx extensions; myst_parser enables Markdown sources alongside reST.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx_autodoc_typehints",
    "sphinx.ext.todo",
    "sphinx.ext.napoleon",
    "sphinx.ext.intersphinx",
    "sphinx.ext.linkcode",
    "sphinx_inline_tabs",
    "sphinx_copybutton",
    "sphinxext.opengraph",
    "myst_parser",
]
myst_enable_extensions = ["colon_fence", "substitution", "replacements"]
templates_path = ["_templates"]
source_suffix = {".rst": "restructuredtext", ".md": "markdown"}
master_doc = "index"
# Project metadata loaded from cihai/__about__.py (see `about` above).
project = about["__title__"]
copyright = about["__copyright__"]
# The short X.Y version.
# Bug fix: the previous form sliced the *joined string* to two characters
# (".".join(parts)[:2] -> e.g. "0."); the intent is the first two version
# components.
version = "%s" % ".".join(about["__version__"].split(".")[:2])
# The full version, including alpha/beta/rc tags.
release = "%s" % (about["__version__"])
exclude_patterns = ["_build"]
# Syntax highlighting styles (light and dark, via furo).
pygments_style = "monokai"
pygments_dark_style = "monokai"
# Extra files copied verbatim to the output root / static dirs.
html_extra_path = ["manifest.json"]
html_static_path = ["_static"]
html_css_files = ["css/custom.css"]
html_theme = "furo"
html_theme_options = {
"light_logo": "img/cihai.svg",
"dark_logo": "img/cihai.svg",
"footer_icons": [
{
"name": "GitHub",
"url": about["__github__"],
"html": """
<svg stroke="currentColor" fill="currentColor" stroke-width="0" viewBox="0 0 16 16">
<path fill-rule="evenodd" d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0 0 16 8c0-4.42-3.58-8-8-8z"></path>
</svg>
""",
"class": "",
},
],
}
html_theme_path = []
html_sidebars = {
"**": [
"sidebar/scroll-start.html",
"sidebar/brand.html",
"sidebar/search.html",
"sidebar/navigation.html",
"sidebar/projects.html",
"sidebar/scroll-end.html",
]
}
# sphinxext.opengraph
ogp_site_url = about["__docs__"]
ogp_image = "_static/img/icons/icon-192x192.png"
# NOTE(review): this option was previously misspelled "ogp_desscription_length"
# and assigned the description *string*, so it was silently ignored by
# sphinxext-opengraph. The real option expects an int (max characters for the
# og:description tag) — using the description's length preserves the full text;
# confirm the intended truncation length.
ogp_description_length = len(about["__description__"])
ogp_site_name = about["__title__"]
htmlhelp_basename = "%sdoc" % about["__title__"]
# LaTeX/PDF output.
latex_documents = [
    (
        "index",
        "{0}.tex".format(about["__package_name__"]),
        "{0} Documentation".format(about["__title__"]),
        about["__author__"],
        "manual",
    )
]
# Man page output (section 1).
man_pages = [
    (
        "index",
        about["__package_name__"],
        "{0} Documentation".format(about["__title__"]),
        about["__author__"],
        1,
    )
]
# Texinfo output.
texinfo_documents = [
    (
        "index",
        "{0}".format(about["__package_name__"]),
        "{0} Documentation".format(about["__title__"]),
        about["__author__"],
        about["__package_name__"],
        about["__description__"],
        "Miscellaneous",
    )
]
# Cross-project reference targets for intersphinx links.
intersphinx_mapping = {
    "python": ("http://docs.python.org/", None),
    "sphinx": ("http://www.sphinx-doc.org/en/stable/", None),
    "sqlalchemy": ("http://docs.sqlalchemy.org/en/latest/", None),
    "pandas": ("http://pandas.pydata.org/pandas-docs/stable", None),
    "unihan-etl": ("https://unihan-etl.git-pull.com/", None),
}
autodoc_member_order = "groupwise"
def linkcode_resolve(domain, info): # NOQA: C901
    """
    Determine the URL corresponding to Python object

    Returns a GitHub source link (with line anchors) for the documented
    object, or None when the object cannot be resolved to a source file.

    Notes
    -----
    From https://github.com/numpy/numpy/blob/v1.15.1/doc/source/conf.py, 7c49cfa
    on Jul 31. License BSD-3. https://github.com/numpy/numpy/blob/v1.15.1/LICENSE.txt
    """
    if domain != "py":
        return None
    modname = info["module"]
    fullname = info["fullname"]
    submod = sys.modules.get(modname)
    if submod is None:
        return None
    obj = submod
    # Walk the attribute path (e.g. "Class.method") down from the module.
    for part in fullname.split("."):
        try:
            obj = getattr(obj, part)
        except Exception:
            return None
    # strip decorators, which would resolve to the source of the decorator
    # possibly an upstream bug in getsourcefile, bpo-1764286
    try:
        unwrap = inspect.unwrap
    except AttributeError:
        # inspect.unwrap unavailable (very old Python); use the object as-is.
        pass
    else:
        obj = unwrap(obj)
    try:
        fn = inspect.getsourcefile(obj)
    except Exception:
        fn = None
    if not fn:
        return None
    try:
        source, lineno = inspect.getsourcelines(obj)
    except Exception:
        lineno = None
    # Anchor the link to the object's line span when known.
    if lineno:
        linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
    else:
        linespec = ""
    # Path of the source file relative to the installed package root.
    fn = relpath(fn, start=dirname(cihai.__file__))
    if "dev" in about["__version__"]:
        # Development builds link against the master branch.
        return "%s/blob/master/%s/%s%s" % (
            about["__github__"],
            about["__package_name__"],
            fn,
            linespec,
        )
    else:
        # Released builds link against the matching version tag.
        return "%s/blob/v%s/%s/%s%s" % (
            about["__github__"],
            about["__version__"],
            about["__package_name__"],
            fn,
            linespec,
        )
| |
from numpy import prod
import cupy
from cupy.cuda import cufft
from cupy.fft import config
from cupy.fft._fft import (_convert_fft_type, _default_fft_func, _fft,
_get_cufft_plan_nd, _get_fftn_out_size,
_output_dtype)
from cupy.fft._cache import get_plan_cache
def get_fft_plan(a, shape=None, axes=None, value_type='C2C'):
    """ Generate a CUDA FFT plan for transforming up to three axes.

    Args:
        a (cupy.ndarray): Array to be transform, assumed to be either C- or
            F- contiguous.
        shape (None or tuple of ints): Shape of the transformed axes of the
            output. If ``shape`` is not given, the lengths of the input along
            the axes specified by ``axes`` are used.
        axes (None or int or tuple of int): The axes of the array to
            transform. If `None`, it is assumed that all axes are transformed.
            Currently, for performing N-D transform these must be a set of up
            to three adjacent axes, and must include either the first or the
            last axis of the array.
        value_type (str): The FFT type to perform. Acceptable values are:

            * 'C2C': complex-to-complex transform (default)
            * 'R2C': real-to-complex transform
            * 'C2R': complex-to-real transform

    Returns:
        a cuFFT plan for either 1D transform (``cupy.cuda.cufft.Plan1d``) or
        N-D transform (``cupy.cuda.cufft.PlanNd``).

    .. note::
        The returned plan can not only be passed as one of the arguments of
        the functions in ``cupyx.scipy.fftpack``, but also be used as a
        context manager for both ``cupy.fft`` and ``cupyx.scipy.fftpack``
        functions:

        .. code-block:: python

            x = cupy.random.random(16).reshape(4, 4).astype(complex)
            plan = cupyx.scipy.fftpack.get_fft_plan(x)
            with plan:
                y = cupy.fft.fftn(x)
            # alternatively:
            y = cupyx.scipy.fftpack.fftn(x)  # no explicit plan is given!
            # alternatively:
            y = cupyx.scipy.fftpack.fftn(x, plan=plan)  # pass plan explicitly

        In the first case, no cuFFT plan will be generated automatically,
        even if ``cupy.fft.config.enable_nd_planning = True`` is set.

    .. note::
        If this function is called under the context of
        :func:`~cupy.fft.config.set_cufft_callbacks`, the generated plan will
        have callbacks enabled.

    .. warning::
        This API is a deviation from SciPy's, is currently experimental, and
        may be changed in the future version.
    """
    # check input array
    if a.flags.c_contiguous:
        order = 'C'
    elif a.flags.f_contiguous:
        order = 'F'
    else:
        raise ValueError('Input array a must be contiguous')
    # Normalize scalar shape/axes to 1-tuples.
    if isinstance(shape, int):
        shape = (shape,)
    if isinstance(axes, int):
        axes = (axes,)
    if (shape is not None) and (axes is not None) and len(shape) != len(axes):
        raise ValueError('Shape and axes have different lengths.')
    # check axes
    # n=1: 1d (need axis1D); n>1: Nd
    if axes is None:
        # Transform over all (or the last len(shape)) axes, as negative
        # indices counted from the end.
        n = a.ndim if shape is None else len(shape)
        axes = tuple(i for i in range(-n, 0))
        if n == 1:
            axis1D = 0
    else: # axes is a tuple
        n = len(axes)
        if n == 1:
            axis1D = axes[0]
            if axis1D >= a.ndim or axis1D < -a.ndim:
                err = 'The chosen axis ({0}) exceeds the number of '\
                      'dimensions of a ({1})'.format(axis1D, a.ndim)
                raise ValueError(err)
        elif n > 3:
            raise ValueError('Only up to three axes is supported')
    # Note that "shape" here refers to the shape along transformed axes, not
    # the shape of the output array, and we need to convert it to the latter.
    # The result is as if "a=_cook_shape(a); return a.shape" is called.
    # Because of this, we need to use (possibly unsorted) axes.
    transformed_shape = shape
    shape = list(a.shape)
    if transformed_shape is not None:
        for s, axis in zip(transformed_shape, axes):
            if s is not None:
                if axis == axes[-1] and value_type == 'C2R':
                    # C2R input along the last transformed axis holds only the
                    # non-redundant half-spectrum.
                    s = s // 2 + 1
                shape[axis] = s
    shape = tuple(shape)
    # check value_type
    out_dtype = _output_dtype(a.dtype, value_type)
    fft_type = _convert_fft_type(out_dtype, value_type)
    # TODO(leofang): figure out if we really have to skip F-order?
    if n > 1 and value_type != 'C2C' and a.flags.f_contiguous:
        raise ValueError('C2R/R2C PlanNd for F-order arrays is not supported')
    # generate plan
    # (load from cache if it exists, otherwise create one but don't cache it)
    if n > 1:  # ND transform
        if cupy.cuda.runtime.is_hip and value_type == 'C2R':
            raise RuntimeError("hipFFT's C2R PlanNd is buggy and unsupported")
        out_size = _get_fftn_out_size(
            shape, transformed_shape, axes[-1], value_type)
        # _get_cufft_plan_nd interacts with plan cache and callback
        plan = _get_cufft_plan_nd(
            shape, fft_type, axes=axes, order=order, out_size=out_size,
            to_cache=False)
    else:  # 1D transform
        # prepare plan arguments
        if value_type != 'C2R':
            out_size = shape[axis1D]
        else:
            out_size = _get_fftn_out_size(
                shape, transformed_shape, axis1D, value_type)
        # All non-transformed axes are folded into the batch dimension.
        batch = prod(shape) // shape[axis1D]
        devices = None if not config.use_multi_gpus else config._devices
        keys = (out_size, fft_type, batch, devices)
        mgr = config.get_current_callback_manager()
        if mgr is not None:
            # to avoid a weird segfault, we generate and cache distinct plans
            # for every possible (load_aux, store_aux) pairs; the plans are
            # still generated from the same external Python module
            load_aux = mgr.cb_load_aux_arr
            store_aux = mgr.cb_store_aux_arr
            keys += (mgr.cb_load, mgr.cb_store,
                     0 if load_aux is None else load_aux.data.ptr,
                     0 if store_aux is None else store_aux.data.ptr)
        cache = get_plan_cache()
        cached_plan = cache.get(keys)
        if cached_plan is not None:
            plan = cached_plan
        elif mgr is None:
            plan = cufft.Plan1d(out_size, fft_type, batch, devices=devices)
        else:  # has callback
            # TODO(leofang): support multi-GPU callback (devices is ignored)
            if devices:
                raise NotImplementedError('multi-GPU cuFFT callbacks are not '
                                          'yet supported')
            plan = mgr.create_plan(('Plan1d', keys[:-3]))
            mgr.set_callbacks(plan)
    return plan
def fft(x, n=None, axis=-1, overwrite_x=False, plan=None):
    """Compute the one-dimensional FFT.

    Args:
        x (cupy.ndarray): Array to be transformed.
        n (None or int): Length of the transformed axis of the output. If ``n``
            is not given, the length of the input along the axis specified by
            ``axis`` is used.
        axis (int): Axis over which to compute the FFT.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (:class:`cupy.cuda.cufft.Plan1d` or ``None``): a cuFFT plan for
            transforming ``x`` over ``axis``, which can be obtained using::

                plan = cupyx.scipy.fftpack.get_fft_plan(x, axis)

            Note that `plan` is defaulted to None, meaning CuPy will use an
            auto-generated plan behind the scene.

    Returns:
        cupy.ndarray:
            The transformed array which shape is specified by ``n`` and type
            will convert to complex if that of the input is another.

    .. note::
       The argument `plan` is currently experimental and the interface may be
       changed in the future version.

    .. seealso:: :func:`scipy.fftpack.fft`
    """
    # Delegate to the internal FFT helper with the forward cuFFT direction.
    return _fft(x, (n,), (axis,), None, cufft.CUFFT_FORWARD,
                overwrite_x=overwrite_x, plan=plan)
def ifft(x, n=None, axis=-1, overwrite_x=False, plan=None):
    """Compute the one-dimensional inverse FFT.

    Args:
        x (cupy.ndarray): Array to be transformed.
        n (None or int): Length of the transformed axis of the output. If ``n``
            is not given, the length of the input along the axis specified by
            ``axis`` is used.
        axis (int): Axis over which to compute the FFT.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (:class:`cupy.cuda.cufft.Plan1d` or ``None``): a cuFFT plan for
            transforming ``x`` over ``axis``, which can be obtained using::

                plan = cupyx.scipy.fftpack.get_fft_plan(x, axis)

            Note that `plan` is defaulted to None, meaning CuPy will use an
            auto-generated plan behind the scene.

    Returns:
        cupy.ndarray:
            The transformed array which shape is specified by ``n`` and type
            will convert to complex if that of the input is another.

    .. note::
       The argument `plan` is currently experimental and the interface may be
       changed in the future version.

    .. seealso:: :func:`scipy.fftpack.ifft`
    """
    # Delegate to the internal FFT helper with the inverse cuFFT direction.
    return _fft(x, (n,), (axis,), None, cufft.CUFFT_INVERSE,
                overwrite_x=overwrite_x, plan=plan)
def fft2(x, shape=None, axes=(-2, -1), overwrite_x=False, plan=None):
    """Compute the two-dimensional FFT.

    Args:
        x (cupy.ndarray): Array to be transformed.
        shape (None or tuple of ints): Shape of the transformed axes of the
            output. If ``shape`` is not given, the lengths of the input along
            the axes specified by ``axes`` are used.
        axes (tuple of ints): Axes over which to compute the FFT.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (:class:`cupy.cuda.cufft.PlanNd` or ``None``): a cuFFT plan for
            transforming ``x`` over ``axes``, which can be obtained using::

                plan = cupyx.scipy.fftpack.get_fft_plan(x, axes)

            Note that `plan` is defaulted to None, meaning CuPy will either
            use an auto-generated plan behind the scene if cupy.fft.config.
            enable_nd_planning = True, or use no cuFFT plan if it is set to
            False.

    Returns:
        cupy.ndarray:
            The transformed array which shape is specified by ``shape`` and
            type will convert to complex if that of the input is another.

    .. seealso:: :func:`scipy.fftpack.fft2`

    .. note::
       The argument `plan` is currently experimental and the interface may be
       changed in the future version.
    """
    # Pick the ND implementation appropriate for the given plan/config, then
    # run it in the forward direction.
    func = _default_fft_func(x, shape, axes, plan)
    return func(x, shape, axes, None, cufft.CUFFT_FORWARD,
                overwrite_x=overwrite_x, plan=plan)
def ifft2(x, shape=None, axes=(-2, -1), overwrite_x=False, plan=None):
    """Compute the two-dimensional inverse FFT.

    Args:
        x (cupy.ndarray): Array to be transformed.
        shape (None or tuple of ints): Shape of the transformed axes of the
            output. If ``shape`` is not given, the lengths of the input along
            the axes specified by ``axes`` are used.
        axes (tuple of ints): Axes over which to compute the FFT.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (:class:`cupy.cuda.cufft.PlanNd` or ``None``): a cuFFT plan for
            transforming ``x`` over ``axes``, which can be obtained using::

                plan = cupyx.scipy.fftpack.get_fft_plan(x, axes)

            Note that `plan` is defaulted to None, meaning CuPy will either
            use an auto-generated plan behind the scene if cupy.fft.config.
            enable_nd_planning = True, or use no cuFFT plan if it is set to
            False.

    Returns:
        cupy.ndarray:
            The transformed array which shape is specified by ``shape`` and
            type will convert to complex if that of the input is another.

    .. seealso:: :func:`scipy.fftpack.ifft2`

    .. note::
       The argument `plan` is currently experimental and the interface may be
       changed in the future version.
    """
    # Pick the ND implementation appropriate for the given plan/config, then
    # run it in the inverse direction.
    func = _default_fft_func(x, shape, axes, plan)
    return func(x, shape, axes, None, cufft.CUFFT_INVERSE,
                overwrite_x=overwrite_x, plan=plan)
def fftn(x, shape=None, axes=None, overwrite_x=False, plan=None):
    """Compute the N-dimensional FFT.

    Args:
        x (cupy.ndarray): Array to be transformed.
        shape (None or tuple of ints): Shape of the transformed axes of the
            output. If ``shape`` is not given, the lengths of the input along
            the axes specified by ``axes`` are used.
        axes (tuple of ints): Axes over which to compute the FFT.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (:class:`cupy.cuda.cufft.PlanNd` or ``None``): a cuFFT plan for
            transforming ``x`` over ``axes``, which can be obtained using::

                plan = cupyx.scipy.fftpack.get_fft_plan(x, axes)

            Note that `plan` is defaulted to None, meaning CuPy will either
            use an auto-generated plan behind the scene if cupy.fft.config.
            enable_nd_planning = True, or use no cuFFT plan if it is set to
            False.

    Returns:
        cupy.ndarray:
            The transformed array which shape is specified by ``shape`` and
            type will convert to complex if that of the input is another.

    .. seealso:: :func:`scipy.fftpack.fftn`

    .. note::
       The argument `plan` is currently experimental and the interface may be
       changed in the future version.
    """
    # Pick the ND implementation appropriate for the given plan/config, then
    # run it in the forward direction.
    func = _default_fft_func(x, shape, axes, plan)
    return func(x, shape, axes, None, cufft.CUFFT_FORWARD,
                overwrite_x=overwrite_x, plan=plan)
def ifftn(x, shape=None, axes=None, overwrite_x=False, plan=None):
    """Compute the N-dimensional inverse FFT.

    Args:
        x (cupy.ndarray): Array to be transformed.
        shape (None or tuple of ints): Shape of the transformed axes of the
            output. If ``shape`` is not given, the lengths of the input along
            the axes specified by ``axes`` are used.
        axes (tuple of ints): Axes over which to compute the FFT.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (:class:`cupy.cuda.cufft.PlanNd` or ``None``): a cuFFT plan for
            transforming ``x`` over ``axes``, which can be obtained using::

                plan = cupyx.scipy.fftpack.get_fft_plan(x, axes)

            Note that `plan` is defaulted to None, meaning CuPy will either
            use an auto-generated plan behind the scene if cupy.fft.config.
            enable_nd_planning = True, or use no cuFFT plan if it is set to
            False.

    Returns:
        cupy.ndarray:
            The transformed array which shape is specified by ``shape`` and
            type will convert to complex if that of the input is another.

    .. seealso:: :func:`scipy.fftpack.ifftn`

    .. note::
       The argument `plan` is currently experimental and the interface may be
       changed in the future version.
    """
    # Pick the ND implementation appropriate for the given plan/config, then
    # run it in the inverse direction.
    func = _default_fft_func(x, shape, axes, plan)
    return func(x, shape, axes, None, cufft.CUFFT_INVERSE,
                overwrite_x=overwrite_x, plan=plan)
def rfft(x, n=None, axis=-1, overwrite_x=False, plan=None):
    """Compute the one-dimensional FFT for real input.

    The returned real array contains

    .. code-block:: python

        [y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2))]            # if n is even
        [y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2)),Im(y(n/2))] # if n is odd

    Args:
        x (cupy.ndarray): Array to be transformed.
        n (None or int): Length of the transformed axis of the output. If ``n``
            is not given, the length of the input along the axis specified by
            ``axis`` is used.
        axis (int): Axis over which to compute the FFT.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (:class:`cupy.cuda.cufft.Plan1d` or ``None``): a cuFFT plan for
            transforming ``x`` over ``axis``, which can be obtained using::

                plan = cupyx.scipy.fftpack.get_fft_plan(
                    x, axes, value_type='R2C')

            Note that `plan` is defaulted to None, meaning CuPy will either
            use an auto-generated plan behind the scene if cupy.fft.config.
            enable_nd_planning = True, or use no cuFFT plan if it is set to
            False.

    Returns:
        cupy.ndarray:
            The transformed array.

    .. seealso:: :func:`scipy.fftpack.rfft`

    .. note::
       The argument `plan` is currently experimental and the interface may be
       changed in the future version.
    """
    if n is None:
        n = x.shape[axis]
    # Output is real and has the full length n along `axis`.
    shape = list(x.shape)
    shape[axis] = n
    # Half-spectrum R2C transform (length n//2 + 1 along `axis`).
    f = _fft(x, (n,), (axis,), None, cufft.CUFFT_FORWARD, 'R2C',
             overwrite_x=overwrite_x, plan=plan)
    z = cupy.empty(shape, f.real.dtype)
    slice_z = [slice(None)] * x.ndim
    slice_f = [slice(None)] * x.ndim
    # Slot 0 <- y(0) (the DC term's real part).
    slice_z[axis] = slice(1)
    slice_f[axis] = slice(1)
    z[tuple(slice_z)] = f[tuple(slice_f)].real
    # Odd slots <- Re(y(1)), Re(y(2)), ...
    slice_z[axis] = slice(1, None, 2)
    slice_f[axis] = slice(1, None)
    z[tuple(slice_z)] = f[tuple(slice_f)].real
    # Even slots <- Im(y(1)), Im(y(2)), ...; the upper bound trims the last
    # imaginary part when n is even (Nyquist term has none in the packed
    # format above).
    slice_z[axis] = slice(2, None, 2)
    slice_f[axis] = slice(1, n - f.shape[axis] + 1)
    z[tuple(slice_z)] = f[tuple(slice_f)].imag
    return z
def irfft(x, n=None, axis=-1, overwrite_x=False):
    """Compute the one-dimensional inverse FFT for real input.

    The input is expected in the packed real format produced by
    :func:`rfft` (``[y(0), Re(y(1)), Im(y(1)), ...]``).

    Args:
        x (cupy.ndarray): Array to be transformed.
        n (None or int): Length of the transformed axis of the output. If ``n``
            is not given, the length of the input along the axis specified by
            ``axis`` is used.
        axis (int): Axis over which to compute the FFT.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.

    Returns:
        cupy.ndarray:
            The transformed array.

    .. seealso:: :func:`scipy.fftpack.irfft`

    .. note::
       This function does not support a precomputed `plan`. If you need this
       capability, please consider using :func:`cupy.fft.irfft` or :func:`
       cupyx.scipy.fft.irfft`.
    """
    if n is None:
        n = x.shape[axis]
    # m limits reads to the available packed input when n > x.shape[axis].
    m = min(n, x.shape[axis])
    # Rebuild the complex half-spectrum (length n//2 + 1) expected by C2R.
    shape = list(x.shape)
    shape[axis] = n // 2 + 1
    if x.dtype in (cupy.float16, cupy.float32):
        z = cupy.zeros(shape, dtype=cupy.complex64)
    else:
        z = cupy.zeros(shape, dtype=cupy.complex128)
    slice_x = [slice(None)] * x.ndim
    slice_z = [slice(None)] * x.ndim
    # DC term: purely real.
    slice_x[axis] = slice(1)
    slice_z[axis] = slice(1)
    z[tuple(slice_z)].real = x[tuple(slice_x)]
    # Odd packed slots -> real parts of y(1), y(2), ...
    slice_x[axis] = slice(1, m, 2)
    slice_z[axis] = slice(1, m // 2 + 1)
    z[tuple(slice_z)].real = x[tuple(slice_x)]
    # Even packed slots -> imaginary parts of y(1), y(2), ...
    slice_x[axis] = slice(2, m, 2)
    slice_z[axis] = slice(1, (m + 1) // 2)
    z[tuple(slice_z)].imag = x[tuple(slice_x)]
    # Inverse C2R transform back to a real array of length n along `axis`.
    return _fft(z, (n,), (axis,), None, cufft.CUFFT_INVERSE, 'C2R',
                overwrite_x=overwrite_x)
| |
# -*- coding: utf-8 -*-
# 1st-run initialisation
# designed to be called from Crontab's @reboot
# however this isn't reliable (doesn't work on Win32 Service) so still in models for now...
# Deployments can change settings live via appadmin
# Set deployment_settings.base.prepopulate to False in Production (to save 1x DAL hit every page)
# Populate only when prepopulation is enabled AND the settings table is still
# empty (i.e. this really is the first run). Short-circuiting keeps the DB
# count query from running when prepopulation is disabled, as before.
populate = bool(deployment_settings.get_base_prepopulate()
                and not db(db["s3_setting"].id > 0).count())
if populate:
    # ------------------------------------------------------------------------
    # Seed the empty database with default records.
    # Each section guards with "if not db(table.id > 0).count()" so existing
    # data is never duplicated.
    # ------------------------------------------------------------------------
    # Themes
    tablename = "admin_theme"
    table = db[tablename]
    if not db(table.id > 0).count():
        # Colour values are hex RGB strings without the leading "#"
        table.insert(
            name = T("Sahana Blue"),
            logo = "img/sahanapy_logo.png",
            #header_background = "img/header_bg.png",
            #footer = "footer.html",
            col_background = "336699",
            col_menu = "0066cc",
            col_highlight = "0077aa",
            col_txt_background = "f3f6ff",
            col_txt_border = "c6d1f5",
            col_txt_underline = "003366",
            col_txt = "006699",
            col_input = "ffffcc",
            col_border_btn_out = "6699cc",
            col_border_btn_in = "4589ce",
            col_btn_hover = "3377bb",
        )
        table.insert(
            name = T("Sahana Green"),
            logo = "img/sahanapy_logo_green.png",
            #header_background = "img/header_bg.png",
            #footer = "footer.html",
            col_background = "337733",
            col_menu = "cc7722",
            col_highlight = "338833",
            col_txt_background = "f3f6ff",
            col_txt_border = "c6d1f5",
            col_txt_underline = "003366",
            col_txt = "006699",
            col_input = "ffffcc",
            col_border_btn_out = "6699cc",
            col_border_btn_in = "4589ce",
            col_btn_hover = "3377bb",
        )
        table.insert(
            # Needs work
            # - some colours need changing independently of each other
            # - logo size needs storing
            name = T("Sahana Steel"),
            logo = "img/sahanapy_logo_ideamonk.png",
            #header_background = "img/header_bg.png",
            #footer = "footer.html",
            col_background = "dbdbdb",
            col_menu = "0066cc",
            col_highlight = "0077aa",
            col_txt_background = "f3f6ff",
            col_txt_border = "c6d1f5",
            col_txt_underline = "003366",
            col_txt = "eeeeee",
            col_input = "ffffcc",
            col_border_btn_out = "c6d1f5",
            col_border_btn_in = "4589ce",
            col_btn_hover = "3377bb",
        )
    # Global Settings
    tablename = "s3_setting"
    table = db[tablename]
    # Ensure that the theme we defined is in the DB ready to be used as a FK
    db.commit()
    if not db(table.id > 0).count():
        # theme=1 refers to the first admin_theme record inserted above
        table.insert(
            admin_name = T("Sahana Administrator").xml(),
            admin_email = "support@Not Set",
            admin_tel = T("Not Set").xml(),
            theme = 1
        )
    # Organisation Registry
    # Default UN Cluster list (abrv is used as a lookup key further below)
    tablename = "org_cluster"
    table = db[tablename]
    if not db(table.id > 0).count():
        table.insert(
            abrv = T("Agriculture"),
            name = T("Agriculture")
        )
        table.insert(
            abrv = T("Camp"),
            name = T("Camp Coordination/Management")
        )
        table.insert(
            abrv = T("Recovery"),
            name = T("Early Recovery")
        )
        table.insert(
            abrv = T("Education"),
            name = T("Education")
        )
        table.insert(
            abrv = T("Shelter"),
            name = T("Emergency Shelter")
        )
        table.insert(
            abrv = T("Telecommunications"),
            name = T("Emergency Telecommunications")
        )
        table.insert(
            abrv = T("Health"),
            name = T("Health")
        )
        table.insert(
            abrv = T("Logistics"),
            name = T("Logistics")
        )
        table.insert(
            abrv = T("Nutrition"),
            name = T("Nutrition")
        )
        table.insert(
            abrv = T("Protection"),
            name = T("Protection")
        )
        table.insert(
            abrv = T("WASH"),
            name = T("Water Sanitation Hygiene")
        )
    tablename = "org_cluster_subsector"
    table = db[tablename]
    # Ensure that the clusters we defined are in the DB ready to be used as a FK
    db.commit()
    if not db(table.id > 0).count():
        # Look up the parent cluster ids inserted above
        cluster_shelter = db(db.org_cluster.abrv == "Shelter").select(db.org_cluster.id, limitby=(0, 1)).first().id
        cluster_nutrition = db(db.org_cluster.abrv == "Nutrition").select(db.org_cluster.id, limitby=(0, 1)).first().id
        cluster_wash = db(db.org_cluster.abrv == "WASH").select(db.org_cluster.id, limitby=(0, 1)).first().id
        table.insert(
            cluster_id = cluster_shelter,
            abrv = T("Clothing")
        )
        table.insert(
            cluster_id = cluster_shelter,
            abrv = T("Shelter")
        )
        table.insert(
            cluster_id = cluster_nutrition,
            abrv = T("Cooking NFIs")
        )
        table.insert(
            cluster_id = cluster_nutrition,
            abrv = T("Food Supply")
        )
        table.insert(
            cluster_id = cluster_wash,
            abrv = T("Aggravating factors")
        )
        table.insert(
            cluster_id = cluster_wash,
            abrv = T("Disease vectors")
        )
        table.insert(
            cluster_id = cluster_wash,
            abrv = T("Drainage")
        )
        table.insert(
            cluster_id = cluster_wash,
            abrv = T("Excreta disposal")
        )
        table.insert(
            cluster_id = cluster_wash,
            abrv = T("Hygiene NFIs")
        )
        table.insert(
            cluster_id = cluster_wash,
            abrv = T("Hygiene practice")
        )
        table.insert(
            cluster_id = cluster_wash,
            abrv = T("Solid waste")
        )
        table.insert(
            cluster_id = cluster_wash,
            abrv = T("Water supply")
        )
# Person Registry
tablename = "pr_person"
table = db[tablename]
# Should work for our 3 supported databases: sqlite, MySQL & PostgreSQL
field = "first_name"
db.executesql("CREATE INDEX %s__idx on %s(%s);" % (field, tablename, field))
field = "middle_name"
db.executesql("CREATE INDEX %s__idx on %s(%s);" % (field, tablename, field))
field = "last_name"
db.executesql("CREATE INDEX %s__idx on %s(%s);" % (field, tablename, field))
# Synchronisation
tablename = "sync_setting"
table = db[tablename]
if not db(table.id > 0).count():
table.insert(proxy="")
    # Incident Reporting System
    if "irs" in deployment_settings.modules:
        # Categories visible to end-users by default
        tablename = "irs_icategory"
        table = db[tablename]
        if not db(table.id > 0).count():
            table.insert(code = "flood")
            table.insert(code = "geophysical.landslide")
            table.insert(code = "roadway.bridgeClosure")
            table.insert(code = "roadway.roadwayClosure")
            table.insert(code = "other.buildingCollapsed")
            table.insert(code = "other.peopleTrapped")
            table.insert(code = "other.powerFailure")
    # Messaging Module
    if "msg" in deployment_settings.modules:
        tablename = "msg_email_settings"
        table = db[tablename]
        if not db(table.id > 0).count():
            # Placeholder IMAP credentials - must be replaced per-deployment
            table.insert(
                inbound_mail_server = "imap.gmail.com",
                inbound_mail_type = "imap",
                inbound_mail_ssl = True,
                inbound_mail_port = 993,
                inbound_mail_username = "username",
                inbound_mail_password = "password",
                inbound_mail_delete = False,
                #outbound_mail_server = "mail:25",
                #outbound_mail_from = "demo@sahanafoundation.org",
            )
        # Need entries for the Settings/1/Update URLs to work
        tablename = "msg_setting"
        table = db[tablename]
        if not db(table.id > 0).count():
            table.insert( outgoing_sms_handler = "Gateway" )
        tablename = "msg_modem_settings"
        table = db[tablename]
        if not db(table.id > 0).count():
            table.insert( modem_baud = 115200 )
        tablename = "msg_gateway_settings"
        table = db[tablename]
        if not db(table.id > 0).count():
            table.insert( to_variable = "to" )
        tablename = "msg_tropo_settings"
        table = db[tablename]
        if not db(table.id > 0).count():
            table.insert( token_messaging = "" )
        tablename = "msg_twitter_settings"
        table = db[tablename]
        if not db(table.id > 0).count():
            table.insert( pin = "" )
    # Assessment
    if "assess" in deployment_settings.modules:
        tablename = "assess_baseline_type"
        table = db[tablename]
        if not db(table.id > 0).count():
            table.insert( name = "# of population")
            table.insert( name = "# of households" )
            table.insert( name = "# of children under 5" )
            table.insert( name = "# of children" )
            table.insert( name = "# of cattle" )
            table.insert( name = "Ha. of fields" )
    # Impacts
    if deployment_settings.has_module("irs") or deployment_settings.has_module("assess"):
        tablename = "impact_type"
        table = db[tablename]
        if not db(table.id > 0).count():
            table.insert( name = "# of People Affected" )
            # NOTE(review): no org_cluster row above has abrv "Food", so this
            # lookup presumably yields None - verify (should it be "Nutrition"?)
            table.insert( name = "# People Needing Food",
                          cluster_id = \
                          shn_get_db_field_value(db = db,
                                                 table = "org_cluster",
                                                 field = "id",
                                                 look_up = "Food",
                                                 look_up_field = "abrv")
                        )
            table.insert( name = "# People at Risk From Vector-Borne Diseases",
                          cluster_id = \
                          shn_get_db_field_value(db = db,
                                                 table = "org_cluster",
                                                 field = "id",
                                                 look_up = "Health",
                                                 look_up_field = "abrv")
                        )
            table.insert( name = "# People without Access to Safe Drinking-Water",
                          cluster_id = \
                          shn_get_db_field_value(db = db,
                                                 table = "org_cluster",
                                                 field = "id",
                                                 look_up = "WASH",
                                                 look_up_field = "abrv")
                        )
            table.insert( name = "# Houses Damaged",
                          cluster_id = \
                          shn_get_db_field_value(db = db,
                                                 table = "org_cluster",
                                                 field = "id",
                                                 look_up = "Shelter",
                                                 look_up_field = "abrv")
                        )
            table.insert( name = "# Houses Flooded",
                          cluster_id = \
                          shn_get_db_field_value(db = db,
                                                 table = "org_cluster",
                                                 field = "id",
                                                 look_up = "Shelter",
                                                 look_up_field = "abrv")
                        )
            table.insert( name = "Water Level still high?",
                          cluster_id = \
                          shn_get_db_field_value(db = db,
                                                 table = "org_cluster",
                                                 field = "id",
                                                 look_up = "Shelter",
                                                 look_up_field = "abrv")
                        )
            table.insert( name = "Ha. Fields Flooded",
                          cluster_id = \
                          shn_get_db_field_value(db = db,
                                                 table = "org_cluster",
                                                 field = "id",
                                                 look_up = "Agriculture",
                                                 look_up_field = "abrv")
                        )
# Supply / Inventory
tablename = "supply_item_category"
table = db[tablename]
if not db(table.id > 0).count():
#shn_import_table("supply_item_category")
table.insert( name = "Agriculture" )
#table.insert( name = "Clothing" )
#table.insert( name = "Equipment" )
table.insert( name = "Food" )
table.insert( name = "Health" )
#table.insert( name = "NFIs" )
table.insert( name = "Shelter" )
#table.insert( name = "Transport" )
table.insert( name = "WASH" )
tablename = "supply_item"
table = db[tablename]
if not db(table.id > 0).count():
#shn_import_table("supply_item_pakistan")
agriculture = db(db.supply_item_category.name == "Agriculture").select(db.supply_item_category.id, limitby=(0, 1)).first().id
food = db(db.supply_item_category.name == "Food").select(db.supply_item_category.id, limitby=(0, 1)).first().id
health = db(db.supply_item_category.name == "Health").select(db.supply_item_category.id, limitby=(0, 1)).first().id
shelter = db(db.supply_item_category.name == "Shelter").select(db.supply_item_category.id, limitby=(0, 1)).first().id
wash = db(db.supply_item_category.name == "WASH").select(db.supply_item_category.id, limitby=(0, 1)).first().id
table.insert(
item_category_id = agriculture,
name = "Rice Seed",
base_unit = "sack20kg",
comments = "This should provide enough seed for 1 Hectare of land"
)
table.insert(
item_category_id = food,
name = "Rice",
base_unit = "sack50kg",
comments = "This should feed 125 people for 1 day"
)
table.insert(
item_category_id = food,
name = "Cooking Utensils",
base_unit = "kit",
comments = "Cooking Utensils for a Household"
)
table.insert(
item_category_id = health,
name = "First Ait Kit",
base_unit = "kit",
comments = "This should provide basic first aid (bandages, oral rehydration salts, etc) for 100 people to self-administer"
)
table.insert(
item_category_id = health,
name = "Medical Kit",
base_unit = "kit",
comments = "This should provide medical supplies (medicines, vaccines) for a professional clinic to provide assistance to a total community of 10,000 people."
)
table.insert(
item_category_id = shelter,
name = "Shelter Kit",
base_unit = "kit",
comments = "This kit is suitable to provide emergency repair to a damaged home. It contains a tarpaulin, zinc sheet, wooden poles, hammer & nails"
)
table.insert(
item_category_id = shelter,
name = "Tent",
base_unit = "piece",
comments = "This should house a family of up to 8 people"
)
table.insert(
item_category_id = wash,
name = "Hygiene Kit",
base_unit = "kit",
comments = "Personal Hygiene supplies for 100 Households (5 persons/household): Each get 2x Buckets, 10x Soap, Cotton cloth"
)
table.insert(
item_category_id = wash,
name = "Water Purification Sachets",
base_unit = "kit",
comments = "Designed to provide a 1st phase drinking water purification solution at the household level. Contains 600 sachets to provide sufficient drinking water (4l) for 100 people for 30 days."
)
# enter base_unit as packets
item_rows = db(table.id > 0).select(table.id, table.base_unit)
for item_row in item_rows:
db.supply_item_packet.insert(
item_id = item_row.id,
name = item_row.base_unit,
quantity = 1
)
    # Project Module
    if deployment_settings.has_module("project"):
        tablename = "project_need_type"
        table = db[tablename]
        if not db(table.id > 0).count():
            table.insert( name = T("People Needing Food") )
            table.insert( name = T("People Needing Water") )
            table.insert( name = T("People Needing Shelter") )
    # Budget Module
    if "budget" in deployment_settings.modules:
        tablename = "budget_parameter"
        table = db[tablename]
        if not db(table.id > 0).count():
            # Insert one record using the table's column defaults
            table.insert(
            )
    # Logistics (old)
    if "lms" in deployment_settings.modules:
        tablename = "lms_catalog"
        table = db[tablename]
        if not db(table.id > 0).count():
            table.insert(
                name="Default",
                description="Default Catalog",
                comments="All items are by default added to this Catalog"
            )
    # Ticketing System
    if "ticket" in deployment_settings.modules:
        tablename = "ticket_category"
        table = db[tablename]
        if not db(table.id > 0).count():
            table.insert( name = "Report Missing Person" )
            table.insert( name = "Report Security Incident" )
            table.insert( name = "Report Information" )
            table.insert( name = "Request for Assistance" )
            table.insert( name = "Offer of Help" )
    # GIS Module
    # Default map markers; later sections reference these rows by name as FKs
    tablename = "gis_marker"
    table = db[tablename]
    # Can't do sub-folders :/
    # need a script to read in the list of default markers from the filesystem, copy/rename & populate the DB 1 by 1
    if not db(table.id > 0).count():
        # We want to start at ID 1, but postgres won't let us truncate() & not needed anyway this is only run on 1st_run.
        #table.truncate()
        table.insert(
            name = "marker_red",
            height = 34,
            width = 20,
            image = "gis_marker.image.marker_red.png"
        )
        table.insert(
            name = "marker_yellow",
            height = 34,
            width = 20,
            image = "gis_marker.image.marker_yellow.png"
        )
        table.insert(
            name = "marker_amber",
            height = 34,
            width = 20,
            image = "gis_marker.image.marker_amber.png"
        )
        table.insert(
            name = "marker_green",
            height = 34,
            width = 20,
            image = "gis_marker.image.marker_green.png"
        )
        table.insert(
            name = "person",
            height = 50,
            width = 50,
            image = "gis_marker.image.Civil_Disturbance_Theme.png"
        )
        table.insert(
            name = "school",
            height = 33,
            width = 44,
            image = "gis_marker.image.Edu_Schools_S1.png"
        )
        table.insert(
            name = "food",
            height = 40,
            width = 40,
            image = "gis_marker.image.Emergency_Food_Distribution_Centers_S1.png"
        )
        table.insert(
            name = "office",
            height = 40,
            width = 40,
            image = "gis_marker.image.Emergency_Operations_Center_S1.png"
        )
        table.insert(
            name = "shelter",
            height = 40,
            width = 40,
            image = "gis_marker.image.Emergency_Shelters_S1.png"
        )
        table.insert(
            name = "activity",
            height = 40,
            width = 40,
            image = "gis_marker.image.Emergency_Teams_S1.png"
        )
        table.insert(
            name = "hospital",
            height = 40,
            width = 40,
            image = "gis_marker.image.E_Med_Hospital_S1.png"
        )
        table.insert(
            name = "earthquake",
            height = 50,
            width = 50,
            image = "gis_marker.image.Geo_Earth_Quake_Epicenter.png"
        )
        table.insert(
            name = "volcano",
            height = 50,
            width = 50,
            image = "gis_marker.image.Geo_Volcanic_Threat.png"
        )
        table.insert(
            name = "tsunami",
            height = 50,
            width = 50,
            image = "gis_marker.image.Hydro_Meteor_Tsunami_ch.png"
        )
        table.insert(
            name = "church",
            height = 33,
            width = 44,
            image = "gis_marker.image.Public_Venue_Church_S1.png"
        )
        table.insert(
            name = "mosque",
            height = 33,
            width = 44,
            image = "gis_marker.image.Public_Venue_Mosque_S1.png"
        )
        table.insert(
            name = "temple",
            height = 33,
            width = 44,
            image = "gis_marker.image.Public_Venue_Temple_S1.png"
        )
        table.insert(
            name = "phone",
            height = 10,
            width = 5,
            image = "gis_marker.image.SMS_Message_Phone.png"
        )
        table.insert(
            name = "orphanage",
            height = 33,
            width = 44,
            image = "gis_marker.image.Special_Needs_Child_Day_Care_S1.png"
        )
        table.insert(
            name = "airport",
            height = 33,
            width = 44,
            image = "gis_marker.image.Trans_Airport_S1.png"
        )
        table.insert(
            name = "bridge",
            height = 33,
            width = 44,
            image = "gis_marker.image.Trans_Bridge_S1.png"
        )
        table.insert(
            name = "helicopter",
            height = 33,
            width = 44,
            image = "gis_marker.image.Trans_Helicopter_Landing_Site_S1.png"
        )
        table.insert(
            name = "port",
            height = 33,
            width = 44,
            image = "gis_marker.image.Trans_Port_S1.png"
        )
        table.insert(
            name = "rail_station",
            height = 33,
            width = 44,
            image = "gis_marker.image.Trans_Rail_Station_S1.png"
        )
        table.insert(
            name = "vehicle",
            height = 50,
            width = 50,
            image = "gis_marker.image.Transport_Vehicle_Theme.png"
        )
        table.insert(
            name = "water",
            height = 33,
            width = 44,
            image = "gis_marker.image.Water_Supply_Infrastructure_Theme_S1.png"
        )
        table.insert(
            name = "volunteer",
            height = 40,
            width = 39,
            image = "gis_marker.image.Volunteer.png"
        )
    tablename = "gis_symbology"
    table = db[tablename]
    if not db(table.id > 0).count():
        table.insert(
            name = "Australasia"
        )
        table.insert(
            name = "Canada"
        )
        table.insert(
            name = "US"
        )
    tablename = "gis_projection"
    table = db[tablename]
    if not db(table.id > 0).count():
        # We want to start at ID 1, but postgres won't let us truncate() & not needed anyway this is only run on 1st_run.
        #table.truncate()
        table.insert(
            uuid = "www.sahanafoundation.org/GIS-PROJECTION-900913",
            name = "Spherical Mercator",
            epsg = 900913,
            maxExtent = "-20037508, -20037508, 20037508, 20037508.34",
            maxResolution = 156543.0339,
            units = "m"
        )
        table.insert(
            uuid = "www.sahanafoundation.org/GIS-PROJECTION-4326",
            name = "WGS84",
            epsg = 4326,
            maxExtent = "-180,-90,180,90",
            maxResolution = 1.40625,
            units = "degrees"
            # OSM use these:
            #maxResolution = 156543.0339,
            #units = "m"
        )
    tablename = "gis_config"
    table = db[tablename]
    # Ensure that the projection/marker we defined are in the DB ready to be used as FKs
    db.commit()
    # NOTE(review): assumes the "US" symbology row exists - .first() would be
    # None on a partially-populated DB
    symbology_us = db(db.gis_symbology.name == "US").select(db.gis_symbology.id, limitby=(0, 1)).first().id
    if not db(table.id > 0).count():
        # We want to start at ID 1, but postgres won't let us truncate() & not needed anyway this is only run on 1st_run.
        #table.truncate()
        # Default map view (projection_id/marker_id=1 -> first rows inserted above)
        table.insert(
            lat = "51.8",
            lon = "-1.3",
            zoom = 7,
            projection_id = 1,
            marker_id = 1,
            map_height = 600,
            map_width = 1000,
            symbology_id = symbology_us,
            wmsbrowser_url = "http://geo.eden.sahanafoundation.org/geoserver/wms?service=WMS&request=GetCapabilities"
        )
    tablename = "gis_feature_class"
    table = db[tablename]
    if not db(table.id > 0).count():
        table.insert(
            uuid = "www.sahanafoundation.org/GIS-FEATURE-CLASS-TRACK",
            name = "Track",
            gps_marker = "TracBack Point",
            resource = "gis_track"
        )
        table.insert(
            uuid = "www.sahanafoundation.org/GIS-FEATURE-CLASS-L0",
            name = "Country",
        )
        table.insert(
            uuid = "www.sahanafoundation.org/GIS-FEATURE-CLASS-L1",
            name = "Province",
        )
        table.insert(
            uuid = "www.sahanafoundation.org/GIS-FEATURE-CLASS-L2",
            name = "District",
        )
        table.insert(
            uuid = "www.sahanafoundation.org/GIS-FEATURE-CLASS-L3",
            name = "Town",
            gps_marker = "City (Medium)",
        )
        table.insert(
            uuid = "www.sahanafoundation.org/GIS-FEATURE-CLASS-L4",
            name = "Village",
            gps_marker = "City (Small)",
        )
        table.insert(
            uuid = "www.sahanafoundation.org/GIS-FEATURE-CLASS-AIRPORT",
            name = "Airport",
            symbology_id = symbology_us,
            marker_id = db(db.gis_marker.name == "airport").select(db.gis_marker.id, limitby=(0, 1)).first().id,
            gps_marker = "Airport",
        )
        table.insert(
            uuid = "www.sahanafoundation.org/GIS-FEATURE-CLASS-BRIDGE",
            name = "Bridge",
            symbology_id = symbology_us,
            marker_id = db(db.gis_marker.name == "bridge").select(db.gis_marker.id, limitby=(0, 1)).first().id,
            gps_marker = "Bridge",
        )
        table.insert(
            uuid = "www.sahanafoundation.org/GIS-FEATURE-CLASS-CHURCH",
            name = "Church",
            symbology_id = symbology_us,
            marker_id = db(db.gis_marker.name == "church").select(db.gis_marker.id, limitby=(0, 1)).first().id,
            gps_marker = "Church",
        )
        table.insert(
            uuid = "www.sahanafoundation.org/GIS-FEATURE-CLASS-FOOD",
            name = "Food",
            symbology_id = symbology_us,
            marker_id = db(db.gis_marker.name == "food").select(db.gis_marker.id, limitby=(0, 1)).first().id,
            gps_marker = "Restaurant",
        )
        table.insert(
            uuid = "www.sahanafoundation.org/GIS-FEATURE-CLASS-HOSPITAL",
            name = "Hospital",
            symbology_id = symbology_us,
            marker_id = db(db.gis_marker.name == "hospital").select(db.gis_marker.id, limitby=(0, 1)).first().id,
            gps_marker = "Medical Facility",
            resource = "hms_hospital"
        )
        table.insert(
            uuid = "www.sahanafoundation.org/GIS-FEATURE-CLASS-INCIDENT",
            name = "Incident",
            gps_marker = "Danger Area",
        )
        table.insert(
            uuid = "www.sahanafoundation.org/GIS-FEATURE-CLASS-OFFICE",
            name = "Office",
            symbology_id = symbology_us,
            marker_id = db(db.gis_marker.name == "office").select(db.gis_marker.id, limitby=(0, 1)).first().id,
            gps_marker = "Building",
            resource = "org_office"
        )
        table.insert(
            uuid = "www.sahanafoundation.org/GIS-FEATURE-CLASS-PERSON",
            name = "Person",
            symbology_id = symbology_us,
            marker_id = db(db.gis_marker.name == "person").select(db.gis_marker.id, limitby=(0, 1)).first().id,
            gps_marker = "Contact, Dreadlocks",
            resource = "pr_person"
        )
        table.insert(
            uuid = "www.sahanafoundation.org/GIS-FEATURE-CLASS-PORT",
            name = "Port",
            symbology_id = symbology_us,
            marker_id = db(db.gis_marker.name == "port").select(db.gis_marker.id, limitby=(0, 1)).first().id,
            gps_marker = "Marina",
        )
        table.insert(
            uuid = "www.sahanafoundation.org/GIS-FEATURE-CLASS-PROJECT",
            name = "Project",
        )
        table.insert(
            uuid = "www.sahanafoundation.org/GIS-FEATURE-CLASS-SCHOOL",
            name = "School",
            marker_id = db(db.gis_marker.name == "school").select(db.gis_marker.id, limitby=(0, 1)).first().id,
            gps_marker = "School",
        )
        table.insert(
            uuid = "www.sahanafoundation.org/GIS-FEATURE-CLASS-SHELTER",
            name = "Shelter",
            symbology_id = symbology_us,
            marker_id = db(db.gis_marker.name == "shelter").select(db.gis_marker.id, limitby=(0, 1)).first().id,
            gps_marker = "Campground",
            resource = "cr_shelter"
        )
        table.insert(
            uuid = "www.sahanafoundation.org/GIS-FEATURE-CLASS-SMS",
            name = "SMS",
            marker_id = db(db.gis_marker.name == "phone").select(db.gis_marker.id, limitby=(0, 1)).first().id,
        )
        table.insert(
            uuid = "www.sahanafoundation.org/GIS-FEATURE-CLASS-VEHICLE",
            name = "Vehicle",
            symbology_id = symbology_us,
            marker_id = db(db.gis_marker.name == "vehicle").select(db.gis_marker.id, limitby=(0, 1)).first().id,
            gps_marker = "Car",
        )
        table.insert(
            uuid = "www.sahanafoundation.org/GIS-FEATURE-CLASS-VOLUNTEER",
            name = "Volunteer",
            symbology_id = symbology_us,
            marker_id = db(db.gis_marker.name == "volunteer").select(db.gis_marker.id, limitby=(0, 1)).first().id,
            gps_marker = "Contact, Dreadlocks",
        )
        table.insert(
            uuid = "www.sahanafoundation.org/GIS-FEATURE-CLASS-WAREHOUSE",
            name = "Warehouse",
            symbology_id = symbology_us,
            marker_id = db(db.gis_marker.name == "office").select(db.gis_marker.id, limitby=(0, 1)).first().id,
            gps_marker = "Building",
        )
        table.insert(
            uuid = "www.sahanafoundation.org/GIS-FEATURE-CLASS-WATER",
            name = "Water",
            symbology_id = symbology_us,
            marker_id = db(db.gis_marker.name == "water").select(db.gis_marker.id, limitby=(0, 1)).first().id,
            gps_marker = "Drinking Water",
        )
    # API keys for external tile providers (placeholders - replace in Production)
    tablename = "gis_apikey"
    table = db[tablename]
    if not db(table.id > 0).count():
        table.insert(
            name = "google",
            apikey = "ABQIAAAAgB-1pyZu7pKAZrMGv3nksRRi_j0U6kJrkFvY4-OX2XYmEAa76BSH6SJQ1KrBv-RzS5vygeQosHsnNw",
            description = "localhost"
        )
        table.insert(
            name = "yahoo",
            apikey = "euzuro-openlayers",
            description = "trial - replace for Production use"
        )
        table.insert(
            name = "multimap",
            apikey = "metacarta_04",
            description = "trial - replace for Production use"
        )
    tablename = "gis_layer_feature"
    table = db[tablename]
    if not db(table.id > 0).count():
        table.insert(
            name = "Incident Reports",
            module = "irs",
            resource = "ireport",
            popup_label = "Incident",
            # Default (but still better to define here as otherwise each feature needs to check it's feature_class)
            marker_id = db(db.gis_marker.name == "marker_red").select(db.gis_marker.id, limitby=(0, 1)).first().id
        )
        table.insert(
            name = "Hospitals",
            module = "hms",
            resource = "hospital",
            popup_label = "Hospital",
            marker_id = db(db.gis_marker.name == "hospital").select(db.gis_marker.id, limitby=(0, 1)).first().id
        )
        table.insert(
            name = "Shelters",
            module = "cr",
            resource = "shelter",
            popup_label = "Shelter",
            marker_id = db(db.gis_marker.name == "shelter").select(db.gis_marker.id, limitby=(0, 1)).first().id
        )
        table.insert(
            name = "Offices",
            module = "org",
            resource = "office",
            popup_label = "Office",
            marker_id = db(db.gis_marker.name == "office").select(db.gis_marker.id, limitby=(0, 1)).first().id
        )
        table.insert(
            name = "Requests",
            module = "rms",
            resource = "req",
            popup_label = "Request",
            marker_id = db(db.gis_marker.name == "marker_yellow").select(db.gis_marker.id, limitby=(0, 1)).first().id
        )
        table.insert(
            name = "Assessments",
            module = "assess",
            resource = "rat",
            popup_label = "Rapid Assessment",
            marker_id = db(db.gis_marker.name == "marker_green").select(db.gis_marker.id, limitby=(0, 1)).first().id
        )
        table.insert(
            name = "Activities",
            module = "project",
            resource = "activity",
            popup_label = "Activity",
            marker_id = db(db.gis_marker.name == "activity").select(db.gis_marker.id, limitby=(0, 1)).first().id
        )
        table.insert(
            name = "Warehouses",
            module = "inventory",
            resource = "store",
            popup_label = "Warehouse",
            marker_id = db(db.gis_marker.name == "office").select(db.gis_marker.id, limitby=(0, 1)).first().id
        )
    tablename = "gis_layer_coordinate"
    table = db[tablename]
    if not db(table.id > 0).count():
        # Populate table
        table.insert(
            name = "Coordinate Grid",
            enabled = False,
            visible = False
        )
    tablename = "gis_layer_openstreetmap"
    table = db[tablename]
    if not db(table.id > 0).count():
        # Populate table
        table.insert(
            name = "OpenStreetMap (Mapnik)",
            url1 = "http://a.tile.openstreetmap.org/",
            url2 = "http://b.tile.openstreetmap.org/",
            url3 = "http://c.tile.openstreetmap.org/",
            attribution = '<a href="http://www.openstreetmap.org/" target="_blank">OpenStreetMap</a>'
        )
        table.insert(
            name = "OpenStreetMap (CycleMap)",
            url1 = "http://a.tile.opencyclemap.org/cycle/",
            url2 = "http://b.tile.opencyclemap.org/cycle/",
            url3 = "http://c.tile.opencyclemap.org/cycle/",
            attribution = '<a href="http://www.opencyclemap.org/" target="_blank">OpenCycleMap</a>'
        )
        table.insert(
            name = "OpenStreetMap (Labels)",
            url1 = "http://tiler1.censusprofiler.org/labelsonly/",
            attribution = 'Labels overlay CC-by-SA by <a href="http://oobrien.com/oom/" target="_blank">OpenOrienteeringMap</a>/<a href="http://www.openstreetmap.org/">OpenStreetMap</a> data',
            base = False,
            visible = False
        )
        table.insert(
            name = "OpenStreetMap (Relief)",
            url1 = "http://toolserver.org/~cmarqu/hill/",
            attribution = 'Relief by <a href="http://hikebikemap.de/" target="_blank">Hike & Bike Map</a>',
            base = False,
            visible = False
        )
        table.insert(
            name = "OpenStreetMap (MapQuest)",
            url1 = "http://otile1.mqcdn.com/tiles/1.0.0/osm/",
            url2 = "http://otile2.mqcdn.com/tiles/1.0.0/osm/",
            url3 = "http://otile3.mqcdn.com/tiles/1.0.0/osm/",
            attribution = 'Tiles Courtesy of <a href="http://open.mapquest.co.uk/" target="_blank">MapQuest</a> <img src="http://developer.mapquest.com/content/osm/mq_logo.png" border="0">',
            enabled = False
        )
        table.insert(
            name = "OpenStreetMap (Osmarender)",
            url1 = "http://a.tah.openstreetmap.org/Tiles/tile/",
            url2 = "http://b.tah.openstreetmap.org/Tiles/tile/",
            url3 = "http://c.tah.openstreetmap.org/Tiles/tile/",
            attribution = '<a href="http://www.openstreetmap.org/" target="_blank">OpenStreetMap</a>',
            enabled = False
        )
        table.insert(
            name = "OpenStreetMap (Taiwan)",
            url1 = "http://tile.openstreetmap.tw/tiles/",
            enabled = False
        )
        table.insert(
            name = "OpenStreetMap (Sahana)",
            url1 = "http://geo.eden.sahanafoundation.org/tiles/",
            enabled = False
        )
        #table.insert(
        #        name = "OpenAerialMap",
        #        url1 = "http://tile.openaerialmap.org/tiles/1.0.0/openaerialmap-900913/",
        #        enabled = False
        #    )
    tablename = "gis_layer_google"
    table = db[tablename]
    if not db(table.id > 0).count():
        # Populate table
        # One row per supported Google layer subtype, all disabled by default
        for subtype in gis_layer_google_subtypes:
            table.insert(
                name = "Google " + subtype,
                subtype = subtype,
                enabled = False
            )
    tablename = "gis_layer_yahoo"
    table = db[tablename]
    if not db(table.id > 0).count():
        # Populate table
        for subtype in gis_layer_yahoo_subtypes:
            table.insert(
                name = "Yahoo " + subtype,
                subtype = subtype,
                enabled = False
            )
    tablename = "gis_layer_bing"
    table = db[tablename]
    if not db(table.id > 0).count():
        # Populate table
        for subtype in gis_layer_bing_subtypes:
            table.insert(
                name = "Bing " + subtype,
                subtype = subtype,
                enabled = False
            )
    tablename = "gis_layer_mgrs"
    table = db[tablename]
    if not db(table.id > 0).count():
        # Populate table
        table.insert(
            name = "MGRS Atlas PDFs",
            description = "http://en.wikipedia.org/wiki/Military_grid_reference_system",
            url = "http://www.sharedgeo.org/datasets/shared/maps/usng/pdf.map?VERSION=1.0.0&SERVICE=WFS&request=GetFeature&typename=wfs_all_maps",
            enabled = False
        )
    tablename = "gis_layer_wms"
    table = db[tablename]
    if not db(table.id > 0).count():
        # Populate table
        table.insert(
            name = "VMap0",
            description = "A Free low-resolution Vector Map of the whole world",
            url = "http://labs.metacarta.com/wms/vmap0",
            #projection_id = db(db.gis_projection.epsg == 4326).select(limitby=(0, 1)).first().id,
            layers = "basic",
            enabled = False
        )
        table.insert(
            name = "Blue Marble",
            description = "A composite of four months of MODIS observations with a spatial resolution (level of detail) of 1 square kilometer per pixel.",
            url = "http://maps.opengeo.org/geowebcache/service/wms",
            #projection_id = db(db.gis_projection.epsg == 4326).select(limitby=(0, 1)).first().id,
            layers = "bluemarble",
            enabled = False
        )
    tablename = "gis_layer_georss"
    table = db[tablename]
    if not db(table.id > 0).count():
        # Populate table
        table.insert(
            name = "Earthquakes",
            description = "USGS: Global 7-day",
            url = "http://earthquake.usgs.gov/eqcenter/catalogs/eqs7day-M2.5.xml",
            projection_id = db(db.gis_projection.epsg == 4326).select(limitby=(0, 1)).first().id,
            marker_id = db(db.gis_marker.name == "earthquake").select(limitby=(0, 1)).first().id,
            enabled = False
        )
        table.insert(
            name = "Volcanoes",
            description = "USGS: US recent",
            url = "http://volcano.wr.usgs.gov/rss/vhpcaprss.xml",
            projection_id = db(db.gis_projection.epsg == 4326).select(limitby=(0, 1)).first().id,
            marker_id = db(db.gis_marker.name == "volcano").select(limitby=(0, 1)).first().id,
            enabled = False
        )
    tablename = "gis_wmc_layer"
    table = db[tablename]
    if not db(table.id > 0).count():
        # Populate table with the layers currently-supported by GeoExplorer
        table.insert(
            source = "ol",
            type_ = "OpenLayers.Layer",
            title = "None",
            visibility = False,
            group_ = "background",
            fixed = True
        )
        table.insert(
            source = "osm",
            name = "mapnik",
            title = "OpenStreetMap",
            visibility = True,
            group_ = "background",
            fixed = True
        )
        table.insert(
            source = "osm",
            name = "osmarender",
            title = "Tiles@home",
            visibility = False,
            group_ = "background",
            fixed = True
        )
        table.insert(
            source = "google",
            name = "ROADMAP",
            title = "Google Maps",
            visibility = False,
            opacity = 1,
            group_ = "background",
            fixed = True
        )
        table.insert(
            source = "google",
            name = "SATELLITE",
            title = "Google Satellite",
            visibility = False,
            opacity = 1,
            group_ = "background",
            fixed = True
        )
        table.insert(
            source = "google",
            name = "HYBRID",
            title = "Google Hybrid",
            visibility = False,
            opacity = 1,
            group_ = "background",
            fixed = True
        )
        table.insert(
            source = "google",
            name = "TERRAIN",
            title = "Google Terrain",
            visibility = False,
            opacity = 1,
            group_ = "background",
            fixed = True
        )
        table.insert(
            source = "sahana",
            name = "Pakistan:level3",
            title = "L3: Tehsils",
            visibility = False,
            opacity = 0.74,
            img_format = "image/png",
            styles = "",
            transparent = True
        )
        table.insert(
            source = "sahana",
            name = "Pakistan:pak_flood_17Aug",
            title = "Flood Extent - 17 August",
            visibility = False,
            opacity = 0.45,
            img_format = "image/png",
            styles = "",
            transparent = True
        )
    tablename = "gis_location"
    table = db[tablename]
    if not db(table.id > 0).count():
        # L0 Countries
        import_file = os.path.join(request.folder,
                                   "private", "import",
                                   "countries.csv")
        # NOTE(review): file handle from open() is never closed explicitly
        table.import_from_csv_file(open(import_file, "r"))
    # Should work for our 3 supported databases: sqlite, MySQL & PostgreSQL
    field = "name"
    db.executesql("CREATE INDEX %s__idx on %s(%s);" % (field, tablename, field))
    # Authorization
    # User Roles (uses native Web2Py Auth Groups)
    acl = auth.permission
    # table here is the *name* of the groups table (a string), per Web2Py Auth
    table = auth.settings.table_group_name
    if not db(db[table].id > 0).count():
        create_role = auth.s3_create_role
        # Do not remove or change order of these 5 definitions (System Roles):
        create_role("Administrator", "System Administrator - can access & make changes to any data")
        create_role("Authenticated", "Authenticated - all logged-in users",
                    dict(c="gis", uacl=acl.ALL, oacl=acl.ALL),
                    dict(c="gis", f="location", uacl=acl.READ, oacl=acl.ALL),
                    dict(c="inventory", uacl=acl.READ, oacl=acl.ALL),
                    dict(c="logs", uacl=acl.READ, oacl=acl.ALL)
                    )
        create_role("Anonymous", "Unauthenticated users",
                    dict(c="gis", uacl=acl.READ, oacl=acl.READ))
        create_role("Editor", "Editor - can access & make changes to any unprotected data")
        create_role("MapAdmin", "MapAdmin - allowed access to edit the MapService Catalogue",
                    dict(c="gis", uacl=acl.ALL, oacl=acl.ALL),
                    dict(c="gis", f="location", uacl=acl.ALL, oacl=acl.ALL))
        # Additional roles + ACLs
        create_role("DVI", "Role for DVI staff - permission to access the DVI module",
                    dict(c="dvi", uacl=acl.ALL, oacl=acl.ALL))
        create_role("HMS Staff", "Hospital Staff - permission to add/update own records in the HMS",
                    dict(c="hms", uacl=acl.CREATE, oacl=acl.ALL))
        create_role("HMS Admin", "Hospital Admin - permission to add/update all records in the HMS",
                    dict(c="hms", uacl=acl.ALL, oacl=acl.ALL))
    # Security Defaults for all tables (if using 'full' security policy: i.e. native Web2Py)
    if session.s3.security_policy not in (1, 2, 3, 4, 5):
        table = auth.settings.table_permission_name
        if not db(db[table].id > 0).count():
            # For performance we only populate this once (at system startup)
            # => need to populate manually when adding new tables to the database! (less RAD)
            authenticated = auth.id_group("Authenticated")
            editors = auth.id_group("Editor")
            for tablename in db.tables:
                table = db[tablename]
                # allow all registered users the ability to Read all records
                auth.add_permission(authenticated, "read", table)
                # allow anonymous users the ability to Read all records
                #auth.add_permission(anonymous, "read", table)
                # Editors can make changes
                auth.add_permission(editors, "create", table)
                auth.add_permission(editors, "update", table)
                auth.add_permission(editors, "delete", table)
            # Module-specific defaults can be set here
            #table = pr_person
            # Clear out defaults
            #auth.del_permission(authenticated, "read", table)
            #auth.del_permission(editors, "create", table)
            #auth.del_permission(editors, "update", table)
            #auth.del_permission(editors, "delete", table)
            # Add specific Role(s)
            #id = auth.id_group("myrole")
            #auth.add_permission(id, "read", table)
            #auth.add_permission(id, "create", table)
            #auth.add_permission(id, "update", table)
            #auth.add_permission(id, "delete", table)
    # Ensure DB population committed when running through shell
    db.commit()
tablename = "gis_location"
table = db[tablename]
# L2 counties
import_file = os.path.join(request.folder,
"private", "import",
"AllSt.txt")
regex = re.compile(r'(?P<fips>\d{5})\s+\d\s+(?P<county>\w+), (?P<state>[A-Z]{2})$')
with open(import_file) as f:
for line in f:
match = regex.search(line)
if match:
data = match.groupdict()
table.insert(code=data['fips'],
name='%s_%s' % (data['state'], data['county']),
level='2')
| |
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import urllib
import eventlet
import eventlet.wsgi
import eventlet.greenio
from swift.common import exceptions
from swift.common import http
from swift.common import swob
from swift.common import utils
from swift.common import request_helpers
class Receiver(object):
    """
    Handles incoming SSYNC requests to the object server.
    These requests come from the object-replicator daemon that uses
    :py:mod:`.ssync_sender`.
    The number of concurrent SSYNC requests is restricted by
    use of a replication_semaphore and can be configured with the
    object-server.conf [object-server] replication_concurrency
    setting.
    An SSYNC request is really just an HTTP conduit for
    sender/receiver replication communication. The overall
    SSYNC request should always succeed, but it will contain
    multiple requests within its request and response bodies. This
    "hack" is done so that replication concurrency can be managed.
    The general process inside an SSYNC request is:
    1. Initialize the request: Basic request validation, mount check,
       acquire semaphore lock, etc..
    2. Missing check: Sender sends the hashes and timestamps of
       the object information it can send, receiver sends back
       the hashes it wants (doesn't have or has an older
       timestamp).
    3. Updates: Sender sends the object information requested.
    4. Close down: Release semaphore lock, etc.
    """
    def __init__(self, app, request):
        # Validates the request immediately; may raise swob.HTTPException.
        self.app = app
        self.request = request
        self.device = None
        self.partition = None
        self.fp = None
        # We default to dropping the connection in case there is any exception
        # raised during processing because otherwise the sender could send for
        # quite some time before realizing it was all in vain.
        self.disconnect = True
        self.initialize_request()
    def __call__(self):
        """
        Processes an SSYNC request.
        Acquires a semaphore lock and then proceeds through the steps
        of the SSYNC process.

        This is a generator: yielded strings form the response body.
        """
        # The general theme for functions __call__ calls is that they should
        # raise exceptions.MessageTimeout for client timeouts (logged locally),
        # swob.HTTPException classes for exceptions to return to the caller but
        # not log locally (unmounted, for example), and any other Exceptions
        # will be logged with a full stack trace.
        # This is because the client is never just some random user but
        # is instead also our code and we definitely want to know if our code
        # is broken or doing something unexpected.
        try:
            # Double try blocks in case our main error handlers fail.
            try:
                # Need to send something to trigger wsgi to return response
                # headers and kick off the ssync exchange.
                yield '\r\n'
                # If semaphore is in use, try to acquire it, non-blocking, and
                # return a 503 if it fails.
                if self.app.replication_semaphore:
                    if not self.app.replication_semaphore.acquire(False):
                        raise swob.HTTPServiceUnavailable()
                try:
                    with self.diskfile_mgr.replication_lock(self.device):
                        for data in self.missing_check():
                            yield data
                        for data in self.updates():
                            yield data
                    # We didn't raise an exception, so end the request
                    # normally.
                    self.disconnect = False
                finally:
                    if self.app.replication_semaphore:
                        self.app.replication_semaphore.release()
            except exceptions.ReplicationLockTimeout as err:
                self.app.logger.debug(
                    '%s/%s/%s SSYNC LOCK TIMEOUT: %s' % (
                        self.request.remote_addr, self.device, self.partition,
                        err))
                yield ':ERROR: %d %r\n' % (0, str(err))
            except exceptions.MessageTimeout as err:
                self.app.logger.error(
                    '%s/%s/%s TIMEOUT in replication.Receiver: %s' % (
                        self.request.remote_addr, self.device, self.partition,
                        err))
                yield ':ERROR: %d %r\n' % (408, str(err))
            except swob.HTTPException as err:
                # Render the HTTP error into the in-band protocol stream.
                body = ''.join(err({}, lambda *args: None))
                yield ':ERROR: %d %r\n' % (err.status_int, body)
            except Exception as err:
                self.app.logger.exception(
                    '%s/%s/%s EXCEPTION in replication.Receiver' %
                    (self.request.remote_addr, self.device, self.partition))
                yield ':ERROR: %d %r\n' % (0, str(err))
        except Exception:
            self.app.logger.exception('EXCEPTION in replication.Receiver')
        if self.disconnect:
            # This makes the socket close early so the remote side doesn't have
            # to send its whole request while the lower Eventlet-level just
            # reads it and throws it away. Instead, the connection is dropped
            # and the remote side will get a broken-pipe exception.
            try:
                socket = self.request.environ['wsgi.input'].get_socket()
                eventlet.greenio.shutdown_safe(socket)
                socket.close()
            except Exception:
                pass  # We're okay with the above failing.
    def initialize_request(self):
        """
        Basic validation of request and mount check.
        This function will be called before attempting to acquire a
        replication semaphore lock, so contains only quick checks.

        Sets device/partition/policy, frag/node indexes, the diskfile
        manager and the input file pointer; raises swob exceptions on
        bad input or missing drive.
        """
        # This environ override has been supported since eventlet 0.14:
        # https://bitbucket.org/eventlet/eventlet/commits/ \
        #     4bd654205a4217970a57a7c4802fed7ff2c8b770
        self.request.environ['eventlet.minimum_write_chunk_size'] = 0
        self.device, self.partition, self.policy = \
            request_helpers.get_name_and_placement(self.request, 2, 2, False)
        self.frag_index = self.node_index = None
        if self.request.headers.get('X-Backend-Ssync-Frag-Index'):
            try:
                self.frag_index = int(
                    self.request.headers['X-Backend-Ssync-Frag-Index'])
            except ValueError:
                raise swob.HTTPBadRequest(
                    'Invalid X-Backend-Ssync-Frag-Index %r' %
                    self.request.headers['X-Backend-Ssync-Frag-Index'])
        if self.request.headers.get('X-Backend-Ssync-Node-Index'):
            try:
                self.node_index = int(
                    self.request.headers['X-Backend-Ssync-Node-Index'])
            except ValueError:
                raise swob.HTTPBadRequest(
                    'Invalid X-Backend-Ssync-Node-Index %r' %
                    self.request.headers['X-Backend-Ssync-Node-Index'])
            if self.node_index != self.frag_index:
                # a primary node should only receive it's own fragments
                raise swob.HTTPBadRequest(
                    'Frag-Index (%s) != Node-Index (%s)' % (
                        self.frag_index, self.node_index))
        utils.validate_device_partition(self.device, self.partition)
        self.diskfile_mgr = self.app._diskfile_router[self.policy]
        if not self.diskfile_mgr.get_dev_path(self.device):
            raise swob.HTTPInsufficientStorage(drive=self.device)
        self.fp = self.request.environ['wsgi.input']
    def missing_check(self):
        """
        Handles the receiver-side of the MISSING_CHECK step of a
        SSYNC request.
        Receives a list of hashes and timestamps of object
        information the sender can provide and responds with a list
        of hashes desired, either because they're missing or have an
        older timestamp locally.
        The process is generally:
        1. Sender sends `:MISSING_CHECK: START` and begins
           sending `hash timestamp` lines.
        2. Receiver gets `:MISSING_CHECK: START` and begins
           reading the `hash timestamp` lines, collecting the
           hashes of those it desires.
        3. Sender sends `:MISSING_CHECK: END`.
        4. Receiver gets `:MISSING_CHECK: END`, responds with
           `:MISSING_CHECK: START`, followed by the list of
           hashes it collected as being wanted (one per line),
           `:MISSING_CHECK: END`, and flushes any buffers.
        5. Sender gets `:MISSING_CHECK: START` and reads the list
           of hashes desired by the receiver until reading
           `:MISSING_CHECK: END`.
        The collection and then response is so the sender doesn't
        have to read while it writes to ensure network buffers don't
        fill up and block everything.
        """
        with exceptions.MessageTimeout(
                self.app.client_timeout, 'missing_check start'):
            line = self.fp.readline(self.app.network_chunk_size)
        if line.strip() != ':MISSING_CHECK: START':
            raise Exception(
                'Looking for :MISSING_CHECK: START got %r' % line[:1024])
        object_hashes = []
        while True:
            with exceptions.MessageTimeout(
                    self.app.client_timeout, 'missing_check line'):
                line = self.fp.readline(self.app.network_chunk_size)
            if not line or line.strip() == ':MISSING_CHECK: END':
                break
            parts = line.split()
            # NOTE(review): urllib.unquote is the Python 2 API
            # (urllib.parse.unquote on py3) — this module targets py2.
            object_hash, timestamp = [urllib.unquote(v) for v in parts[:2]]
            want = False
            try:
                df = self.diskfile_mgr.get_diskfile_from_hash(
                    self.device, self.partition, object_hash, self.policy,
                    frag_index=self.frag_index)
            except exceptions.DiskFileNotExist:
                want = True
            else:
                try:
                    df.open()
                except exceptions.DiskFileDeleted as err:
                    # NOTE(review): timestamps are compared as strings here —
                    # presumably relies on Swift's fixed-width timestamp
                    # format sorting lexicographically; confirm.
                    want = err.timestamp < timestamp
                except exceptions.DiskFileError as err:
                    want = True
                else:
                    want = df.timestamp < timestamp
            if want:
                object_hashes.append(object_hash)
        yield ':MISSING_CHECK: START\r\n'
        if object_hashes:
            yield '\r\n'.join(object_hashes)
            yield '\r\n'
        yield ':MISSING_CHECK: END\r\n'
    def updates(self):
        """
        Handles the UPDATES step of an SSYNC request.
        Receives a set of PUT and DELETE subrequests that will be
        routed to the object server itself for processing. These
        contain the information requested by the MISSING_CHECK step.
        The PUT and DELETE subrequests are formatted pretty much
        exactly like regular HTTP requests, excepting the HTTP
        version on the first request line.
        The process is generally:
        1. Sender sends `:UPDATES: START` and begins sending the
           PUT and DELETE subrequests.
        2. Receiver gets `:UPDATES: START` and begins routing the
           subrequests to the object server.
        3. Sender sends `:UPDATES: END`.
        4. Receiver gets `:UPDATES: END` and sends `:UPDATES:
           START` and `:UPDATES: END` (assuming no errors).
        5. Sender gets `:UPDATES: START` and `:UPDATES: END`.
        If too many subrequests fail, as configured by
        replication_failure_threshold and replication_failure_ratio,
        the receiver will hang up the request early so as to not
        waste any more time.
        At step 4, the receiver will send back an error if there were
        any failures (that didn't cause a hangup due to the above
        thresholds) so the sender knows the whole was not entirely a
        success. This is so the sender knows if it can remove an out
        of place partition, for example.
        """
        with exceptions.MessageTimeout(
                self.app.client_timeout, 'updates start'):
            line = self.fp.readline(self.app.network_chunk_size)
        if line.strip() != ':UPDATES: START':
            raise Exception('Looking for :UPDATES: START got %r' % line[:1024])
        successes = 0
        failures = 0
        while True:
            with exceptions.MessageTimeout(
                    self.app.client_timeout, 'updates line'):
                line = self.fp.readline(self.app.network_chunk_size)
            if not line or line.strip() == ':UPDATES: END':
                break
            # Read first line METHOD PATH of subrequest.
            method, path = line.strip().split(' ', 1)
            subreq = swob.Request.blank(
                '/%s/%s%s' % (self.device, self.partition, path),
                environ={'REQUEST_METHOD': method})
            # Read header lines.
            content_length = None
            replication_headers = []
            while True:
                with exceptions.MessageTimeout(self.app.client_timeout):
                    line = self.fp.readline(self.app.network_chunk_size)
                if not line:
                    raise Exception(
                        'Got no headers for %s %s' % (method, path))
                line = line.strip()
                if not line:
                    break
                header, value = line.split(':', 1)
                header = header.strip().lower()
                value = value.strip()
                subreq.headers[header] = value
                if header != 'etag':
                    # make sure ssync doesn't cause 'Etag' to be added to
                    # obj metadata in addition to 'ETag' which object server
                    # sets (note capitalization)
                    replication_headers.append(header)
                if header == 'content-length':
                    content_length = int(value)
            # Establish subrequest body, if needed.
            if method == 'DELETE':
                if content_length not in (None, 0):
                    raise Exception(
                        'DELETE subrequest with content-length %s' % path)
            elif method == 'PUT':
                if content_length is None:
                    raise Exception(
                        'No content-length sent for %s %s' % (method, path))
                # Generator closure over content_length/self.fp that streams
                # exactly content_length bytes of subrequest body.
                def subreq_iter():
                    left = content_length
                    while left > 0:
                        with exceptions.MessageTimeout(
                                self.app.client_timeout,
                                'updates content'):
                            chunk = self.fp.read(
                                min(left, self.app.network_chunk_size))
                        if not chunk:
                            raise Exception(
                                'Early termination for %s %s' % (method, path))
                        left -= len(chunk)
                        yield chunk
                subreq.environ['wsgi.input'] = utils.FileLikeIter(
                    subreq_iter())
            else:
                raise Exception('Invalid subrequest method %s' % method)
            subreq.headers['X-Backend-Storage-Policy-Index'] = int(self.policy)
            subreq.headers['X-Backend-Replication'] = 'True'
            if self.node_index is not None:
                # primary node should not 409 if it has a non-primary fragment
                subreq.headers['X-Backend-Ssync-Frag-Index'] = self.node_index
            if replication_headers:
                subreq.headers['X-Backend-Replication-Headers'] = \
                    ' '.join(replication_headers)
            # Route subrequest and translate response.
            resp = subreq.get_response(self.app)
            if http.is_success(resp.status_int) or \
                    resp.status_int == http.HTTP_NOT_FOUND:
                successes += 1
            else:
                failures += 1
            if failures >= self.app.replication_failure_threshold and (
                    not successes or
                    float(failures) / successes >
                    self.app.replication_failure_ratio):
                raise Exception(
                    'Too many %d failures to %d successes' %
                    (failures, successes))
            # The subreq may have failed, but we want to read the rest of the
            # body from the remote side so we can continue on with the next
            # subreq.
            for junk in subreq.environ['wsgi.input']:
                pass
        if failures:
            raise swob.HTTPInternalServerError(
                'ERROR: With :UPDATES: %d failures to %d successes' %
                (failures, successes))
        yield ':UPDATES: START\r\n'
        yield ':UPDATES: END\r\n'
| |
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from keystoneclient import exceptions as keystone_exceptions
import mock
from rally.cli.commands import deployment
from rally.cli import envutils
from rally.common import objects
from rally import consts
from rally import exceptions
from tests.unit import fakes
from tests.unit import test
class DeploymentCommandsTestCase(test.TestCase):
    """Unit tests for rally's ``deployment`` CLI commands.

    Every external dependency (API layer, env vars, file access) is
    replaced with mock.patch; the tests assert only on how the command
    layer drives those collaborators.
    """
    def setUp(self):
        super(DeploymentCommandsTestCase, self).setUp()
        self.deployment = deployment.DeploymentCommands()
    @mock.patch.dict(os.environ, {"RALLY_DEPLOYMENT": "my_deployment_id"})
    @mock.patch("rally.cli.commands.deployment.DeploymentCommands.list")
    @mock.patch("rally.cli.commands.deployment.api.Deployment.create")
    @mock.patch("rally.cli.commands.deployment.open",
                side_effect=mock.mock_open(read_data="{\"some\": \"json\"}"),
                create=True)
    def test_create(self, mock_open, mock_deployment_create,
                    mock_deployment_commands_list):
        # Config is read from the given file and forwarded as parsed JSON.
        self.deployment.create("fake_deploy", False, "path_to_config.json")
        mock_deployment_create.assert_called_once_with(
            {"some": "json"}, "fake_deploy")
    @mock.patch.dict(os.environ, {"OS_AUTH_URL": "fake_auth_url",
                                  "OS_USERNAME": "fake_username",
                                  "OS_PASSWORD": "fake_password",
                                  "OS_TENANT_NAME": "fake_tenant_name",
                                  "OS_REGION_NAME": "fake_region_name",
                                  "OS_ENDPOINT_TYPE": "fake_endpoint_typeURL",
                                  "OS_ENDPOINT": "fake_endpoint",
                                  "OS_INSECURE": "True",
                                  "OS_CACERT": "fake_cacert",
                                  "RALLY_DEPLOYMENT": "fake_deployment_id"})
    @mock.patch("rally.cli.commands.deployment.api.Deployment.create")
    @mock.patch("rally.cli.commands.deployment.DeploymentCommands.list")
    def test_createfromenv_keystonev2(self, mock_list, mock_deployment_create):
        self.deployment.create("from_env", True)
        # NOTE(review): the expected "endpoint_type" shows the command strips
        # a trailing "URL" from OS_ENDPOINT_TYPE.
        mock_deployment_create.assert_called_once_with(
            {
                "type": "ExistingCloud",
                "auth_url": "fake_auth_url",
                "region_name": "fake_region_name",
                "endpoint_type": "fake_endpoint_type",
                "endpoint": "fake_endpoint",
                "admin": {
                    "username": "fake_username",
                    "password": "fake_password",
                    "tenant_name": "fake_tenant_name"
                },
                "https_insecure": True,
                "https_cacert": "fake_cacert"
            },
            "from_env"
        )
    @mock.patch.dict(os.environ, {"OS_AUTH_URL": "fake_auth_url",
                                  "OS_USERNAME": "fake_username",
                                  "OS_PASSWORD": "fake_password",
                                  "OS_TENANT_NAME": "fake_tenant_name",
                                  "OS_REGION_NAME": "fake_region_name",
                                  "OS_ENDPOINT_TYPE": "fake_endpoint_typeURL",
                                  "OS_PROJECT_DOMAIN_NAME": "fake_pdn",
                                  "OS_USER_DOMAIN_NAME": "fake_udn",
                                  "OS_ENDPOINT": "fake_endpoint",
                                  "OS_INSECURE": "True",
                                  "OS_CACERT": "fake_cacert",
                                  "RALLY_DEPLOYMENT": "fake_deployment_id"})
    @mock.patch("rally.cli.commands.deployment.api.Deployment.create")
    @mock.patch("rally.cli.commands.deployment.DeploymentCommands.list")
    def test_createfromenv_keystonev3(self, mock_list, mock_deployment_create):
        # Presence of the *_DOMAIN_NAME env vars selects the keystone v3
        # credential layout (user/project domain + project_name).
        self.deployment.create("from_env", True)
        mock_deployment_create.assert_called_once_with(
            {
                "type": "ExistingCloud",
                "auth_url": "fake_auth_url",
                "region_name": "fake_region_name",
                "endpoint_type": "fake_endpoint_type",
                "endpoint": "fake_endpoint",
                "admin": {
                    "username": "fake_username",
                    "password": "fake_password",
                    "user_domain_name": "fake_udn",
                    "project_domain_name": "fake_pdn",
                    "project_name": "fake_tenant_name"
                },
                "https_insecure": True,
                "https_cacert": "fake_cacert"
            },
            "from_env"
        )
    @mock.patch("rally.cli.commands.deployment.DeploymentCommands.list")
    @mock.patch("rally.cli.commands.deployment.DeploymentCommands.use")
    @mock.patch("rally.cli.commands.deployment.api.Deployment.create",
                return_value=dict(uuid="uuid"))
    @mock.patch("rally.cli.commands.deployment.open",
                side_effect=mock.mock_open(read_data="{\"uuid\": \"uuid\"}"),
                create=True)
    def test_create_and_use(self, mock_open, mock_deployment_create,
                            mock_deployment_commands_use,
                            mock_deployment_commands_list):
        # With do_use=True the freshly created deployment becomes current.
        self.deployment.create("fake_deploy", False, "path_to_config.json",
                               True)
        mock_deployment_create.assert_called_once_with(
            {"uuid": "uuid"}, "fake_deploy")
        mock_deployment_commands_use.assert_called_once_with("uuid")
    @mock.patch("rally.cli.commands.deployment.api.Deployment.recreate")
    def test_recreate(self, mock_deployment_recreate):
        deployment_id = "43924f8b-9371-4152-af9f-4cf02b4eced4"
        self.deployment.recreate(deployment_id)
        mock_deployment_recreate.assert_called_once_with(deployment_id)
    @mock.patch("rally.cli.commands.deployment.envutils.get_global")
    def test_recreate_no_deployment_id(self, mock_get_global):
        # No explicit id and no global default -> the lookup error bubbles up.
        mock_get_global.side_effect = exceptions.InvalidArgumentsException
        self.assertRaises(exceptions.InvalidArgumentsException,
                          self.deployment.recreate, None)
    @mock.patch("rally.cli.commands.deployment.api.Deployment.destroy")
    def test_destroy(self, mock_deployment_destroy):
        deployment_id = "53fd0273-60ce-42e5-a759-36f1a683103e"
        self.deployment.destroy(deployment_id)
        mock_deployment_destroy.assert_called_once_with(deployment_id)
    @mock.patch("rally.cli.commands.deployment.envutils.get_global")
    def test_destroy_no_deployment_id(self, mock_get_global):
        mock_get_global.side_effect = exceptions.InvalidArgumentsException
        self.assertRaises(exceptions.InvalidArgumentsException,
                          self.deployment.destroy, None)
    @mock.patch("rally.cli.commands.deployment.cliutils.print_list")
    @mock.patch("rally.cli.commands.deployment.utils.Struct")
    @mock.patch("rally.cli.commands.deployment.envutils.get_global")
    @mock.patch("rally.cli.commands.deployment.api.Deployment.list")
    def test_list_different_deployment_id(self, mock_deployment_list,
                                          mock_get_global, mock_struct,
                                          mock_print_list):
        current_deployment_id = "26a3ce76-0efa-40e4-86e5-514574bd1ff6"
        mock_get_global.return_value = current_deployment_id
        fake_deployment_list = [
            {"uuid": "fa34aea2-ae2e-4cf7-a072-b08d67466e3e",
             "created_at": "03-12-2014",
             "name": "dep1",
             "status": "deploy->started",
             "active": "False"}]
        mock_deployment_list.return_value = fake_deployment_list
        self.deployment.list()
        # list() blanks the "active" marker for non-current deployments.
        fake_deployment = fake_deployment_list[0]
        fake_deployment["active"] = ""
        mock_struct.assert_called_once_with(**fake_deployment)
        headers = ["uuid", "created_at", "name", "status", "active"]
        mock_print_list.assert_called_once_with([mock_struct()], headers,
                                                sortby_index=headers.index(
                                                    "created_at"))
    @mock.patch("rally.cli.commands.deployment.cliutils.print_list")
    @mock.patch("rally.cli.commands.deployment.utils.Struct")
    @mock.patch("rally.cli.commands.deployment.envutils.get_global")
    @mock.patch("rally.cli.commands.deployment.api.Deployment.list")
    def test_list_current_deployment_id(self, mock_deployment_list,
                                        mock_get_global, mock_struct,
                                        mock_print_list):
        current_deployment_id = "64258e84-ffa1-4011-9e4c-aba07bdbcc6b"
        mock_get_global.return_value = current_deployment_id
        fake_deployment_list = [{"uuid": current_deployment_id,
                                 "created_at": "13-12-2014",
                                 "name": "dep2",
                                 "status": "deploy->finished",
                                 "active": "True"}]
        mock_deployment_list.return_value = fake_deployment_list
        self.deployment.list()
        # The current deployment is flagged with "*" in the listing.
        fake_deployment = fake_deployment_list[0]
        fake_deployment["active"] = "*"
        mock_struct.assert_called_once_with(**fake_deployment)
        headers = ["uuid", "created_at", "name", "status", "active"]
        mock_print_list.assert_called_once_with([mock_struct()], headers,
                                                sortby_index=headers.index(
                                                    "created_at"))
    @mock.patch("rally.cli.commands.deployment.api.Deployment.get")
    @mock.patch("json.dumps")
    def test_config(self, mock_json_dumps, mock_deployment_get):
        deployment_id = "fa4a423e-f15d-4d83-971a-89574f892999"
        value = {"config": "config"}
        mock_deployment_get.return_value = value
        self.deployment.config(deployment_id)
        mock_json_dumps.assert_called_once_with(value["config"],
                                                sort_keys=True, indent=4)
        mock_deployment_get.assert_called_once_with(deployment_id)
    @mock.patch("rally.cli.commands.deployment.envutils.get_global")
    def test_config_no_deployment_id(self, mock_get_global):
        mock_get_global.side_effect = exceptions.InvalidArgumentsException
        self.assertRaises(exceptions.InvalidArgumentsException,
                          self.deployment.config, None)
    @mock.patch("rally.cli.commands.deployment.cliutils.print_list")
    @mock.patch("rally.cli.commands.deployment.utils.Struct")
    @mock.patch("rally.cli.commands.deployment.api.Deployment.get")
    def test_show(self, mock_deployment_get, mock_struct, mock_print_list):
        deployment_id = "b1a6153e-a314-4cb3-b63b-cf08c1a416c3"
        value = {
            "admin": {
                "auth_url": "url",
                "username": "u",
                "password": "p",
                "tenant_name": "t",
                "region_name": "r",
                "endpoint_type": consts.EndpointType.INTERNAL
            },
            "users": []
        }
        mock_deployment_get.return_value = value
        self.deployment.show(deployment_id)
        mock_deployment_get.assert_called_once_with(deployment_id)
        headers = ["auth_url", "username", "password", "tenant_name",
                   "region_name", "endpoint_type"]
        # The password must be masked ("***") in the printed table.
        fake_data = ["url", "u", "***", "t", "r", consts.EndpointType.INTERNAL]
        mock_struct.assert_called_once_with(**dict(zip(headers, fake_data)))
        mock_print_list.assert_called_once_with([mock_struct()], headers)
    @mock.patch("rally.cli.commands.deployment.envutils.get_global")
    def test_deploy_no_deployment_id(self, mock_get_global):
        mock_get_global.side_effect = exceptions.InvalidArgumentsException
        self.assertRaises(exceptions.InvalidArgumentsException,
                          self.deployment.show, None)
    @mock.patch("os.remove")
    @mock.patch("os.symlink")
    @mock.patch("rally.cli.commands.deployment.api.Deployment.get",
                return_value=fakes.FakeDeployment(
                    uuid="593b683c-4b16-4b2b-a56b-e162bd60f10b"))
    @mock.patch("os.path.exists", return_value=True)
    @mock.patch("rally.common.fileutils.update_env_file")
    def test_use(self, mock_update_env_file, mock_path_exists,
                 mock_deployment_get, mock_symlink, mock_remove):
        # use() must export credentials to an openrc file, update the rally
        # globals file and (re)point the ~/.rally/openrc symlink.
        deployment_id = mock_deployment_get.return_value["uuid"]
        mock_deployment_get.return_value["admin"] = {
            "auth_url": "fake_auth_url",
            "username": "fake_username",
            "password": "fake_password",
            "tenant_name": "fake_tenant_name",
            "endpoint": "fake_endpoint",
            "region_name": None}
        with mock.patch("rally.cli.commands.deployment.open", mock.mock_open(),
                        create=True) as mock_file:
            self.deployment.use(deployment_id)
            self.assertEqual(2, mock_path_exists.call_count)
            mock_update_env_file.assert_called_once_with(os.path.expanduser(
                "~/.rally/globals"),
                "RALLY_DEPLOYMENT", "%s\n" % deployment_id)
            mock_file.return_value.write.assert_any_call(
                "export OS_ENDPOINT='fake_endpoint'\n")
            mock_file.return_value.write.assert_any_call(
                "export OS_AUTH_URL='fake_auth_url'\n"
                "export OS_USERNAME='fake_username'\n"
                "export OS_PASSWORD='fake_password'\n"
                "export OS_TENANT_NAME='fake_tenant_name'\n")
            mock_symlink.assert_called_once_with(
                os.path.expanduser("~/.rally/openrc-%s" % deployment_id),
                os.path.expanduser("~/.rally/openrc"))
            mock_remove.assert_called_once_with(os.path.expanduser(
                "~/.rally/openrc"))
    @mock.patch("os.remove")
    @mock.patch("os.symlink")
    @mock.patch("rally.cli.commands.deployment.api.Deployment.get",
                return_value=fakes.FakeDeployment(
                    uuid="593b683c-4b16-4b2b-a56b-e162bd60f10b"))
    @mock.patch("os.path.exists", return_value=True)
    @mock.patch("rally.common.fileutils.update_env_file")
    def test_use_with_v3_auth(self, mock_update_env_file, mock_path_exists,
                              mock_deployment_get, mock_symlink, mock_remove):
        # A /v3 auth_url additionally exports the user/project domain names.
        deployment_id = mock_deployment_get.return_value["uuid"]
        mock_deployment_get.return_value["admin"] = {
            "auth_url": "http://localhost:5000/v3",
            "username": "fake_username",
            "password": "fake_password",
            "tenant_name": "fake_tenant_name",
            "endpoint": "fake_endpoint",
            "region_name": None,
            "user_domain_name": "fake_user_domain",
            "project_domain_name": "fake_project_domain"}
        with mock.patch("rally.cli.commands.deployment.open", mock.mock_open(),
                        create=True) as mock_file:
            self.deployment.use(deployment_id)
            self.assertEqual(2, mock_path_exists.call_count)
            mock_update_env_file.assert_called_once_with(os.path.expanduser(
                "~/.rally/globals"),
                "RALLY_DEPLOYMENT", "%s\n" % deployment_id)
            mock_file.return_value.write.assert_any_call(
                "export OS_ENDPOINT='fake_endpoint'\n")
            mock_file.return_value.write.assert_any_call(
                "export OS_AUTH_URL='http://localhost:5000/v3'\n"
                "export OS_USERNAME='fake_username'\n"
                "export OS_PASSWORD='fake_password'\n"
                "export OS_TENANT_NAME='fake_tenant_name'\n")
            mock_file.return_value.write.assert_any_call(
                "export OS_USER_DOMAIN_NAME='fake_user_domain'\n"
                "export OS_PROJECT_DOMAIN_NAME='fake_project_domain'\n")
            mock_symlink.assert_called_once_with(
                os.path.expanduser("~/.rally/openrc-%s" % deployment_id),
                os.path.expanduser("~/.rally/openrc"))
            mock_remove.assert_called_once_with(os.path.expanduser(
                "~/.rally/openrc"))
    @mock.patch("rally.cli.commands.deployment.DeploymentCommands."
                "_update_openrc_deployment_file")
    @mock.patch("rally.common.fileutils.update_globals_file")
    @mock.patch("rally.cli.commands.deployment.api.Deployment")
    def test_use_by_name(self, mock_api_deployment, mock_update_globals_file,
                         mock__update_openrc_deployment_file):
        # use() also accepts a deployment *name*; it resolves to the uuid.
        fake_deployment = fakes.FakeDeployment(
            uuid="fake_uuid",
            admin="fake_credentials")
        mock_api_deployment.list.return_value = [fake_deployment]
        mock_api_deployment.get.return_value = fake_deployment
        status = self.deployment.use(deployment="fake_name")
        self.assertIsNone(status)
        mock_api_deployment.get.assert_called_once_with("fake_name")
        mock_update_globals_file.assert_called_once_with(
            envutils.ENV_DEPLOYMENT, "fake_uuid")
        mock__update_openrc_deployment_file.assert_called_once_with(
            "fake_uuid", "fake_credentials")
    @mock.patch("rally.cli.commands.deployment.api.Deployment.get")
    def test_deployment_not_found(self, mock_deployment_get):
        # Unknown deployment -> use() returns exit code 1, not an exception.
        deployment_id = "e87e4dca-b515-4477-888d-5f6103f13b42"
        mock_deployment_get.side_effect = exceptions.DeploymentNotFound(
            deployment=deployment_id)
        self.assertEqual(1, self.deployment.use(deployment_id))
    @mock.patch("rally.cli.commands.deployment.cliutils.print_list")
    @mock.patch("rally.cli.commands.deployment.api.Deployment.check")
    @mock.patch("rally.cli.commands.deployment.api.Deployment.get")
    def test_deployment_check(self, mock_deployment_get,
                              mock_deployment_check, mock_print_list):
        deployment_id = "e87e4dca-b515-4477-888d-5f6103f13b42"
        sample_credential = objects.Credential("http://192.168.1.1:5000/v2.0/",
                                               "admin",
                                               "adminpass").to_dict()
        deployment = {"admin": sample_credential,
                      "users": [sample_credential]}
        mock_deployment_get.return_value = deployment
        mock_deployment_check.return_value = {}
        self.deployment.check(deployment_id)
        mock_deployment_get.assert_called_once_with(deployment_id)
        mock_deployment_check.assert_called_once_with(deployment)
        headers = ["services", "type", "status"]
        mock_print_list.assert_called_once_with([], headers)
    @mock.patch("rally.cli.commands.deployment.api.Deployment.get")
    def test_deployment_check_not_exist(self, mock_deployment_get):
        deployment_id = "e87e4dca-b515-4477-888d-5f6103f13b42"
        mock_deployment_get.side_effect = exceptions.DeploymentNotFound(
            deployment=deployment_id)
        self.assertEqual(self.deployment.check(deployment_id), 1)
    @mock.patch("rally.cli.commands.deployment.api.Deployment.check")
    @mock.patch("rally.cli.commands.deployment.api.Deployment.get")
    def test_deployment_check_raise(self, mock_deployment_get,
                                    mock_deployment_check):
        # A keystone connection failure also maps to exit code 1.
        deployment_id = "e87e4dca-b515-4477-888d-5f6103f13b42"
        sample_credential = objects.Credential("http://192.168.1.1:5000/v2.0/",
                                               "admin",
                                               "adminpass").to_dict()
        sample_credential["not-exist-key"] = "error"
        mock_deployment_get.return_value = {"admin": sample_credential}
        refused = keystone_exceptions.ConnectionRefused()
        mock_deployment_check.side_effect = refused
        self.assertEqual(self.deployment.check(deployment_id), 1)
| |
from __future__ import absolute_import
import drmaa
import errno
import os
import shlex
import shutil
from . import app
from alta.objectstore import build_object_store
from comoda import ensure_dir
from celery import group
from celery import chain
from celery.utils.log import get_task_logger
from grp import getgrgid
from ..utils import IEMSampleSheetReader, IEMRunInfoReader, LogBook, runJob, \
get_conf, touch, check_progress_status, PROGRESS_STATUS, \
get_md5
from pwd import getpwuid
logger = get_task_logger(__name__)
@app.task(name='presta.app.tasks.run_presta_check')
def run_presta_check(**kwargs):
    """Launch the ``presta check`` console command as a Celery task.

    Optional kwargs:
        emit_events (bool): append the ``--emit_events`` flag.
        root_path (str): append ``--root_path <path>``.

    Returns:
        bool: True when the underlying job succeeded, False otherwise.
    """
    cmd = ['presta', 'check']
    if kwargs.get('emit_events', False):
        cmd.append('--emit_events')
    root = kwargs.get('root_path')
    if root:
        cmd.extend(['--root_path', root])
    outcome = runJob(cmd, logger)
    return bool(outcome)
@app.task(name='presta.app.tasks.run_presta_proc')
def run_presta_proc(**kwargs):
    """Launch the ``presta proc`` console command as a Celery task.

    Optional kwargs:
        rd_label (str): append ``--rd_label <label>``.
        rd_path (str): append ``--rd_path <path>``.
        ds_path (str): append ``--ds_path <path>``.
        emit_events (bool): append the ``--emit_events`` flag.

    Returns:
        bool: True when the underlying job succeeded, False otherwise.
    """
    emit_events = kwargs.get('emit_events', False)
    rd_label = kwargs.get('rd_label')
    rd_path = kwargs.get('rd_path')
    ds_path = kwargs.get('ds_path')
    cmd_line = ['presta', 'proc']
    if rd_label:
        cmd_line.extend(['--rd_label', rd_label])
    if rd_path:
        cmd_line.extend(['--rd_path', rd_path])
    if ds_path:
        cmd_line.extend(['--ds_path', ds_path])
    if emit_events:
        cmd_line.append('--emit_events')
    result = runJob(cmd_line, logger)
    # BUG FIX: the original had an unreachable second ``return False``
    # statement after this return; it has been removed.
    return bool(result)
@app.task(name='presta.app.tasks.run_presta_qc')
def run_presta_qc(**kwargs):
    """Launch the ``presta qc`` console command as a Celery task.

    Optional kwargs:
        rd_label, qc_export_path, ds_path, qc_path (str): forwarded as
            the corresponding CLI options when truthy.
        force (bool): append the ``--rerun`` flag.
        emit_events (bool): append the ``--emit_events`` flag.

    Returns:
        bool: True when the underlying job succeeded, False otherwise.
    """
    cmd = ['presta', 'qc']
    # Option order mirrors what the original implementation produced.
    for flag, value in (('--rd_label', kwargs.get('rd_label')),
                        ('--qc_export_path', kwargs.get('qc_export_path')),
                        ('--ds_path', kwargs.get('ds_path')),
                        ('--qc_path', kwargs.get('qc_path'))):
        if value:
            cmd.extend([flag, value])
    if kwargs.get('force'):
        cmd.append('--rerun')
    if kwargs.get('emit_events', False):
        cmd.append('--emit_events')
    return bool(runJob(cmd, logger))
@app.task(name='presta.app.tasks.run_presta_sync')
def run_presta_sync(**kwargs):
    """Launch the 'presta sync' CLI command; True when it succeeds."""
    cmd_line = ['presta', 'sync']
    if kwargs.get('emit_events', False):
        cmd_line.append('--emit_events')
    rundir_label = kwargs.get('rd_label')
    if rundir_label:
        cmd_line.extend(['--rundir_label', rundir_label])
    if kwargs.get('force'):
        cmd_line.append('--force')
    return bool(runJob(cmd_line, logger))
@app.task(name='presta.app.tasks.rd_collect_fastq')
def rd_collect_fastq(**kwargs):
    """Walk 'ds_path' and collect the full paths of all gzipped files.

    Returns a list of absolute/relative paths (matching ds_path's form).
    """
    path = kwargs.get('ds_path')
    results = []
    for localroot, dirnames, filenames in os.walk(path):
        for f in filenames:
            # endswith() replaces the original slice test f[-3:] == '.gz';
            # identical behaviour, clearer intent.
            if f.endswith('.gz'):
                logger.info('FASTQ = {}'.format(f))
                results.append(os.path.join(localroot, f))
    return results
@app.task(name='presta.app.tasks.rd_ready_to_be_preprocessed')
def rd_ready_to_be_preprocessed(**kwargs):
    """
    Verify if sequencer has ended to write, if rundir's ownership is
    correct and if samplesheet has been uploaded into iRODS

    Runs five independent checks in parallel and returns the joined list
    of their results (in task order: sequencing finished, ownership,
    samplesheet readiness, run metadata, preprocessing status).
    """
    path = kwargs.get('path')
    user = kwargs.get('user')
    grp = kwargs.get('group')
    rundir_label = kwargs.get('rd_label')
    samplesheet_filename = kwargs.get('ssht_filename', 'SampleSheet.csv')
    ir_conf = kwargs.get('ir_conf')
    io_conf = kwargs.get('io_conf')
    # iRODS path of the run's samplesheet inside the runs collection.
    ipath = os.path.join(ir_conf['runs_collection'],
                         rundir_label,
                         samplesheet_filename)
    # Immutable signatures (.si) so no parent result is injected.
    task0 = seq_completed.si(path)
    task1 = check_ownership.si(user=user, group=grp, dir=path)
    task2 = samplesheet_ready.si(ir_conf, ipath)
    # Metadata is attached to the run collection, i.e. the samplesheet's parent.
    task3 = check_metadata.si(ir_conf, os.path.dirname(ipath))
    task4 = check_preprocessing_status.si(rd_path=path, io_conf=io_conf)
    pipeline = group([task0, task1, task2, task3, task4])
    result = pipeline.apply_async()
    # Block until every sub-check has finished; join() preserves task order.
    return result.join()
@app.task(name='presta.app.tasks.samplesheet_ready')
def samplesheet_ready(ir_conf, ipath):
    """Check the samplesheet in iRODS.

    Returns a (exists, barcodes_ok) pair: whether the object at *ipath*
    exists and whether its barcodes all have the same size.
    """
    ir = build_object_store(store='irods',
                            host=ir_conf['host'],
                            port=ir_conf['port'],
                            user=ir_conf['user'],
                            password=ir_conf['password'].encode('ascii'),
                            zone=ir_conf['zone'])
    try:
        exists, iobj = ir.exists(ipath, delivery=True)
        ir.sess.cleanup()
    except Exception as e:
        # 'except Exception, e' was Python2-only syntax; 'as' works on 2.6+/3.
        logger.error(str(e))
        exists = False
        ir.sess.cleanup()
    if exists:
        with iobj.open('r') as f:
            samplesheet = IEMSampleSheetReader(f)
        return exists, samplesheet.barcodes_have_the_same_size()
    else:
        return False, False
@app.task(name='presta.app.tasks.check_metadata')
def check_metadata(ir_conf, ipath, get_metadata=False):
    """Check whether the iRODS object at *ipath* exists and carries metadata.

    Returns a boolean, or a (boolean, metadata_list) pair when
    get_metadata is True.  On iRODS failure it now returns False (or
    (False, [])) instead of raising NameError.
    """
    def retrieve_imetadata(iobj):
        # Flatten iRODS AVU triples into plain dictionaries.
        return [dict(name=m.name,
                     value=m.value,
                     units=m.units)
                for m in iobj.metadata.items()]
    ir = build_object_store(store='irods',
                            host=ir_conf['host'],
                            port=ir_conf['port'],
                            user=ir_conf['user'],
                            password=ir_conf['password'].encode('ascii'),
                            zone=ir_conf['zone'])
    # The original left 'exists'/'iobj' unbound when ir.exists() raised,
    # causing a NameError below; initialize them defensively.
    exists, iobj = False, None
    try:
        exists, iobj = ir.exists(ipath, delivery=True)
        ir.sess.cleanup()
    except Exception as e:
        # 'except Exception, e' was Python2-only syntax; 'as' works on 2.6+/3.
        logger.error(str(e))
        ir.sess.cleanup()
    has_metadata = bool(exists) and len(iobj.metadata.items()) > 0
    if get_metadata:
        return has_metadata, retrieve_imetadata(iobj) if iobj is not None else []
    return has_metadata
@app.task(name='presta.app.tasks.seq_completed')
def seq_completed(rd_path):
    """True when the Illumina end-of-run marker file is present in *rd_path*."""
    illumina_last_file = 'RTAComplete.txt'
    # next() builtin works on Python 2.6+ and 3; '.next()' was Py2-only.
    localroot, dirnames, filenames = next(os.walk(rd_path))
    return illumina_last_file in filenames
@app.task(name='presta.app.task.check_ownership')
def check_ownership(**kwargs):
    """True when *dir* is owned by the expected user and group."""
    user = kwargs.get('user')
    grp = kwargs.get('group')
    d = kwargs.get('dir')

    def find_owner(directory):
        # os.stat raises OSError for missing/unreadable paths; getpwuid
        # raises KeyError for unknown uids.  Either way report 'no owner'.
        # (Bare 'except:' narrowed to the exceptions these calls raise.)
        try:
            return getpwuid(os.stat(directory).st_uid).pw_name
        except (OSError, KeyError):
            return ''

    def find_group(directory):
        try:
            return getgrgid(os.stat(directory).st_gid).gr_name
        except (OSError, KeyError):
            return ''

    return user == find_owner(d) and grp == find_group(d)
@app.task(name='presta.app.task.check_preprocessing_status')
def check_preprocessing_status(**kwargs):
    """True when the rundir's preprocessing status is still in the TODO set."""
    status = check_rd_progress_status(rd_path=kwargs.get('rd_path'),
                                      io_conf=kwargs.get('io_conf'))
    return status in PROGRESS_STATUS.get('TODO')
@app.task(name='presta.app.tasks.copy')
def copy(src, dest):
    """Recursively copy *src* to *dest*; fall back to a file copy for non-dirs.

    An existing *dest* tree is replaced.  Returns True on success.
    """
    copied = False
    try:
        if os.path.exists(dest):
            shutil.rmtree(dest)
        shutil.copytree(src, dest)
        copied = True
    except OSError as e:
        if e.errno == errno.ENOTDIR:
            # copytree on a plain file raises ENOTDIR: copy it as a file.
            shutil.copy(src, dest)
            copied = True
        else:
            logger.error('Source not copied. Error: {}'.format(e))
    return copied
@app.task(name='presta.app.tasks.remove')
def remove(files=()):
    """Delete every existing path in *files*.

    Returns True when at least one iteration completed without error,
    False for an empty *files* or on the first OSError (matching the
    original semantics).  The default is now an immutable tuple instead
    of a shared mutable list.
    """
    result = False
    try:
        for f in files:
            if os.path.exists(f):
                os.remove(f)
            result = True
    except OSError as e:
        # The original message said 'Source not copied' — a copy/paste
        # slip from the copy task; corrected to describe removal.
        logger.error('Source not removed. Error: {}'.format(e))
    return result
@app.task(name='presta.app.tasks.copy_qc_dirs', ignore_result=True)
def copy_qc_dirs(trigger=None, **kwargs):
    """Copy the Stats/Reports/fastqc QC folders from src to dest in parallel."""
    if trigger is False:
        # An upstream task failed: propagate the False without copying.
        return trigger
    src = kwargs.get('src')
    dest = kwargs.get('dest')
    ensure_dir(dest)
    subtasks = [copy.si(os.path.join(src, d), os.path.join(dest, d))
                for d in ['Stats', 'Reports', 'fastqc']]
    job = group(subtasks)
    return job.apply_async()
    # return result.join()
@app.task(name='presta.app.tasks.merge')
def merge(**kwargs):
    """Concatenate the 'src' files into 'dst'.

    Returns True on success (or when src is not a non-empty list,
    matching the historical behaviour) and False when dst is missing
    afterwards or an I/O error occurs (the original returned an empty
    list here — also falsy, but type-inconsistent).  When remove_src is
    set, deletion of the sources is delegated to the 'remove' task.
    """
    src = kwargs.get('src', list())
    dst = kwargs.get('dst', None)
    remove_src = kwargs.get('remove_src', False)
    if isinstance(src, list) and len(src) > 0:
        try:
            with open(dst, 'wb') as outfile:
                for infile in src:
                    # 'with' closes each source file; the original leaked
                    # every handle it opened here.  'rb' matches the
                    # binary output stream.
                    with open(infile, 'rb') as inf:
                        shutil.copyfileobj(inf, outfile)
            if not os.path.exists(dst):
                return False
            if remove_src:
                remove.si(src).delay()
            return True
        except (IOError, OSError) as e:
            # IOError covers open() failures on Python 2, which the
            # original 'except OSError' missed; on Python 3 they alias.
            logger.error('Sources not merged. Error: {}'.format(e))
            return False
    return True
@app.task(name='presta.app.tasks.generate_md5_checksum')
def generate_md5_checksum(**kwargs):
    """Write the MD5 digest of 'src' into 'dst' in md5sum format."""
    src = kwargs.get('src')
    dst = kwargs.get('dst')
    if not os.path.exists(src):
        # Nothing to hash: a missing source is treated as success.
        return True
    try:
        with open(src, 'rb') as f:
            hash_string = get_md5(f)
        with open(dst, 'w') as f:
            f.write("{} {}\n".format(hash_string, os.path.basename(src)))
    except IOError as e:
        logger.error('MD5 hash not generated. Error: {}'.format(e))
        return False
    return True
@app.task(name='presta.app.tasks.set_progress_status')
def set_progress_status(**kwargs):
    """Touch the given progress-status marker file; returns touch()'s result."""
    return touch(kwargs.get('progress_status_file'), logger)
@app.task(name='presta.app.tasks.sanitize_metadata', ignore_result=True)
def sanitize_metadata(**kwargs):
    """Copy the samplesheet's iRODS metadata onto its run directory collection."""
    ir_conf = kwargs.get('conf')
    rundir_label = kwargs.get('rd_label')
    samplesheet_filename = kwargs.get('ssht_filename')
    logbook = LogBook(filename=kwargs.get('logbook_path'))
    logbook.start(task_name=sanitize_metadata.__name__, args=kwargs)
    if kwargs.get('sanitize'):
        rundir_ipath = os.path.join(ir_conf['runs_collection'],
                                    rundir_label)
        samplesheet_ipath = os.path.join(rundir_ipath,
                                         samplesheet_filename)
        samplesheet_has_metadata, imetadata = check_metadata(
            ir_conf=ir_conf,
            ipath=samplesheet_ipath,
            get_metadata=True)
        if samplesheet_has_metadata:
            _set_imetadata(ir_conf=ir_conf,
                           ipath=rundir_ipath,
                           imetadata=imetadata)
    logbook.end()
@app.task(name='presta.app.tasks.copy_samplesheet_from_irods',
          ignore_result=True)
def copy_samplesheet_from_irods(**kwargs):
    """Fetch the samplesheet from iRODS onto the filesystem (best effort).

    Returns the local samplesheet path regardless of copy success, since
    downstream tasks chain on the path rather than the result.
    """
    ir_conf = kwargs.get('conf')
    samplesheet_file_path = kwargs.get('ssht_path')
    samplesheet_filename = os.path.basename(samplesheet_file_path)
    rundir_label = kwargs.get('rd_label')
    overwrite_samplesheet = kwargs.get('overwrite_samplesheet')
    logbook_path = kwargs.get('logbook_path')
    logbook = LogBook(filename=logbook_path)
    logbook.start(task_name=copy_samplesheet_from_irods.__name__, args=kwargs)
    if overwrite_samplesheet:
        ir = build_object_store(store='irods',
                                host=ir_conf['host'],
                                port=ir_conf['port'],
                                user=ir_conf['user'],
                                password=ir_conf['password'].encode('ascii'),
                                zone=ir_conf['zone'])
        ipath = os.path.join(ir_conf['runs_collection'],
                             rundir_label,
                             samplesheet_filename)
        logger.info('Coping samplesheet from iRODS {} to FS {}'.format(
            ipath, samplesheet_file_path))
        try:
            ir.get_object(ipath, dest_path=samplesheet_file_path)
        except Exception as e:
            # Narrowed from a bare 'except:' — log the failure instead of
            # hiding it, but keep the best-effort semantics (no re-raise).
            logger.error('Samplesheet not copied from iRODS. Error: {}'.format(e))
        finally:
            # The session is cleaned up on both paths, as before.
            ir.sess.cleanup()
    logbook.end()
    return samplesheet_file_path
@app.task(name='presta.app.tasks.copy_run_info_to_irods',
          ignore_result=True)
def copy_run_info_to_irods(**kwargs):
    """Upload the RunInfo file into the run's iRODS collection."""
    ir_conf = kwargs.get('conf')
    run_info_file_path = kwargs.get('run_info_path')
    irods_path = os.path.join(ir_conf['runs_collection'],
                              kwargs.get('rd_label'),
                              os.path.basename(run_info_file_path))
    logbook = LogBook(filename=kwargs.get('logbook_path'))
    logbook.start(task_name=copy_run_info_to_irods.__name__, args=kwargs)
    _copy_file_into_irods(conf=ir_conf,
                          file_path=run_info_file_path,
                          irods_path=irods_path)
    logbook.end()
    return run_info_file_path
@app.task(name='presta.app.tasks.copy_run_parameters_to_irods',
          ignore_result=True)
def copy_run_parameters_to_irods(**kwargs):
    """Upload the RunParameters file into the run's iRODS collection."""
    ir_conf = kwargs.get('conf')
    run_parameters_file_path = kwargs.get('run_parameters_path')
    logbook = LogBook(filename=kwargs.get('logbook_path'))
    logbook.start(task_name=copy_run_parameters_to_irods.__name__, args=kwargs)
    irods_path = os.path.join(ir_conf['runs_collection'],
                              kwargs.get('rd_label'),
                              os.path.basename(run_parameters_file_path))
    _copy_file_into_irods(conf=ir_conf,
                          file_path=run_parameters_file_path,
                          irods_path=irods_path)
    logbook.end()
    return run_parameters_file_path
@app.task(name='presta.app.tasks.replace_values_into_samplesheet',
          ignore_result=True)
def replace_values_into_samplesheet(**kwargs):
    """Rewrite the samplesheet file in place with its values replaced."""
    samplesheet_file_path = kwargs.get('ssht_path')
    logbook = LogBook(filename=kwargs.get('logbook_path'))
    logbook.start(task_name=replace_values_into_samplesheet.__name__,
                  args=kwargs)
    if kwargs.get('overwrite_samplesheet'):
        # Parse first, then reopen for writing: the reader holds the
        # whole body so the same path can be overwritten safely.
        with open(samplesheet_file_path, 'r') as f:
            samplesheet = IEMSampleSheetReader(f)
        with open(samplesheet_file_path, 'w') as f:
            for row in samplesheet.get_body(replace=True):
                f.write(row)
    logbook.end()
@app.task(name='presta.app.tasks.replace_index_cycles_into_run_info',
          ignore_result=True)
def replace_index_cycles_into_run_info(**kwargs):
    """Rewrite the RunInfo file's index cycle counts.

    When the iRODS metadata matches what is already in the RunInfo file,
    the file is reset to its default index cycles; otherwise the metadata
    values win.
    """
    ir_conf = kwargs.get('conf')
    run_info_file_path = kwargs.get('run_info_path')
    rundir_label = kwargs.get('rd_label')
    logbook_path = kwargs.get('logbook_path')
    logbook = LogBook(filename=logbook_path)
    logbook.start(task_name=replace_index_cycles_into_run_info.__name__, args=kwargs)
    index_cycles_from_metadata = _get_index_cycles_from_metadata(ir_conf=ir_conf,
                                                                 rundir_label=rundir_label)
    index_cycles_from_run_info_file, default_index_cycles = _get_index_cycles_from_run_info_file(
        run_info_file_path=run_info_file_path,
        get_default_values=True)
    # Metadata equal to the file's current values -> fall back to the
    # defaults; otherwise apply the metadata values.
    index_cycles = default_index_cycles \
        if index_cycles_from_metadata == index_cycles_from_run_info_file \
        else index_cycles_from_metadata
    logger.info('Editing index cycles on: {}\n'
                'Old values:{}\n'
                'New values: {}'.format(run_info_file_path,
                                        index_cycles_from_run_info_file,
                                        index_cycles))
    # NOTE(review): IEMRunInfoReader is constructed from a *path* here but
    # from a file *handle* in _get_index_cycles_from_run_info_file — confirm
    # the reader accepts both.
    run_info_file = IEMRunInfoReader(run_info_file_path)
    run_info_file.set_index_cycles(index_cycles)
    logbook.end()
@app.task(name='presta.app.tasks.move', ignore_result=True)
def move(src, dest):
    """Move *src* to *dest*, logging (not raising) shutil errors."""
    try:
        shutil.move(src, dest)
    except shutil.Error as e:
        logger.error('Source not moved. Error: {}'.format(e))
@app.task(name='presta.app.tasks.bcl2fastq')
def bcl2fastq(**kwargs):
    """Run bcl2fastq demultiplexing for a rundir.

    Builds the bcl2fastq command line (including per-lane
    --use-bases-mask options derived from the samplesheet's barcode
    lengths), then submits it through the grid engine when batch_queuing
    is set, falling back to a local run otherwise.  Returns True when
    the job reports success.
    """
    rd_path = kwargs.get('rd_path')
    ds_path = kwargs.get('ds_path')
    ssht_path = kwargs.get('ssht_path')
    logbook_path = kwargs.get('logbook_path')
    run_info_file_path = kwargs.get('run_info_path')
    no_lane_splitting = kwargs.get('no_lane_splitting', False)
    with_failed_reads = kwargs.get('with_failed_reads', False)
    barcode_mismatches = kwargs.get('barcode_mismatches', 1)
    submit_to_batch_scheduler = kwargs.get('batch_queuing', True)
    queue_spec = kwargs.get('queue_spec')
    command = 'bcl2fastq'
    rd_arg = '-R {}'.format(rd_path)
    output_arg = '-o {}'.format(ds_path)
    samplesheet_arg = '--sample-sheet {}'.format(ssht_path)
    options = ['--ignore-missing-bcls',
               '--ignore-missing-filter',
               '--ignore-missing-positions',
               '--find-adapters-with-sliding-window',
               '--barcode-mismatches {}'.format(barcode_mismatches)]
    if no_lane_splitting:
        options.append('--no-lane-splitting')
    if with_failed_reads:
        options.append('--with-failed-reads')
    with open(ssht_path, 'r') as f:
        samplesheet = IEMSampleSheetReader(f)
    barcode_mask = samplesheet.get_barcode_mask()
    is_paired_end = _is_paired_end(run_info_file_path=run_info_file_path)
    # One --use-bases-mask per lane, derived from the barcode lengths.
    # 'index' is the first index read, 'index1' the second; None or the
    # string 'None' means that index read is absent.
    for lane, barcode_length in barcode_mask.items():
        if barcode_length['index'] is None or barcode_length['index'] in ['None']:
            # No index at all for this lane: let bcl2fastq use its defaults.
            continue
        elif barcode_length['index1'] is None or barcode_length['index1'] in ['None']:
            # Single-index lane; append a second template read for paired-end runs.
            mask = "{}:Y*,I{}n*,Y*".format(lane, barcode_length['index']) if is_paired_end \
                else "{}:Y*,I{}n*".format(lane, barcode_length['index'])
        else:
            # Dual-index lane.
            mask = "{}:Y*,I{}n*,I{}n*,Y*".format(lane, barcode_length['index'], barcode_length['index1']) \
                if is_paired_end else "{}:Y*,I{}n*,I{}n*".format(lane, barcode_length['index'], barcode_length['index1'])
        options.append("--use-bases-mask {}".format(mask))
    cmd_line = shlex.split(' '.join([command, rd_arg, output_arg,
                                     samplesheet_arg, ' '.join(options)]))
    logger.info('Executing {}'.format(cmd_line))
    logbook = LogBook(filename=logbook_path)
    logbook.start(task_name=bcl2fastq.__name__, args=kwargs)
    if submit_to_batch_scheduler:
        home = os.path.expanduser("~")
        launcher = kwargs.get('launcher', 'launcher')
        jt = {'jobName': command,
              'nativeSpecification': queue_spec,
              'remoteCommand': os.path.join(home, launcher),
              'args': cmd_line
              }
        try:
            output = runGEJob(jt)
        except:
            # NOTE(review): bare except silently falls back to a local run
            # on *any* submission error — consider narrowing and logging.
            output = runJob(cmd_line, logger)
    else:
        output = runJob(cmd_line, logger)
    logbook.end()
    return True if output else False
@app.task(name='presta.app.tasks.qc_runner')
def qc_runner(file_list, **kwargs):
    """Fan FASTQ files out to fastqc subtasks in chunks; return the task ids."""
    chunk_size = kwargs.get('chunk_size', 6)
    task_ids = []
    for start in range(0, len(file_list), chunk_size):
        batch = file_list[start:start + chunk_size]
        async_result = fastqc.s(batch, outdir=kwargs.get('outdir'),
                                threads=chunk_size,
                                batch_queuing=kwargs.get('batch_queuing'),
                                queue_spec=kwargs.get('queue_spec')
                                ).delay()
        task_ids.append(async_result.task_id)
    return task_ids
@app.task(name='presta.app.tasks.fastqc')
def fastqc(fq_list, **kwargs):
    """Run FastQC over *fq_list*.

    Submits through the grid engine when batch_queuing is set, falling
    back to a local run when DRMAA submission fails.  Returns True on
    success, False otherwise.
    """
    command = 'fastqc'
    output_arg = '--outdir {}'.format(kwargs.get('outdir'))
    options = ['--format fastq',
               '--threads {}'.format(kwargs.get('threads', 1))]
    fq_list_arg = ' '.join(fq_list)
    submit_to_batch_scheduler = kwargs.get('batch_queuing', True)
    queue_spec = kwargs.get('queue_spec')
    cmd_line = shlex.split(' '.join([command, output_arg, ' '.join(options),
                                     fq_list_arg]))
    logger.info('Executing {}'.format(cmd_line))
    if submit_to_batch_scheduler:
        home = os.path.expanduser("~")
        launcher = kwargs.get('launcher', 'launcher')
        jt = {'jobName': command,
              'nativeSpecification': queue_spec,
              'remoteCommand': os.path.join(home, launcher),
              'args': cmd_line
              }
        try:
            output = runGEJob(jt)
        except Exception as e:
            # Narrowed from a bare 'except:'; log why we fell back to a
            # local run instead of failing silently.
            logger.error('Grid submission failed ({}); running locally'.format(e))
            output = runJob(cmd_line, logger)
    else:
        output = runJob(cmd_line, logger)
    return True if output else False
@app.task(name='presta.app.tasks.rd_collect_samples')
def rd_collect_samples(**kwargs):
    """Read the run's samplesheet from iRODS and list its samples.

    Returns a list of {'id', 'name'} dicts; empty when the samplesheet is
    missing or iRODS is unreachable.
    """
    ir_conf = kwargs.get('conf')
    rundir_label = kwargs.get('rd_label')
    samplesheet_filename = kwargs.get('samplesheet_filename', 'SampleSheet.csv')
    samples = []
    ir = build_object_store(store='irods',
                            host=ir_conf['host'],
                            port=ir_conf['port'],
                            user=ir_conf['user'],
                            password=ir_conf['password'].encode('ascii'),
                            zone=ir_conf['zone'])
    ipath = os.path.join(ir_conf['runs_collection'],
                         rundir_label,
                         samplesheet_filename)
    # The original bare 'except:' left 'exists' unbound on any iRODS
    # failure, raising NameError below; initialize defensively and log.
    exists, iobj = False, None
    try:
        exists, iobj = ir.exists(ipath, delivery=True)
        ir.sess.cleanup()
    except Exception as e:
        logger.error(str(e))
        ir.sess.cleanup()
    if exists:
        with iobj.open('r') as f:
            samplesheet = IEMSampleSheetReader(f)
        samples = [dict(id=r['Sample_ID'],
                        name=r['Sample_Name'])
                   for r in samplesheet.data]
    return samples
@app.task(name='presta.app.tasks.search_rd_to_archive')
def search_rd_to_archive(**kwargs):
    """Scan the rundirs root and launch archiving for completed rundirs.

    Archiving chains are only dispatched when emit_events is set;
    otherwise the task just logs which rundirs are ready.
    """
    emit_events = kwargs.get('emit_events', False)
    conf = get_conf(logger, None)
    io_conf = conf.get_io_section()
    rd_root_path = kwargs.get('rd_root_path') if kwargs.get('rd_root_path') \
        else io_conf.get('rundirs_root_path')
    archive_root_path = kwargs.get('archive_root_path') if kwargs.get('archive_root_path') \
        else io_conf.get('archive_root_path')
    logger.info('Checking rundirs in: {}'.format(rd_root_path))
    # next() builtin works on Python 2.6+ and 3; '.next()' was Py2-only.
    localroot, dirnames, filenames = next(os.walk(rd_root_path))
    for rd_label in dirnames:
        rd_path = os.path.join(localroot,
                               rd_label)
        if check_rd_to_archive(rd_path=rd_path,
                               io_conf=io_conf):
            logger.info('{} processed. Ready to be archived'.format(rd_label))
            if emit_events:
                archiving_started_file = os.path.join(rd_path, io_conf.get('archiving_started_file'))
                archiving_completed_file = os.path.join(rd_path, io_conf.get('archiving_completed_file'))
                # Mark start -> move -> mark completion, strictly in order.
                archive_task = chain(
                    set_progress_status.si(progress_status_file=archiving_started_file),
                    archive_rd.si(rd_path=rd_path,
                                  archive_path=os.path.join(archive_root_path,
                                                            rd_label,
                                                            io_conf.get('rawdata_folder_name'))),
                    set_progress_status.si(progress_status_file=archiving_completed_file),
                )
                archive_task.delay()
@app.task(name='presta.app.tasks.search_rd_to_backup')
def search_rd_to_backup(**kwargs):
    """Placeholder: backup search is not implemented yet."""
    return None
@app.task(name='presta.app.tasks.search_rd_to_stage')
def search_rd_to_stage(**kwargs):
    """Scan the archive root and launch staging for backed-up rundirs.

    Staging chains are only dispatched when emit_events is set;
    otherwise the task just logs which rundirs are ready.
    """
    emit_events = kwargs.get('emit_events', False)
    conf = get_conf(logger, None)
    io_conf = conf.get_io_section()
    archive_root_path = kwargs.get('archive_root_path') if kwargs.get('archive_root_path') \
        else io_conf.get('archive_root_path')
    stage_root_path = kwargs.get('staging_root_path') if kwargs.get('staging_root_path') \
        else io_conf.get('staging_root_path')
    logger.info('Checking rundirs in: {}'.format(archive_root_path))
    # next() builtin works on Python 2.6+ and 3; '.next()' was Py2-only.
    localroot, dirnames, filenames = next(os.walk(archive_root_path))
    for rd_label in dirnames:
        rd_path = os.path.join(localroot,
                               rd_label)
        if check_rd_to_stage(rd_path=rd_path,
                             io_conf=io_conf):
            logger.info('{} backuped. Ready to be staged'.format(rd_label))
            if emit_events:
                staging_started_file = os.path.join(rd_path, io_conf.get('staging_started_file'))
                staging_completed_file = os.path.join(rd_path, io_conf.get('staging_completed_file'))
                # Mark start -> move -> mark completion, strictly in order.
                stage_task = chain(
                    set_progress_status.si(progress_status_file=staging_started_file),
                    stage_rd.si(rd_path=rd_path,
                                stage_path=os.path.join(stage_root_path,
                                                        rd_label)),
                    set_progress_status.si(progress_status_file=staging_completed_file),
                )
                stage_task.delay()
@app.task(name='presta.app.tasks.archive_rd')
def archive_rd(**kwargs):
    """Asynchronously move a rundir into the archive area."""
    src = kwargs.get('rd_path')
    dest = kwargs.get('archive_path')
    logger.info('Archiving {} in {}'.format(src, dest))
    move.si(src=src, dest=dest).apply_async()
@app.task(name='presta.app.tasks.stage_rd')
def stage_rd(**kwargs):
    """Asynchronously move a rundir into the staging area."""
    src = kwargs.get('rd_path')
    dest = kwargs.get('stage_path')
    logger.info('Staging {} in {}'.format(src, dest))
    move.si(src=src, dest=dest).apply_async()
@app.task(name='presta.app.tasks.check_rd_to_archive')
def check_rd_to_archive(**kwargs):
    """True when preprocessing is complete and archiving has not started."""
    rd_path = kwargs.get('rd_path')
    io_conf = kwargs.get('io_conf')
    progress = check_rd_progress_status(rd_path=rd_path, io_conf=io_conf)
    archiving = check_rd_archiving_status(rd_path=rd_path, io_conf=io_conf)
    return (progress in PROGRESS_STATUS.get('COMPLETED')
            and archiving in PROGRESS_STATUS.get('TODO'))
@app.task(name='presta.app.tasks.check_rd_to_stage')
def check_rd_to_stage(**kwargs):
    """True when backup is complete and staging has not started."""
    rd_path = kwargs.get('rd_path')
    io_conf = kwargs.get('io_conf')
    backup = check_rd_backup_status(rd_path=rd_path, io_conf=io_conf)
    staging = check_rd_staging_status(rd_path=rd_path, io_conf=io_conf)
    return (backup in PROGRESS_STATUS.get('COMPLETED')
            and staging in PROGRESS_STATUS.get('TODO'))
@app.task(name='presta.app.tasks.check_rd_progress_status')
def check_rd_progress_status(**kwargs):
    """Return the preprocessing progress status of the rundir."""
    io_conf = kwargs.get('io_conf')
    return check_progress_status(kwargs.get('rd_path'),
                                 io_conf.get('preprocessing_started_file'),
                                 io_conf.get('preprocessing_completed_file'))
@app.task(name='presta.app.tasks.check_rd_backup_status')
def check_rd_backup_status(**kwargs):
    """Return the backup progress status of the rundir."""
    io_conf = kwargs.get('io_conf')
    return check_progress_status(kwargs.get('rd_path'),
                                 io_conf.get('backup_started_file'),
                                 io_conf.get('backup_completed_file'))
@app.task(name='presta.app.tasks.check_rd_staging_status')
def check_rd_staging_status(**kwargs):
    """Return the staging progress status of the rundir."""
    io_conf = kwargs.get('io_conf')
    return check_progress_status(kwargs.get('rd_path'),
                                 io_conf.get('staging_started_file'),
                                 io_conf.get('staging_completed_file'))
@app.task(name='presta.app.tasks.check_rd_archiving_status')
def check_rd_archiving_status(**kwargs):
    """Return the archiving progress status of the rundir."""
    io_conf = kwargs.get('io_conf')
    return check_progress_status(kwargs.get('rd_path'),
                                 io_conf.get('archiving_started_file'),
                                 io_conf.get('archiving_completed_file'))
def _set_imetadata(ir_conf, ipath, imetadata):
    """Attach each metadata triple in *imetadata* to the iRODS object *ipath*."""
    ir = build_object_store(store='irods',
                            host=ir_conf['host'],
                            port=ir_conf['port'],
                            user=ir_conf['user'],
                            password=ir_conf['password'].encode('ascii'),
                            zone=ir_conf['zone'])
    for m in imetadata:
        value = m.get('value')
        # Empty-string values are stored as None, as in the original.
        ir.add_object_metadata(path=ipath,
                               meta=(m.get('name'),
                                     value if len(value) > 0 else None,
                                     m.get('units')))
    ir.sess.cleanup()
def _copy_file_into_irods(**kwargs):
    """Upload *file_path* to *irods_path* (best effort; errors are logged)."""
    ir_conf = kwargs.get('conf')
    file_path = kwargs.get('file_path')
    irods_path = kwargs.get('irods_path')
    ir = build_object_store(store='irods',
                            host=ir_conf['host'],
                            port=ir_conf['port'],
                            user=ir_conf['user'],
                            password=ir_conf['password'].encode('ascii'),
                            zone=ir_conf['zone'])
    logger.info('Coping from FS {} to iRODS {}'.format(file_path, irods_path))
    try:
        ir.put_object(source_path=file_path, dest_path=irods_path, force=True)
    except Exception as e:
        # Narrowed from a bare 'except:' that swallowed every error
        # silently; best-effort semantics are preserved (no re-raise).
        logger.error('File not copied into iRODS. Error: {}'.format(e))
    finally:
        # The session is cleaned up on both paths, as before.
        ir.sess.cleanup()
def _get_index_cycles_from_metadata(ir_conf, rundir_label):
    """Fetch the run's index cycle counts from its iRODS metadata.

    Returns a dict with keys 'index' and 'index1'; values are None when
    the metadata is missing or the AVU value is the string "None".
    """
    ipath = os.path.join(ir_conf['runs_collection'],
                         rundir_label)
    rundir_has_metadata, imetadata = check_metadata(ir_conf=ir_conf,
                                                    ipath=ipath,
                                                    get_metadata=True)
    if rundir_has_metadata:
        # NOTE(review): 'index' is read from the 'index1_cycles' AVU and
        # 'index1' from 'index2_cycles' — apparently 1-based metadata names
        # mapped onto the samplesheet's 'index'/'index1' keys; confirm
        # against the code that writes these AVUs.
        return dict(index=next((m['value'] for m in imetadata
                                if m["name"] == "index1_cycles" and m['value'] != "None"), None),
                    index1=next((m['value'] for m in imetadata
                                 if m["name"] == "index2_cycles" and m['value'] != "None"), None),
                    )
    return dict(index=None, index1=None)
def _get_index_cycles_from_run_info_file(run_info_file_path, get_default_values=False):
    """Read index cycle counts from a RunInfo file.

    Returns the current index cycles, or a (current, defaults) pair when
    get_default_values is True.
    """
    with open(run_info_file_path, 'r') as f:
        reader = IEMRunInfoReader(f)
    if get_default_values:
        return reader.get_index_cycles(), reader.get_default_index_cycles()
    return reader.get_index_cycles()
def _is_paired_end(run_info_file_path):
    """True when the RunInfo file describes a paired-end sequencing run."""
    return IEMRunInfoReader(run_info_file_path).is_paired_end_sequencing()
def runGEJob(jt_attr):
    """Submit a job to the grid engine via DRMAA and wait for completion.

    *jt_attr* is a dict with 'jobName', 'nativeSpecification',
    'remoteCommand' and 'args'.  Returns the DRMAA 'hasExited' flag of
    the finished job.
    """
    def init_job_template(jt, attr):
        # Prefix job names with 'presta' so they are recognizable in the queue.
        jt.jobName = '_'.join(['presta', attr['jobName']])
        jt.nativeSpecification = attr['nativeSpecification']
        jt.remoteCommand = attr['remoteCommand']
        jt.args = attr['args']
        return jt
    with drmaa.Session() as s:
        jt = init_job_template(s.createJobTemplate(), jt_attr)
        jobid = s.runJob(jt)
        logger.info('Your job has been submitted with ID %s' % jobid)
        # Block indefinitely until the job finishes.
        retval = s.wait(jobid, drmaa.Session.TIMEOUT_WAIT_FOREVER)
        logger.info('Job: {0} finished with status {1}'.format(retval.jobId,
                                                               retval.exitStatus))
        logger.info('Cleaning up')
        s.deleteJobTemplate(jt)
        return retval.hasExited
| |
from wagtail.wagtailcore.blocks import ChoiceBlock
class IconChoiceBlock(ChoiceBlock):
choices = [
('address-book', 'address-book'),
('address-book-o', 'address-book-o'),
('address-card', 'address-card'),
('address-card-o', 'address-card-o'),
('adjust', 'adjust'),
('american-sign-language-interpreting', 'american-sign-language-interpreting'),
('anchor', 'anchor'),
('archive', 'archive'),
('area-chart', 'area-chart'),
('arrows', 'arrows'),
('arrows-h', 'arrows-h'),
('arrows-v', 'arrows-v'),
('asl-interpreting ', 'asl-interpreting '),
('assistive-listening-systems', 'assistive-listening-systems'),
('asterisk', 'asterisk'),
('at', 'at'),
('audio-description', 'audio-description'),
('automobile ', 'automobile '),
('balance-scale', 'balance-scale'),
('ban', 'ban'),
('bank ', 'bank '),
('bar-chart', 'bar-chart'),
('bar-chart-o ', 'bar-chart-o '),
('barcode', 'barcode'),
('bars', 'bars'),
('bath', 'bath'),
('bathtub ', 'bathtub '),
('battery ', 'battery '),
('battery-0 ', 'battery-0 '),
('battery-1 ', 'battery-1 '),
('battery-2 ', 'battery-2 '),
('battery-3 ', 'battery-3 '),
('battery-4 ', 'battery-4 '),
('battery-empty', 'battery-empty'),
('battery-full', 'battery-full'),
('battery-half', 'battery-half'),
('battery-quarter', 'battery-quarter'),
('battery-three-quarters', 'battery-three-quarters'),
('bed', 'bed'),
('beer', 'beer'),
('bell', 'bell'),
('bell-o', 'bell-o'),
('bell-slash', 'bell-slash'),
('bell-slash-o', 'bell-slash-o'),
('bicycle', 'bicycle'),
('binoculars', 'binoculars'),
('birthday-cake', 'birthday-cake'),
('blind', 'blind'),
('bluetooth', 'bluetooth'),
('bluetooth-b', 'bluetooth-b'),
('bolt', 'bolt'),
('bomb', 'bomb'),
('book', 'book'),
('bookmark', 'bookmark'),
('bookmark-o', 'bookmark-o'),
('braille', 'braille'),
('briefcase', 'briefcase'),
('bug', 'bug'),
('building', 'building'),
('building-o', 'building-o'),
('bullhorn', 'bullhorn'),
('bullseye', 'bullseye'),
('bus', 'bus'),
('cab ', 'cab '),
('calculator', 'calculator'),
('calendar', 'calendar'),
('calendar-check-o', 'calendar-check-o'),
('calendar-minus-o', 'calendar-minus-o'),
('calendar-o', 'calendar-o'),
('calendar-plus-o', 'calendar-plus-o'),
('calendar-times-o', 'calendar-times-o'),
('camera', 'camera'),
('camera-retro', 'camera-retro'),
('car', 'car'),
('caret-square-o-down', 'caret-square-o-down'),
('caret-square-o-left', 'caret-square-o-left'),
('caret-square-o-right', 'caret-square-o-right'),
('caret-square-o-up', 'caret-square-o-up'),
('cart-arrow-down', 'cart-arrow-down'),
('cart-plus', 'cart-plus'),
('cc', 'cc'),
('certificate', 'certificate'),
('check', 'check'),
('check-circle', 'check-circle'),
('check-circle-o', 'check-circle-o'),
('check-square', 'check-square'),
('check-square-o', 'check-square-o'),
('child', 'child'),
('circle', 'circle'),
('circle-o', 'circle-o'),
('circle-o-notch', 'circle-o-notch'),
('circle-thin', 'circle-thin'),
('clock-o', 'clock-o'),
('clone', 'clone'),
('close ', 'close '),
('cloud', 'cloud'),
('cloud-download', 'cloud-download'),
('cloud-upload', 'cloud-upload'),
('code', 'code'),
('code-fork', 'code-fork'),
('coffee', 'coffee'),
('cog', 'cog'),
('cogs', 'cogs'),
('comment', 'comment'),
('comment-o', 'comment-o'),
('commenting', 'commenting'),
('commenting-o', 'commenting-o'),
('comments', 'comments'),
('comments-o', 'comments-o'),
('compass', 'compass'),
('copyright', 'copyright'),
('creative-commons', 'creative-commons'),
('credit-card', 'credit-card'),
('credit-card-alt', 'credit-card-alt'),
('crop', 'crop'),
('crosshairs', 'crosshairs'),
('cube', 'cube'),
('cubes', 'cubes'),
('cutlery', 'cutlery'),
('dashboard ', 'dashboard '),
('database', 'database'),
('deaf', 'deaf'),
('deafness ', 'deafness '),
('desktop', 'desktop'),
('diamond', 'diamond'),
('dot-circle-o', 'dot-circle-o'),
('download', 'download'),
('drivers-license ', 'drivers-license '),
('drivers-license-o ', 'drivers-license-o '),
('edit ', 'edit '),
('ellipsis-h', 'ellipsis-h'),
('ellipsis-v', 'ellipsis-v'),
('envelope', 'envelope'),
('envelope-o', 'envelope-o'),
('envelope-open', 'envelope-open'),
('envelope-open-o', 'envelope-open-o'),
('envelope-square', 'envelope-square'),
('eraser', 'eraser'),
('exchange', 'exchange'),
('exclamation', 'exclamation'),
('exclamation-circle', 'exclamation-circle'),
('exclamation-triangle', 'exclamation-triangle'),
('external-link', 'external-link'),
('external-link-square', 'external-link-square'),
('eye', 'eye'),
('eye-slash', 'eye-slash'),
('eyedropper', 'eyedropper'),
('fax', 'fax'),
('feed ', 'feed '),
('female', 'female'),
('fighter-jet', 'fighter-jet'),
('file-archive-o', 'file-archive-o'),
('file-audio-o', 'file-audio-o'),
('file-code-o', 'file-code-o'),
('file-excel-o', 'file-excel-o'),
('file-image-o', 'file-image-o'),
('file-movie-o ', 'file-movie-o '),
('file-pdf-o', 'file-pdf-o'),
('file-photo-o ', 'file-photo-o '),
('file-picture-o ', 'file-picture-o '),
('file-powerpoint-o', 'file-powerpoint-o'),
('file-sound-o ', 'file-sound-o '),
('file-video-o', 'file-video-o'),
('file-word-o', 'file-word-o'),
('file-zip-o ', 'file-zip-o '),
('film', 'film'),
('filter', 'filter'),
('fire', 'fire'),
('fire-extinguisher', 'fire-extinguisher'),
('flag', 'flag'),
('flag-checkered', 'flag-checkered'),
('flag-o', 'flag-o'),
('flash ', 'flash '),
('flask', 'flask'),
('folder', 'folder'),
('folder-o', 'folder-o'),
('folder-open', 'folder-open'),
('folder-open-o', 'folder-open-o'),
('frown-o', 'frown-o'),
('futbol-o', 'futbol-o'),
('gamepad', 'gamepad'),
('gavel', 'gavel'),
('gear ', 'gear '),
('gears ', 'gears '),
('gift', 'gift'),
('glass', 'glass'),
('globe', 'globe'),
('graduation-cap', 'graduation-cap'),
('group ', 'group '),
('hand-grab-o ', 'hand-grab-o '),
('hand-lizard-o', 'hand-lizard-o'),
('hand-paper-o', 'hand-paper-o'),
('hand-peace-o', 'hand-peace-o'),
('hand-pointer-o', 'hand-pointer-o'),
('hand-rock-o', 'hand-rock-o'),
('hand-scissors-o', 'hand-scissors-o'),
('hand-spock-o', 'hand-spock-o'),
('hand-stop-o ', 'hand-stop-o '),
('handshake-o', 'handshake-o'),
('hard-of-hearing ', 'hard-of-hearing '),
('hashtag', 'hashtag'),
('hdd-o', 'hdd-o'),
('headphones', 'headphones'),
('heart', 'heart'),
('heart-o', 'heart-o'),
('heartbeat', 'heartbeat'),
('history', 'history'),
('home', 'home'),
('hotel ', 'hotel '),
('hourglass', 'hourglass'),
('hourglass-1 ', 'hourglass-1 '),
('hourglass-2 ', 'hourglass-2 '),
('hourglass-3 ', 'hourglass-3 '),
('hourglass-end', 'hourglass-end'),
('hourglass-half', 'hourglass-half'),
('hourglass-o', 'hourglass-o'),
('hourglass-start', 'hourglass-start'),
('i-cursor', 'i-cursor'),
('id-badge', 'id-badge'),
('id-card', 'id-card'),
('id-card-o', 'id-card-o'),
('image ', 'image '),
('inbox', 'inbox'),
('industry', 'industry'),
('info', 'info'),
('info-circle', 'info-circle'),
('institution ', 'institution '),
('key', 'key'),
('keyboard-o', 'keyboard-o'),
('language', 'language'),
('laptop', 'laptop'),
('leaf', 'leaf'),
('legal ', 'legal '),
('lemon-o', 'lemon-o'),
('level-down', 'level-down'),
('level-up', 'level-up'),
('life-bouy ', 'life-bouy '),
('life-buoy ', 'life-buoy '),
('life-ring', 'life-ring'),
('life-saver ', 'life-saver '),
('lightbulb-o', 'lightbulb-o'),
('line-chart', 'line-chart'),
('location-arrow', 'location-arrow'),
('lock', 'lock'),
('low-vision', 'low-vision'),
('magic', 'magic'),
('magnet', 'magnet'),
('mail-forward ', 'mail-forward '),
('mail-reply ', 'mail-reply '),
('mail-reply-all ', 'mail-reply-all '),
('male', 'male'),
('map', 'map'),
('map-marker', 'map-marker'),
('map-o', 'map-o'),
('map-pin', 'map-pin'),
('map-signs', 'map-signs'),
('meh-o', 'meh-o'),
('microchip', 'microchip'),
('microphone', 'microphone'),
('microphone-slash', 'microphone-slash'),
('minus', 'minus'),
('minus-circle', 'minus-circle'),
('minus-square', 'minus-square'),
('minus-square-o', 'minus-square-o'),
('mobile', 'mobile'),
('mobile-phone ', 'mobile-phone '),
('money', 'money'),
('moon-o', 'moon-o'),
('mortar-board ', 'mortar-board '),
('motorcycle', 'motorcycle'),
('mouse-pointer', 'mouse-pointer'),
('music', 'music'),
('navicon ', 'navicon '),
('newspaper-o', 'newspaper-o'),
('object-group', 'object-group'),
('object-ungroup', 'object-ungroup'),
('paint-brush', 'paint-brush'),
('paper-plane', 'paper-plane'),
('paper-plane-o', 'paper-plane-o'),
('paw', 'paw'),
('pencil', 'pencil'),
('pencil-square', 'pencil-square'),
('pencil-square-o', 'pencil-square-o'),
('percent', 'percent'),
('phone', 'phone'),
('phone-square', 'phone-square'),
('photo ', 'photo '),
('picture-o', 'picture-o'),
('pie-chart', 'pie-chart'),
('plane', 'plane'),
('plug', 'plug'),
('plus', 'plus'),
('plus-circle', 'plus-circle'),
('plus-square', 'plus-square'),
('plus-square-o', 'plus-square-o'),
('podcast', 'podcast'),
('power-off', 'power-off'),
('print', 'print'),
('puzzle-piece', 'puzzle-piece'),
('qrcode', 'qrcode'),
('question', 'question'),
('question-circle', 'question-circle'),
('question-circle-o', 'question-circle-o'),
('quote-left', 'quote-left'),
('quote-right', 'quote-right'),
('random', 'random'),
('recycle', 'recycle'),
('refresh', 'refresh'),
('registered', 'registered'),
('remove ', 'remove '),
('reorder ', 'reorder '),
('reply', 'reply'),
('reply-all', 'reply-all'),
('retweet', 'retweet'),
('road', 'road'),
('rocket', 'rocket'),
('rss', 'rss'),
('rss-square', 'rss-square'),
('s15 ', 's15 '),
('search', 'search'),
('search-minus', 'search-minus'),
('search-plus', 'search-plus'),
('send ', 'send '),
('send-o ', 'send-o '),
('server', 'server'),
('share', 'share'),
('share-alt', 'share-alt'),
('share-alt-square', 'share-alt-square'),
('share-square', 'share-square'),
('share-square-o', 'share-square-o'),
('shield', 'shield'),
('ship', 'ship'),
('shopping-bag', 'shopping-bag'),
('shopping-basket', 'shopping-basket'),
('shopping-cart', 'shopping-cart'),
('shower', 'shower'),
('sign-in', 'sign-in'),
('sign-language', 'sign-language'),
('sign-out', 'sign-out'),
('signal', 'signal'),
('signing ', 'signing '),
('sitemap', 'sitemap'),
('sliders', 'sliders'),
('smile-o', 'smile-o'),
('snowflake-o', 'snowflake-o'),
('soccer-ball-o ', 'soccer-ball-o '),
('sort', 'sort'),
('sort-alpha-asc', 'sort-alpha-asc'),
('sort-alpha-desc', 'sort-alpha-desc'),
('sort-amount-asc', 'sort-amount-asc'),
('sort-amount-desc', 'sort-amount-desc'),
('sort-asc', 'sort-asc'),
('sort-desc', 'sort-desc'),
('sort-down ', 'sort-down '),
('sort-numeric-asc', 'sort-numeric-asc'),
('sort-numeric-desc', 'sort-numeric-desc'),
('sort-up ', 'sort-up '),
('space-shuttle', 'space-shuttle'),
('spinner', 'spinner'),
('spoon', 'spoon'),
('square', 'square'),
('square-o', 'square-o'),
('star', 'star'),
('star-half', 'star-half'),
('star-half-empty ', 'star-half-empty '),
('star-half-full ', 'star-half-full '),
('star-half-o', 'star-half-o'),
('star-o', 'star-o'),
('sticky-note', 'sticky-note'),
('sticky-note-o', 'sticky-note-o'),
('street-view', 'street-view'),
('suitcase', 'suitcase'),
('sun-o', 'sun-o'),
('support ', 'support '),
('tablet', 'tablet'),
('tachometer', 'tachometer'),
('tag', 'tag'),
('tags', 'tags'),
('tasks', 'tasks'),
('taxi', 'taxi'),
('television', 'television'),
('terminal', 'terminal'),
('thermometer ', 'thermometer '),
('thermometer-0 ', 'thermometer-0 '),
('thermometer-1 ', 'thermometer-1 '),
('thermometer-2 ', 'thermometer-2 '),
('thermometer-3 ', 'thermometer-3 '),
('thermometer-4 ', 'thermometer-4 '),
('thermometer-empty', 'thermometer-empty'),
('thermometer-full', 'thermometer-full'),
('thermometer-half', 'thermometer-half'),
('thermometer-quarter', 'thermometer-quarter'),
('thermometer-three-quarters', 'thermometer-three-quarters'),
('thumb-tack', 'thumb-tack'),
('thumbs-down', 'thumbs-down'),
('thumbs-o-down', 'thumbs-o-down'),
('thumbs-o-up', 'thumbs-o-up'),
('thumbs-up', 'thumbs-up'),
('ticket', 'ticket'),
('times', 'times'),
('times-circle', 'times-circle'),
('times-circle-o', 'times-circle-o'),
('times-rectangle ', 'times-rectangle '),
('times-rectangle-o ', 'times-rectangle-o '),
('tint', 'tint'),
('toggle-down ', 'toggle-down '),
('toggle-left ', 'toggle-left '),
('toggle-off', 'toggle-off'),
('toggle-on', 'toggle-on'),
('toggle-right ', 'toggle-right '),
('toggle-up ', 'toggle-up '),
('trademark', 'trademark'),
('trash', 'trash'),
('trash-o', 'trash-o'),
('tree', 'tree'),
('trophy', 'trophy'),
('truck', 'truck'),
('tty', 'tty'),
('tv ', 'tv '),
('umbrella', 'umbrella'),
('universal-access', 'universal-access'),
('university', 'university'),
('unlock', 'unlock'),
('unlock-alt', 'unlock-alt'),
('unsorted ', 'unsorted '),
('upload', 'upload'),
('user', 'user'),
('user-circle', 'user-circle'),
('user-circle-o', 'user-circle-o'),
('user-o', 'user-o'),
('user-plus', 'user-plus'),
('user-secret', 'user-secret'),
('user-times', 'user-times'),
('users', 'users'),
('vcard ', 'vcard '),
('vcard-o ', 'vcard-o '),
('video-camera', 'video-camera'),
('volume-control-phone', 'volume-control-phone'),
('volume-down', 'volume-down'),
('volume-off', 'volume-off'),
('volume-up', 'volume-up'),
('warning ', 'warning '),
('wheelchair', 'wheelchair'),
('wheelchair-alt', 'wheelchair-alt'),
('wifi', 'wifi'),
('window-close', 'window-close'),
('window-close-o', 'window-close-o'),
('window-maximize', 'window-maximize'),
('window-minimize', 'window-minimize'),
('window-restore', 'window-restore'),
('wrench', 'wrench'),
]
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Support for ragged tensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import gen_ragged_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_functional_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_util
from tensorflow.python.ops.ragged import segment_id_ops
from tensorflow.python.util.tf_export import tf_export
#===============================================================================
# ragged.range
#===============================================================================
# pylint: disable=redefined-builtin
@tf_export('ragged.range')
def range(starts, limits=None, deltas=1, dtype=None,
          name=None, row_splits_dtype=dtypes.int64):
  """Returns a `RaggedTensor` containing the specified sequences of numbers.

  Each row of the returned `RaggedTensor` contains a single sequence:

  ```python
  ragged.range(starts, limits, deltas)[i] ==
      tf.range(starts[i], limits[i], deltas[i])
  ```

  If `starts[i] >= limits[i]` and `deltas[i] > 0`, then `output[i]` will be
  an empty list.  Similarly, if `starts[i] <= limits[i]` and `deltas[i] < 0`,
  then `output[i]` will be an empty list.  This behavior is consistent with
  the Python `range` function, but differs from the `tf.range` op, which
  returns an error for these cases.

  Examples:

  >>> tf.ragged.range([3, 5, 2]).to_list()
  [[0, 1, 2], [0, 1, 2, 3, 4], [0, 1]]
  >>> tf.ragged.range([0, 5, 8], [3, 3, 12]).to_list()
  [[0, 1, 2], [], [8, 9, 10, 11]]
  >>> tf.ragged.range([0, 5, 8], [3, 3, 12], 2).to_list()
  [[0, 2], [], [8, 10]]

  The input tensors `starts`, `limits`, and `deltas` may be scalars or
  vectors.  The vector inputs must all have the same size.  Scalar inputs
  are broadcast to match the size of the vector inputs.

  Args:
    starts: Vector or scalar `Tensor`.  Specifies the first entry for each
      range if `limits` is not `None`; otherwise, specifies the range limits,
      and the first entries default to `0`.
    limits: Vector or scalar `Tensor`.  Specifies the exclusive upper limits
      for each range.
    deltas: Vector or scalar `Tensor`.  Specifies the increment for each
      range.  Defaults to `1`.
    dtype: The type of the elements of the resulting tensor.  If not
      specified, then a value is chosen based on the other args.
    name: A name for the operation.
    row_splits_dtype: `dtype` for the returned `RaggedTensor`'s `row_splits`
      tensor.  One of `tf.int32` or `tf.int64`.

  Returns:
    A `RaggedTensor` of type `dtype` with `ragged_rank=1`.
  """
  row_splits_dtype = dtypes.as_dtype(row_splits_dtype)
  if limits is None:
    # With a single positional argument, treat it as `limits` (as Python's
    # built-in `range` does) and start every sequence at zero.
    starts, limits = 0, starts
  with ops.name_scope(name, 'RaggedRange', [starts, limits, deltas]) as name:
    starts = ops.convert_to_tensor(starts, dtype=dtype, name='starts')
    limits = ops.convert_to_tensor(limits, dtype=dtype, name='limits')
    deltas = ops.convert_to_tensor(deltas, dtype=dtype, name='deltas')
    # infer dtype if not explicitly provided
    if dtype is None:
      starts, limits, deltas = _infer_matching_dtype(
          [starts, limits, deltas],
          [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64])
    result = gen_ragged_math_ops.ragged_range(
        starts, limits, deltas, Tsplits=row_splits_dtype, name=name)
    return ragged_tensor.RaggedTensor.from_row_splits(result.rt_dense_values,
                                                      result.rt_nested_splits,
                                                      validate=False)
def _infer_matching_dtype(tensors, dtype_hierarchy):
"""Infers a matching dtype for tensors, and casts them to that dtype."""
assert all(t.dtype in dtype_hierarchy for t in tensors)
inferred_dtype = max([t.dtype for t in tensors], key=dtype_hierarchy.index)
return [math_ops.cast(t, inferred_dtype) for t in tensors]
# The RaggedRange kernel is not differentiable; register it as such.
ops.no_gradient('RaggedRange')
#===============================================================================
# ragged_segment_<AGGREGATE>
#===============================================================================
# Docstring template used for the ragged_segment_<AGGREGATE> ops; the
# %(...)s placeholders are filled in by _set_ragged_segment_docstring below.
_RAGGED_SEGMENT_DOCSTRING = """\
Computes the %(combination)s along segments of a RaggedTensor.
Returns a RaggedTensor `output` with `num_segments` rows, where the row
`output[i]` is formed by taking the %(combination)s of all rows of `data`
whose corresponding `segment_id` is `i`.
The length of the row `output[i]` will be the maximum of the lengths of
all rows of `data` whose corresponding `segment_id` is `i`. If no `data`
rows correspond to a given segment ID, then the output row for that segment
ID will be empty.
Args:
data: A `RaggedTensor` containing the values to combine.
segment_ids: A `Tensor` or `RaggedTensor`. Must have type `int64` or
`int32`. `segment_ids.shape` must be a prefix of `data.shape`.
Must be greater than or equal to zero, and less than `num_segments`.
`segment_ids` is not required to be sorted.
num_segments: An `int32` or `int64` scalar specifying the number of
distinct segment ids.
name: A name prefix for the returned tensor (optional).
Returns:
A `RaggedTensor` containing the %(combined)s values. The returned tensor
has the same dtype as `data`, and its shape is
`[num_segments] + data.shape[segment_ids.rank:]`.
Raises:
ValueError: If `segment_ids.shape` is not a prefix of `data.shape`.
"""
def _ragged_segment_aggregate(unsorted_segment_op,
                              data,
                              segment_ids,
                              num_segments,
                              separator=None,
                              name=None):
  """Aggregates along segments of a RaggedTensor using `unsorted_segment_op`.

  Returns a RaggedTensor `output` with `num_segments` rows, where the row
  `output[i]` is formed by combining all rows of `data` whose corresponding
  `segment_id` is `i`.  The values in each row are combined using
  `unsorted_segment_op`.

  The length of the row `output[i]` will be the maximum of the lengths of
  all rows of `data` whose corresponding `segment_id` is `i`.  If no `data`
  rows correspond to a given segment ID, then the output row for that segment
  ID will be empty.

  Args:
    unsorted_segment_op: The tensorflow `op` that should be used to combine
      values in each row.  Must have the same signature and basic behavior as
      `unsorted_segment_sum`, `unsorted_segment_max`, etc.
    data: A `RaggedTensor` containing the values to be combined.
    segment_ids: A `Tensor` or `RaggedTensor`.  Must have type `int64` or
      `int32`.  `segment_ids.shape` must be a prefix of `data.shape`.
      `segment_ids` is not required to be sorted.
    num_segments: An `int32` or `int64` scalar.
    separator: An optional string.  Defaults to None.  The separator to
      use when joining.  Only used for string types.
    name: A name prefix for the returned tensor (optional).

  Returns:
    A `RaggedTensor` containing the aggregated values.  The returned tensor
    has the same dtype as `data`, and its shape is
    `[num_segments] + data.shape[segment_ids.rank:]`.

  Raises:
    ValueError: If segment_ids.shape is not a prefix of data.shape.
  """
  if not (ragged_tensor.is_ragged(data) or
          ragged_tensor.is_ragged(segment_ids)):
    # Neither input is ragged: defer directly to the dense segment op.
    if separator is not None:
      # A separator implies a string join op (unsorted_segment_join), whose
      # signature takes the separator before the name.
      return unsorted_segment_op(data, segment_ids, num_segments, separator,
                                 name)
    else:
      return unsorted_segment_op(data, segment_ids, num_segments, name)
  with ops.name_scope(name, 'RaggedSegment',
                      [data, segment_ids, num_segments]) as name:
    data = ragged_tensor.convert_to_tensor_or_ragged_tensor(data, name='data')
    segment_ids = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        segment_ids, name='segment_ids')
    data, segment_ids = ragged_tensor.match_row_splits_dtypes(data, segment_ids)
    if segment_ids.dtype not in (dtypes.int32, dtypes.int64):
      raise ValueError('segment_ids must have dtype int32 or int64.')
    if ragged_tensor.is_ragged(segment_ids):
      if not ragged_tensor.is_ragged(data):
        raise ValueError('segment_ids.shape must be a prefix of data.shape, '
                         'but segment_ids is ragged and data is not.')
      # Both are ragged: peel off one ragged dimension from each (checking
      # that their row_splits agree) and recurse on the values.
      check_splits = check_ops.assert_equal(
          segment_ids.row_splits,
          data.row_splits,
          message='segment_ids.shape must be a prefix of data.shape')
      with ops.control_dependencies([check_splits]):
        return _ragged_segment_aggregate(unsorted_segment_op, data.values,
                                         segment_ids.values, num_segments,
                                         separator)
    # Find the length of each row in data.  (shape=[data_nrows])
    data_row_lengths = data.row_splits[1:] - data.row_splits[:-1]
    # Find the length that each output row will have.  The length of the row
    # corresponding to segment `id` is `max(data_row_lengths[i])` where
    # `segment_ids[i]=id`.  (shape=[output_nrows])
    output_row_lengths = math_ops.maximum(
        math_ops.unsorted_segment_max(data_row_lengths, segment_ids,
                                      num_segments), 0)
    # Build the splits tensor for the output RaggedTensor.
    output_splits = array_ops.concat([
        array_ops.zeros([1], output_row_lengths.dtype),
        math_ops.cumsum(output_row_lengths)
    ],
                                     axis=0)
    # For each row in `data`, find the start & limit position where that row's
    # values will be aggregated in output.values.
    data_row_to_out_row_start = array_ops.gather(output_splits, segment_ids)
    data_row_to_out_row_limit = data_row_to_out_row_start + data_row_lengths
    # For each value in `data.values`, find the position where it will be
    # aggregated in `output.values`.
    # NOTE: `range` here is this module's ragged range (defined above),
    # not the Python builtin.
    data_val_to_out_val_index = range(data_row_to_out_row_start,
                                      data_row_to_out_row_limit).values
    # Recursively aggregate the values.
    output_values = _ragged_segment_aggregate(unsorted_segment_op, data.values,
                                              data_val_to_out_val_index,
                                              output_splits[-1], separator)
    return ragged_tensor.RaggedTensor.from_row_splits(
        output_values, output_splits, validate=False)
def segment_sum(data, segment_ids, num_segments, name=None):
  # For docs, see: _RAGGED_SEGMENT_DOCSTRING
  # (Fixed: missing space in `name or'RaggedSegmentSum'`, for consistency
  # with the other segment_* wrappers.)
  return _ragged_segment_aggregate(math_ops.unsorted_segment_sum,
                                   data=data,
                                   segment_ids=segment_ids,
                                   num_segments=num_segments,
                                   name=(name or 'RaggedSegmentSum'))
def segment_prod(data, segment_ids, num_segments, name=None):
  # For docs, see: _RAGGED_SEGMENT_DOCSTRING
  op_name = name or 'RaggedSegmentProd'
  return _ragged_segment_aggregate(math_ops.unsorted_segment_prod, data,
                                   segment_ids, num_segments, name=op_name)
def segment_min(data, segment_ids, num_segments, name=None):
  # For docs, see: _RAGGED_SEGMENT_DOCSTRING
  op_name = name or 'RaggedSegmentMin'
  return _ragged_segment_aggregate(math_ops.unsorted_segment_min, data,
                                   segment_ids, num_segments, name=op_name)
def segment_max(data, segment_ids, num_segments, name=None):
  # For docs, see: _RAGGED_SEGMENT_DOCSTRING
  op_name = name or 'RaggedSegmentMax'
  return _ragged_segment_aggregate(math_ops.unsorted_segment_max, data,
                                   segment_ids, num_segments, name=op_name)
def segment_mean(data, segment_ids, num_segments, name=None):
  """For docs, see: _RAGGED_SEGMENT_DOCSTRING."""
  with ops.name_scope(name, 'RaggedSegmentMean',
                      [data, segment_ids, num_segments]):
    summed = segment_sum(data, segment_ids, num_segments)
    # Segment-sum a tensor of ones with the same ragged structure as `data`
    # to obtain the per-position element counts.
    ones_like_data = ragged_tensor.RaggedTensor.from_nested_row_splits(
        array_ops.ones_like(data.flat_values), data.nested_row_splits,
        validate=False)
    counts = segment_sum(ones_like_data, segment_ids, num_segments)
    if not ragged_tensor.is_ragged(summed):
      return summed / counts
    return summed.with_flat_values(summed.flat_values / counts.flat_values)
def segment_sqrt_n(data, segment_ids, num_segments, name=None):
  """For docs, see: _RAGGED_SEGMENT_DOCSTRING."""
  with ops.name_scope(name, 'RaggedSegmentSqrtN',
                      [data, segment_ids, num_segments]):
    summed = segment_sum(data, segment_ids, num_segments)
    # Segment-sum a tensor of ones with the same ragged structure as `data`
    # to obtain the per-position element counts.
    ones_like_data = ragged_tensor.RaggedTensor.from_nested_row_splits(
        array_ops.ones_like(data.flat_values), data.nested_row_splits,
        validate=False)
    counts = segment_sum(ones_like_data, segment_ids, num_segments)
    if not ragged_tensor.is_ragged(summed):
      return summed / math_ops.sqrt(counts)
    return summed.with_flat_values(
        summed.flat_values / math_ops.sqrt(counts.flat_values))
def _set_ragged_segment_docstring(func, combination, combined):
func.__doc__ = _RAGGED_SEGMENT_DOCSTRING % dict(
combination=combination, combined=combined)
# Instantiate the shared docstring template for each of the segment ops.
_set_ragged_segment_docstring(segment_sum, 'sum', 'summed')
_set_ragged_segment_docstring(segment_prod, 'product', 'multiplied')
_set_ragged_segment_docstring(segment_min, 'minimum', 'minimized')
_set_ragged_segment_docstring(segment_max, 'maximum', 'maximized')
_set_ragged_segment_docstring(segment_mean, 'mean', 'averaged')
_set_ragged_segment_docstring(segment_sqrt_n, 'sum divided by sqrt(N)',
                              'summed')
#===============================================================================
# ragged_reduce_<AGGREGATE>
#===============================================================================
# Docstring template used for ragged_reduce_<AGGREGATE> ops.
_RAGGED_REDUCE_DOCSTRING = """\
Computes the %(combination)s of elements across dimensions of a `RaggedTensor`.
Reduces `input_tensor` along the dimensions given in `axis` by taking the
%(combination)s of values. If a reduced dimension has no elements for
some index, then the value for that index will be %(default)s.
The rank of the tensor is reduced by `1` for each entry in `axis`. If
`axis` is not specified, then all dimensions are reduced, and a scalar
value is returned.
Args:
input_tensor: A `RaggedTensor` containing the values to be %(combined)s.
axis: The dimensions to reduce. May be `None` (to reduce all axes), an
`int` (to reduce a single axis), a `list` or `tuple` of `int` (to reduce
a given set of axes), or a `Tensor` with a constant value. Must be in
the range `[0, input_tensor.rank]`.
name: A name prefix for the returned tensor (optional).
Returns:
A `RaggedTensor` containing the %(combined)s values. The returned tensor
has the same dtype as `data`, and its shape is given by removing the
dimensions specified in `axis` from `input_tensor.shape`. The `ragged_rank`
of the returned tensor is given by substracting any ragged dimensions
specified in `axis` from `input_tensor.ragged_rank`.
Raises:
ValueError: If `axis` contains a `Tensor` whose value is not constant.
####Example:
%(example)s
"""
_RAGGED_REDUCE_SUM_EXAMPLE = """
>>> rt = tf.ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]])
>>> tf.reduce_sum(rt, axis=0).numpy() # = [3+1+9+2, 1+5+6, 4]
array([15, 12, 4], dtype=int32)
>>> tf.reduce_sum(rt, axis=1).numpy() # = [3+1+4, 1+5, 9, 2+6]
array([8, 6, 9, 8], dtype=int32)
"""
_RAGGED_REDUCE_PROD_EXAMPLE = """
>>> rt = tf.ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]])
>>> tf.reduce_prod(rt, axis=0).numpy() # = [3*1*9*2, 1*5*6, 4]
array([54, 30, 4], dtype=int32)
>>> tf.reduce_prod(rt, axis=1).numpy() # = [3*1*4, 1*5, 9, 2*6]
array([12, 5, 9, 12], dtype=int32)
"""
_RAGGED_REDUCE_MIN_EXAMPLE = """
>>> rt = tf.ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]])
>>> tf.reduce_min(rt, axis=0).numpy()
array([1, 1, 4], dtype=int32)
>>> tf.reduce_min(rt, axis=1).numpy()
array([1, 1, 9, 2], dtype=int32)
"""
_RAGGED_REDUCE_MAX_EXAMPLE = """
>>> rt = tf.ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]])
>>> tf.reduce_max(rt, axis=0).numpy()
array([9, 6, 4], dtype=int32)
>>> tf.reduce_max(rt, axis=1).numpy()
array([4, 5, 9, 6], dtype=int32)
"""
_RAGGED_REDUCE_MEAN_EXAMPLE = """
>>> rt = tf.ragged.constant([[3, 1, 4], [1, 5], [9], [2, 6]])
>>> tf.reduce_mean(rt, axis=0).numpy()
array([3.75, 4. , 4. ])
>>> tf.reduce_mean(rt, axis=1).numpy()
array([2.66666667, 3. , 9. , 4. ])
"""
_RAGGED_REDUCE_ALL_EXAMPLE = """
>>> rt = tf.ragged.constant([[True, True], [True, True, False, True], [False, True]])
>>> tf.reduce_all(rt, axis=0).numpy()
array([False, True, False, True])
>>> tf.reduce_all(rt, axis=1).numpy()
array([ True, False, False])
"""
_RAGGED_REDUCE_ANY_EXAMPLE = """
>>> rt = tf.ragged.constant([[True, True], [True, True, False, True], [False, True]])
>>> tf.reduce_any(rt, axis=0).numpy()
array([ True, True, False, True])
>>> tf.reduce_any(rt, axis=1).numpy()
array([ True, True, True])
"""
def ragged_reduce_aggregate(reduce_op,
                            unsorted_segment_op,
                            rt_input,
                            axis,
                            keepdims,
                            separator=None,
                            name=None):
  """Aggregates across axes of a RaggedTensor using the given `Tensor` ops.

  Reduces `rt_input` along the dimensions given in `axis`.  The rank of the
  tensor is reduced by 1 for each entry in `axis`.  If `axis` is not
  specified, then all dimensions are reduced, and a scalar value is returned.

  This op assumes that `reduce_op` and `unsorted_segment_op` are associative;
  if not, then reducing multiple axes will return incorrect results.  (In
  particular, reducing multiple axes is currently implemented by reducing the
  axes one at a time.)

  Args:
    reduce_op: The tensorflow `op` that should be used to reduce values in
      uniform dimensions.  Must have the same signature and basic behavior as
      `reduce_sum`, `reduce_max`, etc.
    unsorted_segment_op: The tensorflow `op` that should be used to combine
      values in ragged dimensions.  Must have the same signature and basic
      behavior as `unsorted_segment_sum`, `unsorted_segment_max`, etc.
    rt_input: A `Tensor` or `RaggedTensor` containing the values to be reduced.
    axis: The axis or axes to reduce.  May be `None` (to reduce all axes), an
      `int` (to reduce a single axis), a `list` or `tuple` of `int` (to reduce
      a given set of axes), or a `Tensor` with a constant value.  Must be in
      the range `[0, rt_input.rank)`.
    keepdims: If true, retains reduced dimensions with length 1.
    separator: An optional string.  Defaults to None.  The separator to use
      when joining.  The separator must not be set for non-string data types.
      (i.e. if separator is not None then it uses string ops)
    name: A name prefix for the returned tensor (optional).

  Returns:
    A `RaggedTensor` containing the reduced values.  The returned tensor
    has the same dtype as `data`, and its shape is given by removing the
    dimensions specified in `axis` from `rt_input.shape`.  The `ragged_rank`
    of the returned tensor is given by subtracting any ragged dimensions
    specified in `axis` from `rt_input.ragged_rank`.

  Raises:
    ValueError: If `axis` contains a `Tensor` whose value is not constant.
  """
  if not ragged_tensor.is_ragged(rt_input):
    # Dense input: defer directly to the dense reduction op.
    if separator is None:
      return reduce_op(rt_input, axis, name=name)
    else:
      # When separator is not None, we infer that dtype is string and
      # reduce_join will be called.
      return reduce_op(rt_input, axis, name=name, separator=separator)
  if keepdims:
    raise ValueError('keepdims=True is not supported for RaggedTensors.')
  if isinstance(axis, ops.Tensor):
    # `axis` must be resolvable to a Python constant at graph-construction
    # time, since it determines the structure of the result.
    axis = tensor_util.constant_value(axis)
    if axis is None:
      raise ValueError('axis must be known at graph construction time.')
    if isinstance(axis, np.ndarray):
      axis = axis.tolist()
  # When reducing all axes, just ignore splits & reduce the inner values.
  if axis is None:
    return reduce_op(rt_input.flat_values, None, name=name)
  with ops.name_scope(name, 'RaggedReduce', [rt_input, axis]):
    if isinstance(axis, (tuple, list)):
      if not axis:
        return rt_input
      elif len(axis) == 1:
        axis = axis[0]
      else:
        # When reducing multiple axes, as we reduce one at a time (see below),
        # the negative axis has to be converted to positive at the first run
        # as the sort with negative axis will have different orders.
        # See GitHub issue 27497.
        axis = [
            ragged_util.get_positive_axis(a, rt_input.shape.ndims) for a in axis
        ]
        # When reducing multiple axes, just reduce one at a time.  This is less
        # efficient, and only works for associative ops.  (In particular, it
        # does not work for reduce_mean.)  However, reducing multiple axes at
        # once will probably require a nontrivial c++ op.
        axis = sorted(axis)
        inner_reduced = ragged_reduce_aggregate(reduce_op, unsorted_segment_op,
                                                rt_input, axis[-1], keepdims,
                                                separator)
        return ragged_reduce_aggregate(reduce_op, unsorted_segment_op,
                                       inner_reduced, axis[:-1], keepdims,
                                       separator)
    rt_input = ragged_tensor.convert_to_tensor_or_ragged_tensor(
        rt_input, name='rt_input')
    axis = ragged_util.get_positive_axis(axis, rt_input.shape.ndims)
    if axis == 0:
      # out[i_1, i_2, ..., i_N] = sum_{j} rt_input[j, i_1, i_2, ..., i_N]
      row_lengths = rt_input.row_splits[1:] - rt_input.row_splits[:-1]
      num_segments = math_ops.maximum(math_ops.reduce_max(row_lengths), 0)
      # NOTE: `range` here is this module's ragged range, not the builtin.
      segment_ids = range(row_lengths).values
      return _ragged_segment_aggregate(unsorted_segment_op, rt_input.values,
                                       segment_ids, num_segments, separator)
    elif axis == 1:
      # out[i_0, i_1, i_2, ..., i_N] = sum_{j} rt_input[i_0, j, i_2, ..., i_N]
      num_segments = array_ops.shape(rt_input.row_splits)[0] - 1
      segment_ids = segment_id_ops.row_splits_to_segment_ids(
          rt_input.row_splits)
      return _ragged_segment_aggregate(unsorted_segment_op, rt_input.values,
                                       segment_ids, num_segments, separator)
    else:
      # out[i_0, ..., i_[axis-1], i_axis+1], ..., i_N] =
      #     sum_{j} rt_input [i_0, ..., i_[axis-1], j, i_axis+1], ..., i_N]
      return rt_input.with_values(
          ragged_reduce_aggregate(reduce_op, unsorted_segment_op,
                                  rt_input.values, axis - 1, keepdims,
                                  separator))
def reduce_sum(input_tensor, axis=None, keepdims=None, name=None):
  """For docs, see: _RAGGED_REDUCE_DOCSTRING."""
  # Reformatted (one keyword per line) for consistency with the sibling
  # reduce_* wrappers below; behavior is unchanged.
  return ragged_reduce_aggregate(
      reduce_op=math_ops.reduce_sum,
      unsorted_segment_op=math_ops.unsorted_segment_sum,
      rt_input=input_tensor,
      axis=axis,
      keepdims=keepdims,
      name=(name or 'RaggedReduceSum'))
def reduce_prod(input_tensor, axis=None, keepdims=None, name=None):
  """For docs, see: _RAGGED_REDUCE_DOCSTRING."""
  op_name = name or 'RaggedReduceProd'
  return ragged_reduce_aggregate(math_ops.reduce_prod,
                                 math_ops.unsorted_segment_prod,
                                 input_tensor, axis, keepdims,
                                 name=op_name)
def reduce_min(input_tensor, axis=None, keepdims=None, name=None):
  """For docs, see: _RAGGED_REDUCE_DOCSTRING."""
  op_name = name or 'RaggedReduceMin'
  return ragged_reduce_aggregate(math_ops.reduce_min,
                                 math_ops.unsorted_segment_min,
                                 input_tensor, axis, keepdims,
                                 name=op_name)
def reduce_max(input_tensor, axis=None, keepdims=None, name=None):
  """For docs, see: _RAGGED_REDUCE_DOCSTRING."""
  op_name = name or 'RaggedReduceMax'
  return ragged_reduce_aggregate(math_ops.reduce_max,
                                 math_ops.unsorted_segment_max,
                                 input_tensor, axis, keepdims,
                                 name=op_name)
def reduce_mean(input_tensor, axis=None, keepdims=None, name=None):
  """For docs, see: _RAGGED_REDUCE_DOCSTRING."""
  with ops.name_scope(name, 'RaggedReduceMean', [input_tensor, axis]):
    summed = reduce_sum(input_tensor, axis, keepdims)
    # Reduce a same-structured tensor of ones to get the element counts.
    if ragged_tensor.is_ragged(input_tensor):
      ones = ragged_tensor.RaggedTensor.from_nested_row_splits(
          array_ops.ones_like(input_tensor.flat_values),
          input_tensor.nested_row_splits, validate=False)
    else:
      ones = array_ops.ones_like(input_tensor)
    counts = reduce_sum(ones, axis, keepdims)
    if not ragged_tensor.is_ragged(summed):
      return summed / counts
    return ragged_tensor.RaggedTensor.from_nested_row_splits(
        summed.flat_values / counts.flat_values, summed.nested_row_splits,
        validate=False)
def _cast(input_tensor, dtype):
return ragged_functional_ops.map_flat_values(math_ops.cast, input_tensor,
dtype)
def reduce_all(input_tensor, axis=None, keepdims=None, name=None):
  """For docs, see: _RAGGED_REDUCE_DOCSTRING."""
  # Implemented as a product of 0/1 ints: the product is 1 iff all are True.
  with ops.name_scope(name, 'RaggedReduceAll', [input_tensor, axis]):
    as_ints = _cast(input_tensor, dtypes.int32)
    product = reduce_prod(as_ints, axis, keepdims)
    return _cast(product, dtypes.bool)
def reduce_any(input_tensor, axis=None, keepdims=None, name=None):
  """For docs, see: _RAGGED_REDUCE_DOCSTRING."""
  # Implemented as a sum of 0/1 ints: the sum is nonzero iff any is True.
  with ops.name_scope(name, 'RaggedReduceAny', [input_tensor, axis]):
    as_ints = _cast(input_tensor, dtypes.int32)
    total = reduce_sum(as_ints, axis, keepdims)
    return _cast(total, dtypes.bool)
def _set_ragged_reduce_docstring(func, combination, combined, default, example):
func.__doc__ = _RAGGED_REDUCE_DOCSTRING % dict(
combination=combination,
combined=combined,
default=default,
example=example)
# Instantiate the shared docstring template for each of the reduce ops.
_set_ragged_reduce_docstring(reduce_sum, 'sum', 'summed', '0',
                             _RAGGED_REDUCE_SUM_EXAMPLE)
_set_ragged_reduce_docstring(reduce_prod, 'product', 'multiplied', '1',
                             _RAGGED_REDUCE_PROD_EXAMPLE)
_set_ragged_reduce_docstring(reduce_min, 'minimum', 'minimized',
                             '`input_tensor.dtype.min`',
                             _RAGGED_REDUCE_MIN_EXAMPLE)
_set_ragged_reduce_docstring(reduce_max, 'maximum', 'maximized',
                             '`input_tensor.dtype.max`',
                             _RAGGED_REDUCE_MAX_EXAMPLE)
_set_ragged_reduce_docstring(reduce_mean, 'mean', 'averaged', 'NaN',
                             _RAGGED_REDUCE_MEAN_EXAMPLE)
_set_ragged_reduce_docstring(reduce_all, 'logical and', 'and-ed', 'True',
                             _RAGGED_REDUCE_ALL_EXAMPLE)
_set_ragged_reduce_docstring(reduce_any, 'logical or', 'or-ed', 'False',
                             _RAGGED_REDUCE_ANY_EXAMPLE)
| |
# -*- coding: utf-8 -*-
"""
celery.utils
~~~~~~~~~~~~
Utility functions.
"""
from __future__ import absolute_import, print_function
import numbers
import os
import re
import socket
import sys
import traceback
import warnings
import datetime
from collections import Callable
from functools import partial, wraps
from inspect import getargspec
from pprint import pprint
from kombu.entity import Exchange, Queue
from celery.exceptions import CPendingDeprecationWarning, CDeprecationWarning
from celery.five import WhateverIO, items, reraise, string_t
# Public API of this module.  NOTE(review): some listed names (e.g. lpmerge,
# cry, jsonify) are defined later in the file, outside this excerpt.
__all__ = ['worker_direct', 'warn_deprecated', 'deprecated', 'lpmerge',
           'is_iterable', 'isatty', 'cry', 'maybe_reraise', 'strtobool',
           'jsonify', 'gen_task_name', 'nodename', 'nodesplit',
           'cached_property']
#: True when running on Python 3.
PY3 = sys.version_info[0] == 3
#: Message template used by :func:`warn_deprecated` when a feature is
#: only scheduled for (pending) deprecation.
PENDING_DEPRECATION_FMT = """
{description} is scheduled for deprecation in \
version {deprecation} and removal in version v{removal}. \
{alternative}
"""
#: Message template used by :func:`warn_deprecated` when a feature is
#: already deprecated.
DEPRECATION_FMT = """
{description} is deprecated and scheduled for removal in
version {removal}. {alternative}
"""
#: Billiard sets this when execv is enabled.
#: We use it to find out the name of the original ``__main__``
#: module, so that we can properly rewrite the name of the
#: task to be that of ``App.main``.
MP_MAIN_FILE = os.environ.get('MP_MAIN_FILE') or None
#: Exchange for worker direct queues.
WORKER_DIRECT_EXCHANGE = Exchange('C.dq')
#: Format for worker direct queue names.
WORKER_DIRECT_QUEUE_FORMAT = '{hostname}.dq'
#: Separator for worker node name and hostname.
NODENAME_SEP = '@'
#: Default node name prefix used when none is given.
NODENAME_DEFAULT = 'celery'
#: Matches single-letter ``%x``-style format expansions (e.g. ``%h``).
RE_FORMAT = re.compile(r'%(\w)')
def worker_direct(hostname):
    """Return :class:`kombu.Queue` that is a direct route to
    a worker by hostname.

    :param hostname: The fully qualified node name of a worker
                     (e.g. ``w1@example.com``).  If passed a
                     :class:`kombu.Queue` instance it will simply return
                     that instead.
    """
    # Already a queue?  Then there is nothing to build.
    if isinstance(hostname, Queue):
        return hostname
    queue_name = WORKER_DIRECT_QUEUE_FORMAT.format(hostname=hostname)
    return Queue(queue_name,
                 WORKER_DIRECT_EXCHANGE,
                 hostname, auto_delete=True)
def warn_deprecated(description=None, deprecation=None,
                    removal=None, alternative=None, stacklevel=2):
    """Emit a deprecation warning for *description*.

    When a *deprecation* version is given a
    ``CPendingDeprecationWarning`` is used, otherwise a
    ``CDeprecationWarning``.
    """
    ctx = {
        'description': description,
        'deprecation': deprecation,
        'removal': removal,
        'alternative': alternative,
    }
    if deprecation is not None:
        warning = CPendingDeprecationWarning(
            PENDING_DEPRECATION_FMT.format(**ctx))
    else:
        warning = CDeprecationWarning(DEPRECATION_FMT.format(**ctx))
    warnings.warn(warning, stacklevel=stacklevel)
def deprecated(deprecation=None, removal=None,
               alternative=None, description=None):
    """Decorator for deprecated functions.

    A deprecation warning will be emitted when the function is called.

    :keyword deprecation: Version that marks first deprecation, if this
        argument is not set a ``PendingDeprecationWarning`` will be emitted
        instead.
    :keyword removal: Future version when this feature will be removed.
    :keyword alternative: Instructions for an alternative solution (if any).
    :keyword description: Description of what is being deprecated.
    """
    # Capture the warning options once; ``description`` is resolved per
    # function since it defaults to the function's qualified name.
    warn_opts = dict(deprecation=deprecation, removal=removal,
                     alternative=alternative, stacklevel=3)

    def _inner(fun):

        @wraps(fun)
        def __inner(*args, **kwargs):
            from .imports import qualname
            warn_deprecated(description=description or qualname(fun),
                            **warn_opts)
            return fun(*args, **kwargs)
        return __inner
    return _inner
def deprecated_property(deprecation=None, removal=None,
                        alternative=None, description=None):
    """Decorator turning a getter into a property that warns on access.

    Accepts the same keyword arguments as :func:`deprecated`.
    """
    def _inner(fun):
        return _deprecated_property(
            fun,
            deprecation=deprecation,
            removal=removal,
            alternative=alternative,
            description=description or fun.__name__,
        )
    return _inner
class _deprecated_property(object):
    """Property-like descriptor that emits a deprecation warning on access.

    Mirrors the builtin :class:`property` (getter/setter/deleter), but each
    get/set/delete first calls :func:`warn_deprecated` with the options
    captured in ``depreinfo``.
    """

    def __init__(self, fget=None, fset=None, fdel=None, doc=None, **depreinfo):
        self.__get = fget
        self.__set = fset
        self.__del = fdel
        # Mirror the getter's metadata so the descriptor documents itself.
        self.__name__, self.__module__, self.__doc__ = (
            fget.__name__, fget.__module__, fget.__doc__,
        )
        self.depreinfo = depreinfo
        self.depreinfo.setdefault('stacklevel', 3)

    def __get__(self, obj, type=None):
        # Class-level access returns the descriptor itself.
        if obj is None:
            return self
        warn_deprecated(**self.depreinfo)
        return self.__get(obj)

    def __set__(self, obj, value):
        if obj is None:
            return self
        if self.__set is None:
            raise AttributeError('cannot set attribute')
        warn_deprecated(**self.depreinfo)
        self.__set(obj, value)

    def __delete__(self, obj):
        if obj is None:
            return self
        if self.__del is None:
            raise AttributeError('cannot delete attribute')
        warn_deprecated(**self.depreinfo)
        self.__del(obj)

    def setter(self, fset):
        # Return a new descriptor with the setter replaced.
        return self.__class__(self.__get, fset, self.__del, **self.depreinfo)

    def deleter(self, fdel):
        # Return a new descriptor with the deleter replaced.
        return self.__class__(self.__get, self.__set, fdel, **self.depreinfo)
def lpmerge(L, R):
    """In place left precedent dictionary merge.

    Keeps values from `L`, if the value in `R` is :const:`None`; i.e.
    values from `R` override values in `L` unless they are None.

    :returns: `L` (mutated in place).
    """
    # A plain loop instead of a side-effecting list comprehension, which
    # also avoids shadowing the builtin ``set``.
    for k, v in items(R):
        if v is not None:
            L[k] = v
    return L
def is_iterable(obj):
    """Return :const:`True` if *obj* supports iteration."""
    try:
        iter(obj)
        return True
    except TypeError:
        return False
def fun_takes_kwargs(fun, kwlist=[]):
    # deprecated
    # ``getargspec(fun)`` is always evaluated (it is the ``getattr``
    # default), even when ``fun.argspec`` exists.
    spec = getattr(fun, 'argspec', getargspec(fun))
    if spec.keywords is not None:
        # fun accepts **kwargs, so every requested keyword is supported.
        return kwlist
    return [kw for kw in kwlist if kw in spec.args]
def isatty(fh):
    """Return the result of ``fh.isatty()``, or :const:`None` when the
    object does not implement it."""
    try:
        return fh.isatty()
    except AttributeError:
        return None
def cry(out=None, sepchr='=', seplen=49):  # pragma: no cover
    """Return stacktrace of all active threads,
    taken from https://gist.github.com/737056."""
    import threading

    out = WhateverIO() if out is None else out
    say = partial(print, file=out)
    # Map thread idents to thread objects so names can be shown while
    # dumping each traceback.
    threads_by_id = dict((t.ident, t) for t in threading.enumerate())
    divider = sepchr * seplen
    for tid, frame in items(sys._current_frames()):
        thread = threads_by_id.get(tid)
        if thread is None:
            # skip old junk (left-overs from a fork)
            continue
        say('{0.name}'.format(thread))
        say(divider)
        traceback.print_stack(frame, file=out)
        say(divider)
        say('LOCAL VARIABLES')
        say(divider)
        pprint(frame.f_locals, stream=out)
        say('\n')
    return out.getvalue()
def maybe_reraise():
    """Re-raise if an exception is currently being handled, or return
    otherwise."""
    exc_type, exc_value, exc_tb = sys.exc_info()
    try:
        if exc_tb is not None:
            reraise(exc_type, exc_value, exc_tb)
    finally:
        # Drop the traceback reference to avoid a frame reference cycle,
        # see http://docs.python.org/library/sys.html#sys.exc_info
        del(exc_tb)
def strtobool(term, table={'false': False, 'no': False, '0': False,
                           'true': True, 'yes': True, '1': True,
                           'on': True, 'off': False}):
    """Convert common terms for true/false to bool
    (true/false/yes/no/on/off/1/0)."""
    # Non-string values pass through untouched.
    if not isinstance(term, string_t):
        return term
    try:
        return table[term.lower()]
    except KeyError:
        raise TypeError('Cannot coerce {0!r} to type bool'.format(term))
def jsonify(obj,
            builtin_types=(numbers.Real, string_t), key=None,
            keyfilter=None,
            unknown_type_filter=None):
    """Transforms object making it suitable for json serialization.

    Recurses into lists/tuples and dicts, converts date/time values to
    ISO-8601-style strings, and passes instances of *builtin_types*
    (and None) through unchanged.  Other types are passed to
    *unknown_type_filter* when given, otherwise :exc:`ValueError`
    is raised.  *key* is the parent mapping key (used in error messages);
    *keyfilter* optionally selects which dict keys to keep.
    """
    from kombu.abstract import Object as KombuDictType
    # Recursive call with all options pre-applied.
    _jsonify = partial(jsonify, builtin_types=builtin_types, key=key,
                       keyfilter=keyfilter,
                       unknown_type_filter=unknown_type_filter)
    if isinstance(obj, KombuDictType):
        obj = obj.as_dict(recurse=True)
    if obj is None or isinstance(obj, builtin_types):
        return obj
    elif isinstance(obj, (tuple, list)):
        return [_jsonify(v) for v in obj]
    elif isinstance(obj, dict):
        return dict((k, _jsonify(v, key=k))
                    for k, v in items(obj)
                    if (keyfilter(k) if keyfilter else 1))
    elif isinstance(obj, datetime.datetime):
        # See "Date Time String Format" in the ECMA-262 specification.
        r = obj.isoformat()
        if obj.microsecond:
            # Truncate microseconds to milliseconds.
            r = r[:23] + r[26:]
        if r.endswith('+00:00'):
            r = r[:-6] + 'Z'
        return r
    elif isinstance(obj, datetime.date):
        return obj.isoformat()
    elif isinstance(obj, datetime.time):
        r = obj.isoformat()
        if obj.microsecond:
            # Truncate to HH:MM:SS.mmm.
            r = r[:12]
        return r
    elif isinstance(obj, datetime.timedelta):
        return str(obj)
    else:
        if unknown_type_filter is None:
            raise ValueError(
                'Unsupported type: {0!r} {1!r} (parent: {2})'.format(
                    type(obj), obj, key))
        return unknown_type_filter(obj)
def gen_task_name(app, name, module_name):
    """Generate task name from name/module pair."""
    # Fix for manage.py shell_plus (Issue #366): the module may be missing.
    module = sys.modules.get(module_name)
    if module is not None:
        module_name = module.__name__
        # - If the task module is used as the __main__ script
        # - we need to rewrite the module part of the task name
        # - to match App.main.
        if MP_MAIN_FILE and module.__file__ == MP_MAIN_FILE:
            # - see comment about :envvar:`MP_MAIN_FILE` above.
            module_name = '__main__'
    if module_name == '__main__' and app.main:
        return '.'.join([app.main, name])
    return '.'.join(p for p in (module_name, name) if p)
def nodename(name, hostname):
    """Create node name from name/hostname pair."""
    return name + NODENAME_SEP + hostname
def anon_nodename(hostname=None, prefix='gen'):
    """Return a node name for this process, unique via the process pid."""
    process_name = prefix + str(os.getpid())
    return nodename(process_name, hostname or socket.gethostname())
def nodesplit(nodename):
    """Split node name into tuple of name/hostname."""
    # Only split on the first separator; hostnames may contain more.
    parts = nodename.split(NODENAME_SEP, 1)
    if len(parts) != 2:
        # No separator: the whole string is the hostname.
        return None, parts[0]
    return parts
def default_nodename(hostname):
    """Return a full node name, filling in defaults for missing parts."""
    name, host = nodesplit(hostname or '')
    return nodename(name or NODENAME_DEFAULT,
                    host or socket.gethostname())
def node_format(s, nodename, **extra):
    """Format *s* with host-format tokens, deriving host and name
    from *nodename*."""
    name, host = nodesplit(nodename)
    return host_format(s, host,
                       n=name or NODENAME_DEFAULT, **extra)
def _fmt_process_index(prefix='', default='0'):
    """Return the current worker process index with *prefix* applied,
    or *default* when no index is set."""
    from .log import current_process_index
    index = current_process_index()
    if index:
        return '{0}{1}'.format(prefix, index)
    return default
_fmt_process_index_with_prefix = partial(_fmt_process_index, '-', '')
def host_format(s, host=None, **extra):
    """Expand hostname-related format tokens in *s*
    (%h hostname, %n name, %d domain, %i/%I process index)."""
    host = host or socket.gethostname()
    name, _, domain = host.partition('.')
    keys = {
        'h': host,
        'n': name,
        'd': domain,
        'i': _fmt_process_index,
        'I': _fmt_process_index_with_prefix,
    }
    keys.update(extra)
    return simple_format(s, keys)
def simple_format(s, keys, pattern=RE_FORMAT, expand=r'\1'):
    """Expand ``%x`` style tokens in *s* from the *keys* mapping.

    Values in *keys* may be callables, which are invoked (without
    arguments) to produce the substitution.  Falsy *s* is returned
    unchanged.  ``%%`` expands to a literal ``%``.
    """
    if s:
        keys.setdefault('%', '%')

        def resolve(match):
            resolver = keys[match.expand(expand)]
            # Use the builtin ``callable()`` instead of
            # ``isinstance(resolver, Callable)``: the ``collections``
            # alias for the ABC was removed in Python 3.10.
            if callable(resolver):
                return resolver()
            return resolver

        return pattern.sub(resolve, s)
    return s
# ------------------------------------------------------------------------ #
# > XXX Compat
from .log import LOG_LEVELS # noqa
from .imports import ( # noqa
qualname as get_full_cls_name, symbol_by_name as get_cls_by_name,
instantiate, import_from_cwd
)
from .functional import chunks, noop # noqa
from kombu.utils import cached_property, kwdict, uuid # noqa
gen_unique_id = uuid
| |
'''
Created on 5 Nov 2015
@author: Sara
'''
class ParseInterOpMetrics(object):
    '''
    A series of methods to parse the InterOp binary files generated by the
    MiSeqReporter software.
    Supported versions of files are specified in Import.py (as this
    collection of functions is called by that script).
    All multi-byte values are decoded little-endian ("<").
    '''

    def __init__(self, filehandle):
        # Modules are stored on the instance so every method can reach them
        # through ``self`` (mirrors the original design of this class).
        import struct as s
        self.s = s
        import datetime as dt
        self.dt = dt
        import math
        self.math = math
        self.Y = 6   # Known byte offset of the Y length field in a record
        self.YV = 4  # Known size of the fixed block between Y's string and V
        self.file_handle = filehandle
        self.ENDIANNESS = "<"
        # struct format character -> size in bytes
        self.ENCODING_DICTIONARY = {"x": 1, "c": 1, "b": 1, "B": 1, "?": 1,
                                    "h": 2, "H": 2, "i": 4, "I": 4, "l": 4,
                                    "L": 4, "q": 8, "Q": 8, "f": 4, "d": 8,
                                    "s": 1, "p": 1, "P": 1}

    def open_file_to_bytearray(self, filehandle):
        '''
        For all supported binary InterOp files.
        Read the file named by *filehandle* and return its raw contents
        as a bytearray.
        '''
        values = bytearray()
        with open(filehandle, "rb") as f:
            # Read the whole file in one go instead of byte-by-byte.
            values.extend(f.read())
        return values

    def _read_u16(self, the_bytearray, start):
        '''
        Decode the little-endian unsigned 16-bit value beginning at byte
        *start* of *the_bytearray*.
        '''
        width = self.ENCODING_DICTIONARY["H"]
        return self.s.unpack_from(self.ENDIANNESS + "H",
                                  the_bytearray[start:start + width], 0)[0]

    def convert_bytes_index(self, the_bytearray, supported_version_number):
        '''
        For the IndexMetricsOut.bin file.
        Check that the first byte (the file version) matches
        *supported_version_number*, raising when it does not, and return
        the byte offset at which the record data starts.
        '''
        for index in range(len(the_bytearray)):
            byte_start = index  # where the next readout will start from
            if index < 1:
                value = self.s.unpack_from(self.ENDIANNESS + "B",
                                           the_bytearray, index)
                if value[0] != supported_version_number:
                    raise Exception("Unsupported file version")
            else:
                return byte_start

    def get_Y(self, readout_start, the_bytearray, offset=0):
        '''
        For the IndexMetricsOut.bin file.
        Return the length of the first variable-length string (Y in the
        Illumina specification) for the record starting at
        *offset* + *readout_start* in *the_bytearray*.
        '''
        base = offset + readout_start
        return self._read_u16(the_bytearray, base + self.Y)

    def get_V(self, readout_start, the_bytearray, offset=0):
        '''
        For the IndexMetricsOut.bin file.
        Return the length of the second variable-length string (V in the
        Illumina specification) for the record starting at
        *offset* + *readout_start* in *the_bytearray*.
        '''
        base = offset + readout_start
        width = self.ENCODING_DICTIONARY["H"]
        Y = self._read_u16(the_bytearray, base + self.Y)
        ind_V = self.Y + width + Y + self.YV
        return self._read_u16(the_bytearray, base + ind_V)

    def get_W(self, readout_start, the_bytearray, offset=0):
        '''
        For the IndexMetricsOut.bin file.
        Return the length of the third variable-length string (W in the
        Illumina specification) for the record starting at
        *offset* + *readout_start* in *the_bytearray*.
        '''
        base = offset + readout_start
        width = self.ENCODING_DICTIONARY["H"]
        Y = self._read_u16(the_bytearray, base + self.Y)
        ind_V = self.Y + width + Y + self.YV
        V = self._read_u16(the_bytearray, base + ind_V)
        ind_W = ind_V + width + V
        return self._read_u16(the_bytearray, base + ind_W)

    def get_entry_len_ind(self, readout_start, the_bytearray, offset=0):
        '''
        For the IndexMetricsOut.bin file.
        Records are variable length; return the total length in bytes of
        the record starting at *offset* + *readout_start*, accounting for
        all three variable-length strings (Y, V, W).
        '''
        base = offset + readout_start
        width = self.ENCODING_DICTIONARY["H"]
        Y = self._read_u16(the_bytearray, base + self.Y)
        ind_V = self.Y + width + Y + self.YV
        V = self._read_u16(the_bytearray, base + ind_V)
        ind_W = ind_V + width + V
        W = self._read_u16(the_bytearray, base + ind_W)
        return ind_W + width + W

    def get_array_segment(self, readout_start, the_bytearray, entry_length,
                          offset):
        '''
        For all supported InterOp files.
        Return the slice of *the_bytearray* holding a single record:
        *entry_length* bytes starting at *readout_start* + *offset*.
        '''
        byte_start = readout_start + offset
        return the_bytearray[byte_start:(byte_start + entry_length)]

    def get_encoding_string_var(self, Y, V, W, encoding):
        '''
        For the IndexMetricsOut.bin file.
        Expand the struct *encoding* string so each variable-length string
        field has the correct number of 's' character specifiers.  Each 's'
        in *encoding* replaces the length-specifier letter immediately
        before it with Y, V or W (in order) repetitions of 's'.
        Note: *encoding* must contain at least one 's'.
        '''
        encoding_constants = (Y, V, W)
        let = []
        count = 0
        for letter in encoding:
            if letter != 's':
                let.append(letter)
            else:
                # Drop the preceding length specifier and expand the string
                # field to its actual number of characters.
                let.pop()
                let.append(letter * encoding_constants[count])
                encoding_string = "".join(let)
                count += 1
        return encoding_string

    def get_values_simple(self, encoding_string, array_segment):
        '''
        For all supported InterOp files.
        De-encode *array_segment* according to *encoding_string* and
        return the unpacked tuple of values.
        '''
        return self.s.unpack_from(self.ENDIANNESS + encoding_string,
                                  array_segment)

    def handle_nan(self, lst):
        '''
        For all supported InterOp files.
        Return a copy of *lst* (a de-encoded record) with NaN entries
        replaced by 0.
        '''
        new_list = []
        for entry in lst:
            new_list.append(0 if self.math.isnan(entry) else entry)
        return new_list

    def get_datetime(self, encoding_string, array_segment):
        '''
        For the ExtractionMetricsOut.bin file.
        The datetime entry begins after all the fields described by
        *encoding_string*; decode it and return the date and time (in the
        format produced by the datetime library) as a string.
        '''
        # Total size of the fixed-width fields preceding the datetime.
        size = 0
        for enc in encoding_string:
            size += self.ENCODING_DICTIONARY.get(enc, None)
        dt_bytes = array_segment[size:len(array_segment)]
        # Unpack bits LSB-first per byte; the whole bit string is reversed
        # afterwards because the value is stored little-endian.
        bitlst = []
        for b in dt_bytes:
            for i in range(8):
                bitlst.append((b >> i) & 1)
        # Clear the two most-significant bits: they are not part of the
        # tick count (presumably the .NET DateTimeKind flags - TODO confirm).
        bitlst[((len(dt_bytes) * 8) - 1)] = 0
        bitlst[((len(dt_bytes) * 8) - 2)] = 0
        binary = ''.join(str(c) for c in bitlst)[::-1]
        ticks = int(binary, 2)
        # Ticks count 100ns intervals since 0001-01-01.
        datetime = self.dt.datetime(1, 1, 1) + \
            self.dt.timedelta(microseconds=ticks / 10)
        return str(datetime)

    def get_s(self, encoding):
        '''
        For the IndexMetricsOut.bin file.
        Return the indices of the first 's' of each run of 's' specifiers
        in *encoding*.  Expects the output of get_encoding_string_var,
        NOT the raw encoding string.
        '''
        s_indices = []
        for index, letter in enumerate(encoding):
            # Index of the first s of each run only.
            if letter == 's' and encoding[index - 1] != 's':
                s_indices.append(index)
        return s_indices

    def get_formatted_values(self, Y, V, W, s_indices, raw_result):
        '''
        For the IndexMetricsOut.bin file.
        Join together the individual characters comprising each of the
        variable-length strings (they are de-encoded one character per
        entry), dropping the entries that encode the string lengths, and
        return the cleaned-up record as a list.
        *s_indices* is the output of get_s; Y, V and W are the string
        lengths for this record; *raw_result* is the raw struct output.
        '''
        formatted = []
        # Up to (but excluding) the length field preceding the first string.
        for entry in raw_result[0:(s_indices[0] - 1)]:
            formatted.append(entry)
        formatted.append("".join(raw_result[s_indices[0]:((s_indices[0]) + Y)]))
        for entry in raw_result[((s_indices[0]) + Y):(s_indices[1] - 1)]:
            formatted.append(entry)
        formatted.append("".join(raw_result[s_indices[1]:((s_indices[1]) + V)]))
        if len(s_indices) < 3:
            # No third string present in this record; store a placeholder.
            formatted.append(' ')
        else:
            for entry in raw_result[((s_indices[1]) + V):(s_indices[2] - 1)]:
                formatted.append(entry)
            formatted.append(
                "".join(raw_result[s_indices[2]:((s_indices[2]) + W)]))
        return formatted

    def convert_bytes(self, the_bytearray, supported_version_number):
        '''
        For all binary files supported except the IndexMetricsOut.bin file.
        Check that the first byte (the file version) matches
        *supported_version_number*, raising when it does not, and return
        the byte offset at which the record data starts (byte 0 is the
        version, byte 1 the record length).
        '''
        for index in range(len(the_bytearray)):
            byte_start = index  # where the next readout will start from
            if index < 2:  # FIX HERE SEE OTHER PARSER
                value = self.s.unpack_from(self.ENDIANNESS + "B",
                                           the_bytearray, index)
                if (index == 0) and value[0] != supported_version_number:
                    raise Exception("Unsupported file version")
            else:
                return byte_start

    def get_entry_len(self, the_bytearray):
        '''
        For all binary files supported except the IndexMetricsOut.bin file.
        Return the per-record length, which is stored in byte 1 of the
        file (byte 0 holds the version).  Returns None when the array is
        too short to contain it.
        '''
        if len(the_bytearray) > 1:
            value = self.s.unpack_from(self.ENDIANNESS + "B",
                                       the_bytearray, 1)
            return value[0]
| |
# Copyright 2014, Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain
# rights in this software.
from __future__ import division
import io
import collections
import difflib
import json
import nose.tools
import numbers
import numpy
import os
import re
import sys
import xml.etree.ElementTree as xml
import toyplot
import toyplot.color
import toyplot.compatibility
import toyplot.data
import toyplot.html
import toyplot.locator
import toyplot.svg
try:
import toyplot.pdf
except:
pass
try:
import toyplot.png
except:
pass
try:
import toyplot.cairo.eps
except:
pass
try:
import toyplot.cairo.pdf
except:
pass
try:
import toyplot.cairo.png
except:
pass
try:
import toyplot.qt.pdf
except:
pass
try:
import toyplot.qt.png
except:
pass
##########################################################################
# Test fixtures.
def assert_color_equal(a, b):
    """Assert that color record *a* matches the rgba tuple *b* to within
    floating-point tolerance."""
    observed = (a["r"], a["g"], a["b"], a["a"])
    numpy.testing.assert_array_almost_equal(observed, b)
def assert_colors_equal(a, b):
    """Assert that two sequences of colors match element-wise."""
    for observed, expected in zip(a, b):
        assert_color_equal(observed, expected)
def assert_masked_array(a, dtype, b, mask):
    """Assert that *a* is a numpy MaskedArray with the given *dtype*,
    values *b*, and mask *mask*."""
    nose.tools.assert_is_instance(a, numpy.ma.MaskedArray)
    nose.tools.assert_equal(a.dtype, dtype)
    numpy.testing.assert_array_equal(a, b)
    numpy.testing.assert_array_equal(a.mask, mask)
def json_comparison_string(o):
    """Convert a Python object to a JSON string representation that can be used for comparison.

    Limits the precision of floating-point numbers.
    """
    if o is None:
        return "null"
    if isinstance(o, toyplot.compatibility.string_type):
        return "\"" + o + "\""
    if isinstance(o, numbers.Integral):
        return str(o)
    if isinstance(o, numbers.Real):
        # Clamp to 9 significant digits for stable comparisons.
        return "%.9g" % o
    if isinstance(o, collections.Sequence):
        return "[" + ",".join(json_comparison_string(i) for i in o) + "]"
    if isinstance(o, collections.Mapping):
        pairs = ("\"" + key + "\":" + json_comparison_string(value)
                 for key, value in o.items())
        return "{" + ",".join(pairs) + "}"
    raise Exception("Unexpected value: %s" % o)
def xml_comparison_string(element):
    """Convert an XML element to a pretty string representation that can be used for comparison.

    Filters-out elements and attributes (like id) that shouldn't be compared,
    and limits the precision of floating-point numbers.
    """
    def format_value(value):
        # Clamp numeric text to 9 significant digits; pass other text through.
        try:
            return "%.9g" % float(value)
        except:
            return value

    def write_element(element, buffer, indent):
        buffer.write(u"%s<%s" % (indent, element.tag))
        for key, value in element.items():
            # Skip attributes that vary between renders.
            if key in ["id", "clip-path"]:
                continue
            # Path/transform/points data is numeric: normalize each number.
            if key == "d" and element.tag == "{http://www.w3.org/2000/svg}path":
                buffer.write(
                    u" %s='%s'" % (key, " ".join([format_value(d) for d in value.split(" ")])))
            elif key == "transform":
                buffer.write(u" %s='%s'" % (
                    key, "".join([format_value(d) for d in re.split("(,|\(|\))", value)])))
            elif key == "points" and element.tag == "{http://www.w3.org/2000/svg}polygon":
                buffer.write(u" %s='%s'" % (key, " ".join(
                    [",".join([format_value(i) for i in p.split(",")]) for p in value.split(" ")])))
            else:
                buffer.write(u" %s='%s'" % (key, format_value(value)))
        text = element.text if element.text is not None else ""
        # Embedded JSON payloads are normalized through json_comparison_string.
        if element.tag in [
                "{http://www.sandia.gov/toyplot}data-table",
                "{http://www.sandia.gov/toyplot}axes"]:
            text = str(json_comparison_string(json.loads(element.text)))
        buffer.write(u">%s\n" % text)
        for child in list(element):
            write_element(child, buffer, indent + " ")
        buffer.write(u"%s</%s>\n" % (indent, element.tag))
    buffer = io.StringIO()
    write_element(element, buffer, indent="")
    return buffer.getvalue()
def assert_canvas_matches(canvas, name):
    """Render *canvas* with every available backend and compare its SVG
    representation against the stored reference file for *name*.

    Creates the reference on first run; on mismatch, writes diff and
    failure artifacts under tests/diffs and tests/failed before raising
    :exc:`AssertionError`.
    """
    # Render every representation of the canvas for coverage ...
    html = io.BytesIO()
    toyplot.html.render(canvas, html)
    svg = io.BytesIO()
    toyplot.svg.render(canvas, svg)
    # Optional backends are only exercised if their imports succeeded above.
    for module in ["toyplot.pdf", "toyplot.png", "toyplot.cairo.eps", "toyplot.cairo.pdf", "toyplot.cairo.png", "toyplot.qt.pdf", "toyplot.qt.png"]:
        if module in sys.modules:
            buffer = io.BytesIO()
            sys.modules[module].render(canvas, buffer)
    # Get rid of any past failures ...
    if os.path.exists("tests/diffs/%s.svg" % name):
        os.remove("tests/diffs/%s.svg" % name)
    if os.path.exists("tests/diffs/%s.reference.svg" % name):
        os.remove("tests/diffs/%s.reference.svg" % name)
    if os.path.exists("tests/failed/%s.svg" % name):
        os.remove("tests/failed/%s.svg" % name)
    # If there's no stored SVG reference for this canvas, create one ...
    if not os.path.exists("tests/reference/%s.svg" % name):
        with open("tests/reference/%s.svg" % name, "wb") as file:
            file.write(svg.getvalue())
        raise AssertionError(
            "Created new reference file tests/reference/%s.svg ... you should verify its contents before re-running the test." %
            name)
    # Compare the SVG representation of the canvas to the SVG reference ...
    svg_dom = xml.fromstring(svg.getvalue())
    reference_dom = xml.parse("tests/reference/%s.svg" % name).getroot()
    # Comparison is done on normalized strings (ids stripped, floats rounded).
    svg_string = xml_comparison_string(svg_dom)
    reference_string = xml_comparison_string(reference_dom)
    try:
        if svg_string != reference_string:
            raise Exception(
                "\n".join(
                    list(
                        difflib.context_diff(
                            svg_string.split("\n"),
                            reference_string.split("\n"),
                            lineterm="",
                            fromfile="test svg",
                            tofile="reference svg"))))
    except Exception as e:
        # Persist both sides of the comparison plus the raw failing SVG
        # so the failure can be inspected after the test run.
        if not os.path.exists("tests/diffs"):
            os.mkdir("tests/diffs")
        with open("tests/diffs/%s.svg" % name, "wb") as file:
            file.write(svg_string)
        with open("tests/diffs/%s.reference.svg" % name, "wb") as file:
            file.write(reference_string)
        if not os.path.exists("tests/failed"):
            os.mkdir("tests/failed")
        with open("tests/failed/%s.svg" % name, "wb") as file:
            file.write(svg.getvalue())
        raise AssertionError(
            "Test output tests/failed/%s.svg doesn't match tests/reference/%s.svg:\n%s" %
            (name, name, e))
def assert_html_matches(html, name):
    """Compare *html* against the stored reference file for *name*,
    creating the reference on first run and writing failures to
    tests/failed."""
    reference_file = "tests/reference/%s.html" % name
    test_file = "tests/failed/%s.html" % name
    # Clear any past failure artifact before comparing.
    if os.path.exists(test_file):
        os.remove(test_file)
    if not os.path.exists(reference_file):
        # First run: record the reference and force manual verification.
        with open(reference_file, "wb") as file:
            file.write(html)
        raise AssertionError(
            "Created new reference file %s. You should verify its contents before re-running the test." %
            (reference_file))
    reference_html = open(reference_file, "rb").read()
    if html != reference_html:
        if not os.path.exists("tests/failed"):
            os.mkdir("tests/failed")
        with open(test_file, "wb") as file:
            file.write(html)
        raise AssertionError(
            "Test output %s doesn't match %s." %
            (test_file, reference_file))
##########################################################################
# Test test fixtures.
def test_xml_comparison_string():
    """The normalized XML string strips ids and rounds float attributes."""
    cases = [
        ("<svg/>", "<svg>\n</svg>\n"),
        ("<svg><a/></svg>", "<svg>\n <a>\n </a>\n</svg>\n"),
        ("<svg><a>foo</a></svg>", "<svg>\n <a>foo\n </a>\n</svg>\n"),
        ("<svg><a b='c'>foo</a></svg>",
         "<svg>\n <a b='c'>foo\n </a>\n</svg>\n"),
        ("<svg><a b='.333333333333333'>foo</a></svg>",
         "<svg>\n <a b='0.333333333'>foo\n </a>\n</svg>\n"),
        ("<svg><a b='.666666666666666'>foo</a></svg>",
         "<svg>\n <a b='0.666666667'>foo\n </a>\n</svg>\n"),
        ("<svg><a id='1234'/></svg>", "<svg>\n <a>\n </a>\n</svg>\n"),
    ]
    for markup, expected in cases:
        nose.tools.assert_equal(
            xml_comparison_string(xml.fromstring(markup)), expected)
##########################################################################
# toyplot
def test_require_style():
    """toyplot.require.style passes None and dicts through, rejects others."""
    for accepted in (None, {}, {"stroke": toyplot.color.near_black}):
        nose.tools.assert_equal(toyplot.require.style(accepted), accepted)
    for rejected in ([], ""):
        with nose.tools.assert_raises(ValueError):
            toyplot.require.style(rejected)
def test_style_combine():
    """toyplot.style.combine merges dicts left-to-right, ignoring None."""
    cases = [
        ((None,), {}),
        (({},), {}),
        (({"a": "b"}, None), {"a": "b"}),
        (({"a": "b"}, {}), {"a": "b"}),
        (({"a": "b"}, {"c": "d"}), {"a": "b", "c": "d"}),
        (({"a": "b"}, {"a": "d"}), {"a": "d"}),
    ]
    for args, expected in cases:
        nose.tools.assert_equal(toyplot.style.combine(*args), expected)
def test_require_scalar():
    """toyplot.require.scalar accepts numbers and rejects strings."""
    for value in (1, 1.2):
        nose.tools.assert_equal(toyplot.require.scalar(value), value)
    with nose.tools.assert_raises(ValueError):
        toyplot.require.scalar("foo")
def test_require_scalar_array():
assert_masked_array(
toyplot.require.scalar_array(1), "float64", [1], [False])
assert_masked_array(
toyplot.require.scalar_array(1.1), "float64", [1.1], [False])
assert_masked_array(
toyplot.require.scalar_array("1.2"), "float64", [1.2], [False])
assert_masked_array(toyplot.require.scalar_array(
[1, 2, 3]), "float64", [1, 2, 3], [False, False, False])
assert_masked_array(toyplot.require.scalar_array(
["1", "2", 3]), "float64", [1, 2, 3], [False, False, False])
assert_masked_array(toyplot.require.scalar_array([[1, 2], [3, 4]]), "float64", [
[1, 2], [3, 4]], [[False, False], [False, False]])
assert_masked_array(toyplot.require.scalar_array(
numpy.array([1, 2, 3])), "float64", [1, 2, 3], [False, False, False])
assert_masked_array(
toyplot.require.scalar_array(
numpy.ma.array(
[
1, 2, 3], mask=[
False, True, False])), "float64", [
1, 2, 3], [
False, True, False])
assert_masked_array(
toyplot.require.scalar_array(
numpy.array(
[
1, numpy.nan, 3])), "float64", [
1, numpy.nan, 3], [
False, True, False])
assert_masked_array(
toyplot.require.scalar_array(
numpy.ma.array(
[
1, numpy.nan, 3], mask=[
False, False, True])), "float64", [
1, numpy.nan, 3], [
False, True, True])
with nose.tools.assert_raises(ValueError):
toyplot.require.scalar_array("foo")
with nose.tools.assert_raises(ValueError):
toyplot.require.scalar_array(["1", "foo"])
def test_require_scalar_vector():
numpy.testing.assert_array_equal(toyplot.require.scalar_vector(1), [1])
numpy.testing.assert_array_equal(
toyplot.require.scalar_vector([1, 2, 3]), [1, 2, 3])
numpy.testing.assert_array_equal(
toyplot.require.scalar_vector([1, 2, 3], length=3), [1, 2, 3])
with nose.tools.assert_raises(ValueError):
toyplot.require.scalar_vector(1, length=3)
with nose.tools.assert_raises(ValueError):
toyplot.require.scalar_vector([1, 2, 3], length=2)
with nose.tools.assert_raises(ValueError):
toyplot.require.scalar_vector([[1, 2], [3, 4]])
def test_require_scalar_matrix():
with nose.tools.assert_raises(ValueError):
toyplot.require.scalar_matrix(1)
with nose.tools.assert_raises(ValueError):
toyplot.require.scalar_matrix([1, 2, 3])
numpy.testing.assert_array_equal(
toyplot.require.scalar_matrix([[1, 2], [3, 4]]), [[1, 2], [3, 4]])
with nose.tools.assert_raises(ValueError):
toyplot.require.scalar_matrix([[1, 2], [3, 4]], rows=3)
with nose.tools.assert_raises(ValueError):
toyplot.require.scalar_matrix([[1, 2], [3, 4]], columns=3)
with nose.tools.assert_raises(ValueError):
toyplot.require.scalar_matrix([[[1, 2], [3, 4]]])
def test_require_string():
nose.tools.assert_equal(toyplot.require.string("foo"), "foo")
nose.tools.assert_equal(toyplot.require.string(u"foo"), u"foo")
with nose.tools.assert_raises(ValueError):
nose.tools.assert_equal(toyplot.require.string(None), None)
with nose.tools.assert_raises(ValueError):
nose.tools.assert_equal(toyplot.require.string(1), 1)
def test_require_string_vector():
numpy.testing.assert_array_equal(toyplot.require.string_vector("a"), ["a"])
numpy.testing.assert_array_equal(
toyplot.require.string_vector(["a", "b", "c"]), ["a", "b", "c"])
numpy.testing.assert_array_equal(
toyplot.require.string_vector([1, 2, 3]), ["1", "2", "3"])
def test_require_optional_string():
nose.tools.assert_equal(toyplot.require.optional_string("foo"), "foo")
nose.tools.assert_equal(toyplot.require.optional_string(u"foo"), u"foo")
nose.tools.assert_equal(toyplot.require.optional_string(None), None)
with nose.tools.assert_raises(ValueError):
nose.tools.assert_equal(toyplot.require.optional_string(1), 1)
def test_require_marker_array():
numpy.testing.assert_array_equal(
toyplot.require.marker_array("foo"), ["foo"])
numpy.testing.assert_array_equal(
toyplot.require.marker_array(["foo", "bar"]), ["foo", "bar"])
numpy.testing.assert_array_equal(
toyplot.require.marker_array([1, 2]), [1, 2])
numpy.testing.assert_array_equal(
toyplot.require.marker_array([1, 2, "foo"]), [1, 2, "foo"])
with nose.tools.assert_raises(ValueError):
toyplot.require.marker_array(["foo", "bar"], 3)
def test_broadcast_scalar():
numpy.testing.assert_equal(toyplot.broadcast.scalar(1, 3), [1, 1, 1])
numpy.testing.assert_equal(
toyplot.broadcast.scalar(1, (3, 3)), [[1, 1, 1], [1, 1, 1], [1, 1, 1]])
numpy.testing.assert_equal(
toyplot.broadcast.scalar([1, 2, 3], 3), [1, 2, 3])
numpy.testing.assert_equal(
toyplot.broadcast.scalar([1, 2, 3], (3, 3)), [[1, 2, 3], [1, 2, 3], [1, 2, 3]])
numpy.testing.assert_equal(toyplot.broadcast.scalar(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], (3, 3)), [[1, 2, 3], [4, 5, 6], [7, 8, 9]])
numpy.testing.assert_equal(
toyplot.broadcast.scalar([1, 2, 3], (3, 1)), [[1], [2], [3]])
def test_broadcast_string():
numpy.testing.assert_equal(
toyplot.broadcast.string("1", 3), ["1", "1", "1"])
numpy.testing.assert_equal(toyplot.broadcast.string(
"1", (3, 3)), [["1", "1", "1"], ["1", "1", "1"], ["1", "1", "1"]])
numpy.testing.assert_equal(
toyplot.broadcast.string(["1", "2", "3"], 3), ["1", "2", "3"])
numpy.testing.assert_equal(toyplot.broadcast.string(
["1", "2", "3"], (3, 3)), [["1", "2", "3"], ["1", "2", "3"], ["1", "2", "3"]])
numpy.testing.assert_equal(toyplot.broadcast.string([["1", "2", "3"], ["4", "5", "6"], [
"7", "8", "9"]], (3, 3)), [["1", "2", "3"], ["4", "5", "6"], ["7", "8", "9"]])
numpy.testing.assert_equal(
toyplot.broadcast.string(["1", "2", "3"], (3, 1)), [["1"], ["2"], ["3"]])
def test_axes_coordinates_show():
    """The interactive coordinates display can be hidden and read back."""
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.coordinates.show = False
    nose.tools.assert_equal(axes.coordinates.show, False)
    assert_canvas_matches(canvas, "axes-coordinates-show")
def test_axes_coordinates_style():
    """Coordinates-display style is settable and readable."""
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.coordinates.style = {"stroke": "red", "fill": "ivory"}
    nose.tools.assert_equal(axes.coordinates.style["stroke"], "red")
    assert_canvas_matches(canvas, "axes-coordinates-style")
def test_axes_coordinates_label():
    """Coordinates-label style is settable and readable."""
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.coordinates.label.style = {"fill": "red"}
    nose.tools.assert_equal(axes.coordinates.label.style["fill"], "red")
    assert_canvas_matches(canvas, "axes-coordinates-label")
def test_axes_tick_titles():
    """Explicit tick locators can attach per-tick titles on both axes."""
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.x.ticks.locator = toyplot.locator.Explicit(
        locations=[-0.5, 0, 0.5], titles=["Foo", "Bar", "Baz"])
    axes.y.ticks.locator = toyplot.locator.Explicit(
        locations=[-0.5, 0, 0.5], titles=["Red", "Green", "Blue"])
    assert_canvas_matches(canvas, "axes-tick-titles")
def test_axes_colorbar():
    """Adding a default colorbar to labeled axes renders as expected."""
    canvas = toyplot.Canvas()
    axes = canvas.axes(label="Axes Label", xlabel="X Label", ylabel="Y Label")
    # Creating the colorbar is the behavior under test; the return value was
    # previously bound to an unused local (flake8 F841), now discarded.
    axes.colorbar()
    assert_canvas_matches(canvas, "axes-colorbar")
def test_axes_colorbar_data():
    """A colorbar mapped over data with an explicit palette renders as expected."""
    canvas = toyplot.Canvas()
    axes = canvas.axes(label="Axes Label", xlabel="X Label", ylabel="Y Label")
    # Creating the colorbar is the behavior under test; the return value was
    # previously bound to an unused local (flake8 F841), now discarded.
    axes.colorbar(
        numpy.arange(100), palette=toyplot.color.brewer("BlueYellowRed"))
    assert_canvas_matches(canvas, "axes-colorbar-ticks-data")
def test_axes_colorbar_domain():
    """Colorbar domain min/max are settable and readable."""
    canvas = toyplot.Canvas()
    axes = canvas.axes(label="Axes Label", xlabel="X Label", ylabel="Y Label")
    colorbar = axes.colorbar(palette=toyplot.color.brewer("BlueYellowRed"))
    colorbar.domain.min = 0
    nose.tools.assert_equal(colorbar.domain.min, 0)
    colorbar.domain.max = 1
    nose.tools.assert_equal(colorbar.domain.max, 1)
    assert_canvas_matches(canvas, "axes-colorbar-domain")
def test_axes_colorbar_label():
    """Colorbar label text and style are settable and readable."""
    canvas = toyplot.Canvas()
    axes = canvas.axes(label="Axes Label", xlabel="X Label", ylabel="Y Label")
    colorbar = axes.colorbar(palette=toyplot.color.brewer("BlueYellowRed"))
    colorbar.label.text = "Colorbar Label"
    nose.tools.assert_equal(colorbar.label.text, "Colorbar Label")
    colorbar.label.style = {"fill": "red"}
    nose.tools.assert_equal(colorbar.label.style["fill"], "red")
    assert_canvas_matches(canvas, "axes-colorbar-label")
def test_axes_colorbar_ticks_show():
    """Colorbar ticks can be enabled."""
    canvas = toyplot.Canvas()
    axes = canvas.axes(label="Axes Label", xlabel="X Label", ylabel="Y Label")
    colorbar = axes.colorbar(palette=toyplot.color.brewer("BlueYellowRed"))
    colorbar.ticks.show = True
    nose.tools.assert_equal(colorbar.ticks.show, True)
    assert_canvas_matches(canvas, "axes-colorbar-ticks-show")
def test_axes_colorbar_ticks_length():
    """Colorbar tick length is settable and readable."""
    canvas = toyplot.Canvas()
    axes = canvas.axes(label="Axes Label", xlabel="X Label", ylabel="Y Label")
    colorbar = axes.colorbar(palette=toyplot.color.brewer("BlueYellowRed"))
    colorbar.ticks.show = True
    colorbar.ticks.length = 20
    nose.tools.assert_equal(colorbar.ticks.length, 20)
    assert_canvas_matches(canvas, "axes-colorbar-ticks-length")
def test_axes_colorbar_ticks_style():
    """Colorbar tick style is settable and readable."""
    canvas = toyplot.Canvas()
    axes = canvas.axes(label="Axes Label", xlabel="X Label", ylabel="Y Label")
    colorbar = axes.colorbar(palette=toyplot.color.brewer("BlueYellowRed"))
    colorbar.ticks.show = True
    colorbar.ticks.style = {"stroke": "red"}
    nose.tools.assert_equal(colorbar.ticks.style["stroke"], "red")
    assert_canvas_matches(canvas, "axes-colorbar-ticks-style")
def test_axes_colorbar_ticks_locator():
    """An explicit locator can drive colorbar tick placement and titles."""
    canvas = toyplot.Canvas()
    axes = canvas.axes(label="Axes Label", xlabel="X Label", ylabel="Y Label")
    colorbar = axes.colorbar(palette=toyplot.color.brewer("BlueYellowRed"))
    colorbar.ticks.show = True
    locator = toyplot.locator.Explicit(
        locations=[-0.5, 0.0, 0.5], titles=["blue", "yellow", "red"])
    colorbar.ticks.locator = locator
    # Identity check: the exact locator instance must be stored.
    nose.tools.assert_is(colorbar.ticks.locator, locator)
    assert_canvas_matches(canvas, "axes-colorbar-ticks-locator")
def test_axes_colorbar_ticks_tick_index_style():
    """Individual colorbar ticks can be styled by index."""
    canvas = toyplot.Canvas()
    axes = canvas.axes(label="Axes Label", xlabel="X Label", ylabel="Y Label")
    colorbar = axes.colorbar(palette=toyplot.color.brewer("BlueYellowRed"))
    colorbar.ticks.show = True
    colorbar.ticks.tick(index=1).style = {"stroke": "red"}
    nose.tools.assert_equal(
        colorbar.ticks.tick(index=1).style["stroke"], "red")
    assert_canvas_matches(canvas, "axes-colorbar-ticks-tick-index-style")
def test_axes_colorbar_ticks_tick_value_style():
    """Individual colorbar ticks can be styled by domain value."""
    canvas = toyplot.Canvas()
    axes = canvas.axes(label="Axes Label", xlabel="X Label", ylabel="Y Label")
    colorbar = axes.colorbar(palette=toyplot.color.brewer("BlueYellowRed"))
    colorbar.ticks.show = True
    colorbar.ticks.tick(value=0.5).style = {"stroke": "red"}
    nose.tools.assert_equal(
        colorbar.ticks.tick(value=0.5).style["stroke"], "red")
    assert_canvas_matches(canvas, "axes-colorbar-ticks-tick-value-style")
def test_axes_colorbar_ticks_labels_show():
    """Colorbar tick labels can be hidden."""
    canvas = toyplot.Canvas()
    axes = canvas.axes(label="Axes Label", xlabel="X Label", ylabel="Y Label")
    colorbar = axes.colorbar(palette=toyplot.color.brewer("BlueYellowRed"))
    colorbar.ticks.labels.show = False
    nose.tools.assert_equal(colorbar.ticks.labels.show, False)
    assert_canvas_matches(canvas, "axes-colorbar-ticks-labels-show")
def test_axes_colorbar_ticks_labels_style():
    """Colorbar tick-label style is settable and readable."""
    canvas = toyplot.Canvas()
    axes = canvas.axes(label="Axes Label", xlabel="X Label", ylabel="Y Label")
    colorbar = axes.colorbar(palette=toyplot.color.brewer("BlueYellowRed"))
    colorbar.ticks.labels.style = {"fill": "red"}
    nose.tools.assert_equal(colorbar.ticks.labels.style["fill"], "red")
    assert_canvas_matches(canvas, "axes-colorbar-ticks-labels-style")
def test_axes_colorbar_ticks_labels_label_index_style():
    """Individual colorbar tick labels can be styled by index."""
    canvas = toyplot.Canvas()
    axes = canvas.axes(label="Axes Label", xlabel="X Label", ylabel="Y Label")
    colorbar = axes.colorbar(palette=toyplot.color.brewer("BlueYellowRed"))
    colorbar.ticks.labels.label(index=1).style = {"fill": "red"}
    nose.tools.assert_equal(
        colorbar.ticks.labels.label(index=1).style["fill"], "red")
    assert_canvas_matches(
        canvas, "axes-colorbar-ticks-labels-label-index-style")
def test_axes_colorbar_ticks_labels_label_value_style():
    """Individual colorbar tick labels can be styled by domain value."""
    canvas = toyplot.Canvas()
    axes = canvas.axes(label="Axes Label", xlabel="X Label", ylabel="Y Label")
    colorbar = axes.colorbar(palette=toyplot.color.brewer("BlueYellowRed"))
    colorbar.ticks.labels.label(value=0.5).style = {"fill": "red"}
    nose.tools.assert_equal(
        colorbar.ticks.labels.label(value=0.5).style["fill"], "red")
    assert_canvas_matches(
        canvas, "axes-colorbar-ticks-labels-label-value-style")
def test_axes_palette():
    """Successive plot marks cycle through the axes' default palette."""
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    for i in range(10):
        axes.plot(numpy.arange(2), numpy.repeat(i, 2))
    assert_canvas_matches(canvas, "axes-palette")
def test_axes_palettes():
    """Interleaved fill and plot marks draw from their respective palettes."""
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.fill(numpy.sin(numpy.linspace(0, 2 * numpy.pi, 100)),
              style={"fill-opacity": 0.5})
    axes.plot(numpy.sin(numpy.linspace(0, 2 * numpy.pi, 100)), marker="o")
    axes.fill(numpy.cos(numpy.linspace(0, 2 * numpy.pi, 100)),
              style={"fill-opacity": 0.5})
    axes.plot(numpy.cos(numpy.linspace(0, 2 * numpy.pi, 100)), marker="o")
    axes.fill((numpy.linspace(0, 1, 100)), style={"fill-opacity": 0.5})
    axes.plot((numpy.linspace(0, 1, 100)), marker="o")
    assert_canvas_matches(canvas, "axes-palettes")
def test_bars_one_magnitude():
    """The toyplot.bars() convenience API with a single magnitude series."""
    numpy.random.seed(1234)
    samples = numpy.random.normal(loc=1, size=(25, 100))
    heights = samples.mean(axis=1)
    canvas, axes, mark = toyplot.bars(heights)
    assert_canvas_matches(canvas, "bars-one-magnitude")
def test_axes_bars_one_magnitude():
    """Bars from a single magnitude series with implicit positions."""
    numpy.random.seed(1234)
    observations = numpy.random.normal(loc=1, size=(25, 100))
    y = numpy.mean(observations, axis=1)
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.bars(y)
    assert_canvas_matches(canvas, "axes-bars-one-magnitude")
def test_axes_bars_one_magnitude_centers():
    """Bars from a single magnitude series with explicit bar centers."""
    numpy.random.seed(1234)
    observations = numpy.random.normal(loc=1, size=(25, 100))
    x = numpy.linspace(0, 1, len(observations))
    y = numpy.mean(observations, axis=1)
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.bars(x, y)
    assert_canvas_matches(canvas, "axes-bars-one-magnitude-centers")
def test_axes_bars_one_magnitude_edges():
    """Bars from a single magnitude series with explicit left/right edges."""
    numpy.random.seed(1234)
    observations = numpy.random.normal(loc=1, size=(25, 100))
    x = numpy.cumsum(numpy.random.random(len(observations) + 1))
    x1 = x[:-1]
    x2 = x[1:]
    y = numpy.mean(observations, axis=1)
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.bars(x1, x2, y)
    assert_canvas_matches(canvas, "axes-bars-one-magnitude-edges")
def test_axes_bars_n_magnitudes():
    """Stacked bars from multiple magnitude series (one series per column)."""
    numpy.random.seed(1234)
    observations = numpy.random.normal(loc=1, size=(25, 100))
    series = numpy.column_stack(
        (numpy.mean(observations, axis=1), numpy.std(observations, axis=1)))
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.bars(series)
    assert_canvas_matches(canvas, "axes-bars-n-magnitudes")
def test_axes_bars_n_magnitudes_along_y():
    """Stacked bars oriented along the y axis."""
    numpy.random.seed(1234)
    observations = numpy.random.normal(loc=1, size=(25, 100))
    series = numpy.column_stack(
        (numpy.mean(observations, axis=1), numpy.std(observations, axis=1)))
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.bars(series, along="y")
    assert_canvas_matches(canvas, "axes-bars-n-magnitudes-along-y")
def test_axes_bars_n_magnitudes_centers():
    """Stacked bars with explicit bar centers."""
    numpy.random.seed(1234)
    observations = numpy.random.normal(loc=1, size=(25, 100))
    x = numpy.linspace(0, 1, len(observations))
    series = numpy.column_stack(
        (numpy.mean(observations, axis=1), numpy.std(observations, axis=1)))
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.bars(x, series)
    assert_canvas_matches(canvas, "axes-bars-n-magnitudes-centers")
def test_axes_bars_n_magnitudes_edges():
    """Stacked bars with explicit left/right edges."""
    numpy.random.seed(1234)
    observations = numpy.random.normal(loc=1, size=(25, 100))
    x = numpy.cumsum(numpy.random.random(len(observations) + 1))
    x1 = x[:-1]
    x2 = x[1:]
    series = numpy.column_stack(
        (numpy.mean(observations, axis=1), numpy.std(observations, axis=1)))
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.bars(x1, x2, series)
    assert_canvas_matches(canvas, "axes-bars-n-magnitudes-edges")
def test_axes_bars_n_magnitudes_symmetric():
    """Stacked bars with the symmetric baseline."""
    numpy.random.seed(1234)
    observations = numpy.random.normal(loc=1, size=(25, 100))
    series = numpy.column_stack(
        (numpy.mean(observations, axis=1), numpy.std(observations, axis=1)))
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.bars(series, baseline="symmetric")
    assert_canvas_matches(canvas, "axes-bars-n-magnitudes-symmetric")
def test_axes_bars_n_magnitudes_wiggle():
    """Stacked bars with the wiggle (streamgraph) baseline."""
    numpy.random.seed(1234)
    observations = numpy.random.normal(loc=1, size=(25, 100))
    series = numpy.column_stack(
        (numpy.mean(observations, axis=1), numpy.std(observations, axis=1)))
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.bars(series, baseline="wiggle")
    assert_canvas_matches(canvas, "axes-bars-n-magnitudes-wiggle")
def test_axes_bars_n_magnitudes_titles():
    """Stacked bars with per-series titles."""
    numpy.random.seed(1234)
    observations = numpy.random.normal(loc=1, size=(25, 100))
    series = numpy.column_stack(
        (numpy.mean(observations, axis=1), numpy.std(observations, axis=1)))
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.bars(series, baseline="stacked", title=["mean", "standard deviation"])
    assert_canvas_matches(canvas, "axes-bars-n-magnitudes-titles")
def test_axes_bars_histogram():
    """axes.bars() accepts numpy.histogram() output directly."""
    numpy.random.seed(1234)
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.bars(numpy.histogram(numpy.random.normal(size=10000), 100))
    assert_canvas_matches(canvas, "axes-bars-histogram")
def test_axes_bars_one_boundary():
    """Boundary-mode bars (baseline=None) from a single boundary series."""
    numpy.random.seed(1234)
    observations = numpy.random.normal(loc=1, size=(25, 100))
    y1 = numpy.mean(observations, axis=1)
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.bars(y1, baseline=None)
    assert_canvas_matches(canvas, "axes-bars-one-boundary")
def test_axes_bars_one_boundary_centers():
    """Boundary-mode bars with explicit bar centers."""
    numpy.random.seed(1234)
    observations = numpy.random.normal(loc=1, size=(25, 100))
    x = numpy.linspace(0, 1, len(observations))
    y1 = numpy.mean(observations, axis=1)
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.bars(x, y1, baseline=None)
    assert_canvas_matches(canvas, "axes-bars-one-boundary-centers")
def test_axes_bars_one_boundary_edges():
    """Boundary-mode bars with explicit left/right edges."""
    numpy.random.seed(1234)
    observations = numpy.random.normal(loc=1, size=(25, 100))
    x = numpy.cumsum(numpy.random.random(len(observations) + 1))
    x1 = x[:-1]
    x2 = x[1:]
    y1 = numpy.mean(observations, axis=1)
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.bars(x1, x2, y1, baseline=None)
    assert_canvas_matches(canvas, "axes-bars-one-boundary-edges")
def test_axes_bars_n_boundaries():
    """Boundary-mode bars from multiple boundary series (min/mean/max)."""
    numpy.random.seed(1234)
    observations = numpy.random.normal(loc=1, size=(25, 100))
    series = numpy.column_stack((numpy.min(observations, axis=1), numpy.mean(
        observations, axis=1), numpy.max(observations, axis=1)))
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.bars(series, baseline=None)
    assert_canvas_matches(canvas, "axes-bars-n-boundaries")
def test_axes_bars_n_boundaries_along_y():
    """Boundary-mode bars oriented along the y axis."""
    numpy.random.seed(1234)
    observations = numpy.random.normal(loc=1, size=(25, 100))
    series = numpy.column_stack((numpy.min(observations, axis=1), numpy.mean(
        observations, axis=1), numpy.max(observations, axis=1)))
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.bars(series, along="y", baseline=None)
    assert_canvas_matches(canvas, "axes-bars-n-boundaries-along-y")
def test_axes_bars_n_boundaries_centers():
    """Boundary-mode bars with explicit bar centers."""
    numpy.random.seed(1234)
    observations = numpy.random.normal(loc=1, size=(25, 100))
    x = numpy.linspace(0, 1, len(observations))
    series = numpy.column_stack((numpy.min(observations, axis=1), numpy.mean(
        observations, axis=1), numpy.max(observations, axis=1)))
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.bars(x, series, baseline=None)
    assert_canvas_matches(canvas, "axes-bars-n-boundaries-centers")
def test_axes_bars_n_boundaries_edges():
    """Boundary-mode bars with explicit left/right edges."""
    numpy.random.seed(1234)
    observations = numpy.random.normal(loc=1, size=(25, 100))
    x = numpy.cumsum(numpy.random.random(len(observations) + 1))
    x1 = x[:-1]
    x2 = x[1:]
    series = numpy.column_stack((numpy.min(observations, axis=1), numpy.mean(
        observations, axis=1), numpy.max(observations, axis=1)))
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.bars(x1, x2, series, baseline=None)
    assert_canvas_matches(canvas, "axes-bars-n-boundaries-edges")
def test_axes_bars_n_boundaries_titles():
    """Boundary-mode bars with per-band titles (five boundaries => four bands)."""
    numpy.random.seed(1234)
    observations = numpy.random.normal(loc=1, size=(25, 100))
    series = numpy.column_stack(
        (numpy.min(
            observations, axis=1), numpy.percentile(
            observations, 25, axis=1), numpy.percentile(
            observations, 50, axis=1), numpy.percentile(
            observations, 75, axis=1), numpy.max(
            observations, axis=1)))
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.bars(
        series,
        title=[
            "1st quartile",
            "2nd quartile",
            "3rd quartile",
            "4th quartile"],
        baseline=None)
    assert_canvas_matches(canvas, "axes-bars-n-boundaries-titles")
def test_axes_plot_one_variable():
    """Line plot of a single series with implicit positions."""
    numpy.random.seed(1234)
    observations = numpy.random.normal(loc=1, size=(25, 100))
    y = numpy.mean(observations, axis=1)
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.plot(y)
    assert_canvas_matches(canvas, "axes-plot-one-variable")
def test_axes_plot_one_variable_x():
    """Line plot of a single series with explicit x coordinates."""
    numpy.random.seed(1234)
    observations = numpy.random.normal(loc=1, size=(25, 100))
    x = numpy.linspace(0, 1, len(observations))
    y = numpy.mean(observations, axis=1)
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.plot(x, y)
    assert_canvas_matches(canvas, "axes-plot-one-variable-x")
def test_axes_plot_n_variables():
    """Line plot of multiple series (one per column)."""
    numpy.random.seed(1234)
    observations = numpy.random.normal(loc=1, size=(25, 100))
    series = numpy.column_stack((numpy.min(observations, axis=1), numpy.mean(
        observations, axis=1), numpy.max(observations, axis=1)))
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.plot(series)
    assert_canvas_matches(canvas, "axes-plot-n-variables")
def test_axes_plot_n_variables_x():
    """Line plot of multiple series sharing explicit x coordinates."""
    numpy.random.seed(1234)
    observations = numpy.random.normal(loc=1, size=(25, 100))
    x = numpy.linspace(0, 1, len(observations))
    series = numpy.column_stack((numpy.min(observations, axis=1), numpy.mean(
        observations, axis=1), numpy.max(observations, axis=1)))
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.plot(x, series)
    assert_canvas_matches(canvas, "axes-plot-n-variables-x")
def test_axes_plot_n_variables_along_y():
    """Line plot of multiple series oriented along the y axis."""
    numpy.random.seed(1234)
    observations = numpy.random.normal(loc=1, size=(25, 100))
    series = numpy.column_stack((numpy.min(observations, axis=1), numpy.mean(
        observations, axis=1), numpy.max(observations, axis=1)))
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.plot(series, along="y")
    assert_canvas_matches(canvas, "axes-plot-n-variables-along-y")
def test_axes_plot_masked_nan():
    """NaN and explicitly-masked values produce gaps in line plots."""
    x = numpy.linspace(0, 2 * numpy.pi, 51)
    y = numpy.ma.column_stack((
        1 + 0.5 * numpy.sin(x),
        1 + 0.5 * numpy.cos(x),
        1 + 0.2 * numpy.sin(2 * x),
        ))
    y[8:18, 0] = numpy.nan
    y[33:43, 1] = numpy.ma.masked
    # NOTE(review): size is passed as the string "64" — presumably coerced
    # downstream; confirm this is intentional rather than a typo for 64.
    canvas, axes, mark = toyplot.plot(x, y, marker="o", size="64")
    assert_canvas_matches(canvas, "axes-plot-masked-nan")
def test_axes_bars_magnitudes_masked_nan():
    """NaN and masked values produce gaps in magnitude-mode bars."""
    x = numpy.linspace(0, 2 * numpy.pi, 51)
    y = numpy.ma.column_stack((
        1 + 0.5 * numpy.sin(x),
        1 + 0.5 * numpy.cos(x),
        1 + 0.2 * numpy.sin(2 * x),
        ))
    y[8:18, 0] = numpy.nan
    y[33:43, 1] = numpy.ma.masked
    canvas, axes, mark = toyplot.bars(x, y)
    assert_canvas_matches(canvas, "axes-bars-magnitudes-masked-nan")
def test_axes_fill_magnitudes_masked_nan():
    """NaN and masked values produce gaps in stacked fill marks."""
    x = numpy.linspace(0, 2 * numpy.pi, 51)
    y = numpy.ma.column_stack((
        1 + 0.5 * numpy.sin(x),
        1 + 0.5 * numpy.cos(x),
        1 + 0.2 * numpy.sin(2 * x),
        ))
    y[8:18, 0] = numpy.nan
    y[33:43, 1] = numpy.ma.masked
    canvas, axes, mark = toyplot.fill(x, y, baseline="stacked")
    assert_canvas_matches(canvas, "axes-fill-magnitudes-masked-nan")
def test_axes_bars_boundaries_masked_nan():
    """NaN and masked values produce gaps in boundary-mode bars."""
    numpy.random.seed(1234)
    observations = numpy.random.normal(size=(50, 50))
    b = numpy.ma.column_stack((numpy.min(observations, axis=1), numpy.median(
        observations, axis=1), numpy.max(observations, axis=1)))
    b[10:20, 0] = numpy.nan
    b[30:40, 1] = numpy.ma.masked
    b[20:30, 2] = numpy.nan
    canvas, axes, mark = toyplot.bars(b, baseline=None)
    assert_canvas_matches(canvas, "axes-bars-boundaries-masked-nan")
def test_axes_fill_boundaries_masked_nan():
    """NaN and masked values produce gaps in boundary-mode fill marks."""
    numpy.random.seed(1234)
    observations = numpy.random.normal(size=(50, 50))
    b = numpy.ma.column_stack((numpy.min(observations, axis=1), numpy.median(
        observations, axis=1), numpy.max(observations, axis=1)))
    b[10:20, 0] = numpy.nan
    b[30:40, 1] = numpy.ma.masked
    b[20:30, 2] = numpy.nan
    canvas, axes, mark = toyplot.fill(b)
    assert_canvas_matches(canvas, "axes-fill-boundaries-masked-nan")
def test_scatterplot_one_variable():
    """The toyplot.scatterplot() convenience API with a single series."""
    numpy.random.seed(1234)
    samples = numpy.random.normal(loc=1, size=(25, 100))
    series = samples.mean(axis=1)
    canvas, axes, mark = toyplot.scatterplot(series)
    assert_canvas_matches(canvas, "scatterplot-one-variable")
def test_axes_scatterplot_one_variable():
    """Scatterplot of a single series with implicit positions."""
    numpy.random.seed(1234)
    observations = numpy.random.normal(loc=1, size=(25, 100))
    y = numpy.mean(observations, axis=1)
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.scatterplot(y)
    assert_canvas_matches(canvas, "axes-scatterplot-one-variable")
def test_axes_scatterplot_one_variable_x():
    """Scatterplot of a single series with explicit x coordinates."""
    numpy.random.seed(1234)
    observations = numpy.random.normal(loc=1, size=(25, 100))
    x = numpy.linspace(0, 1, len(observations))
    y = numpy.mean(observations, axis=1)
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.scatterplot(x, y)
    assert_canvas_matches(canvas, "axes-scatterplot-one-variable-x")
def test_axes_scatterplot_one_variable_fill():
    """Scatterplot with per-datum fill values."""
    numpy.random.seed(1234)
    observations = numpy.random.normal(loc=1, size=(25, 100))
    y = numpy.mean(observations, axis=1)
    fill = numpy.arange(len(observations))
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.scatterplot(y, fill=fill)
    assert_canvas_matches(canvas, "axes-scatterplot-one-variable-fill")
def test_axes_scatterplot_n_variables():
    """Scatterplot of multiple series (one per column)."""
    numpy.random.seed(1234)
    observations = numpy.random.normal(loc=1, size=(25, 100))
    series = numpy.column_stack((numpy.min(observations, axis=1), numpy.mean(
        observations, axis=1), numpy.max(observations, axis=1)))
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.scatterplot(series)
    assert_canvas_matches(canvas, "axes-scatterplot-n-variables")
def test_axes_scatterplot_n_variables_x():
    """Scatterplot of multiple series sharing explicit x coordinates."""
    numpy.random.seed(1234)
    observations = numpy.random.normal(loc=1, size=(25, 100))
    x = numpy.linspace(0, 1, len(observations))
    series = numpy.column_stack((numpy.min(observations, axis=1), numpy.mean(
        observations, axis=1), numpy.max(observations, axis=1)))
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.scatterplot(x, series)
    assert_canvas_matches(canvas, "axes-scatterplot-n-variables-x")
def test_axes_scatterplot_n_variables_along_y():
    """Scatterplot of multiple series oriented along the y axis."""
    numpy.random.seed(1234)
    observations = numpy.random.normal(loc=1, size=(25, 100))
    series = numpy.column_stack((numpy.min(observations, axis=1), numpy.mean(
        observations, axis=1), numpy.max(observations, axis=1)))
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.scatterplot(series, along="y")
    assert_canvas_matches(canvas, "axes-scatterplot-n-variables-along-y")
def test_axes_scatterplot_singular():
    """A scatterplot accepts scalar (single-point) coordinates."""
    x = numpy.linspace(0, 2 * numpy.pi)
    y = numpy.sin(x)
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.plot(x, y)
    axes.scatterplot(x[0], y[0], fill="red")
    assert_canvas_matches(canvas, "axes-scatterplot-singular")
def test_axes_scatterplot_markers():
    """Exercises the full range of marker specifications: shape strings,
    dicts with angle / per-marker styles, and label markers."""
    marker_style = {"stroke": toyplot.color.near_black, "fill": "cornsilk"}
    label_style = {"stroke": "none", "fill": toyplot.color.near_black}
    markers = [
        None,
        "",
        "|",
        "-",
        {"shape": "|", "angle": -45},
        {"shape": "|", "angle": 45},
        "+",
        "x",
        {"shape": "x", "angle": -22.5},
        "*",
        "^",
        {"shape": ">", "mstyle": {"stroke": "red"}},
        {"shape": "v", "mstyle": {"stroke": "red", "fill": "yellow"}},
        "<",
        "s",
        "d",
        "o",
        "oo",
        "o|",
        "o-",
        "o+",
        "ox",
        "o*",
        {"shape": "", "label": "A"},
        {"shape": "o", "label": "1"},
        {"shape": "s", "mstyle": {"stroke": "blue", "fill": "white"},
         "label": "B", "lstyle": {"fill": "blue"}},
        {"shape": "d", "label": "2", "lstyle": {"fill": "green"}},
    ]
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.scatterplot(
        numpy.arange(
            len(markers)),
        fill="steelblue",
        marker=markers,
        size=100,
        mstyle=marker_style,
        mlstyle=label_style)
    assert_canvas_matches(canvas, "axes-scatterplot-markers")
def test_axes_rect_singular():
    """A single rectangle drawn from scalar coordinates."""
    cv = toyplot.Canvas()
    ax = cv.axes(xmin=0, xmax=1, ymin=0, ymax=1)
    ax.rect(0.1, 0.2, 0.3, 0.6)
    assert_canvas_matches(cv, "axes-rect-singular")
def test_axes_rect_singular_along_y():
    """A single rectangle oriented along the y axis."""
    canvas = toyplot.Canvas()
    axes = canvas.axes(xmin=0, xmax=1, ymin=0, ymax=1)
    axes.rect(0.1, 0.2, 0.3, 0.6, along="y")
    assert_canvas_matches(canvas, "axes-rect-singular-along-y")
def test_axes_rect():
    """Multiple rectangles with palette-mapped fills and per-rect titles."""
    x1 = numpy.arange(1, 10)
    x2 = x1 + 0.5
    y1 = x1 - 0.5
    y2 = x1 ** 1.5
    fill = x1
    title = x1
    palette = toyplot.color.brewer("BlueRed")
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    axes.rect(x1, x2, y1, y2, fill=(fill, palette), title=title)
    assert_canvas_matches(canvas, "axes-rect")
def test_axes_text():
    """Per-datum text marks rendered as data (annotation=False)."""
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    x = numpy.linspace(0, 1)
    y = numpy.sin(x * 10)
    text = ["s%s" % index for index in range(len(x))]
    axes.text(x, y, text, annotation=False)
    assert_canvas_matches(canvas, "axes-text")
def test_axes_text_angle_fill():
    """Text marks with per-datum rotation angles and fill values."""
    x = numpy.zeros(10)
    y = x
    angle = numpy.linspace(-90, 0, len(x), endpoint=True)
    fill = numpy.linspace(1, 0, len(x))
    canvas = toyplot.Canvas(400, 400)
    axes = canvas.axes(xmin=-0.25, xmax=0.5, ymin=-0.5, ymax=0.25)
    axes.text(
        x,
        y,
        text="Toyplot!",
        angle=angle,
        fill=fill,
        style={
            "font-size": "36px",
            "font-weight": "bold",
            "stroke": "white",
            "text-anchor": "start"})
    assert_canvas_matches(canvas, "axes-text-angle-fill")
def test_axes_legend():
    """A legend placed with corner positioning on gridded axes."""
    canvas = toyplot.Canvas()
    axes = canvas.axes(grid=(2, 2, 1, 1))
    axes.legend(
        (("foo", "s"), ("bar", "o")), corner=("bottom-left", 30, 100, 50))
    assert_canvas_matches(canvas, "axes-legend")
def test_animation_frame_sanity_checks():
    """AnimationFrame exposes index/time/duration and validates mark arguments."""
    frame = toyplot.canvas.AnimationFrame(
        index=1,
        begin=2.3,
        end=2.4,
        changes=collections.defaultdict(
            lambda: collections.defaultdict(list)))
    nose.tools.assert_equal(frame.index(), 1)
    nose.tools.assert_equal(frame.time(), 2.3)
    # duration = end - begin; almost_equal guards against float round-off.
    numpy.testing.assert_almost_equal(frame.duration(), 0.1)
    # A None mark must be rejected by every mutator.
    with nose.tools.assert_raises(ValueError):
        frame.set_mark_style(None, {})
    with nose.tools.assert_raises(ValueError):
        frame.set_datum_style(None, 0, 0, {})
    with nose.tools.assert_raises(ValueError):
        frame.set_datum_text(None, 0, 0, "")
def test_canvas_defaults():
    """An empty canvas renders with default dimensions and styling."""
    canvas = toyplot.Canvas()
    assert_canvas_matches(canvas, "canvas-defaults")
def test_canvas_time():
    """Canvas.time() returns frames with the given begin/end and optional index."""
    canvas = toyplot.Canvas()
    frame = canvas.time(0.3, 0.4)
    nose.tools.assert_equal(frame.time(), 0.3)
    numpy.testing.assert_almost_equal(frame.duration(), 0.1)
    # Index defaults to zero when not supplied.
    nose.tools.assert_equal(frame.index(), 0)
    frame = canvas.time(0.3, 0.4, 5)
    nose.tools.assert_equal(frame.time(), 0.3)
    numpy.testing.assert_almost_equal(frame.duration(), 0.1)
    nose.tools.assert_equal(frame.index(), 5)
def test_canvas_repr_html():
    """Canvas._repr_html_() returns a unicode string for notebook display."""
    canvas = toyplot.Canvas(autorender="html")
    html = canvas._repr_html_()
    nose.tools.assert_is_instance(html, toyplot.compatibility.unicode_type)
def test_explicit_tick_locator_failure():
    """An Explicit locator constructed with no arguments must raise."""
    with nose.tools.assert_raises(ValueError):
        toyplot.locator.Explicit()
##########################################################################
# toyplot.color
def test_color_require_color():
    """toyplot.color._require_color() accepts CSS names, rgb/rgba values,
    and 3/4-tuples; other types raise ValueError."""
    assert_color_equal(toyplot.color._require_color("red"), (1, 0, 0, 1))
    assert_color_equal(
        toyplot.color._require_color(toyplot.color.rgb(1, 1, 0)), (1, 1, 0, 1))
    assert_color_equal(toyplot.color._require_color(
        toyplot.color.rgba(1, 1, 0, 0.5)), (1, 1, 0, 0.5))
    assert_color_equal(toyplot.color._require_color((1, 1, 0)), (1, 1, 0, 1))
    assert_color_equal(
        toyplot.color._require_color((1, 1, 0, 0.5)), (1, 1, 0, 0.5))
    with nose.tools.assert_raises(ValueError):
        toyplot.color._require_color(5)
##########################################################################
# toyplot.html
def test_html_render_animation():
    """Rendering an animated canvas to HTML completes without error.

    Exercises per-frame style and text changes on both a text mark and a
    scatterplot mark.
    """
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    text = canvas.text(100, 100, "")
    scatterplot = axes.scatterplot(numpy.arange(10))
    def callback(frame):
        # Mutate mark-level and datum-level state on every frame.
        frame.set_mark_style(text, {"fill-opacity": frame.time()})
        frame.set_datum_text(text, 0, 0, "frame %s time %s duration %s" % (
            frame.index(), frame.time(), frame.duration()))
        frame.set_datum_style(
            scatterplot, 0, frame.index(), {"stroke": "none"})
    canvas.animate(10, callback)
    # The rendered DOM was previously bound to an unused local (flake8 F841);
    # rendering without raising is the behavior under test.
    toyplot.html.render(canvas)
def test_html_ipython_html():
    """Canvas._repr_html_() runs without error for IPython integration."""
    cv = toyplot.Canvas()
    cv.axes()
    cv._repr_html_()
##########################################################################
# toyplot.png
def test_png_render_frames():
    """Verify that every animation frame renders to a PNG byte string."""
    if not hasattr(toyplot, "png"):
        # PNG support is optional; skip silently when unavailable.
        return
    canvas = toyplot.Canvas()
    axes = canvas.axes()
    text = canvas.text(100, 100, "")
    scatterplot = axes.scatterplot(numpy.arange(10))

    def callback(frame):
        frame.set_mark_style(text, {"fill-opacity": frame.time()})
        frame.set_datum_text(text, 0, 0, "frame %s" % frame.index())
        frame.set_datum_style(scatterplot, 0, frame.index(), {"stroke": "none"})

    canvas.animate(10, callback)
    for frame in toyplot.png.render_frames(canvas):
        nose.tools.assert_is_instance(frame, toyplot.compatibility.string_type)
        # Bytes 1-3 of any PNG stream spell "PNG".
        nose.tools.assert_equal(frame[1:4], "PNG")
def test_cairo_small_font():
    # Regression test: tiny font sizes must not break the PNG backend.
    if not hasattr(toyplot, "png"):
        return
    canvas = toyplot.Canvas()
    axes = canvas.axes(label="Small Text!")
    axes.label.style = {"font-size": "8px"}
    toyplot.png.render(canvas)
##########################################################################
# High-level tests that combine multiple API calls into whole figures.
def test_basic_api():
    """Build a multi-axes figure through the basic plotting API."""
    numpy.random.seed(1234)
    x = numpy.linspace(0, 1, 100)
    y = x + (0.1 * x * numpy.random.random(len(x)))
    canvas = toyplot.Canvas()
    first = canvas.axes(grid=(2, 2, 0, 1, 0, 2))
    first.plot(
        x, y, style={"stroke": "steelblue", "stroke-width": 1.0}, marker="o")
    first.x.label.text = "1st Axis"
    first.y.label.text = "2nd Axis"
    second = canvas.axes(grid=(2, 2, 2), label="2nd Axes")
    second.plot(x, y, style={"stroke": "red"})
    third = canvas.axes(grid=(2, 2, 3), label="3rd Axes")
    third.plot(x, y, style={"stroke": "green"})
    title_style = {
        "font-size": "16px",
        "font-weight": "bold",
        "text-anchor": "middle"}
    canvas.text(300, 30, "Plot Title", style=title_style,
                title="The plot's title")
    assert_canvas_matches(canvas, "basic-api")
def test_axes_clipping():
    # Marks extending beyond the explicit domain must be clipped.
    domain = numpy.linspace(0, 2 * numpy.pi, 100)
    canvas = toyplot.Canvas()
    axes = canvas.axes(xmin=0.5, xmax=4.5, ymin=-0.5, ymax=0.5)
    axes.plot(domain, numpy.sin(domain))
    axes.plot(domain, numpy.cos(domain))
    assert_canvas_matches(canvas, "axes-clipping")
def test_axes():
    """Render a family of scaled sine curves with dashed guide lines."""
    x = numpy.linspace(0, 2 * numpy.pi, 200)
    canvas = toyplot.Canvas(800, 400)
    axes = canvas.axes(xlabel="Time", ylabel="Value")
    axes.hlines(0, style={"stroke-dasharray": "5,5"}, title="y = 0")
    axes.vlines(0, style={"stroke-dasharray": "5,5"}, title="x = 0")
    for scale in numpy.linspace(1, 2, 7):
        axes.plot(x, 0.5 * scale * numpy.sin(x * scale),
                  title="%s * sin(x * %s)" % (0.5 * scale, scale))
    assert_canvas_matches(canvas, "axes")
def test_axes_layout():
    # Labels on every side plus an explicit domain exercise the layout engine.
    canvas = toyplot.Canvas()
    axes = canvas.axes(label="Title", xlabel="X Label",
                       ylabel="Y Label", xmin=0, xmax=1, ymin=0, ymax=1)
    centered = {
        "fill": toyplot.color.near_black,
        "text-anchor": "middle",
        "alignment-baseline": "middle"}
    axes.text(0.5, 0.5, "Axes Region", style=centered)
    assert_canvas_matches(canvas, "axes-layout")
def test_legend():
    """Combine mark-based and explicit legend entries on one canvas."""
    x = numpy.linspace(0, 2 * numpy.pi, 200)
    canvas = toyplot.Canvas(800, 600)
    axes = canvas.axes()
    plots = [axes.plot(x, 0.5 * i * numpy.sin(x * i))
             for i in numpy.linspace(1, 2, 3)]
    fill = axes.fill(x, numpy.sin(x) * 0.1, numpy.cos(x) * 0.1)
    axes.x.label.text = "Time"
    axes.y.label.text = "Temp"
    entries = [
        ("Plot 1", plots[0]),
        ("Plot 2", plots[1]),
        ("Plot 3", plots[2]),
        ("Fill", fill),
        ("Explicit Line", "line", {"stroke": "red", "stroke-width": 1.0}),
        ("Explicit Rect", "rect", {"fill": "green"}),
        ("Explicit Marker", "^", {
            "fill": "lightblue", "stroke": toyplot.color.near_black}),
        ("Explicit Marker", "o", {
            "fill": "yellow", "stroke": toyplot.color.near_black}),
        ("Explicit Marker", "s", {
            "fill": "pink", "stroke": toyplot.color.near_black}),
    ]
    canvas.legend(entries, bounds=(100, 200, 350, 550))
    assert_canvas_matches(canvas, "legend")
def test_bounds_placement():
    # Mix absolute and percentage coordinates in every bounds slot.
    style = {"stroke": toyplot.color.near_black}
    canvas = toyplot.Canvas(
        800, 600, style={"border": "1px solid %s" % toyplot.color.near_black})
    placements = [
        (400 - 100, 400 + 100, 20, 50),
        ("25%", 400 + 100, 80, 110),
        (400 - 100, "75%", 140, 170),
        ("33%", "66%", 200, 230),
        (20, 50, 450 - 20, 450 + 20),
        (80, 110, "50%", 450 + 20),
        (140, 170, 450 - 20, "90%"),
        (200, 230, "50%", "90%"),
        ("66%", "90%", "66%", "90%"),
    ]
    for bounds in placements:
        canvas.legend([], style=style, bounds=bounds)
    assert_canvas_matches(canvas, "bounds-placement")
def test_corner_placement():
    # One legend per corner keyword, with varying inset/size forms; each
    # legend is labeled with its own corner keyword.
    style = {"stroke": toyplot.color.near_black}
    canvas = toyplot.Canvas(
        800, 600, style={"border": "1px solid %s" % toyplot.color.near_black})
    corners = [
        ("top", 10, 100, 30),
        ("top-right", 10, 100, 30),
        ("right", 10, 100, 30),
        ("bottom-right", 10, 150, 30),
        ("bottom", 10, 100, 30),
        ("bottom-left", 10, 100, 30),
        ("left", 10, 100, 30),
        ("top-left", 10, 100, 30),
        ("top-left", 50, 100, 30),
        ("top-left", 100, "20%", "10%"),
    ]
    for corner in corners:
        canvas.legend([(corner[0], "^")], style=style, corner=corner)
    assert_canvas_matches(canvas, "corner-placement")
def test_grid_placement():
    """Place axes via grid cells, row/column spans, and gutters."""
    x = numpy.linspace(0, 2 * numpy.pi, 1000)
    canvas = toyplot.Canvas(
        800, 600, style={"border": "1px solid %s" % toyplot.color.near_black})
    curve = numpy.sin(x)
    inset = canvas.axes(grid=(3, 3, 0), label="3x3 Grid")
    inset.plot(x, curve)
    inset = canvas.axes(grid=(3, 3, 0, 1, 1, 2), label="3x3 Grid colspan=2")
    inset.plot(x, curve)
    inset = canvas.axes(
        grid=(3, 3, 1, 0), gutter=20, label="3x3 Grid gutter=20")
    inset.plot(x, curve)
    inset = canvas.axes(
        grid=(3, 3, 1, 1), gutter=20, label="3x3 Grid gutter=20")
    inset.plot(x, curve)
    inset = canvas.axes(
        grid=(3, 3, 1, 2, 2, 1), gutter=20,
        label="3x3 Grid gutter=20 rowspan=2")
    inset.plot(x, curve)
    assert_canvas_matches(canvas, "grid-placement")
| |
#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# for py2/py3 compatibility
from __future__ import print_function
import argparse
from collections import OrderedDict
import sys
from common_includes import *
from git_recipes import GetCommitMessageFooterMap
def IsSvnNumber(rev):
  """Return True when rev looks like a legacy SVN revision: a short
  (fewer than 8 characters) string of digits."""
  return len(rev) < 8 and rev.isdigit()
class Preparation(Step):
  MESSAGE = "Preparation."

  def RunStep(self):
    # A sentinel file on disk marks an in-flight merge.  Refuse to start a
    # second one unless --force was given; when resuming at a later step
    # (step != 0) the sentinel is expected to exist.
    if os.path.exists(self.Config("ALREADY_MERGING_SENTINEL_FILE")):
      if self._options.force:
        os.remove(self.Config("ALREADY_MERGING_SENTINEL_FILE"))
      elif self._options.step == 0:  # pragma: no cover
        self.Die("A merge is already in progress")
    # Create (or touch) the sentinel for this run.
    open(self.Config("ALREADY_MERGING_SENTINEL_FILE"), "a").close()
    self.InitialEnvironmentChecks(self.default_cwd)
    # Remember the target release branch for all later steps.
    self["merge_to_branch"] = self._options.branch
    self.CommonPrepare()
    self.PrepareBranch()
class CreateBranch(Step):
  MESSAGE = "Create a fresh branch for the patch."

  def RunStep(self):
    # Branch off the remote release branch chosen during Preparation.
    self.GitCreateBranch(self.Config("BRANCHNAME"),
                         self.vc.RemoteBranch(self["merge_to_branch"]))
class SearchArchitecturePorts(Step):
  MESSAGE = "Search for corresponding architecture ports."

  def RunStep(self):
    # De-duplicate the user-supplied revisions while keeping their order.
    self["full_revision_list"] = list(OrderedDict.fromkeys(
        self._options.revisions))
    ports = []
    for revision in self["full_revision_list"]:
      # Find commits on master whose subject follows the "Port XXX" pattern.
      git_hashes = self.GitLog(reverse=True, format="%H",
                               grep="^[Pp]ort %s" % revision,
                               branch=self.vc.RemoteMasterBranch())
      for git_hash in git_hashes.splitlines():
        revision_title = self.GitLog(n=1, format="%s", git_hash=git_hash)
        if git_hash in self["full_revision_list"]:
          # Already part of the requested revisions - just report it.
          print("Found port of %s -> %s (already included): %s"
                % (revision, git_hash, revision_title))
        else:
          print("Found port of %s -> %s: %s"
                % (revision, git_hash, revision_title))
          ports.append(git_hash)
    # Offer to merge any newly discovered ports as well.
    if len(ports) > 0:
      if self.Confirm("Automatically add corresponding ports (%s)?"
                      % ", ".join(ports)):
        # 'y': append the discovered ports to the revision list.
        self["full_revision_list"].extend(ports)
class CreateCommitMessage(Step):
  MESSAGE = "Create commit message."

  def _create_commit_description(self, commit_hash):
    # One description block per merged commit: its subject plus its hash.
    patch_merge_desc = self.GitLog(n=1, format="%s", git_hash=commit_hash)
    description = "Merged: " + patch_merge_desc + "\n"
    description += "Revision: " + commit_hash + "\n\n"
    return description

  def RunStep(self):
    # Stringify: ["abcde", "12345"] -> "abcde, 12345"
    self["revision_list"] = ", ".join(self["full_revision_list"])
    if not self["revision_list"]:  # pragma: no cover
      self.Die("Revision list is empty.")

    msg_pieces = []
    if len(self["full_revision_list"]) > 1:
      # Several commits: generic title, one description block per commit.
      self["commit_title"] = "Merged: Squashed multiple commits."
      msg_pieces.extend(self._create_commit_description(commit_hash)
                        for commit_hash in self["full_revision_list"])
    else:
      # Single commit: its subject becomes the title, truncated because of
      # the code review tool.
      commit_hash = self["full_revision_list"][0]
      full_description = self._create_commit_description(commit_hash).split("\n")
      title = full_description[0]
      if len(title) > 100:
        title = title[:96] + " ..."
      self["commit_title"] = title
      msg_pieces.append(full_description[1] + "\n\n")

    # Collect bug references from both BUG= lines and gerrit "Bug:" footers.
    bugs = []
    for commit_hash in self["full_revision_list"]:
      msg = self.GitLog(n=1, git_hash=commit_hash)
      for bug in re.findall(r"^[ \t]*BUG[ \t]*=[ \t]*(.*?)[ \t]*$", msg, re.M):
        bugs.extend(s.strip() for s in bug.split(","))
      gerrit_bug = GetCommitMessageFooterMap(msg).get('Bug', '')
      bugs.extend(s.strip() for s in gerrit_bug.split(","))
    bug_aggregate = ",".join(
        sorted(filter(lambda s: s and s != "none", set(bugs))))
    if bug_aggregate:
      # TODO(machenbach): Use proper gerrit footer for bug after switch to
      # gerrit. Keep BUG= for now for backwards-compatibility.
      msg_pieces.append("BUG=%s\nLOG=N\n" % bug_aggregate)
    msg_pieces.append("NOTRY=true\nNOPRESUBMIT=true\nNOTREECHECKS=true\n")
    self["new_commit_msg"] = "".join(msg_pieces)
class ApplyPatches(Step):
  MESSAGE = "Apply patches for selected revisions."

  def RunStep(self):
    # Cherry-pick each revision by exporting its patch and applying it.
    for commit_hash in self["full_revision_list"]:
      print("Applying patch for %s to %s..."
            % (commit_hash, self["merge_to_branch"]))
      patch = self.GitGetPatch(commit_hash)
      TextToFile(patch, self.Config("TEMPORARY_PATCH_FILE"))
      self.ApplyPatch(self.Config("TEMPORARY_PATCH_FILE"))
    # An extra user-supplied patch file may be applied on top.
    if self._options.patch:
      self.ApplyPatch(self._options.patch)
class CommitLocal(Step):
  MESSAGE = "Commit to local branch."

  def RunStep(self):
    # Add a commit message title.
    self["new_commit_msg"] = "%s\n\n%s" % (self["commit_title"],
                                           self["new_commit_msg"])
    TextToFile(self["new_commit_msg"], self.Config("COMMITMSG_FILE"))
    self.GitCommit(file_name=self.Config("COMMITMSG_FILE"))
class CommitRepository(Step):
  MESSAGE = "Commit to the repository."

  def RunStep(self):
    # Land the reviewed change: wait for LGTM, run presubmit, then commit
    # through the version-control interface.
    self.GitCheckout(self.Config("BRANCHNAME"))
    self.WaitForLGTM()
    self.GitPresubmit()
    self.vc.CLLand()
class CleanUp(Step):
  MESSAGE = "Cleanup."

  def RunStep(self):
    # Remove scratch state and print a summary of what was merged.
    self.CommonCleanup()
    print("*** SUMMARY ***")
    print("branch: %s" % self["merge_to_branch"])
    if self["revision_list"]:
      print("patches: %s" % self["revision_list"])
class MergeToBranch(ScriptsBase):
  """Driver that merges master revisions to a V8 release branch."""

  def _Description(self):
    return ("Performs the necessary steps to merge revisions from "
            "master to release branches like 4.5. This script does not "
            "version the commit. See http://goo.gl/9ke2Vw for more "
            "information.")

  def _PrepareOptions(self, parser):
    # --branch lives in a required mutually-exclusive group, which makes it
    # effectively mandatory.
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("--branch", help="The branch to merge to.")
    parser.add_argument("revisions", nargs="*",
                        help="The revisions to merge.")
    parser.add_argument("-f", "--force",
                        help="Delete sentinel file.",
                        default=False, action="store_true")
    parser.add_argument("-m", "--message",
                        help="A commit message for the patch.")
    parser.add_argument("-p", "--patch",
                        help="A patch file to apply as part of the merge.")

  def _ProcessOptions(self, options):
    # Validate the option combination; return False to abort the script.
    # Without revisions, a patch file plus an explicit message is required.
    if len(options.revisions) < 1:
      if not options.patch:
        print("Either a patch file or revision numbers must be specified")
        return False
      if not options.message:
        print("You must specify a merge comment if no patches are specified")
        return False
    options.bypass_upload_hooks = True
    # CC ulan to make sure that fixes are merged to Google3.
    options.cc = "ulan@chromium.org"
    # Branches with three components (e.g. "4.5.6") are roll branches.
    if len(options.branch.split('.')) > 2:
      print ("This script does not support merging to roll branches. "
             "Please use tools/release/roll_merge.py for this use case.")
      return False
    # Make sure to use git hashes in the new workflows.
    for revision in options.revisions:
      if (IsSvnNumber(revision) or
          (revision[0:1] == "r" and IsSvnNumber(revision[1:]))):
        print("Please provide full git hashes of the patches to merge.")
        print("Got: %s" % revision)
        return False
    return True

  def _Config(self):
    # Scratch files used to communicate state between the steps below.
    return {
      "BRANCHNAME": "prepare-merge",
      "PERSISTFILE_BASENAME": RELEASE_WORKDIR + "v8-merge-to-branch-tempfile",
      "ALREADY_MERGING_SENTINEL_FILE":
          RELEASE_WORKDIR + "v8-merge-to-branch-tempfile-already-merging",
      "TEMPORARY_PATCH_FILE":
          RELEASE_WORKDIR + "v8-prepare-merge-tempfile-temporary-patch",
      "COMMITMSG_FILE": RELEASE_WORKDIR + "v8-prepare-merge-tempfile-commitmsg",
    }

  def _Steps(self):
    # Executed in order by the ScriptsBase driver.
    return [
      Preparation,
      CreateBranch,
      SearchArchitecturePorts,
      CreateCommitMessage,
      ApplyPatches,
      CommitLocal,
      UploadStep,
      CommitRepository,
      CleanUp,
    ]
# Entry point: exit with the status code returned by the script driver.
if __name__ == "__main__":  # pragma: no cover
  sys.exit(MergeToBranch().Run())
| |
from __future__ import absolute_import, division
from sys import stdout
from os import remove
from os.path import join, abspath, isdir
import os.path
from time import time, sleep
from multiprocessing import RawValue, Lock, Process, cpu_count
from string import Template
import numpy as np
from numpy.fft import irfftn as np_irfftn, rfftn as np_rfftn
from scipy.ndimage import binary_erosion, laplace
try:
from pyfftw import zeros_aligned, simd_alignment
from pyfftw.builders import rfftn as rfftn_builder, irfftn as irfftn_builder
PYFFTW = True
except ImportError:
PYFFTW = False
# Optional GPU support: any failure here (missing package, missing OpenCL
# driver, ...) simply disables the GPU code path.  Catch Exception rather
# than using a bare except so KeyboardInterrupt/SystemExit still propagate.
try:
    import pyopencl as cl
    import pyopencl.array as cl_array
    from pyopencl.elementwise import ElementwiseKernel
    from gpyfft import GpyFFT
    OPENCL = True
except Exception:
    OPENCL = False
from ._powerfit import conj_multiply, calc_lcc, dilate_points
from ._extensions import rotate_grid3d
class _Counter(object):
"""Thread-safe counter object to follow PowerFit progress"""
def __init__(self):
self.val = RawValue('i', 0)
self.lock = Lock()
def increment(self):
with self.lock:
self.val.value += 1
def value(self):
with self.lock:
return self.val.value
class PowerFitter(object):
    """Wrapper around the Correlator classes for multiprocessing and GPU
    accelerated searches providing an easy interface.
    """

    def __init__(self, target, laplace=False):
        # Volume to fit into; correlators read its .array attribute.
        self._target = target
        self._rotations = None
        self._template = None
        self._mask = None
        self._queues = None
        # Number of CPU worker processes used by the CPU scan.
        self._nproc = 1
        self._directory = abspath('./')
        self._laplace = laplace

    @property
    def directory(self):
        """Directory where intermediate result files are written."""
        return self._directory

    @directory.setter
    def directory(self, directory):
        if isdir(directory):
            self._directory = abspath(directory)
        else:
            raise ValueError("Directory does not exist.")

    def scan(self):
        # Dispatch to the GPU implementation when OpenCL queues are set.
        if self._queues is None:
            self._cpu_scan()
        else:
            self._gpu_scan()

    def _gpu_scan(self):
        # Run the whole search on a single GPU queue.
        self._corr = GPUCorrelator(self._target.array, self._queues[0],
                                   laplace=self._laplace)
        self._corr.template = self._template.array
        self._corr.mask = self._mask.array
        self._corr.rotations = self._rotations
        self._corr.scan()
        self._lcc = self._corr.lcc
        self._rot = self._corr.rot

    def _cpu_scan(self):
        # Split the rotations into equal chunks, one per worker process.
        nrot = self._rotations.shape[0]
        self._nrot_per_job = nrot // self._nproc
        processes = []
        self._counter = _Counter()
        self._njobs = self._nproc
        if self._queues is not None:
            self._njobs = len(self._queues)
        # range(): xrange does not exist on Python 3.
        for n in range(self._njobs):
            init_rot = n * self._nrot_per_job
            end_rot = init_rot + self._nrot_per_job
            if n == self._njobs - 1:
                # The last job picks up the division remainder.
                end_rot = None
            sub_rotations = self._rotations[init_rot: end_rot]
            processes.append(Process(
                target=self._run_correlator_instance,
                args=(self._target, self._template, self._mask,
                      sub_rotations, self._laplace, self._counter, n,
                      self._queues, self._directory)
            ))
        time0 = time()
        for n in range(self._njobs):
            processes[n].start()
        # Poll the shared counter to print progress with an ETA estimate.
        while self._counter.value() < nrot:
            n = self._counter.value()
            p_done = (n + 1) / float(nrot) * 100
            now = time()
            eta = ((now - time0) / p_done) * (100 - p_done)
            total = (now - time0) / p_done * (100)
            stdout.write('{:7.2%} {:.0f}s {:.0f}s        \r'.format(n / float(nrot), eta, total))
            stdout.flush()
            sleep(0.5)
        stdout.write('\n')
        for n in range(self._njobs):
            processes[n].join()
        self._combine()

    @staticmethod
    def _run_correlator_instance(target, template, mask, rotations, laplace,
                                 counter, jobid, queues, directory):
        # Executed in a child process: run a CPU correlator on a rotation
        # subset and dump the partial results to disk for _combine().
        correlator = CPUCorrelator(target.array, laplace=laplace)
        correlator.template = template.array
        correlator.mask = mask.array
        correlator.rotations = rotations
        correlator._counter = counter
        correlator.scan()
        np.save(join(directory, '_lcc_part_{:d}.npy').format(jobid), correlator._lcc)
        np.save(join(directory, '_rot_part_{:d}.npy').format(jobid), correlator._rot)

    def _combine(self):
        # Combine all the intermediate results
        lcc = np.zeros(self._target.shape)
        rot = np.zeros(self._target.shape)
        # Plain bool: the np.bool alias was removed in NumPy 1.24.
        ind = np.zeros(lcc.shape, dtype=bool)
        for n in range(self._njobs):
            lcc_file = join(self._directory, '_lcc_part_{:d}.npy').format(n)
            rot_file = join(self._directory, '_rot_part_{:d}.npy').format(n)
            part_lcc = np.load(lcc_file)
            part_rot = np.load(rot_file)
            np.greater(part_lcc, lcc, ind)
            lcc[ind] = part_lcc[ind]
            # take care of the rotation index offset for each independent job
            rot[ind] = part_rot[ind] + self._nrot_per_job * n
            remove(lcc_file)
            remove(rot_file)
        self._lcc = lcc
        self._rot = rot
class BaseCorrelator(object):
    """Base class that calculates the local cross-correlation"""

    def __init__(self, target, laplace=False):
        # Work with a density normalized to a maximum of 1.
        self._target = target / target.max()
        self._rotations = None
        self._template = None
        self._mask = None
        self._laplace = laplace
        self._lcc_mask = self._get_lcc_mask(self._target)
        # Largest radius that still fits inside the volume.
        self._rmax = min(target.shape) // 2

    @staticmethod
    def _get_lcc_mask(target):
        # Only voxels holding a meaningful fraction of the density are scored.
        return (target > target.max() * 0.05).astype(np.uint8)

    @property
    def target(self):
        return self._target

    @property
    def mask(self):
        return self._mask

    @mask.setter
    def mask(self, mask):
        if self._template is None:
            raise ValueError("First set the template.")
        if self._target.shape != mask.shape:
            raise ValueError("Shape of the mask is different from target.")
        nonzero = mask != 0
        # remember the normalization factor for the cross-correlation
        self._norm_factor = nonzero.sum()
        # If mask is only zeros, raise error
        if self._norm_factor == 0:
            raise ValueError('Zero-filled mask is not allowed.')
        self._mask = mask.copy()
        if self._laplace:
            self._template = self._laplace_filter(self._template)
        self._template *= self._mask
        self._normalize_template(nonzero)
        # multiply again for core-weighted correlation score
        self._template *= self._mask

    @staticmethod
    def _laplace_filter(array):
        """Laplace transform"""
        return laplace(array, mode='wrap')

    def _normalize_template(self, ind):
        # Zero mean / unit variance over the masked voxels only.
        self._template[ind] -= self._template[ind].mean()
        self._template[ind] /= self._template[ind].std()

    @property
    def rotations(self):
        return self._rotations

    @rotations.setter
    def rotations(self, rotations):
        """Set the rotations that will be sampled."""
        self._rotations = np.asarray(
            rotations, dtype=np.float64).reshape(-1, 3, 3)

    @property
    def template(self):
        return self._template

    @template.setter
    def template(self, template):
        if template.shape != self._target.shape:
            raise ValueError("Shape of template does not match the target.")
        # Setting a new template invalidates any previously set mask.
        self._mask = None
        self._template = template.copy()

    @property
    def lcc(self):
        return self._lcc

    @property
    def rot(self):
        return self._rot

    def scan(self):
        # Guard clause: all three ingredients must be configured first.
        required = (self._template, self._mask, self._rotations)
        if any(item is None for item in required):
            raise ValueError("First set the template, mask, and rotations.")
class CPUCorrelator(BaseCorrelator):
    """CPU implementation for calculating the local cross-correlation."""

    def __init__(self, target, laplace=False, fftw=True):
        super(CPUCorrelator, self).__init__(target, laplace=laplace)
        # Use pyFFTW only when it is both installed and requested.
        self._fftw = PYFFTW and fftw
        self._allocate_arrays(self._target.shape)
        self._build_ffts()
        target = self._target
        if self._laplace:
            target = self._laplace_filter(self._target)
        # Pre-calculate the FFTs of the (optionally filtered) target.
        if self._fftw:
            self._rfftn(target, self._ft_target)
            self._rfftn(target**2, self._ft_target2)
        else:
            self._ft_target = self._rfftn(target)
            self._ft_target2 = self._rfftn(target**2)

    def _allocate_arrays(self, shape):
        # Allocate all the required real-valued work arrays.
        arrays = '_rot_template _rot_mask _rot_mask2 _gcc _ave _ave2 _lcc_scan _lcc _rot'.split()
        for arr in arrays:
            setattr(self, arr, self._allocate_array(shape, np.float64, self._fftw))
        # Plain bool: the np.bool alias was removed in NumPy 1.24.
        self._ind = np.zeros(shape, dtype=bool)
        # Complex (Fourier-space) arrays.
        self._ft_shape = self._get_ft_shape(shape)
        arrays = '_target _target2 _template _mask _mask2 _gcc _ave _ave2'.split()
        for arr in arrays:
            setattr(self, '_ft' + arr,
                    self._allocate_array(self._ft_shape, np.complex128, self._fftw))

    @staticmethod
    def _allocate_array(shape, dtype, fftw):
        # pyFFTW wants SIMD-aligned buffers for fast transforms.
        if fftw:
            return zeros_aligned(shape, dtype=dtype, n=simd_alignment)
        else:
            return np.zeros(shape, dtype)

    @staticmethod
    def _get_ft_shape(shape):
        # Shape of a real-to-complex transform: the last axis is halved.
        return list(shape[:-1]) + [shape[-1] // 2 + 1]

    def _build_ffts(self):
        # build the ffts
        if self._fftw:
            self._rfftn = rfftn_builder(self._gcc)
            self._irfftn = irfftn_builder(self._ft_gcc, s=self._target.shape)
        else:
            # monkey patch the numpy fft interface
            self._rfftn = np_rfftn
            self._irfftn = np_irfftn

    def scan(self):
        super(CPUCorrelator, self).scan()
        self._lcc.fill(0)
        self._rot.fill(0)
        # range(): xrange does not exist on Python 3.
        for n in range(self._rotations.shape[0]):
            # rotate template and mask, then correlate translationally
            self._translational_scan(self._rotations[n])
            # get the indices where the scanned lcc is greater
            np.greater(self._lcc_scan, self._lcc, self._ind)
            # remember lcc and rotation index
            self._lcc[self._ind] = self._lcc_scan[self._ind]
            self._rot[self._ind] = n
            if hasattr(self, '_counter'):
                self._counter.increment()

    def _translational_scan(self, rotmat):
        self._rotate_grids(rotmat)
        self._get_lcc()

    def _rotate_grids(self, rotmat):
        # Template uses interpolation (False); mask uses nearest (True).
        rotate_grid3d(
            self._template, rotmat, self._rmax,
            self._rot_template, False
        )
        rotate_grid3d(
            self._mask, rotmat, self._rmax,
            self._rot_mask, True
        )

    def _get_lcc(self):
        np.multiply(self._rot_mask, self._rot_mask, self._rot_mask2)
        self._forward_ffts()
        # FFT-based cross-correlations of template/mask/mask^2 with target.
        conj_multiply(
            self._ft_template.ravel(), self._ft_target.ravel(),
            self._ft_gcc.ravel()
        )
        conj_multiply(
            self._ft_mask.ravel(), self._ft_target.ravel(),
            self._ft_ave.ravel()
        )
        conj_multiply(
            self._ft_mask2.ravel(), self._ft_target2.ravel(),
            self._ft_ave2.ravel()
        )
        self._backward_ffts()
        self._ave2 *= self._norm_factor
        calc_lcc(
            self._gcc.ravel(), self._ave.ravel(), self._ave2.ravel(),
            self._lcc_mask.ravel(), self._lcc_scan.ravel()
        )

    def _forward_ffts(self):
        if self._fftw:
            self._rfftn(self._rot_template, self._ft_template)
            self._rfftn(self._rot_mask, self._ft_mask)
            self._rfftn(self._rot_mask2, self._ft_mask2)
        else:
            self._ft_template = self._rfftn(self._rot_template)
            self._ft_mask = self._rfftn(self._rot_mask)
            self._ft_mask2 = self._rfftn(self._rot_mask2)

    def _backward_ffts(self):
        if self._fftw:
            self._irfftn(self._ft_gcc, self._gcc)
            self._irfftn(self._ft_ave, self._ave)
            self._irfftn(self._ft_ave2, self._ave2)
        else:
            self._gcc = self._irfftn(self._ft_gcc, s=self.target.shape)
            self._ave = self._irfftn(self._ft_ave, s=self.target.shape)
            self._ave2 = self._irfftn(self._ft_ave2, s=self.target.shape)
if OPENCL:
class GPUCorrelator(BaseCorrelator):
def __init__(self, target, queue, laplace=False):
super(GPUCorrelator, self).__init__(target, laplace=laplace)
self._queue = queue
self._ctx = self._queue.context
self._gpu = self._queue.device
self._allocate_arrays()
self._build_ffts()
self._generate_kernels()
target = self._target
if self._laplace:
target = self._laplace_filter(self._target)
# move some arrays to the GPU
self._gtarget = cl_array.to_device(self._queue, target.astype(np.float32))
self._lcc_mask = cl_array.to_device(self._queue,
self._lcc_mask.astype(np.int32))
# Do some one-time precalculations
self._rfftn(self._gtarget, self._ft_target)
self._k.multiply(self._gtarget, self._gtarget, self._target2)
self._rfftn(self._target2, self._ft_target2)
self._gshape = np.asarray(
list(self._target.shape) + [np.product(self._target.shape)],
dtype=np.int32)
def _allocate_arrays(self):
# Determine the required shape and size of an array
self._ft_shape = tuple(
[self._target.shape[0] // 2 + 1] + list(self._target.shape[1:])
)
self._shape = self._target.shape
# Allocate arrays on CPU
self._lcc = np.zeros(self._target.shape, dtype=np.float32)
self._rot = np.zeros(self._target.shape, dtype=np.int32)
# Allocate arrays on GPU
arrays = '_target2 _rot_template _rot_mask _rot_mask2 _gcc _ave _ave2 _glcc'.split()
for array in arrays:
setattr(self, array,
cl_array.zeros( self._queue, self._shape, dtype=np.float32)
)
self._grot = cl_array.zeros(self._queue, self._shape, dtype=np.int32)
# Allocate all complex arrays
ft_arrays = 'target target2 template mask mask2 gcc ave ave2 lcc'.split()
for ft_array in ft_arrays:
setattr(self, '_ft_' + ft_array,
cl_array.to_device(self._queue,
np.zeros(self._ft_shape, dtype=np.complex64))
)
def _build_ffts(self, batch_size=1):
self._rfftn = grfftn_builder(self._ctx, self._target.shape,
batch_size=batch_size)
self._irfftn = grfftn_builder(self._ctx, self._target.shape,
forward=False, batch_size=batch_size)
self._rfftn.bake(self._queue)
self._irfftn.bake(self._queue)
@property
def mask(self):
return BaseCorrelator.mask
@mask.setter
def mask(self, mask):
BaseCorrelator.mask.fset(self, mask)
self._norm_factor = np.float32(self._norm_factor)
self._rmax = np.int32(self._rmax)
self._gtemplate = cl.image_from_array(self._queue.context,
self._template.astype(np.float32))
self._gmask = cl.image_from_array(self._queue.context,
self._mask.astype(np.float32))
@property
def rotations(self):
return BaseCorrelator.rotations
@rotations.setter
def rotations(self, rotations):
BaseCorrelator.rotations.fset(self, rotations)
self._cl_rotations = np.zeros((self._rotations.shape[0], 16),
dtype=np.float32)
self._cl_rotations[:, :9] = self._rotations.reshape(-1, 9)
def _cl_rotate_grids(self, rotmat):
self._k.rotate_image3d(self._queue, self._gtemplate, rotmat,
self._rot_template)
self._k.rotate_image3d(self._queue, self._gmask, rotmat,
self._rot_mask, nearest=True)
self._queue.finish()
def _cl_get_gcc(self):
self._rfftn(self._rot_template, self._ft_template)
self._k.conj_multiply(self._ft_template, self._ft_target, self._ft_gcc)
self._irfftn(self._ft_gcc, self._gcc)
self._queue.finish()
def _cl_get_ave(self):
self._rfftn(self._rot_mask, self._ft_mask)
self._k.conj_multiply(self._ft_mask, self._ft_target, self._ft_ave)
self._irfftn(self._ft_ave, self._ave)
self._queue.finish()
def _cl_get_ave2(self):
self._k.multiply(self._rot_mask, self._rot_mask, self._rot_mask2)
self._rfftn(self._rot_mask2, self._ft_mask2)
self._k.conj_multiply(self._ft_mask2, self._ft_target2, self._ft_ave2)
self._irfftn(self._ft_ave2, self._ave2)
self._queue.finish()
def scan(self):
super(GPUCorrelator, self).scan()
self._glcc.fill(0)
self._grot.fill(0)
time0 = time()
for n in xrange(0, self._rotations.shape[0]):
rotmat = self._cl_rotations[n]
self._cl_rotate_grids(rotmat)
self._cl_get_gcc()
self._cl_get_ave()
self._cl_get_ave2()
self._k.calc_lcc_and_take_best(self._gcc, self._ave,
self._ave2, self._lcc_mask, self._norm_factor,
np.int32(n), self._glcc, self._grot)
self._queue.finish()
self._print_progress(n, self._rotations.shape[0], time0)
self._glcc.get(ary=self._lcc)
self._grot.get(ary=self._rot)
self._queue.finish()
@staticmethod
def _print_progress(n, nrot, time0):
p_done = (n + 1) / float(nrot) * 100
now = time()
eta = ((now - time0) / p_done) * (100 - p_done)
total = (now - time0) / p_done * (100)
stdout.write('{:7.2%} {:.0f}s {:.0f}s \r'.format(n / float(nrot), eta, total))
stdout.flush()
def _generate_kernels(self):
kernel_values = {'shape_x': self._shape[2],
'shape_y': self._shape[1],
'shape_z': self._shape[0],
'llength': self._rmax,
}
self._k = CLKernels(self._ctx, kernel_values)
    class CLKernels(object):
        """Container for the OpenCL kernels used by GPUCorrelator."""

        def __init__(self, ctx, values):
            # Samplers for 3D image lookups: nearest for masks, linear for
            # interpolated densities; REPEAT wraps coordinates around.
            self.sampler_nearest = cl.Sampler(ctx, True,
                    cl.addressing_mode.REPEAT, cl.filter_mode.NEAREST)
            self.sampler_linear = cl.Sampler(ctx, True,
                    cl.addressing_mode.REPEAT, cl.filter_mode.LINEAR)
            # Elementwise product of two real arrays: z = x * y.
            self.multiply = ElementwiseKernel(ctx,
                    "float *x, float *y, float *z",
                    "z[i] = x[i] * y[i];"
                    )
            # Elementwise complex z = conj(x) * y (correlation in Fourier space).
            self.conj_multiply = ElementwiseKernel(ctx,
                    "cfloat_t *x, cfloat_t *y, cfloat_t *z",
                    "z[i] = cfloat_mul(cfloat_conj(x[i]), y[i]);"
                    )
            # Compute the LCC per masked voxel and keep the best score and
            # the index of the rotation that produced it.
            self.calc_lcc_and_take_best = ElementwiseKernel(ctx,
                    """float *gcc, float *ave, float *ave2, int *mask,
                    float norm_factor, int nrot, float *lcc, int *grot""",
                    """float _lcc;
                    if (mask[i] > 0) {
                        _lcc = gcc[i] / sqrt(ave2[i] * norm_factor - ave[i] * ave[i]);
                        if (_lcc > lcc[i]) {
                            lcc[i] = _lcc;
                            grot[i] = nrot;
                        };
                    };
                    """
                    )
            # Build the grid-rotation program from the templated kernel file.
            kernel_file = os.path.join(os.path.dirname(__file__), 'kernels.cl')
            with open(kernel_file) as f:
                t = Template(f.read()).substitute(**values)
            self._program = cl.Program(ctx, t).build()
            # NOTE(review): fixed global work size (96, 64, 1) -- presumably
            # tuned for a specific device; confirm for other hardware.
            self._gws_rotate_grid3d = (96, 64, 1)

        def rotate_grid3d(self, queue, grid, rotmat, out, nearest=False):
            args = (grid.data, rotmat, out.data, np.int32(nearest))
            self._program.rotate_grid3d(queue, self._gws_rotate_grid3d, None, *args)

        def rotate_image3d(self, queue, image, rotmat, out, nearest=False):
            # Choose the sampler matching the requested interpolation mode.
            if nearest:
                args = (image, self.sampler_nearest, rotmat, out.data)
            else:
                args = (image, self.sampler_linear, rotmat, out.data)
            self._program.rotate_image3d(queue, self._gws_rotate_grid3d, None, *args)
    class grfftn_builder(object):
        """Thin clFFT (gpyfft) wrapper mimicking the pyFFTW builder API:
        construct, bake(queue), then call with (inarray, outarray)."""

        _G = GpyFFT()
        # clFFT layout constants (values from the clfftLayout enum).
        CLFFT_HERMITIAN_INTERLEAVED = 3
        CLFFT_REAL = 5

        def __init__(self, ctx, shape, forward=True, batch_size=1):
            self.ctx = ctx
            self.shape = shape
            self.plan = self._G.create_plan(self.ctx, shape)
            # Real-to-hermitian for the forward transform, reversed otherwise.
            if forward:
                layouts = (self.CLFFT_REAL, self.CLFFT_HERMITIAN_INTERLEAVED)
            else:
                layouts = (self.CLFFT_HERMITIAN_INTERLEAVED, self.CLFFT_REAL)
            self.plan.layouts = layouts
            self.plan.inplace = False
            size = np.prod(shape)
            ft_size = np.prod([shape[0] // 2 + 1] + list(shape)[1:])
            if forward:
                self.distances = (size, ft_size)
            else:
                self.distances = (ft_size, size)
            self.plan.batch_size = batch_size
            # NOTE(review): identical strides are set for input and output,
            # although the hermitian half-spectrum has a different shape --
            # presumably adequate for batch_size == 1; confirm before batching.
            strides = (shape[-2] * shape[-1], shape[-1], 1)
            self.plan.strides_in = strides
            self.plan.strides_out = strides
            self.forward = forward

        def bake(self, queue):
            # Finalize the plan for the given queue before first use.
            self.queue = queue
            self.plan.bake(queue)

        def __call__(self, inarray, outarray):
            # Enqueue the transform; direction is fixed at construction time.
            self.plan.enqueue_transform(self.queue, inarray.data,
                    outarray.data, direction_forward=self.forward)
| |
from collections import defaultdict
from corehq.apps.users.models import DomainMembershipError
from django.conf import settings
from django.core.cache import cache
from django.urls import reverse, resolve, Resolver404
from django.utils.translation import get_language
from corehq.apps.domain.models import Domain
from corehq.tabs import extension_points
from corehq.tabs.exceptions import UrlPrefixFormatError, UrlPrefixFormatsSuggestion
from corehq.tabs.utils import sidebar_to_dropdown, dropdown_dict
from memoized import memoized
from dimagi.utils.django.cache import make_template_fragment_key
from dimagi.utils.web import get_url_base
def url_is_location_safe(url):
    """Return True if *url* resolves to a view marked as location-safe."""
    from corehq.apps.locations.permissions import is_location_safe

    # Strip the site base so `resolve` sees just the path; falsy urls -> None.
    if url:
        url = url.split(get_url_base())[-1]
    else:
        url = None
    try:
        match = resolve(url)
    except Resolver404:
        return False
    else:
        # pass empty request, since we should exclude any url that requires request context
        return is_location_safe(match.func, None, match.args, match.kwargs)
class UITab(object):
    """Base class for one top-level navigation tab in the HQ header.

    Subclasses configure the tab via class attributes (``title``, ``view``,
    ``url_prefix_formats``, ...) and typically override ``sidebar_items``
    and/or ``_is_viewable``.  Instances are constructed per request.
    """

    # Human-readable tab title; subclasses must set this.
    title = None
    # Django view name used to build the tab's landing URL (see `url`).
    view = None

    # Tuple of prefixes that this UITab claims e.g.
    # ('/a/{domain}/reports/', '/a/{domain}/otherthing/')
    # This is a required field.
    url_prefix_formats = ()

    # If False, the tab is hidden unless explicitly marked active.
    show_by_default = True

    # must be instance of GaTracker
    ga_tracker = None

    def __init__(self, request, domain=None, couch_user=None, project=None):
        self.domain = domain
        self.couch_user = couch_user
        self._project = project

        # This should not be considered as part of the subclass API unless it
        # is necessary. Try to add new explicit parameters instead.
        self._request = request

        # must be set manually (i.e. `tab.is_active_tab = True`)
        self.is_active_tab = False

        # Do some preemptive checks on the subclass's configuration (if DEBUG)
        if settings.DEBUG:
            if not self.url_prefix_formats:
                raise UrlPrefixFormatsSuggestion(
                    'Class {} must define url_prefix_formats. Try\n'
                    'url_prefix_formats = {}'
                    .format(self.__class__.__name__,
                            self.get_url_prefix_formats_suggestion()))
            # NOTE: the loop variable holds a single format string even though
            # its name is plural (it shadows the class attribute's name).
            for url_prefix_formats in self.url_prefix_formats:
                try:
                    # Each entry must be a str.format template whose only
                    # placeholder is {domain}.
                    url_prefix_formats.format(domain='')
                except (IndexError, KeyError):
                    raise UrlPrefixFormatError(
                        'Class {} has url_prefix_format has an issue: {}'
                        .format(self.__class__.__name__, url_prefix_formats))

    @property
    def divider(self):
        """A divider entry usable inside dropdown menus."""
        return dropdown_dict(None, is_divider=True)

    @property
    def project(self):
        # Lazily fetch the Domain object the first time it is needed.
        if not self._project and self.domain:
            self._project = Domain.get_by_name(self.domain)
        return self._project

    @property
    def request_path(self):
        """Full path (including query string) of the current request."""
        return self._request.get_full_path()

    @property
    def can_access_all_locations(self):
        """Is this a web user who can access project-wide data?"""
        return getattr(self._request, 'can_access_all_locations', True)

    @property
    def dropdown_items(self):
        # By default the dropdown mirrors the sidebar structure.
        return sidebar_to_dropdown(sidebar_items=self.sidebar_items,
                                   domain=self.domain, current_url=self.url)

    @property
    def filtered_dropdown_items(self):
        """Dropdown items plus extension-point items, filtered for location
        safety when the user cannot access all locations."""
        items = self.dropdown_items
        tab_name = self.__class__.__name__
        # Let other apps contribute extra dropdown entries for this tab.
        items.extend([
            dropdown_dict(**item)
            for item in extension_points.uitab_dropdown_items(
                tab_name, self, domain=self.domain, request=self._request
            )
        ])
        if self.can_access_all_locations:
            return items

        # Location-restricted users only see entries whose URL is location-safe.
        filtered = []
        for item in items:
            if url_is_location_safe(item['url']):
                filtered.append(item)
        return filtered

    @property
    def sidebar_items(self):
        """List of (heading, [page_dict, ...]) pairs; subclasses override."""
        return []

    @property
    @memoized
    def filtered_sidebar_items(self):
        """Sidebar items plus extension-point items, merged by heading and
        filtered for location safety."""
        items = self.sidebar_items
        tab_name = self.__class__.__name__
        items.extend(extension_points.uitab_sidebar_items(
            tab_name=tab_name, tab=self, domain=self.domain, request=self._request
        ))
        # Merge duplicate headings while preserving first-seen heading order.
        grouped = defaultdict(list)
        headings_order = []
        for heading, pages in items:
            if heading not in headings_order:
                headings_order.append(heading)
            grouped[heading].extend(pages)
        items = [
            (heading, grouped[heading]) for heading in headings_order
        ]

        if self.can_access_all_locations:
            return items

        # Drop location-unsafe pages, then drop headings left with no pages.
        filtered = []
        for heading, pages in items:
            safe_pages = [p for p in pages if url_is_location_safe(p['url'])]
            if safe_pages:
                filtered.append((heading, safe_pages))
        return filtered

    @property
    def _is_viewable(self):
        """
        Whether the tab should be displayed. Subclass implementations can skip
        checking whether domain, couch_user, or project is not None before
        accessing an attribute of them -- this property is accessed in
        should_show and wrapped in a try block that returns False in the
        case of an AttributeError for any of those variables.
        """
        raise NotImplementedError()

    @memoized
    def should_show(self):
        """Return True if this tab should appear for the current request."""
        if not self.show_by_default and not self.is_active_tab:
            return False

        # Run tab-specific logic first, so that dropdown generation can assume any necessary data is present
        try:
            if not self._is_viewable:
                return False
        except AttributeError:
            # See the _is_viewable docstring: missing domain/couch_user/project
            # simply means "don't show".
            return False

        if not self.can_access_all_locations:
            if self.dropdown_items and not self.filtered_dropdown_items:
                # location-safe filtering makes this whole tab inaccessible
                return False

            # Just a button tab, determine if it's location safe
            if not self.dropdown_items and not url_is_location_safe(self.url):
                return False

        return True

    @property
    @memoized
    def url(self):
        """The tab's landing URL, or None if the view cannot be reversed."""
        # Prefer a domain-scoped URL; fall back to a domainless reverse.
        try:
            if self.domain:
                return reverse(self.view, args=[self.domain])
        except Exception:
            pass

        try:
            return reverse(self.view)
        except Exception:
            return None

    @property
    def url_prefixes(self):
        # Use self._request.domain instead of self.domain to generate url-prefixes
        # because the latter might have a normalized domain name which might not match the
        # domain name mentioned in the URL. for example domain-name 'hq_test' is normalized to
        # 'hq-test'
        return [url_prefix_format.format(domain=getattr(self._request, 'domain', None))
                for url_prefix_format in self.url_prefix_formats]

    def get_url_prefix_formats_suggestion(self):
        """Infer a plausible ``url_prefix_formats`` value from this tab's known
        URLs (used only by the DEBUG-time error message in ``__init__``)."""
        import six.moves.urllib.parse
        accepted_urls = []
        # sorted shortest first
        all_urls = sorted(
            six.moves.urllib.parse.urlparse(url).path
            # replace the actual domain with {domain}
            .replace('/a/{}'.format(self.domain), '/a/{domain}')
            for url in self._get_inferred_urls
        )
        # accept only urls that don't start with an already-accepted prefix
        for url in all_urls:
            for prefix in accepted_urls:
                if url.startswith(prefix):
                    break
            else:
                accepted_urls.append(url)
        return tuple(accepted_urls)

    @property
    @memoized
    def _get_inferred_urls(self):
        """All URLs this tab links to: its landing URL plus sidebar page URLs."""
        urls = [self.url] if self.url else []
        for name, section in self.sidebar_items:
            urls.extend(item['url'] for item in section)
        return urls

    @classmethod
    def clear_dropdown_cache(cls, domain, user):
        """Invalidate the cached header-tab template fragment for *user*.

        NOTE(review): the vary-on list below must stay in sync with the
        {% cache %} key used by the header template -- confirm when changing.
        """
        user_id = user.get_id
        try:
            user_role = user.get_role(domain, allow_enterprise=True)
            role_version = user_role.cache_version if user_role else None
        except DomainMembershipError:
            role_version = None
        # The fragment is cached separately for active/inactive renderings.
        for is_active in True, False:
            key = make_template_fragment_key('header_tab', [
                cls.class_name(),
                domain,
                is_active,
                user_id,
                role_version,
                get_language(),
            ])
            cache.delete(key)

    @classmethod
    def clear_dropdown_cache_for_all_domain_users(cls, domain):
        """Invalidate the cached header-tab fragment for every user in *domain*."""
        from corehq.apps.users.models import CouchUser
        for user_id in CouchUser.ids_by_domain(domain):
            user = CouchUser.get_by_user_id(user_id)
            cls.clear_dropdown_cache(domain, user)

    @property
    def css_id(self):
        # Used as the DOM id of the tab element.
        return self.__class__.__name__

    @classmethod
    def class_name(cls):
        return cls.__name__
| |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class authenticationvserver_auditsyslogpolicy_binding(base_resource) :
    """ Binding class showing the auditsyslogpolicy that can be bound to authenticationvserver.

    Auto-generated NITRO SDK boilerplate: each binding attribute is exposed as
    a property backed by a ``_``-prefixed field; the ``try/except: raise e``
    wrappers are kept for consistency with the rest of the generated SDK.
    """
    def __init__(self) :
        self._policy = ""
        self._priority = 0
        self._name = ""
        self._secondary = False
        self._groupextraction = False
        self._nextfactor = ""
        self._gotopriorityexpression = ""
        # NOTE(review): inside the class body ``___count`` is name-mangled to
        # _<classname>___count; the literal '___count' key read by count() /
        # count_filtered() is presumably set on response objects by the
        # payload parser, not by this assignment -- confirm.
        self.___count = 0

    @property
    def priority(self) :
        """The priority, if any, of the vpn vserver policy.
        """
        try :
            return self._priority
        except Exception as e:
            raise e

    @priority.setter
    def priority(self, priority) :
        """The priority, if any, of the vpn vserver policy.
        """
        try :
            self._priority = priority
        except Exception as e:
            raise e

    @property
    def name(self) :
        """Name of the authentication virtual server to which to bind the policy.<br/>Minimum length = 1.
        """
        try :
            return self._name
        except Exception as e:
            raise e

    @name.setter
    def name(self, name) :
        """Name of the authentication virtual server to which to bind the policy.<br/>Minimum length = 1
        """
        try :
            self._name = name
        except Exception as e:
            raise e

    @property
    def nextfactor(self) :
        """Applicable only while binding advance authentication policy as classic authentication policy does not support nFactor.
        """
        try :
            return self._nextfactor
        except Exception as e:
            raise e

    @nextfactor.setter
    def nextfactor(self, nextfactor) :
        """Applicable only while binding advance authentication policy as classic authentication policy does not support nFactor
        """
        try :
            self._nextfactor = nextfactor
        except Exception as e:
            raise e

    @property
    def gotopriorityexpression(self) :
        """Applicable only to advance authentication policy. Expression or other value specifying the next policy to be evaluated if the current policy evaluates to TRUE. Specify one of the following values:
        * NEXT - Evaluate the policy with the next higher priority number.
        * END - End policy evaluation.
        * USE_INVOCATION_RESULT - Applicable if this policy invokes another policy label. If the final goto in the invoked policy label has a value of END, the evaluation stops. If the final goto is anything other than END, the current policy label performs a NEXT.
        * A default syntax expression that evaluates to a number.
        If you specify an expression, the number to which it evaluates determines the next policy to evaluate, as follows:
        * If the expression evaluates to a higher numbered priority, the policy with that priority is evaluated next.
        * If the expression evaluates to the priority of the current policy, the policy with the next higher numbered priority is evaluated next.
        * If the expression evaluates to a priority number that is numerically higher than the highest numbered priority, policy evaluation ends.
        An UNDEF event is triggered if:
        * The expression is invalid.
        * The expression evaluates to a priority number that is numerically lower than the current policy's priority.
        * The expression evaluates to a priority number that is between the current policy's priority number (say, 30) and the highest priority number (say, 100), but does not match any configured priority number (for example, the expression evaluates to the number 85). This example assumes that the priority number increments by 10 for every successive policy, and therefore a priority number of 85 does not exist in the policy label.
        """
        try :
            return self._gotopriorityexpression
        except Exception as e:
            raise e

    @gotopriorityexpression.setter
    def gotopriorityexpression(self, gotopriorityexpression) :
        """Applicable only to advance authentication policy. Expression or other value specifying the next policy to be evaluated if the current policy evaluates to TRUE. Specify one of the following values:
        * NEXT - Evaluate the policy with the next higher priority number.
        * END - End policy evaluation.
        * USE_INVOCATION_RESULT - Applicable if this policy invokes another policy label. If the final goto in the invoked policy label has a value of END, the evaluation stops. If the final goto is anything other than END, the current policy label performs a NEXT.
        * A default syntax expression that evaluates to a number.
        If you specify an expression, the number to which it evaluates determines the next policy to evaluate, as follows:
        * If the expression evaluates to a higher numbered priority, the policy with that priority is evaluated next.
        * If the expression evaluates to the priority of the current policy, the policy with the next higher numbered priority is evaluated next.
        * If the expression evaluates to a priority number that is numerically higher than the highest numbered priority, policy evaluation ends.
        An UNDEF event is triggered if:
        * The expression is invalid.
        * The expression evaluates to a priority number that is numerically lower than the current policy's priority.
        * The expression evaluates to a priority number that is between the current policy's priority number (say, 30) and the highest priority number (say, 100), but does not match any configured priority number (for example, the expression evaluates to the number 85). This example assumes that the priority number increments by 10 for every successive policy, and therefore a priority number of 85 does not exist in the policy label.
        """
        try :
            self._gotopriorityexpression = gotopriorityexpression
        except Exception as e:
            raise e

    @property
    def secondary(self) :
        """Applicable only while bindind classic authentication policy as advance authentication policy use nFactor.
        """
        try :
            return self._secondary
        except Exception as e:
            raise e

    @secondary.setter
    def secondary(self, secondary) :
        """Applicable only while bindind classic authentication policy as advance authentication policy use nFactor
        """
        try :
            self._secondary = secondary
        except Exception as e:
            raise e

    @property
    def policy(self) :
        """The name of the policy, if any, bound to the authentication vserver.
        """
        try :
            return self._policy
        except Exception as e:
            raise e

    @policy.setter
    def policy(self, policy) :
        """The name of the policy, if any, bound to the authentication vserver.
        """
        try :
            self._policy = policy
        except Exception as e:
            raise e

    @property
    def groupextraction(self) :
        """Applicable only while bindind classic authentication policy as advance authentication policy use nFactor.
        """
        try :
            return self._groupextraction
        except Exception as e:
            raise e

    @groupextraction.setter
    def groupextraction(self, groupextraction) :
        """Applicable only while bindind classic authentication policy as advance authentication policy use nFactor
        """
        try :
            self._groupextraction = groupextraction
        except Exception as e:
            raise e

    def _get_nitro_response(self, service, response) :
        """ converts nitro response into object and returns the object array in case of get request.
        """
        try :
            result = service.payload_formatter.string_to_resource(authenticationvserver_auditsyslogpolicy_binding_response, response, self.__class__.__name__)
            if(result.errorcode != 0) :
                # NOTE(review): errorcode 444 appears to mean the session is no
                # longer valid, hence the session is cleared -- confirm against
                # the NITRO error-code reference.
                if (result.errorcode == 444) :
                    service.clear_session(self)
                if result.severity :
                    if (result.severity == "ERROR") :
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.authenticationvserver_auditsyslogpolicy_binding
        except Exception as e :
            raise e

    def _get_object_name(self) :
        """ Returns the value of object identifier argument
        """
        try :
            if self.name is not None :
                return str(self.name)
            return None
        except Exception as e :
            raise e

    @classmethod
    def add(cls, client, resource) :
        """ Use this API to add (bind) one resource or a list of resources.
        Returns None when *resource* is an empty list.
        """
        try :
            if resource and type(resource) is not list :
                updateresource = authenticationvserver_auditsyslogpolicy_binding()
                updateresource.name = resource.name
                updateresource.policy = resource.policy
                updateresource.priority = resource.priority
                updateresource.secondary = resource.secondary
                updateresource.groupextraction = resource.groupextraction
                updateresource.nextfactor = resource.nextfactor
                updateresource.gotopriorityexpression = resource.gotopriorityexpression
                return updateresource.update_resource(client)
            else :
                if resource and len(resource) > 0 :
                    updateresources = [authenticationvserver_auditsyslogpolicy_binding() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        updateresources[i].name = resource[i].name
                        updateresources[i].policy = resource[i].policy
                        updateresources[i].priority = resource[i].priority
                        updateresources[i].secondary = resource[i].secondary
                        updateresources[i].groupextraction = resource[i].groupextraction
                        updateresources[i].nextfactor = resource[i].nextfactor
                        updateresources[i].gotopriorityexpression = resource[i].gotopriorityexpression
                    return cls.update_bulk_request(client, updateresources)
        except Exception as e :
            raise e

    @classmethod
    def delete(cls, client, resource) :
        """ Use this API to delete (unbind) one resource or a list of resources.
        Returns None when *resource* is an empty list.
        """
        try :
            if resource and type(resource) is not list :
                deleteresource = authenticationvserver_auditsyslogpolicy_binding()
                deleteresource.name = resource.name
                deleteresource.policy = resource.policy
                deleteresource.secondary = resource.secondary
                deleteresource.groupextraction = resource.groupextraction
                return deleteresource.delete_resource(client)
            else :
                if resource and len(resource) > 0 :
                    deleteresources = [authenticationvserver_auditsyslogpolicy_binding() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        deleteresources[i].name = resource[i].name
                        deleteresources[i].policy = resource[i].policy
                        deleteresources[i].secondary = resource[i].secondary
                        deleteresources[i].groupextraction = resource[i].groupextraction
                    return cls.delete_bulk_request(client, deleteresources)
        except Exception as e :
            raise e

    @classmethod
    def get(cls, service, name) :
        """ Use this API to fetch authenticationvserver_auditsyslogpolicy_binding resources.
        """
        try :
            obj = authenticationvserver_auditsyslogpolicy_binding()
            obj.name = name
            response = obj.get_resources(service)
            return response
        except Exception as e:
            raise e

    @classmethod
    def get_filtered(cls, service, name, filter_) :
        """ Use this API to fetch filtered set of authenticationvserver_auditsyslogpolicy_binding resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = authenticationvserver_auditsyslogpolicy_binding()
            obj.name = name
            option_ = options()
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            return response
        except Exception as e:
            raise e

    @classmethod
    def count(cls, service, name) :
        """ Use this API to count authenticationvserver_auditsyslogpolicy_binding resources configured on NetScaler.
        """
        try :
            obj = authenticationvserver_auditsyslogpolicy_binding()
            obj.name = name
            option_ = options()
            option_.count = True
            response = obj.get_resources(service, option_)
            # The count is carried in the '___count' attribute of the first
            # result (set by the payload parser).
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e

    @classmethod
    def count_filtered(cls, service, name, filter_) :
        """ Use this API to count the filtered set of authenticationvserver_auditsyslogpolicy_binding resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = authenticationvserver_auditsyslogpolicy_binding()
            obj.name = name
            option_ = options()
            option_.count = True
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e
class authenticationvserver_auditsyslogpolicy_binding_response(base_response) :
    """NITRO response wrapper holding a list of binding resources.

    Fix: the binding list was initialized twice (first to ``[]``, then
    immediately to a list of ``length`` empty bindings); the redundant
    first assignment is removed.
    """
    def __init__(self, length=1) :
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-allocate `length` empty binding objects for the payload parser
        # to fill in.
        self.authenticationvserver_auditsyslogpolicy_binding = [authenticationvserver_auditsyslogpolicy_binding() for _ in range(length)]
| |
# -*- coding: utf-8 -*-
from ccxt.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
class bitbay (Exchange):
    """ccxt adapter for the BitBay (bitbay.net) exchange.

    Public endpoints are plain GET requests; private endpoints are signed
    POSTs (see `sign`).
    """

    def describe(self):
        """Return exchange metadata (ids, URLs, endpoints, static market map,
        fees) merged over the base Exchange.describe()."""
        return self.deep_extend(super(bitbay, self).describe(), {
            'id': 'bitbay',
            'name': 'BitBay',
            'countries': ['PL', 'EU'],  # Poland
            'rateLimit': 1000,
            'hasCORS': True,
            'hasWithdraw': True,
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/27766132-978a7bd8-5ece-11e7-9540-bc96d1e9bbb8.jpg',
                'www': 'https://bitbay.net',
                'api': {
                    'public': 'https://bitbay.net/API/Public',
                    'private': 'https://bitbay.net/API/Trading/tradingApi.php',
                },
                'doc': [
                    'https://bitbay.net/public-api',
                    'https://bitbay.net/account/tab-api',
                    'https://github.com/BitBayNet/API',
                ],
            },
            'api': {
                'public': {
                    'get': [
                        '{id}/all',
                        '{id}/market',
                        '{id}/orderbook',
                        '{id}/ticker',
                        '{id}/trades',
                    ],
                },
                'private': {
                    'post': [
                        'info',
                        'trade',
                        'cancel',
                        'orderbook',
                        'orders',
                        'transfer',
                        'withdraw',
                        'history',
                        'transactions',
                    ],
                },
            },
            # Static market map; note BCH trades under the exchange id 'BCC'.
            'markets': {
                'BTC/USD': {'id': 'BTCUSD', 'symbol': 'BTC/USD', 'base': 'BTC', 'quote': 'USD', 'baseId': 'BTC', 'quoteId': 'USD'},
                'BTC/EUR': {'id': 'BTCEUR', 'symbol': 'BTC/EUR', 'base': 'BTC', 'quote': 'EUR', 'baseId': 'BTC', 'quoteId': 'EUR'},
                'BTC/PLN': {'id': 'BTCPLN', 'symbol': 'BTC/PLN', 'base': 'BTC', 'quote': 'PLN', 'baseId': 'BTC', 'quoteId': 'PLN'},
                'LTC/USD': {'id': 'LTCUSD', 'symbol': 'LTC/USD', 'base': 'LTC', 'quote': 'USD', 'baseId': 'LTC', 'quoteId': 'USD'},
                'LTC/EUR': {'id': 'LTCEUR', 'symbol': 'LTC/EUR', 'base': 'LTC', 'quote': 'EUR', 'baseId': 'LTC', 'quoteId': 'EUR'},
                'LTC/PLN': {'id': 'LTCPLN', 'symbol': 'LTC/PLN', 'base': 'LTC', 'quote': 'PLN', 'baseId': 'LTC', 'quoteId': 'PLN'},
                'LTC/BTC': {'id': 'LTCBTC', 'symbol': 'LTC/BTC', 'base': 'LTC', 'quote': 'BTC', 'baseId': 'LTC', 'quoteId': 'BTC'},
                'ETH/USD': {'id': 'ETHUSD', 'symbol': 'ETH/USD', 'base': 'ETH', 'quote': 'USD', 'baseId': 'ETH', 'quoteId': 'USD'},
                'ETH/EUR': {'id': 'ETHEUR', 'symbol': 'ETH/EUR', 'base': 'ETH', 'quote': 'EUR', 'baseId': 'ETH', 'quoteId': 'EUR'},
                'ETH/PLN': {'id': 'ETHPLN', 'symbol': 'ETH/PLN', 'base': 'ETH', 'quote': 'PLN', 'baseId': 'ETH', 'quoteId': 'PLN'},
                'ETH/BTC': {'id': 'ETHBTC', 'symbol': 'ETH/BTC', 'base': 'ETH', 'quote': 'BTC', 'baseId': 'ETH', 'quoteId': 'BTC'},
                'LSK/USD': {'id': 'LSKUSD', 'symbol': 'LSK/USD', 'base': 'LSK', 'quote': 'USD', 'baseId': 'LSK', 'quoteId': 'USD'},
                'LSK/EUR': {'id': 'LSKEUR', 'symbol': 'LSK/EUR', 'base': 'LSK', 'quote': 'EUR', 'baseId': 'LSK', 'quoteId': 'EUR'},
                'LSK/PLN': {'id': 'LSKPLN', 'symbol': 'LSK/PLN', 'base': 'LSK', 'quote': 'PLN', 'baseId': 'LSK', 'quoteId': 'PLN'},
                'LSK/BTC': {'id': 'LSKBTC', 'symbol': 'LSK/BTC', 'base': 'LSK', 'quote': 'BTC', 'baseId': 'LSK', 'quoteId': 'BTC'},
                'BCH/USD': {'id': 'BCCUSD', 'symbol': 'BCH/USD', 'base': 'BCH', 'quote': 'USD', 'baseId': 'BCC', 'quoteId': 'USD'},
                'BCH/EUR': {'id': 'BCCEUR', 'symbol': 'BCH/EUR', 'base': 'BCH', 'quote': 'EUR', 'baseId': 'BCC', 'quoteId': 'EUR'},
                'BCH/PLN': {'id': 'BCCPLN', 'symbol': 'BCH/PLN', 'base': 'BCH', 'quote': 'PLN', 'baseId': 'BCC', 'quoteId': 'PLN'},
                'BCH/BTC': {'id': 'BCCBTC', 'symbol': 'BCH/BTC', 'base': 'BCH', 'quote': 'BTC', 'baseId': 'BCC', 'quoteId': 'BTC'},
                'BTG/USD': {'id': 'BTGUSD', 'symbol': 'BTG/USD', 'base': 'BTG', 'quote': 'USD', 'baseId': 'BTG', 'quoteId': 'USD'},
                'BTG/EUR': {'id': 'BTGEUR', 'symbol': 'BTG/EUR', 'base': 'BTG', 'quote': 'EUR', 'baseId': 'BTG', 'quoteId': 'EUR'},
                'BTG/PLN': {'id': 'BTGPLN', 'symbol': 'BTG/PLN', 'base': 'BTG', 'quote': 'PLN', 'baseId': 'BTG', 'quoteId': 'PLN'},
                'BTG/BTC': {'id': 'BTGBTC', 'symbol': 'BTG/BTC', 'base': 'BTG', 'quote': 'BTC', 'baseId': 'BTG', 'quoteId': 'BTC'},
                'DASH/USD': {'id': 'DASHUSD', 'symbol': 'DASH/USD', 'base': 'DASH', 'quote': 'USD', 'baseId': 'DASH', 'quoteId': 'USD'},
                'DASH/EUR': {'id': 'DASHEUR', 'symbol': 'DASH/EUR', 'base': 'DASH', 'quote': 'EUR', 'baseId': 'DASH', 'quoteId': 'EUR'},
                'DASH/PLN': {'id': 'DASHPLN', 'symbol': 'DASH/PLN', 'base': 'DASH', 'quote': 'PLN', 'baseId': 'DASH', 'quoteId': 'PLN'},
                'DASH/BTC': {'id': 'DASHBTC', 'symbol': 'DASH/BTC', 'base': 'DASH', 'quote': 'BTC', 'baseId': 'DASH', 'quoteId': 'BTC'},
                'GAME/USD': {'id': 'GAMEUSD', 'symbol': 'GAME/USD', 'base': 'GAME', 'quote': 'USD', 'baseId': 'GAME', 'quoteId': 'USD'},
                'GAME/EUR': {'id': 'GAMEEUR', 'symbol': 'GAME/EUR', 'base': 'GAME', 'quote': 'EUR', 'baseId': 'GAME', 'quoteId': 'EUR'},
                'GAME/PLN': {'id': 'GAMEPLN', 'symbol': 'GAME/PLN', 'base': 'GAME', 'quote': 'PLN', 'baseId': 'GAME', 'quoteId': 'PLN'},
                'GAME/BTC': {'id': 'GAMEBTC', 'symbol': 'GAME/BTC', 'base': 'GAME', 'quote': 'BTC', 'baseId': 'GAME', 'quoteId': 'BTC'},
            },
            'fees': {
                'trading': {
                    # maker 0.3%, taker 0.43%
                    'maker': 0.3 / 100,
                    'taker': 0.0043,
                },
            },
        })

    def fetch_balance(self, params={}):
        """Fetch account balances via the private `info` endpoint.

        Raises ExchangeError when the response carries no 'balances' key.
        (Mutable default `params` is the ccxt convention; it is never mutated.)
        """
        response = self.privatePostInfo()
        if 'balances' in response:
            balance = response['balances']
            result = {'info': balance}
            codes = list(self.currencies.keys())
            for i in range(0, len(codes)):
                code = codes[i]
                currency = self.currencies[code]
                id = currency['id']
                account = self.account()
                if id in balance:
                    account['free'] = float(balance[id]['available'])
                    account['used'] = float(balance[id]['locked'])
                    account['total'] = self.sum(account['free'], account['used'])
                result[code] = account
            return self.parse_balance(result)
        raise ExchangeError(self.id + ' empty balance response ' + self.json(response))

    def fetch_order_book(self, symbol, params={}):
        """Fetch and normalize the order book for *symbol*."""
        orderbook = self.publicGetIdOrderbook(self.extend({
            'id': self.market_id(symbol),
        }, params))
        return self.parse_order_book(orderbook)

    def fetch_ticker(self, symbol, params={}):
        """Fetch the ticker for *symbol* and map it to the ccxt ticker shape.

        NOTE(review): safe_float may return None; `baseVolume * vwap` would
        then raise TypeError -- confirm the endpoint always supplies both.
        """
        ticker = self.publicGetIdTicker(self.extend({
            'id': self.market_id(symbol),
        }, params))
        # The endpoint provides no timestamp; use local receive time.
        timestamp = self.milliseconds()
        baseVolume = self.safe_float(ticker, 'volume')
        vwap = self.safe_float(ticker, 'vwap')
        quoteVolume = baseVolume * vwap
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': self.safe_float(ticker, 'max'),
            'low': self.safe_float(ticker, 'min'),
            'bid': self.safe_float(ticker, 'bid'),
            'ask': self.safe_float(ticker, 'ask'),
            'vwap': vwap,
            'open': None,
            'close': None,
            'first': None,
            'last': self.safe_float(ticker, 'last'),
            'change': None,
            'percentage': None,
            'average': self.safe_float(ticker, 'average'),
            'baseVolume': baseVolume,
            'quoteVolume': quoteVolume,
            'info': ticker,
        }

    def parse_trade(self, trade, market):
        """Map a raw public trade (seconds-based 'date') to the ccxt shape."""
        timestamp = trade['date'] * 1000
        return {
            'id': trade['tid'],
            'info': trade,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': market['symbol'],
            'type': None,
            'side': trade['type'],
            'price': trade['price'],
            'amount': trade['amount'],
        }

    def fetch_trades(self, symbol, since=None, limit=None, params={}):
        """Fetch recent public trades for *symbol*."""
        market = self.market(symbol)
        response = self.publicGetIdTrades(self.extend({
            'id': market['id'],
        }, params))
        return self.parse_trades(response, market, since, limit)

    def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Place an order via the private `trade` endpoint.

        The `type` argument (limit/market) is not forwarded; only side,
        amount, currencies and rate are sent.
        """
        market = self.market(symbol)
        return self.privatePostTrade(self.extend({
            'type': side,
            'currency': market['baseId'],
            'amount': amount,
            'payment_currency': market['quoteId'],
            'rate': price,
        }, params))

    def cancel_order(self, id, symbol=None, params={}):
        """Cancel order *id*; `symbol` and `params` are ignored by this API."""
        return self.privatePostCancel({'id': id})

    def is_fiat(self, currency):
        """Return True if *currency* is one of the supported fiat codes."""
        fiatCurrencies = {
            'USD': True,
            'EUR': True,
            'PLN': True,
        }
        if currency in fiatCurrencies:
            return True
        return False

    def withdraw(self, code, amount, address, params={}):
        """Withdraw *amount* of *code*: fiat uses `withdraw`, crypto uses
        `transfer` (with a destination address)."""
        self.load_markets()
        method = None
        currency = self.currency(code)
        request = {
            'currency': currency['id'],
            'quantity': amount,
        }
        if self.is_fiat(code):
            method = 'privatePostWithdraw'
            # request['account'] = params['account']  # they demand an account number
            # request['express'] = params['express']  # whatever it means, they don't explain
            # request['bic'] = ''
        else:
            method = 'privatePostTransfer'
            request['address'] = address
        response = getattr(self, method)(self.extend(request, params))
        # The API does not return a withdrawal id.
        return {
            'info': response,
            'id': None,
        }

    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the request: public GETs hit `<base>/<path>.json`; private
        calls POST an urlencoded body signed with HMAC-SHA512 of the body."""
        url = self.urls['api'][api]
        if api == 'public':
            url += '/' + self.implode_params(path, params) + '.json'
        else:
            self.check_required_credentials()
            # 'moment' is the nonce expected by the trading API.
            body = self.urlencode(self.extend({
                'method': path,
                'moment': self.nonce(),
            }, params))
            headers = {
                'Content-Type': 'application/x-www-form-urlencoded',
                'API-Key': self.apiKey,
                'API-Hash': self.hmac(self.encode(body), self.encode(self.secret), hashlib.sha512),
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
| |
import os
from os.path import exists
from os.path import join as osjoin

import astropy.units as u
import matplotlib.pyplot as p
import numpy as np
import seaborn as sb
from astropy.table import Table
from corner import hist2d
from scipy.stats import binned_statistic

from cube_analysis.h2_models import (krumholz2013_ratio_model,
                                     krumholz2013_sigmaHI,
                                     optimize_clump_factors)
from paths import (fourteenB_HI_data_wGBT_path,
                   allfigs_path)
from plotting_styles import (onecolumn_figure, default_figure,
                             twocolumn_figure, twocolumn_twopanel_figure)
# Output directory for the H2-formation-model figures.
fig_path = osjoin(allfigs_path(""), "co_vs_hi/h2_formation_models")
if not exists(fig_path):
    os.mkdir(fig_path)

# Bin radius should match whatever was used in co_radial_profile.py
dr = 100.0 * u.pc

# Read in the radial profiles table
tab_name = "tables/co21_hi_radialprofiles_{}pc.fits".format(int(dr.value))

try:
    tab = Table.read(fourteenB_HI_data_wGBT_path(tab_name))
except OSError:
    raise OSError("Table does not exist in the 14B-088 data path. "
                  "Run co_radial_profile.py first:"
                  " {}".format(tab_name))

# Radial profiles: radius, CO (H2) and HI surface densities plus their
# per-bin standard deviations.
rs = u.Quantity(tab["Radius"])
sd = u.Quantity(tab["CO_Sigma"])
sd_sigma = u.Quantity(tab["CO_Sigma_std"])
sd_hi = u.Quantity(tab["HI_Sigma"])
sd_sigma_hi = u.Quantity(tab["HI_Sigma_std"])
# Now plot their ratio against the total gas surface density
gas_ratio = sd.value / sd_hi.value
# Uncertainties propagated in quadrature from the CO and HI profiles.
gas_ratio_sigma = \
    (gas_ratio *
     np.sqrt((sd_sigma / sd)**2 + (sd_sigma_hi / sd_hi)**2)).value
# The ratio is plotted in log10, so convert the uncertainty accordingly.
log_gas_ratio_sigma = gas_ratio_sigma / (gas_ratio * np.log(10))
total_sd = sd.value + sd_hi.value
total_sd_sigma = \
    (total_sd *
     np.sqrt((sd_sigma / sd)**2 + (sd_sigma_hi / sd_hi)**2)).value

# Overplot the Krumholz model with a few different clumping factors.
# Theoretically, c -> 1 at a resolution of 100 pc. but I'm finding a better
# match when c=4-6. The model is supposed to take metallicity into account,
# but maybe the gradient is causing some issues? Schruba+11 finds c~2 for
# their entire sample, with a lot of scatter
sds = np.arange(1, 40, 0.2)

onecolumn_figure(font_scale=1.0)
# p.semilogy(total_sd, gas_ratio, 'bD')
p.errorbar(total_sd, np.log10(gas_ratio), yerr=log_gas_ratio_sigma,
           xerr=total_sd_sigma, alpha=0.6, fmt='D')
p.plot(sds, np.log10(krumholz2013_ratio_model(sds, c=1, Z=1.0)), "--",
       label="c=1, Z=1.0")
p.plot(sds, np.log10(krumholz2013_ratio_model(sds, c=2, Z=0.5)), "-.",
       label="c=2, Z=0.5")
p.plot(sds, np.log10(krumholz2013_ratio_model(sds, c=3, Z=0.5)), ":",
       label="c=3, Z=0.5")
p.plot(sds, np.log10(krumholz2013_ratio_model(sds, c=3, Z=1.0)), "-",
       label="c=3, Z=1.0")
p.xlabel("$\Sigma_{\mathrm{Gas}}$ (M$_{\odot}$ pc$^{-2}$)")
p.ylabel("log H$_2$-to-HI Ratio $\Sigma_{\mathrm{H2}} /"
         " \Sigma_{\mathrm{HI}}$")
p.xlim([2, 22])
p.ylim([-4, 1])
p.legend(loc='lower right', frameon=True)
p.grid()
p.tight_layout()

save_name = "ratio_totalsigma_w_krumholzmodel_dr"
p.savefig(osjoin(fig_path, "{0}_{1}pc.pdf".format(save_name,
                                                  int(dr.value))))
p.savefig(osjoin(fig_path, "{0}_{1}pc.png".format(save_name,
                                                  int(dr.value))))
p.close()
# Gratier+16 find evidence for a dark CO component, at about ~5 Msol/pc^2.
# Let's add this in, assuming the dark component is *only* in the CO and
# not due to optically thick HI (some portion probably is).
sd_dark = sd + 5 * u.solMass / u.pc**2
# Scale the CO uncertainty by the same factor as the surface density.
sd_dark_sigma = (sd_dark * sd_sigma) / sd

gas_ratio_dark = sd_dark.value / sd_hi.value
gas_ratio_dark_sigma = \
    (gas_ratio_dark *
     np.sqrt((sd_dark_sigma / sd_dark)**2 +
             (sd_sigma_hi / sd_hi)**2)).value
log_gas_ratio_dark_sigma = gas_ratio_dark_sigma / \
    (gas_ratio_dark * np.log(10))
total_sd_plus_dark = sd_dark.value + sd_hi.value
total_sd_plus_dark_sigma = \
    (total_sd_plus_dark *
     np.sqrt((sd_dark_sigma / sd_dark)**2 +
             (sd_sigma_hi / sd_hi)**2)).value

# Plot the original and dark-corrected points against the same model curves.
p.plot(total_sd, np.log10(gas_ratio), 'D',
       alpha=0.6, label=r"H$_2$ + HI")
# p.errorbar(total_sd, np.log10(gas_ratio), yerr=log_gas_ratio_sigma,
#            xerr=total_sd_sigma, alpha=0.6, fmt='D',
#            label=r"H$_2$ + HI")
p.plot(sds, np.log10(krumholz2013_ratio_model(sds, c=1, Z=1.0)), "--",
       label="c=1, Z=1.0")
p.plot(sds, np.log10(krumholz2013_ratio_model(sds, c=2, Z=0.5)), "-.",
       label="c=2, Z=0.5")
p.plot(sds, np.log10(krumholz2013_ratio_model(sds, c=3, Z=0.5)), ":",
       label="c=3, Z=0.5")
p.plot(sds, np.log10(krumholz2013_ratio_model(sds, c=3, Z=1.0)), "-",
       label="c=3, Z=1.0")
p.plot(total_sd_plus_dark, np.log10(gas_ratio_dark), 'o',
       alpha=0.6, label=r"H$_2$ + HI + CO-dark H$_2$")
# p.errorbar(total_sd_plus_dark, np.log10(gas_ratio_dark),
#            yerr=log_gas_ratio_dark_sigma,
#            xerr=total_sd_plus_dark_sigma, alpha=0.6, fmt='o',
#            label=r"H$_2$ + HI + CO-dark H$_2$")
p.xlabel("$\Sigma_{\mathrm{Gas}}$ (M$_{\odot}$ pc$^{-2}$)")
p.ylabel("log H$_2$-to-HI Ratio $\Sigma_{\mathrm{H2}} /"
         " \Sigma_{\mathrm{HI}}$")
p.xlim([2, 25])
p.ylim([-4, 1])
p.legend(loc='lower right', frameon=True)
p.grid()
p.tight_layout()

save_name = "ratio_totalsigma_dark_w_krumholzmodel_dr"
p.savefig(osjoin(fig_path, "{0}_{1}pc.pdf".format(save_name,
                                                  int(dr.value))))
p.savefig(osjoin(fig_path, "{0}_{1}pc.png".format(save_name,
                                                  int(dr.value))))
p.close()
# But M33 has a known metallicity gradient, so we can do a bit better
# Clumping factors should converge to 1 on 100 pc, based on the Krumholz
# model. This isn't happening here, so let's see what c needs to be for the
# curve to intersect with each point we have.

# Constant metallicity of 0.5 (solar units).
clump_constz = optimize_clump_factors(total_sd, gas_ratio, Z=0.5,
                                      c_init=3.5)
# Metallicity gradient from Rosolowsky & Simon (2008); the plot label below
# uses 2008, so the year here was corrected -- confirm the citation.
def ros_sim_metallicity(radius):
    """Metallicity at *radius* (kpc), normalized by 10**8.8 (presumably the
    adopted solar 12 + log(O/H) -- confirm)."""
    log_oh = 8.36 - 0.027 * radius
    return 10 ** (log_oh - 8.8)
# Per-radial-bin clump factors using the Rosolowsky & Simon gradient.
clump_rossim = optimize_clump_factors(total_sd, gas_ratio,
                                      Z=ros_sim_metallicity(rs.value),
                                      c_init=7.0)
# And from Bresolin 2011, Equation 1
def bresolin_metallicity(radius):
    """Metallicity relative to solar at ``radius`` (kpc), from the
    Bresolin (2011) Eq. 1 gradient 12 + log(O/H) = 8.48 - 0.042 R,
    normalized by 8.8."""
    log_oh = 8.48 - 0.042 * radius
    # Alternative fit kept for reference: log_oh = 8.82 - 0.03 * radius
    return 10 ** (log_oh - 8.8)
# Per-radial-bin clump factors using the Bresolin (2011) gradient.
clump_bresolin = optimize_clump_factors(total_sd, gas_ratio,
                                        Z=bresolin_metallicity(rs.value),
                                        c_init=7.)
# Compare the three clump-factor solutions vs. radius; the outermost
# bin is dropped ([:-1]) from each curve.
onecolumn_figure(font_scale=1.0)
p.plot(rs.value[:-1], clump_constz[:-1], 'D-', label="Z=0.5")
p.plot(rs.value[:-1], clump_rossim[:-1], 'o--',
       label="Rosolowsky \& Simon (2008)")
p.plot(rs.value[:-1], clump_bresolin[:-1], 's-.', label="Bresolin (2011)")
p.legend(loc='best', frameon=True)
p.ylim([2, 8])
p.grid()
p.ylabel("Clumping Factor")
p.xlabel("Radius (kpc)")
p.tight_layout()
save_name = "clumpfactor_krumholzmodel_dr"
p.savefig(osjoin(fig_path, "{0}_{1}pc.pdf".format(save_name,
                                                  int(dr.value))))
p.savefig(osjoin(fig_path, "{0}_{1}pc.png".format(save_name,
                                                  int(dr.value))))
p.close()
# What are the properties like on a per pixel basis?
# Load in the per-pixel column densities
tab = Table.read(fourteenB_HI_data_wGBT_path("tables/column_densities_perpix.fits"))
hi_coldens = tab['Sigma_HI'] * u.solMass / u.pc**2
# NOTE(review): co_coldens and pang_pts are loaded but not used in the
# code visible in this section.
co_coldens = tab['Sigma_H2'] * u.solMass / u.pc**2
radii_pts = tab['Radius'] * u.kpc
pang_pts = tab['PA'] * u.deg
gas_ratio_pix = tab['Ratio'] * u.dimensionless_unscaled
total_sd_pix = tab['Sigma_Total'] * u.solMass / u.pc**2
# Surface-density grid for evaluating the model curves.
sds = np.arange(0.1, 75, 0.2)
# 2D histogram of per-pixel log ratio vs. total surface density, with
# Krumholz (2013) model curves overlaid.
onecolumn_figure()
hist2d(total_sd_pix.value, np.log10(gas_ratio_pix.value),
       data_kwargs={"alpha": 0.3})
p.xlim([0, 75])
p.ylim([-1.4, 0.8])
p.xlabel("$\Sigma_{\mathrm{Gas}}$ (M$_{\odot}$ pc$^{-2}$)")
p.ylabel("log H$_2$-to-HI Ratio $\Sigma_{\mathrm{H2}} /"
         " \Sigma_{\mathrm{HI}}$")
p.plot(sds, np.log10(krumholz2013_ratio_model(sds, c=1, Z=0.5)), "-",
       label="c=1, Z=0.5", linewidth=2, alpha=0.95)
p.plot(sds, np.log10(krumholz2013_ratio_model(sds, c=1, Z=1.0)), "--",
       label="c=1, Z=1.0", linewidth=2, alpha=0.95)
p.plot(sds, np.log10(krumholz2013_ratio_model(sds, c=2, Z=0.5)), "-.",
       label="c=2, Z=0.5", linewidth=2, alpha=0.95)
p.plot(sds, np.log10(krumholz2013_ratio_model(sds, c=3, Z=0.5)), ":",
       label="c=3, Z=0.5", linewidth=2, alpha=0.95)
p.plot(sds, np.log10(krumholz2013_ratio_model(sds, c=3, Z=1.0)), "-",
       label="c=3, Z=1.0", linewidth=2, alpha=0.95)
p.legend(loc='lower right', frameon=True)
p.grid()
p.tight_layout()
save_name = "ratio_totalsigma_w_krumholzmodel_perpix"
p.savefig(osjoin(fig_path, "{0}.pdf".format(save_name)))
p.savefig(osjoin(fig_path, "{0}.png".format(save_name)))
p.close()
# Overplot the radial averages
# Same per-pixel histogram as above, with the radially-averaged points
# (total_sd, gas_ratio) added on top.
hist2d(total_sd_pix.value, np.log10(gas_ratio_pix.value),
       data_kwargs={"alpha": 0.3})
p.xlim([0, 75])
p.ylim([-1.8, 0.8])
p.xlabel("$\Sigma_{\mathrm{Gas}}$ (M$_{\odot}$ pc$^{-2}$)")
p.ylabel("log H$_2$-to-HI Ratio $\Sigma_{\mathrm{H2}} /"
         " \Sigma_{\mathrm{HI}}$")
p.plot(sds, np.log10(krumholz2013_ratio_model(sds, c=1, Z=0.5)), "-",
       label="c=1, Z=0.5", linewidth=2, alpha=0.95)
p.plot(sds, np.log10(krumholz2013_ratio_model(sds, c=1, Z=1.0)), "--",
       label="c=1, Z=1.0", linewidth=2, alpha=0.95)
p.plot(sds, np.log10(krumholz2013_ratio_model(sds, c=2, Z=0.5)), "-.",
       label="c=2, Z=0.5", linewidth=2, alpha=0.95)
p.plot(sds, np.log10(krumholz2013_ratio_model(sds, c=3, Z=0.5)), ":",
       label="c=3, Z=0.5", linewidth=2, alpha=0.95)
p.plot(sds, np.log10(krumholz2013_ratio_model(sds, c=3, Z=1.0)), "-",
       label="c=3, Z=1.0", linewidth=2, alpha=0.95)
# Radial averages plotted without a label, so they do not appear in the legend.
p.plot(total_sd, np.log10(gas_ratio), 'D', markeredgecolor='k',
       markeredgewidth=0.25,)
p.legend(loc='lower right', frameon=True)
p.grid()
p.tight_layout()
save_name = "ratio_totalsigma_w_krumholzmodel_perpix_w_radavg"
p.savefig(osjoin(fig_path, "{0}_{1}pc.pdf".format(save_name,
                                                  int(dr.value))))
p.savefig(osjoin(fig_path, "{0}_{1}pc.png".format(save_name,
                                                  int(dr.value))))
p.close()
# Sigma HI vs total
# Per-pixel HI surface density vs. total gas surface density, against the
# Krumholz (2013) Sigma_HI model curves and the 1:1 line.
cpal = sb.color_palette()
hist2d(total_sd_pix.value, hi_coldens.value,
       data_kwargs={"alpha": 0.3})
p.xlim([0, 75])
p.ylim([-3, 27])
p.xlabel("$\Sigma_{\mathrm{Gas}}$ (M$_{\odot}$ pc$^{-2}$)")
p.ylabel("$\Sigma_{\mathrm{HI}}$ (M$_{\odot}$ pc$^{-2}$)")
# Earlier version drew constant max-HI lines instead of the full curves:
# p.axhline(krumholz_maxhi_sigma(c=1, Z=1.0).value, linestyle="--",
#           label="c=1, Z=1.0", linewidth=2, alpha=0.95,
#           color=cpal[1])
# p.axhline(krumholz_maxhi_sigma(c=2, Z=0.5).value, linestyle="-.",
#           label="c=2, Z=0.5", linewidth=2, alpha=0.95,
#           color=cpal[2])
# p.axhline(krumholz_maxhi_sigma(c=3, Z=0.5).value, linestyle=":",
#           label="c=3, Z=0.5", linewidth=2, alpha=0.95,
#           color=cpal[3])
# p.axhline(krumholz_maxhi_sigma(c=3, Z=1.0).value, linestyle="-",
#           label="c=3, Z=1.0", linewidth=2, alpha=0.95,
#           color=cpal[4])
p.plot(sds, krumholz2013_sigmaHI(sds, c=1, Z=1.0), "--",
       label="c=1, Z=1.0", linewidth=2, alpha=0.95,
       color=cpal[1])
p.plot(sds, krumholz2013_sigmaHI(sds, c=2, Z=0.5), "-.",
       label="c=2, Z=0.5", linewidth=2, alpha=0.95,
       color=cpal[2])
p.plot(sds, krumholz2013_sigmaHI(sds, c=3, Z=0.5), ":",
       label="c=3, Z=0.5", linewidth=2, alpha=0.95,
       color=cpal[3])
p.plot(sds, krumholz2013_sigmaHI(sds, c=3, Z=1.0), "-",
       label="c=3, Z=1.0", linewidth=2, alpha=0.95,
       color=cpal[4])
# Radial averages, plus a 1:1 line (all gas atomic) for reference.
p.plot(total_sd, sd_hi, 'D', markeredgecolor='k',
       markeredgewidth=0.25, color=cpal[0])
p.plot([0, 27], [0, 27], '-', linewidth=4, alpha=0.6, color=cpal[5])
p.legend(loc='lower right', frameon=True, ncol=2)
p.grid()
p.tight_layout()
save_name = "sigma_hi_vs_total_w_krumholzmodel_perpix_w_radavg"
p.savefig(osjoin(fig_path, "{0}_{1}pc.pdf".format(save_name,
                                                  int(dr.value))))
p.savefig(osjoin(fig_path, "{0}_{1}pc.png".format(save_name,
                                                  int(dr.value))))
p.close()
# Can the metallicity gradient account for the variation with radius?
# Solve for clumping factor for every pixel.
# If the metallicity accounts for the variation, the clumping factors
# should be constant with radius, on average.
clump_per_pix_rossim = \
    optimize_clump_factors(total_sd_pix.value,
                           gas_ratio_pix.value,
                           Z=ros_sim_metallicity(radii_pts.value),
                           c_init=10.)
clump_per_pix_bresolin = \
    optimize_clump_factors(total_sd_pix.value,
                           gas_ratio_pix.value,
                           Z=bresolin_metallicity(radii_pts.value),
                           c_init=10.)
clump_per_pix_constz = \
    optimize_clump_factors(total_sd_pix.value,
                           gas_ratio_pix.value,
                           Z=0.5,
                           c_init=10.)
# Note that plotting the scatter of all clump factors does not show the
# population well, but it REALLY highlights those low SB outlier points!
# Make radial bins and find the median in each
# For each metallicity assumption, compute the per-bin median and the
# 15th/85th percentiles (used later as asymmetric error bars).
rad_bins = np.arange(0, 7.0, 0.5)
med_ratio, bin_edges, cts = binned_statistic(radii_pts.value,
                                             clump_per_pix_rossim,
                                             bins=rad_bins,
                                             statistic=np.median)
lower_ratio = binned_statistic(radii_pts.value,
                               clump_per_pix_rossim,
                               bins=rad_bins,
                               statistic=lambda x: np.percentile(x, 15))[0]
upper_ratio = binned_statistic(radii_pts.value,
                               clump_per_pix_rossim,
                               bins=rad_bins,
                               statistic=lambda x: np.percentile(x, 85))[0]
bin_cents = (bin_edges[1:] + bin_edges[:-1]) / 2.
med_ratio_bres, bin_edges_bres, cts_bres = \
    binned_statistic(radii_pts.value, clump_per_pix_bresolin,
                     bins=rad_bins,
                     statistic=np.median)
lower_ratio_bres = binned_statistic(radii_pts.value,
                                    clump_per_pix_bresolin,
                                    bins=rad_bins,
                                    statistic=lambda x: np.percentile(x, 15))[0]
upper_ratio_bres = binned_statistic(radii_pts.value,
                                    clump_per_pix_bresolin,
                                    bins=rad_bins,
                                    statistic=lambda x: np.percentile(x, 85))[0]
med_ratio_const, bin_edges_const, cts_const = \
    binned_statistic(radii_pts.value, clump_per_pix_constz,
                     bins=rad_bins,
                     statistic=np.median)
lower_ratio_const = binned_statistic(radii_pts.value,
                                     clump_per_pix_constz,
                                     bins=rad_bins,
                                     statistic=lambda x: np.percentile(x, 15))[0]
upper_ratio_const = binned_statistic(radii_pts.value,
                                     clump_per_pix_constz,
                                     bins=rad_bins,
                                     statistic=lambda x: np.percentile(x, 85))[0]
# Binned clump-factor medians with 15th/85th-percentile error bars for
# each metallicity assumption.
onecolumn_figure()
p.errorbar(bin_cents, med_ratio_const, fmt='D-',
           yerr=[med_ratio_const - lower_ratio_const,
                 upper_ratio_const - med_ratio_const],
           label='Z=0.5')
p.errorbar(bin_cents, med_ratio, fmt='o--',
           yerr=[med_ratio - lower_ratio, upper_ratio - med_ratio],
           label='Rosolowsky \& Simon (2008)')
p.errorbar(bin_cents, med_ratio_bres, fmt='s-.',
           yerr=[med_ratio_bres - lower_ratio_bres,
                 upper_ratio_bres - med_ratio_bres],
           label='Bresolin (2011)')
p.grid()
p.legend(frameon=True, loc='upper right')
p.ylabel("Clumping Factor")
p.xlabel("Radius (kpc)")
p.ylim([0.8, 6.7])
p.tight_layout()
save_name = "clumpfactor_krumholzmodel_perpix_500pc"
p.savefig(osjoin(fig_path, "{0}.pdf".format(save_name)))
p.savefig(osjoin(fig_path, "{0}.png".format(save_name)))
p.close()
# Compare the ratio of the averages. If the gradient takes into account
# the clump factor changes, it is accounting for the differences.
p.plot(bin_cents, med_ratio / med_ratio_const, 'D-',
       label='Ros. + Simon / Const Z.')
p.plot(bin_cents, med_ratio_bres / med_ratio_const, 'o--',
       label='Bresolin / Const Z.')
p.legend(frameon=True, loc='upper left')
p.ylabel("Clumping Factor Ratio")
p.xlabel("Radius (kpc)")
p.grid()
p.tight_layout()
save_name = "clumpfactor_ratio_krumholzmodel_perpix_500pc"
p.savefig(osjoin(fig_path, "{0}.pdf".format(save_name)))
p.savefig(osjoin(fig_path, "{0}.png".format(save_name)))
p.close()
# Restore the default plotting configuration -- presumably resets the
# style set by onecolumn_figure(); confirm in the plotting helpers module.
default_figure()
| |
import json
import time
from calendar import timegm
from datetime import datetime, timedelta
from django.conf import settings
from django.core import signing
from django.urls import reverse
from django.test import RequestFactory
import jwt
import mock
from freezegun import freeze_time
from rest_framework.exceptions import AuthenticationFailed
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework_jwt.views import refresh_jwt_token
from olympia import core
from olympia.amo.templatetags.jinja_helpers import absolutify
from olympia.amo.tests import (
APITestClient, TestCase, WithDynamicEndpoints, user_factory)
from olympia.api.authentication import (
JWTKeyAuthentication, WebTokenAuthentication)
from olympia.api.tests.test_jwt_auth import JWTAuthKeyTester
class JWTKeyAuthTestView(APIView):
    """Minimal endpoint protected by JWTKeyAuthentication, used as the
    dynamic endpoint in TestJWTKeyAuthProtectedView below."""

    authentication_classes = [JWTKeyAuthentication]
    permission_classes = [IsAuthenticated]

    def get(self, request):
        return Response('some get response')

    def post(self, request):
        payload = {'user_pk': request.user.pk}
        return Response(payload)
class TestJWTKeyAuthentication(JWTAuthKeyTester, TestCase):
    """Unit tests for JWTKeyAuthentication: token decoding, payload
    validation, and the user-state checks performed on authentication."""
    client_class = APITestClient

    def setUp(self):
        super(TestJWTKeyAuthentication, self).setUp()
        self.factory = RequestFactory()
        self.auth = JWTKeyAuthentication()
        self.user = user_factory(read_dev_agreement=datetime.now())

    def request(self, token):
        # Build a GET request carrying the token in the JWT Authorization
        # header, the format this authentication class expects.
        return self.factory.get('/', HTTP_AUTHORIZATION='JWT {}'.format(token))

    def _create_token(self, api_key=None):
        # Create a signed auth token; generates a fresh API key for
        # self.user when none is supplied.
        if api_key is None:
            api_key = self.create_api_key(self.user)
        return self.create_auth_token(api_key.user, api_key.key,
                                      api_key.secret)

    def test_get_user(self):
        # Successful authentication returns the key's user and records
        # the remote address as the last login IP.
        core.set_remote_addr('15.16.23.42')
        user, _ = self.auth.authenticate(self.request(self._create_token()))
        assert user == self.user
        assert user.last_login_ip == '15.16.23.42'
        self.assertCloseToNow(user.last_login)

    def test_wrong_type_for_iat(self):
        api_key = self.create_api_key(self.user)
        # Manually create a broken payload where 'iat' is a string containing
        # a timestamp..
        issued_at = int(time.mktime(datetime.utcnow().timetuple()))
        payload = {
            'iss': api_key.key,
            # NOTE(review): `unicode` is Python 2 only; this module will not
            # run under Python 3 as-is.
            'iat': unicode(issued_at),
            'exp': unicode(
                issued_at + settings.MAX_APIKEY_JWT_AUTH_TOKEN_LIFETIME),
        }
        token = self.encode_token_payload(payload, api_key.secret)
        core.set_remote_addr('1.2.3.4')
        with self.assertRaises(AuthenticationFailed) as ctx:
            self.auth.authenticate(self.request(token))
        assert ctx.exception.detail == (
            'Wrong type for one or more keys in payload')

    def test_unknown_issuer(self):
        # An 'iss' that matches no stored API key is rejected.
        api_key = self.create_api_key(self.user)
        payload = self.auth_token_payload(self.user, api_key.key)
        payload['iss'] = 'non-existant-issuer'
        token = self.encode_token_payload(payload, api_key.secret)
        with self.assertRaises(AuthenticationFailed) as ctx:
            self.auth.authenticate(self.request(token))
        assert ctx.exception.detail == 'Unknown JWT iss (issuer).'

    def test_deleted_user(self):
        # A deleted account fails auth, and last_login/last_login_ip must
        # not be updated by the failed attempt.
        in_the_past = self.days_ago(42)
        self.user.update(
            last_login_ip='48.15.16.23', last_login=in_the_past, deleted=True)
        with self.assertRaises(AuthenticationFailed) as ctx:
            self.auth.authenticate(self.request(self._create_token()))
        assert ctx.exception.detail == 'User account is disabled.'
        self.user.reload()
        assert self.user.last_login == in_the_past
        assert self.user.last_login_ip == '48.15.16.23'

    def test_user_has_not_read_agreement(self):
        self.user.update(read_dev_agreement=None)
        with self.assertRaises(AuthenticationFailed) as ctx:
            self.auth.authenticate(self.request(self._create_token()))
        assert ctx.exception.detail == 'User has not read developer agreement.'

    # The next four tests map each decoding failure mode to the exact
    # user-facing error message.
    @mock.patch('olympia.api.jwt_auth.jwt_decode_handler')
    def test_decode_authentication_failed(self, jwt_decode_handler):
        jwt_decode_handler.side_effect = AuthenticationFailed
        with self.assertRaises(AuthenticationFailed) as ctx:
            self.auth.authenticate(self.request('whatever'))
        assert ctx.exception.detail == 'Incorrect authentication credentials.'

    @mock.patch('olympia.api.jwt_auth.jwt_decode_handler')
    def test_decode_expired_signature(self, jwt_decode_handler):
        # NOTE(review): jwt.ExpiredSignature is an old PyJWT alias of
        # ExpiredSignatureError -- confirm the pinned PyJWT version keeps it.
        jwt_decode_handler.side_effect = jwt.ExpiredSignature
        with self.assertRaises(AuthenticationFailed) as ctx:
            self.auth.authenticate(self.request('whatever'))
        assert ctx.exception.detail == 'Signature has expired.'

    @mock.patch('olympia.api.jwt_auth.jwt_decode_handler')
    def test_decode_decoding_error(self, jwt_decode_handler):
        jwt_decode_handler.side_effect = jwt.DecodeError
        with self.assertRaises(AuthenticationFailed) as ctx:
            self.auth.authenticate(self.request('whatever'))
        assert ctx.exception.detail == 'Error decoding signature.'

    @mock.patch('olympia.api.jwt_auth.jwt_decode_handler')
    def test_decode_invalid_token(self, jwt_decode_handler):
        jwt_decode_handler.side_effect = jwt.InvalidTokenError
        with self.assertRaises(AuthenticationFailed) as ctx:
            self.auth.authenticate(self.request('whatever'))
        assert ctx.exception.detail == 'Invalid JWT Token.'

    def test_refuse_refreshable_tokens(self):
        # We should not accept refreshable tokens.
        api_key = self.create_api_key(self.user)
        payload = self.auth_token_payload(self.user, api_key.key)
        payload['orig_iat'] = timegm(payload['iat'].utctimetuple())
        token = self.encode_token_payload(payload, api_key.secret)
        with self.assertRaises(AuthenticationFailed) as ctx:
            self.auth.authenticate(self.request(token))
        assert ctx.exception.detail == (
            "API key based tokens are not refreshable, don't include "
            "`orig_iat` in their payload.")

    def test_cant_refresh_token(self):
        # Developers generate tokens, not us, they should not be refreshable,
        # the refresh implementation does not even know how to decode them.
        api_key = self.create_api_key(self.user)
        payload = self.auth_token_payload(self.user, api_key.key)
        payload['orig_iat'] = timegm(payload['iat'].utctimetuple())
        token = self.encode_token_payload(payload, api_key.secret)
        request = self.factory.post('/lol-refresh', {'token': token})
        response = refresh_jwt_token(request)
        response.render()
        assert response.status_code == 400
        data = json.loads(response.content)
        assert data == {'non_field_errors': ['Error decoding signature.']}
class TestJWTKeyAuthProtectedView(
        WithDynamicEndpoints, JWTAuthKeyTester, TestCase):
    """End-to-end tests hitting JWTKeyAuthTestView through the test client,
    mounted as a dynamic endpoint."""
    client_class = APITestClient

    def setUp(self):
        super(TestJWTKeyAuthProtectedView, self).setUp()
        self.endpoint(JWTKeyAuthTestView)
        self.client.logout_api()  # just to be sure!
        self.user = user_factory(read_dev_agreement=datetime.now())

    def request(self, method, *args, **kw):
        # Dispatch an HTTP call to the dynamic endpoint via the test client.
        handler = getattr(self.client, method)
        return handler(reverse('test-dynamic-endpoint'), *args, **kw)

    def jwt_request(self, token, method, *args, **kw):
        # Same as request(), with the token in the JWT Authorization header.
        return self.request(method,
                            HTTP_AUTHORIZATION='JWT {}'.format(token),
                            *args, **kw)

    def test_get_requires_auth(self):
        res = self.request('get')
        assert res.status_code == 401, res.content

    def test_post_requires_auth(self):
        res = self.request('post', {})
        assert res.status_code == 401, res.content

    def test_can_post_with_jwt_header(self):
        api_key = self.create_api_key(self.user)
        token = self.create_auth_token(api_key.user, api_key.key,
                                       api_key.secret)
        res = self.jwt_request(token, 'post', {})
        assert res.status_code == 200, res.content
        data = json.loads(res.content)
        assert data['user_pk'] == self.user.pk

    def test_api_key_must_be_active(self):
        # Tokens signed with an inactive key are rejected.
        api_key = self.create_api_key(self.user, is_active=None)
        token = self.create_auth_token(api_key.user, api_key.key,
                                       api_key.secret)
        res = self.jwt_request(token, 'post', {})
        assert res.status_code == 401, res.content
class TestWebTokenAuthentication(TestCase):
    """Unit tests for WebTokenAuthentication: header parsing, token
    signature/expiry handling, and user lookup/validation."""
    client_class = APITestClient

    def setUp(self):
        super(TestWebTokenAuthentication, self).setUp()
        self.auth = WebTokenAuthentication()
        self.factory = RequestFactory()
        self.user = user_factory(read_dev_agreement=datetime.now())

    def _authenticate(self, token):
        # Build a POST request with the token in the expected
        # "<prefix> <token>" Authorization header and run authentication.
        url = absolutify('/api/v3/whatever/')
        prefix = WebTokenAuthentication.auth_header_prefix
        request = self.factory.post(
            url, HTTP_HOST='testserver',
            HTTP_AUTHORIZATION='{0} {1}'.format(prefix, token))
        return self.auth.authenticate(request)

    def test_success(self):
        token = self.client.generate_api_token(self.user)
        user, _ = self._authenticate(token)
        assert user == self.user

    def test_authenticate_header(self):
        request = self.factory.post('/api/v3/whatever/')
        assert (self.auth.authenticate_header(request) ==
                'bearer realm="api"')

    def test_wrong_header_only_prefix(self):
        request = self.factory.post(
            '/api/v3/whatever/',
            HTTP_AUTHORIZATION=WebTokenAuthentication.auth_header_prefix)
        with self.assertRaises(AuthenticationFailed) as exp:
            self.auth.authenticate(request)
        assert exp.exception.detail['code'] == 'ERROR_INVALID_HEADER'
        assert exp.exception.detail['detail'] == (
            'Invalid Authorization header. No credentials provided.')

    def test_wrong_header_too_many_spaces(self):
        request = self.factory.post(
            '/api/v3/whatever/',
            HTTP_AUTHORIZATION='{} foo bar'.format(
                WebTokenAuthentication.auth_header_prefix))
        with self.assertRaises(AuthenticationFailed) as exp:
            self.auth.authenticate(request)
        assert exp.exception.detail['code'] == 'ERROR_INVALID_HEADER'
        assert exp.exception.detail['detail'] == (
            'Invalid Authorization header. '
            'Credentials string should not contain spaces.')

    def test_no_token(self):
        # With no Authorization header at all, authenticate() is expected
        # to return None (defer to other authenticators) rather than fail.
        request = self.factory.post('/api/v3/whatever/')
        # Bug fix: the original line evaluated `... is None` without
        # asserting it, so this test could never fail.
        assert self.auth.authenticate(request) is None

    def test_expired_token(self):
        # Generate a token just past the session lifetime: must be rejected.
        old_date = datetime.now() - timedelta(
            seconds=settings.SESSION_COOKIE_AGE + 1)
        with freeze_time(old_date):
            token = self.client.generate_api_token(self.user)
        with self.assertRaises(AuthenticationFailed) as exp:
            self._authenticate(token)
        assert exp.exception.detail['code'] == 'ERROR_SIGNATURE_EXPIRED'
        assert exp.exception.detail['detail'] == 'Signature has expired.'

    def test_still_valid_token(self):
        # A token just inside the session lifetime still authenticates.
        not_so_old_date = datetime.now() - timedelta(
            seconds=settings.SESSION_COOKIE_AGE - 30)
        with freeze_time(not_so_old_date):
            token = self.client.generate_api_token(self.user)
        assert self._authenticate(token)[0] == self.user

    def test_bad_token(self):
        token = 'garbage'
        with self.assertRaises(AuthenticationFailed) as exp:
            self._authenticate(token)
        assert exp.exception.detail['code'] == 'ERROR_DECODING_SIGNATURE'
        assert exp.exception.detail['detail'] == 'Error decoding signature.'

    def test_user_id_is_none(self):
        token = self.client.generate_api_token(self.user, user_id=None)
        with self.assertRaises(AuthenticationFailed):
            self._authenticate(token)

    def test_no_user_id_in_payload(self):
        data = {
            'auth_hash': self.user.get_session_auth_hash(),
        }
        token = signing.dumps(data, salt=WebTokenAuthentication.salt)
        with self.assertRaises(AuthenticationFailed):
            self._authenticate(token)

    def test_no_auth_hash_in_payload(self):
        data = {
            'user_id': self.user.pk,
        }
        token = signing.dumps(data, salt=WebTokenAuthentication.salt)
        with self.assertRaises(AuthenticationFailed):
            self._authenticate(token)

    def test_user_deleted(self):
        self.user.delete()
        token = self.client.generate_api_token(self.user)
        with self.assertRaises(AuthenticationFailed):
            self._authenticate(token)

    def test_invalid_user_not_found(self):
        token = self.client.generate_api_token(self.user, user_id=-1)
        with self.assertRaises(AuthenticationFailed):
            self._authenticate(token)

    def test_invalid_user_other_user(self):
        # A token whose user_id points at a different user than the one it
        # was generated for must not authenticate.
        user2 = user_factory(read_dev_agreement=datetime.now())
        token = self.client.generate_api_token(self.user, user_id=user2.pk)
        with self.assertRaises(AuthenticationFailed):
            self._authenticate(token)

    def test_wrong_auth_id(self):
        # Changing the user's auth_id after issuing invalidates the token.
        token = self.client.generate_api_token(self.user)
        self.user.update(auth_id=self.user.auth_id + 42)
        with self.assertRaises(AuthenticationFailed):
            self._authenticate(token)

    def test_make_sure_token_is_decodable(self):
        token = self.client.generate_api_token(self.user)
        # A token is really a string containing the json dict,
        # a timestamp and a signature, separated by ':'. The base64 encoding
        # lacks padding, which is why we need to use signing.b64_decode() which
        # handles that for us.
        data = json.loads(signing.b64_decode(token.split(':')[0]))
        assert data['user_id'] == self.user.pk
        assert data['auth_hash'] == self.user.get_session_auth_hash()
| |
import http.client
import threading
import time
import unittest
import urllib.error
import urllib.parse
import urllib.request
from unittest import mock
import responses
from cumulusci.oauth.salesforce import SalesforceOAuth2
from cumulusci.oauth.salesforce import CaptureSalesforceOAuth
class TestSalesforceOAuth(unittest.TestCase):
    """Unit tests for SalesforceOAuth2 token endpoints (HTTP mocked with
    the `responses` library)."""

    def _create_oauth(self):
        """Build a client with fixed test credentials."""
        return SalesforceOAuth2(
            client_id="foo_id",
            client_secret="foo_secret",
            callback_url="http://localhost:8080",
        )

    @responses.activate
    def test_refresh_token(self):
        client = self._create_oauth()
        responses.add(
            responses.POST,
            "https://login.salesforce.com/services/oauth2/token",
            body=b"SENTINEL",
        )
        response = client.refresh_token("token")
        self.assertEqual(response.text, "SENTINEL")

    @responses.activate
    def test_revoke_token(self):
        client = self._create_oauth()
        responses.add(
            responses.POST,
            "https://login.salesforce.com/services/oauth2/revoke",
            status=http.client.OK,
        )
        response = client.revoke_token("token")
        self.assertEqual(200, response.status_code)
@mock.patch("webbrowser.open", mock.MagicMock(return_value=None))
class TestCaptureSalesforceOAuth(unittest.TestCase):
    """Tests for the interactive CaptureSalesforceOAuth flow: the object is
    run on a background thread (it spawns a local httpd to receive the
    browser callback) and the tests simulate the browser redirect.

    Refactor: the three tests previously duplicated the authorize-URL mock,
    the expected token payload, and a hand-rolled spawn-and-busy-wait loop;
    those are now shared private helpers.
    """

    def setUp(self):
        self.client_id = "foo_id"
        self.client_secret = "foo_secret"
        self.callback_url = "http://localhost:8080"
        self.scope = "refresh_token web full"
        self.auth_site = "https://login.salesforce.com"

    def _create_oauth(self):
        """Build a CaptureSalesforceOAuth with the fixture credentials."""
        return CaptureSalesforceOAuth(
            self.client_id,
            self.client_secret,
            self.callback_url,
            self.auth_site,
            self.scope,
        )

    def _mock_authorize_url(self):
        """Mock the GET used to validate the authorize URL."""
        responses.add(
            responses.GET,
            "https://login.salesforce.com/services/oauth2/authorize",
            status=http.client.OK,
        )

    def _expected_token_response(self):
        """Canonical successful payload for SalesforceOAuth2.get_token()."""
        return {
            u"access_token": u"abc123",
            u"id_token": u"abc123",
            u"token_type": u"Bearer",
            u"signature": u"abc123",
            u"issued_at": u"12345",
            u"scope": u"{}".format(self.scope),
            u"instance_url": u"https://na15.salesforce.com",
            u"id": u"https://login.salesforce.com/id/abc/xyz",
            u"refresh_token": u"abc123",
        }

    def _start_oauth_flow(self, o):
        """Run ``o()`` on a background thread and block until its local
        httpd is up; returns the thread so callers can join() it."""
        t = threading.Thread(target=o.__call__)
        t.start()
        while not o.httpd:
            print("waiting for o.httpd")
            time.sleep(0.01)
        return t

    @responses.activate
    @mock.patch("time.sleep", time.sleep)  # undo mock from conftest
    def test_oauth_flow_simple(self):
        self._mock_authorize_url()
        expected_response = self._expected_token_response()
        responses.add(
            responses.POST,
            "https://login.salesforce.com/services/oauth2/token",
            status=http.client.OK,
            json=expected_response,
        )
        o = self._create_oauth()
        t = self._start_oauth_flow(o)
        # simulate callback from browser
        response = urllib.request.urlopen(self.callback_url + "?code=123")
        # wait for thread to complete
        t.join()
        # verify
        self.assertEqual(o.response.json(), expected_response)
        self.assertIn(b"Congratulations", response.read())

    @mock.patch("time.sleep", time.sleep)  # undo mock from conftest
    @responses.activate
    def test_oauth_flow_error_from_auth(self):
        self._mock_authorize_url()
        responses.add(
            responses.POST,
            "https://login.salesforce.com/services/oauth2/token",
            status=http.client.OK,
            json=self._expected_token_response(),
        )
        o = self._create_oauth()
        t = self._start_oauth_flow(o)
        # simulate an error callback from the browser
        with self.assertRaises(urllib.error.HTTPError):
            urllib.request.urlopen(
                self.callback_url + "?error=123&error_description=broken"
            )
        t.join()

    @mock.patch("time.sleep", time.sleep)  # undo mock from conftest
    @responses.activate
    def test_oauth_flow_error_from_token(self):
        self._mock_authorize_url()
        # token endpoint rejects the code exchange
        responses.add(
            responses.POST,
            "https://login.salesforce.com/services/oauth2/token",
            status=http.client.FORBIDDEN,
        )
        o = self._create_oauth()
        t = self._start_oauth_flow(o)
        with self.assertRaises(urllib.error.HTTPError):
            urllib.request.urlopen(self.callback_url + "?code=123")
        t.join()
| |
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class Alert(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
    def __init__(self):
        """
        Alert - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # Maps python attribute name -> swagger type string.
        self.swagger_types = {
            'active_maintenance_windows': 'list[str]',
            'additional_information': 'str',
            'condition': 'str',
            'condition_qb_enabled': 'bool',
            'condition_qb_serialization': 'str',
            'created': 'int',
            'display_expression': 'str',
            'display_expression_qb_enabled': 'bool',
            'display_expression_qb_serialization': 'str',
            'event': 'ReportEvent',
            'failing_host_label_pairs_override': 'list[HostLabelPair]',
            'hosts_used': 'list[str]',
            'in_maintenance_host_label_pairs': 'list[HostLabelPair]',
            'in_trash': 'bool',
            'last_error_message': 'str',
            'last_failed_time': 'int',
            'last_updated': 'int',
            'metrics_used': 'list[str]',
            'minutes': 'int',
            'name': 'str',
            'notificants': 'list[str]',
            'prefiring_host_label_pairs': 'list[HostLabelPair]',
            'query_failing': 'bool',
            'resolve_after_minutes': 'int',
            'severity': 'str',
            'snoozed': 'int',
            'tags': 'Tags',
            'target': 'str',
            'update_user_id': 'str',
            'updated': 'int'
        }
        # Maps python attribute name -> JSON key used in the API payload.
        self.attribute_map = {
            'active_maintenance_windows': 'activeMaintenanceWindows',
            'additional_information': 'additionalInformation',
            'condition': 'condition',
            'condition_qb_enabled': 'conditionQBEnabled',
            'condition_qb_serialization': 'conditionQBSerialization',
            'created': 'created',
            'display_expression': 'displayExpression',
            'display_expression_qb_enabled': 'displayExpressionQBEnabled',
            'display_expression_qb_serialization': 'displayExpressionQBSerialization',
            'event': 'event',
            'failing_host_label_pairs_override': 'failingHostLabelPairsOverride',
            'hosts_used': 'hostsUsed',
            'in_maintenance_host_label_pairs': 'inMaintenanceHostLabelPairs',
            'in_trash': 'inTrash',
            'last_error_message': 'lastErrorMessage',
            'last_failed_time': 'lastFailedTime',
            'last_updated': 'lastUpdated',
            'metrics_used': 'metricsUsed',
            'minutes': 'minutes',
            'name': 'name',
            'notificants': 'notificants',
            'prefiring_host_label_pairs': 'prefiringHostLabelPairs',
            'query_failing': 'queryFailing',
            'resolve_after_minutes': 'resolveAfterMinutes',
            'severity': 'severity',
            'snoozed': 'snoozed',
            'tags': 'tags',
            'target': 'target',
            'update_user_id': 'updateUserId',
            'updated': 'updated'
        }
        # Backing fields for the properties below; booleans default to
        # False, everything else to None.
        self._active_maintenance_windows = None
        self._additional_information = None
        self._condition = None
        self._condition_qb_enabled = False
        self._condition_qb_serialization = None
        self._created = None
        self._display_expression = None
        self._display_expression_qb_enabled = False
        self._display_expression_qb_serialization = None
        self._event = None
        self._failing_host_label_pairs_override = None
        self._hosts_used = None
        self._in_maintenance_host_label_pairs = None
        self._in_trash = False
        self._last_error_message = None
        self._last_failed_time = None
        self._last_updated = None
        self._metrics_used = None
        self._minutes = None
        self._name = None
        self._notificants = None
        self._prefiring_host_label_pairs = None
        self._query_failing = False
        self._resolve_after_minutes = None
        self._severity = None
        self._snoozed = None
        self._tags = None
        self._target = None
        self._update_user_id = None
        self._updated = None
    @property
    def active_maintenance_windows(self):
        """
        Gets the active_maintenance_windows of this Alert.
        (Serialized as 'activeMaintenanceWindows' in the API payload.)

        :return: The active_maintenance_windows of this Alert.
        :rtype: list[str]
        """
        return self._active_maintenance_windows
    @active_maintenance_windows.setter
    def active_maintenance_windows(self, active_maintenance_windows):
        """
        Sets the active_maintenance_windows of this Alert.
        No validation is performed; the value is stored as given.

        :param active_maintenance_windows: The active_maintenance_windows of this Alert.
        :type: list[str]
        """
        self._active_maintenance_windows = active_maintenance_windows
    @property
    def additional_information(self):
        """
        Gets the additional_information of this Alert.
        Additional information of the alert for runbooks, etc.
        (Serialized as 'additionalInformation' in the API payload.)

        :return: The additional_information of this Alert.
        :rtype: str
        """
        return self._additional_information
    @additional_information.setter
    def additional_information(self, additional_information):
        """
        Sets the additional_information of this Alert.
        Additional information of the alert for runbooks, etc.
        No validation is performed; the value is stored as given.

        :param additional_information: The additional_information of this Alert.
        :type: str
        """
        self._additional_information = additional_information
    @property
    def condition(self):
        """
        Gets the condition of this Alert.
        The condition in which to evaluate whether the alert is firing
        (Serialized as 'condition' in the API payload.)

        :return: The condition of this Alert.
        :rtype: str
        """
        return self._condition
    @condition.setter
    def condition(self, condition):
        """
        Sets the condition of this Alert.
        The condition in which to evaluate whether the alert is firing
        No validation is performed; the value is stored as given.

        :param condition: The condition of this Alert.
        :type: str
        """
        self._condition = condition
    @property
    def condition_qb_enabled(self):
        """
        Gets the condition_qb_enabled of this Alert.
        (Serialized as 'conditionQBEnabled'; defaults to False.
        QB presumably stands for Query Builder -- confirm in the API docs.)

        :return: The condition_qb_enabled of this Alert.
        :rtype: bool
        """
        return self._condition_qb_enabled
    @condition_qb_enabled.setter
    def condition_qb_enabled(self, condition_qb_enabled):
        """
        Sets the condition_qb_enabled of this Alert.
        No validation is performed; the value is stored as given.

        :param condition_qb_enabled: The condition_qb_enabled of this Alert.
        :type: bool
        """
        self._condition_qb_enabled = condition_qb_enabled
@property
def condition_qb_serialization(self):
"""
Gets the condition_qb_serialization of this Alert.
:return: The condition_qb_serialization of this Alert.
:rtype: str
"""
return self._condition_qb_serialization
@condition_qb_serialization.setter
def condition_qb_serialization(self, condition_qb_serialization):
"""
Sets the condition_qb_serialization of this Alert.
:param condition_qb_serialization: The condition_qb_serialization of this Alert.
:type: str
"""
self._condition_qb_serialization = condition_qb_serialization
@property
def created(self):
"""
Gets the created of this Alert.
The creation time in milliseconds for the alert
:return: The created of this Alert.
:rtype: int
"""
return self._created
@created.setter
def created(self, created):
"""
Sets the created of this Alert.
The creation time in milliseconds for the alert
:param created: The created of this Alert.
:type: int
"""
self._created = created
@property
def display_expression(self):
    """str: The display_expression of this Alert."""
    return self._display_expression

@display_expression.setter
def display_expression(self, display_expression):
    """Set the display_expression of this Alert (str)."""
    self._display_expression = display_expression

@property
def display_expression_qb_enabled(self):
    """bool: The display_expression_qb_enabled flag of this Alert."""
    return self._display_expression_qb_enabled

@display_expression_qb_enabled.setter
def display_expression_qb_enabled(self, display_expression_qb_enabled):
    """Set the display_expression_qb_enabled flag of this Alert (bool)."""
    self._display_expression_qb_enabled = display_expression_qb_enabled

@property
def display_expression_qb_serialization(self):
    """str: The display_expression_qb_serialization of this Alert."""
    return self._display_expression_qb_serialization

@display_expression_qb_serialization.setter
def display_expression_qb_serialization(self, display_expression_qb_serialization):
    """Set the display_expression_qb_serialization of this Alert (str)."""
    self._display_expression_qb_serialization = display_expression_qb_serialization

@property
def event(self):
    """ReportEvent: The event associated with the firing of the alert.

    Can be None if the alert has never fired; if the alert is not currently
    firing, this holds the last known firing of the alert.
    """
    return self._event

@event.setter
def event(self, event):
    """Set the firing event of this Alert (ReportEvent)."""
    self._event = event

@property
def failing_host_label_pairs_override(self):
    """list[HostLabelPair]: Failing host/metric pairs."""
    return self._failing_host_label_pairs_override

@failing_host_label_pairs_override.setter
def failing_host_label_pairs_override(self, failing_host_label_pairs_override):
    """Set the failing host/metric pair override of this Alert."""
    self._failing_host_label_pairs_override = failing_host_label_pairs_override

@property
def hosts_used(self):
    """list[str]: The hosts_used of this Alert."""
    return self._hosts_used

@hosts_used.setter
def hosts_used(self, hosts_used):
    """Set the hosts_used of this Alert (list[str])."""
    self._hosts_used = hosts_used

@property
def in_maintenance_host_label_pairs(self):
    """list[HostLabelPair]: The in_maintenance_host_label_pairs of this Alert."""
    return self._in_maintenance_host_label_pairs

@in_maintenance_host_label_pairs.setter
def in_maintenance_host_label_pairs(self, in_maintenance_host_label_pairs):
    """Set the in_maintenance_host_label_pairs of this Alert."""
    self._in_maintenance_host_label_pairs = in_maintenance_host_label_pairs
@property
def in_trash(self):
    """bool: The in_trash flag of this Alert."""
    return self._in_trash

@in_trash.setter
def in_trash(self, in_trash):
    """Set the in_trash flag of this Alert (bool)."""
    self._in_trash = in_trash

@property
def last_error_message(self):
    """str: The last_error_message of this Alert."""
    return self._last_error_message

@last_error_message.setter
def last_error_message(self, last_error_message):
    """Set the last_error_message of this Alert (str)."""
    self._last_error_message = last_error_message

@property
def last_failed_time(self):
    """int: The last_failed_time of this Alert."""
    return self._last_failed_time

@last_failed_time.setter
def last_failed_time(self, last_failed_time):
    """Set the last_failed_time of this Alert (int)."""
    self._last_failed_time = last_failed_time

@property
def last_updated(self):
    """int: The last_updated of this Alert."""
    return self._last_updated

@last_updated.setter
def last_updated(self, last_updated):
    """Set the last_updated of this Alert (int)."""
    self._last_updated = last_updated

@property
def metrics_used(self):
    """list[str]: The metrics_used of this Alert."""
    return self._metrics_used

@metrics_used.setter
def metrics_used(self, metrics_used):
    """Set the metrics_used of this Alert (list[str])."""
    self._metrics_used = metrics_used

@property
def minutes(self):
    """int: The time to elapse before firing or resolving the alert when the
    condition evaluates to true or false (respectively)."""
    return self._minutes

@minutes.setter
def minutes(self, minutes):
    """Set the fire/resolve delay of this Alert in minutes (int)."""
    self._minutes = minutes

@property
def name(self):
    """str: The name of the alert."""
    return self._name

@name.setter
def name(self, name):
    """Set the name of this Alert (str)."""
    self._name = name

@property
def notificants(self):
    """list[str]: The notificants of this Alert."""
    return self._notificants

@notificants.setter
def notificants(self, notificants):
    """Set the notificants of this Alert (list[str])."""
    self._notificants = notificants
@property
def prefiring_host_label_pairs(self):
    """list[HostLabelPair]: The prefiring_host_label_pairs of this Alert."""
    return self._prefiring_host_label_pairs

@prefiring_host_label_pairs.setter
def prefiring_host_label_pairs(self, prefiring_host_label_pairs):
    """Set the prefiring_host_label_pairs of this Alert."""
    self._prefiring_host_label_pairs = prefiring_host_label_pairs

@property
def query_failing(self):
    """bool: The query_failing flag of this Alert."""
    return self._query_failing

@query_failing.setter
def query_failing(self, query_failing):
    """Set the query_failing flag of this Alert (bool)."""
    self._query_failing = query_failing

@property
def resolve_after_minutes(self):
    """int: The resolve_after_minutes of this Alert."""
    return self._resolve_after_minutes

@resolve_after_minutes.setter
def resolve_after_minutes(self, resolve_after_minutes):
    """Set the resolve_after_minutes of this Alert (int)."""
    self._resolve_after_minutes = resolve_after_minutes

@property
def severity(self):
    """str: The severity of the alert."""
    return self._severity

@severity.setter
def severity(self, severity):
    """Set the severity of this Alert.

    :raises ValueError: if *severity* is not one of INFO, SMOKE, WARN, SEVERE.
    """
    allowed_values = ["INFO", "SMOKE", "WARN", "SEVERE"]
    if severity not in allowed_values:
        raise ValueError(
            "Invalid value for `severity`, must be one of {0}"
            .format(allowed_values)
        )
    self._severity = severity
@property
def snoozed(self):
    """int: Milliseconds since the epoch the alert is snoozed until.

    A value in the past indicates that the alert is not snoozed.
    """
    return self._snoozed

@snoozed.setter
def snoozed(self, snoozed):
    """Set the snooze-until time of this Alert in epoch milliseconds (int)."""
    self._snoozed = snoozed

@property
def tags(self):
    """Tags: Associated tags."""
    return self._tags

@tags.setter
def tags(self, tags):
    """Set the associated tags of this Alert (Tags)."""
    self._tags = tags

@property
def target(self):
    """str: The email address or integration endpoint (such as PagerDuty)
    to notify when the alert state changes."""
    return self._target

@target.setter
def target(self, target):
    """Set the notification target of this Alert (str)."""
    self._target = target

@property
def update_user_id(self):
    """str: The update_user_id of this Alert."""
    return self._update_user_id

@update_user_id.setter
def update_user_id(self, update_user_id):
    """Set the update_user_id of this Alert (str)."""
    self._update_user_id = update_user_id

@property
def updated(self):
    """int: The last known update time of the alert by a user."""
    return self._updated

@updated.setter
def updated(self, updated):
    """Set the last user-update time of this Alert (int)."""
    self._updated = updated
def to_dict(self):
    """
    Return the model properties as a dict.

    Iterates the declared swagger attributes only (not arbitrary instance
    state). List values have to_dict() applied element-wise, nested models
    are converted recursively, dict values convert any model entries, and
    all other values are copied as-is.

    :return: dict mapping each swagger attribute name to its plain value
    """
    result = {}
    # Idiom fix: use the stdlib dict iteration instead of six.iteritems; the
    # original also discarded the type string, so iterate keys directly.
    for attr in self.swagger_types:
        value = getattr(self, attr)
        if isinstance(value, list):
            result[attr] = [
                item.to_dict() if hasattr(item, "to_dict") else item
                for item in value
            ]
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            result[attr] = {
                key: val.to_dict() if hasattr(val, "to_dict") else val
                for key, val in value.items()
            }
        else:
            result[attr] = value
    return result
def to_str(self):
    """Return a pretty-printed string of the model's dict representation."""
    data = self.to_dict()
    return pformat(data)
def __repr__(self):
    """Delegate `repr()` (and hence `print`/`pprint`) to to_str()."""
    return self.to_str()
def __eq__(self, other):
    """
    Return True if both objects have equal attribute dicts.

    Bug fix: the original compared ``self.__dict__ == other.__dict__``
    directly, which raises AttributeError when *other* has no ``__dict__``
    (e.g. ``alert == 1``). Using getattr with a sentinel default makes such
    comparisons return False instead of crashing, while preserving the
    result for every object that does have a ``__dict__``.
    """
    return self.__dict__ == getattr(other, "__dict__", object())
def __ne__(self, other):
    """Return True if both objects are not equal (inverse of ==)."""
    equal = self == other
    return not equal
| |
import sys
from django import http
from django.core import signals
from django.utils.encoding import force_unicode
from django.utils.importlib import import_module
class BaseHandler(object):
    """
    Shared base for Django's request handlers.

    Loads the middleware chain from settings and resolves each request to a
    view callable, running request/view/exception/response middleware around
    it. NOTE(review): this is Python 2-only code (``raise E, msg`` and
    ``except E, e`` syntax) from a legacy Django release.
    """

    # Changes that are always applied to a response (in this order).
    response_fixes = [
        http.fix_location_header,
        http.conditional_content_removal,
        http.fix_IE_for_attach,
        http.fix_IE_for_vary,
    ]

    def __init__(self):
        # All four middleware lists start as None; load_middleware() fills
        # them in, and _request_middleware doubles as the "fully initialized"
        # flag (see the note at the end of load_middleware()).
        self._request_middleware = self._view_middleware = self._response_middleware = self._exception_middleware = None

    def load_middleware(self):
        """
        Populate middleware lists from settings.MIDDLEWARE_CLASSES.
        Must be called after the environment is fixed (see __call__).
        """
        from django.conf import settings
        from django.core import exceptions
        self._view_middleware = []
        self._response_middleware = []
        self._exception_middleware = []
        request_middleware = []
        for middleware_path in settings.MIDDLEWARE_CLASSES:
            # Split "package.module.ClassName" into module path and class name.
            try:
                dot = middleware_path.rindex('.')
            except ValueError:
                raise exceptions.ImproperlyConfigured, '%s isn\'t a middleware module' % middleware_path
            mw_module, mw_classname = middleware_path[:dot], middleware_path[dot+1:]
            try:
                mod = import_module(mw_module)
            except ImportError, e:
                raise exceptions.ImproperlyConfigured, 'Error importing middleware %s: "%s"' % (mw_module, e)
            try:
                mw_class = getattr(mod, mw_classname)
            except AttributeError:
                raise exceptions.ImproperlyConfigured, 'Middleware module "%s" does not define a "%s" class' % (mw_module, mw_classname)
            # MiddlewareNotUsed lets a middleware exclude itself at startup.
            try:
                mw_instance = mw_class()
            except exceptions.MiddlewareNotUsed:
                continue
            if hasattr(mw_instance, 'process_request'):
                request_middleware.append(mw_instance.process_request)
            if hasattr(mw_instance, 'process_view'):
                self._view_middleware.append(mw_instance.process_view)
            # Response and exception middleware run in reverse order, hence
            # insert(0, ...) rather than append.
            if hasattr(mw_instance, 'process_response'):
                self._response_middleware.insert(0, mw_instance.process_response)
            if hasattr(mw_instance, 'process_exception'):
                self._exception_middleware.insert(0, mw_instance.process_exception)
        # We only assign to this when initialization is complete as it is used
        # as a flag for initialization being complete.
        self._request_middleware = request_middleware

    def get_response(self, request):
        "Returns an HttpResponse object for the given HttpRequest"
        from django.core import exceptions, urlresolvers
        from django.conf import settings
        # Apply request middleware
        for middleware_method in self._request_middleware:
            response = middleware_method(request)
            if response:
                return response
        # Get urlconf from request object, if available. Otherwise use default.
        urlconf = getattr(request, "urlconf", settings.ROOT_URLCONF)
        resolver = urlresolvers.RegexURLResolver(r'^/', urlconf)
        try:
            callback, callback_args, callback_kwargs = resolver.resolve(
                request.path_info)
            # Apply view middleware
            for middleware_method in self._view_middleware:
                response = middleware_method(request, callback, callback_args, callback_kwargs)
                if response:
                    return response
            try:
                response = callback(request, *callback_args, **callback_kwargs)
            except Exception, e:
                # If the view raised an exception, run it through exception
                # middleware, and if the exception middleware returns a
                # response, use that. Otherwise, reraise the exception.
                for middleware_method in self._exception_middleware:
                    response = middleware_method(request, e)
                    if response:
                        return response
                raise
            # Complain if the view returned None (a common error).
            if response is None:
                try:
                    view_name = callback.func_name # If it's a function
                except AttributeError:
                    view_name = callback.__class__.__name__ + '.__call__' # If it's a class
                raise ValueError, "The view %s.%s didn't return an HttpResponse object." % (callback.__module__, view_name)
            return response
        except http.Http404, e:
            if settings.DEBUG:
                from django.views import debug
                return debug.technical_404_response(request, e)
            else:
                try:
                    callback, param_dict = resolver.resolve404()
                    return callback(request, **param_dict)
                except:
                    # The 404 handler itself failed: fall back to the 500
                    # path, firing got_request_exception either way.
                    try:
                        return self.handle_uncaught_exception(request, resolver, sys.exc_info())
                    finally:
                        receivers = signals.got_request_exception.send(sender=self.__class__, request=request)
        except exceptions.PermissionDenied:
            return http.HttpResponseForbidden('<h1>Permission denied</h1>')
        except SystemExit:
            # Allow sys.exit() to actually exit. See tickets #1023 and #4701
            raise
        except: # Handle everything else, including SuspiciousOperation, etc.
            # Get the exception info now, in case another exception is thrown later.
            exc_info = sys.exc_info()
            receivers = signals.got_request_exception.send(sender=self.__class__, request=request)
            return self.handle_uncaught_exception(request, resolver, exc_info)

    def handle_uncaught_exception(self, request, resolver, exc_info):
        """
        Processing for any otherwise uncaught exceptions (those that will
        generate HTTP 500 responses). Can be overridden by subclasses who want
        customised 500 handling.

        Be *very* careful when overriding this because the error could be
        caused by anything, so assuming something like the database is always
        available would be an error.
        """
        from django.conf import settings
        from django.core.mail import mail_admins
        if settings.DEBUG_PROPAGATE_EXCEPTIONS:
            raise
        if settings.DEBUG:
            from django.views import debug
            return debug.technical_500_response(request, *exc_info)
        # When DEBUG is False, send an error message to the admins.
        subject = 'Error (%s IP): %s' % ((request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS and 'internal' or 'EXTERNAL'), request.path)
        try:
            request_repr = repr(request)
        except:
            request_repr = "Request repr() unavailable"
        message = "%s\n\n%s" % (self._get_traceback(exc_info), request_repr)
        mail_admins(subject, message, fail_silently=True)
        # Return an HttpResponse that displays a friendly error message.
        callback, param_dict = resolver.resolve500()
        return callback(request, **param_dict)

    def _get_traceback(self, exc_info=None):
        "Helper function to return the traceback as a string"
        import traceback
        return '\n'.join(traceback.format_exception(*(exc_info or sys.exc_info())))

    def apply_response_fixes(self, request, response):
        """
        Applies each of the functions in self.response_fixes to the request and
        response, modifying the response in the process. Returns the new
        response.
        """
        for func in self.response_fixes:
            response = func(request, response)
        return response
def get_script_name(environ):
    """
    Return the equivalent of the HTTP request's SCRIPT_NAME environment
    variable.

    If Apache mod_rewrite has been used, this returns what would have been
    the script name prior to any rewriting, i.e. the script name as seen
    from the client's perspective.
    """
    from django.conf import settings
    # An explicit setting always wins.
    if settings.FORCE_SCRIPT_NAME is not None:
        return force_unicode(settings.FORCE_SCRIPT_NAME)
    # mod_rewrite records the pre-rewrite URL in SCRIPT_URL (or REDIRECT_URL).
    # Not every webserver (lighttpd!) passes this through, which is why the
    # FORCE_SCRIPT_NAME escape hatch above is still needed.
    script_url = environ.get('SCRIPT_URL', u'') or environ.get('REDIRECT_URL', u'')
    if script_url:
        # Strip the trailing PATH_INFO portion to recover the script name.
        return force_unicode(script_url[:-len(environ.get('PATH_INFO', ''))])
    return force_unicode(environ.get('SCRIPT_NAME', u''))
| |
from django import forms
from taggit.forms import TagField
from dcim.models import Site
from extras.forms import AddRemoveTagsForm, CustomFieldForm, CustomFieldBulkEditForm, CustomFieldFilterForm
from tenancy.forms import TenancyForm
from tenancy.forms import TenancyFilterForm
from tenancy.models import Tenant
from utilities.forms import (
APISelect, APISelectMultiple, add_blank_choice, BootstrapMixin, CommentField, CSVChoiceField,
FilterChoiceField, SmallTextarea, SlugField, StaticSelect2, StaticSelect2Multiple
)
from .constants import CIRCUIT_STATUS_CHOICES
from .models import Circuit, CircuitTermination, CircuitType, Provider
#
# Providers
#
class ProviderForm(BootstrapMixin, CustomFieldForm):
    """Create/edit form for a circuit Provider."""
    slug = SlugField()
    comments = CommentField()
    tags = TagField(
        required=False
    )

    class Meta:
        model = Provider
        fields = [
            'name', 'slug', 'asn', 'account', 'portal_url', 'noc_contact', 'admin_contact', 'comments', 'tags',
        ]
        widgets = {
            'noc_contact': SmallTextarea(
                attrs={'rows': 5}
            ),
            'admin_contact': SmallTextarea(
                attrs={'rows': 5}
            ),
        }
        help_texts = {
            'name': "Full name of the provider",
            'asn': "BGP autonomous system number (if applicable)",
            'portal_url': "URL of the provider's customer support portal",
            'noc_contact': "NOC email address and phone number",
            'admin_contact': "Administrative contact email address and phone number",
        }
class ProviderCSVForm(forms.ModelForm):
    """CSV bulk-import form for Provider records."""
    slug = SlugField()

    class Meta:
        model = Provider
        fields = Provider.csv_headers
        help_texts = {
            'name': 'Provider name',
            'asn': '32-bit autonomous system number',
            'portal_url': 'Portal URL',
            'comments': 'Free-form comments',
        }
class ProviderBulkEditForm(BootstrapMixin, AddRemoveTagsForm, CustomFieldBulkEditForm):
    """Bulk-edit form for multiple selected Providers (selection via hidden pk)."""
    pk = forms.ModelMultipleChoiceField(
        queryset=Provider.objects.all(),
        widget=forms.MultipleHiddenInput
    )
    asn = forms.IntegerField(
        required=False,
        label='ASN'
    )
    account = forms.CharField(
        max_length=30,
        required=False,
        label='Account number'
    )
    portal_url = forms.URLField(
        required=False,
        label='Portal'
    )
    noc_contact = forms.CharField(
        required=False,
        widget=SmallTextarea,
        label='NOC contact'
    )
    admin_contact = forms.CharField(
        required=False,
        widget=SmallTextarea,
        label='Admin contact'
    )
    comments = CommentField(
        widget=SmallTextarea()
    )

    class Meta:
        # Fields that may be blanked out in a bulk edit.
        nullable_fields = [
            'asn', 'account', 'portal_url', 'noc_contact', 'admin_contact', 'comments',
        ]
class ProviderFilterForm(BootstrapMixin, CustomFieldFilterForm):
    """List-view filter form for Providers."""
    model = Provider
    q = forms.CharField(
        required=False,
        label='Search'
    )
    site = FilterChoiceField(
        queryset=Site.objects.all(),
        to_field_name='slug',
        widget=APISelectMultiple(
            api_url="/api/dcim/sites/",
            value_field="slug",
        )
    )
    asn = forms.IntegerField(
        required=False,
        label='ASN'
    )
#
# Circuit types
#
class CircuitTypeForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for a CircuitType."""
    slug = SlugField()

    class Meta:
        model = CircuitType
        fields = [
            'name', 'slug',
        ]
class CircuitTypeCSVForm(forms.ModelForm):
    """CSV bulk-import form for CircuitType records."""
    slug = SlugField()

    class Meta:
        model = CircuitType
        fields = CircuitType.csv_headers
        help_texts = {
            'name': 'Name of circuit type',
        }
#
# Circuits
#
class CircuitForm(BootstrapMixin, TenancyForm, CustomFieldForm):
    """Create/edit form for a Circuit."""
    comments = CommentField()
    tags = TagField(
        required=False
    )

    class Meta:
        model = Circuit
        fields = [
            'cid', 'type', 'provider', 'status', 'install_date', 'commit_rate', 'description', 'tenant_group', 'tenant',
            'comments', 'tags',
        ]
        help_texts = {
            'cid': "Unique circuit ID",
            'install_date': "Format: YYYY-MM-DD",
            'commit_rate': "Committed rate",
        }
        widgets = {
            'provider': APISelect(
                api_url="/api/circuits/providers/"
            ),
            'type': APISelect(
                api_url="/api/circuits/circuit-types/"
            ),
            'status': StaticSelect2(),
        }
class CircuitCSVForm(forms.ModelForm):
    """CSV bulk-import form for Circuits; related objects are matched by name."""
    provider = forms.ModelChoiceField(
        queryset=Provider.objects.all(),
        to_field_name='name',
        help_text='Name of parent provider',
        error_messages={
            'invalid_choice': 'Provider not found.'
        }
    )
    type = forms.ModelChoiceField(
        queryset=CircuitType.objects.all(),
        to_field_name='name',
        help_text='Type of circuit',
        error_messages={
            'invalid_choice': 'Invalid circuit type.'
        }
    )
    status = CSVChoiceField(
        choices=CIRCUIT_STATUS_CHOICES,
        required=False,
        help_text='Operational status'
    )
    tenant = forms.ModelChoiceField(
        queryset=Tenant.objects.all(),
        required=False,
        to_field_name='name',
        help_text='Name of assigned tenant',
        error_messages={
            'invalid_choice': 'Tenant not found.'
        }
    )

    class Meta:
        model = Circuit
        fields = [
            'cid', 'provider', 'type', 'status', 'tenant', 'install_date', 'commit_rate', 'description', 'comments',
        ]
class CircuitBulkEditForm(BootstrapMixin, AddRemoveTagsForm, CustomFieldBulkEditForm):
    """Bulk-edit form for multiple selected Circuits (selection via hidden pk)."""
    pk = forms.ModelMultipleChoiceField(
        queryset=Circuit.objects.all(),
        widget=forms.MultipleHiddenInput
    )
    type = forms.ModelChoiceField(
        queryset=CircuitType.objects.all(),
        required=False,
        widget=APISelect(
            api_url="/api/circuits/circuit-types/"
        )
    )
    provider = forms.ModelChoiceField(
        queryset=Provider.objects.all(),
        required=False,
        widget=APISelect(
            api_url="/api/circuits/providers/"
        )
    )
    status = forms.ChoiceField(
        choices=add_blank_choice(CIRCUIT_STATUS_CHOICES),
        required=False,
        initial='',
        widget=StaticSelect2()
    )
    tenant = forms.ModelChoiceField(
        queryset=Tenant.objects.all(),
        required=False,
        widget=APISelect(
            api_url="/api/tenancy/tenants/"
        )
    )
    commit_rate = forms.IntegerField(
        required=False,
        label='Commit rate (Kbps)'
    )
    description = forms.CharField(
        max_length=100,
        required=False
    )
    comments = CommentField(
        widget=SmallTextarea
    )

    class Meta:
        # Fields that may be blanked out in a bulk edit.
        nullable_fields = [
            'tenant', 'commit_rate', 'description', 'comments',
        ]
class CircuitFilterForm(BootstrapMixin, TenancyFilterForm, CustomFieldFilterForm):
    """List-view filter form for Circuits."""
    model = Circuit
    # Explicit rendering order (tenancy fields come from TenancyFilterForm).
    field_order = ['q', 'type', 'provider', 'status', 'site', 'tenant_group', 'tenant', 'commit_rate']
    q = forms.CharField(
        required=False,
        label='Search'
    )
    type = FilterChoiceField(
        queryset=CircuitType.objects.all(),
        to_field_name='slug',
        widget=APISelectMultiple(
            api_url="/api/circuits/circuit-types/",
            value_field="slug",
        )
    )
    provider = FilterChoiceField(
        queryset=Provider.objects.all(),
        to_field_name='slug',
        widget=APISelectMultiple(
            api_url="/api/circuits/providers/",
            value_field="slug",
        )
    )
    status = forms.MultipleChoiceField(
        choices=CIRCUIT_STATUS_CHOICES,
        required=False,
        widget=StaticSelect2Multiple()
    )
    site = FilterChoiceField(
        queryset=Site.objects.all(),
        to_field_name='slug',
        widget=APISelectMultiple(
            api_url="/api/dcim/sites/",
            value_field="slug",
        )
    )
    commit_rate = forms.IntegerField(
        required=False,
        min_value=0,
        label='Commit rate (Kbps)'
    )
#
# Circuit terminations
#
class CircuitTerminationForm(BootstrapMixin, forms.ModelForm):
    """Create/edit form for one end (A/Z side) of a Circuit."""

    class Meta:
        model = CircuitTermination
        fields = [
            'term_side', 'site', 'port_speed', 'upstream_speed', 'xconnect_id', 'pp_info', 'description',
        ]
        help_texts = {
            'port_speed': "Physical circuit speed",
            'xconnect_id': "ID of the local cross-connect",
            'pp_info': "Patch panel ID and port number(s)"
        }
        widgets = {
            # The side (A/Z) is fixed by the view, not chosen by the user.
            'term_side': forms.HiddenInput(),
            'site': APISelect(
                api_url="/api/dcim/sites/"
            )
        }
"""
**********
Matplotlib
**********
Draw networks with matplotlib.
See Also
--------
matplotlib: http://matplotlib.sourceforge.net/
pygraphviz: http://networkx.lanl.gov/pygraphviz/
"""
# Copyright (C) 2004-2012 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.drawing.layout import shell_layout,\
circular_layout,spectral_layout,spring_layout,random_layout
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
# Public drawing API of this module.
__all__ = ['draw',
           'draw_networkx',
           'draw_networkx_nodes',
           'draw_networkx_edges',
           'draw_networkx_labels',
           'draw_networkx_edge_labels',
           'draw_circular',
           'draw_random',
           'draw_spectral',
           'draw_spring',
           'draw_shell',
           'draw_graphviz']
def draw(G, pos=None, ax=None, hold=None, **kwds):
    """Draw the graph G with Matplotlib.

    Draw the graph as a simple representation with no node
    labels or edge labels and using the full Matplotlib figure area
    and no axis labels by default. See draw_networkx() for more
    full-featured drawing that allows title, axis labels etc.

    Parameters
    ----------
    G : graph
       A networkx graph

    pos : dictionary, optional
       A dictionary with nodes as keys and positions as values.
       If not specified a spring layout positioning will be computed.
       See networkx.layout for functions that compute node positions.

    ax : Matplotlib Axes object, optional
       Draw the graph in specified Matplotlib axes.

    hold : bool, optional
       Set the Matplotlib hold state. If True subsequent draw
       commands will be added to the current axes.

    **kwds : optional keywords
       See networkx.draw_networkx() for a description of optional keywords.

    Examples
    --------
    >>> G=nx.dodecahedral_graph()
    >>> nx.draw(G)
    >>> nx.draw(G,pos=nx.spring_layout(G)) # use spring layout

    See Also
    --------
    draw_networkx()
    draw_networkx_nodes()
    draw_networkx_edges()
    draw_networkx_labels()
    draw_networkx_edge_labels()

    Notes
    -----
    This function has the same name as pylab.draw and pyplot.draw
    so beware when using

    >>> from networkx import *

    since you might overwrite the pylab.draw function.

    With pyplot use

    >>> import matplotlib.pyplot as plt
    >>> import networkx as nx
    >>> G=nx.dodecahedral_graph()
    >>> nx.draw(G)  # networkx draw()
    >>> plt.draw()  # pyplot draw()

    Also see the NetworkX drawing examples at
    http://networkx.lanl.gov/gallery.html
    """
    # Import lazily so networkx itself does not require matplotlib.
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        raise ImportError("Matplotlib required for draw()")
    except RuntimeError:
        print("Matplotlib unable to open display")
        raise
    if ax is None:
        cf = plt.gcf()
    else:
        cf = ax.get_figure()
    cf.set_facecolor('w')
    if ax is None:
        # NOTE(review): _axstack is a private matplotlib attribute -- reuse
        # the figure's current axes if any exist, else claim the whole
        # figure area with an invisible-frame axes.
        if cf._axstack() is None:
            ax = cf.add_axes((0, 0, 1, 1))
        else:
            ax = cf.gca()
    # Only draw labels if the caller explicitly supplied some.
    if 'with_labels' not in kwds:
        kwds['with_labels'] = 'labels' in kwds
    # Save the hold state so it can be restored afterwards (plt.hold/ishold
    # are deprecated in matplotlib >= 2.0; this code targets older releases).
    b = plt.ishold()
    # allow callers to override the hold state by passing hold=True|False
    h = kwds.pop('hold', None)
    if h is not None:
        plt.hold(h)
    try:
        draw_networkx(G, pos=pos, ax=ax, **kwds)
        ax.set_axis_off()
        plt.draw_if_interactive()
    except:
        # Restore the hold state even when drawing fails, then re-raise.
        plt.hold(b)
        raise
    plt.hold(b)
    return
def draw_networkx(G, pos=None, with_labels=True, **kwds):
    """Draw the graph G using Matplotlib.

    Draw the graph with Matplotlib with options for node positions,
    labeling, titles, and many other drawing features.
    See draw() for simple drawing without labels or axes.

    Parameters
    ----------
    G : graph
       A networkx graph
    pos : dictionary, optional
       A dictionary with nodes as keys and positions as values.
       If not specified a spring layout positioning will be computed.
       See networkx.layout for functions that compute node positions.
    with_labels : bool, optional (default=True)
       Set to True to draw labels on the nodes.
    **kwds : optional keywords
       Forwarded wholesale to draw_networkx_nodes(), draw_networkx_edges()
       and draw_networkx_labels(); see those functions for the full set
       (ax, nodelist, edgelist, node_size, node_color, node_shape, alpha,
       cmap, vmin, vmax, linewidths, width, edge_color, edge_cmap, style,
       labels, font_size, font_color, font_weight, font_family, label).

    Examples
    --------
    >>> G=nx.dodecahedral_graph()
    >>> nx.draw(G)
    >>> nx.draw(G,pos=nx.spring_layout(G)) # use spring layout
    >>> import matplotlib.pyplot as plt
    >>> limits=plt.axis('off') # turn of axis

    Also see the NetworkX drawing examples at
    http://networkx.lanl.gov/gallery.html

    See Also
    --------
    draw()
    draw_networkx_nodes()
    draw_networkx_edges()
    draw_networkx_labels()
    draw_networkx_edge_labels()
    """
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        raise ImportError("Matplotlib required for draw()")
    except RuntimeError:
        print("Matplotlib unable to open display")
        raise
    if pos is None:
        pos = nx.drawing.spring_layout(G)  # default to spring layout
    # The helpers return their matplotlib collections, but this function's
    # contract is to return None, so the results are deliberately dropped
    # (previously they were bound to unused locals).
    draw_networkx_nodes(G, pos, **kwds)
    draw_networkx_edges(G, pos, **kwds)
    if with_labels:
        draw_networkx_labels(G, pos, **kwds)
    plt.draw_if_interactive()
def draw_networkx_nodes(G, pos,
                        nodelist=None,
                        node_size=300,
                        node_color='r',
                        node_shape='o',
                        alpha=1.0,
                        cmap=None,
                        vmin=None,
                        vmax=None,
                        ax=None,
                        linewidths=None,
                        label=None,
                        **kwds):
    """Draw the nodes of the graph G.

    This draws only the nodes of the graph G.

    Parameters
    ----------
    G : graph
       A networkx graph
    pos : dictionary
       A dictionary with nodes as keys and positions as values.
       Positions should be sequences of length 2.
    ax : Matplotlib Axes object, optional
       Draw the graph in the specified Matplotlib axes.
    nodelist : list, optional
       Draw only specified nodes (default G.nodes())
    node_size : scalar or array
       Size of nodes (default=300).  If an array is specified it must be
       the same length as nodelist.
    node_color : color string, or array of floats
       Node color. Can be a single color format string (default='r'),
       or a sequence of colors with the same length as nodelist.
       If numeric values are specified they will be mapped to
       colors using the cmap and vmin,vmax parameters.  See
       matplotlib.scatter for more details.
    node_shape : string
       The shape of the node.  Specification is as matplotlib.scatter
       marker, one of 'so^>v<dph8' (default='o').
    alpha : float
       The node transparency (default=1.0)
    cmap : Matplotlib colormap
       Colormap for mapping intensities of nodes (default=None)
    vmin,vmax : floats
       Minimum and maximum for node colormap scaling (default=None)
    linewidths : [None | scalar | sequence]
       Line width of symbol border (default =1.0)
    label : [None | string]
       Label for legend

    Returns
    -------
    matplotlib.collections.PathCollection
        `PathCollection` of the nodes, or None if nodelist is empty.

    Examples
    --------
    >>> G=nx.dodecahedral_graph()
    >>> nodes=nx.draw_networkx_nodes(G,pos=nx.spring_layout(G))

    See Also
    --------
    draw()
    draw_networkx()
    draw_networkx_edges()
    draw_networkx_labels()
    draw_networkx_edge_labels()
    """
    try:
        import matplotlib.pyplot as plt
        import numpy
    except ImportError:
        raise ImportError("Matplotlib required for draw()")
    except RuntimeError:
        print("Matplotlib unable to open display")
        raise
    if ax is None:
        ax = plt.gca()
    if nodelist is None:
        nodelist = G.nodes()
    # Empty nodelist: nothing to draw.  (The former extra clause
    # `len(nodelist) == 0` was redundant -- a falsy sequence is empty.)
    if not nodelist:
        return None
    try:
        xy = numpy.asarray([pos[v] for v in nodelist])
    except KeyError as e:
        raise nx.NetworkXError('Node %s has no position.' % e)
    except ValueError:
        raise nx.NetworkXError('Bad value in node positions.')
    node_collection = ax.scatter(xy[:, 0], xy[:, 1],
                                 s=node_size,
                                 c=node_color,
                                 marker=node_shape,
                                 cmap=cmap,
                                 vmin=vmin,
                                 vmax=vmax,
                                 alpha=alpha,
                                 linewidths=linewidths,
                                 label=label)
    # zorder 2 keeps nodes drawn on top of edges (which use zorder 1)
    node_collection.set_zorder(2)
    return node_collection
def draw_networkx_edges(G, pos,
                        edgelist=None,
                        width=1.0,
                        edge_color='k',
                        style='solid',
                        alpha=None,
                        edge_cmap=None,
                        edge_vmin=None,
                        edge_vmax=None,
                        ax=None,
                        arrows=True,
                        label=None,
                        **kwds):
    """Draw the edges of the graph G.

    This draws only the edges of the graph G.

    Parameters
    ----------
    G : graph
       A networkx graph
    pos : dictionary
       A dictionary with nodes as keys and positions as values.
       Positions should be sequences of length 2.
    edgelist : collection of edge tuples
       Draw only specified edges (default=G.edges())
    width : float
       Line width of edges (default =1.0)
    edge_color : color string, or array of floats
       Edge color. Can be a single color format string (default='r'),
       or a sequence of colors with the same length as edgelist.
       If numeric values are specified they will be mapped to
       colors using the edge_cmap and edge_vmin,edge_vmax parameters.
    style : string
       Edge line style (default='solid') (solid|dashed|dotted,dashdot)
    alpha : float
       The edge transparency (default=1.0)
    edge_cmap : Matplotlib colormap
       Colormap for mapping intensities of edges (default=None)
    edge_vmin,edge_vmax : floats
       Minimum and maximum for edge colormap scaling (default=None)
    ax : Matplotlib Axes object, optional
       Draw the graph in the specified Matplotlib axes.
    arrows : bool, optional (default=True)
       For directed graphs, if True draw arrowheads.
    label : [None | string]
       Label for legend

    Returns
    -------
    matplotlib.collection.LineCollection
        `LineCollection` of the edges, or None if edgelist is empty.

    Notes
    -----
    For directed graphs, "arrows" (actually just thicker stubs) are drawn
    at the head end.  Arrows can be turned off with keyword arrows=False.
    Yes, it is ugly but drawing proper arrows with Matplotlib this
    way is tricky.

    Examples
    --------
    >>> G=nx.dodecahedral_graph()
    >>> edges=nx.draw_networkx_edges(G,pos=nx.spring_layout(G))

    See Also
    --------
    draw()
    draw_networkx()
    draw_networkx_nodes()
    draw_networkx_labels()
    draw_networkx_edge_labels()
    """
    try:
        import matplotlib
        import matplotlib.pyplot as plt
        import matplotlib.cbook as cb
        from matplotlib.colors import colorConverter, Colormap
        from matplotlib.collections import LineCollection
        import numpy
    except ImportError:
        raise ImportError("Matplotlib required for draw()")
    except RuntimeError:
        print("Matplotlib unable to open display")
        raise
    if ax is None:
        ax = plt.gca()
    if edgelist is None:
        edgelist = G.edges()
    if not edgelist or len(edgelist) == 0:  # no edges!
        return None
    # set edge positions: shape (m, 2, 2) -- one (src_xy, dst_xy) pair per edge
    edge_pos = numpy.asarray([(pos[e[0]], pos[e[1]]) for e in edgelist])
    # LineCollection wants a sequence of widths; wrap a scalar in a tuple
    if not cb.iterable(width):
        lw = (width,)
    else:
        lw = width
    # Resolve edge_color into either a tuple of rgba tuples (edge_colors)
    # or None, which signals "numeric values, map through edge_cmap below".
    if not cb.is_string_like(edge_color) \
            and cb.iterable(edge_color) \
            and len(edge_color) == len(edge_pos):
        if numpy.alltrue([cb.is_string_like(c)
                          for c in edge_color]):
            # (should check ALL elements)
            # list of color letters such as ['k','r','k',...]
            edge_colors = tuple([colorConverter.to_rgba(c, alpha)
                                 for c in edge_color])
        elif numpy.alltrue([not cb.is_string_like(c)
                            for c in edge_color]):
            # If color specs are given as (rgb) or (rgba) tuples, we're OK
            if numpy.alltrue([cb.iterable(c) and len(c) in (3, 4)
                              for c in edge_color]):
                edge_colors = tuple(edge_color)
            else:
                # numbers (which are going to be mapped with a colormap)
                edge_colors = None
        else:
            raise ValueError('edge_color must consist of either color names or numbers')
    else:
        # single color spec, applied to every edge
        if cb.is_string_like(edge_color) or len(edge_color) == 1:
            edge_colors = (colorConverter.to_rgba(edge_color, alpha), )
        else:
            raise ValueError('edge_color must be a single color or list of exactly m colors where m is the number or edges')
    edge_collection = LineCollection(edge_pos,
                                     colors=edge_colors,
                                     linewidths=lw,
                                     antialiaseds=(1,),
                                     linestyle=style,
                                     transOffset=ax.transData,
                                     )
    edge_collection.set_zorder(1)  # edges go behind nodes
    edge_collection.set_label(label)
    ax.add_collection(edge_collection)
    # Note: there was a bug in mpl regarding the handling of alpha values for
    # each line in a LineCollection. It was fixed in matplotlib in r7184 and
    # r7189 (June 6 2009). We should then not set the alpha value globally,
    # since the user can instead provide per-edge alphas now. Only set it
    # globally if provided as a scalar.
    if cb.is_numlike(alpha):
        edge_collection.set_alpha(alpha)
    if edge_colors is None:
        # numeric color values: map through the (optional) colormap
        if edge_cmap is not None:
            assert(isinstance(edge_cmap, Colormap))
        edge_collection.set_array(numpy.asarray(edge_color))
        edge_collection.set_cmap(edge_cmap)
        if edge_vmin is not None or edge_vmax is not None:
            edge_collection.set_clim(edge_vmin, edge_vmax)
        else:
            edge_collection.autoscale()
    arrow_collection = None
    if G.is_directed() and arrows:
        # a directed graph hack
        # draw thick line segments at head end of edge
        # waiting for someone else to implement arrows that will work
        arrow_colors = edge_colors
        a_pos = []
        p = 1.0-0.25  # make head segment 25 percent of edge length
        for src, dst in edge_pos:
            x1, y1 = src
            x2, y2 = dst
            dx = x2-x1  # x offset
            dy = y2-y1  # y offset
            d = numpy.sqrt(float(dx**2 + dy**2))  # length of edge
            if d == 0:  # source and target at same position
                continue
            # NOTE(review): the `else` below binds to the *horizontal* `if`,
            # so the vertical-edge branch is overwritten whenever dy != 0 --
            # looks like the second `if` was meant to be `elif`; confirm
            # against upstream before changing.
            if dx == 0:  # vertical edge
                xa = x2
                ya = dy*p+y1
            if dy == 0:  # horizontal edge
                ya = y2
                xa = dx*p+x1
            else:
                theta = numpy.arctan2(dy, dx)
                xa = p*d*numpy.cos(theta)+x1
                ya = p*d*numpy.sin(theta)+y1
            a_pos.append(((xa, ya), (x2, y2)))
        arrow_collection = LineCollection(a_pos,
                                          colors=arrow_colors,
                                          linewidths=[4*ww for ww in lw],
                                          antialiaseds=(1,),
                                          transOffset=ax.transData,
                                          )
        arrow_collection.set_zorder(1)  # edges go behind nodes
        arrow_collection.set_label(label)
        ax.add_collection(arrow_collection)
    # update view: grow the data limits to cover all edges plus 5% padding
    minx = numpy.amin(numpy.ravel(edge_pos[:, :, 0]))
    maxx = numpy.amax(numpy.ravel(edge_pos[:, :, 0]))
    miny = numpy.amin(numpy.ravel(edge_pos[:, :, 1]))
    maxy = numpy.amax(numpy.ravel(edge_pos[:, :, 1]))
    w = maxx-minx
    h = maxy-miny
    padx, pady = 0.05*w, 0.05*h
    corners = (minx-padx, miny-pady), (maxx+padx, maxy+pady)
    ax.update_datalim(corners)
    ax.autoscale_view()
    # if arrow_collection:
    return edge_collection
def draw_networkx_labels(G, pos,
                         labels=None,
                         font_size=12,
                         font_color='k',
                         font_family='sans-serif',
                         font_weight='normal',
                         alpha=1.0,
                         ax=None,
                         **kwds):
    """Draw node labels on the graph G.

    Parameters
    ----------
    G : graph
       A networkx graph
    pos : dictionary
       A dictionary with nodes as keys and positions as values.
       Positions should be sequences of length 2.
    labels : dictionary, optional (default=None)
       Node labels in a dictionary keyed by node of text labels;
       if None, every node is labeled with itself.
    font_size : int
       Font size for text labels (default=12)
    font_color : string
       Font color string (default='k' black)
    font_family : string
       Font family (default='sans-serif')
    font_weight : string
       Font weight (default='normal')
    alpha : float
       The text transparency (default=1.0)
    ax : Matplotlib Axes object, optional
       Draw the graph in the specified Matplotlib axes.

    Returns
    -------
    dict
        `dict` of matplotlib Text instances keyed on the nodes.

    Examples
    --------
    >>> G=nx.dodecahedral_graph()
    >>> labels=nx.draw_networkx_labels(G,pos=nx.spring_layout(G))

    See Also
    --------
    draw()
    draw_networkx()
    draw_networkx_nodes()
    draw_networkx_edges()
    draw_networkx_edge_labels()
    """
    try:
        import matplotlib.pyplot as plt
        import matplotlib.cbook as cb
    except ImportError:
        raise ImportError("Matplotlib required for draw()")
    except RuntimeError:
        print("Matplotlib unable to open display")
        raise
    if ax is None:
        ax = plt.gca()
    if labels is None:
        labels = dict((n, n) for n in G.nodes())
    # set optional alignment
    horizontalalignment = kwds.get('horizontalalignment', 'center')
    verticalalignment = kwds.get('verticalalignment', 'center')
    text_items = {}  # there is no text collection so we'll fake one
    for n, label in labels.items():
        (x, y) = pos[n]
        if not cb.is_string_like(label):
            label = str(label)  # this will cause "1" and 1 to be labeled the same
        t = ax.text(x, y,
                    label,
                    size=font_size,
                    color=font_color,
                    family=font_family,
                    weight=font_weight,
                    # BUG FIX: alpha was accepted but never forwarded, so
                    # the parameter was silently ignored.  The default of
                    # 1.0 renders identically to the old behavior.
                    alpha=alpha,
                    horizontalalignment=horizontalalignment,
                    verticalalignment=verticalalignment,
                    transform=ax.transData,
                    clip_on=True,
                    )
        text_items[n] = t
    return text_items
def draw_networkx_edge_labels(G, pos,
                              edge_labels=None,
                              label_pos=0.5,
                              font_size=10,
                              font_color='k',
                              font_family='sans-serif',
                              font_weight='normal',
                              alpha=1.0,
                              bbox=None,
                              ax=None,
                              rotate=True,
                              **kwds):
    """Draw edge labels.

    Parameters
    ----------
    G : graph
       A networkx graph
    pos : dictionary
       A dictionary with nodes as keys and positions as values.
       Positions should be sequences of length 2.
    ax : Matplotlib Axes object, optional
       Draw the graph in the specified Matplotlib axes.
    alpha : float
       The text transparency (default=1.0).
       NOTE(review): currently accepted but never forwarded to ax.text(),
       so it has no effect -- confirm against upstream before relying on it.
    edge_labels : dictionary
       Edge labels in a dictionary keyed by edge two-tuple of text
       labels (default=None). Only labels for the keys in the dictionary
       are drawn.  If None, the edge data dict is used as the label.
    label_pos : float
       Position of edge label along edge (0=head, 0.5=center, 1=tail)
    font_size : int
       Font size for text labels (default=12)
    font_color : string
       Font color string (default='k' black)
    font_weight : string
       Font weight (default='normal')
    font_family : string
       Font family (default='sans-serif')
    bbox : Matplotlib bbox
       Specify text box shape and colors.
    clip_on : bool
       Turn on clipping at axis boundaries (default=True)

    Returns
    -------
    dict
        `dict` of matplotlib Text instances keyed on the edges

    Examples
    --------
    >>> G=nx.dodecahedral_graph()
    >>> edge_labels=nx.draw_networkx_edge_labels(G,pos=nx.spring_layout(G))

    See Also
    --------
    draw()
    draw_networkx()
    draw_networkx_nodes()
    draw_networkx_edges()
    draw_networkx_labels()
    """
    try:
        import matplotlib.pyplot as plt
        import matplotlib.cbook as cb
        import numpy
    except ImportError:
        raise ImportError("Matplotlib required for draw()")
    except RuntimeError:
        print("Matplotlib unable to open display")
        raise
    if ax is None:
        ax = plt.gca()
    if edge_labels is None:
        # default: label each edge with its data dictionary
        labels = dict(((u, v), d) for u, v, d in G.edges(data=True))
    else:
        labels = edge_labels
    text_items = {}
    for (n1, n2), label in labels.items():
        (x1, y1) = pos[n1]
        (x2, y2) = pos[n2]
        # interpolate the label position along the edge
        (x, y) = (x1 * label_pos + x2 * (1.0 - label_pos),
                  y1 * label_pos + y2 * (1.0 - label_pos))
        if rotate:
            angle = numpy.arctan2(y2-y1, x2-x1)/(2.0*numpy.pi)*360  # degrees
            # make label orientation "right-side-up"
            if angle > 90:
                angle -= 180
            if angle < - 90:
                angle += 180
            # transform data coordinate angle to screen coordinate angle
            xy = numpy.array((x, y))
            trans_angle = ax.transData.transform_angles(numpy.array((angle,)),
                                                        xy.reshape((1, 2)))[0]
        else:
            trans_angle = 0.0
        # use default box of white with white border
        # (created lazily on the first iteration, then reused)
        if bbox is None:
            bbox = dict(boxstyle='round',
                        ec=(1.0, 1.0, 1.0),
                        fc=(1.0, 1.0, 1.0),
                        )
        if not cb.is_string_like(label):
            label = str(label)  # this will cause "1" and 1 to be labeled the same
        # set optional alignment
        horizontalalignment = kwds.get('horizontalalignment', 'center')
        verticalalignment = kwds.get('verticalalignment', 'center')
        t = ax.text(x, y,
                    label,
                    size=font_size,
                    color=font_color,
                    family=font_family,
                    weight=font_weight,
                    horizontalalignment=horizontalalignment,
                    verticalalignment=verticalalignment,
                    rotation=trans_angle,
                    transform=ax.transData,
                    bbox=bbox,
                    zorder=1,
                    clip_on=True,
                    )
        text_items[(n1, n2)] = t
    return text_items
def draw_circular(G, **kwargs):
    """Draw the graph G with a circular layout.

    Parameters
    ----------
    G : graph
       A networkx graph
    **kwargs : optional keywords
       Passed straight through to networkx.draw_networkx(); ``pos`` is
       computed here and must not be supplied by the caller.
    """
    layout = circular_layout(G)
    draw(G, layout, **kwargs)
def draw_random(G, **kwargs):
    """Draw the graph G with a random layout.

    Parameters
    ----------
    G : graph
       A networkx graph
    **kwargs : optional keywords
       Passed straight through to networkx.draw_networkx(); ``pos`` is
       computed here and must not be supplied by the caller.
    """
    layout = random_layout(G)
    draw(G, layout, **kwargs)
def draw_spectral(G, **kwargs):
    """Draw the graph G with a spectral layout.

    Parameters
    ----------
    G : graph
       A networkx graph
    **kwargs : optional keywords
       Passed straight through to networkx.draw_networkx(); ``pos`` is
       computed here and must not be supplied by the caller.
    """
    layout = spectral_layout(G)
    draw(G, layout, **kwargs)
def draw_spring(G, **kwargs):
    """Draw the graph G with a spring layout.

    Parameters
    ----------
    G : graph
       A networkx graph
    **kwargs : optional keywords
       Passed straight through to networkx.draw_networkx(); ``pos`` is
       computed here and must not be supplied by the caller.
    """
    layout = spring_layout(G)
    draw(G, layout, **kwargs)
def draw_shell(G, **kwargs):
    """Draw networkx graph with shell layout.

    Parameters
    ----------
    G : graph
       A networkx graph
    **kwargs : optional keywords
       See networkx.draw_networkx() for a description of optional
       keywords.  The special keyword ``nlist`` (list of node lists, one
       per shell) is consumed here and handed to shell_layout(); it is
       not forwarded to draw().
    """
    # pop() reads and removes 'nlist' in one step; the previous
    # get()-then-del dance did the same thing in two.
    nlist = kwargs.pop('nlist', None)
    draw(G, shell_layout(G, nlist=nlist), **kwargs)
def draw_graphviz(G, prog="neato", **kwargs):
    """Draw networkx graph with graphviz layout.

    Parameters
    ----------
    G : graph
       A networkx graph
    prog : string, optional
       Name of the Graphviz layout program to run (default "neato").
    **kwargs : optional keywords
       See networkx.draw_networkx() for a description of optional keywords.
    """
    layout = nx.drawing.graphviz_layout(G, prog)
    draw(G, layout, **kwargs)
def draw_nx(G, pos, **kwds):
    """Deprecated alias kept for backward compatibility.

    Use draw() or draw_networkx() instead.
    """
    draw(G, pos=pos, **kwds)
# fixture for nose tests
def setup_module(module):
    """Nose fixture: skip this module's tests when matplotlib is unusable."""
    from nose import SkipTest
    try:
        import matplotlib as mpl
        mpl.use('PS', warn=False)
        import matplotlib.pyplot as plt
    except Exception:
        # A bare ``except:`` would also swallow SystemExit and
        # KeyboardInterrupt; Exception still covers both ImportError and
        # the backend RuntimeErrors we want to turn into a skip.
        raise SkipTest("matplotlib not available")
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import socket
import time
import datetime
import eventlet.patcher
import httplib2
import webob
from glance.common import exception
from glance.common import utils
from glance.common import wsgi
from glance.tests import utils as test_utils
class RequestTest(test_utils.BaseTestCase):
    """Content-type negotiation behavior of wsgi.Request."""

    def _check_best_match(self, accept, path='/tests/123'):
        # Every Accept header in these tests must negotiate down to JSON.
        req = wsgi.Request.blank(path)
        req.headers["Accept"] = accept
        self.assertEqual(req.best_match_content_type(), "application/json")

    def test_content_type_missing(self):
        req = wsgi.Request.blank('/tests/123')
        self.assertRaises(exception.InvalidContentType,
                          req.get_content_type, ('application/xml'))

    def test_content_type_unsupported(self):
        req = wsgi.Request.blank('/tests/123')
        req.headers["Content-Type"] = "text/html"
        self.assertRaises(exception.InvalidContentType,
                          req.get_content_type, ('application/xml'))

    def test_content_type_with_charset(self):
        # charset parameters must be stripped from the negotiated type
        req = wsgi.Request.blank('/tests/123')
        req.headers["Content-Type"] = "application/json; charset=UTF-8"
        self.assertEqual(req.get_content_type(('application/json')),
                         "application/json")

    def test_content_type_from_accept_xml(self):
        self._check_best_match("application/xml")

    def test_content_type_from_accept_json(self):
        self._check_best_match("application/json")

    def test_content_type_from_accept_xml_json(self):
        self._check_best_match("application/xml, application/json")

    def test_content_type_from_accept_json_xml_quality(self):
        # JSON wins even when XML carries the higher quality value
        self._check_best_match("application/json; q=0.3, "
                               "application/xml; q=0.9")

    def test_content_type_accept_default(self):
        # unknown Accept values fall back to JSON
        self._check_best_match("application/unsupported1",
                               path='/tests/123.unsupported')
class ResourceTest(test_utils.BaseTestCase):
    """Tests for wsgi.Resource routing-arg extraction and dispatch."""

    def test_get_action_args(self):
        # Routing args live at index 1 of the 'wsgiorg.routing_args' value;
        # 'controller' and 'format' entries are stripped from the result.
        env = {
            'wsgiorg.routing_args': [
                None,
                {
                    'controller': None,
                    'format': None,
                    'action': 'update',
                    'id': 12,
                },
            ],
        }
        expected = {'action': 'update', 'id': 12}
        actual = wsgi.Resource(None, None, None).get_action_args(env)
        self.assertEqual(actual, expected)

    def test_get_action_args_invalid_index(self):
        # A missing index 1 must yield an empty dict, not raise.
        env = {'wsgiorg.routing_args': []}
        expected = {}
        actual = wsgi.Resource(None, None, None).get_action_args(env)
        self.assertEqual(actual, expected)

    def test_get_action_args_del_controller_error(self):
        # No 'controller' key present: its deletion error is swallowed.
        actions = {'format': None,
                   'action': 'update',
                   'id': 12}
        env = {'wsgiorg.routing_args': [None, actions]}
        expected = {'action': 'update', 'id': 12}
        actual = wsgi.Resource(None, None, None).get_action_args(env)
        self.assertEqual(actual, expected)

    def test_get_action_args_del_format_error(self):
        # No 'format' key either: the remaining args still come through.
        actions = {'action': 'update', 'id': 12}
        env = {'wsgiorg.routing_args': [None, actions]}
        expected = {'action': 'update', 'id': 12}
        actual = wsgi.Resource(None, None, None).get_action_args(env)
        self.assertEqual(actual, expected)

    def test_dispatch(self):
        # dispatch() routes to the named method, forwarding args/kwargs.
        class Controller(object):
            def index(self, shirt, pants=None):
                return (shirt, pants)
        resource = wsgi.Resource(None, None, None)
        actual = resource.dispatch(Controller(), 'index', 'on', pants='off')
        expected = ('on', 'off')
        self.assertEqual(actual, expected)

    def test_dispatch_default(self):
        # A missing action falls back to the controller's 'default' method.
        class Controller(object):
            def default(self, shirt, pants=None):
                return (shirt, pants)
        resource = wsgi.Resource(None, None, None)
        actual = resource.dispatch(Controller(), 'index', 'on', pants='off')
        expected = ('on', 'off')
        self.assertEqual(actual, expected)

    def test_dispatch_no_default(self):
        # Neither the action nor 'default' exists: AttributeError propagates.
        class Controller(object):
            def show(self, shirt, pants=None):
                return (shirt, pants)
        resource = wsgi.Resource(None, None, None)
        self.assertRaises(AttributeError, resource.dispatch, Controller(),
                          'index', 'on', pants='off')

    def test_call(self):
        class FakeController(object):
            def index(self, shirt, pants=None):
                return (shirt, pants)
        resource = wsgi.Resource(FakeController(), None, None)
        # Stubbed dispatch: deserialization succeeds with empty args, while
        # serialization raises HTTPForbidden.  __call__ must return that
        # exception object as the response rather than letting it escape.
        def dispatch(self, obj, action, *args, **kwargs):
            if isinstance(obj, wsgi.JSONRequestDeserializer):
                return []
            if isinstance(obj, wsgi.JSONResponseSerializer):
                raise webob.exc.HTTPForbidden()
        self.stubs.Set(wsgi.Resource, 'dispatch', dispatch)
        request = wsgi.Request.blank('/')
        response = resource.__call__(request)
        self.assertIsInstance(response, webob.exc.HTTPForbidden)
        self.assertEqual(response.status_code, 403)
class JSONResponseSerializerTest(test_utils.BaseTestCase):
    """Tests for wsgi.JSONResponseSerializer."""

    def test_to_json(self):
        fixture = {"key": "value"}
        expected = '{"key": "value"}'
        actual = wsgi.JSONResponseSerializer().to_json(fixture)
        self.assertEqual(actual, expected)

    def test_to_json_with_date_format_value(self):
        # datetimes are serialized in ISO 8601 form
        fixture = {"date": datetime.datetime(1, 3, 8, 2)}
        expected = '{"date": "0001-03-08T02:00:00"}'
        actual = wsgi.JSONResponseSerializer().to_json(fixture)
        self.assertEqual(actual, expected)

    def test_to_json_with_more_deep_format(self):
        # nested containers serialize recursively
        fixture = {"is_public": True, "name": [{"name1": "test"}]}
        expected = '{"is_public": true, "name": [{"name1": "test"}]}'
        actual = wsgi.JSONResponseSerializer().to_json(fixture)
        self.assertEqual(actual, expected)

    def test_default(self):
        fixture = {"key": "value"}
        response = webob.Response()
        wsgi.JSONResponseSerializer().default(response, fixture)
        self.assertEqual(response.status_int, 200)
        # BUG FIX: this used filter(lambda ...) + len(); on Python 3
        # filter() returns a lazy iterator and len() raises TypeError.
        # A list comprehension behaves identically on Python 2 and 3.
        content_types = [h for h in response.headerlist
                         if h[0] == 'Content-Type']
        self.assertEqual(len(content_types), 1)
        self.assertEqual(response.content_type, 'application/json')
        self.assertEqual(response.body, '{"key": "value"}')
class JSONRequestDeserializerTest(test_utils.BaseTestCase):
    """Tests for wsgi.JSONRequestDeserializer (plus wsgi.get_bind_addr)."""

    def _post_request(self, body):
        # Build a POST request carrying the given body.
        req = wsgi.Request.blank('/')
        req.method = 'POST'
        req.body = body
        return req

    def test_has_body_no_content_length(self):
        req = self._post_request('asdf')
        req.headers.pop('Content-Length')
        self.assertFalse(wsgi.JSONRequestDeserializer().has_body(req))

    def test_has_body_zero_content_length(self):
        req = self._post_request('asdf')
        req.headers['Content-Length'] = 0
        self.assertFalse(wsgi.JSONRequestDeserializer().has_body(req))

    def test_has_body_has_content_length(self):
        req = self._post_request('asdf')
        self.assertTrue('Content-Length' in req.headers)
        self.assertTrue(wsgi.JSONRequestDeserializer().has_body(req))

    def test_no_body_no_content_length(self):
        req = wsgi.Request.blank('/')
        self.assertFalse(wsgi.JSONRequestDeserializer().has_body(req))

    def test_from_json(self):
        parsed = wsgi.JSONRequestDeserializer().from_json('{"key": "value"}')
        self.assertEqual(parsed, {"key": "value"})

    def test_from_json_malformed(self):
        self.assertRaises(webob.exc.HTTPBadRequest,
                          wsgi.JSONRequestDeserializer().from_json,
                          'kjasdklfjsklajf')

    def test_default_no_body(self):
        req = wsgi.Request.blank('/')
        self.assertEqual(wsgi.JSONRequestDeserializer().default(req), {})

    def test_default_with_body(self):
        req = self._post_request('{"key": "value"}')
        self.assertEqual(wsgi.JSONRequestDeserializer().default(req),
                         {"body": {"key": "value"}})

    def test_has_body_has_transfer_encoding(self):
        # transfer-encoding alone is enough to signal a body
        req = self._post_request('fake_body')
        req.headers['transfer-encoding'] = 0
        self.assertTrue('transfer-encoding' in req.headers)
        self.assertTrue(wsgi.JSONRequestDeserializer().has_body(req))

    def test_get_bind_addr_default_value(self):
        # NOTE(review): lives in this class historically, but exercises
        # wsgi.get_bind_addr rather than the deserializer.
        self.assertEqual(('0.0.0.0', '123456'),
                         wsgi.get_bind_addr(default_port="123456"))
class ServerTest(test_utils.BaseTestCase):
    """Tests for wsgi.Server."""

    def test_create_pool(self):
        """Ensure the wsgi thread pool is an eventlet.greenpool.GreenPool."""
        actual = wsgi.Server(threads=1).create_pool()
        # assertIsInstance reports the offending type on failure, unlike
        # the former assertTrue(isinstance(...)) which only printed False.
        self.assertIsInstance(actual, eventlet.greenpool.GreenPool)
class TestHelpers(test_utils.BaseTestCase):
    """Tests for the image-meta <-> HTTP-header conversion helpers."""

    def test_headers_are_unicode(self):
        """
        Verifies that the headers returned by conversion code are unicode.
        Headers are passed via http in non-testing mode, which automatically
        converts them to unicode. Verifying that the method does the
        conversion proves that we aren't passing data that works in tests
        but will fail in production.
        """
        fixture = {'name': 'fake public image',
                   'is_public': True,
                   'size': 19,
                   'location': "file:///tmp/glance-tests/2",
                   'properties': {'distro': 'Ubuntu 10.04 LTS'}}
        headers = utils.image_meta_to_http_headers(fixture)
        for k, v in headers.iteritems():
            # assertTrue replaces the deprecated assert_ alias
            self.assertTrue(isinstance(v, unicode), "%s is not unicode" % v)

    def test_data_passed_properly_through_headers(self):
        """
        Verifies that data is the same after being passed through headers
        """
        # BUG FIX: the fixture previously listed the key 'name' twice
        # ('fake public image' and None); the dict literal silently kept
        # only the second entry, so only 'name': None -- the value the
        # assertions below actually exercise -- is retained here.
        fixture = {'is_public': True,
                   'deleted': False,
                   'name': None,
                   'size': 19,
                   'location': "file:///tmp/glance-tests/2",
                   'properties': {'distro': 'Ubuntu 10.04 LTS'}}
        headers = utils.image_meta_to_http_headers(fixture)

        class FakeResponse():
            pass

        response = FakeResponse()
        response.headers = headers
        result = utils.get_image_meta_from_headers(response)
        for k, v in fixture.iteritems():
            if v is not None:
                self.assertEqual(v, result[k])
            else:
                # None-valued meta must be dropped during the round trip
                self.assertFalse(k in result)
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import datetime
import json
import six
from .. import base
from girder import events
from girder.constants import AccessType, SortDir
from girder.models.notification import Notification, ProgressState
from girder.models.folder import Folder
from girder.models.item import Item
from girder.models.user import User
def setUpModule():
    """Start the embedded Girder test server once for this whole module."""
    base.startServer()
def tearDownModule():
    """Stop the embedded Girder test server after all tests have run."""
    base.stopServer()
class FolderTestCase(base.TestCase):
def setUp(self):
    """Create one admin and one regular user for the folder tests."""
    base.TestCase.setUp(self)
    # The first user created becomes the site admin.
    admin_spec = {
        'email': 'good@email.com',
        'login': 'goodlogin',
        'firstName': 'First',
        'lastName': 'Last',
        'password': 'goodpassword'
    }
    user_spec = {
        'email': 'regularuser@email.com',
        'login': 'regularuser',
        'firstName': 'First',
        'lastName': 'Last',
        'password': 'goodpassword'
    }
    self.admin = User().createUser(**admin_spec)
    self.user = User().createUser(**user_spec)
def testChildFolders(self):
# Test with some bad parameters
resp = self.request(path='/folder', method='GET', params={})
self.assertStatus(resp, 400)
self.assertEqual(resp.json['message'], 'Invalid search mode.')
resp = self.request(path='/folder', method='GET', params={
'parentType': 'invalid',
'parentId': self.admin['_id']
})
self.assertStatus(resp, 400)
self.assertEqual(
resp.json['message'],
'Invalid value for parentType: "invalid". Allowed values: folder, user, collection.')
# We should only be able to see the public folder if we are anonymous
resp = self.request(path='/folder', method='GET', params={
'parentType': 'user',
'parentId': self.admin['_id']
})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 1)
# Test GET on the result folder
resp = self.request(path='/folder/%s' % str(resp.json[0]['_id']))
self.assertStatusOk(resp)
self.assertIsInstance(resp.json, dict)
self.assertFalse('access' in resp.json)
# If we log in as the user, we should also be able to see the
# private folder. Also test that our sortdir param works.
resp = self.request(
path='/folder', method='GET', user=self.admin, params={
'parentType': 'user',
'parentId': self.admin['_id'],
'sort': 'name',
'sortdir': SortDir.DESCENDING
})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 2)
self.assertEqual(resp.json[0]['name'], 'Public')
self.assertEqual(resp.json[1]['name'], 'Private')
publicFolder = resp.json[0]
privateFolder = resp.json[1]
# Change properties of a folder
resp = self.request(
path='/folder/%s' % publicFolder['_id'], method='PUT',
user=self.admin, params={
'name': 'New name ',
'description': ' A description '
})
self.assertStatusOk(resp)
self.assertEqual(resp.json['name'], 'New name')
self.assertEqual(resp.json['description'], 'A description')
# Move should fail with a bogus parent
resp = self.request(
path='/folder/%s' % publicFolder['_id'], method='PUT',
user=self.admin, params={
'parentType': 'badParent',
'parentId': privateFolder['_id']
})
self.assertStatus(resp, 400)
# Move the public folder underneath the private folder
resp = self.request(
path='/folder/%s' % publicFolder['_id'], method='PUT',
user=self.admin, params={
'parentType': 'folder',
'parentId': privateFolder['_id']
})
self.assertStatusOk(resp)
self.assertEqual(resp.json['parentCollection'], 'folder')
self.assertEqual(resp.json['parentId'], privateFolder['_id'])
self.assertEqual(resp.json['name'], 'New name')
# Move should fail if we don't have write permission on the
# destination parent
publicFolder = Folder().load(publicFolder['_id'], force=True)
publicFolder = Folder().setUserAccess(publicFolder, self.user, AccessType.WRITE, save=True)
resp = self.request(
path='/folder/%s' % publicFolder['_id'], method='PUT',
user=self.user, params={
'parentId': self.admin['_id'],
'parentType': 'user'
})
self.assertStatus(resp, 403)
self.assertTrue(resp.json['message'].startswith(
'Write access denied for user'))
def testCreateFolder(self):
self.ensureRequiredParams(
path='/folder', method='POST', required=['name', 'parentId'],
user=self.admin)
# Grab the default user folders
resp = self.request(
path='/folder', method='GET', user=self.admin, params={
'parentType': 'user',
'parentId': self.admin['_id'],
'sort': 'name',
'sortdir': 1
})
privateFolder = resp.json[0]
publicFolder = resp.json[1]
self.assertEqual(privateFolder['name'], 'Private')
self.assertEqual(publicFolder['name'], 'Public')
# Try to create a folder as anonymous; should fail
resp = self.request(path='/folder', method='POST', params={
'name': 'a folder',
'parentId': publicFolder['_id']
})
self.assertStatus(resp, 401)
# Try to create a folder with a bogus parent; should fail
resp = self.request(
path='/folder', method='POST', user=self.admin, params={
'name': ' My public subfolder ',
'parentType': 'badParent',
'parentId': publicFolder['_id']
})
self.assertStatus(resp, 400)
# Try to create a folder with a blank name; should fail
resp = self.request(
path='/folder', method='POST', user=self.admin, params={
'name': ' ',
'parentId': publicFolder['_id']
})
self.assertStatus(resp, 400)
# Actually create subfolder under Public
resp = self.request(
path='/folder', method='POST', user=self.admin, params={
'name': ' My public subfolder ',
'parentId': publicFolder['_id']
})
self.assertStatusOk(resp)
self.assertEqual(resp.json['parentId'], publicFolder['_id'])
self.assertEqual(resp.json['parentCollection'], 'folder')
self.assertTrue(resp.json['public'])
folder = Folder().load(resp.json['_id'], force=True)
self.assertTrue(Folder().hasAccess(folder, self.admin, AccessType.ADMIN))
# Now fetch the children of Public, we should see it
resp = self.request(
path='/folder', method='GET', user=self.admin, params={
'parentType': 'folder',
'parentId': publicFolder['_id']
})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 1)
self.assertEqual(resp.json[0]['name'], 'My public subfolder')
# Try to create a folder with same name
resp = self.request(
path='/folder', method='POST', user=self.admin, params={
'name': ' My public subfolder ',
'parentId': publicFolder['_id']
})
self.assertValidationError(resp, 'name')
# Create a folder in the user
resp = self.request(
path='/folder', method='POST', user=self.admin, params={
'name': 'New User Folder',
'parentType': 'user',
'parentId': str(self.admin['_id'])
})
self.assertStatus(resp, 200)
def testReuseExisting(self):
self.ensureRequiredParams(
path='/folder', method='POST', required=['name', 'parentId'],
user=self.admin)
# Grab the default user folders
resp = self.request(
path='/folder', method='GET', user=self.admin, params={
'parentType': 'user',
'parentId': self.admin['_id'],
'sort': 'name',
'sortdir': 1
})
publicFolder = resp.json[1]
# Actually create subfolder under Public
newFolder = self.request(
path='/folder', method='POST', user=self.admin, params={
'name': ' My public subfolder ',
'parentId': publicFolder['_id']
})
self.assertStatusOk(newFolder)
# Try to create a folder with same name, reuseExisting flag not set
resp = self.request(
path='/folder', method='POST', user=self.admin, params={
'name': ' My public subfolder ',
'parentId': publicFolder['_id']
})
self.assertValidationError(resp, 'name')
# Create folder with same name, reuseExisting flag set
reuseFolder = self.request(
path='/folder', method='POST', user=self.admin, params={
'name': ' My public subfolder ',
'parentId': publicFolder['_id'],
'reuseExisting': True
})
self.assertStatusOk(reuseFolder)
self.assertEqual(newFolder.json['_id'], reuseFolder.json['_id'])
def testFolderMetadataDirect(self):
resp = self.request(
path='/folder', method='GET', user=self.admin, params={
'parentType': 'user',
'parentId': self.admin['_id'],
'sort': 'name',
'sortdir': 1
})
self.assertStatusOk(resp)
publicFolder = resp.json[1]
# Actually create subfolder under Public
resp = self.request(
path='/folder', method='POST', user=self.admin, params={
'name': ' My public subfolder ',
'parentId': publicFolder['_id'],
'metadata': 'invalid json'
})
self.assertStatus(resp, 400)
self.assertEqual(
resp.json['message'],
'Parameter metadata must be valid JSON.')
metadata = {
'foo': 'bar',
'test': 2
}
resp = self.request(
path='/folder', method='POST', user=self.admin, params={
'name': ' My public subfolder with meta',
'parentId': publicFolder['_id'],
'metadata': json.dumps(metadata)}
)
self.assertStatusOk(resp)
folder = resp.json
self.assertEqual(folder['meta']['foo'], metadata['foo'])
self.assertEqual(folder['meta']['test'], metadata['test'])
metadata = {
'foo': None,
'test': 3,
'bar': 'baz'
}
resp = self.request(
path='/folder/{_id}'.format(**folder), method='PUT',
user=self.admin, params={'metadata': json.dumps(metadata)}
)
self.assertStatusOk(resp)
folder = resp.json
self.assertNotHasKeys(folder['meta'], ['foo'])
self.assertEqual(folder['meta']['test'], metadata['test'])
self.assertEqual(folder['meta']['bar'], metadata['bar'])
def testFolderMetadataCrud(self):
"""
Test CRUD of metadata on folders
"""
# Grab the default user folders
resp = self.request(
path='/folder', method='GET', user=self.admin, params={
'parentType': 'user',
'parentId': self.admin['_id'],
'sort': 'name',
'sortdir': 1
})
self.assertStatusOk(resp)
publicFolder = resp.json[1]
# Actually create subfolder under Public
resp = self.request(
path='/folder', method='POST', user=self.admin, params={
'name': ' My public subfolder ',
'parentId': publicFolder['_id']
})
self.assertStatusOk(resp)
folder = resp.json
# Test that bad json fails
resp = self.request(path='/folder/%s/metadata' % folder['_id'],
method='PUT', user=self.admin,
body='badJSON', type='application/json')
self.assertStatus(resp, 400)
# Add some metadata
metadata = {
'foo': 'bar',
'test': 2
}
resp = self.request(path='/folder/%s/metadata' % folder['_id'],
method='PUT', user=self.admin,
body=json.dumps(metadata), type='application/json')
folder = resp.json
self.assertEqual(folder['meta']['foo'], metadata['foo'])
self.assertEqual(folder['meta']['test'], metadata['test'])
# Edit and remove metadata
metadata['test'] = None
metadata['foo'] = 'baz'
resp = self.request(path='/folder/%s/metadata' % folder['_id'],
method='PUT', user=self.admin,
body=json.dumps(metadata), type='application/json')
folder = resp.json
self.assertEqual(folder['meta']['foo'], metadata['foo'])
self.assertNotHasKeys(folder['meta'], ['test'])
# Make sure metadata cannot be added if there is a period in the key
# name
metadata = {
'foo.bar': 'notallowed'
}
resp = self.request(path='/folder/%s/metadata' % folder['_id'],
method='PUT', user=self.admin,
body=json.dumps(metadata), type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(
resp.json['message'], 'Invalid key foo.bar: keys must not contain the "." character.')
# Make sure metadata cannot be added if the key begins with a $
metadata = {
'$foobar': 'alsonotallowed'
}
resp = self.request(path='/folder/%s/metadata' % folder['_id'],
method='PUT', user=self.admin,
body=json.dumps(metadata), type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(
resp.json['message'],
'Invalid key $foobar: keys must not start with the "$" character.')
# Test allowNull
metadata = {
'foo': None
}
resp = self.request(
path='/folder/%s/metadata' % folder['_id'], params={'allowNull': True},
user=self.admin, method='PUT', body=json.dumps(metadata), type='application/json')
self.assertStatusOk(resp)
self.assertEqual(resp.json['meta'], metadata)
# Test delete metadata endpoint
resp = self.request(
path='/folder/%s/metadata' % folder['_id'], user=self.admin, method='DELETE',
body=json.dumps(['foo']), type='application/json')
self.assertStatusOk(resp)
self.assertEqual(resp.json['meta'], {})
def testDeleteFolder(self):
cbInfo = {}
# Hook into model deletion with kwargs event to test it
def cb(event):
cbInfo['kwargs'] = event.info['kwargs']
cbInfo['doc'] = event.info['document']
with events.bound('model.folder.remove_with_kwargs', 'test', cb):
# Requesting with no path should fail
resp = self.request(path='/folder', method='DELETE',
user=self.admin)
self.assertStatus(resp, 400)
# Grab one of the user's top level folders
folders = Folder().childFolders(
parent=self.admin, parentType='user', user=self.admin, limit=1,
sort=[('name', SortDir.DESCENDING)])
folderResp = six.next(folders)
# Add a subfolder and an item to that folder
subfolder = Folder().createFolder(
folderResp, 'sub', parentType='folder', creator=self.admin)
item = Item().createItem('item', creator=self.admin, folder=subfolder)
self.assertTrue('_id' in subfolder)
self.assertTrue('_id' in item)
# Delete the folder
resp = self.request(path='/folder/%s' % folderResp['_id'],
method='DELETE', user=self.admin, params={
'progress': 'true'
})
self.assertStatusOk(resp)
# Make sure the folder, its subfolder, and its item were all deleted
folder = Folder().load(folderResp['_id'], force=True)
subfolder = Folder().load(subfolder['_id'], force=True)
item = Item().load(item['_id'])
self.assertEqual(folder, None)
self.assertEqual(subfolder, None)
self.assertEqual(item, None)
# Make sure progress record exists and that it is set to expire soon
notifs = list(Notification().get(self.admin))
self.assertEqual(len(notifs), 1)
self.assertEqual(notifs[0]['type'], 'progress')
self.assertEqual(notifs[0]['data']['state'], ProgressState.SUCCESS)
self.assertEqual(notifs[0]['data']['title'],
'Deleting folder Public')
self.assertEqual(notifs[0]['data']['message'], 'Done')
self.assertEqual(notifs[0]['data']['total'], 3)
self.assertEqual(notifs[0]['data']['current'], 3)
self.assertTrue(notifs[0]['expires'] < datetime.datetime.utcnow() +
datetime.timedelta(minutes=1))
# Make sure our event handler was called with expected args
self.assertTrue('kwargs' in cbInfo)
self.assertTrue('doc' in cbInfo)
self.assertTrue('progress' in cbInfo['kwargs'])
self.assertEqual(cbInfo['doc']['_id'], folderResp['_id'])
def testCleanFolder(self):
folder = six.next(Folder().childFolders(
parent=self.admin, parentType='user', user=self.admin, limit=1,
sort=[('name', SortDir.DESCENDING)]))
# Add some data under the folder
subfolder = Folder().createFolder(
folder, 'sub', parentType='folder', creator=self.admin)
item = Item().createItem('item', creator=self.admin, folder=folder)
subitem = Item().createItem('item', creator=self.admin, folder=subfolder)
# Clean the folder contents
resp = self.request(path='/folder/%s/contents' % folder['_id'],
method='DELETE', user=self.admin, params={
'progress': 'true'
})
self.assertStatusOk(resp)
# Make sure the subfolder and items were deleted, but that the top
# folder still exists.
old, folder = folder, Folder().load(folder['_id'], force=True)
subfolder = Folder().load(subfolder['_id'], force=True)
item = Item().load(item['_id'])
subitem = Item().load(subitem['_id'])
self.assertTrue('_id' in folder)
self.assertEqual(folder, old)
self.assertEqual(subfolder, None)
self.assertEqual(item, None)
self.assertEqual(subitem, None)
def testLazyFieldComputation(self):
"""
Demonstrate that a folder that is saved in the database without
derived fields (like lowerName or baseParentId) get those values
computed at load() time.
"""
folder = Folder().createFolder(
parent=self.admin, parentType='user', creator=self.admin,
name=' My Folder Name')
self.assertEqual(folder['lowerName'], 'my folder name')
self.assertEqual(folder['baseParentType'], 'user')
# Force the item to be saved without lowerName and baseParentType
# fields
del folder['lowerName']
del folder['baseParentType']
folder = Folder().save(folder, validate=False)
folder = Folder().find({'_id': folder['_id']})[0]
self.assertNotHasKeys(folder, ('lowerName', 'baseParentType'))
# Now ensure that calling load() actually populates those fields and
# saves the results persistently
Folder().load(folder['_id'], force=True)
folder = Folder().find({'_id': folder['_id']})[0]
self.assertHasKeys(folder, ('lowerName', 'baseParentType'))
self.assertEqual(folder['lowerName'], 'my folder name')
self.assertEqual(folder['baseParentType'], 'user')
self.assertEqual(folder['baseParentId'], self.admin['_id'])
def testParentsToRoot(self):
"""
Demonstrate that forcing parentsToRoot will cause it to skip the
filtering process.
"""
userFolder = Folder().createFolder(
parent=self.admin, parentType='user', creator=self.admin,
name=' My Folder Name')
# Filtering adds the _accessLevel key to the object
# So forcing should result in an absence of that key
parents = Folder().parentsToRoot(userFolder, force=True)
for parent in parents:
self.assertNotIn('_accessLevel', parent['object'])
parents = Folder().parentsToRoot(userFolder)
for parent in parents:
self.assertIn('_accessLevel', parent['object'])
# The logic is a bit different for user/collection parents,
# so we need to handle the other case
subFolder = Folder().createFolder(
parent=userFolder, parentType='folder', creator=self.admin,
name=' My Subfolder Name')
parents = Folder().parentsToRoot(subFolder, force=True)
for parent in parents:
self.assertNotIn('_accessLevel', parent['object'])
parents = Folder().parentsToRoot(subFolder, user=self.admin)
for parent in parents:
self.assertIn('_accessLevel', parent['object'])
def testFolderAccessAndDetails(self):
# create a folder to work with
folder = Folder().createFolder(
parent=self.admin, parentType='user', creator=self.admin,
name='Folder')
resp = self.request(
path='/folder/%s/access' % folder['_id'], method='GET',
user=self.admin)
self.assertStatusOk(resp)
access = resp.json
self.assertEqual(access, {
'users': [{
'login': self.admin['login'],
'level': AccessType.ADMIN,
'id': str(self.admin['_id']),
'flags': [],
'name': '%s %s' % (
self.admin['firstName'], self.admin['lastName'])}],
'groups': []
})
self.assertTrue(not folder.get('public'))
# Setting the access list with bad json should throw an error
resp = self.request(
path='/folder/%s/access' % folder['_id'], method='PUT',
user=self.admin, params={'access': 'badJSON'})
self.assertStatus(resp, 400)
# Change the access to public
resp = self.request(
path='/folder/%s/access' % folder['_id'], method='PUT',
user=self.admin,
params={'access': json.dumps(access), 'public': True})
self.assertStatusOk(resp)
resp = self.request(
path='/folder/%s' % folder['_id'], method='GET',
user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(resp.json['public'], True)
# Create an item in the folder
Item().createItem(folder=folder, creator=self.admin, name='Item')
# Create a public and private folder within the folder
Folder().createFolder(
parent=folder, parentType='folder', creator=self.admin,
name='Public', public=True)
Folder().createFolder(
parent=folder, parentType='folder', creator=self.admin,
name='Private', public=False)
# Test folder details as anonymous
resp = self.request(
path='/folder/%s/details' % str(folder['_id']))
self.assertStatusOk(resp)
self.assertEqual(resp.json['nItems'], 1)
self.assertEqual(resp.json['nFolders'], 1)
# Test folder details as admin
resp = self.request(
path='/folder/%s/details' % str(folder['_id']), user=self.admin)
self.assertStatusOk(resp)
self.assertEqual(resp.json['nItems'], 1)
self.assertEqual(resp.json['nFolders'], 2)
def testFolderCopy(self):
# create a folder with a subfolder, items, and metadata
mainFolder = Folder().createFolder(
parent=self.admin, parentType='user', creator=self.admin,
name='Main Folder')
subFolder = Folder().createFolder(
parent=mainFolder, parentType='folder', creator=self.admin,
name='Sub Folder')
mainItem = Item().createItem('Main Item', creator=self.admin, folder=mainFolder)
subItem = Item().createItem('Sub Item', creator=self.admin, folder=subFolder)
metadata = {'key': 'value'}
resp = self.request(
path='/folder/%s/metadata' % mainFolder['_id'], method='PUT',
user=self.admin, body=json.dumps(metadata),
type='application/json')
self.assertStatusOk(resp)
# Add a file under the main item to test size reporting
size = 5
self.uploadFile(
name='test.txt', contents='.' * size, user=self.admin,
parent=mainItem, parentType='item')
mainFolder = Folder().load(mainFolder['_id'], force=True)
self.assertEqual(mainFolder['size'], size)
# Now copy the folder alongside itself
resp = self.request(
path='/folder/%s/copy' % mainFolder['_id'], method='POST',
user=self.admin)
self.assertStatusOk(resp)
# Check our new folder information
newFolder = resp.json
self.assertEqual(newFolder['name'], 'Main Folder (1)')
self.assertEqual(newFolder['size'], size)
# Check the copied item inside the new folder
resp = self.request('/item', user=self.admin, params={
'folderId': newFolder['_id']})
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 1)
self.assertEqual(resp.json[0]['name'], 'Main Item')
self.assertEqual(resp.json[0]['size'], size)
# Check copied folder metadata
resp = self.request(
path='/folder/%s' % newFolder['_id'], method='GET',
user=self.admin, type='application/json')
self.assertStatusOk(resp)
self.assertEqual(resp.json['meta'], metadata)
# Check for the item, subfolder, and subfolder item
resp = self.request(
path='/folder', method='GET',
params={'parentType': 'folder', 'parentId': str(newFolder['_id'])},
user=self.admin, type='application/json')
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 1)
newSub = resp.json[0]
self.assertEqual(newSub['name'], subFolder['name'])
self.assertNotEqual(str(newSub['_id']), str(subFolder['_id']))
resp = self.request(
path='/item', method='GET',
params={'folderId': str(newFolder['_id'])},
user=self.admin, type='application/json')
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 1)
newItem = resp.json[0]
self.assertEqual(newItem['name'], mainItem['name'])
self.assertNotEqual(str(newItem['_id']), str(mainItem['_id']))
resp = self.request(
path='/item', method='GET',
params={'folderId': str(newSub['_id'])},
user=self.admin, type='application/json')
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 1)
newSubItem = resp.json[0]
self.assertEqual(newSubItem['name'], subItem['name'])
self.assertNotEqual(str(newSubItem['_id']), str(subItem['_id']))
# Test copying the subFolder
resp = self.request(
path='/folder/%s/copy' % subFolder['_id'], method='POST',
user=self.admin, params={'public': 'original', 'progress': True})
self.assertStatusOk(resp)
# Check our new folder name
newSubFolder = resp.json
self.assertEqual(newSubFolder['name'], 'Sub Folder (1)')
# Test that a bogus parentType throws an error
resp = self.request(
path='/folder/%s/copy' % subFolder['_id'], method='POST',
user=self.admin, params={'parentType': 'badValue'})
self.assertStatus(resp, 400)
# Test that when we copy a folder into itself we don't recurse
resp = self.request(
path='/folder/%s/copy' % subFolder['_id'], method='POST',
user=self.admin, params={
'progress': True,
'parentType': 'folder',
'parentId': str(subFolder['_id'])})
self.assertStatusOk(resp)
# Test copying with public set to False
resp = self.request(
path='/folder/%s/copy' % subFolder['_id'], method='POST',
user=self.admin, params={'public': 'false', 'progress': True})
self.assertStatusOk(resp)
| |
#!/usr/bin/env python
# Subclass of fract4d.fractal.T which works with a GUI
import sys
import os
import struct
import math
import copy
import random
import gtk
import gobject
from fract4d import fractal,fract4dc,fracttypes, image, messages
import utils, fourway
from gtkio import gtkio
class Hidden(gobject.GObject):
    """This class implements a fractal which calculates asynchronously
    and is integrated with the GTK main loop.

    A worker (driven by fract4dc) writes binary messages to a pipe; onData()
    is invoked by the GTK main loop when the read end is readable, parses
    each message, and re-emits it as a GObject signal. Unknown attribute
    accesses are delegated to the wrapped fractal.T via __getattr__.
    """
    # Custom GObject signals emitted as calculation state changes.
    __gsignals__ = {
        'parameters-changed' : (
            (gobject.SIGNAL_RUN_FIRST | gobject.SIGNAL_NO_RECURSE),
            gobject.TYPE_NONE, ()),
        'iters-changed' : (
            (gobject.SIGNAL_RUN_FIRST | gobject.SIGNAL_NO_RECURSE),
            gobject.TYPE_NONE, (gobject.TYPE_INT,)),
        'tolerance-changed' : (
            (gobject.SIGNAL_RUN_FIRST | gobject.SIGNAL_NO_RECURSE),
            gobject.TYPE_NONE, (gobject.TYPE_FLOAT,)),
        'formula-changed' : (
            (gobject.SIGNAL_RUN_FIRST | gobject.SIGNAL_NO_RECURSE),
            gobject.TYPE_NONE, ()),
        'status-changed' : (
            (gobject.SIGNAL_RUN_FIRST | gobject.SIGNAL_NO_RECURSE),
            gobject.TYPE_NONE, (gobject.TYPE_INT,)),
        'progress-changed' : (
            (gobject.SIGNAL_RUN_FIRST | gobject.SIGNAL_NO_RECURSE),
            gobject.TYPE_NONE, (gobject.TYPE_FLOAT,)),
        'pointer-moved' : (
            (gobject.SIGNAL_RUN_FIRST | gobject.SIGNAL_NO_RECURSE),
            gobject.TYPE_NONE, (gobject.TYPE_INT,
                gobject.TYPE_FLOAT, gobject.TYPE_FLOAT)),
        'stats-changed' : (
            (gobject.SIGNAL_RUN_FIRST | gobject.SIGNAL_NO_RECURSE),
            gobject.TYPE_NONE, (gobject.TYPE_PYOBJECT,))
        }

    def __init__(self,comp,width,height,total_width=-1,total_height=-1):
        """Create the worker pipe, message site, and backing image.

        comp: the formula compiler. width/height: size of this fractal's
        image buffer. total_width/total_height: full image size for tiled
        rendering (-1 means same as width/height — see image.T).
        """
        gobject.GObject.__init__(self)
        if 'win' == sys.platform[:3]:
            (self.readfd, self.writefd) = fract4dc.pipe()
        else:
            # This is the line that was screwing Windows up.. changed to be run only on Linux, for Windows, we want to do this in fract4dc..
            (self.readfd, self.writefd) = os.pipe()
        self.nthreads = 1
        self.compiler = comp
        self.x = self.y = 0
        self.button = 0
        self.last_progress = 0.0
        self.skip_updates = False
        self.running = False
        self.frozen = False # if true, don't emit signals
        # The site is where the calculation worker posts its messages.
        self.site = fract4dc.fdsite_create(self.writefd)
        self.f = None
        self.try_init_fractal()
        # Watch the read end of the pipe in the GTK main loop.
        self.input_add(self.readfd, self.onData)
        self.width = width
        self.height = height
        self.image = image.T(
            self.width,self.height,total_width,total_height)
        self.msgbuf = ""
        self.io_subsys = gtkio();

    def try_init_fractal(self):
        """Create a default fractal, install it, and compile its formula."""
        f = fractal.T(self.compiler,self.site)
        self.set_fractal(f)
        self.f.compile()

    def set_fractal(self,f):
        """Replace the wrapped fractal, interrupting any running calc and
        redirecting the fractal's callbacks to this wrapper."""
        if f != self.f:
            if self.f:
                self.interrupt()
                del self.f
            self.f = f
            # take over fractal's changed function
            f.changed = self.changed
            f.formula_changed = self.formula_changed
            f.warn = self.warn
            self.formula_changed()
            self.changed()

    def changed(self,clear_image=True):
        """Mark the fractal dirty and emit parameters-changed (unless frozen)."""
        if self.f == None:
            return
        self.f.dirty=True
        self.f.clear_image = clear_image
        self.set_saved(False)
        if not self.frozen:
            self.emit('parameters-changed')

    def formula_changed(self):
        """Mark the formula dirty and emit formula-changed (even when frozen)."""
        #print "formula changed"
        self.f.dirtyFormula = True
        #if not self.frozen:
        self.emit('formula-changed')

    def set_saved(self,val):
        # Record whether the fractal matches what's on disk.
        if self.f != None:
            self.f.saved = val

    def input_add(self,fd,cb):
        """Register fd with the GTK main loop; cb fires when it's readable."""
        utils.input_add(fd,cb)

    def error(self,msg,err):
        # Default error reporting: print to stdout (GUI subclasses may override).
        print "Error: %s %s" % (msg,err)

    def warn(self,msg):
        # Default warning reporting: print to stdout.
        print "Warning: ", msg

    def update_formula(self):
        if self.f != None:
            self.f.dirtyFormula = True

    def freeze(self):
        """Suppress parameters-changed signals until thaw() is called."""
        self.frozen = True

    def thaw(self):
        """Re-enable signals; returns whether the fractal was dirty
        (i.e. whether the caller should emit changed())."""
        if self.f == None:
            return False

        self.frozen = False
        was_dirty = self.f.dirty
        self.f.clean()
        return was_dirty

    def interrupt(self):
        """Stop the current calculation and drain pending pipe messages."""
        if self.skip_updates:
            #print "skip recursive interrupt"
            return

        # While draining, onData must ignore stale updates.
        self.skip_updates = True

        fract4dc.interrupt(self.site)

        n = 0
        # wait for stream from worker to flush
        while self.running:
            n += 1
            gtk.main_iteration(True)

        self.skip_updates = False

    def copy_f(self):
        """Return a shallow copy of the wrapped fractal."""
        return copy.copy(self.f)

    def set_formula(self, fname, formula,index=0):
        self.f.set_formula(fname, formula,index)

    def onData(self,fd,condition):
        """GTK io callback: read one (type,size) header + payload from the
        worker pipe, parse it, and dispatch to the matching handler.

        Returning True keeps the watch installed.
        """
        # Accumulate until we have the full 8-byte (2 x int) header.
        self.msgbuf = self.msgbuf + self.io_subsys.read(fd, 8 - len(self.msgbuf))

        if len(self.msgbuf) < 8:
            #print "incomplete message: %s" % list(self.msgbuf)
            return True

        (t,size) = struct.unpack("2i",self.msgbuf)
        self.msgbuf = ""
        bytes = self.io_subsys.read(fd,size)
        if len(bytes) < size:
            print "not enough bytes, got %d instead of %d" % (len(bytes),size)
            return True

        m = messages.parse(t,bytes)

        if utils.threads_enabled:
            gtk.gdk.threads_enter()

        #print "msg: %s %d %d %d %d" % (m,p1,p2,p3,p4)
        if t == fract4dc.MESSAGE_TYPE_ITERS:
            if not self.skip_updates: self.iters_changed(m.iterations)
        elif t == fract4dc.MESSAGE_TYPE_IMAGE:
            if not self.skip_updates: self.image_changed(m.x, m.y, m.w, m.h)
        elif t == fract4dc.MESSAGE_TYPE_PROGRESS:
            if not self.skip_updates:
                progress = m.progress
                # filters out 'backwards' progress which can occur due to threading
                if progress > self.last_progress or progress == 0.0:
                    self.progress_changed(progress)
                    self.last_progress = progress
        elif t == fract4dc.MESSAGE_TYPE_STATUS:
            if m.status == fract4dc.CALC_DONE: # DONE
                self.running = False
            if not self.skip_updates: self.status_changed(m.status)
        elif t == fract4dc.MESSAGE_TYPE_PIXEL:
            # FIXME pixel_changed
            pass
        elif t == fract4dc.MESSAGE_TYPE_TOLERANCE:
            # tolerance changed
            if not self.skip_updates: self.tolerance_changed(m.tolerance)
        elif t == fract4dc.MESSAGE_TYPE_STATS:
            if not self.skip_updates: self.stats_changed(m)
        else:
            print "Unknown message from fractal thread; %s" % list(bytes)

        if utils.threads_enabled:
            gtk.gdk.threads_leave()

        return True

    def __getattr__(self,name):
        # Delegate unknown attributes to the wrapped fractal.T instance.
        return getattr(self.f,name)

    def params(self):
        return self.f.params

    def get_param(self,n):
        return self.f.get_param(n)

    def set_nthreads(self, n):
        """Set the number of calculation threads (marks fractal changed)."""
        if self.nthreads != n:
            self.nthreads = n
            self.changed()

    def set_auto_deepen(self,deepen):
        if self.f.auto_deepen != deepen:
            self.f.auto_deepen = deepen
            self.changed()

    def set_antialias(self,aa_type):
        if self.f.antialias != aa_type:
            self.f.antialias = aa_type
            self.changed()

    def set_func(self,func,fname,formula):
        self.f.set_func(func,fname,formula)

    def improve_quality(self):
        """Double maxiter and tighten the period tolerance in one change."""
        self.freeze()
        self.set_maxiter(self.f.maxiter*2)
        self.set_period_tolerance(self.f.period_tolerance / 10.0)
        self.thaw()
        self.changed()

    def reset(self):
        self.f.reset()
        self.changed()

    def loadFctFile(self,file):
        """Load a .fct file into a new fractal and make it current."""
        new_f = fractal.T(self.compiler,self.site)
        new_f.warn = self.warn
        new_f.loadFctFile(file)
        self.set_fractal(new_f)
        self.set_saved(True)

    def is_saved(self):
        if self.f == None:
            return True
        return self.f.saved

    def save_image(self,filename):
        self.image.save(filename)

    # The following callbacks relay worker pipe messages as GObject signals.
    def progress_changed(self,progress):
        self.emit('progress-changed',progress)

    def status_changed(self,status):
        self.emit('status-changed',status)

    def iters_changed(self,n):
        self.f.maxiter = n
        # don't emit a parameters-changed here to avoid deadlock
        self.emit('iters-changed',n)

    def tolerance_changed(self,tolerance):
        self.f.period_tolerance = tolerance
        self.emit('tolerance-changed', tolerance)

    def image_changed(self,x1,y1,x2,y2):
        # No-op here; visible subclasses redraw the affected rectangle.
        pass

    def stats_changed(self,stats):
        self.emit('stats-changed', stats)

    def draw(self,image,width,height,nthreads):
        """Kick off an asynchronous calculation into 'image'."""
        t = self.f.epsilon_tolerance(width,height)
        if self.f.auto_epsilon:
            self.f.set_named_param("@epsilon",t,
                                   self.f.formula, self.f.initparams)

        self.f.init_pfunc()
        cmap = self.f.get_colormap()
        self.running = True
        try:
            self.f.calc(image,cmap, nthreads, self.site, True)
        except MemoryError:
            pass

    def draw_image(self,aa=None,auto_deepen=None):
        """Recompile if needed and (re)draw the whole image, optionally
        overriding antialias and auto_deepen settings."""
        if self.f == None:
            return
        self.interrupt()

        self.f.compile()

        if aa != None and auto_deepen != None:
            self.f.antialias = aa
            self.f.auto_deepen = auto_deepen

        self.draw(self.image,self.width,self.height,self.nthreads)

    def set_plane(self,angle1,angle2):
        """Rotate into a different 4D viewing plane by setting the given
        angle parameters to pi/2 (None means leave that angle alone)."""
        self.freeze()
        self.reset_angles()
        # NOTE(review): first call goes through __getattr__ to self.f, the
        # second is explicit — behaviorally equivalent.
        if angle1 != None:
            self.set_param(angle1,math.pi/2)
        if angle2 != None:
            self.f.set_param(angle2,math.pi/2)

        if self.thaw():
            self.changed()

    def float_coords(self,x,y):
        # Convert pixel coords to fractions of the image, centered at 0.
        # Both axes divide by width, keeping the aspect ratio uniform.
        return ((x - self.width/2.0)/self.width,
                (y - self.height/2.0)/self.width)

    def recenter(self,x,y,zoom):
        """Pan so pixel (x,y) becomes the center, then zoom by 'zoom'."""
        dx = (x - self.width/2.0)/self.width
        dy = (y - self.height/2.0)/self.width
        self.relocate(dx,dy,zoom)

    def count_colors(self,rect):
        # calculate the number of different colors which appear
        # in the subsection of the image bounded by the rectangle
        (xstart,ystart,xend,yend) = rect
        buf = self.image.image_buffer(0,0)
        colors = {}
        for y in xrange(ystart,yend):
            for x in xrange(xstart,xend):
                # 3 bytes (RGB) per pixel in the image buffer.
                offset = (y*self.width+x)*3
                col = buf[offset:offset+3]
                colors[col] = 1 + colors.get(col,0)
        return len(colors)

    def get_func_name(self):
        if self.f == None:
            return _("No fractal loaded")
        return self.f.forms[0].funcName

    def get_saved(self):
        if self.f == None:
            return True
        return self.f.get_saved()

    def serialize(self,compress=False):
        """Return the fractal's serialized form, or None if none is loaded."""
        if self.f == None:
            return None
        return self.f.serialize(compress)

    def set_size(self, new_width, new_height):
        """Resize the image buffer; defers the changed() emission to idle
        time so resize events coalesce."""
        self.interrupt()
        if self.width == new_width and self.height == new_height :
            return

        self.width = new_width
        self.height = new_height

        self.image.resize_full(new_width, new_height)
        utils.idle_add(self.changed)
# Register Hidden with the GObject type system so the custom signals
# declared in __gsignals__ can be emitted and connected to.
gobject.type_register(Hidden)
class HighResolution(Hidden):
    """An invisible GtkFractal that renders a large image as a sequence of
    full-width horizontal tiles, saving each tile to disk as it completes."""

    def __init__(self, comp, width, height):
        """Create a tiled renderer for a full image of width x height."""
        (tw, th) = self.compute_tile_size(width, height)
        Hidden.__init__(self, comp, tw, th, width, height)
        self.reset_render()

    def reset_render(self):
        """Fetch a fresh tile list and clear all progress bookkeeping."""
        self.tile_list = self.image.get_tile_list()
        self.ntiles = len(self.tile_list)
        self.ncomplete_tiles = 0
        self.last_overall_progress = 0.0

    def compute_tile_size(self, w, h):
        """Tiles span the full width but are capped at 128 rows high."""
        return (w, min(h, 128))

    def draw_image(self, name):
        """Begin rendering tile by tile, streaming output into file 'name'."""
        if self.f == None:
            return
        self.interrupt()
        self.f.compile()
        # Per-tile deepening/tolerance adjustment would make tiles inconsistent.
        self.f.auto_deepen = False
        self.f.auto_tolerance = False
        self.image.start_save(name)
        self.next_tile()
        return False

    def next_tile(self):
        """Pop the next pending tile and start computing it."""
        (xoff, yoff, tw, th) = self.tile_list.pop(0)
        self.image.resize_tile(tw, th)
        self.image.set_offset(xoff, yoff)
        self.draw(self.image, tw, th, self.nthreads)

    def status_changed(self, status):
        """On tile completion (status 0), save it and either queue the next
        tile or finalize the file; always forward terminal status signals."""
        if status != 0:
            self.emit('status-changed', status)
            return
        # This chunk is done: persist it and advance.
        self.image.save_tile()
        self.ncomplete_tiles += 1
        if self.tile_list:
            self.next_tile()
        else:
            # All tiles rendered: close out the file and report completion.
            self.image.finish_save()
            self.emit('status-changed', status)

    def progress_changed(self, progress):
        """Re-scale per-tile progress into overall progress, discarding any
        backwards jumps caused by threading."""
        overall = (100.0 * self.ncomplete_tiles + progress) / self.ntiles
        if overall > self.last_overall_progress:
            self.emit('progress-changed', overall)
            self.last_overall_progress = overall
class T(Hidden):
    "A visible GtkFractal which responds to user input"
    def __init__(self,comp,parent=None,width=640,height=480):
        # parent: main window, used for error reporting and fourway drags
        self.parent = parent
        Hidden.__init__(self,comp,width,height)
        self.paint_mode = False
        drawing_area = gtk.DrawingArea()
        # subscribe to every event type handled by the on* methods below
        drawing_area.set_events(
            gtk.gdk.BUTTON_RELEASE_MASK |
            gtk.gdk.BUTTON1_MOTION_MASK |
            gtk.gdk.POINTER_MOTION_MASK |
            gtk.gdk.POINTER_MOTION_HINT_MASK |
            gtk.gdk.BUTTON_PRESS_MASK |
            gtk.gdk.KEY_PRESS_MASK |
            gtk.gdk.KEY_RELEASE_MASK |
            gtk.gdk.EXPOSURE_MASK
            )
        # only draw the zoom rectangle while button 1 is held
        self.notice_mouse = False
        drawing_area.connect('motion_notify_event', self.onMotionNotify)
        drawing_area.connect('button_release_event', self.onButtonRelease)
        drawing_area.connect('button_press_event', self.onButtonPress)
        drawing_area.connect('expose_event',self.onExpose)
        c = utils.get_rgb_colormap()
        drawing_area.set_colormap(c)
        drawing_area.set_size_request(self.width,self.height)
        self.widget = drawing_area
    def image_changed(self,x1,y1,x2,y2):
        # repaint only the dirty rectangle reported by the image
        self.redraw_rect(x1,y1,x2-x1,y2-y1)
    # def changed(self):
    #     Hidden.changed(self)
    #     try:
    #         widget = self.widget
    #     except Exception, e:
    #         return
    #     self.widget.queue_draw_area(0, 0, self.width, self.height)
    #     #self.expose()
    def make_numeric_entry(self, form, param, order):
        """Build a text entry (plus slider, if the param has min/max bounds)
        bound to the numeric parameter at position 'order' in 'form'."""
        param_type = form.paramtypes[order]
        if param.type == fracttypes.Int:
            fmt = "%d"
        else:
            fmt = "%.17f"
        widget = gtk.Entry()
        widget.set_activates_default(True)
        def set_entry():
            # model -> view; avoid redundant set_text to prevent signal loops
            new_value = fmt % form.params[order]
            if widget.get_text() != new_value:
                widget.set_text(new_value)
        def set_fractal(entry,event,form,order):
            # view -> model, deferred to the main loop
            try:
                utils.idle_add(
                    form.set_param,order,entry.get_text())
            except Exception, err:
                # FIXME: produces too many errors
                msg = "Invalid value '%s': must be a number" % \
                      entry.get_text()
                print msg
                #utils.idle_add(f.warn,msg)
            return False
        set_entry()
        # 'update_function' is called by the settings dialog on model changes
        widget.set_data("update_function", set_entry)
        widget.f = self
        widget.connect('focus-out-event',
                       set_fractal,form,order)
        if hasattr(param, "min") and hasattr(param, "max"):
            # add a slider
            adj = gtk.Adjustment(
                0.0,param.min.value, param.max.value,
                0.001,
                0.01)
            def set_adj():
                if adj.value != form.params[order]:
                    adj.set_value(form.params[order])
            set_adj()
            def adj_changed(adjustment,form,order):
                utils.idle_add(
                    form.set_param, order, adjustment.value)
            adj.connect('value-changed', adj_changed, form, order)
            hscale = gtk.HScale(adj)
            hscale.set_draw_value(False)
            hscale.set_update_policy(gtk.UPDATE_DELAYED)
            hscale.set_data("update_function",set_adj)
            # entry stacked above its slider
            vbox = gtk.VBox()
            vbox.pack_start(widget)
            vbox.pack_start(hscale)
            return vbox
        return widget
    def make_numeric_widget(
        self, table, i, form, name, part, param, order):
        """Attach a label at table row i and return the matching numeric entry."""
        label = gtk.Label(self.param_display_name(name,param)+part)
        label.set_alignment(1.0, 0.0)
        table.attach(label,0,1,i,i+1,gtk.EXPAND | gtk.FILL,0,0,0)
        widget = self.make_numeric_entry(
            form, param, order)
        label.set_mnemonic_widget(widget)
        return widget
    def make_bool_widget(self, form, name, param, order):
        """Return a check button bound to a boolean parameter."""
        widget = gtk.CheckButton(self.param_display_name(name,param))
        def set_toggle(*args):
            # model -> view
            is_set = form.params[order]
            widget.set_active(is_set)
            if widget.get_active() != is_set:
                widget.set_active(is_set)
        def set_fractal(entry,form,order):
            try:
                utils.idle_add(form.set_param,order,entry.get_active())
            except Exception, err:
                msg = "error setting bool param: %s" % str(err)
                print msg
                # NOTE(review): 'f' here looks like it should be 'self.f' or
                # the widget's .f attribute — confirm before relying on it
                utils.idle_add(f.warn,msg)
            return False
        set_toggle(self)
        widget.set_data("update_function", set_toggle)
        widget.f = self
        widget.connect('toggled', set_fractal, form, order)
        return widget
    def make_color_widget(
        self, table, i, form, name, param, order):
        """Attach a label and return a color button bound to 4 consecutive
        params (r,g,b,a) starting at 'order'."""
        label = gtk.Label(self.param_display_name(name,param))
        label.set_alignment(1.0, 0.0)
        table.attach(label,0,1,i,i+1,gtk.EXPAND | gtk.FILL,0,0,0)
        def set_fractal(r, g, b, is_left):
            # batch the three component updates into one 'changed' event
            self.freeze()
            form.set_param(order, r)
            form.set_param(order+1, g)
            form.set_param(order+2, b)
            if self.thaw():
                self.changed()
        rgba = []
        for j in xrange(4):
            rgba.append(form.params[order+j])
        # do we need to keep this ref?
        color_button = utils.ColorButton(rgba, set_fractal, False)
        def set_selected_value(*args):
            rgba = []
            for j in xrange(4):
                rgba.append(form.params[order+j])
            color_button.set_color(rgba)
        set_selected_value()
        color_button.widget.set_data("update_function", set_selected_value)
        return color_button.widget
    def make_enumerated_widget(
        self, table, i, form, name, part, param, order):
        """Attach a label and return an option menu for an enumerated param."""
        label = gtk.Label(self.param_display_name(name,param))
        label.set_alignment(1.0, 0.0)
        table.attach(label,0,1,i,i+1,gtk.EXPAND | gtk.FILL,0,0,0)
        widget = utils.create_option_menu(param.enum.value)
        def set_selected_value(*args):
            try:
                index = form.params[order]
            except ValueError, err:
                print err
                return
            utils.set_selected(widget, index)
        def set_fractal(entry,form,order):
            new_value = utils.get_selected(widget)
            form.set_param(order, new_value)
        set_selected_value(self)
        widget.set_data("update_function", set_selected_value)
        widget.f = self
        widget.connect('changed',
                       set_fractal,form,order)
        label.set_mnemonic_widget(widget)
        return widget
    def add_formula_setting(
        self,table,i,form,name,part,param,order):
        """Create and attach the right editor widget for one parameter,
        dispatching on the parameter's fracttypes type."""
        if param.type == fracttypes.Int:
            if hasattr(param,"enum"):
                widget = self.make_enumerated_widget(
                    table, i,form,name,part,param,order)
            else:
                widget = self.make_numeric_widget(
                    table, i,form,name,part,param,order)
        elif param.type == fracttypes.Float or \
             param.type == fracttypes.Complex or \
             param.type == fracttypes.Hyper:
            widget = self.make_numeric_widget(
                table, i, form, name,part,param,order)
        elif param.type == fracttypes.Bool:
            widget = self.make_bool_widget(
                form, name,param,order)
        elif param.type == fracttypes.Color:
            widget = self.make_color_widget(
                table,i,form,name,param,order)
        elif param.type == fracttypes.Image:
            # skip image params for now
            return
        else:
            # NOTE(review): string exceptions are invalid in modern Python;
            # this should raise a real exception type
            raise "Unsupported parameter type"
        table.attach(widget,1,2,i,i+1,gtk.EXPAND | gtk.FILL ,0,0,0)
    def add_complex_formula_setting(
        self,table,i,form,name,param,order,param_type):
        """Attach two numeric entries (re/im) plus a fourway drag control
        for a complex parameter occupying rows i and i+1."""
        widget = self.make_numeric_entry(
            form,param,order)
        table.attach(widget,1,2,i,i+1,gtk.EXPAND | gtk.FILL ,0,0,0)
        widget = self.make_numeric_entry(
            form,param,order+1)
        table.attach(widget,1,2,i+1,i+2,gtk.EXPAND | gtk.FILL ,0,0,0)
        name = self.param_display_name(name,param)
        fway = fourway.T(name)
        tip = self.param_tip(name,param)
        fway.widget.set_tooltip_text(tip)
        fway.connect('value-changed',self.fourway_released, order, form)
        if self.parent:
            # live preview while dragging, handled by the main window
            fway.connect(
                'value-slightly-changed',
                self.parent.on_drag_param_fourway, order, param_type)
        table.attach(fway.widget,0,1,i,i+2, gtk.EXPAND|gtk.FILL,0, 0,0)
    def fourway_released(self,widget,x,y,order,form):
        # commit the drag delta to the (re,im) pair at 'order'
        form.nudge_param(order, x,y)
    def construct_function_menu(self,param,form):
        """Return a sorted list of function names compatible with 'param'."""
        funclist = form.formula.symbols.available_param_functions(
            param.ret,param.args)
        funclist.sort()
        return funclist
    def set_nthreads(self, n):
        """Change the number of rendering threads, triggering a redraw."""
        if self.nthreads != n:
            self.nthreads = n
            self.changed()
    def error(self,msg,err):
        """Report an error via the parent window, or stdout when headless."""
        print self, self.parent
        if self.parent:
            self.parent.show_error_message(msg, err)
        else:
            print "Error: %s : %s" % (msg,err)
    def warn(self,msg):
        """Report a warning via the parent window, or stdout when headless."""
        if self.parent:
            self.parent.show_warning(msg)
        else:
            print "Warning: ", msg
    def add_formula_function(self,table,i,name,param,form):
        """Attach a label and option menu for a function-valued parameter."""
        label = gtk.Label(self.param_display_name(name,param))
        label.set_alignment(1.0, 0.0)
        table.attach(label,0,1,i,i+1,gtk.EXPAND | gtk.FILL,0,0,0)
        funclist = self.construct_function_menu(param,form)
        widget = utils.create_option_menu(funclist)
        formula = form.formula
        def set_selected_function():
            try:
                selected_func_name = form.get_func_value(name)
                index = funclist.index(selected_func_name)
            except ValueError, err:
                # func.cname not in list
                #print "bad cname"
                return
            utils.set_selected(widget, index)
        def set_fractal_function(om,f,param,formula):
            index = utils.get_selected(om)
            if index != -1:
                # this shouldn't be necessary but I got weird errors
                # trying to reuse the old funclist
                list = formula.symbols.available_param_functions(
                    param.ret,param.args)
                list.sort()
                fname = list[index]
                f.set_func(param,fname,formula)
        set_selected_function()
        widget.set_data("update_function", set_selected_function)
        widget.connect('changed',set_fractal_function,self,param,formula)
        table.attach(widget,1,2,i,i+1,gtk.EXPAND | gtk.FILL,0,0,0)
    def create_maxiter_widget(self,table,i):
        """Attach the Max Iterations label+entry at row i; return next row."""
        label = gtk.Label("_Max Iterations")
        label.set_alignment(1.0, 0.0)
        label.set_use_underline(True)
        table.attach(label,0,1,i,i+1,gtk.EXPAND | gtk.FILL,0,0,0)
        widget = gtk.Entry()
        widget.set_activates_default(True)
        def set_entry(*args):
            widget.set_text("%d" % self.f.maxiter)
        def set_fractal(*args):
            try:
                try:
                    i = int(widget.get_text())
                    self.set_maxiter(i)
                except ValueError, err:
                    msg = "Invalid value '%s': must be a number" % \
                          widget.get_text()
                    utils.idle_add(self.warn, msg)
            except Exception, exn:
                print exn
            return False
        set_entry(self)
        # keep the entry in sync with changes made elsewhere
        self.connect('parameters-changed', set_entry)
        self.connect('iters-changed', set_entry)
        widget.connect('focus-out-event',set_fractal)
        label.set_mnemonic_widget(widget)
        table.attach(widget,1,2,i,i+1,gtk.EXPAND | gtk.FILL,0,0,0)
        return i+1
    def populate_formula_settings(self, table, param_type, row=0):
        # create widget to fiddle with this fractal's settings
        form = self.f.get_form(param_type)
        formula = form.formula
        if param_type == 0:
            # main formula also gets the maxiter control
            row = self.create_maxiter_widget(table,row)
        params = formula.symbols.parameters()
        op = formula.symbols.order_of_params()
        keys = params.keys()
        keys.sort()
        for name in keys:
            param = params[name]
            if isinstance(param,fracttypes.Func):
                self.add_formula_function(table,row,name,param,form)
            else:
                if param.type == fracttypes.Complex:
                    self.add_complex_formula_setting(
                        table,row,form,name,param,
                        op[name],
                        param_type)
                    row+= 1
                elif param.type == fracttypes.Hyper:
                    # hypercomplex params get four entries, one per component
                    suffixes = [" (re)", " (i)", " (j)", " (k)"]
                    for j in xrange(4):
                        self.add_formula_setting(
                            table,row+j,form,name,suffixes[j],
                            param,op[name]+j)
                    row += 3
                elif param.type == fracttypes.Color:
                    self.add_formula_setting(
                        table,row, form, name,"",
                        param,op[name])
                    row += 3
                elif param.type == fracttypes.Gradient:
                    # FIXME
                    pass
                else:
                    self.add_formula_setting(
                        table,row,form,name,"",param,op[name])
            row += 1
        return table
    def set_size(self, new_width, new_height):
        """Resize the fractal and its drawing area; warn on out-of-memory."""
        try:
            Hidden.set_size(self,new_width, new_height)
            self.widget.set_size_request(new_width,new_height)
        except MemoryError, err:
            utils.idle_add(self.warn,str(err))
    def draw_image(self,aa=None,auto_deepen=None):
        """Draw the fractal, surfacing compile errors via the error dialog."""
        try:
            Hidden.draw_image(self,aa,auto_deepen)
        except fracttypes.TranslationError, err:
            advice = _("\nCheck that your compiler settings and formula file are correct.")
            utils.idle_add(self.error,
                           _("Error compiling fractal:"),
                           err.msg + advice)
            return
    def onExpose(self,widget,exposeEvent):
        # repaint just the exposed area
        r = exposeEvent.area
        self.redraw_rect(r.x,r.y,r.width,r.height)
    def onMotionNotify(self,widget,event):
        """Track the pointer; while button 1 is down, rubber-band a zoom
        rectangle constrained to the window's aspect ratio."""
        (x,y) = self.float_coords(event.x, event.y)
        self.emit('pointer-moved', self.button, x, y)
        if not self.notice_mouse:
            return
        # erase the previous rectangle by repainting the whole image
        self.redraw_rect(0,0,self.width,self.height)
        (self.newx,self.newy) = (event.x, event.y)
        dummy = widget.window.get_pointer()
        # force the selection to match the image's aspect ratio
        dy = int(abs(self.newx - self.x) * float(self.height)/self.width)
        if(self.newy < self.y or (self.newy == self.y and self.newx < self.x)):
            dy = -dy
        self.newy = self.y + dy;
        widget.window.draw_rectangle(
            self.widget.get_style().white_gc,
            False,
            int(min(self.x,self.newx)),
            int(min(self.y,self.newy)),
            int(abs(self.newx-self.x)),
            int(abs(self.newy-self.y)));
    def onButtonPress(self,widget,event):
        # remember the drag origin; only button 1 starts a zoom selection
        self.x = event.x
        self.y = event.y
        self.newx = self.x
        self.newy = self.y
        self.button = event.button
        if self.button == 1:
            self.notice_mouse = True
    def set_paint_mode(self,isEnabled, colorsel):
        """Enable/disable gradient-painting mode with the given color selector."""
        self.paint_mode = isEnabled
        self.paint_color_sel = colorsel
    def get_paint_color(self):
        """Return the selected paint color as (r,g,b) floats in [0,1]."""
        color = self.paint_color_sel.get_current_color()
        return (color.red/65535.0, color.green/65535.0, color.blue/65535.0)
    def onPaint(self,x,y):
        """Apply the paint color to the gradient segment (or solid color)
        under pixel (x,y)."""
        # obtain index
        fate = self.image.get_fate(int(x), int(y))
        if not fate:
            return
        index = self.image.get_color_index(int(x), int(y))
        # obtain a color
        (r,g,b) = self.get_paint_color()
        # update colormap
        grad = self.f.get_gradient()
        (is_solid, color) = fate
        if is_solid:
            self.f.solids[color] = (int(r*255.0),int(g*255.0),int(b*255.0),255)
        else:
            i = grad.get_index_at(index)
            # recolor whichever end of the segment is nearer, keeping alpha
            if index > grad.segments[i].mid:
                alpha = grad.segments[i].right_color[3]
                grad.segments[i].right_color = [r, g, b, alpha]
            else:
                alpha = grad.segments[i].left_color[3]
                grad.segments[i].left_color = [r, g, b, alpha]
        self.changed(False)
    def filterPaintModeRelease(self,event):
        """If in paint mode, treat a click (not a drag) as a paint action.
        Returns True when the event was consumed."""
        if self.paint_mode:
            if event.button == 1:
                if self.x == self.newx or self.y == self.newy:
                    self.onPaint(self.newx, self.newy)
                    return True
        return False
    def onButtonRelease(self,widget,event):
        """Finish a mouse gesture: zoom in (button 1), recenter and maybe
        flip to Julia (button 2), or zoom out (button 3)."""
        self.redraw_rect(0,0,self.width,self.height)
        self.button = 0
        self.notice_mouse = False
        if self.filterPaintModeRelease(event):
            return
        self.freeze()
        if event.button == 1:
            if self.x == self.newx or self.y == self.newy:
                # a click without a drag: zoom in 2x on that point
                zoom=0.5
                x = self.x
                y = self.y
            else:
                # zoom into the rubber-banded rectangle
                zoom= (1+abs(self.x - self.newx))/float(self.width)
                x = 0.5 + (self.x + self.newx)/2.0;
                y = 0.5 + (self.y + self.newy)/2.0;
            # with shift held, don't zoom
            if hasattr(event,"state") and event.state & gtk.gdk.SHIFT_MASK:
                zoom = 1.0
            self.recenter(x,y,zoom)
        elif event.button == 2:
            (x,y) = (event.x, event.y)
            zoom = 1.0
            self.recenter(x,y,zoom)
            if self.is4D():
                self.flip_to_julia()
        else:
            # button 3 (or others): zoom out, further with ctrl held
            if hasattr(event,"state") and event.state & gtk.gdk.CONTROL_MASK:
                zoom = 20.0
            else:
                zoom = 2.0
            (x,y) = (event.x, event.y)
            self.recenter(x,y,zoom)
        if self.thaw():
            self.changed()
    def redraw_rect(self,x,y,w,h):
        """Blit the rectangle (x,y,w,h) of the image buffer to the screen,
        clipping it to the widget bounds first."""
        # check to see if part of the rect is out-of-bounds, and clip if so
        if x < 0:
            x = 0
        if y < 0:
            y = 0
        if x+w > self.width:
            w = self.width-x
        if y+h > self.height:
            h = self.height-y
        if x >= self.width or y >= self.height or w < 1 or h < 1:
            # nothing to do
            return
        gc = self.widget.get_style().white_gc
        try:
            buf = self.image.image_buffer(x,y)
        except MemoryError, err:
            # suppress these errors
            return
        if self.widget.window:
            self.widget.window.draw_rgb_image(
                gc,
                x, y,
                min(self.width-x,w),
                min(self.height-y,h),
                gtk.gdk.RGB_DITHER_NONE,
                buf,
                self.width*3)
class Preview(T):
    """A small fractal view used as a thumbnail preview.

    Ignores mouse clicks and silently swallows errors and stats updates.
    """
    def __init__(self,comp,width=120,height=90):
        T.__init__(self,comp,None,width,height)
    def onButtonRelease(self,widget,event):
        # previews are not interactive
        pass
    def error(self,msg,exn):
        # suppress errors from previews
        pass
    def stats_changed(self,s):
        pass
class SubFract(T):
    """A fractal variant view: clicking it copies its fractal to a master."""
    def __init__(self,comp,width=640,height=480):
        T.__init__(self,comp,None,width,height)
        self.master = None
    def set_master(self,master):
        # the fractal that receives our parameters when clicked
        self.master = master
    def onButtonRelease(self,widget,event):
        self.button = 0
        if self.master:
            self.master.set_fractal(self.copy_f())
    def onMotionNotify(self, widget, event):
        # no rubber-band zooming on subfracts
        pass
    def error(self,msg,exn):
        # suppress errors from subfracts, if they ever happened
        # it would be too confusing
        pass
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for file download and caching."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import abstractmethod
import hashlib
import multiprocessing
from multiprocessing.pool import ThreadPool
import os
import random
import shutil
import sys
import tarfile
import threading
import time
import zipfile
import numpy as np
import six
from six.moves.urllib.error import HTTPError
from six.moves.urllib.error import URLError
from six.moves.urllib.request import urlopen
from tensorflow.python.keras._impl.keras.utils.generic_utils import Progbar
try:
import queue # pylint:disable=g-import-not-at-top
except ImportError:
import Queue as queue # pylint:disable=g-import-not-at-top
if sys.version_info[0] == 2:
  def urlretrieve(url, filename, reporthook=None, data=None):
    """Replacement for `urlretrieve` for Python 2.
    Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy
    `urllib` module, known to have issues with proxy management.
    Arguments:
        url: url to retrieve.
        filename: where to store the retrieved data locally.
        reporthook: a hook function that will be called once
            on establishment of the network connection and once
            after each block read thereafter.
            The hook will be passed three arguments;
            a count of blocks transferred so far,
            a block size in bytes, and the total size of the file.
        data: `data` argument passed to `urlopen`.
    """
    def chunk_read(response, chunk_size=8192, reporthook=None):
      # total size comes from the Content-Length header; -1 when unknown
      content_type = response.info().get('Content-Length')
      total_size = -1
      if content_type is not None:
        total_size = int(content_type.strip())
      count = 0
      while True:
        chunk = response.read(chunk_size)
        count += 1
        if reporthook is not None:
          reporthook(count, chunk_size, total_size)
        if chunk:
          yield chunk
        else:
          break
    response = urlopen(url, data)
    with open(filename, 'wb') as fd:
      for chunk in chunk_read(response, reporthook=reporthook):
        fd.write(chunk)
else:
  # Python 3's urlretrieve has none of the py2 proxy problems
  from six.moves.urllib.request import urlretrieve  # pylint: disable=g-import-not-at-top
def _extract_archive(file_path, path='.', archive_format='auto'):
  """Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.
  Arguments:
      file_path: path to the archive file
      path: path to extract the archive file
      archive_format: Archive format to try for extracting the file.
          Options are 'auto', 'tar', 'zip', and None.
          'tar' includes tar, tar.gz, and tar.bz files.
          The default 'auto' is ['tar', 'zip'].
          None or an empty list will return no matches found.
  Returns:
      True if a match was found and an archive extraction was completed,
      False otherwise.
  """
  if archive_format is None:
    return False
  # NOTE: compare strings with ==, not `is` — identity of string literals
  # is an interning accident, not a language guarantee.
  if archive_format == 'auto':
    archive_format = ['tar', 'zip']
  if isinstance(archive_format, six.string_types):
    archive_format = [archive_format]
  for archive_type in archive_format:
    if archive_type == 'tar':
      open_fn = tarfile.open
      is_match_fn = tarfile.is_tarfile
    elif archive_type == 'zip':
      open_fn = zipfile.ZipFile
      is_match_fn = zipfile.is_zipfile
    else:
      # Unknown format name: skip it instead of reusing (or never binding)
      # open_fn/is_match_fn from a previous iteration.
      continue
    if is_match_fn(file_path):
      with open_fn(file_path) as archive:
        try:
          archive.extractall(path)
        except (tarfile.TarError, RuntimeError, KeyboardInterrupt):
          # Clean up a partially-extracted target before re-raising.
          if os.path.exists(path):
            if os.path.isfile(path):
              os.remove(path)
            else:
              shutil.rmtree(path)
          raise
      return True
  return False
def get_file(fname,
             origin,
             untar=False,
             md5_hash=None,
             file_hash=None,
             cache_subdir='datasets',
             hash_algorithm='auto',
             extract=False,
             archive_format='auto',
             cache_dir=None):
  """Downloads a file from a URL if it not already in the cache.
  By default the file at the url `origin` is downloaded to the
  cache_dir `~/.keras`, placed in the cache_subdir `datasets`,
  and given the filename `fname`. The final location of a file
  `example.txt` would therefore be `~/.keras/datasets/example.txt`.
  Files in tar, tar.gz, tar.bz, and zip formats can also be extracted.
  Passing a hash will verify the file after download. The command line
  programs `shasum` and `sha256sum` can compute the hash.
  Arguments:
      fname: Name of the file. If an absolute path `/path/to/file.txt` is
          specified the file will be saved at that location.
      origin: Original URL of the file.
      untar: Deprecated in favor of 'extract'.
          boolean, whether the file should be decompressed
      md5_hash: Deprecated in favor of 'file_hash'.
          md5 hash of the file for verification
      file_hash: The expected hash string of the file after download.
          The sha256 and md5 hash algorithms are both supported.
      cache_subdir: Subdirectory under the Keras cache dir where the file is
          saved. If an absolute path `/path/to/folder` is
          specified the file will be saved at that location.
      hash_algorithm: Select the hash algorithm to verify the file.
          options are 'md5', 'sha256', and 'auto'.
          The default 'auto' detects the hash algorithm in use.
      extract: True tries extracting the file as an Archive, like tar or zip.
      archive_format: Archive format to try for extracting the file.
          Options are 'auto', 'tar', 'zip', and None.
          'tar' includes tar, tar.gz, and tar.bz files.
          The default 'auto' is ['tar', 'zip'].
          None or an empty list will return no matches found.
      cache_dir: Location to store cached files, when None it
          defaults to the [Keras
          Directory](/faq/#where-is-the-keras-configuration-filed-stored).
  Returns:
      Path to the downloaded file
  """
  if cache_dir is None:
    cache_dir = os.path.expanduser(os.path.join('~', '.keras'))
  # md5_hash is the deprecated spelling of file_hash
  if md5_hash is not None and file_hash is None:
    file_hash = md5_hash
    hash_algorithm = 'md5'
  datadir_base = os.path.expanduser(cache_dir)
  if not os.access(datadir_base, os.W_OK):
    # fall back to a world-writable location when the cache dir isn't writable
    datadir_base = os.path.join('/tmp', '.keras')
  datadir = os.path.join(datadir_base, cache_subdir)
  if not os.path.exists(datadir):
    os.makedirs(datadir)
  if untar:
    untar_fpath = os.path.join(datadir, fname)
    fpath = untar_fpath + '.tar.gz'
  else:
    fpath = os.path.join(datadir, fname)
  download = False
  if os.path.exists(fpath):
    # File found; verify integrity if a hash was provided.
    if file_hash is not None:
      if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
        print('A local file was found, but it seems to be '
              'incomplete or outdated because the ' + hash_algorithm +
              ' file hash does not match the original value of ' + file_hash +
              ' so we will re-download the data.')
        download = True
  else:
    download = True
  if download:
    print('Downloading data from', origin)

    class ProgressTracker(object):
      # Maintain progbar for the lifetime of download.
      # This design was chosen for Python 2.7 compatibility.
      progbar = None

    def dl_progress(count, block_size, total_size):
      if ProgressTracker.progbar is None:
        # BUG FIX: was `total_size is -1`, an identity comparison that only
        # happens to work because CPython caches small ints. Use ==.
        if total_size == -1:
          total_size = None
        ProgressTracker.progbar = Progbar(total_size)
      else:
        ProgressTracker.progbar.update(count * block_size)

    error_msg = 'URL fetch failure on {}: {} -- {}'
    try:
      try:
        urlretrieve(origin, fpath, dl_progress)
      except URLError as e:
        raise Exception(error_msg.format(origin, e.errno, e.reason))
      except HTTPError as e:
        raise Exception(error_msg.format(origin, e.code, e.msg))
    except (Exception, KeyboardInterrupt) as e:
      # never leave a partial download in the cache
      if os.path.exists(fpath):
        os.remove(fpath)
      raise
    ProgressTracker.progbar = None
  if untar:
    if not os.path.exists(untar_fpath):
      _extract_archive(fpath, datadir, archive_format='tar')
    return untar_fpath
  if extract:
    _extract_archive(fpath, datadir, archive_format)
  return fpath
def _hash_file(fpath, algorithm='sha256', chunk_size=65535):
"""Calculates a file sha256 or md5 hash.
Example:
```python
>>> from keras.data_utils import _hash_file
>>> _hash_file('/path/to/file.zip')
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
```
Arguments:
fpath: path to the file being validated
algorithm: hash algorithm, one of 'auto', 'sha256', or 'md5'.
The default 'auto' detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
Returns:
The file hash
"""
if (algorithm is 'sha256') or (algorithm is 'auto' and len(hash) is 64):
hasher = hashlib.sha256()
else:
hasher = hashlib.md5()
with open(fpath, 'rb') as fpath_file:
for chunk in iter(lambda: fpath_file.read(chunk_size), b''):
hasher.update(chunk)
return hasher.hexdigest()
def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):
  """Validates a file against a sha256 or md5 hash.
  Arguments:
      fpath: path to the file being validated
      file_hash: The expected hash string of the file.
          The sha256 and md5 hash algorithms are both supported.
      algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'.
          The default 'auto' detects the hash algorithm in use
          (a 64-character hex digest means sha256; otherwise md5).
      chunk_size: Bytes to read at a time, important for large files.
  Returns:
      Whether the file is valid
  """
  # BUG FIX: replaced `is` comparisons (`algorithm is 'sha256'`,
  # `len(file_hash) is 64`) with ==. Identity tests on strings and ints
  # only work by CPython interning/small-int-cache accident.
  if (algorithm == 'sha256') or (algorithm == 'auto' and len(file_hash) == 64):
    hasher = 'sha256'
  else:
    hasher = 'md5'
  # direct boolean result instead of if/else returning True/False
  return str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash)
class Sequence(object):
  """Base object for fitting to a sequence of data, such as a dataset.
  Every `Sequence` must implements the `__getitem__` and the `__len__` methods.
  If you want to modify your dataset between epochs you may implement
  `on_epoch_end`. The method `__getitem__` should return a complete batch.
  Notes:
      `Sequence` are a safer way to do multiprocessing. This structure guarantees
      that the network will only train once on each sample per epoch which is not
      the case with generators.
  Examples:
  ```python
      from skimage.io import imread
      from skimage.transform import resize
      import numpy as np
      import math
      # Here, `x_set` is list of path to the images
      # and `y_set` are the associated classes.
      class CIFAR10Sequence(Sequence):
          def __init__(self, x_set, y_set, batch_size):
              self.x, self.y = x_set, y_set
              self.batch_size = batch_size
          def __len__(self):
              return math.ceil(len(self.x) / self.batch_size)
          def __getitem__(self, idx):
              batch_x = self.x[idx * self.batch_size:(idx + 1) *
                  self.batch_size]
              batch_y = self.y[idx * self.batch_size:(idx + 1) *
                  self.batch_size]
              return np.array([
                  resize(imread(file_name), (200, 200))
                      for file_name in batch_x]), np.array(batch_y)
  ```
  """

  @abstractmethod
  def __getitem__(self, index):
    """Fetch one complete batch.
    Arguments:
        index: position of the batch within the Sequence.
    Returns:
        The batch at that position.
    """
    raise NotImplementedError

  @abstractmethod
  def __len__(self):
    """Count the batches this Sequence provides.
    Returns:
        Total number of batches.
    """
    raise NotImplementedError

  @abstractmethod
  def on_epoch_end(self):
    """Hook invoked once per epoch; override to reshuffle or mutate data."""
    pass
# Global variables to be shared across processes
_SHARED_SEQUENCES = {}  # maps enqueuer uid -> Sequence, read by worker processes
# We use a Value to provide unique id to different processes.
_SEQUENCE_COUNTER = None  # lazily-created multiprocessing.Value (see OrderedEnqueuer)
def get_index(uid, i):
  """Get the value from the Sequence `uid` at index `i`.
  To allow multiple Sequences to be used at the same time, we use `uid` to
  get a specific one. A single Sequence would cause the validation to
  overwrite the training Sequence.
  Arguments:
      uid: int, Sequence identifier
      i: index
  Returns:
      The value at index `i`.
  """
  # module-level function (not a method) so it is picklable for Pool workers
  return _SHARED_SEQUENCES[uid][i]
class SequenceEnqueuer(object):
  """Base class to enqueue inputs.
  The task of an Enqueuer is to use parallelism to speed up preprocessing.
  This is done with processes or threads.
  Examples:
  ```python
  enqueuer = SequenceEnqueuer(...)
  enqueuer.start()
  datas = enqueuer.get()
  for data in datas:
      # Use the inputs; training, evaluating, predicting.
      # ... stop sometime.
  enqueuer.close()
  ```
  The `enqueuer.get()` should be an infinite stream of datas.
  """

  @abstractmethod
  def is_running(self):
    # True while workers are active and stop() has not been called
    raise NotImplementedError

  @abstractmethod
  def start(self, workers=1, max_queue_size=10):
    """Starts the handler's workers.
    Arguments:
        workers: number of worker threads
        max_queue_size: queue size
            (when full, threads could block on `put()`).
    """
    raise NotImplementedError

  @abstractmethod
  def stop(self, timeout=None):
    """Stop running threads and wait for them to exit, if necessary.
    Should be called by the same thread which called start().
    Arguments:
        timeout: maximum time to wait on thread.join()
    """
    raise NotImplementedError

  @abstractmethod
  def get(self):
    """Creates a generator to extract data from the queue.
    Skip the data if it is `None`.
    Returns:
        Generator yielding tuples `(inputs, targets)`
            or `(inputs, targets, sample_weights)`.
    """
    raise NotImplementedError
class OrderedEnqueuer(SequenceEnqueuer):
  """Builds a Enqueuer from a Sequence.
  Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
  Arguments:
      sequence: A `keras.utils.data_utils.Sequence` object.
      use_multiprocessing: Use multiprocessing if True, otherwise threading
      shuffle: Whether to shuffle the data at the beginning of each epoch
  """

  def __init__(self, sequence, use_multiprocessing=False, shuffle=False):
    self.sequence = sequence
    # Doing Multiprocessing.Value += x is not process-safe.
    global _SEQUENCE_COUNTER
    if _SEQUENCE_COUNTER is None:
      _SEQUENCE_COUNTER = multiprocessing.Value('i', 0)
    # take the lock so concurrent enqueuers get distinct uids
    with _SEQUENCE_COUNTER.get_lock():
      self.uid = _SEQUENCE_COUNTER.value
      _SEQUENCE_COUNTER.value += 1
    self.use_multiprocessing = use_multiprocessing
    self.shuffle = shuffle
    self.workers = 0
    self.executor = None      # Pool/ThreadPool, created in start()
    self.queue = None         # queue of AsyncResult futures, in order
    self.run_thread = None    # thread running _run()
    self.stop_signal = None   # threading.Event set by stop()

  def is_running(self):
    return self.stop_signal is not None and not self.stop_signal.is_set()

  def start(self, workers=1, max_queue_size=10):
    """Start the handler's workers.
    Arguments:
        workers: number of worker threads
        max_queue_size: queue size
            (when full, workers could block on `put()`)
    """
    if self.use_multiprocessing:
      self.executor = multiprocessing.Pool(workers)
    else:
      self.executor = ThreadPool(workers)
    self.workers = workers
    self.queue = queue.Queue(max_queue_size)
    self.stop_signal = threading.Event()
    self.run_thread = threading.Thread(target=self._run)
    self.run_thread.daemon = True
    self.run_thread.start()

  def _wait_queue(self):
    """Wait for the queue to be empty."""
    # polling loop; also wakes up promptly when stop() is requested
    while True:
      time.sleep(0.1)
      if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():
        return

  def _run(self):
    """Function to submit request to the executor & queue `Future` objects."""
    sequence = list(range(len(self.sequence)))
    self._send_sequence()  # Share the initial sequence
    while True:
      if self.shuffle:
        random.shuffle(sequence)
      # enqueue futures in order; get() consumes them in the same order,
      # which is what makes this enqueuer "ordered"
      for i in sequence:
        if self.stop_signal.is_set():
          return
        self.queue.put(
            self.executor.apply_async(get_index, (self.uid, i)), block=True)
      # Done with the current epoch, waiting for the final batches
      self._wait_queue()
      if self.stop_signal.is_set():
        # We're done
        return
      # Call the internal on epoch end.
      self.sequence.on_epoch_end()
      self._send_sequence()  # Update the pool

  def get(self):
    """Creates a generator to extract data from the queue.
    Skip the data if it is `None`.
    Yields:
        Tuples (inputs, targets)
            or (inputs, targets, sample_weights)
    """
    try:
      while self.is_running():
        # block on the oldest future, preserving submission order
        inputs = self.queue.get(block=True).get()
        self.queue.task_done()
        if inputs is not None:
          yield inputs
    except Exception as e:
      self.stop()
      raise StopIteration(e)

  def _send_sequence(self):
    """Send current Sequence to all workers."""
    _SHARED_SEQUENCES[
        self.uid] = self.sequence  # For new processes that may spawn
    # recreate the pool so freshly-spawned workers see the updated sequence
    self._close_pool()
    if self.use_multiprocessing:
      self.executor = multiprocessing.Pool(self.workers)
    else:
      self.executor = ThreadPool(self.workers)

  def stop(self, timeout=None):
    """Stops running threads and wait for them to exit, if necessary.
    Should be called by the same thread which called `start()`.
    Arguments:
        timeout: maximum time to wait on `thread.join()`
    """
    self.stop_signal.set()
    # drain the queue under its lock so _run()'s blocked put() can proceed
    with self.queue.mutex:
      self.queue.queue.clear()
      self.queue.unfinished_tasks = 0
      self.queue.not_full.notify()
    self._close_pool()
    self.run_thread.join(timeout)
    _SHARED_SEQUENCES[self.uid] = None

  def _close_pool(self):
    self.executor.close()
    self.executor.join()
class GeneratorEnqueuer(SequenceEnqueuer):
  """Builds a queue out of a data generator.
  The provided generator can be finite in which case the class will throw
  a `StopIteration` exception.
  Used in `fit_generator`, `evaluate_generator`, `predict_generator`.
  Arguments:
      generator: a generator function which yields data
      use_multiprocessing: use multiprocessing if True, otherwise threading
      wait_time: time to sleep in-between calls to `put()`
      random_seed: Initial seed for workers,
          will be incremented by one for each worker.
  """

  def __init__(self,
               generator,
               use_multiprocessing=False,
               wait_time=0.05,
               seed=None):
    self.wait_time = wait_time
    self._generator = generator
    self._use_multiprocessing = use_multiprocessing
    self._threads = []        # worker Thread or Process objects
    self._stop_event = None   # threading/multiprocessing Event
    self.queue = None
    self.seed = seed

  def start(self, workers=1, max_queue_size=10):
    """Kicks off threads which add data from the generator into the queue.
    Arguments:
        workers: number of worker threads
        max_queue_size: queue size
            (when full, threads could block on `put()`)
    """

    def data_generator_task():
      # NOTE(review): with threading, multiple workers share one generator
      # without a lock — next() from several threads is racy; confirm
      # callers use workers=1 in that mode.
      while not self._stop_event.is_set():
        try:
          if self._use_multiprocessing or self.queue.qsize() < max_queue_size:
            generator_output = next(self._generator)
            self.queue.put(generator_output)
          else:
            time.sleep(self.wait_time)
        except StopIteration:
          break
        except Exception:
          self._stop_event.set()
          raise

    try:
      if self._use_multiprocessing:
        self.queue = multiprocessing.Queue(maxsize=max_queue_size)
        self._stop_event = multiprocessing.Event()
      else:
        self.queue = queue.Queue()
        self._stop_event = threading.Event()
      for _ in range(workers):
        if self._use_multiprocessing:
          # Reset random seed else all children processes
          # share the same seed
          np.random.seed(self.seed)
          thread = multiprocessing.Process(target=data_generator_task)
          thread.daemon = True
          if self.seed is not None:
            self.seed += 1
        else:
          thread = threading.Thread(target=data_generator_task)
        self._threads.append(thread)
        thread.start()
    except:
      # best effort cleanup before surfacing the failure
      self.stop()
      raise

  def is_running(self):
    return self._stop_event is not None and not self._stop_event.is_set()

  def stop(self, timeout=None):
    """Stops running threads and wait for them to exit, if necessary.
    Should be called by the same thread which called `start()`.
    Arguments:
        timeout: maximum time to wait on `thread.join()`.
    """
    if self.is_running():
      self._stop_event.set()
    for thread in self._threads:
      if thread.is_alive():
        if self._use_multiprocessing:
          thread.terminate()
        else:
          thread.join(timeout)
    if self._use_multiprocessing:
      if self.queue is not None:
        self.queue.close()
    self._threads = []
    self._stop_event = None
    self.queue = None

  def get(self):
    """Creates a generator to extract data from the queue.
    Skip the data if it is `None`.
    Yields:
        Data arrays.
    """
    while self.is_running():
      if not self.queue.empty():
        inputs = self.queue.get()
        if inputs is not None:
          yield inputs
      else:
        # exit cleanly once all workers have finished and the queue drained
        all_finished = all([not thread.is_alive() for thread in self._threads])
        if all_finished and self.queue.empty():
          raise StopIteration()
        else:
          time.sleep(self.wait_time)
| |
"""
Genetics Module of the CAB Sugarscape simulation.
Encapsulates all aspects of the agent genetics.
Credit to David Grotzky.
"""
__author__ = 'Michael Wagner'
__version__ = '1.0'
from cab.util.rng import get_RNG
# TODO: Implement proper immune system.
class Chromosome:
    """
    This class handles all biological aspects of an agent.

    A chromosome carries two genome bit-strings (diploid), a cultural
    bit-list, an immune-system bit-list and generation bookkeeping for
    both parents.
    """

    def __init__(self, dna):
        """
        Standard initializer.
        :param dna: [genome1, genome2, culture, immune_system, (gen_mom, gen_dad)]
        :return:
        """
        self.genomes = dna[0:2]
        self.culture = dna[2]
        self.immune_system = dna[3]
        # An agent belongs to the generation after its youngest parent.
        my_generation = max(dna[4][0], dna[4][1]) + 1
        self.generation = (dna[4][0], dna[4][1], my_generation)
        self.meta_sugar = None
        self.meta_spice = None
        self.init_sugar = None
        self.init_spice = None
        self.vision = None
        self.gender = None
        self.fertility = None
        self.dying_age = None
        self.dna_color = None
        # Read dictionary entries as:
        # ----> {attribute: (start index, end index)}
        # TODO: Shift this map into GlobalConstants and automatically generate genome lengths from the given constants.
        self.att_map = {'meta_sugar': (0, 3),
                        'meta_spice': (3, 6),
                        'init_sugar': (6, 12),
                        'init_spice': (12, 18),
                        'vision': (18, 21),
                        'gender': (21, 22),
                        'fertility_1': (22, 28),
                        'fertility_2': (28, 34),
                        'dying_age': (34, 41)}
        self.map_genome_to_attributes()

    def map_genome_to_attributes(self):
        """
        Decodes the genome and creates the attribute of the individual.
        """
        # The meta and init attributes cannot become smaller than 1,
        # even though that is possible by the encoding. We have to avoid that.
        meta_sugar = Chromosome.choose_dominant_gene(self.get_genome_substring('meta_sugar'))
        meta_spice = Chromosome.choose_dominant_gene(self.get_genome_substring('meta_spice'))
        init_sugar = Chromosome.choose_dominant_gene(self.get_genome_substring('init_sugar'))
        init_spice = Chromosome.choose_dominant_gene(self.get_genome_substring('init_spice'))
        vision = Chromosome.choose_dominant_gene(self.get_genome_substring('vision'))
        gender = get_RNG().choice(self.get_genome_substring('gender'))
        f1 = Chromosome.choose_dominant_gene(self.get_genome_substring('fertility_1'))
        f2 = Chromosome.choose_dominant_gene(self.get_genome_substring('fertility_2'))
        dying_age = Chromosome.choose_dominant_gene(self.get_genome_substring('dying_age'))
        self.meta_sugar = max(int(meta_sugar, 2), 1)
        self.meta_spice = max(int(meta_spice, 2), 1)
        self.init_sugar = max(int(init_sugar, 2), 1)
        self.init_spice = max(int(init_spice, 2), 1)
        self.vision = int(vision, 2)
        self.gender = int(gender, 2)
        self.dying_age = int(dying_age, 2)
        self.fertility = (int(f1, 2), int(f2, 2))
        # Derive a display color from the expressed (dominant) genes.
        dna = "".join((meta_sugar, meta_spice, init_sugar, init_spice, vision, gender, f1, f2, dying_age))
        self.dna_color = Chromosome.convert_to_color(dna)

    def get_genome_substring(self, key):
        """
        Retrieves the partitions of both genes.
        :param key: The key of the partition entries' location in the dictionary
        :return: Two sub-strings of the genomes
        """
        start, end = self.att_map[key]
        return self.genomes[0][start: end], self.genomes[1][start: end]

    @staticmethod
    def choose_dominant_gene(strings):
        """
        Takes two gene strings and returns the dominant one,
        or random if both are dominant/ recessive
        :param strings: Two sub-genes of the chromosome
        :return: The more dominant/ luckier string of both.
        """
        # How do we determine dominance?
        # For now just by looking whether there is an even number of 'ones' in it.
        dominant0 = strings[0].count('1') % 2 == 0
        dominant1 = strings[1].count('1') % 2 == 0
        if dominant0 == dominant1:
            # Both dominant or both recessive: pick one at random.
            return get_RNG().choice([strings[0], strings[1]])
        # BUG FIX: the original returned the *recessive* string when exactly
        # one gene was dominant, contradicting the documented contract.
        return strings[0] if dominant0 else strings[1]

    def merge_with(self, mate_chromosome):
        """
        Takes the chromosome from the mate, performs
        all necessary crossovers and returns the resulting DNA
        :param mate_chromosome:
        :return: The child's chromosome.
        """
        # Concept: divide genome in partitions of varying length.
        # Exchange those parts between mother and father gametes?
        genome1 = Chromosome.create_gamete(self.genomes)
        genome2 = Chromosome.create_gamete(mate_chromosome.genomes)
        culture = Chromosome.create_gamete((self.culture, mate_chromosome.culture))
        immune_sys = Chromosome.create_gamete((self.immune_system, mate_chromosome.immune_system))
        # Create a string out of the gene strings
        genome1 = "".join(map(str, genome1))
        genome2 = "".join(map(str, genome2))
        # Order the generation tuple for better overview: (mom, dad)
        if self.gender == 1:
            generation = (self.generation[2], mate_chromosome.generation[2])
        else:
            generation = (mate_chromosome.generation[2], self.generation[2])
        return [genome1, genome2, culture, immune_sys, generation]

    @staticmethod
    def create_gamete(genomes):
        """
        Creates and returns a gamete that consists of parts of
        both genomes in this chromosome.
        :return: Gamete as a list of elements taken from both genomes.
        """
        # 1) Generate a random number (gaussian distributed) of
        # random indices which are then used to split the genes at the respective points.
        genome_size = len(genomes[0])
        num_partitions = int(get_RNG().triangular(0, genome_size / 2, genome_size))
        partitions = get_RNG().sample(range(genome_size), num_partitions)
        partitions.sort()  # Now we have all our indices, and sorted.
        partitions.append(genome_size)  # Append the end of the string
        start = 0
        gamete = []
        # Copy each segment from a randomly chosen parent genome.
        for p in partitions:
            i = get_RNG().choice([0, 1])
            gamete.extend(genomes[i][start:p])
            start = p
        return gamete

    def mutate(self):
        """
        Has a chance of 0.5% to perform a random mutation in the dna,
        and a chance of 1% to flip a few bits in the cultural dna.
        :return:
        """
        # Flip one bit in each genome string.
        if get_RNG().random() < 0.005:
            # BUG FIX: the mutation index must range over the genome length,
            # not over len(self.genomes) (which is always 2) — otherwise only
            # the first two bits of each genome could ever mutate.
            length = len(self.genomes[0])
            index = get_RNG().randrange(length)
            l = list(self.genomes[0])
            l[index] = Chromosome.invert_bit(l[index])
            g1 = "".join(l)
            index = get_RNG().randrange(length)
            l = list(self.genomes[1])
            l[index] = Chromosome.invert_bit(l[index])
            g2 = "".join(l)
            self.genomes = (g1, g2)
        # Flip a bit in culture
        if get_RNG().random() < 0.01:
            length = len(self.culture)
            num_bits_changed = int(get_RNG().triangular(0, 1, length))
            index = get_RNG().sample(range(length), num_bits_changed)
            for i in index:
                self.culture[i] = 1 - self.culture[i]

    @staticmethod
    def invert_bit(bit):
        """
        Takes the bit as a string and inverts it.
        :param bit:
        :return: Inverted bit
        """
        if bit == "0":
            return "1"
        else:
            return "0"

    # This method makes sense only for Lamarckian Evolution!
    # def map_attributes_to_genome(self, attributes):
    #     return

    @staticmethod
    def convert_to_color(dna):
        """Map a bit-string to an (r, g, b) tuple with components in 0..250."""
        # Interleave the dna into three channel bit-strings.
        r_string = dna[0::3]
        g_string = dna[1::3]
        b_string = dna[2::3]
        r_num = int(r_string, 2)
        g_num = int(g_string, 2)
        b_num = int(b_string, 2)
        # Scale each channel into steps of 10 within [0, 250].
        r = int((r_num / (2 ** len(r_string))) * 25) * 10
        g = int((g_num / (2 ** len(g_string))) * 25) * 10
        b = int((b_num / (2 ** len(b_string))) * 25) * 10
        return r, g, b
| |
import matplotlib.pyplot as plt
import pandas as pd
import collections
import logging
import networkx as nx
import json
# This reporter show only high level statistics in order to compare different tests
# This reporter show only high level statistics in order to compare different tests
class TestResultsSummary(object):
    """Accumulates high-level network statistics over a test run.

    Tracks packet counts, delays, delay factors and per-cycle maximum
    buffer sizes; ``finalize`` computes averages and logs everything.
    """

    def __init__(self):
        self.debugging = False
        self.test = None
        self.logger = None
        self.curr_cycle = None
        self.total_packets_sent = 0
        self.total_packets_recv = 0
        self.total_delay = 0
        self.curr_max_buffer_size = 0
        self.curr_max_packet_delay = 0
        self.total_time_to_delivery = 0
        self.total_route_len = 0
        self.max_delay_factor = 0
        self.average_delay_factor = 0
        self.total_max_buffer_size = 0
        self.cycles = 0

    def is_debugging(self):
        return self.debugging

    def init(self, test):
        """Bind the reporter to a test and its network."""
        self.test = test
        self.network = test.network
        self.logger = logging.getLogger(test.desc['test']['desc'])

    def start_cycle(self, cycle):
        self.curr_cycle = cycle

    def packet_invoked(self, packet):
        self.total_packets_sent += 1

    def packet_forwarded(self, dest, src, packet):
        pass

    def packet_received(self, packet):
        """Record delivery statistics for a packet that reached its target."""
        self.total_packets_recv += 1
        time_to_delivery = self.curr_cycle - packet.invoke_cycle
        route_length = len(packet.route) - 1
        # Delay factor: actual delivery time relative to the shortest
        # possible (one hop per cycle along the route).
        delay_factor = 1. * time_to_delivery / route_length
        self.total_time_to_delivery += time_to_delivery
        self.total_route_len += route_length
        self.max_delay_factor = max(self.max_delay_factor, delay_factor)
        # Pure queuing delay: cycles spent beyond the route length.
        max_packet_delay = self.curr_cycle - packet.invoke_cycle - (len(packet.route)-1)
        self.curr_max_packet_delay = max(max_packet_delay, self.curr_max_packet_delay)
        self.total_delay += max_packet_delay

    def cycle_end(self):
        """Sample the largest edge buffer of the finished cycle."""
        cycle_max_buffer_size = 0
        for n1, n2 in self.network.edges_iter():
            buf_size = len(self.network[n1][n2]['buf'])
            cycle_max_buffer_size = max(cycle_max_buffer_size, buf_size)
        self.total_max_buffer_size += cycle_max_buffer_size
        self.cycles += 1
        self.curr_max_buffer_size = max(self.curr_max_buffer_size, cycle_max_buffer_size)

    def test_finished(self, test_time):
        self.test_time = test_time

    def finalize(self):
        """Compute averages and log the summary.

        BUG FIX: the original divided unconditionally, raising
        ZeroDivisionError when no cycle completed or no packet was
        delivered; both divisions are now guarded.
        """
        self.max_packet_delay = self.curr_max_packet_delay
        self.max_buffer_size = self.curr_max_buffer_size
        self.average_max_buffer_size = (
            1. * self.total_max_buffer_size / self.cycles if self.cycles else 0)
        self.logger.info('Test have finished. Total seconds: {}'.format(self.test_time.total_seconds()))
        self.logger.info('Total sent: {}'.format(self.total_packets_sent))
        self.logger.info('Total recv: {}'.format(self.total_packets_recv))
        self.average_packet_delay = 0
        if self.total_packets_recv > 0:
            self.average_packet_delay = 1. * self.total_delay / self.total_packets_recv
        self.logger.info('Average packet delay: {}'.format(self.average_packet_delay))
        self.logger.info('Max packet delay: {}'.format(self.max_packet_delay))
        self.logger.info('Max buffer size: {}'.format(self.max_buffer_size))
        self.logger.info('Average max buffer size: {}'.format(self.average_max_buffer_size))
        if self.total_route_len > 0:
            self.average_delay_factor = 1. * self.total_time_to_delivery / self.total_route_len
        self.logger.info('Max delay factor: {}'.format(self.max_delay_factor))
        self.logger.info('Average delay factor: {}'.format(self.average_delay_factor))
class TestResultsHistory(TestResultsSummary):
    """Summary reporter that additionally records per-edge buffer sizes
    for every cycle and dumps them to CSV or HDF on finalize."""

    def __init__(self, output_file):
        TestResultsSummary.__init__(self)
        self.stats = {}
        self.output_file = output_file

    def cycle_end(self):
        TestResultsSummary.cycle_end(self)
        # Record the buffer occupancy of every edge for this cycle.
        for src, dst in self.network.edges_iter():
            occupancy = len(self.network[src][dst]['buf'])
            self.curr_max_buffer_size = max(self.curr_max_buffer_size, occupancy)
            self.stats[(src, dst, self.curr_cycle)] = (occupancy,)

    def finalize(self):
        TestResultsSummary.finalize(self)
        frame = pd.DataFrame(self.stats).T
        frame.columns = ['BUF']  # Relevant only when there is more than one port
        frame.index.names = ['SRC', 'DEST', 'CYCLE']
        # File extension selects the output format.
        if self.output_file.endswith('.csv'):
            frame.to_csv(self.output_file)
        else:
            frame.to_hdf(self.output_file, 'abc')
class TestResultsLog(TestResultsSummary):
    """Summary reporter that additionally writes a full JSON event log
    (nodes, edges, per-cycle packet events) for later visualization."""

    def __init__(self, output_file):
        TestResultsSummary.__init__(self)
        self.f = open(output_file, 'wt')
        # Maps (src, dest) -> sequential edge id used in the event tuples.
        self.edge_dict = {}
        self.d = {}

    def init(self, test):
        TestResultsSummary.init(self, test)
        self.d = {}
        self.d['nodes'] = []
        for n, node in enumerate(self.network.nodes()):
            node_dict = {}
            node_dict['id'] = n
            node_dict['label'] = str(n)
            self.d['nodes'].append(node_dict)
        n = 0
        # {'id': 0, 'from': 0, 'to': 0, 'arrows': 'to'},
        self.d['edges'] = []
        # NOTE: .edge / .iteritems() require Python 2 and networkx 1.x.
        for src, tmp in self.network.edge.iteritems():
            for dest, e in tmp.iteritems():
                edge_dict = {}
                edge_dict['id'] = n
                #edge_dict['label'] = str(n)
                edge_dict['from'] = src
                edge_dict['to'] = dest
                edge_dict['cap'] = e['cap']
                self.d['edges'].append(edge_dict)
                self.edge_dict[src, dest] = n
                n += 1
        #self.d['test_name'] = self.test.name
        self.d['packets'] = {}
        self.d['cycles'] = []
        # self.f.write('NEW,CYCLE_NUM\n')
        # self.f.write('INV,PACKET_ID,ROUTE\n')
        # self.f.write('FWD,PACKET_ID,SRC,DEST\n')

    def start_cycle(self, cycle):
        TestResultsSummary.start_cycle(self, cycle)
        # Open a fresh event list for this cycle; it stays referenced from
        # d['cycles'] so later appends land in the right slot.
        self.curr_cycle_events = []
        self.d['cycles'].append(self.curr_cycle_events)

    def packet_invoked(self, packet):
        TestResultsSummary.packet_invoked(self, packet)
        self.d['packets'][packet.packet_id] = {'route':packet.route, 'invoke':packet.invoke_cycle}
        dest = packet.route[0]
        # Event tuple: (packet id, edge id, src, dest, hop index, route length);
        # -1 marks "no edge/no src" for the invocation event.
        v = packet.packet_id, -1, -1, dest, packet.route.index(dest), len(packet.route)
        self.curr_cycle_events.append(v)

    def packet_forwarded(self, dest, src, packet):
        TestResultsSummary.packet_forwarded(self, dest, src, packet)
        v = packet.packet_id, self.edge_dict[src, dest], src, dest, packet.route.index(dest), len(packet.route)
        self.curr_cycle_events.append(v)

    # def packet_received(self, packet):
    #     TestResultsSummary.packet_received(self, packet)
    #     v = packet.packet_id, -1, packet.route[-1], -1
    #     self.curr_cycle_events.append(v)

    def finalize(self):
        TestResultsSummary.finalize(self)
        json.dump(self.d, self.f)
        self.f.close()
class CountTotalLoadPerCycle(object):
    """Reporter tracking how many packets are in flight at the end of
    each cycle; the series is written to HDF on finalize."""

    def __init__(self, output_file):
        self.debugging = False
        self.test = None
        self.logger = None
        self.curr_cycle = None
        self.output_file = output_file
        # Running count of packets invoked but not yet received.
        self.curr_total_load = 0
        self.total_load_per_cycle = []

    def is_debugging(self):
        return self.debugging

    def init(self, test):
        self.test = test
        self.network = test.network

    def packet_invoked(self, packet):
        self.curr_total_load += 1

    def packet_forwarded(self, dest, src, packet):
        pass

    def packet_received(self, packet):
        self.curr_total_load -= 1

    def start_cycle(self, curr_cycle):
        pass

    def cycle_end(self):
        # Snapshot the in-flight count for the cycle just finished.
        self.total_load_per_cycle.append(self.curr_total_load)

    def test_finished(self, test_time):
        pass

    def finalize(self):
        pd.Series(self.total_load_per_cycle).to_hdf(self.output_file, 'abc')
| |
import sys
import shutil
import os
import stat
import re
import posixpath
import pkg_resources
import zipfile
import tarfile
from pip.exceptions import InstallationError
from pip.backwardcompat import WindowsError, string_types, raw_input
from pip.locations import site_packages, running_under_virtualenv
from pip.log import logger
__all__ = ['rmtree', 'display_path', 'backup_dir',
'find_command', 'ask', 'Inf',
'normalize_name', 'splitext',
'format_size', 'is_installable_dir',
'is_svn_page', 'file_contents',
'split_leading_dir', 'has_leading_dir',
'make_path_relative', 'normalize_path',
'renames', 'get_terminal_size',
'unzip_file', 'untar_file', 'create_download_cache_folder',
'cache_download', 'unpack_file']
def rmtree(dir, ignore_errors=False):
    # shutil.rmtree wrapper that retries read-only files via
    # rmtree_errorhandler (needed for .svn trees on Windows).
    shutil.rmtree(dir, ignore_errors=ignore_errors,
                  onerror=rmtree_errorhandler)
def rmtree_errorhandler(func, path, exc_info):
    """On Windows, the files in .svn are read-only, so when rmtree() tries to
    remove them, an exception is thrown.  We catch that here, remove the
    read-only attribute, and hopefully continue without problems."""
    exctype, value = exc_info[:2]
    # On Python 2.4, it will be OSError number 13
    # On all more recent Pythons, it'll be WindowsError number 5
    if not ((exctype is WindowsError and value.args[0] == 5) or
            (exctype is OSError and value.args[0] == 13)):
        # Not a permission error we know how to handle: re-raise.
        raise
    # file type should currently be read only
    if ((os.stat(path).st_mode & stat.S_IREAD) != stat.S_IREAD):
        # Already writable, so the failure had some other cause: re-raise.
        raise
    # convert to read/write
    os.chmod(path, stat.S_IWRITE)
    # use the original function to repeat the operation
    func(path)
def display_path(path):
    """Gives the display value for a given path, making it relative to cwd
    if possible."""
    normalized = os.path.normcase(os.path.abspath(path))
    cwd = os.getcwd()
    if normalized.startswith(cwd + os.path.sep):
        return '.' + normalized[len(cwd):]
    return normalized
def backup_dir(dir, ext='.bak'):
    """Figure out the name of a directory to back up the given dir to
    (adding .bak, .bak2, etc)"""
    attempt = 1
    suffix = ext
    # Keep numbering until we find a name that does not exist yet.
    while os.path.exists(dir + suffix):
        attempt += 1
        suffix = ext + str(attempt)
    return dir + suffix
def find_command(cmd, paths=None, pathext=None):
    """Searches the PATH for the given command and returns its path.

    :param cmd: command name, with or without extension
    :param paths: iterable of directories to search (defaults to $PATH)
    :param pathext: executable extensions to try (defaults to $PATHEXT)
    :return: full path of the command, or None if not found
    """
    if paths is None:
        # BUG FIX: the default must be a string — os.environ.get('PATH', [])
        # returned a list, and list.split(os.pathsep) raised AttributeError
        # whenever PATH was unset.
        paths = os.environ.get('PATH', '').split(os.pathsep)
    if isinstance(paths, string_types):
        paths = [paths]
    # check if there are funny path extensions for executables, e.g. Windows
    if pathext is None:
        pathext = os.environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD')
    pathext = [ext for ext in pathext.lower().split(os.pathsep)]
    # don't use extensions if the command ends with one of them
    if os.path.splitext(cmd)[1].lower() in pathext:
        pathext = ['']
    # check if we find the command on PATH
    for path in paths:
        # try without extension first
        cmd_path = os.path.join(path, cmd)
        for ext in pathext:
            # then including the extension
            cmd_path_ext = cmd_path + ext
            if os.path.isfile(cmd_path_ext):
                return cmd_path_ext
        if os.path.isfile(cmd_path):
            return cmd_path
    return None
def ask(message, options):
    """Ask the message interactively, with the given possible responses"""
    while 1:
        # Honor PIP_NO_INPUT so non-interactive runs fail loudly
        # instead of hanging on input.
        if os.environ.get('PIP_NO_INPUT'):
            raise Exception('No input was expected ($PIP_NO_INPUT set); question: %s' % message)
        response = raw_input(message)
        # Responses are matched case-insensitively, ignoring whitespace.
        response = response.strip().lower()
        if response not in options:
            print('Your response (%r) was not one of the expected responses: %s' % (
                response, ', '.join(options)))
        else:
            return response
class _Inf(object):
    """I am bigger than everything!"""
    def __cmp__(self, a):
        # NOTE: __cmp__ only has effect on Python 2; under Python 3 this
        # object would need __lt__/__gt__ (functools.total_ordering).
        if self is a:
            return 0
        return 1
    def __repr__(self):
        return 'Inf'
# Export a single shared sentinel and hide the class itself.
Inf = _Inf()
del _Inf
# Matches any character that is not an ASCII letter (case-insensitive).
_normalize_re = re.compile(r'[^a-z]', re.I)

def normalize_name(name):
    # Canonical project-name form: lower-case, every non-letter
    # character replaced by '-'.
    return _normalize_re.sub('-', name.lower())
def format_size(bytes):
    """Render a byte count as a short human-readable size string."""
    megabyte = 1000 * 1000
    if bytes > megabyte:
        return '%.1fMb' % (bytes / 1000.0 / 1000)
    if bytes > 10 * 1000:
        return '%iKb' % (bytes / 1000)
    if bytes > 1000:
        return '%.1fKb' % (bytes / 1000.0)
    return '%ibytes' % bytes
def is_installable_dir(path):
    """Return True if `path` is a directory containing a setup.py file."""
    return (os.path.isdir(path) and
            os.path.isfile(os.path.join(path, 'setup.py')))
def is_svn_page(html):
    """Returns true if the page appears to be the index page of an svn repository"""
    title_match = re.search(r'<title>[^<]*Revision \d+:', html)
    footer_match = re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I)
    # Truthy only when both the title and the "Powered by" footer match.
    return title_match and footer_match
def file_contents(filename):
    """Read *filename* as bytes and return its UTF-8 decoded contents."""
    with open(filename, 'rb') as fp:
        return fp.read().decode('utf-8')
def split_leading_dir(path):
    """Split *path* into (first component, remainder).

    Handles both '/' and '\\' separators, preferring whichever occurs
    first.  Leading separators are stripped before splitting.

    BUG FIX: the split branches previously returned a *list* (from
    str.split) while the fallback returned a tuple; all branches now
    consistently return a tuple.
    """
    path = str(path)
    path = path.lstrip('/').lstrip('\\')
    if '/' in path and (('\\' in path and path.find('/') < path.find('\\'))
                        or '\\' not in path):
        return tuple(path.split('/', 1))
    elif '\\' in path:
        return tuple(path.split('\\', 1))
    else:
        return path, ''
def has_leading_dir(paths):
    """Returns true if all the paths have the same leading path name
    (i.e., everything is in one subdirectory in an archive)"""
    common_prefix = None
    for path in paths:
        prefix, rest = split_leading_dir(path)
        # A path with no leading component disqualifies the whole set.
        if not prefix:
            return False
        if common_prefix is None:
            common_prefix = prefix
            continue
        if prefix != common_prefix:
            return False
    return True
def make_path_relative(path, rel_to):
    """
    Make a filename relative, where the filename path, and it is
    relative to rel_to

    >>> make_path_relative('/usr/share/something/a-file.pth',
    ...     '/usr/share/another-place/src/Directory')
    '../../../something/a-file.pth'
    >>> make_path_relative('/usr/share/something/a-file.pth',
    ...     '/home/user/src/Directory')
    '../../../usr/share/something/a-file.pth'
    >>> make_path_relative('/usr/share/a-file.pth', '/usr/share/')
    'a-file.pth'
    """
    path_filename = os.path.basename(path)
    path = os.path.dirname(path)
    path = os.path.normpath(os.path.abspath(path))
    rel_to = os.path.normpath(os.path.abspath(rel_to))
    path_parts = path.strip(os.path.sep).split(os.path.sep)
    rel_to_parts = rel_to.strip(os.path.sep).split(os.path.sep)
    # Drop the common ancestor components shared by both paths.
    while path_parts and rel_to_parts and path_parts[0] == rel_to_parts[0]:
        path_parts.pop(0)
        rel_to_parts.pop(0)
    # One '..' per remaining rel_to component, then descend into path.
    full_parts = ['..']*len(rel_to_parts) + path_parts + [path_filename]
    if full_parts == ['']:
        return '.' + os.path.sep
    return os.path.sep.join(full_parts)
def normalize_path(path):
    """
    Convert a path to its canonical, case-normalized, absolute version.
    """
    resolved = os.path.realpath(path)
    return os.path.normcase(resolved)
def splitext(path):
    """Like os.path.splitext, but take off .tar too"""
    base, ext = posixpath.splitext(path)
    # Fold a trailing '.tar' into the extension so 'x.tar.gz'
    # splits as ('x', '.tar.gz').
    if base.lower().endswith('.tar'):
        return base[:-4], base[-4:] + ext
    return base, ext
def renames(old, new):
    """Like os.renames(), but handles renaming across devices."""
    # Implementation borrowed from os.renames().
    head, tail = os.path.split(new)
    if head and tail and not os.path.exists(head):
        os.makedirs(head)
    # shutil.move (unlike os.rename) works across filesystems.
    shutil.move(old, new)
    head, tail = os.path.split(old)
    if head and tail:
        try:
            # Prune now-empty parent directories of the old location.
            os.removedirs(head)
        except OSError:
            pass
def is_local(path):
    """
    Return True if path is within sys.prefix, if we're running in a virtualenv.

    If we're not in a virtualenv, all paths are considered "local."
    """
    if not running_under_virtualenv():
        # Outside a virtualenv every path counts as local.
        return True
    prefix = normalize_path(sys.prefix)
    return normalize_path(path).startswith(prefix)
def dist_is_local(dist):
    """
    Return True if given Distribution object is installed locally
    (i.e. within current virtualenv).

    Always True if we're not in a virtualenv.
    """
    # Delegates to is_local() on the distribution's resolved location.
    return is_local(dist_location(dist))
def get_installed_distributions(local_only=True, skip=('setuptools', 'pip', 'python')):
    """
    Return a list of installed Distribution objects.

    If ``local_only`` is True (default), only return installations
    local to the current virtualenv, if in a virtualenv.

    ``skip`` argument is an iterable of lower-case project names to
    ignore; defaults to ('setuptools', 'pip', 'python'). [FIXME also
    skip virtualenv?]
    """
    if local_only:
        local_test = dist_is_local
    else:
        def local_test(d):
            # Everything counts as local when the filter is disabled.
            return True
    return [d for d in pkg_resources.working_set
            if local_test(d) and d.key not in skip]
def egg_link_path(dist):
    """
    Return the path where we'd expect to find a .egg-link file for
    this distribution. (There doesn't seem to be any metadata in the
    Distribution object for a develop egg that points back to its
    .egg-link and easy-install.pth files).

    This won't find a globally-installed develop egg if we're in a
    virtualenv.
    """
    return os.path.join(site_packages, dist.project_name) + '.egg-link'
def dist_location(dist):
    """
    Get the site-packages location of this distribution. Generally
    this is dist.location, except in the case of develop-installed
    packages, where dist.location is the source code location, and we
    want to know where the egg-link file is.
    """
    egg_link = egg_link_path(dist)
    # Develop-installed package: the egg-link file is the real location.
    if os.path.exists(egg_link):
        return egg_link
    return dist.location
def get_terminal_size():
    """Returns a tuple (x, y) representing the width(x) and the height(y)
    in characters of the terminal window.

    Tries the TIOCGWINSZ ioctl on stdin/stdout/stderr, then on the
    controlling terminal, and finally falls back to the LINES/COLUMNS
    environment variables (defaults 25x80).
    """
    def ioctl_GWINSZ(fd):
        # Query the terminal driver for (rows, cols); None when fd is not
        # a tty or the platform lacks fcntl/termios.
        try:
            import fcntl
            import termios
            import struct
            cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ,
                                                 '1234'))
        except Exception:
            return None
        # BUG FIX: the original checked `cr == (0, 0)` twice in a row;
        # a single check suffices.
        if cr == (0, 0):
            return None
        return cr
    cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
    if not cr:
        try:
            fd = os.open(os.ctermid(), os.O_RDONLY)
            cr = ioctl_GWINSZ(fd)
            os.close(fd)
        except Exception:
            pass
    if not cr:
        cr = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))
    # cr is (rows, cols); callers expect (width, height).
    return int(cr[1]), int(cr[0])
def unzip_file(filename, location, flatten=True):
    """Unzip the file (zip file located at filename) to the destination
    location"""
    # Make sure the target directory exists before extracting.
    if not os.path.exists(location):
        os.makedirs(location)
    zipfp = open(filename, 'rb')
    try:
        zip = zipfile.ZipFile(zipfp)
        # If the whole archive lives inside one top-level directory,
        # strip that directory from every member (unless flatten=False).
        leading = has_leading_dir(zip.namelist()) and flatten
        for name in zip.namelist():
            data = zip.read(name)
            fn = name
            if leading:
                fn = split_leading_dir(name)[1]
            fn = os.path.join(location, fn)
            dir = os.path.dirname(fn)
            if not os.path.exists(dir):
                os.makedirs(dir)
            # Zip archives mark directories with a trailing separator.
            if fn.endswith('/') or fn.endswith('\\'):
                # A directory
                if not os.path.exists(fn):
                    os.makedirs(fn)
            else:
                fp = open(fn, 'wb')
                try:
                    fp.write(data)
                finally:
                    fp.close()
    finally:
        zipfp.close()
def untar_file(filename, location):
    """Untar the file (tar file located at filename) to the destination location"""
    # Make sure the target directory exists before extracting.
    if not os.path.exists(location):
        os.makedirs(location)
    # Pick the tarfile mode from the file extension; fall back to
    # transparent compression detection ('r:*') with a warning.
    if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):
        mode = 'r:gz'
    elif filename.lower().endswith('.bz2') or filename.lower().endswith('.tbz'):
        mode = 'r:bz2'
    elif filename.lower().endswith('.tar'):
        mode = 'r'
    else:
        logger.warn('Cannot determine compression type for file %s' % filename)
        mode = 'r:*'
    tar = tarfile.open(filename, mode)
    try:
        # note: python<=2.5 doesnt seem to know about pax headers, filter them
        leading = has_leading_dir([
            member.name for member in tar.getmembers()
            if member.name != 'pax_global_header'
        ])
        for member in tar.getmembers():
            fn = member.name
            if fn == 'pax_global_header':
                continue
            # Strip the shared top-level directory, if any.
            if leading:
                fn = split_leading_dir(fn)[1]
            path = os.path.join(location, fn)
            if member.isdir():
                if not os.path.exists(path):
                    os.makedirs(path)
            else:
                try:
                    fp = tar.extractfile(member)
                except (KeyError, AttributeError):
                    e = sys.exc_info()[1]
                    # Some corrupt tar files seem to produce this
                    # (specifically bad symlinks)
                    logger.warn(
                        'In the tar file %s the member %s is invalid: %s'
                        % (filename, member.name, e))
                    continue
                if not os.path.exists(os.path.dirname(path)):
                    os.makedirs(os.path.dirname(path))
                destfp = open(path, 'wb')
                try:
                    shutil.copyfileobj(fp, destfp)
                finally:
                    destfp.close()
                fp.close()
    finally:
        tar.close()
def create_download_cache_folder(folder):
    # Temporarily dedent the log output for this top-level notice.
    logger.indent -= 2
    logger.notify('Creating supposed download cache at %s' % folder)
    logger.indent += 2
    os.makedirs(folder)
def cache_download(target_file, temp_location, content_type):
    # Copy the downloaded file into the cache, record its content type in a
    # sidecar '.content-type' file, and remove the temporary copy.
    logger.notify('Storing download in cache at %s' % display_path(target_file))
    shutil.copyfile(temp_location, target_file)
    fp = open(target_file+'.content-type', 'w')
    fp.write(content_type)
    fp.close()
    os.unlink(temp_location)
def unpack_file(filename, location, content_type, link):
    # Dispatch on content type / extension / magic bytes: zip, tar, or an
    # svn index page (checked out via the vcs backend).
    if (content_type == 'application/zip'
        or filename.endswith('.zip')
        or filename.endswith('.pybundle')
        or zipfile.is_zipfile(filename)):
        # pybundles must keep their internal directory layout intact.
        unzip_file(filename, location, flatten=not filename.endswith('.pybundle'))
    elif (content_type == 'application/x-gzip'
          or tarfile.is_tarfile(filename)
          or splitext(filename)[1].lower() in ('.tar', '.tar.gz', '.tar.bz2', '.tgz', '.tbz')):
        untar_file(filename, location)
    elif (content_type and content_type.startswith('text/html')
          and is_svn_page(file_contents(filename))):
        # We don't really care about this
        from pip.vcs.subversion import Subversion
        Subversion('svn+' + link.url).unpack(location)
    else:
        ## FIXME: handle?
        ## FIXME: magic signatures?
        logger.fatal('Cannot unpack file %s (downloaded from %s, content-type: %s); cannot detect archive format'
                     % (filename, location, content_type))
        raise InstallationError('Cannot determine archive format of %s' % location)
| |
import functools
import imp
import mock
import sys
from django.core.management.base import CommandError
from django.db import models
from django.test import TestCase, TransactionTestCase
def _raise(exc_info):
    # Re-raise preserving the original traceback (Python 2 three-argument
    # raise; the Python 3 equivalent is exc_info[1].with_traceback(exc_info[2])).
    raise exc_info[0], exc_info[1], exc_info[2]
def _cleanup(*models):
    """ Function to delete models from the AppCache and remove them from
    the models module in which they're defined.

    :param models: fully-qualified names ('module.ClassName') of the models
        that must be removed; raises AssertionError if the set actually
        deleted differs.
    """
    from django.db.models.loading import cache
    deleted = []
    # Note that we want to use the import lock here - the app loading is
    # in many cases initiated implicitly by importing, and thus it is
    # possible to end up in deadlock when one thread initiates loading
    # without holding the importer lock and another thread then tries to
    # import something which also launches the app loading. For details of
    # this situation see Django bug #18251.
    imp.acquire_lock()
    try:
        # NOTE: deleting from model_dict while iterating .items() is safe
        # on Python 2, where .items() returns a list snapshot.
        for app_label, model_dict in cache.app_models.items():
            for django_name, klass in model_dict.items():
                name = '{}.{}'.format(klass.__module__, klass.__name__)
                if name in models:
                    module = sys.modules[klass.__module__]
                    delattr(module, klass.__name__)
                    del model_dict[django_name]
                    deleted.append(name)
        if sorted(deleted) != sorted(models):
            expected = ', '.join(models)
            expected = expected if expected else '(none)'
            actual = ', '.join(deleted)
            actual = actual if actual else '(none)'
            raise AssertionError(
                'Expected to delete {}, actually deleted {}'.format(
                    expected,
                    actual))
        # Reset a load of state variables in the app cache
        cache.loaded = False
        cache._get_models_cache = {}
        cache.handled = {}
        cache.postponed = []
        cache.nesting_level = 0
        cache._populate()
    finally:
        imp.release_lock()
def cleanup_models(*models):
    """ Decorator that declares a test method generates test models that
    will need cleaning up.

    The decorator then cleans up the Django AppCache after the test
    has run, to prevent the test framework's attempts to flush the (then)
    non-existant model.

    The fully-qualified names of the test models should be passed.
    """
    def outer(func):
        @functools.wraps(func)
        def wrapped(self, *args, **kw):
            exc_info = None
            try:
                try:
                    func(self, *args, **kw)
                except:
                    # Remember the test failure; it is re-raised after cleanup.
                    exc_info = sys.exc_info()
            finally:
                try:
                    _cleanup(*models)
                except:
                    # OK, we got an error cleaning up. If the actual test
                    # passed, then we're happy to raise this cleanup exception.
                    # Otherwise, we raise the original exception - the
                    # cleanup one might get fixed when the test itself is
                    # fixed. Just allowing the cleanup exception to bubble
                    # up masks the problem.
                    cleanup_exc_info = sys.exc_info()
                    _raise(exc_info) if exc_info else _raise(cleanup_exc_info)
                else:
                    if exc_info:
                        _raise(exc_info)
        return wrapped
    return outer
class PartitionedModelTests(TestCase):

    def test_base_model_require_abstract(self):
        """ When a model has a PartitionManager, it must be declared
        abstract
        """
        from parting import PartitionManager
        # Defining the non-abstract model should fail at class creation.
        with self.assertRaises(AssertionError):
            class BadModel(models.Model):
                objects = PartitionManager()
class PartitionForeignKeyTests(TestCase):
    def test_must_be_abstract(self):
        """ Any model featuring a Partition foreign key must itself be
        abstract, otherwise the fk constraints won't work.
        """
        from parting import PartitionManager, PartitionForeignKey

        class PartitionModel(models.Model):
            objects = PartitionManager()

            class Meta:
                abstract = True

        # A concrete model with a PartitionForeignKey must be rejected.
        with self.assertRaises(AssertionError):
            class ChildPartitionModel(models.Model):
                parent = PartitionForeignKey(PartitionModel)
    @cleanup_models(
        'parting.tests.PartitionModel_foo',
        'parting.tests.ChildPartitionModel_foo')
    def test_child_partions_generated(self):
        """ When a parent partition is generated, child partitions (as
        determined) by PartitionForeignKey relationships) should also be
        generated.
        """
        from parting import PartitionManager, PartitionForeignKey

        class PartitionModel(models.Model):
            objects = PartitionManager()

            class Meta:
                abstract = True

        class ChildPartitionModel(models.Model):
            parent = PartitionForeignKey(PartitionModel)
            objects = PartitionManager()

            class Meta:
                abstract = True

        # Generating the parent partition should cause a child partition
        # to also be created, with the same key.
        PartitionModel.objects.get_partition('foo')
        child_partition = ChildPartitionModel.objects.get_partition('foo')
        self.assertTrue(child_partition is not None)
def test_multiple_fks_bad(self):
""" If there are multiple PartitionForeignKeys, they must all point
to the same model. This keeps everything simpler """
from parting import PartitionManager, PartitionForeignKey
class ParentModel1(models.Model):
objects = PartitionManager()
class Meta:
abstract = True
class ParentModel2(models.Model):
objects = PartitionManager()
class Meta:
abstract = True
with self.assertRaises(AssertionError):
class ChildPartitionModel(models.Model):
parent_1 = PartitionForeignKey(ParentModel1)
parent_2 = PartitionForeignKey(ParentModel2)
class Meta:
abstract = True
@cleanup_models(
'parting.tests.ParentModel_foo',
'parting.tests.ChildPartitionModel_foo')
def test_multiple_fks_good(self):
""" If there are multiple PartitionForeignKeys, they must all point
to the same model. This keeps everything simpler """
from parting import PartitionManager, PartitionForeignKey
class ParentModel(models.Model):
objects = PartitionManager()
class Meta:
abstract = True
class ChildPartitionModel(models.Model):
parent_1 = PartitionForeignKey(
ParentModel,
related_name='parent_1_set')
parent_2 = PartitionForeignKey(
ParentModel,
related_name='parent_2_set')
objects = PartitionManager()
class Meta:
abstract = True
p = ParentModel.objects.get_partition('foo')
c = ChildPartitionModel.objects.get_partition('foo')
self.assertEqual(p, c._meta.get_field('parent_1').rel.to)
self.assertEqual(p, c._meta.get_field('parent_2').rel.to)
# Check that there are no PartitionForeignKey instances hanging
# around in the child's _meta
for field in c._meta.fields:
self.failIf(isinstance(field, PartitionForeignKey))
for field in c._meta.local_fields:
self.failIf(isinstance(field, PartitionForeignKey))
for field, model in c._meta.get_fields_with_model():
self.failIf(isinstance(field, PartitionForeignKey))
class PartitionTests(TestCase):
    """Creating and looking up generated partition models via the manager."""

    @cleanup_models('testapp.models.Tweet_foo', 'testapp.models.Star_foo')
    def test_get_partition(self):
        """ Check that once a partition is generated, we can fetch it
        with get_partition
        """
        from testapp.models import Star, Tweet
        expected_partition = Tweet.partitions.get_partition('foo')
        assert expected_partition
        # A second lookup with the same key must return the same model class.
        partition = Tweet.partitions.get_partition('foo')
        self.assertEqual(expected_partition, partition)
        # We should also now be able to get the 'foo' partition for Star,
        # and its FK should point to the Tweet partition
        star_partition = Star.partitions.get_partition('foo')
        fk = star_partition._meta.get_field('tweet')
        self.assertEqual(partition, fk.rel.to)
        # We should also find that our custom manager is in place
        self.assertTrue(hasattr(partition.objects, 'my_custom_method'))

    def test_get_missing_partition(self):
        """ Attempting to fetch a missing partition will just return None
        (mirroring the behaviour of Django's get_model), as long as we don't
        auto-create
        """
        from testapp.models import Tweet
        self.assertEqual(
            None,
            Tweet.partitions.get_partition('foo', create=False)
        )

    @cleanup_models('testapp.models.Tweet_foo')
    def test_no_overwrite(self):
        """ Check that we don't overwrite
        """
        from testapp.models import Tweet
        import testapp.models
        # Plant a sentinel at the attribute name the generated partition
        # would occupy, so generation must refuse to clobber it.
        testapp.models.Tweet_foo = object()
        with self.assertRaises(AttributeError):
            Tweet.partitions.get_partition('foo')

    @cleanup_models('testapp.models.Tweet_foo', 'testapp.models.Star_foo')
    def test_get_field_by_name(self):
        """ Check that get_field_by_name on a foreign key that was
        generated from a PartitionForeignKey returns a real FK, not the
        PFK.
        """
        from testapp.models import Star, Tweet
        Tweet.partitions.get_partition('foo')
        star_partition = Star.partitions.get_partition('foo')
        fk, _, _, _ = star_partition._meta.get_field_by_name('tweet')
        self.assertTrue(isinstance(fk, models.ForeignKey))

    @cleanup_models('testapp.models.Tweet_foo', 'testapp.models.Star_foo')
    def test_get_partition_key(self):
        """ Check that we can find out what the partition key a model was
        generated from. This can be useful if an application knows that
        a number of related models were generated using the same key.
        """
        from testapp.models import Star, Tweet
        from parting.models import get_partition_key
        tweet_partition = Tweet.partitions.get_partition('foo')
        star_partition = Star.partitions.get_partition('foo')
        self.assertEqual('foo', get_partition_key(tweet_partition))
        self.assertEqual('foo', get_partition_key(star_partition))
class CommandTests(TransactionTestCase):
    """Tests for the ensure_partition management command."""

    def setUp(self):
        from django.db import connection
        # These tests assume an empty schema: check_tables() below drops
        # whatever tables it finds, so start by asserting there are none.
        tables = set(connection.introspection.table_names())
        self.failIf(tables)

    def _run(self, *args, **kwargs):
        # Invoke the command class directly rather than via call_command, so
        # option parsing is bypassed and kwargs map straight onto handle().
        from parting.management.commands import ensure_partition
        command = ensure_partition.Command()
        command.handle(*args, **kwargs)

    def check_tables(self, *names):
        """ Check the named tables exist in the database, and clean them
        up if they do
        """
        from django.db import connection
        names = set(names)
        tables = set(connection.introspection.table_names())
        missing_tables = names - tables
        if missing_tables:
            self.fail(
                'The following tables are missing: {}'.format(
                    ', '.join(t for t in missing_tables)))
        # Yay hack!
        # NOTE(review): this drops *every* table found, not just `names`;
        # it relies on setUp() having asserted the schema started empty.
        cursor = connection.cursor()
        for name in tables:
            cursor.execute(
                'DROP TABLE {}'.format(
                    connection.ops.quote_name(name)
                ))

    def test_missing_model(self):
        """ The command requires at least 1 argument, a model
        """
        with self.assertRaises(CommandError):
            self._run()

    def test_both_current_next(self):
        """ Check we can't specify both current and next """
        with self.assertRaises(CommandError):
            self._run(
                'testapp.models.Tweet',
                current_only=True,
                next_only=True)

    def test_ensure_names(self):
        """ Check that we can pass an explicit model and partition key,
        and the tables will appear
        """
        self._run('testapp.models.Tweet', 'foo')
        # The Star partition table appears too, via the PartitionForeignKey.
        self.check_tables('testapp_tweet_foo', 'testapp_star_foo')

    @cleanup_models('testapp.models.Tweet_baz', 'testapp.models.Star_baz')
    @mock.patch('testapp.models.TweetPartitionManager.current_partition_key')
    def test_current_partition(self, current_partition_key):
        """ Check that we can pass --current and the current partition will
        be created """
        current_partition_key.return_value = 'baz'
        self._run('testapp.models.Tweet', current_only=True)
        self.check_tables('testapp_tweet_baz', 'testapp_star_baz')

    @cleanup_models('testapp.models.Tweet_baz', 'testapp.models.Star_baz')
    @mock.patch('testapp.models.TweetPartitionManager.next_partition_key')
    def test_next_partition(self, next_partition_key):
        # As test_current_partition, but exercising the next_only path.
        next_partition_key.return_value = 'baz'
        self._run('testapp.models.Tweet', next_only=True)
        self.check_tables('testapp_tweet_baz', 'testapp_star_baz')

    @cleanup_models(
        'testapp.models.Tweet_baz',
        'testapp.models.Star_baz',
        'testapp.models.Tweet_foo',
        'testapp.models.Star_foo',
    )
    @mock.patch('testapp.models.TweetPartitionManager.current_partition_key')
    @mock.patch('testapp.models.TweetPartitionManager.next_partition_key')
    def test_no_switches(self, next_partition_key, current_partition_key):
        """ If we pass no switches, then the current and next partitions will
        be created. """
        current_partition_key.return_value = 'foo'
        next_partition_key.return_value = 'baz'
        self._run('testapp.models.Tweet')
        self.check_tables(
            'testapp_tweet_baz',
            'testapp_star_baz',
            'testapp_tweet_foo',
            'testapp_star_foo',
        )

    def test_bad_model(self):
        """ Check that a non-existant model causes a CommandError """
        with self.assertRaises(CommandError):
            self._run('doesnotexist')
| |
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for Chromium.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import re
import subprocess
import sys
# Paths exempted from the checks in this file (mirrored/third-party or
# generated code).  Patterns are applied with input_api.re; [\\\/] accepts
# either path separator so they work on Windows and POSIX checkouts alike.
_EXCLUDED_PATHS = (
    r"^breakpad[\\\/].*",
    r"^native_client_sdk[\\\/]src[\\\/]build_tools[\\\/]make_rules.py",
    r"^native_client_sdk[\\\/]src[\\\/]build_tools[\\\/]make_simple.py",
    r"^native_client_sdk[\\\/]src[\\\/]tools[\\\/].*.mk",
    r"^net[\\\/]tools[\\\/]spdyshark[\\\/].*",
    r"^skia[\\\/].*",
    r"^v8[\\\/].*",
    r".*MakeFile$",
    r".+_autogen\.h$",
    r"^cc[\\\/].*",
    r"^webkit[\\\/]compositor_bindings[\\\/].*",
    r".+[\\\/]pnacl_shim\.c$",
)

# Fragment of a regular expression that matches file name suffixes
# used to indicate different platforms.
_PLATFORM_SPECIFIERS = r'(_(android|chromeos|gtk|mac|posix|win))?'

# Fragment of a regular expression that matches C++ and Objective-C++
# implementation files.
_IMPLEMENTATION_EXTENSIONS = r'\.(cc|cpp|cxx|mm)$'

# Regular expression that matches code only used for test binaries
# (best effort).
_TEST_CODE_EXCLUDED_PATHS = (
    r'.*[/\\](fake_|test_|mock_).+%s' % _IMPLEMENTATION_EXTENSIONS,
    r'.+_test_(base|support|util)%s' % _IMPLEMENTATION_EXTENSIONS,
    r'.+_(api|browser|perf|unit|ui)?test%s%s' % (_PLATFORM_SPECIFIERS,
                                                 _IMPLEMENTATION_EXTENSIONS),
    r'.+profile_sync_service_harness%s' % _IMPLEMENTATION_EXTENSIONS,
    r'.*[/\\](test|tool(s)?)[/\\].*',
    # At request of folks maintaining this folder.
    r'chrome[/\\]browser[/\\]automation[/\\].*',
)

# Message emitted by _CheckNoProductionCodeUsingTestOnlyFunctions.
_TEST_ONLY_WARNING = (
    'You might be calling functions intended only for testing from\n'
    'production code. It is OK to ignore this warning if you know what\n'
    'you are doing, as the heuristics used to detect the situation are\n'
    'not perfect. The commit queue will not block on this warning.\n'
    'Email joi@chromium.org if you have questions.')

# Message emitted by _CheckIncludeOrder.
_INCLUDE_ORDER_WARNING = (
    'Your #include order seems to be broken. Send mail to\n'
    'marja@chromium.org if this is not the case.')
# Banned Objective-C patterns.  Each entry is a tuple of:
#   (substring searched for in each changed line,
#    tuple of message lines shown to the author,
#    error flag -- True yields a presubmit error, False only a warning).
# Consumed by _CheckNoBannedFunctions for .mm/.m/.h files.
_BANNED_OBJC_FUNCTIONS = (
    (
      'addTrackingRect:',
      (
       # The trailing space matters: the next literal is implicitly
       # concatenated, and without it the message read "...isprohibited.".
       'The use of -[NSView addTrackingRect:owner:userData:assumeInside:] is '
       'prohibited. Please use CrTrackingArea instead.',
       'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
      ),
      False,
    ),
    (
      'NSTrackingArea',
      (
       'The use of NSTrackingAreas is prohibited. Please use CrTrackingArea',
       'instead.',
       'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
      ),
      False,
    ),
    (
      'convertPointFromBase:',
      (
       'The use of -[NSView convertPointFromBase:] is almost certainly wrong.',
       'Please use |convertPoint:(point) fromView:nil| instead.',
       'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
      ),
      True,
    ),
    (
      'convertPointToBase:',
      (
       'The use of -[NSView convertPointToBase:] is almost certainly wrong.',
       'Please use |convertPoint:(point) toView:nil| instead.',
       'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
      ),
      True,
    ),
    (
      'convertRectFromBase:',
      (
       'The use of -[NSView convertRectFromBase:] is almost certainly wrong.',
       'Please use |convertRect:(point) fromView:nil| instead.',
       'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
      ),
      True,
    ),
    (
      'convertRectToBase:',
      (
       'The use of -[NSView convertRectToBase:] is almost certainly wrong.',
       'Please use |convertRect:(point) toView:nil| instead.',
       'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
      ),
      True,
    ),
    (
      'convertSizeFromBase:',
      (
       'The use of -[NSView convertSizeFromBase:] is almost certainly wrong.',
       'Please use |convertSize:(point) fromView:nil| instead.',
       'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
      ),
      True,
    ),
    (
      'convertSizeToBase:',
      (
       'The use of -[NSView convertSizeToBase:] is almost certainly wrong.',
       'Please use |convertSize:(point) toView:nil| instead.',
       'http://dev.chromium.org/developers/coding-style/cocoa-dos-and-donts',
      ),
      True,
    ),
)
# Banned C++ patterns.  Each entry is a tuple of:
#   (substring searched for in each changed line,
#    tuple of message lines shown to the author,
#    error flag -- True yields a presubmit error, False only a warning,
#    tuple of path regexes exempted from the ban).
# Consumed by _CheckNoBannedFunctions for .cc/.mm/.h files.
_BANNED_CPP_FUNCTIONS = (
    # Make sure that gtest's FRIEND_TEST() macro is not used; the
    # FRIEND_TEST_ALL_PREFIXES() macro from base/gtest_prod_util.h should be
    # used instead since that allows for FLAKY_ and DISABLED_ prefixes.
    (
      'FRIEND_TEST(',
      (
       'Chromium code should not use gtest\'s FRIEND_TEST() macro. Include',
       'base/gtest_prod_util.h and use FRIEND_TEST_ALL_PREFIXES() instead.',
      ),
      False,
      (),
    ),
    (
      'ScopedAllowIO',
      (
       'New code should not use ScopedAllowIO. Post a task to the blocking',
       'pool or the FILE thread instead.',
      ),
      True,
      (
        r"^content[\\\/]shell[\\\/]shell_browser_main\.cc$",
      ),
    ),
)
def _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api):
  """Attempts to prevent use of functions intended only for testing in
  non-testing code. For now this is just a best-effort implementation
  that ignores header files and may have some false positives. A
  better implementation would probably need a proper C++ parser.
  """
  # We only scan .cc files and the like, as the declaration of
  # for-testing functions in header files are hard to distinguish from
  # calls to such functions without a proper C++ parser.
  file_inclusion_pattern = r'.+%s' % _IMPLEMENTATION_EXTENSIONS

  base_function_pattern = r'ForTest(ing)?|for_test(ing)?'
  inclusion_pattern = input_api.re.compile(r'(%s)\s*\(' % base_function_pattern)
  # Qualified names (Foo::BarForTest) and definitions (a '{' before any ';')
  # are not call sites and are excluded.
  exclusion_pattern = input_api.re.compile(
    r'::[A-Za-z0-9_]+(%s)|(%s)[^;]+\{' % (
      base_function_pattern, base_function_pattern))

  def FilterFile(affected_file):
    black_list = (_EXCLUDED_PATHS +
                  _TEST_CODE_EXCLUDED_PATHS +
                  input_api.DEFAULT_BLACK_LIST)
    return input_api.FilterSourceFile(
      affected_file,
      white_list=(file_inclusion_pattern, ),
      black_list=black_list)

  problems = []
  for f in input_api.AffectedSourceFiles(FilterFile):
    local_path = f.LocalPath()
    # Enumerate from 1 so reported line numbers are 1-based.  The previous
    # manual counter was incremented only *after* recording a problem, so
    # every reported line number was off by one (0-based).
    for line_number, line in enumerate(input_api.ReadFile(f).splitlines(), 1):
      if (inclusion_pattern.search(line) and
          not exclusion_pattern.search(line)):
        problems.append(
          '%s:%d\n    %s' % (local_path, line_number, line.strip()))

  if problems:
    if not input_api.is_committing:
      return [output_api.PresubmitPromptWarning(_TEST_ONLY_WARNING, problems)]
    else:
      # We don't warn on commit, to avoid stopping commits going through CQ.
      return [output_api.PresubmitNotifyResult(_TEST_ONLY_WARNING, problems)]
  else:
    return []
def _CheckNoIOStreamInHeaders(input_api, output_api):
  """Checks to make sure no .h files include <iostream>."""
  pattern = input_api.re.compile(r'^#include\s*<iostream>',
                                 input_api.re.MULTILINE)
  # Collect every affected header whose contents pull in <iostream>; the
  # ReadFile call is short-circuited for non-header files.
  offenders = [f for f in input_api.AffectedSourceFiles(
                   input_api.FilterSourceFile)
               if f.LocalPath().endswith('.h') and
                  pattern.search(input_api.ReadFile(f))]
  if not offenders:
    return []
  return [ output_api.PresubmitError(
      'Do not #include <iostream> in header files, since it inserts static '
      'initialization into every file including the header. Instead, '
      '#include <ostream>. See http://crbug.com/94794',
      offenders) ]
def _CheckNoUNIT_TESTInSourceFiles(input_api, output_api):
  """Checks to make sure no source files use UNIT_TEST"""
  offending_lines = []
  for affected in input_api.AffectedFiles():
    path = affected.LocalPath()
    if not path.endswith(('.cc', '.mm')):
      continue
    offending_lines.extend(
        '  %s:%d' % (path, num)
        for num, text in affected.ChangedContents()
        if 'UNIT_TEST' in text)

  if offending_lines:
    return [output_api.PresubmitPromptWarning(
        'UNIT_TEST is only for headers.\n' + '\n'.join(offending_lines))]
  return []
def _CheckNoNewWStrings(input_api, output_api):
  """Checks to make sure we don't introduce use of wstrings."""
  problems = []
  for f in input_api.AffectedFiles():
    # Only C++ sources/headers, and not test implementation files.
    if (not f.LocalPath().endswith(('.cc', '.h')) or
        f.LocalPath().endswith('test.cc')):
      continue

    allow_wstring = False
    for line_num, line in f.ChangedContents():
      if 'presubmit: allow wstring' in line:
        # The marker exempts only the line that immediately follows it.
        allow_wstring = True
        continue
      if not allow_wstring and 'wstring' in line:
        problems.append('  %s:%d' % (f.LocalPath(), line_num))
      # Any non-marker line consumes the exemption.  (The original code
      # also re-assigned the flag inside the problem branch, where it was
      # already False -- a dead store removed here.)
      allow_wstring = False

  if not problems:
    return []
  return [output_api.PresubmitPromptWarning('New code should not use wstrings.'
      '  If you are calling a cross-platform API that accepts a wstring, '
      'fix the API.\n' +
      '\n'.join(problems))]
def _CheckNoDEPSGIT(input_api, output_api):
  """Make sure .DEPS.git is never modified manually."""
  touched = (f for f in input_api.AffectedFiles()
             if f.LocalPath().endswith('.DEPS.git'))
  if any(touched):
    return [output_api.PresubmitError(
      'Never commit changes to .DEPS.git. This file is maintained by an\n'
      'automated system based on what\'s in DEPS and your changes will be\n'
      'overwritten.\n'
      'See http://code.google.com/p/chromium/wiki/UsingNewGit#Rolling_DEPS\n'
      'for more information')]
  return []
def _CheckNoBannedFunctions(input_api, output_api):
  """Make sure that banned functions are not used."""
  warnings = []
  errors = []

  def _RecordProblem(f, line_num, message, error):
    # Route the hit to the error or warning bucket and format it the same
    # way for both (previously duplicated inline, with stray semicolons).
    problems = errors if error else warnings
    problems.append('    %s:%d:' % (f.LocalPath(), line_num))
    for message_line in message:
      problems.append('      %s' % message_line)

  def _IsBlacklisted(affected_file, blacklist):
    # Hoisted out of the innermost loop, where it was re-defined per line.
    local_path = affected_file.LocalPath()
    return any(input_api.re.match(item, local_path) for item in blacklist)

  # Objective-C bans apply to .mm/.m implementation files and headers.
  file_filter = lambda f: f.LocalPath().endswith(('.mm', '.m', '.h'))
  for f in input_api.AffectedFiles(file_filter=file_filter):
    for line_num, line in f.ChangedContents():
      for func_name, message, error in _BANNED_OBJC_FUNCTIONS:
        if func_name in line:
          _RecordProblem(f, line_num, message, error)

  # C++ bans apply to .cc/.mm files and headers, with per-ban path exemptions.
  file_filter = lambda f: f.LocalPath().endswith(('.cc', '.mm', '.h'))
  for f in input_api.AffectedFiles(file_filter=file_filter):
    for line_num, line in f.ChangedContents():
      for func_name, message, error, excluded_paths in _BANNED_CPP_FUNCTIONS:
        if _IsBlacklisted(f, excluded_paths):
          continue
        if func_name in line:
          _RecordProblem(f, line_num, message, error)

  result = []
  if warnings:
    result.append(output_api.PresubmitPromptWarning(
        'Banned functions were used.\n' + '\n'.join(warnings)))
  if errors:
    result.append(output_api.PresubmitError(
        'Banned functions were used.\n' + '\n'.join(errors)))
  return result
def _CheckNoPragmaOnce(input_api, output_api):
  """Make sure that header files do not use #pragma once (use include
  guards instead).

  The previous docstring was copy-pasted from the banned-functions check.
  """
  files = []
  pattern = input_api.re.compile(r'^#pragma\s+once',
                                 input_api.re.MULTILINE)
  for f in input_api.AffectedSourceFiles(input_api.FilterSourceFile):
    if not f.LocalPath().endswith('.h'):
      continue
    contents = input_api.ReadFile(f)
    if pattern.search(contents):
      files.append(f)

  if files:
    return [output_api.PresubmitError(
        'Do not use #pragma once in header files.\n'
        'See http://www.chromium.org/developers/coding-style#TOC-File-headers',
        files)]
  return []
def _CheckNoTrinaryTrueFalse(input_api, output_api):
  """Checks to make sure we don't introduce use of foo ? true : false."""
  trinary_re = input_api.re.compile(r'\?\s*(true|false)\s*:\s*(true|false)')
  checked_suffixes = ('.cc', '.h', '.inl', '.m', '.mm')
  hits = []
  for f in input_api.AffectedFiles():
    if not f.LocalPath().endswith(checked_suffixes):
      continue
    hits.extend('  %s:%d' % (f.LocalPath(), num)
                for num, text in f.ChangedContents()
                if trinary_re.match(text))

  if hits:
    return [output_api.PresubmitPromptWarning(
        'Please consider avoiding the "? true : false" pattern if possible.\n'
        + '\n'.join(hits))]
  return []
def _CheckUnwantedDependencies(input_api, output_api):
  """Runs checkdeps on #include statements added in this
  change. Breaking - rules is an error, breaking ! rules is a
  warning.
  """
  # We need to wait until we have an input_api object and use this
  # roundabout construct to import checkdeps because this file is
  # eval-ed and thus doesn't have __file__.
  original_sys_path = sys.path
  try:
    sys.path = sys.path + [input_api.os_path.join(
        input_api.PresubmitLocalPath(), 'tools', 'checkdeps')]
    import checkdeps
    from cpp_checker import CppChecker
    from rules import Rule
  finally:
    # Restore sys.path to what it was before.
    sys.path = original_sys_path

  # Pair each affected C++ file path with the lines added/changed in it;
  # checkdeps only needs the new lines, not the whole file.
  added_includes = []
  for f in input_api.AffectedFiles():
    if not CppChecker.IsCppFile(f.LocalPath()):
      continue

    changed_lines = [line for line_num, line in f.ChangedContents()]
    added_includes.append([f.LocalPath(), changed_lines])

  deps_checker = checkdeps.DepsChecker()

  # DISALLOW ('-') rule breaks are errors; everything else is a warning.
  error_descriptions = []
  warning_descriptions = []
  for path, rule_type, rule_description in deps_checker.CheckAddedCppIncludes(
      added_includes):
    description_with_path = '%s\n    %s' % (path, rule_description)
    if rule_type == Rule.DISALLOW:
      error_descriptions.append(description_with_path)
    else:
      warning_descriptions.append(description_with_path)

  results = []
  if error_descriptions:
    results.append(output_api.PresubmitError(
        'You added one or more #includes that violate checkdeps rules.',
        error_descriptions))
  if warning_descriptions:
    if not input_api.is_committing:
      warning_factory = output_api.PresubmitPromptWarning
    else:
      # We don't want to block use of the CQ when there is a warning
      # of this kind, so we only show a message when committing.
      warning_factory = output_api.PresubmitNotifyResult
    results.append(warning_factory(
        'You added one or more #includes of files that are temporarily\n'
        'allowed but being removed. Can you avoid introducing the\n'
        '#include? See relevant DEPS file(s) for details and contacts.',
        warning_descriptions))
  return results
def _CheckFilePermissions(input_api, output_api):
  """Check that all files have their permissions properly set."""
  args = [sys.executable, 'tools/checkperms/checkperms.py', '--root',
          input_api.change.RepositoryRoot()]
  for f in input_api.AffectedFiles():
    args += ['--file', f.LocalPath()]
  # checkperms.py reports problems on stdout.  Without stdout=PIPE,
  # communicate() returns (None, None), so `errors` was always falsy and
  # failures were silently ignored.
  errors, _ = subprocess.Popen(args, stdout=subprocess.PIPE).communicate()
  results = []
  if errors:
    results.append(output_api.PresubmitError('checkperms.py failed.',
                                             errors))
  return results
def _CheckNoAuraWindowPropertyHInHeaders(input_api, output_api):
  """Makes sure we don't include ui/aura/window_property.h
  in header files.
  """
  # The '.' before 'h' is escaped so the regex matches the literal file
  # name only (unescaped it matched any character).
  pattern = input_api.re.compile(r'^#include\s*"ui/aura/window_property\.h"')
  errors = []
  for f in input_api.AffectedFiles():
    if not f.LocalPath().endswith('.h'):
      continue
    for line_num, line in f.ChangedContents():
      if pattern.match(line):
        errors.append('    %s:%d' % (f.LocalPath(), line_num))

  results = []
  if errors:
    results.append(output_api.PresubmitError(
      'Header files should not include ui/aura/window_property.h', errors))
  return results
def _CheckIncludeOrderForScope(scope, input_api, file_path, changed_linenums):
  """Checks that the lines in scope occur in the right order.

  1. C system files in alphabetical order
  2. C++ system files in alphabetical order
  3. Project's .h files

  Returns warning strings for out-of-order lines that touch changed lines.
  """
  c_system_include_pattern = input_api.re.compile(r'\s*#include <.*\.h>')
  cpp_system_include_pattern = input_api.re.compile(r'\s*#include <.*>')
  custom_include_pattern = input_api.re.compile(r'\s*#include ".*')

  C_SYSTEM_INCLUDES, CPP_SYSTEM_INCLUDES, CUSTOM_INCLUDES = range(3)

  # Walk the includes with a small state machine; any regression to an
  # earlier category, or a non-alphabetical pair, records both line numbers.
  state = C_SYSTEM_INCLUDES
  previous_line = ''
  previous_line_num = 0
  problem_linenums = []
  for line_num, line in scope:
    if c_system_include_pattern.match(line):
      if state != C_SYSTEM_INCLUDES:
        problem_linenums.append((line_num, previous_line_num))
      elif previous_line and previous_line > line:
        problem_linenums.append((line_num, previous_line_num))
    elif cpp_system_include_pattern.match(line):
      if state == C_SYSTEM_INCLUDES:
        state = CPP_SYSTEM_INCLUDES
      elif state == CUSTOM_INCLUDES:
        problem_linenums.append((line_num, previous_line_num))
      elif previous_line and previous_line > line:
        problem_linenums.append((line_num, previous_line_num))
    elif custom_include_pattern.match(line):
      if state != CUSTOM_INCLUDES:
        state = CUSTOM_INCLUDES
      elif previous_line and previous_line > line:
        problem_linenums.append((line_num, previous_line_num))
    else:
      # Must append a (line, previous line) pair like every other branch;
      # the original appended a bare int here, which would have crashed the
      # tuple unpacking below the first time this branch was taken.
      problem_linenums.append((line_num, previous_line_num))
    previous_line = line
    previous_line_num = line_num

  warnings = []
  for (line_num, previous_line_num) in problem_linenums:
    if line_num in changed_linenums or previous_line_num in changed_linenums:
      warnings.append('    %s:%d' % (file_path, line_num))
  return warnings
def _CheckIncludeOrderInFile(input_api, f, changed_linenums):
  """Checks the #include order for the given file f."""
  system_include_pattern = input_api.re.compile(r'\s*#include \<.*')
  # Exclude #include <.../...> includes from the check; e.g., <sys/...> includes
  # often need to appear in a specific order.
  excluded_include_pattern = input_api.re.compile(r'\s*#include \<.*/.*')
  custom_include_pattern = input_api.re.compile(r'\s*#include "(?P<FILE>.*)"')
  if_pattern = input_api.re.compile(
      r'\s*#\s*(if|elif|else|endif|define|undef).*')
  # Some files need specialized order of includes; exclude such files from this
  # check.
  uncheckable_includes_pattern = input_api.re.compile(
      r'\s*#include '
      # Raw string: the original non-raw literal relied on Python passing
      # unrecognized escapes (\., \s) through unchanged, which is fragile.
      r'("ipc/.*macros\.h"|<windows\.h>|".*gl.*autogen.h")\s*')

  contents = f.NewContents()
  warnings = []
  line_num = 0

  # Handle the special first include. If the first include file is
  # some/path/file.h, the corresponding including file can be some/path/file.cc,
  # some/other/path/file.cc, some/path/file_platform.cc, some/path/file-suffix.h
  # etc. It's also possible that no special first include exists.
  for line in contents:
    line_num += 1
    if system_include_pattern.match(line):
      # No special first include -> process the line again along with normal
      # includes.
      line_num -= 1
      break
    match = custom_include_pattern.match(line)
    if match:
      match_dict = match.groupdict()
      header_basename = input_api.os_path.basename(
          match_dict['FILE']).replace('.h', '')
      if header_basename not in input_api.os_path.basename(f.LocalPath()):
        # No special first include -> process the line again along with normal
        # includes.
        line_num -= 1
      break

  # Split into scopes: Each region between #if and #endif is its own scope.
  scopes = []
  current_scope = []
  for line in contents[line_num:]:
    line_num += 1
    if uncheckable_includes_pattern.match(line):
      return []
    if if_pattern.match(line):
      scopes.append(current_scope)
      current_scope = []
    elif ((system_include_pattern.match(line) or
           custom_include_pattern.match(line)) and
          not excluded_include_pattern.match(line)):
      current_scope.append((line_num, line))
  scopes.append(current_scope)

  for scope in scopes:
    warnings.extend(_CheckIncludeOrderForScope(scope, input_api, f.LocalPath(),
                                               changed_linenums))
  return warnings
def _CheckIncludeOrder(input_api, output_api):
  """Checks that the #include order is correct.

  1. The corresponding header for source files.
  2. C system files in alphabetical order
  3. C++ system files in alphabetical order
  4. Project's .h files in alphabetical order

  Each region separated by #if, #elif, #else, #endif, #define and #undef follows
  these rules separately.
  """
  warnings = []
  for f in input_api.AffectedFiles():
    if not f.LocalPath().endswith(('.cc', '.h')):
      continue
    changed = set(num for num, _ in f.ChangedContents())
    warnings += _CheckIncludeOrderInFile(input_api, f, changed)

  if not warnings:
    return []
  # Downgrade to a plain notification on commit so the CQ isn't blocked.
  if input_api.is_committing:
    factory = output_api.PresubmitNotifyResult
  else:
    factory = output_api.PresubmitPromptWarning
  return [factory(_INCLUDE_ORDER_WARNING, warnings)]
def _CheckForVersionControlConflictsInFile(input_api, f):
  """Returns the changed lines of f that look like VCS conflict markers."""
  marker_re = input_api.re.compile('^(?:<<<<<<<|>>>>>>>) |^=======$')
  return ['    %s:%d %s' % (f.LocalPath(), num, text)
          for num, text in f.ChangedContents()
          if marker_re.match(text)]
def _CheckForVersionControlConflicts(input_api, output_api):
  """Usually this is not intentional and will cause a compile failure."""
  errors = []
  for affected in input_api.AffectedFiles():
    errors += _CheckForVersionControlConflictsInFile(input_api, affected)

  if not errors:
    return []
  return [output_api.PresubmitError(
    'Version control conflict markers found, please resolve.', errors)]
def _CheckHardcodedGoogleHostsInLowerLayers(input_api, output_api):
  """Warns when layers below chrome/ hard-code "google.com" in string
  literals."""
  def FilterFile(affected_file):
    """Filter function for use with input_api.AffectedSourceFiles,
    below. This filters out everything except non-test files from
    top-level directories that generally speaking should not hard-code
    service URLs (e.g. src/android_webview/, src/content/ and others).
    """
    return input_api.FilterSourceFile(
      affected_file,
      white_list=(r'^(android_webview|base|content|net)[\\\/].*', ),
      black_list=(_EXCLUDED_PATHS +
                  _TEST_CODE_EXCLUDED_PATHS +
                  input_api.DEFAULT_BLACK_LIST))

  # Raw string: the original non-raw literal relied on '\.' passing through
  # as an unrecognized escape, which is fragile.
  pattern = input_api.re.compile(r'"[^"]*google\.com[^"]*"')
  problems = []  # items are (filename, line_number, line)
  for f in input_api.AffectedSourceFiles(FilterFile):
    for line_num, line in f.ChangedContents():
      if pattern.search(line):
        problems.append((f.LocalPath(), line_num, line))

  if problems:
    if not input_api.is_committing:
      warning_factory = output_api.PresubmitPromptWarning
    else:
      # We don't want to block use of the CQ when there is a warning
      # of this kind, so we only show a message when committing.
      warning_factory = output_api.PresubmitNotifyResult
    return [warning_factory(
        'Most layers below src/chrome/ should not hardcode service URLs.\n'
        'Are you sure this is correct? (Contact: joi@chromium.org)',
        ['  %s:%d:  %s' % (
            problem[0], problem[1], problem[2]) for problem in problems])]
  else:
    return []
def _CheckNoAbbreviationInPngFileName(input_api, output_api):
  """Makes sure there are no abbreviations in the name of PNG files.
  """
  # Flags names like foo_h.png or foo_c_bar.png (single-letter suffix parts).
  abbreviated_png = input_api.re.compile(r'.*_[a-z]_.*\.png$|.*_[a-z]\.png$')
  bad_names = ['  %s' % f.LocalPath()
               for f in input_api.AffectedFiles(include_deletes=False)
               if abbreviated_png.match(f.LocalPath())]

  if not bad_names:
    return []
  return [output_api.PresubmitError(
      'The name of PNG files should not have abbreviations. \n'
      'Use _hover.png, _center.png, instead of _h.png, _c.png.\n'
      'Contact oshima@chromium.org if you have questions.', bad_names)]
def _CommonChecks(input_api, output_api):
  """Checks common to both upload and commit."""
  results = []
  # Canned depot_tools project-wide checks first (skipping the excluded
  # third-party/generated paths), then the file-local checks defined above.
  results.extend(input_api.canned_checks.PanProjectChecks(
      input_api, output_api, excluded_paths=_EXCLUDED_PATHS))
  results.extend(_CheckAuthorizedAuthor(input_api, output_api))
  results.extend(
      _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api))
  results.extend(_CheckNoIOStreamInHeaders(input_api, output_api))
  results.extend(_CheckNoUNIT_TESTInSourceFiles(input_api, output_api))
  results.extend(_CheckNoNewWStrings(input_api, output_api))
  results.extend(_CheckNoDEPSGIT(input_api, output_api))
  results.extend(_CheckNoBannedFunctions(input_api, output_api))
  results.extend(_CheckNoPragmaOnce(input_api, output_api))
  results.extend(_CheckNoTrinaryTrueFalse(input_api, output_api))
  results.extend(_CheckUnwantedDependencies(input_api, output_api))
  results.extend(_CheckFilePermissions(input_api, output_api))
  results.extend(_CheckNoAuraWindowPropertyHInHeaders(input_api, output_api))
  results.extend(_CheckIncludeOrder(input_api, output_api))
  results.extend(_CheckForVersionControlConflicts(input_api, output_api))
  results.extend(_CheckPatchFiles(input_api, output_api))
  results.extend(_CheckHardcodedGoogleHostsInLowerLayers(input_api, output_api))
  results.extend(_CheckNoAbbreviationInPngFileName(input_api, output_api))
  # When PRESUBMIT.py itself is part of the change, also run its own tests.
  if any('PRESUBMIT.py' == f.LocalPath() for f in input_api.AffectedFiles()):
    results.extend(input_api.canned_checks.RunUnitTestsInDirectory(
        input_api, output_api,
        input_api.PresubmitLocalPath(),
        whitelist=[r'^PRESUBMIT_test\.py$']))
  return results
def _CheckSubversionConfig(input_api, output_api):
  """Verifies the subversion config file is correctly setup.

  Checks that autoprops are enabled, returns an error otherwise.
  """
  join = input_api.os_path.join
  if input_api.platform == 'win32':
    appdata = input_api.environ.get('APPDATA', '')
    if not appdata:
      return [output_api.PresubmitError('%APPDATA% is not configured.')]
    path = join(appdata, 'Subversion', 'config')
  else:
    home = input_api.environ.get('HOME', '')
    if not home:
      return [output_api.PresubmitError('$HOME is not configured.')]
    path = join(home, '.subversion', 'config')

  error_msg = (
      'Please look at http://dev.chromium.org/developers/coding-style to\n'
      'configure your subversion configuration file. This enables automatic\n'
      'properties to simplify the project maintenance.\n'
      'Pro-tip: just download and install\n'
      'http://src.chromium.org/viewvc/chrome/trunk/tools/build/slave/config\n')

  try:
    # Context manager so the config file handle is always closed; the
    # previous open(path, 'r').read() leaked the handle.
    with open(path, 'r') as config_file:
      lines = config_file.read().splitlines()
    # Make sure auto-props is enabled and check for 2 Chromium standard
    # auto-prop.
    if (not '*.cc = svn:eol-style=LF' in lines or
        not '*.pdf = svn:mime-type=application/pdf' in lines or
        not 'enable-auto-props = yes' in lines):
      return [
          output_api.PresubmitNotifyResult(
              'It looks like you have not configured your subversion config '
              'file or it is not up-to-date.\n' + error_msg)
      ]
  except (OSError, IOError):
    return [
        output_api.PresubmitNotifyResult(
            'Can\'t find your subversion config file.\n' + error_msg)
    ]
  return []
def _CheckAuthorizedAuthor(input_api, output_api):
"""For non-googler/chromites committers, verify the author's email address is
in AUTHORS.
"""
# TODO(maruel): Add it to input_api?
import fnmatch
author = input_api.change.author_email
if not author:
input_api.logging.info('No author, skipping AUTHOR check')
return []
authors_path = input_api.os_path.join(
input_api.PresubmitLocalPath(), 'AUTHORS')
valid_authors = (
input_api.re.match(r'[^#]+\s+\<(.+?)\>\s*$', line)
for line in open(authors_path))
valid_authors = [item.group(1).lower() for item in valid_authors if item]
if not any(fnmatch.fnmatch(author.lower(), valid) for valid in valid_authors):
input_api.logging.info('Valid authors are %s', ', '.join(valid_authors))
return [output_api.PresubmitPromptWarning(
('%s is not in AUTHORS file. If you are a new contributor, please visit'
'\n'
'http://www.chromium.org/developers/contributing-code and read the '
'"Legal" section\n'
'If you are a chromite, verify the contributor signed the CLA.') %
author)]
return []
def _CheckPatchFiles(input_api, output_api):
problems = [f.LocalPath() for f in input_api.AffectedFiles()
if f.LocalPath().endswith(('.orig', '.rej'))]
if problems:
return [output_api.PresubmitError(
"Don't commit .rej and .orig files.", problems)]
else:
return []
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit entry point for upload; runs only the common checks."""
  return list(_CommonChecks(input_api, output_api))
def CheckChangeOnCommit(input_api, output_api):
  """Presubmit entry point for commit: common checks plus commit-only gates
  (tree open, try jobs, BUG= field, description, svn config)."""
  checks = input_api.canned_checks
  results = list(_CommonChecks(input_api, output_api))
  # TODO(thestig) temporarily disabled, doesn't work in third_party/
  #results.extend(input_api.canned_checks.CheckSvnModifiedDirectories(
  #    input_api, output_api, sources))
  # Make sure the tree is 'open'.
  results += checks.CheckTreeIsOpen(
      input_api,
      output_api,
      json_url='http://chromium-status.appspot.com/current?format=json')
  results += checks.CheckRietveldTryJobExecution(
      input_api, output_api, 'http://codereview.chromium.org',
      ('win_rel', 'linux_rel', 'mac_rel, win:compile'),
      'tryserver@chromium.org')
  results += checks.CheckChangeHasBugField(input_api, output_api)
  results += checks.CheckChangeHasDescription(input_api, output_api)
  results += _CheckSubversionConfig(input_api, output_api)
  return results
def GetPreferredTrySlaves(project, change):
  """Returns the default try bots for the files touched by |change|.

  Single-platform changes get only that platform's bots; everything else
  gets the full default set, extended for aura/chromeos-related paths.

  Args:
    project: unused (presubmit API signature requirement).
    change: presubmit Change; LocalPaths() lists the affected file paths.

  Returns:
    A list of try bot names (possibly empty for OWNERS-only changes).
  """
  files = change.LocalPaths()
  # Changes touching nothing, or only OWNERS files, need no try run.
  if not files or all(re.search(r'[\\/]OWNERS$', f) for f in files):
    return []

  # All patterns are raw strings: the originals used plain strings, so
  # sequences like '\.' were invalid string escapes (deprecated in Python 3).
  if all(re.search(r'\.(m|mm)$|(^|[/_])mac[/_.]', f) for f in files):
    return ['mac_rel', 'mac_asan', 'mac:compile']
  if all(re.search(r'(^|[/_])win[/_.]', f) for f in files):
    return ['win_rel', 'win7_aura', 'win:compile']
  if all(re.search(r'(^|[/_])android[/_.]', f) for f in files):
    return ['android_dbg', 'android_clang_dbg']
  if all(re.search(r'^native_client_sdk', f) for f in files):
    return ['linux_nacl_sdk', 'win_nacl_sdk', 'mac_nacl_sdk']
  if all(re.search(r'[/_]ios[/_.]', f) for f in files):
    return ['ios_rel_device', 'ios_dbg_simulator']

  trybots = [
      'android_clang_dbg',
      'android_dbg',
      'ios_dbg_simulator',
      'ios_rel_device',
      'linux_asan',
      'linux_aura',
      'linux_chromeos',
      'linux_clang:compile',
      'linux_rel',
      'mac_asan',
      'mac_rel',
      'mac:compile',
      'win7_aura',
      'win_rel',
      'win:compile',
  ]

  # Match things like path/aura/file.cc and path/file_aura.cc.
  # Same for chromeos.
  if any(re.search(r'[/_](aura|chromeos)', f) for f in files):
    trybots += ['linux_chromeos_clang:compile', 'linux_chromeos_asan']

  return trybots
# ---- (unrelated file content below: CuPy sum/prod reduction tests) ----
import math
import numpy
import pytest
import cupy
import cupy._core._accelerator as _acc
import cupy.cuda.cutensor
from cupy._core import _cub_reduction
from cupy import testing
@testing.gpu
class TestSumprod:
    """Compares ndarray.sum/prod and xp.sum/prod between CuPy and NumPy.

    Each ``numpy_cupy_allclose`` test runs twice (once with ``xp=numpy``,
    once with ``xp=cupy``) and the two return values are compared.
    """

    @pytest.fixture(autouse=True)
    def tearDown(self):
        yield
        # Free huge memory for slow test
        cupy.get_default_memory_pool().free_all_blocks()
        cupy.get_default_pinned_memory_pool().free_all_blocks()

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_sum_all(self, xp, dtype):
        a = testing.shaped_arange((2, 3, 4), xp, dtype)
        return a.sum()

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_sum_all_keepdims(self, xp, dtype):
        a = testing.shaped_arange((2, 3, 4), xp, dtype)
        return a.sum(keepdims=True)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_external_sum_all(self, xp, dtype):
        # Module-level function form (xp.sum) rather than the method.
        a = testing.shaped_arange((2, 3, 4), xp, dtype)
        return xp.sum(a)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_sum_all2(self, xp, dtype):
        # Larger array exercises a different reduction code path size.
        a = testing.shaped_arange((20, 30, 40), xp, dtype)
        return a.sum()

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_sum_all_transposed(self, xp, dtype):
        a = testing.shaped_arange((2, 3, 4), xp, dtype).transpose(2, 0, 1)
        return a.sum()

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_sum_all_transposed2(self, xp, dtype):
        a = testing.shaped_arange((20, 30, 40), xp, dtype).transpose(2, 0, 1)
        return a.sum()

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_sum_axis(self, xp, dtype):
        a = testing.shaped_arange((2, 3, 4), xp, dtype)
        return a.sum(axis=1)

    @testing.slow
    @testing.numpy_cupy_allclose()
    def test_sum_axis_huge(self, xp):
        # Broadcast view keeps memory bounded while the logical array is huge.
        a = testing.shaped_random((2048, 1, 1024), xp, 'b')
        a = xp.broadcast_to(a, (2048, 1024, 1024))
        return a.sum(axis=2)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_external_sum_axis(self, xp, dtype):
        a = testing.shaped_arange((2, 3, 4), xp, dtype)
        return xp.sum(a, axis=1)

    # float16 is omitted, since NumPy's sum on float16 arrays has more error
    # than CuPy's.
    @testing.for_all_dtypes(no_float16=True)
    @testing.numpy_cupy_allclose()
    def test_sum_axis2(self, xp, dtype):
        a = testing.shaped_arange((20, 30, 40), xp, dtype)
        return a.sum(axis=1)

    def test_sum_axis2_float16(self):
        # Note that the above test example overflows in float16. We use a
        # smaller array instead.
        a = testing.shaped_arange((2, 30, 4), dtype='e')
        sa = a.sum(axis=1)
        b = testing.shaped_arange((2, 30, 4), numpy, dtype='f')
        sb = b.sum(axis=1)
        testing.assert_allclose(sa, sb.astype('e'))

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose(contiguous_check=False)
    def test_sum_axis_transposed(self, xp, dtype):
        a = testing.shaped_arange((2, 3, 4), xp, dtype).transpose(2, 0, 1)
        return a.sum(axis=1)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose(contiguous_check=False)
    def test_sum_axis_transposed2(self, xp, dtype):
        a = testing.shaped_arange((20, 30, 40), xp, dtype).transpose(2, 0, 1)
        return a.sum(axis=1)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_sum_axes(self, xp, dtype):
        a = testing.shaped_arange((2, 3, 4, 5), xp, dtype)
        return a.sum(axis=(1, 3))

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose(rtol=1e-4)
    def test_sum_axes2(self, xp, dtype):
        a = testing.shaped_arange((20, 30, 40, 50), xp, dtype)
        return a.sum(axis=(1, 3))

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose(rtol=1e-6)
    def test_sum_axes3(self, xp, dtype):
        a = testing.shaped_arange((2, 3, 4, 5), xp, dtype)
        return a.sum(axis=(0, 2, 3))

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose(rtol=1e-6)
    def test_sum_axes4(self, xp, dtype):
        a = testing.shaped_arange((20, 30, 40, 50), xp, dtype)
        return a.sum(axis=(0, 2, 3))

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_sum_empty_axis(self, xp, dtype):
        # axis=() is a no-op reduction: the input comes back unchanged.
        a = testing.shaped_arange((2, 3, 4, 5), xp, dtype)
        return a.sum(axis=())

    @testing.for_all_dtypes_combination(names=['src_dtype', 'dst_dtype'])
    @testing.numpy_cupy_allclose()
    def test_sum_dtype(self, xp, src_dtype, dst_dtype):
        if not xp.can_cast(src_dtype, dst_dtype):
            pytest.skip()
        a = testing.shaped_arange((2, 3, 4), xp, src_dtype)
        return a.sum(dtype=dst_dtype)

    @testing.for_all_dtypes_combination(names=['src_dtype', 'dst_dtype'])
    @testing.numpy_cupy_allclose()
    def test_sum_keepdims_and_dtype(self, xp, src_dtype, dst_dtype):
        if not xp.can_cast(src_dtype, dst_dtype):
            pytest.skip()
        a = testing.shaped_arange((2, 3, 4), xp, src_dtype)
        return a.sum(axis=2, dtype=dst_dtype, keepdims=True)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_sum_keepdims_multiple_axes(self, xp, dtype):
        a = testing.shaped_arange((2, 3, 4), xp, dtype)
        return a.sum(axis=(1, 2), keepdims=True)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_sum_out(self, xp, dtype):
        a = testing.shaped_arange((2, 3, 4), xp, dtype)
        b = xp.empty((2, 4), dtype=dtype)
        a.sum(axis=1, out=b)
        return b

    def test_sum_out_wrong_shape(self):
        a = testing.shaped_arange((2, 3, 4))
        b = cupy.empty((2, 3))
        with pytest.raises(ValueError):
            a.sum(axis=1, out=b)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_prod_all(self, xp, dtype):
        a = testing.shaped_arange((2, 3), xp, dtype)
        return a.prod()

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_external_prod_all(self, xp, dtype):
        a = testing.shaped_arange((2, 3), xp, dtype)
        return xp.prod(a)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_prod_axis(self, xp, dtype):
        a = testing.shaped_arange((2, 3, 4), xp, dtype)
        return a.prod(axis=1)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_external_prod_axis(self, xp, dtype):
        a = testing.shaped_arange((2, 3, 4), xp, dtype)
        return xp.prod(a, axis=1)

    @testing.for_all_dtypes_combination(names=['src_dtype', 'dst_dtype'])
    @testing.numpy_cupy_allclose()
    def test_prod_dtype(self, xp, src_dtype, dst_dtype):
        if not xp.can_cast(src_dtype, dst_dtype):
            pytest.skip()
        a = testing.shaped_arange((2, 3), xp, src_dtype)
        return a.prod(dtype=dst_dtype)

    @testing.numpy_cupy_allclose()
    def test_product_alias(self, xp):
        # `product` is a (deprecated in NumPy) alias of `prod`.
        a = testing.shaped_arange((2, 3), xp, xp.float32)
        return xp.product(a)
# This class compares CUB results against NumPy's
@testing.parameterize(*testing.product({
    'shape': [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)],
    'order': ('C', 'F'),
    'backend': ('device', 'block'),
}))
@testing.gpu
@pytest.mark.skipif(
    not cupy.cuda.cub.available, reason='The CUB routine is not enabled')
class TestCubReduction:
    """Runs sum/prod/cumsum/cumprod with the CUB accelerator enabled.

    Each test first mocks the relevant CUB entry point to assert it is
    actually invoked, then re-runs the real computation and compares it
    against NumPy. `backend` picks between the 'device' routine accelerator
    and the 'block' reduction accelerator.
    """

    @pytest.fixture(autouse=True)
    def setUp(self):
        # Save and restore the global accelerator settings around each test.
        old_routine_accelerators = _acc.get_routine_accelerators()
        old_reduction_accelerators = _acc.get_reduction_accelerators()
        if self.backend == 'device':
            _acc.set_routine_accelerators(['cub'])
            _acc.set_reduction_accelerators([])
        elif self.backend == 'block':
            _acc.set_routine_accelerators([])
            _acc.set_reduction_accelerators(['cub'])
        yield
        _acc.set_routine_accelerators(old_routine_accelerators)
        _acc.set_reduction_accelerators(old_reduction_accelerators)

    @testing.for_contiguous_axes()
    # sum supports less dtypes; don't test float16 as it's not as accurate?
    @testing.for_dtypes('qQfdFD')
    @testing.numpy_cupy_allclose(rtol=1E-5)
    def test_cub_sum(self, xp, dtype, axis):
        a = testing.shaped_random(self.shape, xp, dtype)
        if self.order in ('c', 'C'):
            a = xp.ascontiguousarray(a)
        elif self.order in ('f', 'F'):
            a = xp.asfortranarray(a)
        if xp is numpy:
            return a.sum(axis=axis)
        # xp is cupy, first ensure we really use CUB
        ret = cupy.empty(())  # Cython checks return type, need to fool it
        if self.backend == 'device':
            func_name = 'cupy._core._routines_math.cub.'
            # Full reduction vs. per-segment reduction dispatch.
            if len(axis) == len(self.shape):
                func_name += 'device_reduce'
            else:
                func_name += 'device_segmented_reduce'
            with testing.AssertFunctionIsCalled(func_name, return_value=ret):
                a.sum(axis=axis)
        elif self.backend == 'block':
            # this is the only function we can mock; the rest is cdef'd
            func_name = 'cupy._core._cub_reduction.'
            func_name += '_SimpleCubReductionKernel_get_cached_function'
            func = _cub_reduction._SimpleCubReductionKernel_get_cached_function
            if len(axis) == len(self.shape):
                times_called = 2  # two passes
            else:
                times_called = 1  # one pass
            with testing.AssertFunctionIsCalled(
                    func_name, wraps=func, times_called=times_called):
                a.sum(axis=axis)
        # ...then perform the actual computation
        return a.sum(axis=axis)

    # sum supports less dtypes; don't test float16 as it's not as accurate?
    @testing.for_dtypes('qQfdFD')
    @testing.numpy_cupy_allclose(rtol=1E-5, contiguous_check=False)
    def test_cub_sum_empty_axis(self, xp, dtype):
        # axis=() performs no reduction; CUB must not be required here.
        a = testing.shaped_random(self.shape, xp, dtype)
        if self.order in ('c', 'C'):
            a = xp.ascontiguousarray(a)
        elif self.order in ('f', 'F'):
            a = xp.asfortranarray(a)
        return a.sum(axis=())

    @testing.for_contiguous_axes()
    # prod supports less dtypes; don't test float16 as it's not as accurate?
    @testing.for_dtypes('qQfdFD')
    @testing.numpy_cupy_allclose(rtol=1E-5)
    def test_cub_prod(self, xp, dtype, axis):
        # Mirror of test_cub_sum for prod.
        a = testing.shaped_random(self.shape, xp, dtype)
        if self.order in ('c', 'C'):
            a = xp.ascontiguousarray(a)
        elif self.order in ('f', 'F'):
            a = xp.asfortranarray(a)
        if xp is numpy:
            return a.prod(axis=axis)
        # xp is cupy, first ensure we really use CUB
        ret = cupy.empty(())  # Cython checks return type, need to fool it
        if self.backend == 'device':
            func_name = 'cupy._core._routines_math.cub.'
            if len(axis) == len(self.shape):
                func_name += 'device_reduce'
            else:
                func_name += 'device_segmented_reduce'
            with testing.AssertFunctionIsCalled(func_name, return_value=ret):
                a.prod(axis=axis)
        elif self.backend == 'block':
            # this is the only function we can mock; the rest is cdef'd
            func_name = 'cupy._core._cub_reduction.'
            func_name += '_SimpleCubReductionKernel_get_cached_function'
            func = _cub_reduction._SimpleCubReductionKernel_get_cached_function
            if len(axis) == len(self.shape):
                times_called = 2  # two passes
            else:
                times_called = 1  # one pass
            with testing.AssertFunctionIsCalled(
                    func_name, wraps=func, times_called=times_called):
                a.prod(axis=axis)
        # ...then perform the actual computation
        return a.prod(axis=axis)

    # TODO(leofang): test axis after support is added
    # don't test float16 as it's not as accurate?
    @testing.for_dtypes('bhilBHILfdF')
    @testing.numpy_cupy_allclose(rtol=1E-4)
    def test_cub_cumsum(self, xp, dtype):
        if self.backend == 'block':
            pytest.skip('does not support')
        a = testing.shaped_random(self.shape, xp, dtype)
        if self.order in ('c', 'C'):
            a = xp.ascontiguousarray(a)
        elif self.order in ('f', 'F'):
            a = xp.asfortranarray(a)
        if xp is numpy:
            return a.cumsum()
        # xp is cupy, first ensure we really use CUB
        ret = cupy.empty(())  # Cython checks return type, need to fool it
        func = 'cupy._core._routines_math.cub.device_scan'
        with testing.AssertFunctionIsCalled(func, return_value=ret):
            a.cumsum()
        # ...then perform the actual computation
        return a.cumsum()

    # TODO(leofang): test axis after support is added
    # don't test float16 as it's not as accurate?
    @testing.for_dtypes('bhilBHILfdF')
    @testing.numpy_cupy_allclose(rtol=1E-4)
    def test_cub_cumprod(self, xp, dtype):
        if self.backend == 'block':
            pytest.skip('does not support')
        a = testing.shaped_random(self.shape, xp, dtype)
        if self.order in ('c', 'C'):
            a = xp.ascontiguousarray(a)
        elif self.order in ('f', 'F'):
            a = xp.asfortranarray(a)
        if xp is numpy:
            result = a.cumprod()
            return self._mitigate_cumprod(xp, dtype, result)
        # xp is cupy, first ensure we really use CUB
        ret = cupy.empty(())  # Cython checks return type, need to fool it
        func = 'cupy._core._routines_math.cub.device_scan'
        with testing.AssertFunctionIsCalled(func, return_value=ret):
            a.cumprod()
        # ...then perform the actual computation
        result = a.cumprod()
        return self._mitigate_cumprod(xp, dtype, result)

    def _mitigate_cumprod(self, xp, dtype, result):
        # for testing cumprod against complex arrays, the gotcha is CuPy may
        # produce only Inf at the position where NumPy starts to give NaN. So,
        # an error would be raised during assert_allclose where the positions
        # of NaNs are examined. Since this is both algorithm and architecture
        # dependent, we have no control over this behavior and can only
        # circumvent the issue by manually converting Inf to NaN
        if dtype in (numpy.complex64, numpy.complex128):
            pos = xp.where(xp.isinf(result))
            result[pos] = xp.nan + 1j * xp.nan
        return result
# This class compares cuTENSOR results against NumPy's
@testing.parameterize(*testing.product({
    'shape': [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)],
    'order': ('C', 'F'),
}))
@testing.gpu
@pytest.mark.skipif(
    not cupy.cuda.cutensor.available,
    reason='The cuTENSOR routine is not enabled')
class TestCuTensorReduction:
    """Runs sum with the cuTENSOR routine accelerator enabled and compares
    against NumPy, first asserting the cuTENSOR path is actually taken."""

    @pytest.fixture(autouse=True)
    def setUp(self):
        # Save and restore the global accelerator list around each test.
        old_accelerators = cupy._core.get_routine_accelerators()
        cupy._core.set_routine_accelerators(['cutensor'])
        yield
        cupy._core.set_routine_accelerators(old_accelerators)

    @testing.for_contiguous_axes()
    # sum supports less dtypes; don't test float16 as it's not as accurate?
    @testing.for_dtypes('qQfdFD')
    @testing.numpy_cupy_allclose(rtol=1E-5, contiguous_check=False)
    def test_cutensor_sum(self, xp, dtype, axis):
        a = testing.shaped_random(self.shape, xp, dtype)
        if self.order in ('c', 'C'):
            a = xp.ascontiguousarray(a)
        elif self.order in ('f', 'F'):
            a = xp.asfortranarray(a)
        if xp is numpy:
            return a.sum(axis=axis)
        # xp is cupy, first ensure we really use cuTENSOR
        ret = cupy.empty(())  # Cython checks return type, need to fool it
        func = 'cupy.cutensor._try_reduction_routine'
        with testing.AssertFunctionIsCalled(func, return_value=ret):
            a.sum(axis=axis)
        # ...then perform the actual computation
        return a.sum(axis=axis)

    # sum supports less dtypes; don't test float16 as it's not as accurate?
    @testing.for_dtypes('qQfdFD')
    @testing.numpy_cupy_allclose(rtol=1E-5, contiguous_check=False)
    def test_cutensor_sum_empty_axis(self, xp, dtype):
        # axis=() performs no reduction, so no accelerator assertion here.
        a = testing.shaped_random(self.shape, xp, dtype)
        if self.order in ('c', 'C'):
            a = xp.ascontiguousarray(a)
        elif self.order in ('f', 'F'):
            a = xp.asfortranarray(a)
        return a.sum(axis=())
@testing.parameterize(
    *testing.product({
        'shape': [(2, 3, 4), (20, 30, 40)],
        'axis': [0, 1],
        'transpose_axes': [True, False],
        'keepdims': [True, False],
        'func': ['nansum', 'nanprod']
    })
)
@testing.gpu
class TestNansumNanprodLong:
    """Parameterized nansum/nanprod comparison between CuPy and NumPy."""

    def _do_transposed_axis_test(self):
        # Filters the parameter grid: only a subset of the
        # (transpose_axes, axis) combinations is exercised.
        return not self.transpose_axes and self.axis != 1

    def _numpy_nanprod_implemented(self):
        # numpy.nanprod only exists from NumPy 1.10 onwards.
        return (self.func == 'nanprod' and
                numpy.__version__ >= numpy.lib.NumpyVersion('1.10.0'))

    def _test(self, xp, dtype):
        a = testing.shaped_arange(self.shape, xp, dtype)
        if self.transpose_axes:
            a = a.transpose(2, 0, 1)
        # NaNs can only be injected into floating/complex arrays.
        if not issubclass(dtype, xp.integer):
            a[:, 1] = xp.nan
        func = getattr(xp, self.func)
        return func(a, axis=self.axis, keepdims=self.keepdims)

    @testing.for_all_dtypes(no_bool=True, no_float16=True)
    @testing.numpy_cupy_allclose()
    def test_nansum_all(self, xp, dtype):
        # Returning an empty array on skipped combinations keeps the
        # numpy/cupy comparison trivially passing.
        if (not self._numpy_nanprod_implemented() or
                not self._do_transposed_axis_test()):
            return xp.array(())
        return self._test(xp, dtype)

    @testing.for_all_dtypes(no_bool=True, no_float16=True)
    @testing.numpy_cupy_allclose(contiguous_check=False)
    def test_nansum_axis_transposed(self, xp, dtype):
        if (not self._numpy_nanprod_implemented() or
                not self._do_transposed_axis_test()):
            return xp.array(())
        return self._test(xp, dtype)
@testing.parameterize(
    *testing.product({
        'shape': [(2, 3, 4), (20, 30, 40)],
    })
)
@testing.gpu
class TestNansumNanprodExtra:
    """Additional nansum coverage: float16, `out=` and shape mismatch."""

    def test_nansum_axis_float16(self):
        # Note that the above test example overflows in float16. We use a
        # smaller array instead, return True if array is too large.
        if (numpy.prod(self.shape) > 24):
            return True
        a = testing.shaped_arange(self.shape, dtype='e')
        a[:, 1] = cupy.nan
        sa = cupy.nansum(a, axis=1)
        b = testing.shaped_arange(self.shape, numpy, dtype='f')
        b[:, 1] = numpy.nan
        sb = numpy.nansum(b, axis=1)
        testing.assert_allclose(sa, sb.astype('e'))

    @testing.for_all_dtypes(no_bool=True, no_float16=True)
    @testing.numpy_cupy_allclose()
    def test_nansum_out(self, xp, dtype):
        a = testing.shaped_arange(self.shape, xp, dtype)
        # NaNs can only be injected into floating/complex arrays.
        if not issubclass(dtype, xp.integer):
            a[:, 1] = xp.nan
        b = xp.empty((self.shape[0], self.shape[2]), dtype=dtype)
        xp.nansum(a, axis=1, out=b)
        return b

    def test_nansum_out_wrong_shape(self):
        a = testing.shaped_arange(self.shape)
        a[:, 1] = cupy.nan
        b = cupy.empty((2, 3))
        with pytest.raises(ValueError):
            cupy.nansum(a, axis=1, out=b)
@testing.parameterize(
    *testing.product({
        'shape': [(2, 3, 4, 5), (20, 30, 40, 50)],
        'axis': [(1, 3), (0, 2, 3)],
    })
)
@testing.gpu
class TestNansumNanprodAxes:
    """nansum over multiple axes, compared between CuPy and NumPy."""

    @testing.for_all_dtypes(no_bool=True, no_float16=True)
    @testing.numpy_cupy_allclose(rtol=1e-6)
    def test_nansum_axes(self, xp, dtype):
        a = testing.shaped_arange(self.shape, xp, dtype)
        # NaNs can only be injected into floating/complex arrays.
        if not issubclass(dtype, xp.integer):
            a[:, 1] = xp.nan
        return xp.nansum(a, axis=self.axis)
@testing.gpu
class TestNansumNanprodHuge:
    """Slow tests: nansum over a huge broadcast array with NaNs injected."""

    def _test(self, xp, nan_slice):
        # The broadcast view makes the logical array huge while the
        # underlying allocation stays (2048, 1, 1024).
        a = testing.shaped_random((2048, 1, 1024), xp, 'f')
        a[nan_slice] = xp.nan
        a = xp.broadcast_to(a, (2048, 1024, 1024))
        return xp.nansum(a, axis=2)

    @testing.slow
    @testing.numpy_cupy_allclose(atol=1e-1)
    def test_nansum_axis_huge(self, xp):
        # A single NaN column along the reduced axis.
        return self._test(
            xp, (slice(None, None), slice(None, None), slice(1, 2)))

    @testing.slow
    @testing.numpy_cupy_allclose(atol=1e-2)
    def test_nansum_axis_huge_halfnan(self, xp):
        # Half of the reduced axis is NaN.
        return self._test(
            xp, (slice(None, None), slice(None, None), slice(0, 512)))
# Axis values shared by the parameterization below; the test methods also
# use len(axes) to size their input arrays.
axes = [0, 1, 2]


@testing.parameterize(*testing.product({'axis': axes}))
@testing.gpu
class TestCumsum:
    """cumsum tests, parameterized over the reduction axis."""

    def _cumsum(self, xp, a, *args, **kwargs):
        b = a.copy()
        res = xp.cumsum(a, *args, **kwargs)
        testing.assert_array_equal(a, b)  # Check if input array is overwritten
        return res

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_cumsum(self, xp, dtype):
        a = testing.shaped_arange((5,), xp, dtype)
        return self._cumsum(xp, a)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_cumsum_out(self, xp, dtype):
        a = testing.shaped_arange((5,), xp, dtype)
        out = xp.zeros((5,), dtype=dtype)
        self._cumsum(xp, a, out=out)
        return out

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_cumsum_out_noncontiguous(self, xp, dtype):
        a = testing.shaped_arange((5,), xp, dtype)
        out = xp.zeros((10,), dtype=dtype)[::2]  # Non contiguous view
        self._cumsum(xp, a, out=out)
        return out

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_cumsum_2dim(self, xp, dtype):
        a = testing.shaped_arange((4, 5), xp, dtype)
        return self._cumsum(xp, a)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose(contiguous_check=False)
    def test_cumsum_axis(self, xp, dtype):
        # Shape (4, 5, 6): one distinct extent per possible axis.
        n = len(axes)
        a = testing.shaped_arange(tuple(range(4, 4 + n)), xp, dtype)
        return self._cumsum(xp, a, axis=self.axis)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_cumsum_axis_out(self, xp, dtype):
        n = len(axes)
        shape = tuple(range(4, 4 + n))
        a = testing.shaped_arange(shape, xp, dtype)
        out = xp.zeros(shape, dtype=dtype)
        self._cumsum(xp, a, axis=self.axis, out=out)
        return out

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_cumsum_axis_out_noncontiguous(self, xp, dtype):
        n = len(axes)
        shape = tuple(range(4, 4 + n))
        a = testing.shaped_arange(shape, xp, dtype)
        out = xp.zeros((8,)+shape[1:], dtype=dtype)[::2]  # Non contiguous view
        self._cumsum(xp, a, axis=self.axis, out=out)
        return out

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose(contiguous_check=False)
    def test_ndarray_cumsum_axis(self, xp, dtype):
        # Method form (a.cumsum) rather than the module function.
        n = len(axes)
        a = testing.shaped_arange(tuple(range(4, 4 + n)), xp, dtype)
        return a.cumsum(axis=self.axis)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_cumsum_axis_empty(self, xp, dtype):
        # Shape (0, 1, 2): exercises an empty leading dimension.
        n = len(axes)
        a = testing.shaped_arange(tuple(range(0, n)), xp, dtype)
        return self._cumsum(xp, a, axis=self.axis)

    @testing.for_all_dtypes()
    def test_invalid_axis_lower1(self, dtype):
        for xp in (numpy, cupy):
            a = testing.shaped_arange((4, 5), xp, dtype)
            with pytest.raises(numpy.AxisError):
                xp.cumsum(a, axis=-a.ndim - 1)

    @testing.for_all_dtypes()
    def test_invalid_axis_lower2(self, dtype):
        a = testing.shaped_arange((4, 5), cupy, dtype)
        with pytest.raises(numpy.AxisError):
            return cupy.cumsum(a, axis=-a.ndim - 1)

    @testing.for_all_dtypes()
    def test_invalid_axis_upper1(self, dtype):
        for xp in (numpy, cupy):
            a = testing.shaped_arange((4, 5), xp, dtype)
            with pytest.raises(numpy.AxisError):
                xp.cumsum(a, axis=a.ndim + 1)

    @testing.for_all_dtypes()
    def test_invalid_axis_upper2(self, dtype):
        a = testing.shaped_arange((4, 5), cupy, dtype)
        with pytest.raises(numpy.AxisError):
            return cupy.cumsum(a, axis=a.ndim + 1)

    def test_cumsum_arraylike(self):
        # cupy.cumsum only accepts cupy.ndarray, not generic array-likes.
        with pytest.raises(TypeError):
            return cupy.cumsum((1, 2, 3))

    @testing.for_float_dtypes()
    def test_cumsum_numpy_array(self, dtype):
        # ...nor numpy arrays (no implicit host-to-device transfer).
        a_numpy = numpy.arange(8, dtype=dtype)
        with pytest.raises(TypeError):
            return cupy.cumsum(a_numpy)
@testing.gpu
class TestCumprod:
    """cumprod tests, mirroring the TestCumsum coverage."""

    def _cumprod(self, xp, a, *args, **kwargs):
        b = a.copy()
        res = xp.cumprod(a, *args, **kwargs)
        testing.assert_array_equal(a, b)  # Check if input array is overwritten
        return res

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_cumprod_1dim(self, xp, dtype):
        a = testing.shaped_arange((5,), xp, dtype)
        return self._cumprod(xp, a)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_cumprod_out(self, xp, dtype):
        a = testing.shaped_arange((5,), xp, dtype)
        out = xp.zeros((5,), dtype=dtype)
        self._cumprod(xp, a, out=out)
        return out

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_cumprod_out_noncontiguous(self, xp, dtype):
        a = testing.shaped_arange((5,), xp, dtype)
        out = xp.zeros((10,), dtype=dtype)[::2]  # Non contiguous view
        self._cumprod(xp, a, out=out)
        return out

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose(rtol=1e-6)
    def test_cumprod_2dim_without_axis(self, xp, dtype):
        a = testing.shaped_arange((4, 5), xp, dtype)
        return self._cumprod(xp, a)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_cumprod_2dim_with_axis(self, xp, dtype):
        a = testing.shaped_arange((4, 5), xp, dtype)
        return self._cumprod(xp, a, axis=1)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_ndarray_cumprod_2dim_with_axis(self, xp, dtype):
        # Method form (a.cumprod) rather than the module function.
        a = testing.shaped_arange((4, 5), xp, dtype)
        return a.cumprod(axis=1)

    @testing.slow
    def test_cumprod_huge_array(self):
        # 2**32 elements: exercises 64-bit indexing in the scan kernel.
        size = 2 ** 32
        # Free huge memory for slow test
        cupy.get_default_memory_pool().free_all_blocks()
        a = cupy.ones(size, 'b')
        result = cupy.cumprod(a, dtype='b')
        del a
        assert (result == 1).all()
        # Free huge memory for slow test
        del result
        cupy.get_default_memory_pool().free_all_blocks()

    @testing.for_all_dtypes()
    def test_invalid_axis_lower1(self, dtype):
        for xp in (numpy, cupy):
            a = testing.shaped_arange((4, 5), xp, dtype)
            with pytest.raises(numpy.AxisError):
                xp.cumprod(a, axis=-a.ndim - 1)

    @testing.for_all_dtypes()
    def test_invalid_axis_lower2(self, dtype):
        for xp in (numpy, cupy):
            a = testing.shaped_arange((4, 5), xp, dtype)
            with pytest.raises(numpy.AxisError):
                xp.cumprod(a, axis=-a.ndim - 1)

    @testing.for_all_dtypes()
    def test_invalid_axis_upper1(self, dtype):
        for xp in (numpy, cupy):
            a = testing.shaped_arange((4, 5), xp, dtype)
            with pytest.raises(numpy.AxisError):
                return xp.cumprod(a, axis=a.ndim)

    @testing.for_all_dtypes()
    def test_invalid_axis_upper2(self, dtype):
        a = testing.shaped_arange((4, 5), cupy, dtype)
        with pytest.raises(numpy.AxisError):
            return cupy.cumprod(a, axis=a.ndim)

    def test_cumprod_arraylike(self):
        # cupy.cumprod only accepts cupy.ndarray, not generic array-likes.
        with pytest.raises(TypeError):
            return cupy.cumprod((1, 2, 3))

    @testing.for_float_dtypes()
    def test_cumprod_numpy_array(self, dtype):
        # ...nor numpy arrays (no implicit host-to-device transfer).
        a_numpy = numpy.arange(1, 6, dtype=dtype)
        with pytest.raises(TypeError):
            return cupy.cumprod(a_numpy)

    @testing.numpy_cupy_allclose()
    def test_cumproduct_alias(self, xp):
        # `cumproduct` is a (deprecated in NumPy) alias of `cumprod`.
        a = testing.shaped_arange((2, 3), xp, xp.float32)
        return xp.cumproduct(a)
@testing.parameterize(*testing.product({
    'shape': [(20,), (7, 6), (3, 4, 5)],
    'axis': [None, 0, 1, 2],
    'func': ('nancumsum', 'nancumprod'),
}))
@testing.gpu
class TestNanCumSumProd:
    """nancumsum/nancumprod against NumPy on arrays seeded with NaNs."""

    # Approximate fraction of positions that receive a NaN.
    zero_density = 0.25

    def _make_array(self, dtype):
        # Builds a NumPy array; float/complex inputs get NaNs injected.
        dtype = numpy.dtype(dtype)
        if dtype.char in 'efdFD':
            r_dtype = dtype.char.lower()
            a = testing.shaped_random(self.shape, numpy, dtype=r_dtype,
                                      scale=1)
            if dtype.char in 'FD':
                # Build real and imaginary parts separately; `ai` aliases `a`.
                ai = a
                aj = testing.shaped_random(self.shape, numpy, dtype=r_dtype,
                                           scale=1)
                ai[ai < math.sqrt(self.zero_density)] = 0
                aj[aj < math.sqrt(self.zero_density)] = 0
                a = ai + 1j * aj
            else:
                a[a < self.zero_density] = 0
                # 0/0 evaluates to NaN, so the zeroed positions become NaN
                # and every other position becomes 1.
                a = a / a
        else:
            a = testing.shaped_random(self.shape, numpy, dtype=dtype)
        return a

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_nancumsumprod(self, xp, dtype):
        if self.axis is not None and self.axis >= len(self.shape):
            pytest.skip()
        a = xp.array(self._make_array(dtype))
        out = getattr(xp, self.func)(a, axis=self.axis)
        return xp.ascontiguousarray(out)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_nancumsumprod_out(self, xp, dtype):
        dtype = numpy.dtype(dtype)
        if self.axis is not None and self.axis >= len(self.shape):
            pytest.skip()
        if len(self.shape) > 1 and self.axis is None:
            # Skip the cases where np.nancum{sum|prod} raise AssertionError.
            pytest.skip()
        a = xp.array(self._make_array(dtype))
        out = xp.empty(self.shape, dtype=dtype)
        getattr(xp, self.func)(a, axis=self.axis, out=out)
        return xp.ascontiguousarray(out)
@testing.gpu
class TestDiff:
    """xp.diff tests: n-th order differences, axis, prepend/append."""

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_diff_1dim(self, xp, dtype):
        a = testing.shaped_arange((5,), xp, dtype)
        return xp.diff(a)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_diff_1dim_with_n(self, xp, dtype):
        a = testing.shaped_arange((5,), xp, dtype)
        return xp.diff(a, n=3)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_diff_2dim_without_axis(self, xp, dtype):
        a = testing.shaped_arange((4, 5), xp, dtype)
        return xp.diff(a)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_diff_2dim_with_axis(self, xp, dtype):
        a = testing.shaped_arange((4, 5), xp, dtype)
        return xp.diff(a, axis=-2)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_diff_2dim_with_n_and_axis(self, xp, dtype):
        # Positional n=2, axis=1.
        a = testing.shaped_arange((4, 5), xp, dtype)
        return xp.diff(a, 2, 1)

    @testing.with_requires('numpy>=1.16')
    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_diff_2dim_with_prepend(self, xp, dtype):
        a = testing.shaped_arange((4, 5), xp, dtype)
        b = testing.shaped_arange((4, 1), xp, dtype)
        return xp.diff(a, axis=-1, prepend=b)

    @testing.with_requires('numpy>=1.16')
    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_diff_2dim_with_append(self, xp, dtype):
        a = testing.shaped_arange((4, 5), xp, dtype)
        b = testing.shaped_arange((1, 5), xp, dtype)
        return xp.diff(a, axis=0, append=b, n=2)

    @testing.with_requires('numpy>=1.16')
    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_diff_2dim_with_scalar_append(self, xp, dtype):
        a = testing.shaped_arange((4, 5), xp, dtype)
        return xp.diff(a, prepend=1, append=0)

    @testing.with_requires('numpy>=1.16')
    def test_diff_invalid_axis(self):
        for xp in (numpy, cupy):
            a = testing.shaped_arange((2, 3, 4), xp)
            with pytest.raises(numpy.AxisError):
                xp.diff(a, axis=3)
            with pytest.raises(numpy.AxisError):
                xp.diff(a, axis=-4)
# This class compares xp.gradient results against NumPy's.
# (The original comment said "CUB results" — copy-paste from the class above;
# gradient does not go through CUB here.)
@testing.parameterize(*testing.product_dict(
    testing.product({
        'shape': [()],
        'axis': [None, ()],
        'spacing': [(), (1.2,)],
    })
    + testing.product({
        'shape': [(33,)],
        'axis': [None, 0, -1, (0,)],
        'spacing': [(), (1.2,), 'sequence of int', 'arrays'],
    })
    + testing.product({
        'shape': [(10, 20), (10, 20, 30)],
        'axis': [None, 0, -1, (0, -1), (1, 0)],
        'spacing': [(), (1.2,), 'sequence of int', 'arrays', 'mixed'],
    }),
    testing.product({
        'edge_order': [1, 2],
    }),
))
@testing.gpu
class TestGradient:
    """xp.gradient over many shape/axis/spacing/edge_order combinations.

    The string spacing parameters ('sequence of int', 'arrays', 'mixed')
    are placeholders expanded into concrete spacing arguments by _gradient.
    """

    def _gradient(self, xp, dtype, shape, spacing, axis, edge_order):
        x = testing.shaped_random(shape, xp, dtype=dtype)
        if axis is None:
            normalized_axes = tuple(range(x.ndim))
        else:
            normalized_axes = axis
            if not isinstance(normalized_axes, tuple):
                normalized_axes = normalized_axes,
            # Normalize negative axes to their positive form.
            normalized_axes = tuple(ax % x.ndim for ax in normalized_axes)
        if spacing == 'sequence of int':
            # one scalar per axis
            spacing = tuple((ax + 1) / x.ndim for ax in normalized_axes)
        elif spacing == 'arrays':
            # one array per axis
            spacing = tuple(
                xp.arange(x.shape[ax]) * (ax + 0.5) for ax in normalized_axes
            )
            # make at least one of the arrays have non-constant spacing
            spacing[-1][5:] *= 2.0
        elif spacing == 'mixed':
            # mixture of arrays and scalars
            spacing = [xp.arange(x.shape[normalized_axes[0]])]
            spacing = spacing + [0.5] * (len(normalized_axes) - 1)
        return xp.gradient(x, *spacing, axis=axis, edge_order=edge_order)

    @testing.for_dtypes('fFdD')
    @testing.numpy_cupy_allclose(atol=1e-6, rtol=1e-5)
    def test_gradient_floating(self, xp, dtype):
        return self._gradient(xp, dtype, self.shape, self.spacing, self.axis,
                              self.edge_order)

    # unsigned int behavior fixed in 1.18.1
    # https://github.com/numpy/numpy/issues/15207
    @testing.with_requires('numpy>=1.18.1')
    @testing.for_int_dtypes(no_bool=True)
    @testing.numpy_cupy_allclose(atol=1e-6, rtol=1e-5)
    def test_gradient_int(self, xp, dtype):
        return self._gradient(xp, dtype, self.shape, self.spacing, self.axis,
                              self.edge_order)

    @testing.numpy_cupy_allclose(atol=2e-2, rtol=1e-3)
    def test_gradient_float16(self, xp):
        # float16 gets much looser tolerances than the other float dtypes.
        return self._gradient(xp, numpy.float16, self.shape, self.spacing,
                              self.axis, self.edge_order)
@testing.gpu
class TestGradientErrors:
    """Error conditions of ``gradient`` checked on both numpy and cupy."""

    def test_gradient_invalid_spacings1(self):
        # more spacing arguments than array dimensions
        bad_spacing = (1.0, 2.0, 3.0)
        for xp in (numpy, cupy):
            data = testing.shaped_random((32, 16), xp)
            with pytest.raises(TypeError):
                xp.gradient(data, *bad_spacing)

    def test_gradient_invalid_spacings2(self):
        # coordinate array whose length does not match the axis
        shape = (32, 16)
        bad_spacing = (15, cupy.arange(shape[1] + 1))
        for xp in (numpy, cupy):
            data = testing.shaped_random(shape, xp)
            with pytest.raises(ValueError):
                xp.gradient(data, *bad_spacing)

    def test_gradient_invalid_spacings3(self):
        # coordinate arrays must be one-dimensional
        shape = (32, 16)
        bad_spacing = (15, cupy.arange(shape[0]).reshape(4, -1))
        for xp in (numpy, cupy):
            data = testing.shaped_random(shape, xp)
            with pytest.raises(ValueError):
                xp.gradient(data, *bad_spacing)

    def test_gradient_invalid_edge_order1(self):
        # only edge_order 1 and 2 are supported
        for xp in (numpy, cupy):
            data = testing.shaped_random((32, 16), xp)
            with pytest.raises(ValueError):
                xp.gradient(data, edge_order=3)

    def test_gradient_invalid_edge_order2(self):
        # axis length must exceed the requested edge_order
        for xp in (numpy, cupy):
            data = testing.shaped_random((1, 16), xp)
            with pytest.raises(ValueError):
                xp.gradient(data, axis=0, edge_order=2)

    @testing.with_requires('numpy>=1.16')
    def test_gradient_invalid_axis(self):
        # axis outside [-ndim, ndim) raises AxisError
        for xp in (numpy, cupy):
            data = testing.shaped_random((4, 16), xp)
            for bad_axis in (-3, 2):
                with pytest.raises(numpy.AxisError):
                    xp.gradient(data, axis=bad_axis)

    def test_gradient_bool_input(self):
        # boolean arrays are rejected
        for xp in (numpy, cupy):
            data = testing.shaped_random((4, 16), xp, dtype=numpy.bool_)
            with pytest.raises(TypeError):
                xp.gradient(data)
class TestEdiff1d:
    """Tests for ``ediff1d`` (differences of the flattened array)."""

    @testing.for_all_dtypes(no_bool=True)
    @testing.numpy_cupy_allclose()
    def test_ediff1d_1dim(self, xp, dtype):
        arr = testing.shaped_arange((5,), xp, dtype)
        return xp.ediff1d(arr)

    @testing.for_all_dtypes(no_bool=True)
    @testing.numpy_cupy_allclose()
    def test_ediff1d_2dim(self, xp, dtype):
        arr = testing.shaped_arange((4, 5), xp, dtype)
        return xp.ediff1d(arr)

    @testing.for_all_dtypes(no_bool=True)
    @testing.numpy_cupy_allclose()
    def test_ediff1d_3dim(self, xp, dtype):
        arr = testing.shaped_arange((2, 3, 4), xp, dtype)
        return xp.ediff1d(arr)

    @testing.for_all_dtypes(no_bool=True)
    @testing.numpy_cupy_allclose()
    def test_ediff1d_to_begin1(self, xp, dtype):
        arr = testing.shaped_arange((5,), xp, dtype)
        return xp.ediff1d(arr, to_begin=xp.array([0], dtype=dtype))

    @testing.for_all_dtypes(no_bool=True)
    @testing.numpy_cupy_allclose()
    def test_ediff1d_to_begin2(self, xp, dtype):
        arr = testing.shaped_arange((5,), xp, dtype)
        return xp.ediff1d(arr, to_begin=xp.array([4, 4], dtype=dtype))

    @testing.for_all_dtypes(no_bool=True)
    @testing.numpy_cupy_allclose()
    def test_ediff1d_to_begin3(self, xp, dtype):
        arr = testing.shaped_arange((4, 5), xp, dtype)
        return xp.ediff1d(arr, to_begin=xp.array([1, 1], dtype=dtype))

    @testing.for_all_dtypes(no_bool=True)
    @testing.numpy_cupy_allclose()
    def test_ediff1d_to_end1(self, xp, dtype):
        arr = testing.shaped_arange((5,), xp, dtype)
        return xp.ediff1d(arr, to_end=xp.array([0], dtype=dtype))

    @testing.for_all_dtypes(no_bool=True)
    @testing.numpy_cupy_allclose()
    def test_ediff1d_to_end2(self, xp, dtype):
        arr = testing.shaped_arange((4, 1), xp, dtype)
        return xp.ediff1d(arr, to_end=xp.array([1, 2], dtype=dtype))

    @testing.for_all_dtypes(no_bool=True)
    @testing.numpy_cupy_allclose()
    def test_ediff1d_ed1(self, xp, dtype):
        arr = testing.shaped_arange((2, 3, 4, 5), xp, dtype)
        return xp.ediff1d(arr, to_begin=xp.array([-1], dtype=dtype),
                          to_end=xp.array([0], dtype=dtype))

    @testing.for_all_dtypes(no_bool=True)
    @testing.numpy_cupy_allclose()
    def test_ediff1d_ed2(self, xp, dtype):
        arr = testing.shaped_arange((2, 3), xp, dtype)
        return xp.ediff1d(arr, to_begin=xp.array([0, 4], dtype=dtype),
                          to_end=xp.array([1, 1], dtype=dtype))
class TestTrapz:
    """Tests for ``trapz`` (trapezoidal-rule integration)."""

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_trapz_1dim(self, xp, dtype):
        vals = testing.shaped_arange((5,), xp, dtype)
        return xp.trapz(vals)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_trapz_1dim_with_x(self, xp, dtype):
        vals = testing.shaped_arange((5,), xp, dtype)
        coords = testing.shaped_arange((5,), xp, dtype)
        return xp.trapz(vals, x=coords)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_trapz_1dim_with_dx(self, xp, dtype):
        vals = testing.shaped_arange((5,), xp, dtype)
        return xp.trapz(vals, dx=0.1)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_trapz_2dim_without_axis(self, xp, dtype):
        vals = testing.shaped_arange((4, 5), xp, dtype)
        return xp.trapz(vals)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose()
    def test_trapz_2dim_with_axis(self, xp, dtype):
        vals = testing.shaped_arange((4, 5), xp, dtype)
        return xp.trapz(vals, axis=-2)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose(rtol={numpy.float16: 1e-3, 'default': 1e-7})
    def test_trapz_2dim_with_x_and_axis(self, xp, dtype):
        vals = testing.shaped_arange((4, 5), xp, dtype)
        coords = testing.shaped_arange((5,), xp, dtype)
        return xp.trapz(vals, x=coords, axis=1)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose(rtol={numpy.float16: 1e-3, 'default': 1e-7})
    def test_trapz_2dim_with_dx_and_axis(self, xp, dtype):
        vals = testing.shaped_arange((4, 5), xp, dtype)
        return xp.trapz(vals, dx=0.1, axis=1)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_allclose(rtol={numpy.float16: 1e-3, 'default': 1e-7})
    def test_trapz_1dim_with_x_and_dx(self, xp, dtype):
        # x takes precedence over dx when both are given
        vals = testing.shaped_arange((5,), xp, dtype)
        coords = testing.shaped_arange((5,), xp, dtype)
        return xp.trapz(vals, x=coords, dx=0.1)
| |
import os
import sys
# make the project root importable when this script is run from its own dir
root_path = os.path.abspath("../")
if root_path not in sys.path:
    sys.path.append(root_path)
import matplotlib.pyplot as plt
from g_CNN.Layers import *
from g_CNN.Optimizers import *
from Util.Timing import Timing
from Util.Bases import TFClassifierBase
from Util.ProgressBar import ProgressBar
class NNVerbose:
    # Verbosity levels for NN training output; a higher value includes all
    # output of the lower ones. ITER sits between EPOCH and METRICS so it can
    # be enabled without per-batch metric logging.
    NONE = 0
    EPOCH = 1
    ITER = 1.5
    METRICS = 2
    METRICS_DETAIL = 3
    DETAIL = 4
    DEBUG = 5
class NN(TFClassifierBase):
NNTiming = Timing()
def __init__(self, **kwargs):
super(NN, self).__init__(**kwargs)
self._layers = []
self._optimizer = None
self._current_dimension = 0
self._available_metrics = {
key: value for key, value in zip(["acc", "f1-score"], [NN.acc, NN.f1_score])
}
self._metrics, self._metric_names, self._logs = [], [], {}
self.verbose = 0
self._layer_factory = LayerFactory()
self._tf_weights, self._tf_bias = [], []
self._loss = self._train_step = self._inner_y = None
self._params["lr"] = kwargs.get("lr", 0.001)
self._params["epoch"] = kwargs.get("epoch", 10)
self._params["optimizer"] = kwargs.get("optimizer", "Adam")
self._params["batch_size"] = kwargs.get("batch_size", 256)
self._params["train_rate"] = kwargs.get("train_rate", None)
self._params["metrics"] = kwargs.get("metrics", None)
self._params["record_period"] = kwargs.get("record_period", 100)
self._params["verbose"] = kwargs.get("verbose", 1)
self._params["preview"] = kwargs.get("preview", True)
@NNTiming.timeit(level=1)
def _get_prediction(self, x, name=None, batch_size=1e6, verbose=None):
if verbose is None:
verbose = self.verbose
single_batch = batch_size / np.prod(x.shape[1:]) # type: float
single_batch = int(single_batch)
if not single_batch:
single_batch = 1
if single_batch >= len(x):
return self._sess.run(self._y_pred, {self._tfx: x})
epoch = int(len(x) / single_batch)
if not len(x) % single_batch:
epoch += 1
name = "Prediction" if name is None else "Prediction ({})".format(name)
sub_bar = ProgressBar(max_value=epoch, name=name, start=False)
if verbose >= NNVerbose.METRICS:
sub_bar.start()
rs = [self._sess.run(self._y_pred, {self._tfx: x[:single_batch]})]
count = single_batch
if verbose >= NNVerbose.METRICS:
sub_bar.update()
while count < len(x):
count += single_batch
if count >= len(x):
rs.append(self._sess.run(self._y_pred, {self._tfx: x[count - single_batch:]}))
else:
rs.append(self._sess.run(self._y_pred, {self._tfx: x[count - single_batch:count]}))
if verbose >= NNVerbose.METRICS:
sub_bar.update()
return np.vstack(rs)
@staticmethod
@NNTiming.timeit(level=4)
def _get_w(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial, name="w")
@staticmethod
@NNTiming.timeit(level=4)
def _get_b(shape):
return tf.Variable(np.zeros(shape, dtype=np.float32) + 0.1, name="b")
    @NNTiming.timeit(level=4)
    def _add_params(self, shape, conv_channel=None, fc_shape=None, apply_bias=True):
        """Create the tf weight / bias variables for a newly added layer.

        :param shape: (current_dim, next_dim) of the layer
        :param conv_channel: input channel count for a conv layer
        :param fc_shape: flattened size when a conv output feeds a dense layer
        :param apply_bias: when False a None placeholder keeps lists aligned
        """
        if fc_shape is not None:
            # dense layer fed by a flattened conv output
            w_shape = (fc_shape, shape[1])
            b_shape = shape[1],
        elif conv_channel is not None:
            if len(shape[1]) <= 2:
                # only (filter_h, filter_w) given: channel count is unchanged
                w_shape = shape[1][0], shape[1][1], conv_channel, conv_channel
            else:
                # (n_filters, filter_h, filter_w) given
                w_shape = (shape[1][1], shape[1][2], conv_channel, shape[1][0])
            b_shape = shape[1][0],
        else:
            w_shape = shape
            b_shape = shape[1],
        self._tf_weights.append(self._get_w(w_shape))
        if apply_bias:
            self._tf_bias.append(self._get_b(b_shape))
        else:
            # keep _tf_bias index-aligned with _tf_weights / _layers
            self._tf_bias.append(None)
@NNTiming.timeit(level=4)
def _add_param_placeholder(self):
self._tf_weights.append(tf.constant([.0]))
self._tf_bias.append(tf.constant([.0]))
    @NNTiming.timeit(level=4)
    def _add_layer(self, layer, *args, **kwargs):
        """Resolve *layer* (instance or registered name) and append it,
        creating its tf parameters as needed."""
        if not self._layers and isinstance(layer, str):
            # first layer given by name: let the factory build a root layer
            layer = self._layer_factory.get_root_layer_by_name(layer, *args, **kwargs)
            if layer:
                self.add(layer)
                return
        parent = self._layers[-1]
        if isinstance(layer, str):
            layer, shape = self._layer_factory.get_layer_by_name(
                layer, parent, self._current_dimension, *args, **kwargs
            )
            if shape is None:
                # factory returned a fully-configured layer
                self.add(layer)
                return
            current, nxt = shape
        else:
            current, nxt = args
        if isinstance(layer, SubLayer):
            # NOTE(review): this stores the parent on the *network* object;
            # `layer.parent = parent` looks like what was intended — confirm.
            self.parent = parent
            self._layers.append(layer)
            # SubLayers own no parameters; keep lists aligned
            self._add_param_placeholder()
            self._current_dimension = nxt
        else:
            fc_shape, conv_channel, last_layer = None, None, self._layers[-1]
            if isinstance(last_layer, ConvLayer):
                if isinstance(layer, ConvLayer):
                    # conv -> conv: infer input channels and spatial dims
                    conv_channel = last_layer.n_filters
                    current = (conv_channel, last_layer.out_h, last_layer.out_w)
                    layer.feed_shape((current, nxt))
                else:
                    # conv -> dense transition: flatten the conv output
                    layer.is_fc = True
                    last_layer.is_fc_base = True
                    fc_shape = last_layer.out_h * last_layer.out_w * last_layer.n_filters
            self._layers.append(layer)
            self._add_params((current, nxt), conv_channel, fc_shape, layer.apply_bias)
            self._current_dimension = nxt
    @NNTiming.timeit(level=1)
    def _get_rs(self, x, predict=True, idx=-1):
        """Build the forward-pass graph from *x* through the layers.

        The final layer is applied as a plain matmul (+ bias) so the cost
        layer's activation is not applied to the returned logits.
        """
        cache = self._layers[0].activate(x, self._tf_weights[0], self._tf_bias[0], predict)
        # normalize idx into a positive slice bound over self._layers
        idx = idx + 1 if idx >= 0 else len(self._layers) + idx + 1
        for i, layer in enumerate(self._layers[1:idx]):
            if i == len(self._layers) - 2:
                if isinstance(self._layers[-2], ConvLayer):
                    # flatten conv output before the final dense transform
                    fc_shape = np.prod(cache.get_shape()[1:])  # type: int
                    cache = tf.reshape(cache, [-1, fc_shape])
                if self._tf_bias[-1] is not None:
                    return tf.matmul(cache, self._tf_weights[-1]) + self._tf_bias[-1]
                return tf.matmul(cache, self._tf_weights[-1])
            cache = layer.activate(cache, self._tf_weights[i + 1], self._tf_bias[i + 1], predict)
        return cache
    @NNTiming.timeit(level=2)
    def _append_log(self, x, y, y_classes, name):
        """Evaluate all metrics and the loss on (x, y) and append the results
        to self._logs[name]; the last log slot always holds the loss."""
        y_pred = self._get_prediction(x, name)
        y_pred_class = np.argmax(y_pred, axis=1)
        for i, metric in enumerate(self._metrics):
            self._logs[name][i].append(metric(y_classes, y_pred_class))
        self._logs[name][-1].append(self._sess.run(
            self._layers[-1].calculate(y, y_pred)
        ))
@NNTiming.timeit(level=2)
def _print_metric_logs(self, name):
print()
print("=" * 47)
for i, metric in enumerate(self._metric_names):
print("{:<16s} {:<16s}: {:12.8}".format(
name, metric, self._logs[name][i][-1]))
print("{:<16s} {:<16s}: {:12.8}".format(
name, "loss", self._logs[name][-1][-1]))
print("=" * 47)
@staticmethod
@NNTiming.timeit(level=4, prefix="[Private StaticMethod] ")
def _transfer_x(x):
if len(x.shape) == 1:
x = x.reshape(1, -1)
if len(x.shape) == 4:
x = x.transpose(0, 2, 3, 1)
return x.astype(np.float32)
    @NNTiming.timeit(level=4)
    def _preview(self):
        """Print a summary of the network structure and the optimizer."""
        if not self._layers:
            rs = "None"
        else:
            # input dimension, then every hidden layer, then the cost layer
            rs = (
                "Input : {:<10s} - {}\n".format("Dimension", self._layers[0].shape[0]) +
                "\n".join(
                    ["Layer : {:<10s} - {}".format(
                        _layer.name, _layer.shape[1]
                    ) for _layer in self._layers[:-1]]
                ) + "\nCost : {:<10s}".format(self._layers[-1].name)
            )
        print("\n" + "=" * 30 + "\n" + "Structure\n" + "-" * 30 + "\n" + rs + "\n" + "=" * 30)
        print("Optimizer")
        print("-" * 30)
        print(self._optimizer)
        print("=" * 30)
@NNTiming.timeit(level=2)
def _batch_work(self, i, bar, x_train, y_train, y_train_classes, x_test, y_test, y_test_classes, condition):
if bar is not None:
condition = bar.update() and condition
if condition:
self._append_log(x_train, y_train, y_train_classes, "Train")
self._append_log(x_test, y_test, y_test_classes, "Test")
self._print_metric_logs("Train")
self._print_metric_logs("Test")
    @NNTiming.timeit(level=4, prefix="[API] ")
    def add(self, layer, *args, **kwargs):
        """Append a layer (instance or registered name) to the network."""
        if isinstance(layer, str):
            self._add_layer(layer, *args, **kwargs)
        else:
            if not self._layers:
                # first layer fixes the network's input dimension
                self._layers, self._current_dimension = [layer], layer.shape[1]
                if isinstance(layer, ConvLayer):
                    self._add_params(layer.shape, layer.n_channels, apply_bias=layer.apply_bias)
                else:
                    self._add_params(layer.shape, apply_bias=layer.apply_bias)
            else:
                if len(layer.shape) == 2:
                    _current, _next = layer.shape
                else:
                    # only the output dim was given; infer the input dim
                    _current, _next = self._current_dimension, layer.shape[0]
                    layer.shape = (_current, _next)
                self._add_layer(layer, _current, _next)
    @NNTiming.timeit(level=1, prefix="[API] ")
    def fit(self, x, y, lr=None, epoch=None, batch_size=None, train_rate=None,
            optimizer=None, metrics=None, record_period=None, verbose=None, preview=None):
        """Train the network on (x, y); y is expected to be one-hot encoded.

        Every parameter left as None falls back to the value collected in
        self._params by __init__.
        """
        if lr is None:
            lr = self._params["lr"]
        if epoch is None:
            epoch = self._params["epoch"]
        if optimizer is None:
            optimizer = self._params["optimizer"]
        if batch_size is None:
            batch_size = self._params["batch_size"]
        if train_rate is None:
            train_rate = self._params["train_rate"]
        if metrics is None:
            metrics = self._params["metrics"]
        if record_period is None:
            record_period = self._params["record_period"]
        if verbose is None:
            verbose = self._params["verbose"]
        if preview is None:
            preview = self._params["preview"]
        x = NN._transfer_x(x)
        self.verbose = verbose
        self._optimizer = OptFactory().get_optimizer_by_name(optimizer, lr)
        self._tfx = tf.placeholder(tf.float32, shape=[None, *x.shape[1:]])
        self._tfy = tf.placeholder(tf.float32, shape=[None, y.shape[1]])
        if train_rate is not None:
            # shuffle, then split into train / test partitions
            train_rate = float(train_rate)
            train_len = int(len(x) * train_rate)
            shuffle_suffix = np.random.permutation(int(len(x)))
            x, y = x[shuffle_suffix], y[shuffle_suffix]
            x_train, y_train = x[:train_len], y[:train_len]
            x_test, y_test = x[train_len:], y[train_len:]
        else:
            # no split: evaluate on the training data itself
            x_train = x_test = x
            y_train = y_test = y
        y_train_classes = np.argmax(y_train, axis=1)
        y_test_classes = np.argmax(y_test, axis=1)
        if metrics is None:
            metrics = []
        self._metrics = self.get_metrics(metrics)
        # NOTE(review): assumes *metrics* holds callables at this point
        # (string names would have no __name__) — confirm get_metrics'
        # contract in the base class.
        self._metric_names = [_m.__name__ for _m in metrics]
        self._logs = {
            name: [[] for _ in range(len(metrics) + 1)] for name in ("Train", "Test")
        }
        bar = ProgressBar(max_value=max(1, epoch // record_period), name="Epoch", start=False)
        if self.verbose >= NNVerbose.EPOCH:
            bar.start()
        if preview:
            self._preview()
        # args[0] feeds _batch_training, args[1] feeds _batch_work
        args = (
            (x_train, y_train, y_train_classes,
             x_test, y_test, y_test_classes,
             self.verbose >= NNVerbose.METRICS_DETAIL),
            (None, None, x_train, y_train, y_train_classes, x_test, y_test, y_test_classes,
             self.verbose >= NNVerbose.METRICS)
        )
        train_repeat = self._get_train_repeat(x, batch_size)
        with self._sess.as_default() as sess:
            # build graph: prediction, training output, loss and update op
            self._y_pred = self._get_rs(self._tfx)
            self._inner_y = self._get_rs(self._tfx, predict=False)
            self._loss = self._layers[-1].calculate(self._tfy, self._inner_y)
            self._train_step = self._optimizer.minimize(self._loss)
            sess.run(tf.global_variables_initializer())
            for counter in range(epoch):
                if self.verbose >= NNVerbose.ITER and counter % record_period == 0:
                    sub_bar = ProgressBar(max_value=train_repeat * record_period - 1, name="Iteration")
                else:
                    sub_bar = None
                self._batch_training(x_train, y_train, batch_size, train_repeat,
                                     self._loss, self._train_step, sub_bar, *args[0])
                if (counter + 1) % record_period == 0:
                    self._batch_work(*args[1])
                    if self.verbose >= NNVerbose.EPOCH:
                        bar.update(counter // record_period + 1)
@NNTiming.timeit(level=1, prefix="[API] ")
def predict(self, x, get_raw_results=False, **kwargs):
y_pred = self._get_prediction(NN._transfer_x(x))
if get_raw_results:
return y_pred
return np.argmax(y_pred, axis=1)
def draw_logs(self):
metrics_log, cost_log = {}, {}
for key, value in sorted(self._logs.items()):
metrics_log[key], cost_log[key] = value[:-1], value[-1]
for i, name in enumerate(sorted(self._metric_names)):
plt.figure()
plt.title("Metric Type: {}".format(name))
for key, log in sorted(metrics_log.items()):
xs = np.arange(len(log[i])) + 1
plt.plot(xs, log[i], label="Data Type: {}".format(key))
plt.legend(loc=4)
plt.show()
plt.close()
plt.figure()
plt.title("Cost")
for key, loss in sorted(cost_log.items()):
xs = np.arange(len(loss)) + 1
plt.plot(xs, loss, label="Data Type: {}".format(key))
plt.legend()
plt.show()
| |
# Copyright (c) 2013-2021 khal contributors
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""This module contains the event model with all relevant subclasses and some
helper functions."""
import datetime as dt
import logging
import os
import icalendar
import pytz
from click import style
from ..exceptions import FatalError
from ..icalendar import cal_from_ics, delete_instance, invalid_timezone
from ..terminal import get_color
from ..utils import generate_random_uid, is_aware, to_naive_utc, to_unix_time
from ..parse_datetime import timedelta2str
logger = logging.getLogger('khal')
class Event:
"""base Event class for representing a *recurring instance* of an Event
(in case of non-recurring events this distinction is irrelevant)
We keep a copy of the start and end time around, because for recurring
events it might be costly to expand the recursion rules
important distinction for AllDayEvents:
all end times are as presented to a user, i.e. an event scheduled for
only one day will have the same start and end date (even though the
icalendar standard would have the end date be one day later)
"""
allday = False
    def __init__(self, vevents, ref=None, **kwargs):
        """
        :param vevents: mapping from instance identifiers to VEVENTs
        :param ref: key into *vevents* selecting the represented instance
        :param start: start datetime of this event instance
        :type start: datetime.date
        :param end: end datetime of this event instance
        :type end: datetime.date
        """
        # Event is abstract; only the typed subclasses may be instantiated
        if self.__class__.__name__ == 'Event':
            raise ValueError('do not initialize this class directly')
        self._vevents = vevents
        self._locale = kwargs.pop('locale', None)
        self.readonly = kwargs.pop('readonly', None)
        self.href = kwargs.pop('href', None)
        self.etag = kwargs.pop('etag', None)
        self.calendar = kwargs.pop('calendar', None)
        self.color = kwargs.pop('color', None)
        self.ref = ref
        start = kwargs.pop('start', None)
        end = kwargs.pop('end', None)
        if start is None:
            self._start = self._vevents[self.ref]['DTSTART'].dt
        else:
            self._start = start
        if end is None:
            try:
                self._end = self._vevents[self.ref]['DTEND'].dt
            except KeyError:
                try:
                    # no DTEND: derive the end from DTSTART + DURATION ...
                    self._end = self._start + self._vevents[self.ref]['DURATION'].dt
                except KeyError:
                    # ... and with neither, assume a one-day event
                    self._end = self._start + dt.timedelta(days=1)
        else:
            self._end = end
        # everything consumable has been pop()ed; leftovers are user errors
        if kwargs:
            raise TypeError('%s are invalid keyword arguments to this function' % kwargs.keys())
    @classmethod
    def _get_type_from_vDDD(cls, start):
        """Select the Event subclass matching an icalendar DTSTART property.

        :type start: icalendar.prop.vDDDTypes
        :rtype: type
        """
        if not isinstance(start.dt, dt.datetime):
            return AllDayEvent
        if 'TZID' in start.params or start.dt.tzinfo is not None:
            return LocalizedEvent
        return FloatingEvent
@classmethod
def _get_type_from_date(cls, start):
if hasattr(start, 'tzinfo') and start.tzinfo is not None:
cls = LocalizedEvent
elif isinstance(start, dt.datetime):
cls = FloatingEvent
elif isinstance(start, dt.date):
cls = AllDayEvent
return cls
@classmethod
def fromVEvents(cls, events_list, ref=None, **kwargs):
"""
:type events: list
"""
assert isinstance(events_list, list)
vevents = {}
for event in events_list:
if 'RECURRENCE-ID' in event:
if invalid_timezone(event['RECURRENCE-ID']):
default_timezone = kwargs['locale']['default_timezone']
recur_id = default_timezone.localize(event['RECURRENCE-ID'].dt)
ident = str(to_unix_time(recur_id))
else:
ident = str(to_unix_time(event['RECURRENCE-ID'].dt))
vevents[ident] = event
else:
vevents['PROTO'] = event
if ref is None:
ref = 'PROTO' if ref in vevents.keys() else list(vevents.keys())[0]
try:
if type(vevents[ref]['DTSTART'].dt) != type(vevents[ref]['DTEND'].dt): # noqa: E721
raise ValueError('DTSTART and DTEND should be of the same type (datetime or date)')
except KeyError:
pass
if kwargs.get('start'):
instcls = cls._get_type_from_date(kwargs.get('start'))
else:
instcls = cls._get_type_from_vDDD(vevents[ref]['DTSTART'])
return instcls(vevents, ref=ref, **kwargs)
@classmethod
def fromString(cls, event_str, ref=None, **kwargs):
calendar_collection = cal_from_ics(event_str)
events = [item for item in calendar_collection.walk() if item.name == 'VEVENT']
return cls.fromVEvents(events, ref, **kwargs)
    def __lt__(self, other):
        """Order events by local start, then end, then summary."""
        start = self.start_local
        other_start = other.start_local
        # coerce all-day dates to midnight datetimes so mixed types compare
        if isinstance(start, dt.date) and not isinstance(start, dt.datetime):
            start = dt.datetime.combine(start, dt.time.min)
        if isinstance(other_start, dt.date) and not isinstance(other_start, dt.datetime):
            other_start = dt.datetime.combine(other_start, dt.time.min)
        # strip tzinfo: both sides are already in local time
        start = start.replace(tzinfo=None)
        other_start = other_start.replace(tzinfo=None)
        if start == other_start:
            end = self.end_local
            other_end = other.end_local
            if isinstance(end, dt.date) and not isinstance(end, dt.datetime):
                end = dt.datetime.combine(end, dt.time.min)
            if isinstance(other_end, dt.date) and not isinstance(other_end, dt.datetime):
                other_end = dt.datetime.combine(other_end, dt.time.min)
            end = end.replace(tzinfo=None)
            other_end = other_end.replace(tzinfo=None)
            if end == other_end:
                return self.summary < other.summary
            try:
                return end < other_end
            except TypeError:
                raise ValueError(f'Cannot compare events {end} and {other_end}')
        try:
            return start < other_start
        except TypeError:
            raise ValueError(f'Cannot compare events {start} and {other_start}')
    def update_start_end(self, start, end):
        """update start and end time of this event

        calling this on a recurring event will lead to the proto instance
        be set to the new start and end times

        beware, this methods performs some open heart surgery
        """
        if type(start) != type(end):  # flake8: noqa
            raise ValueError('DTSTART and DTEND should be of the same type (datetime or date)')
        # the event may switch between all-day / floating / localized
        self.__class__ = self._get_type_from_date(start)
        self._vevents[self.ref].pop('DTSTART')
        self._vevents[self.ref].add('DTSTART', start)
        self._start = start
        if not isinstance(end, dt.datetime):
            # iCalendar all-day DTEND is exclusive: store one day past the
            # user-visible end date
            end = end + dt.timedelta(days=1)
        self._end = end
        if 'DTEND' in self._vevents[self.ref]:
            self._vevents[self.ref].pop('DTEND')
            self._vevents[self.ref].add('DTEND', end)
        else:
            # the event was defined via DURATION; keep it that way
            self._vevents[self.ref].pop('DURATION')
            self._vevents[self.ref].add('DURATION', end - start)
@property
def recurring(self):
try:
rval = 'RRULE' in self._vevents[self.ref] or \
'RECURRENCE-ID' in self._vevents[self.ref] or \
'RDATE' in self._vevents[self.ref]
except KeyError:
logger.fatal(
f"The event at {self.href} might be broken. You might want to "
"file an issue at https://github.com/pimutils/khal/issues"
)
raise
else:
return rval
@property
def recurpattern(self):
if 'RRULE' in self._vevents[self.ref]:
return self._vevents[self.ref]['RRULE'].to_ical().decode('utf-8')
else:
return ''
@property
def recurobject(self):
if 'RRULE' in self._vevents[self.ref]:
return self._vevents[self.ref]['RRULE']
else:
return icalendar.vRecur()
def update_rrule(self, rrule):
self._vevents['PROTO'].pop('RRULE')
if rrule is not None:
self._vevents['PROTO'].add('RRULE', rrule)
@property
def recurrence_id(self):
"""return the "original" start date of this event (i.e. their recurrence-id)
"""
if self.ref == 'PROTO':
return self.start
else:
return pytz.UTC.localize(dt.datetime.utcfromtimestamp(int(self.ref)))
def increment_sequence(self):
"""update the SEQUENCE number, call before saving this event"""
# TODO we might want to do this automatically in raw() everytime
# the event has changed, this will f*ck up the tests though
try:
self._vevents[self.ref]['SEQUENCE'] += 1
except KeyError:
self._vevents[self.ref]['SEQUENCE'] = 0
@property
def symbol_strings(self):
if self._locale['unicode_symbols']:
return {
'recurring': '\N{Clockwise gapped circle arrow}',
'alarming': self._locale['alarm_symbol'],
'range': '\N{Left right arrow}',
'range_end': '\N{Rightwards arrow to bar}',
'range_start': '\N{Rightwards arrow from bar}',
'right_arrow': '\N{Rightwards arrow}'
}
else:
return {
'recurring': '(R)',
'alarming': '(A)',
'range': '<->',
'range_end': '->|',
'range_start': '|->',
'right_arrow': '->'
}
    @property
    def start_local(self):
        """self.start() localized to local timezone"""
        # the base class keeps stored time; subclasses override to localize
        return self.start
    @property
    def end_local(self):
        """self.end() localized to local timezone"""
        # the base class keeps stored time; subclasses override to localize
        return self.end
    @property
    def start(self):
        """this should return the start date(time) as saved in the event"""
        return self._start
    @property
    def end(self):
        """this should return the end date(time) as saved in the event or
        implicitly defined by start and duration"""
        return self._end
@property
def duration(self):
try:
return self._vevents[self.ref]['DURATION'].dt
except KeyError:
return self.end - self.start
    @property
    def uid(self):
        # the UID property of the referenced VEVENT instance
        return self._vevents[self.ref]['UID']
@property
def organizer(self):
if 'ORGANIZER' not in self._vevents[self.ref]:
return ''
organizer = self._vevents[self.ref]['ORGANIZER']
cn = organizer.params.get('CN', '')
email = organizer.split(':')[-1]
if cn:
return f'{cn} ({email})'
else:
return email
@property
def url(self):
if 'URL' not in self._vevents[self.ref]:
return ''
return self._vevents[self.ref]['URL']
def update_url(self, url):
if url:
self._vevents[self.ref]['URL'] = url
else:
self._vevents[self.ref].pop('URL')
@staticmethod
def _create_calendar():
"""
create the calendar
:returns: calendar
:rtype: icalendar.Calendar()
"""
calendar = icalendar.Calendar()
calendar.add('version', '2.0')
calendar.add(
'prodid', '-//PIMUTILS.ORG//NONSGML khal / icalendar //EN'
)
return calendar
    @property
    def raw(self):
        """needed for vdirsyncer compatibility

        return text
        """
        calendar = self._create_calendar()
        # collect every distinct tzinfo referenced by DTSTART / DTEND
        tzs = []
        for vevent in self._vevents.values():
            if hasattr(vevent['DTSTART'].dt, 'tzinfo') and vevent['DTSTART'].dt.tzinfo is not None:
                tzs.append(vevent['DTSTART'].dt.tzinfo)
            if 'DTEND' in vevent and hasattr(vevent['DTEND'].dt, 'tzinfo') and \
                    vevent['DTEND'].dt.tzinfo is not None and \
                    vevent['DTEND'].dt.tzinfo not in tzs:
                tzs.append(vevent['DTEND'].dt.tzinfo)
        for tzinfo in tzs:
            # UTC needs no VTIMEZONE component
            if tzinfo == pytz.UTC:
                continue
            # create_timezone is a module-level helper defined elsewhere
            timezone = create_timezone(tzinfo, self.start)
            calendar.add_component(timezone)
        for vevent in self._vevents.values():
            calendar.add_component(vevent)
        return calendar.to_ical().decode('utf-8')
def export_ics(self, path):
"""export event as ICS
"""
export_path = os.path.expanduser(path)
with open(export_path, 'w') as fh:
fh.write(self.raw)
    @property
    def summary(self):
        """The event's display summary.

        For events generated from address-book dates (x-birthday,
        x-anniversary, x-abdate — presumably set by khal's birthday
        import, TODO confirm) a "<name>'s Nth <kind>" string is built;
        otherwise the plain SUMMARY property is returned.
        """
        description = None
        date = self._vevents[self.ref].get('x-birthday', None)
        if date:
            description = 'birthday'
        else:
            date = self._vevents[self.ref].get('x-anniversary', None)
            if date:
                description = 'anniversary'
            else:
                date = self._vevents[self.ref].get('x-abdate', None)
                if date:
                    description = self._vevents[self.ref].get('x-ablabel', 'custom event')
        if date:
            # date is formatted YYYYMMDD; compute the ordinal year count
            number = self.start_local.year - int(date[:4])
            name = self._vevents[self.ref].get('x-fname', None)
            if int(date[4:6]) == 2 and int(date[6:8]) == 29:
                leap = ' (29th of Feb.)'
            else:
                leap = ''
            # English ordinal suffix (1st, 2nd, 3rd, 11th-13th -> th)
            if (number - 1) % 10 == 0 and number != 11:
                suffix = 'st'
            elif (number - 2) % 10 == 0 and number != 12:
                suffix = 'nd'
            elif (number - 3) % 10 == 0 and number != 13:
                suffix = 'rd'
            else:
                suffix = 'th'
            return '{name}\'s {number}{suffix} {desc}{leap}'.format(
                name=name, number=number, suffix=suffix, desc=description, leap=leap,
            )
        else:
            return self._vevents[self.ref].get('SUMMARY', '')
    def update_summary(self, summary):
        """Set the SUMMARY of the currently referenced instance."""
        self._vevents[self.ref]['SUMMARY'] = summary
@staticmethod
def _can_handle_alarm(alarm):
"""
Decides whether we can handle a certain alarm.
"""
return alarm.get('ACTION') == 'DISPLAY' and \
isinstance(alarm.get('TRIGGER').dt, dt.timedelta)
@property
def alarms(self):
"""
Returns a list of all alarms in th original event that we can handle. Unknown types of
alarms are ignored.
"""
return [(a.get('TRIGGER').dt, a.get('DESCRIPTION'))
for a in self._vevents[self.ref].subcomponents
if a.name == 'VALARM' and self._can_handle_alarm(a)]
def update_alarms(self, alarms):
"""
Replaces all alarms in the event that can be handled with the ones provided.
"""
components = self._vevents[self.ref].subcomponents
# remove all alarms that we can handle from the subcomponents
components = [c for c in components
if not (c.name == 'VALARM' and self._can_handle_alarm(c))]
# add all alarms we could handle from the input
for alarm in alarms:
new = icalendar.Alarm()
new.add('ACTION', 'DISPLAY')
new.add('TRIGGER', alarm[0])
new.add('DESCRIPTION', alarm[1])
components.append(new)
self._vevents[self.ref].subcomponents = components
    @property
    def location(self):
        # the LOCATION property, or '' when not set
        return self._vevents[self.ref].get('LOCATION', '')
def update_location(self, location):
if location:
self._vevents[self.ref]['LOCATION'] = location
else:
self._vevents[self.ref].pop('LOCATION')
@property
def categories(self):
try:
return self._vevents[self.ref].get('CATEGORIES', '').to_ical().decode('utf-8')
except AttributeError:
return ''
def update_categories(self, categories):
assert isinstance(categories, list)
self._vevents[self.ref].pop('CATEGORIES', False)
if categories:
self._vevents[self.ref].add('CATEGORIES', categories)
    @property
    def description(self):
        # the DESCRIPTION property, or '' when not set
        return self._vevents[self.ref].get('DESCRIPTION', '')
def update_description(self, description):
if description:
self._vevents[self.ref]['DESCRIPTION'] = description
else:
self._vevents[self.ref].pop('DESCRIPTION')
@property
def _recur_str(self):
if self.recurring:
recurstr = ' ' + self.symbol_strings['recurring']
else:
recurstr = ''
return recurstr
@property
def _alarm_str(self):
if self.alarms:
alarmstr = ' ' + self.symbol_strings['alarming']
else:
alarmstr = ''
return alarmstr
def format(self, format_string, relative_to, env=None, colors=True):
    """Render this event through `format_string` (str.format-style).

    :param format_string: template whose placeholders are filled from the
        attributes computed below (start/end variants, title, calendar, ...)
    :param relative_to: the date -- or a (start, end) pair of dates -- the
        start/end styling is computed relative to
    :param env: optional environment mapping; only 'calendars' is read here
    :param colors: determines if colors codes should be printed or not
    :type colors: bool
    """
    env = env or {}
    attributes = {}
    # relative_to may be a single date or a (start, end) pair; a single
    # value is not unpackable and raises TypeError
    try:
        relative_to_start, relative_to_end = relative_to
    except TypeError:
        relative_to_start = relative_to_end = relative_to
    # normalize datetimes down to dates
    if isinstance(relative_to_end, dt.datetime):
        relative_to_end = relative_to_end.date()
    if isinstance(relative_to_start, dt.datetime):
        relative_to_start = relative_to_start.date()
    # for date-only (all-day) events, synthesize midnight datetimes in the
    # local timezone so comparisons below work uniformly
    if isinstance(self.start_local, dt.datetime):
        start_local_datetime = self.start_local
        end_local_datetime = self.end_local
    else:
        start_local_datetime = self._locale['local_timezone'].localize(
            dt.datetime.combine(self.start, dt.time.min))
        end_local_datetime = self._locale['local_timezone'].localize(
            dt.datetime.combine(self.end, dt.time.min))
    # bounds of the rendered range, in local time
    day_start = self._locale['local_timezone'].localize(
        dt.datetime.combine(relative_to_start, dt.time.min),
    )
    day_end = self._locale['local_timezone'].localize(
        dt.datetime.combine(relative_to_end, dt.time.max),
    )
    next_day_start = day_start + dt.timedelta(days=1)
    allday = isinstance(self, AllDayEvent)
    # raw start/end representations in every configured format
    attributes["start"] = self.start_local.strftime(self._locale['datetimeformat'])
    attributes["start-long"] = self.start_local.strftime(self._locale['longdatetimeformat'])
    attributes["start-date"] = self.start_local.strftime(self._locale['dateformat'])
    attributes["start-date-long"] = self.start_local.strftime(self._locale['longdateformat'])
    attributes["start-time"] = self.start_local.strftime(self._locale['timeformat'])
    attributes["end"] = self.end_local.strftime(self._locale['datetimeformat'])
    attributes["end-long"] = self.end_local.strftime(self._locale['longdatetimeformat'])
    attributes["end-date"] = self.end_local.strftime(self._locale['dateformat'])
    attributes["end-date-long"] = self.end_local.strftime(self._locale['longdateformat'])
    attributes["end-time"] = self.end_local.strftime(self._locale['timeformat'])
    attributes["duration"] = timedelta2str(self.duration)
    # should only have time attributes at this point (start/end)
    # preserve every value under a "-full" alias before the all-day
    # overrides below blank some of them out
    full = {}
    for attr in attributes:
        full[attr + "-full"] = attributes[attr]
    attributes.update(full)
    if allday:
        # all-day events have no meaningful time component
        attributes["start"] = attributes["start-date"]
        attributes["start-long"] = attributes["start-date-long"]
        attributes["start-time"] = ""
        attributes["end"] = attributes["end-date"]
        attributes["end-long"] = attributes["end-date-long"]
        attributes["end-time"] = ""
    # choose start/end "style" glyphs depending on where the event lies
    # relative to the rendered range; tostr is the separator between them
    tostr = ""
    if self.start_local.timetuple() < relative_to_start.timetuple():
        attributes["start-style"] = self.symbol_strings["right_arrow"]
    elif self.start_local.timetuple() == relative_to_start.timetuple():
        attributes["start-style"] = self.symbol_strings['range_start']
    else:
        attributes["start-style"] = attributes["start-time"]
        tostr = "-"
    if end_local_datetime in [day_end, next_day_start]:
        if self._locale["timeformat"] == '%H:%M':
            attributes["end-style"] = '24:00'
            tostr = '-'
        else:
            attributes["end-style"] = self.symbol_strings["range_end"]
            tostr = ""
    elif end_local_datetime > day_end:
        attributes["end-style"] = self.symbol_strings["right_arrow"]
        tostr = ""
    else:
        attributes["end-style"] = attributes["end-time"]
    if self.start < self.end:
        attributes["to-style"] = '-'
    else:
        attributes["to-style"] = ''
    # an event spanning past both ends of the range collapses to one glyph
    if start_local_datetime < day_start and end_local_datetime > day_end:
        attributes["start-end-time-style"] = self.symbol_strings["range"]
    else:
        attributes["start-end-time-style"] = attributes["start-style"] + \
            tostr + attributes["end-style"]
    if allday:
        # all-day events get glyph-only range markers
        if self.start == self.end:
            attributes['start-end-time-style'] = ''
        elif self.start == relative_to_start and self.end > relative_to_end:
            attributes['start-end-time-style'] = self.symbol_strings['range_start']
        elif self.start < relative_to_start and self.end > relative_to_end:
            attributes['start-end-time-style'] = self.symbol_strings['range']
        elif self.start < relative_to_start and self.end == relative_to_end:
            attributes['start-end-time-style'] = self.symbol_strings['range_end']
        else:
            attributes['start-end-time-style'] = ''
    # "end-necessary": the end shown only when it adds information
    if allday:
        attributes['end-necessary'] = ''
        attributes['end-necessary-long'] = ''
        if self.start_local != self.end_local:
            attributes['end-necessary'] = attributes['end-date']
            attributes['end-necessary-long'] = attributes['end-date-long']
    else:
        attributes['end-necessary'] = attributes['end-time']
        attributes['end-necessary-long'] = attributes['end-time']
        if self.start_local.date() != self.end_local.date():
            attributes['end-necessary'] = attributes['end']
            attributes['end-necessary-long'] = attributes['end-long']
    # remaining metadata attributes
    attributes["repeat-symbol"] = self._recur_str
    attributes["repeat-pattern"] = self.recurpattern
    attributes["alarm-symbol"] = self._alarm_str
    attributes["title"] = self.summary
    attributes["organizer"] = self.organizer.strip()
    attributes["description"] = self.description.strip()
    attributes["description-separator"] = ""
    if attributes["description"]:
        attributes["description-separator"] = " :: "
    attributes["location"] = self.location.strip()
    attributes["all-day"] = allday
    attributes["categories"] = self.categories
    attributes['uid'] = self.uid
    attributes['url'] = self.url
    if "calendars" in env and self.calendar in env["calendars"]:
        cal = env["calendars"][self.calendar]
        attributes["calendar-color"] = get_color(cal.get('color', ''))
        attributes["calendar"] = cal.get("displayname", self.calendar)
    else:
        attributes["calendar-color"] = attributes["calendar"] = ''
    # ANSI styling placeholders; empty strings when colors are disabled so
    # templates render identically either way
    if colors:
        attributes['reset'] = style('', reset=True)
        attributes['bold'] = style('', bold=True, reset=False)
        for c in ["black", "red", "green", "yellow", "blue", "magenta", "cyan", "white"]:
            attributes[c] = style("", reset=False, fg=c)
            attributes[c + "-bold"] = style("", reset=False, fg=c, bold=True)
    else:
        attributes['reset'] = attributes['bold'] = ''
        for c in ["black", "red", "green", "yellow", "blue", "magenta", "cyan", "white"]:
            attributes[c] = attributes[c + '-bold'] = ''
    attributes['nl'] = '\n'
    attributes['tab'] = '\t'
    attributes['bell'] = '\a'
    attributes['status'] = self.status + ' ' if self.status else ''
    attributes['cancelled'] = 'CANCELLED ' if self.status == 'CANCELLED' else ''
    # trailing reset guards against templates that set a color but forget it
    return format_string.format(**dict(attributes)) + attributes["reset"]
def duplicate(self):
    """duplicate this event's PROTO event

    :rtype: Event
    """
    proto = self._vevents['PROTO'].copy()
    proto['SEQUENCE'] = 0
    proto['UID'] = icalendar.vText(generate_random_uid())
    proto['SUMMARY'] = icalendar.vText(proto['SUMMARY'] + ' Copy')
    copy = self.fromVEvents([proto])
    copy.calendar = self.calendar
    copy._locale = self._locale
    return copy
def delete_instance(self, instance):
    """delete an instance from this event"""
    assert self.recurring
    # `delete_instance` here is the module-level helper, not a recursive call
    delete_instance(self._vevents['PROTO'], instance)
    # the instance may also exist as its own RECURRENCE-ID sub-event,
    # in which case that entry has to be dropped as well
    doomed = []
    for key, vevent in self._vevents.items():
        if key == 'PROTO':
            continue
        try:
            if vevent.get('RECURRENCE-ID').dt == instance:
                doomed.append(key)
        except TypeError:  # localized/floating datetime mismatch
            continue
    for key in doomed:
        del self._vevents[key]
@property
def status(self):
    """Value of the event's STATUS property ('' when unset)."""
    vevent = self._vevents[self.ref]
    return vevent.get('STATUS', '')
class DatetimeEvent(Event):
    """Shared base for events whose start/end carry a time component
    (see LocalizedEvent and FloatingEvent), as opposed to all-day events."""
    pass
class LocalizedEvent(DatetimeEvent):
    """
    An event whose start/end datetimes are bound to concrete timezones
    (taken from DTSTART/DTEND, falling back to the configured default).

    See the parent class for the shared interface.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # determine the start timezone from DTSTART; a missing DTSTART is a
        # broken event we cannot repair
        try:
            starttz = getattr(self._vevents[self.ref]['DTSTART'].dt, 'tzinfo', None)
        except KeyError:
            msg = (
                f"Cannot understand event {kwargs.get('href')} from "
                f"calendar {kwargs.get('calendar')}, you might want to file an issue at "
                "https://github.com/pimutils/khal/issues"
            )
            logger.fatal(msg)
            raise FatalError(  # because in ikhal you won't see the logger's output
                msg
            )
        if starttz is None:
            starttz = self._locale['default_timezone']
        # the end timezone falls back to the start's, then to the default
        try:
            endtz = getattr(self._vevents[self.ref]['DTEND'].dt, 'tzinfo', None)
        except KeyError:
            endtz = starttz
        if endtz is None:
            endtz = self._locale['default_timezone']
        # attach/convert the cached start and end to their timezones
        if is_aware(self._start):
            self._start = self._start.astimezone(starttz)
        else:
            self._start = starttz.localize(self._start)
        if is_aware(self._end):
            self._end = self._end.astimezone(endtz)
        else:
            self._end = endtz.localize(self._end)

    @property
    def start_local(self):
        """
        see parent
        """
        return self.start.astimezone(self._locale['local_timezone'])

    @property
    def end_local(self):
        """
        see parent
        """
        return self.end.astimezone(self._locale['local_timezone'])
class FloatingEvent(DatetimeEvent):
    """
    An event with floating (timezone-naive) start and end datetimes; they
    are interpreted in the user's configured local timezone.
    """
    # floating events carry a time component, so they are not all-day
    allday = False

    @property
    def start_local(self):
        # attach the configured local timezone to the naive start
        return self._locale['local_timezone'].localize(self.start)

    @property
    def end_local(self):
        # attach the configured local timezone to the naive end
        return self._locale['local_timezone'].localize(self.end)
class AllDayEvent(Event):
    """An event covering whole days (DTSTART/DTEND are dates, not datetimes)."""
    allday = True

    @property
    def end(self):
        """The (inclusive) last day of the event.

        iCalendar DTEND is exclusive, hence the one-day subtraction below.
        """
        end = super().end
        if end == self.start:
            # https://github.com/pimutils/khal/issues/129
            logger.warning(f'{self.href} ("{self.summary}"): The event\'s end '
                           'date property contains the same value as the start '
                           'date, which is invalid as per RFC 5545. Khal will '
                           'assume this is meant to be a single-day event on '
                           f'{self.start}')
            # treat the invalid zero-length event as a single-day event
            end += dt.timedelta(days=1)
        return end - dt.timedelta(days=1)

    @property
    def duration(self):
        """The event's duration; derived from start/end when DURATION is unset."""
        try:
            return self._vevents[self.ref]['DURATION'].dt
        except KeyError:
            # `end` above is inclusive, so add the day back
            return self.end - self.start + dt.timedelta(days=1)
def create_timezone(tz, first_date=None, last_date=None):
    """
    create an icalendar vtimezone from a pytz.tzinfo object

    :param tz: the timezone
    :type tz: pytz.tzinfo
    :param first_date: the very first datetime that needs to be included in the
        transition times, typically the DTSTART value of the (first recurring)
        event
    :type first_date: datetime.datetime
    :param last_date: the last datetime that needs to included, typically the
        end of the (very last) event (of a recursion set)
    :returns: timezone information
    :rtype: icalendar.Timezone()

    we currently have a problem here:

       pytz.timezones only carry the absolute dates of time zone transitions,
       not their RRULEs. This will a) make for rather bloated VTIMEZONE
       components, especially for long recurring events, b) we'll need to
       specify for which time range this VTIMEZONE should be generated and c)
       will not be valid for recurring events that go into eternity.

    Possible Solutions:

    As this information is not provided by pytz at all, there is no
    easy solution, we'd really need to ship another version of the OLSON DB.
    """
    # fixed-offset timezones have no transitions at all
    if isinstance(tz, pytz.tzinfo.StaticTzInfo):
        return _create_timezone_static(tz)

    # TODO last_date = None, recurring to infinity

    first_date = dt.datetime.today() if not first_date else to_naive_utc(first_date)
    last_date = dt.datetime.today() if not last_date else to_naive_utc(last_date)
    timezone = icalendar.Timezone()
    timezone.add('TZID', tz)

    # map each tzname to whether its transition is daylight-saving
    # (pytz only exposes this via the repr of its internal tzinfo objects)
    dst = {
        one[2]: 'DST' in two.__repr__()
        for one, two in iter(tz._tzinfos.items())
    }
    bst = {
        one[2]: 'BST' in two.__repr__()
        for one, two in iter(tz._tzinfos.items())
    }

    # looking for the first and last transition time we need to include
    # (first_tt converges to the last transition before first_date, last_tt
    # to the first transition after last_date)
    first_num, last_num = 0, len(tz._utc_transition_times) - 1
    first_tt = tz._utc_transition_times[0]
    last_tt = tz._utc_transition_times[-1]
    for num, transtime in enumerate(tz._utc_transition_times):
        if transtime > first_tt and transtime < first_date:
            first_num = num
            first_tt = transtime
        if transtime < last_tt and transtime > last_date:
            last_num = num
            last_tt = transtime

    # one STANDARD/DAYLIGHT subcomponent per tzname; repeat transitions of
    # an already-seen name are folded into that component's RDATE list
    timezones = {}
    for num in range(first_num, last_num + 1):
        name = tz._transition_info[num][2]
        if name in timezones:
            ttime = tz.fromutc(tz._utc_transition_times[num]).replace(tzinfo=None)
            if 'RDATE' in timezones[name]:
                timezones[name]['RDATE'].dts.append(
                    icalendar.prop.vDDDTypes(ttime))
            else:
                timezones[name].add('RDATE', ttime)
            continue

        if dst[name] or bst[name]:
            subcomp = icalendar.TimezoneDaylight()
        else:
            subcomp = icalendar.TimezoneStandard()

        subcomp.add('TZNAME', tz._transition_info[num][2])
        subcomp.add(
            'DTSTART',
            tz.fromutc(tz._utc_transition_times[num]).replace(tzinfo=None))
        subcomp.add('TZOFFSETTO', tz._transition_info[num][0])
        # offset "from" is the offset of the previous transition
        subcomp.add('TZOFFSETFROM', tz._transition_info[num - 1][0])
        timezones[name] = subcomp

    for subcomp in timezones.values():
        timezone.add_component(subcomp)

    return timezone
def _create_timezone_static(tz):
    """create an icalendar vtimezone from a pytz.tzinfo.StaticTzInfo

    :param tz: the timezone
    :type tz: pytz.tzinfo.StaticTzInfo
    :returns: timezone information
    :rtype: icalendar.Timezone()
    """
    vtimezone = icalendar.Timezone()
    vtimezone.add('TZID', tz)
    # a static timezone has a single, never-changing STANDARD component;
    # the DTSTART/RDATE epoch is arbitrary but must predate all events
    standard = icalendar.TimezoneStandard()
    standard.add('TZNAME', tz)
    standard.add('DTSTART', dt.datetime(1601, 1, 1))
    standard.add('RDATE', dt.datetime(1601, 1, 1))
    standard.add('TZOFFSETTO', tz._utcoffset)
    standard.add('TZOFFSETFROM', tz._utcoffset)
    vtimezone.add_component(standard)
    return vtimezone
| |
#!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logic for computing dependency information for closurized JavaScript files.
Closurized JavaScript files express dependencies using goog.require and
goog.provide statements. In order for the linter to detect when a statement is
missing or unnecessary, all identifiers in the JavaScript file must first be
processed to determine if they constitute the creation or usage of a dependency.
"""
import re
from closure_linter import javascripttokens
from closure_linter import tokenutil
# pylint: disable=g-bad-name
TokenType = javascripttokens.JavaScriptTokenType
DEFAULT_EXTRA_NAMESPACES = [
'goog.testing.asserts',
'goog.testing.jsunit',
]
class ClosurizedNamespacesInfo(object):
  """Dependency information for closurized JavaScript files.

  Processes token streams for dependency creation or usage and provides logic
  for determining if a given require or provide statement is unnecessary or if
  there are missing require or provide statements.
  """

  def __init__(self, closurized_namespaces, ignored_extra_namespaces):
    """Initializes an instance of the ClosurizedNamespacesInfo class.

    Args:
      closurized_namespaces: A list of namespace prefixes that should be
          processed for dependency information. Non-matching namespaces are
          ignored.
      ignored_extra_namespaces: A list of namespaces that should not be reported
          as extra regardless of whether they are actually used.
    """
    self._closurized_namespaces = closurized_namespaces
    self._ignored_extra_namespaces = (ignored_extra_namespaces +
                                      DEFAULT_EXTRA_NAMESPACES)
    self.Reset()

  def Reset(self):
    """Resets the internal state to prepare for processing a new file."""

    # A list of goog.provide tokens in the order they appeared in the file.
    self._provide_tokens = []

    # A list of goog.require tokens in the order they appeared in the file.
    self._require_tokens = []

    # Namespaces that are already goog.provided.
    self._provided_namespaces = []

    # Namespaces that are already goog.required.
    self._required_namespaces = []

    # Note that created_namespaces and used_namespaces contain both namespaces
    # and identifiers because there are many existing cases where a method or
    # constant is provided directly instead of its namespace. Ideally, these
    # two lists would only have to contain namespaces.

    # A list of tuples where the first element is the namespace of an identifier
    # created in the file, the second is the identifier itself and the third is
    # the line number where it's created.
    self._created_namespaces = []

    # A list of tuples where the first element is the namespace of an identifier
    # used in the file, the second is the identifier itself and the third is the
    # line number where it's used.
    self._used_namespaces = []

    # A list of seemingly-unnecessary namespaces that are goog.required() and
    # annotated with @suppress {extraRequire}.
    self._suppressed_requires = []

    # A list of goog.provide tokens which are duplicates.
    self._duplicate_provide_tokens = []

    # A list of goog.require tokens which are duplicates.
    self._duplicate_require_tokens = []

    # Whether this file is in a goog.scope. Someday, we may add support
    # for checking scopified namespaces, but for now let's just fail
    # in a more reasonable way.
    self._scopified_file = False

    # TODO(user): Handle the case where there are 2 different requires
    # that can satisfy the same dependency, but only one is necessary.

  def GetProvidedNamespaces(self):
    """Returns the namespaces which are already provided by this file.

    Returns:
      A set of strings where each string is a 'namespace' corresponding to an
      existing goog.provide statement in the file being checked.
    """
    return set(self._provided_namespaces)

  def GetRequiredNamespaces(self):
    """Returns the namespaces which are already required by this file.

    Returns:
      A set of strings where each string is a 'namespace' corresponding to an
      existing goog.require statement in the file being checked.
    """
    return set(self._required_namespaces)

  def IsExtraProvide(self, token):
    """Returns whether the given goog.provide token is unnecessary.

    Args:
      token: A goog.provide token.

    Returns:
      True if the given token corresponds to an unnecessary goog.provide
      statement, otherwise False.
    """
    namespace = tokenutil.GetStringAfterToken(token)

    base_namespace = namespace.split('.', 1)[0]
    if base_namespace not in self._closurized_namespaces:
      return False

    if token in self._duplicate_provide_tokens:
      return True

    # TODO(user): There's probably a faster way to compute this.
    for created_namespace, created_identifier, _ in self._created_namespaces:
      if namespace == created_namespace or namespace == created_identifier:
        return False

    return True

  def IsExtraRequire(self, token):
    """Returns whether the given goog.require token is unnecessary.

    Args:
      token: A goog.require token.

    Returns:
      True if the given token corresponds to an unnecessary goog.require
      statement, otherwise False.
    """
    namespace = tokenutil.GetStringAfterToken(token)

    base_namespace = namespace.split('.', 1)[0]
    if base_namespace not in self._closurized_namespaces:
      return False

    if namespace in self._ignored_extra_namespaces:
      return False

    if token in self._duplicate_require_tokens:
      return True

    if namespace in self._suppressed_requires:
      return False

    # If the namespace contains a component that is initial caps, then that
    # must be the last component of the namespace.
    parts = namespace.split('.')
    if len(parts) > 1 and parts[-2][0].isupper():
      return True

    # TODO(user): There's probably a faster way to compute this.
    for used_namespace, used_identifier, _ in self._used_namespaces:
      if namespace == used_namespace or namespace == used_identifier:
        return False

    return True

  def GetMissingProvides(self):
    """Returns the dict of missing provided namespaces for the current file.

    Returns:
      Returns a dictionary of key as string and value as integer where each
      string(key) is a namespace that should be provided by this file, but is
      not and integer(value) is first line number where it's defined.
    """
    missing_provides = dict()
    for namespace, identifier, line_number in self._created_namespaces:
      if (not self._IsPrivateIdentifier(identifier) and
          namespace not in self._provided_namespaces and
          identifier not in self._provided_namespaces and
          namespace not in self._required_namespaces and
          namespace not in missing_provides):
        missing_provides[namespace] = line_number

    return missing_provides

  def GetMissingRequires(self):
    """Returns the dict of missing required namespaces for the current file.

    For each non-private identifier used in the file, find either a
    goog.require, goog.provide or a created identifier that satisfies it.
    goog.require statements can satisfy the identifier by requiring either the
    namespace of the identifier or the identifier itself. goog.provide
    statements can satisfy the identifier by providing the namespace of the
    identifier. A created identifier can only satisfy the used identifier if
    it matches it exactly (necessary since things can be defined on a
    namespace in more than one file). Note that provided namespaces should be
    a subset of created namespaces, but we check both because in some cases we
    can't always detect the creation of the namespace.

    Returns:
      Returns a dictionary of key as string and value integer where each
      string(key) is a namespace that should be required by this file, but is
      not and integer(value) is first line number where it's used.
    """
    external_dependencies = set(self._required_namespaces)

    # Assume goog namespace is always available.
    external_dependencies.add('goog')

    created_identifiers = set()
    for namespace, identifier, line_number in self._created_namespaces:
      created_identifiers.add(identifier)

    missing_requires = dict()
    for namespace, identifier, line_number in self._used_namespaces:
      if (not self._IsPrivateIdentifier(identifier) and
          namespace not in external_dependencies and
          namespace not in self._provided_namespaces and
          identifier not in external_dependencies and
          identifier not in created_identifiers and
          namespace not in missing_requires):
        missing_requires[namespace] = line_number

    return missing_requires

  def _IsPrivateIdentifier(self, identifier):
    """Returns whether the given identifier is private."""
    pieces = identifier.split('.')
    for piece in pieces:
      # BUG FIX: was piece.startswith('_'); Closure marks private members
      # with a *trailing* underscore (e.g. foo.bar_), so the leading-
      # underscore test never matched real private identifiers.
      if piece.endswith('_'):
        return True
    return False

  def IsFirstProvide(self, token):
    """Returns whether token is the first provide token."""
    return self._provide_tokens and token == self._provide_tokens[0]

  def IsFirstRequire(self, token):
    """Returns whether token is the first require token."""
    return self._require_tokens and token == self._require_tokens[0]

  def IsLastProvide(self, token):
    """Returns whether token is the last provide token."""
    return self._provide_tokens and token == self._provide_tokens[-1]

  def IsLastRequire(self, token):
    """Returns whether token is the last require token."""
    return self._require_tokens and token == self._require_tokens[-1]

  def ProcessToken(self, token, state_tracker):
    """Processes the given token for dependency information.

    Args:
      token: The token to process.
      state_tracker: The JavaScript state tracker.
    """

    # Note that this method is in the critical path for the linter and has been
    # optimized for performance in the following ways:
    # - Tokens are checked by type first to minimize the number of function
    #   calls necessary to determine if action needs to be taken for the token.
    # - The most common tokens types are checked for first.
    # - The number of function calls has been minimized (thus the length of this
    #   function.

    if token.type == TokenType.IDENTIFIER:
      # TODO(user): Consider saving the whole identifier in metadata.
      whole_identifier_string = tokenutil.GetIdentifierForToken(token)
      if whole_identifier_string is None:
        # We only want to process the identifier one time. If the whole string
        # identifier is None, that means this token was part of a multi-token
        # identifier, but it was not the first token of the identifier.
        return

      # In the odd case that a goog.require is encountered inside a function,
      # just ignore it (e.g. dynamic loading in test runners).
      if token.string == 'goog.require' and not state_tracker.InFunction():
        self._require_tokens.append(token)
        namespace = tokenutil.GetStringAfterToken(token)
        if namespace in self._required_namespaces:
          self._duplicate_require_tokens.append(token)
        else:
          self._required_namespaces.append(namespace)

        # If there is a suppression for the require, add a usage for it so it
        # gets treated as a regular goog.require (i.e. still gets sorted).
        jsdoc = state_tracker.GetDocComment()
        if jsdoc and ('extraRequire' in jsdoc.suppressions):
          self._suppressed_requires.append(namespace)
          self._AddUsedNamespace(state_tracker, namespace, token.line_number)

      elif token.string == 'goog.provide':
        self._provide_tokens.append(token)
        namespace = tokenutil.GetStringAfterToken(token)
        if namespace in self._provided_namespaces:
          self._duplicate_provide_tokens.append(token)
        else:
          self._provided_namespaces.append(namespace)

        # If there is a suppression for the provide, add a creation for it so it
        # gets treated as a regular goog.provide (i.e. still gets sorted).
        jsdoc = state_tracker.GetDocComment()
        if jsdoc and ('extraProvide' in jsdoc.suppressions):
          self._AddCreatedNamespace(state_tracker, namespace, token.line_number)

      elif token.string == 'goog.scope':
        self._scopified_file = True

      elif token.string == 'goog.setTestOnly':

        # Since the message is optional, we don't want to scan to later lines.
        for t in tokenutil.GetAllTokensInSameLine(token):
          if t.type == TokenType.STRING_TEXT:
            message = t.string

            if re.match(r'^\w+(\.\w+)+$', message):
              # This looks like a namespace. If it's a Closurized namespace,
              # consider it created.
              base_namespace = message.split('.', 1)[0]
              if base_namespace in self._closurized_namespaces:
                self._AddCreatedNamespace(state_tracker, message,
                                          token.line_number)
            break
      else:
        jsdoc = state_tracker.GetDocComment()
        if token.metadata and token.metadata.aliased_symbol:
          whole_identifier_string = token.metadata.aliased_symbol
        if jsdoc and jsdoc.HasFlag('typedef'):
          self._AddCreatedNamespace(state_tracker, whole_identifier_string,
                                    token.line_number,
                                    namespace=self.GetClosurizedNamespace(
                                        whole_identifier_string))
        else:
          if not (token.metadata and token.metadata.is_alias_definition):
            self._AddUsedNamespace(state_tracker, whole_identifier_string,
                                   token.line_number)

    elif token.type == TokenType.SIMPLE_LVALUE:
      identifier = token.values['identifier']
      start_token = tokenutil.GetIdentifierStart(token)
      if start_token and start_token != token:
        # Multi-line identifier being assigned. Get the whole identifier.
        identifier = tokenutil.GetIdentifierForToken(start_token)
      else:
        start_token = token
      # If an alias is defined on the start_token, use it instead.
      if (start_token and
          start_token.metadata and
          start_token.metadata.aliased_symbol and
          not start_token.metadata.is_alias_definition):
        identifier = start_token.metadata.aliased_symbol

      if identifier:
        namespace = self.GetClosurizedNamespace(identifier)
        if state_tracker.InFunction():
          self._AddUsedNamespace(state_tracker, identifier, token.line_number)
        elif namespace and namespace != 'goog':
          self._AddCreatedNamespace(state_tracker, identifier,
                                    token.line_number, namespace=namespace)

    elif token.type == TokenType.DOC_FLAG:
      flag_type = token.attached_object.flag_type
      is_interface = state_tracker.GetDocComment().HasFlag('interface')
      if flag_type == 'implements' or (flag_type == 'extends' and is_interface):
        # Interfaces should be goog.require'd.
        doc_start = tokenutil.Search(token, TokenType.DOC_START_BRACE)
        interface = tokenutil.Search(doc_start, TokenType.COMMENT)
        self._AddUsedNamespace(state_tracker, interface.string,
                               token.line_number)

  def _AddCreatedNamespace(self, state_tracker, identifier, line_number,
                           namespace=None):
    """Adds the namespace of an identifier to the list of created namespaces.

    If the identifier is annotated with a 'missingProvide' suppression, it is
    not added.

    Args:
      state_tracker: The JavaScriptStateTracker instance.
      identifier: The identifier to add.
      line_number: Line number where namespace is created.
      namespace: The namespace of the identifier or None if the identifier is
          also the namespace.
    """
    if not namespace:
      namespace = identifier

    jsdoc = state_tracker.GetDocComment()
    if jsdoc and 'missingProvide' in jsdoc.suppressions:
      return

    self._created_namespaces.append([namespace, identifier, line_number])

  def _AddUsedNamespace(self, state_tracker, identifier, line_number):
    """Adds the namespace of an identifier to the list of used namespaces.

    If the identifier is annotated with a 'missingRequire' suppression, it is
    not added.

    Args:
      state_tracker: The JavaScriptStateTracker instance.
      identifier: An identifier which has been used.
      line_number: Line number where namespace is used.
    """
    jsdoc = state_tracker.GetDocComment()
    if jsdoc and 'missingRequire' in jsdoc.suppressions:
      return

    namespace = self.GetClosurizedNamespace(identifier)
    # b/5362203 If its a variable in scope then its not a required namespace.
    if namespace and not state_tracker.IsVariableInScope(namespace):
      self._used_namespaces.append([namespace, identifier, line_number])

  def GetClosurizedNamespace(self, identifier):
    """Given an identifier, returns the namespace that identifier is from.

    Args:
      identifier: The identifier to extract a namespace from.

    Returns:
      The namespace the given identifier resides in, or None if one could not
      be found.
    """
    if identifier.startswith('goog.global'):
      # Ignore goog.global, since it is, by definition, global.
      return None

    parts = identifier.split('.')
    for namespace in self._closurized_namespaces:
      if not identifier.startswith(namespace + '.'):
        continue

      last_part = parts[-1]
      if not last_part:
        # TODO(robbyw): Handle this: it's a multi-line identifier.
        return None

      # The namespace for a class is the shortest prefix ending in a class
      # name, which starts with a capital letter but is not a capitalized word.
      #
      # We ultimately do not want to allow requiring or providing of inner
      # classes/enums.  Instead, a file should provide only the top-level class
      # and users should require only that.
      namespace = []
      for part in parts:
        if part == 'prototype' or part.isupper():
          return '.'.join(namespace)
        namespace.append(part)
        if part[0].isupper():
          return '.'.join(namespace)

      # At this point, we know there's no class or enum, so the namespace is
      # just the identifier with the last part removed. With the exception of
      # apply, inherits, and call, which should also be stripped.
      if parts[-1] in ('apply', 'inherits', 'call'):
        parts.pop()
      parts.pop()

      # If the last part ends with an underscore, it is a private variable,
      # method, or enum. The namespace is whatever is before it.
      # BUG FIX: was parts[-1].startswith('_'), contradicting the comment
      # above and the Closure trailing-underscore private convention.
      if parts and parts[-1].endswith('_'):
        parts.pop()

      return '.'.join(parts)

    return None
| |
"""
Experimental segmentation nodes.
"""
from __future__ import absolute_import
from nodetree import node, writable_node
import ocrolib
from ocrolib import iulib, numpy
from . import base
from .. import stages
class Rectangle(object):
    """
    Rectangle class, Iulib-style: corner (x0, y0) inclusive, (x1, y1)
    exclusive (see contains()).
    """
    def __init__(self, x0, y0, x1, y1):
        """
        Initialise a rectangle from its corner coordinates.
        """
        self.x0 = x0
        self.y0 = y0
        self.x1 = x1
        self.y1 = y1

    def __repr__(self):
        return "<Rectangle: %d %d %d %d>" % (
            self.x0,
            self.y0,
            self.x1,
            self.y1
        )

    def __eq__(self, rect):
        return self.x0 == rect.x0 and self.y0 == rect.y0 \
                and self.x1 == rect.x1 and self.y1 == rect.y1

    def __ne__(self, rect):
        return self.x0 != rect.x0 or self.y0 != rect.y0 \
                or self.x1 != rect.x1 or self.y1 != rect.y1

    def aspect(self):
        """Width/height ratio; 1 for an empty rectangle."""
        if self.empty():
            return 1
        return float(self.width()) / float(self.height())

    def area(self):
        """Covered area; 0 for an empty rectangle."""
        if self.empty():
            return 0
        return self.width() * self.height()

    def clone(self):
        """Return an independent copy of this rectangle."""
        return Rectangle(self.x0, self.y0, self.x1, self.y1)

    def empty(self):
        # NOTE(review): "empty" here means BOTH dimensions are degenerate;
        # iulib's rectangle treats either dimension being degenerate as
        # empty — confirm which semantics callers rely on before changing.
        return self.x0 >= self.x1 and self.y0 >= self.y1

    def pad_by(self, dx, dy):
        """Grow the rectangle in place by dx/dy on every side."""
        assert(not self.empty())
        self.x0 -= dx
        self.y0 -= dy
        self.x1 += dx
        # BUG FIX: was `self.y0 += dy`, which undid the y0 padding above
        # and left y1 untouched.
        self.y1 += dy

    def shift_by(self, dx, dy):
        """Translate the rectangle in place by (dx, dy)."""
        assert(not self.empty())
        self.x0 += dx
        self.y0 += dy
        self.x1 += dx
        # BUG FIX: was `self.y0 += dy`, shifting y0 twice and y1 not at all.
        self.y1 += dy

    def width(self):
        """Horizontal extent, clamped to >= 0."""
        return max(0, self.x1 - self.x0)

    def height(self):
        """Vertical extent, clamped to >= 0."""
        return max(0, self.y1 - self.y0)

    def include_point(self, x, y):
        """Expand the rectangle (in place) so it contains the point (x, y)."""
        if self.empty():
            self.x0 = x
            self.y0 = y
            self.x1 = x + 1
            self.y1 = y + 1
        else:
            self.x0 = min(x, self.x0)
            self.y0 = min(y, self.y0)
            self.x1 = max(x + 1, self.x1)
            self.y1 = max(y + 1, self.y1)

    def include(self, rect):
        """Expand the rectangle (in place) so it contains `rect`."""
        if self.empty():
            self.x0 = rect.x0
            self.y0 = rect.y0
            self.x1 = rect.x1
            self.y1 = rect.y1
        else:
            self.x0 = min(self.x0, rect.x0)
            self.y0 = min(self.y0, rect.y0)
            self.x1 = max(self.x1, rect.x1)
            self.y1 = max(self.y1, rect.y1)

    def grow(self, dx, dy):
        """Return a new rectangle padded by dx/dy on every side."""
        return Rectangle(self.x0 - dx, self.y0 - dy,
                self.x1 + dx, self.y1 + dy)

    def overlaps(self, rect):
        """Whether this rectangle and `rect` intersect."""
        return self.x0 <= rect.x1 and self.x1 >= rect.x0 \
                and self.y0 <= rect.y1 and self.y1 >= rect.y0

    def overlaps_x(self, rect):
        """Whether the horizontal extents intersect."""
        return self.x0 <= rect.x1 and self.x1 >= rect.x0

    def overlaps_y(self, rect):
        """Whether the vertical extents intersect."""
        return self.y0 <= rect.y1 and self.y1 >= rect.y0

    def contains(self, x, y):
        """Whether the point (x, y) lies inside (x1/y1 exclusive)."""
        return x >= self.x0 and x < self.x1 \
                and y >= self.y0 and y < self.y1

    def points(self):
        """The corner coordinates as an (x0, y0, x1, y1) tuple."""
        return (self.x0, self.y0, self.x1, self.y1,)

    def intersection(self, rect):
        """Return the intersection with `rect` (self if self is empty)."""
        if self.empty():
            return self
        return Rectangle(
            max(self.x0, rect.x0),
            max(self.y0, rect.y0),
            min(self.x1, rect.x1),
            min(self.y1, rect.y1)
        )

    def inclusion(self, rect):
        """Return the bounding box of self and `rect` (rect if self is empty)."""
        if self.empty():
            return rect
        return Rectangle(
            min(self.x0, rect.x0),
            min(self.y0, rect.y0),
            max(self.x1, rect.x1),
            max(self.y1, rect.y1)
        )

    def fraction_covered_by(self, rect):
        """Fraction of this rectangle's area covered by `rect`; -1 if
        this rectangle has no area."""
        isect = self.intersection(rect)
        if self.area():
            return isect.area() / float(self.area())
        else:
            return -1

    @classmethod
    def union_of(cls, *args):
        """Return the bounding box of all given rectangles."""
        r = Rectangle(0, 0, 0, 0)
        for arg in args:
            r.include(arg)
        return r
def r2i(rect):
    # Convert a local Rectangle into an iulib rectangle.
    return iulib.rectangle(rect.x0, rect.y0, rect.x1, rect.y1)
def i2r(rect):
    # Convert an iulib rectangle back into a local Rectangle.
    return Rectangle(rect.x0, rect.y0, rect.x1, rect.y1)
def smooth(x, window_len=11, window='hanning'):
    """Smooth the data using a window with requested size.

    This method is based on the convolution of a scaled window with the
    signal.  The signal is prepared by introducing reflected copies of
    itself (of the window's size) at both ends so that transient parts
    are minimized at the beginning and end of the output signal.

    input:
        x: the input signal (1-D numpy array)
        window_len: the dimension of the smoothing window; should be an
            odd integer (values below 3 return ``x`` unchanged)
        window: one of 'flat', 'hanning', 'hamming', 'bartlett',
            'blackman'; 'flat' produces a moving-average smoothing
    output:
        the smoothed signal, with the same length as the input

    example:
        t = numpy.linspace(-2, 2, 50)
        x = numpy.sin(t) + numpy.random.randn(len(t)) * 0.1
        y = smooth(x)

    see also:
        numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman,
        numpy.convolve, scipy.signal.lfilter

    TODO: the window parameter could be the window itself if an array
    instead of a string
    """
    # The paren-style raises below are equivalent to the old Python 2
    # ``raise E, msg`` statements but also valid under Python 3.
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        return x
    if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        # (fixed typo: the message used to read "is on of")
        raise ValueError("Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
    # pad both ends with reflected copies of the signal
    s = numpy.r_[2 * x[0] - x[window_len:1:-1], x, 2 * x[-1] - x[-1:-window_len:-1]]
    if window == 'flat':  # moving average
        w = numpy.ones(window_len, 'd')
    else:
        w = eval('numpy.' + window + '(window_len)')
    # convolve with the normalised window, then trim the padding off
    y = numpy.convolve(w / w.sum(), s, mode='same')
    return y[window_len - 1:-window_len + 1]
def not_char(rect):
    """
    Heuristic sanity check: return True when ``rect`` could NOT
    plausibly be a single character's bounding box — too small, too
    large, or too elongated in either direction.
    """
    area = rect.area()
    if area < 4 or area > 10000:
        return True
    aspect = rect.aspect()
    return aspect < 0.2 or aspect > 5
def horizontal_overlaps(rect, others, sorted=False):
    """
    Return the rects in ``others`` whose vertical extent overlaps that
    of ``rect`` (i.e. boxes that could sit on the same text line).

    The ``sorted`` flag is accepted for interface compatibility but is
    currently unused; a sorted input would allow terminating the scan
    at the first non-match.
    """
    return [other for other in others if rect.overlaps_y(other)]
def get_average_line_height(top_bottoms):
    """
    Mean line height over (top, bottom) pairs, counting only lines at
    least half as tall as the tallest one (shorter runs are treated as
    noise and given zero weight).
    """
    heights = numpy.array([bottom - top for top, bottom in top_bottoms])
    cutoff = numpy.max(heights) / 2
    mask = numpy.vectorize(lambda h: 0 if h < cutoff else 1)(heights)
    return numpy.average(heights, weights=mask)
def remove_border(narray, average_char_height):
    """
    Try to remove anything in a likely border region by cropping to
    the span of rows/columns whose projection survives a median-based
    high-pass filter, and return the cropped subimage.
    """
    pixels = iulib.numpy(narray)
    col_profile = pixels.sum(axis=0)
    row_profile = pixels.sum(axis=1)
    col_keep = high_pass_median(col_profile, 5.0 / average_char_height)
    row_keep = high_pass_median(row_profile, 5.0 / average_char_height)
    rows = row_keep.nonzero()[0]
    cols = col_keep.nonzero()[0]
    cropped = iulib.bytearray()
    # crop from the first to the last surviving row/column
    iulib.extract_subimage(cropped, narray, int(rows[0]), int(cols[0]),
            int(rows[-1]), int(cols[-1]))
    return cropped
def get_vertical_projection(narray):
    """
    Accumulate image columns.

    Sums the iulib image (converted to a numpy array) along axis 1,
    producing a 1-D profile with one total per axis-0 index.
    NOTE(review): whether axis 0 is x or y depends on iulib's array
    layout — confirm before relying on the orientation.
    """
    return iulib.numpy(narray).sum(axis=1)
def get_horizontal_projection(narray):
    """
    Accumulate image rows.

    Sums the iulib image (converted to a numpy array) along axis 0,
    producing a 1-D profile with one total per axis-1 index.
    NOTE(review): orientation depends on iulib's array layout — see
    get_vertical_projection.
    """
    return iulib.numpy(narray).sum(axis=0)
def high_pass_max(numpy_arr, maxscale):
    """
    Zero out every element of ``numpy_arr`` that falls below
    ``maxscale`` times the array's maximum; larger elements pass
    through unchanged.  Used to strip low-level noise from projection
    profiles.  Returns a new array; the input is not modified.

    (The docstring used to claim a median-based cutoff, copied from
    high_pass_median; the threshold here is max * maxscale.)
    """
    # named ``peak`` to avoid shadowing the builtin ``max``
    peak = numpy.max(numpy_arr)
    def hp(x, m):
        # keep values at or above the threshold, zero the rest
        return 0 if x < m else x
    return numpy.vectorize(hp)(numpy_arr, peak * maxscale)
def high_pass_median(numpy_arr, medscale):
    """
    Zero out every element of ``numpy_arr`` below ``medscale`` times
    the array's median, leaving the remaining values untouched.
    Returns a new array; the input is not modified.
    """
    cutoff = numpy.median(numpy_arr) * medscale
    def keep(value, threshold):
        if value < threshold:
            return 0
        return value
    return numpy.vectorize(keep)(numpy_arr, cutoff)
def get_lines_by_projection(narray, highpass=0.001):
    """
    Extract regions of blackness: (start, end) index pairs for each
    contiguous run of nonzero values in the image's axis-0 projection,
    after high-pass filtering against the profile's maximum.
    """
    profile = iulib.numpy(narray).sum(axis=0)
    filtered = high_pass_max(profile, highpass)
    regions = []
    start = None
    for pos, val in enumerate(filtered):
        if val != 0:
            if start is None:
                start = pos
        elif start is not None:
            regions.append((start, pos))
            start = None
    # NOTE(review): a run still open at the end of the profile is
    # dropped — this matches the original behaviour, unlike
    # SegmentPageByHint.get_possible_columns which closes it.
    return regions
def large_or_odd(rect, avg):
    """
    True when ``rect`` is improbably big relative to the average
    character size ``avg`` (more than 100 avg-squares of area) or has
    an extreme aspect ratio.
    """
    if rect.area() > 100 * avg * avg:
        return True
    aspect = rect.aspect()
    return aspect < 0.2 or aspect > 10
def strip_non_chars(narray, bboxes, average_height, inverted=True):
    """
    Split ``bboxes`` into plausible character boxes (returned) and
    implausible ones, which are erased from the image by painting them
    over with the background colour.

    narray: iulib image, modified in place.
    bboxes: candidate boxes.
    average_height: typical character height, passed to large_or_odd().
    inverted: True when the image is white-on-black, making the erase
        colour 0 rather than 255.
    """
    background = 0 if inverted else 255
    survivors = []
    for candidate in bboxes:
        if large_or_odd(candidate, average_height):
            iulib.fill_rect(narray, candidate.x0, candidate.y0,
                    candidate.x1, candidate.y1, background)
        else:
            survivors.append(candidate)
    return survivors
def trimmed_mean(numpy_arr, lperc=0, hperc=0):
    """
    Mean of ``numpy_arr`` with the lowest ``lperc`` percent and the
    highest ``hperc`` percent of entries excluded (the array is
    expected to be sorted, since trimming is done by index).

    Uses explicit floor division (``//``) for the slice bounds: the
    original ``/`` relied on Python 2 integer division and would
    produce float indices (a TypeError) under Python 3.
    """
    alen = len(numpy_arr)
    low = alen // 100 * lperc
    high = alen - (alen // 100 * hperc)
    return numpy_arr[low:high].mean()
class SegmentPageByHint(node.Node, base.JSONWriterMixin):
    """
    Segment a page using toplines and column hints.

    Takes a binary page image and produces bounding boxes for text
    lines and columns.  The "toplines" parameter peels off that many
    header lines from the top before column detection; "columns" is
    the number of columns to look for; "highpass" is the noise cutoff
    used when projecting lines within a column.

    NOTE(review): Python 2 only — the ``cmp``-based sorts below do not
    run under Python 3.
    """
    stage = stages.PAGE_SEGMENT
    intypes = [ocrolib.numpy.ndarray]
    outtype = dict
    parameters = [
        dict(name="toplines", value=0),
        dict(name="columns", value=1),
        dict(name="highpass", value=0.001, type="float"),
    ]
    def null_data(self):
        """
        Return an empty segmentation when this node is ignored.
        """
        return dict(columns=[], lines=[], paragraphs=[])
    def process(self, input):
        """
        Segment a binary image.

        input: a binary image (numpy ndarray).
        return: a dictionary with "lines" and "columns" lists; each box
            is [x0, y0, x1, y1] in the input's top-left-origin
            coordinate system.
        """
        self.inarray = ocrolib.numpy2narray(input, type='B')
        self.init()
        # peel off the requested number of header lines first
        for topline in range(int(self._params.get("toplines", 0))):
            self.get_header_line()
        # the union of the extracted header lines is recorded as an
        # extra "column" of its own
        self.columns.append(Rectangle.union_of(*self.textlines))
        self.find_columns()
        self.find_lines()
        # iulib rectangles are bottom-left origin; flip back to the
        # numpy (top-left origin) convention of the input image
        def flipud(r):
            return [r.x0, input.shape[0] - r.y1, r.x1, input.shape[0] - r.y0]
        return dict(
            lines=[flipud(r) for r in self.textlines],
            columns=[flipud(r) for r in self.columns],
        )
    def init(self):
        """
        Initialise on receipt of the input.
        """
        # pointer to the region that remains
        # to be segmented - starts at the top
        self.topptr = self.inarray.dim(1)
        # obtain an inverted version of the array
        self.inverted = iulib.bytearray()
        self.inverted.copy(self.inarray)
        iulib.binary_invert(self.inverted)
        self.calc_bounding_boxes()
        # list of extracted line rectangles
        self.textlines = []
        self.columns = []
    def calc_bounding_boxes(self):
        """
        Get bounding boxes of connected components, plus the trimmed
        average character height, and erase non-character junk from
        the inverted image.
        """
        concomps = iulib.intarray()
        concomps.copy(self.inverted)
        iulib.label_components(concomps, False)
        bboxes = iulib.rectarray()
        iulib.bounding_boxes(bboxes, concomps)
        self.boxes = []
        for i in range(bboxes.length()):
            # skip the page-sized background component
            if bboxes.at(i).area() > (self.inverted.dim(0) *
                    self.inverted.dim(1) * 0.95):
                continue
            self.boxes.append(i2r(bboxes.at(i)))
        # get the average text height, excluding the 5% outliers at
        # either end of the sorted height distribution
        self.avgheight = trimmed_mean(numpy.sort(numpy.array(
                [r.height() for r in self.boxes])), 5, 5)
        # remove large or weird boxes from the inverted images
        self.boxes = strip_non_chars(self.inverted, self.boxes, self.avgheight)
    def get_char_boxes(self, boxes):
        """
        Filter ``boxes`` down to those that plausibly bound a single
        character (see not_char).
        """
        return [b for b in boxes if not not_char(b)]
    def get_header_line(self):
        """
        Get the first found line in an image, append it to
        self.textlines, and move the region-of-interest pointer below
        it.
        """
        boxes = self.get_char_boxes(self.boxes)
        # eliminate boxes above our top-of-the-page
        # pointer
        boxes = [b for b in boxes if b.y1 <= self.topptr]
        # order boxes by y1 (top edge)
        boxes.sort(lambda x, y: cmp(x.y1, y.y1))
        # reverse so those nearest the top are first
        boxes.reverse()
        # get rects with overlap horizontally with
        # the topmost one, retrying until the line looks tall enough
        # NOTE(review): the comment in the original said "a maximum of
        # 20 lines" but the guard below is 200; also boxes[maxcnt]
        # raises IndexError if candidates run out first — confirm.
        overlaps = []
        maxcnt = 0
        line = Rectangle(0, 0, 0, 0)
        while maxcnt < 200 and (len(overlaps) < 2 \
                or line.height() < (self.avgheight * 1.5)):
            overlaps = horizontal_overlaps(
                    boxes[maxcnt], boxes, sorted=False)
            line = Rectangle.union_of(*overlaps)
            maxcnt += 1
        self.textlines.append(line)
        # set region of interest to below the top line
        self.topptr = line.y0
    def get_possible_columns(self, projection):
        """
        Extract regions of whiteness from a projection profile as
        Rectangles spanning the not-yet-segmented part of the page
        (candidate columns).
        """
        regions = []
        gotcol = None
        count = 0
        for val in projection:
            # close a region that is still open at the profile's end
            if count == len(projection) - 1 and gotcol is not None:
                regions.append(Rectangle(gotcol, 0, count, self.topptr))
            elif val != 0:
                if gotcol is None:
                    gotcol = count
            else:
                if not gotcol is None:
                    regions.append(Rectangle(gotcol, 0, count, self.topptr))
                    gotcol = None
            count += 1
        return regions
    def filter_columns(self, rects, target):
        """
        Filter a group of regions to match the target
        number, preserving those which seem the most
        likely to be 'good' (currently simply the largest by area).
        """
        if len(rects) <= target:
            return rects
        # add the x largest cols
        best = []
        for col in sorted(rects, lambda x, y: cmp(y.area(), x.area())):
            best.append(col)
            if len(best) == target:
                break
        return best
    def find_columns(self):
        """
        Get columns in the not-yet-segmented section of the image and
        append them to self.columns.
        """
        portion = iulib.bytearray()
        iulib.extract_subimage(portion, self.inverted, 0, 0,
                self.inverted.dim(0), self.topptr)
        # project and drop anything below 20% of the median, leaving
        # runs of ink separated by candidate column gaps
        projection = high_pass_median(iulib.numpy(portion).sum(axis=1), 0.20)
        posscols = self.get_possible_columns(projection)
        bestcols = self.filter_columns(posscols, int(self._params.get("columns", 1)))
        self.columns.extend(bestcols)
    def find_lines(self):
        """
        Get lines in each column and extend self.textlines with them.
        """
        for colrect in self.columns:
            newrect = Rectangle(colrect.x0, 0, colrect.x1, self.topptr)
            if newrect.area() < 1:
                continue
            portion = iulib.bytearray()
            iulib.extract_subimage(portion, self.inverted, *newrect.points())
            regions = get_lines_by_projection(portion, float(self._params.get("highpass")))
            plines = []
            for bottom, top in regions:
                height = top - bottom
                # NOTE(review): this keeps only runs noticeably taller
                # than the average character height — confirm the
                # comparison is the intended way round
                if height - self.avgheight < self.avgheight / 3:
                    continue
                plines.append(Rectangle(colrect.x0, bottom, colrect.x1, top))
            # NOTE(review): cpline/clline below are never used
            cpline = None
            clline = Rectangle(0, 0, 0, 0)
            charboxes = self.get_char_boxes(self.boxes)
            colboxes = [b for b in charboxes \
                    if b.overlaps(colrect.grow(10, 10))]
            colboxes.sort(lambda x, y: cmp(x.y1, y.y1))
            colboxes.reverse()
            clines = []
            for p in plines:
                clines.append(Rectangle(0, 0, 0, 0))
            # snap each projected line to the union of the character
            # boxes falling inside it
            while colboxes:
                char = colboxes.pop(0)
                cline = Rectangle(0, 0, 0, 0)
                for i in range(len(plines)):
                    pline = plines[i]
                    if char.overlaps(pline):
                        clines[i].include(char)
            self.textlines.extend(clines)
def get_coords(coordstr):
    """
    Parse a '~'-separated list of 'x0,y0,x1,y1' strings into a list of
    Rectangles.  Malformed chunks (wrong field count, non-integer
    values) are silently skipped; None yields an empty list.
    """
    if coordstr is None:
        return []
    rects = []
    for chunk in coordstr.split("~"):
        parts = chunk.split(",")
        if len(parts) != 4:
            continue
        try:
            rects.append(Rectangle(*[int(part) for part in parts]))
        except ValueError:
            continue
    return rects
def sanitise_coords(rectlist, width, height):
    """
    Clamp each rect in ``rectlist`` (mutated in place) to a
    ``width`` x ``height`` page.  Negative x1/y1 are shorthand for
    "out to the far edge".  Returns the same rect objects in a new
    list.
    """
    clamped = []
    for rect in rectlist:
        if rect.x0 < 0:
            rect.x0 = 0
        if rect.y0 < 0:
            rect.y0 = 0
        # negative upper bounds mean "use the full extent"
        if rect.x1 < 0:
            rect.x1 = width
        if rect.y1 < 0:
            rect.y1 = height
        # pull stray coordinates back inside the page
        if rect.x0 > width:
            rect.x0 = width - 1
        if rect.y0 > height:
            rect.y0 = height - 1
        if rect.x1 > width:
            rect.x1 = width
        if rect.y1 > height:
            rect.y1 = height
        clamped.append(rect)
    return clamped
def flip_coord(rect, height):
    # Mirror a rect vertically within an image of the given height,
    # converting between top-left- and bottom-left-origin conventions.
    return Rectangle(rect.x0, height - rect.y1, rect.x1, height - rect.y0)
class SegmentPageManual(node.Node, base.JSONWriterMixin):
    """
    Segment a page using manual column definitions.

    The "boxes" parameter is a '~'-separated list of 'x0,y0,x1,y1'
    column coordinates (see get_coords); each column is segmented
    independently with RAST and the per-column results are merged.

    NOTE(review): Python 2 only (dict.iteritems below).
    """
    stage = stages.PAGE_SEGMENT
    intypes = [ocrolib.numpy.ndarray]
    outtype = dict
    parameters = [
        dict(name="boxes", value=""),
    ]
    def __init__(self, *args, **kwargs):
        """Set up the region extractor and the RAST segmenter."""
        super(SegmentPageManual, self).__init__(*args, **kwargs)
        self._regions = ocrolib.RegionExtractor()
        self._segmenter = ocrolib.SegmentPageByRAST1()
    def null_data(self):
        """
        Return an empty segmentation when this node is ignored.
        """
        return dict(columns=[], lines=[], paragraphs=[])
    def process(self, binary):
        """
        Segment a binary image.

        input: a binary image.
        return: a dictionary of box types:
            lines
            paragraphs
            columns
        (each box an (x0, y0, x1, y1) tuple, top-left origin)
        """
        height = binary.shape[0]
        pstr = self._params.get("boxes", "")
        # user coordinates are top-left origin; flip to iulib's
        # bottom-left convention
        coords = [flip_coord(r, height) for r in get_coords(pstr)]
        # no boxes given: treat the whole page as a single column
        if len(coords) == 0:
            coords.append(Rectangle(0, 0,
                    binary.shape[1] - 1, binary.shape[0] - 1))
        coords = sanitise_coords(coords, binary.shape[1], binary.shape[0]);
        boxes = {}
        for rect in coords:
            points = rect.points()
            col = ocrolib.iulib.bytearray()
            ocrolib.iulib.extract_subimage(col, ocrolib.numpy2narray(binary), *points)
            pout = self.segment_portion(col, points[0], points[1], points[3] - points[1])
            # merge this column's boxes into the running result
            for key, rects in pout.iteritems():
                if boxes.get(key) is not None:
                    boxes.get(key).extend(rects)
                else:
                    boxes[key] = rects
        # flip everything back to top-left origin for the caller
        for key, rects in boxes.iteritems():
            boxes[key] = [flip_coord(r, height).points() for r in rects]
        return boxes
    def segment_portion(self, portion, dx, dy, pheight):
        """
        Segment a single-column chunk, offsetting results by (dx, dy)
        back into page coordinates.
        """
        page_seg = self._segmenter.segment(ocrolib.narray2numpy(portion))
        return self.extract_boxes(self._regions, page_seg, dx, dy, pheight)
    @classmethod
    def extract_boxes(cls, regions, page_seg, dx, dy, pheight):
        """
        Extract line/paragraph geometry info from a page segmentation.
        (Docstring repaired: it was garbled as "geoocrolib.metry" by a
        bad search-and-replace.)
        """
        out = dict(columns=[], lines=[], paragraphs=[])
        #out = dict(lines=[], paragraphs=[])
        exfuncs = dict(lines=regions.setPageLines,
                paragraphs=regions.setPageParagraphs)
        #columns=regions.setPageColumns)
        #page_seg = numpy.flipud(page_seg)
        for box, func in exfuncs.iteritems():
            func(page_seg)
            # region index 0 is the background; boxes are flipped from
            # bottom-left to top-left origin via pheight
            for i in range(1, regions.length()):
                out[box].append(Rectangle(regions.x0(i) + dx,
                        (pheight - regions.y1(i)) + dy, regions.x1(i) + dx,
                        (pheight - regions.y0(i)) + dy))
        return out
class BlockOut(node.Node, base.BinaryPngWriterMixin):
    """Blockout sections of an image."""
    stage = stages.FILTER_BINARY
    intypes = [numpy.ndarray]
    outtype = numpy.ndarray
    parameters = [dict(name="boxes", value=""),]
    def process(self, input):
        """
        Block out the regions listed in the "boxes" parameter by
        painting them with value 255, returning the modified image.
        Coordinates of -1 or less stand for the outer dimension on
        that axis (see sanitise_coords).  With no boxes the input is
        returned unchanged.

        NOTE(review): the original docstring said "using PIL", but the
        implementation is iulib-based.
        """
        height = input.shape[0]
        pstr = self._params.get("boxes", "")
        coords = get_coords(pstr)
        if len(coords) == 0:
            return input
        sancoords = sanitise_coords(coords, input.shape[1], input.shape[0]);
        # boxes arrive top-left origin; iulib wants bottom-left
        flipcoords = [flip_coord(r, height) for r in sancoords]
        narray = ocrolib.numpy2narray(input)
        for rect in flipcoords:
            ocrolib.iulib.fill_rect(narray, rect.x0, rect.y0, rect.x1, rect.y1, 255)
        return ocrolib.narray2numpy(narray)
| |
"""ProficiencyRatings API Version 1.0.
This API client was generated from a template; verify the generated code before relying on it.
"""
import logging
from datetime import date, datetime
from .base import BaseCanvasAPI
from .base import BaseModel
class ProficiencyRatingsAPI(BaseCanvasAPI):
    """ProficiencyRatings API Version 1.0.

    Thin wrappers around the Canvas outcome-proficiency endpoints for
    accounts and courses; all calls go through
    BaseCanvasAPI.generic_request.
    """
    def __init__(self, *args, **kwargs):
        """Init method for ProficiencyRatingsAPI."""
        super(ProficiencyRatingsAPI, self).__init__(*args, **kwargs)
        self.logger = logging.getLogger("py3canvas.ProficiencyRatingsAPI")
    def create_update_proficiency_ratings_accounts(
        self,
        account_id,
        ratings_color=None,
        ratings_description=None,
        ratings_mastery=None,
        ratings_points=None,
    ):
        """
        Create/update proficiency ratings.

        Create or update account-level proficiency ratings. These ratings will apply to all
        sub-accounts, unless they have their own account-level proficiency ratings defined.

        POSTs to /api/v1/accounts/{account_id}/outcome_proficiency.
        """
        path = {}
        data = {}
        params = {}
        # REQUIRED - PATH - account_id
        """
        ID
        """
        path["account_id"] = account_id
        # OPTIONAL - ratings[description]
        """
        The description of the rating level.
        """
        if ratings_description is not None:
            data["ratings[description]"] = ratings_description
        # OPTIONAL - ratings[points]
        """
        The non-negative number of points of the rating level. Points across ratings should be strictly decreasing in value.
        """
        if ratings_points is not None:
            data["ratings[points]"] = ratings_points
        # OPTIONAL - ratings[mastery]
        """
        Indicates the rating level where mastery is first achieved. Only one rating in a proficiency should be marked for mastery.
        """
        if ratings_mastery is not None:
            data["ratings[mastery]"] = ratings_mastery
        # OPTIONAL - ratings[color]
        """
        The color associated with the rating level. Should be a hex color code like '00FFFF'.
        """
        if ratings_color is not None:
            data["ratings[color]"] = ratings_color
        self.logger.debug(
            "POST /api/v1/accounts/{account_id}/outcome_proficiency with query params: {params} and form data: {data}".format(
                params=params, data=data, **path
            )
        )
        return self.generic_request(
            "POST",
            "/api/v1/accounts/{account_id}/outcome_proficiency".format(**path),
            data=data,
            params=params,
            single_item=True,
        )
    def create_update_proficiency_ratings_courses(
        self,
        course_id,
        ratings_color=None,
        ratings_description=None,
        ratings_mastery=None,
        ratings_points=None,
    ):
        """
        Create/update proficiency ratings.

        Create or update proficiency ratings for a single course.

        (Docstring corrected: the generated text described the
        account-level endpoint; this method POSTs to
        /api/v1/courses/{course_id}/outcome_proficiency.)
        """
        path = {}
        data = {}
        params = {}
        # REQUIRED - PATH - course_id
        """
        ID
        """
        path["course_id"] = course_id
        # OPTIONAL - ratings[description]
        """
        The description of the rating level.
        """
        if ratings_description is not None:
            data["ratings[description]"] = ratings_description
        # OPTIONAL - ratings[points]
        """
        The non-negative number of points of the rating level. Points across ratings should be strictly decreasing in value.
        """
        if ratings_points is not None:
            data["ratings[points]"] = ratings_points
        # OPTIONAL - ratings[mastery]
        """
        Indicates the rating level where mastery is first achieved. Only one rating in a proficiency should be marked for mastery.
        """
        if ratings_mastery is not None:
            data["ratings[mastery]"] = ratings_mastery
        # OPTIONAL - ratings[color]
        """
        The color associated with the rating level. Should be a hex color code like '00FFFF'.
        """
        if ratings_color is not None:
            data["ratings[color]"] = ratings_color
        self.logger.debug(
            "POST /api/v1/courses/{course_id}/outcome_proficiency with query params: {params} and form data: {data}".format(
                params=params, data=data, **path
            )
        )
        return self.generic_request(
            "POST",
            "/api/v1/courses/{course_id}/outcome_proficiency".format(**path),
            data=data,
            params=params,
            single_item=True,
        )
    def get_proficiency_ratings_accounts(self, account_id):
        """
        Get proficiency ratings.

        Get account-level proficiency ratings. If not defined for this account,
        it will return proficiency ratings for the nearest super-account with ratings defined.
        Will return 404 if none found.

        Examples:
            curl https://<canvas>/api/v1/accounts/<account_id>/outcome_proficiency \
                -H 'Authorization: Bearer <token>'
        """
        path = {}
        data = {}
        params = {}
        # REQUIRED - PATH - account_id
        """
        ID
        """
        path["account_id"] = account_id
        self.logger.debug(
            "GET /api/v1/accounts/{account_id}/outcome_proficiency with query params: {params} and form data: {data}".format(
                params=params, data=data, **path
            )
        )
        return self.generic_request(
            "GET",
            "/api/v1/accounts/{account_id}/outcome_proficiency".format(**path),
            data=data,
            params=params,
            single_item=True,
        )
    def get_proficiency_ratings_courses(self, course_id):
        """
        Get proficiency ratings.

        Get proficiency ratings for a single course. Will return 404 if
        none found.

        (Docstring corrected: the generated text and curl example
        referred to the account endpoint; this method GETs
        /api/v1/courses/{course_id}/outcome_proficiency.)

        Examples:
            curl https://<canvas>/api/v1/courses/<course_id>/outcome_proficiency \
                -H 'Authorization: Bearer <token>'
        """
        path = {}
        data = {}
        params = {}
        # REQUIRED - PATH - course_id
        """
        ID
        """
        path["course_id"] = course_id
        self.logger.debug(
            "GET /api/v1/courses/{course_id}/outcome_proficiency with query params: {params} and form data: {data}".format(
                params=params, data=data, **path
            )
        )
        return self.generic_request(
            "GET",
            "/api/v1/courses/{course_id}/outcome_proficiency".format(**path),
            data=data,
            params=params,
            single_item=True,
        )
class Proficiencyrating(BaseModel):
    """Proficiencyrating Model.

    Plain data holder for one rating level of a proficiency scale.
    The property setters change only this local object; they log a
    warning because the remote Canvas record is not updated.
    """

    def __init__(self, description=None, points=None, mastery=None, color=None):
        """Init method for Proficiencyrating class."""
        self._description = description
        self._points = points
        self._mastery = mastery
        self._color = color
        self.logger = logging.getLogger("py3canvas.Proficiencyrating")

    @property
    def description(self):
        """The description of the rating."""
        return self._description

    @description.setter
    def description(self, value):
        """Setter for description property."""
        # Logger.warn is deprecated (removed in Python 3.13);
        # Logger.warning emits the identical message.
        self.logger.warning(
            "Setting values on description will NOT update the remote Canvas instance."
        )
        self._description = value

    @property
    def points(self):
        """A non-negative number of points for the rating."""
        return self._points

    @points.setter
    def points(self, value):
        """Setter for points property."""
        self.logger.warning(
            "Setting values on points will NOT update the remote Canvas instance."
        )
        self._points = value

    @property
    def mastery(self):
        """Indicates the rating where mastery is first achieved."""
        return self._mastery

    @mastery.setter
    def mastery(self, value):
        """Setter for mastery property."""
        self.logger.warning(
            "Setting values on mastery will NOT update the remote Canvas instance."
        )
        self._mastery = value

    @property
    def color(self):
        """The hex color code of the rating."""
        return self._color

    @color.setter
    def color(self, value):
        """Setter for color property."""
        self.logger.warning(
            "Setting values on color will NOT update the remote Canvas instance."
        )
        self._color = value
class Proficiency(BaseModel):
    """Proficiency Model.

    Container for an ordered collection of proficiency ratings.  As
    with Proficiencyrating, the setter mutates only this local object
    and warns that Canvas is not updated.
    """

    def __init__(self, ratings=None):
        """Init method for Proficiency class."""
        self._ratings = ratings
        self.logger = logging.getLogger("py3canvas.Proficiency")

    @property
    def ratings(self):
        """An array of proficiency ratings. See the ProficiencyRating specification above."""
        return self._ratings

    @ratings.setter
    def ratings(self, value):
        """Setter for ratings property."""
        # Logger.warn is deprecated (removed in Python 3.13);
        # Logger.warning emits the identical message.
        self.logger.warning(
            "Setting values on ratings will NOT update the remote Canvas instance."
        )
        self._ratings = value
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library of dtypes (Tensor element types)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import types_pb2
class DType(object):
  """Represents the type of the elements in a `Tensor`.

  The following `DType` objects are defined:

  * `tf.float16`: 16-bit half-precision floating-point.
  * `tf.float32`: 32-bit single-precision floating-point.
  * `tf.float64`: 64-bit double-precision floating-point.
  * `tf.bfloat16`: 16-bit truncated floating-point.
  * `tf.complex64`: 64-bit single-precision complex.
  * `tf.complex128`: 128-bit double-precision complex.
  * `tf.int8`: 8-bit signed integer.
  * `tf.uint8`: 8-bit unsigned integer.
  * `tf.uint16`: 16-bit unsigned integer.
  * `tf.int16`: 16-bit signed integer.
  * `tf.int32`: 32-bit signed integer.
  * `tf.int64`: 64-bit signed integer.
  * `tf.bool`: Boolean.
  * `tf.string`: String.
  * `tf.qint8`: Quantized 8-bit signed integer.
  * `tf.quint8`: Quantized 8-bit unsigned integer.
  * `tf.qint16`: Quantized 16-bit signed integer.
  * `tf.quint16`: Quantized 16-bit unsigned integer.
  * `tf.qint32`: Quantized 32-bit signed integer.
  * `tf.resource`: Handle to a mutable resource.

  In addition, variants of these types with the `_ref` suffix are
  defined for reference-typed tensors.

  The `tf.as_dtype()` function converts numpy types and string type
  names to a `DType` object.
  """
  def __init__(self, type_enum):
    """Creates a new `DataType`.

    NOTE(mrry): In normal circumstances, you should not need to
    construct a `DataType` object directly. Instead, use the
    `tf.as_dtype()` function.

    Args:
      type_enum: A `types_pb2.DataType` enum value.

    Raises:
      TypeError: If `type_enum` is not a valid `types_pb2.DataType`.
    """
    # TODO(mrry): Make the necessary changes (using __new__) to ensure
    # that calling this returns one of the interned values.
    type_enum = int(type_enum)
    if (type_enum not in types_pb2.DataType.values()
        or type_enum == types_pb2.DT_INVALID):
      raise TypeError(
          "type_enum is not a valid types_pb2.DataType: %s" % type_enum)
    self._type_enum = type_enum
  @property
  def _is_ref_dtype(self):
    """Returns `True` if this `DType` represents a reference type."""
    # By convention a reference dtype's enum value is its base
    # dtype's value + 100 (see the *_REF assignments below the class).
    return self._type_enum > 100
  @property
  def _as_ref(self):
    """Returns a reference `DType` based on this `DType`."""
    if self._is_ref_dtype:
      return self
    else:
      # _INTERN_TABLE is populated at module level after this class.
      return _INTERN_TABLE[self._type_enum + 100]
  @property
  def base_dtype(self):
    """Returns a non-reference `DType` based on this `DType`."""
    if self._is_ref_dtype:
      return _INTERN_TABLE[self._type_enum - 100]
    else:
      return self
  @property
  def real_dtype(self):
    """Returns the dtype corresponding to this dtype's real part."""
    base = self.base_dtype
    if base == complex64:
      return float32
    elif base == complex128:
      return float64
    else:
      return self
  @property
  def is_numpy_compatible(self):
    """True unless this is (a reference to) `DT_RESOURCE`, which has no numpy equivalent."""
    return (self._type_enum != types_pb2.DT_RESOURCE and
            self._type_enum != types_pb2.DT_RESOURCE_REF)
  @property
  def as_numpy_dtype(self):
    """Returns a `numpy.dtype` based on this `DType`."""
    # _TF_TO_NP is the module-level enum -> numpy type mapping.
    return _TF_TO_NP[self._type_enum]
  @property
  def as_datatype_enum(self):
    """Returns a `types_pb2.DataType` enum value based on this `DType`."""
    return self._type_enum
  @property
  def is_bool(self):
    """Returns whether this is a boolean data type."""
    # ``bool`` here is the module-level DType constant defined after
    # this class (it shadows the builtin).
    return self.base_dtype == bool
  @property
  def is_integer(self):
    """Returns whether this is a (non-quantized) integer type."""
    return (self.is_numpy_compatible and not self.is_quantized and
            np.issubdtype(self.as_numpy_dtype, np.integer))
  @property
  def is_floating(self):
    """Returns whether this is a (non-quantized, real) floating point type."""
    return self.is_numpy_compatible and np.issubdtype(self.as_numpy_dtype,
                                                      np.floating)
  @property
  def is_complex(self):
    """Returns whether this is a complex floating point type."""
    return self.base_dtype in (complex64, complex128)
  @property
  def is_quantized(self):
    """Returns whether this is a quantized data type."""
    return self.base_dtype in [qint8, quint8, qint16, quint16, qint32, bfloat16]
  @property
  def is_unsigned(self):
    """Returns whether this type is unsigned.

    Non-numeric, unordered, and quantized types are not considered unsigned, and
    this function returns `False`.

    Returns:
      Whether a `DType` is unsigned.
    """
    try:
      return self.min == 0
    except TypeError:
      return False
  @property
  def min(self):
    """Returns the minimum representable value in this data type.

    Raises:
      TypeError: if this is a non-numeric, unordered, or quantized type.
    """
    if (self.is_quantized or self.base_dtype in
        (bool, string, complex64, complex128)):
      raise TypeError("Cannot find minimum value of %s." % self)
    # there is no simple way to get the min value of a dtype, we have to check
    # float and int types separately
    try:
      return np.finfo(self.as_numpy_dtype()).min
    except:  # bare except as possible raises by finfo not documented
      try:
        return np.iinfo(self.as_numpy_dtype()).min
      except:
        raise TypeError("Cannot find minimum value of %s." % self)
  @property
  def max(self):
    """Returns the maximum representable value in this data type.

    Raises:
      TypeError: if this is a non-numeric, unordered, or quantized type.
    """
    if (self.is_quantized or self.base_dtype in
        (bool, string, complex64, complex128)):
      raise TypeError("Cannot find maximum value of %s." % self)
    # there is no simple way to get the max value of a dtype, we have to check
    # float and int types separately
    try:
      return np.finfo(self.as_numpy_dtype()).max
    except:  # bare except as possible raises by finfo not documented
      try:
        return np.iinfo(self.as_numpy_dtype()).max
      except:
        raise TypeError("Cannot find maximum value of %s." % self)
  @property
  def limits(self, clip_negative=True):
    """Return intensity limits, i.e. (min, max) tuple, of the dtype.

    NOTE(review): this is declared as a @property yet takes a
    parameter; callers can never pass ``clip_negative``, so the
    default (True) is always used.

    Args:
      clip_negative : bool, optional
          If True, clip the negative range (i.e. return 0 for min intensity)
          even if the image dtype allows negative values.
    Returns
      min, max : tuple
        Lower and upper intensity limits.
    """
    # dtype_range is the module-level numpy-dtype -> (min, max) table.
    min, max = dtype_range[self.as_numpy_dtype]
    if clip_negative:
      min = 0
    return min, max
  def is_compatible_with(self, other):
    """Returns True if the `other` DType will be converted to this DType.

    The conversion rules are as follows:

    ```python
    DType(T)       .is_compatible_with(DType(T))        == True
    DType(T)       .is_compatible_with(DType(T).as_ref) == True
    DType(T).as_ref.is_compatible_with(DType(T))        == False
    DType(T).as_ref.is_compatible_with(DType(T).as_ref) == True
    ```

    Args:
      other: A `DType` (or object that may be converted to a `DType`).

    Returns:
      True if a Tensor of the `other` `DType` will be implicitly converted to
      this `DType`.
    """
    other = as_dtype(other)
    return self._type_enum in (
        other.as_datatype_enum, other.base_dtype.as_datatype_enum)
  def __eq__(self, other):
    """Returns True iff this DType refers to the same type as `other`."""
    if other is None:
      return False
    try:
      dtype = as_dtype(other).as_datatype_enum
      return self._type_enum == dtype  # pylint: disable=protected-access
    except TypeError:
      return False
  def __ne__(self, other):
    """Returns True iff self != other."""
    return not self.__eq__(other)
  @property
  def name(self):
    """Returns the string name for this `DType`."""
    return _TYPE_TO_STRING[self._type_enum]
  def __int__(self):
    return self._type_enum
  def __str__(self):
    return "<dtype: %r>" % self.name
  def __repr__(self):
    return "tf." + self.name
  def __hash__(self):
    # The enum value itself serves as the hash, keeping __eq__ and
    # __hash__ consistent.
    return self._type_enum
  @property
  def size(self):
    """Size in bytes of one element; resource handles count as 1."""
    if self._type_enum == types_pb2.DT_RESOURCE:
      return 1
    return np.dtype(self.as_numpy_dtype).itemsize
# Define data type range of numpy dtype.
# (The former np.bool8 entry has been dropped: it was merely an alias
# of np.bool_ — a duplicate dict key in effect — and the alias was
# removed entirely in NumPy 1.24, which made this literal raise
# AttributeError on modern NumPy.)
dtype_range = {np.bool_: (False, True),
               np.uint8: (0, 255),
               np.uint16: (0, 65535),
               np.int8: (-128, 127),
               np.int16: (-32768, 32767),
               np.int64: (-2**63, 2**63 - 1),
               np.uint64: (0, 2**64 - 1),
               np.int32: (-2**31, 2**31 - 1),
               np.uint32: (0, 2**32 - 1),
               np.float32: (-1, 1),
               np.float64: (-1, 1)}
# Define standard wrappers for the types_pb2.DataType enum.
# NOTE: several of these module-level names (``bool``, ``string``,
# and the aliases ``half``/``double``) intentionally shadow Python
# builtins or common names so they read as the public tf.<dtype> API.
resource = DType(types_pb2.DT_RESOURCE)
float16 = DType(types_pb2.DT_HALF)
half = float16
float32 = DType(types_pb2.DT_FLOAT)
float64 = DType(types_pb2.DT_DOUBLE)
double = float64
int32 = DType(types_pb2.DT_INT32)
uint8 = DType(types_pb2.DT_UINT8)
uint16 = DType(types_pb2.DT_UINT16)
int16 = DType(types_pb2.DT_INT16)
int8 = DType(types_pb2.DT_INT8)
string = DType(types_pb2.DT_STRING)
complex64 = DType(types_pb2.DT_COMPLEX64)
complex128 = DType(types_pb2.DT_COMPLEX128)
int64 = DType(types_pb2.DT_INT64)
bool = DType(types_pb2.DT_BOOL)
qint8 = DType(types_pb2.DT_QINT8)
quint8 = DType(types_pb2.DT_QUINT8)
qint16 = DType(types_pb2.DT_QINT16)
quint16 = DType(types_pb2.DT_QUINT16)
qint32 = DType(types_pb2.DT_QINT32)
resource_ref = DType(types_pb2.DT_RESOURCE_REF)
bfloat16 = DType(types_pb2.DT_BFLOAT16)
# Reference-typed (``_ref``) variants: each enum value is the base
# dtype's value + 100 (see DType._is_ref_dtype).
float16_ref = DType(types_pb2.DT_HALF_REF)
half_ref = float16_ref
float32_ref = DType(types_pb2.DT_FLOAT_REF)
float64_ref = DType(types_pb2.DT_DOUBLE_REF)
double_ref = float64_ref
int32_ref = DType(types_pb2.DT_INT32_REF)
uint8_ref = DType(types_pb2.DT_UINT8_REF)
uint16_ref = DType(types_pb2.DT_UINT16_REF)
int16_ref = DType(types_pb2.DT_INT16_REF)
int8_ref = DType(types_pb2.DT_INT8_REF)
string_ref = DType(types_pb2.DT_STRING_REF)
complex64_ref = DType(types_pb2.DT_COMPLEX64_REF)
complex128_ref = DType(types_pb2.DT_COMPLEX128_REF)
int64_ref = DType(types_pb2.DT_INT64_REF)
bool_ref = DType(types_pb2.DT_BOOL_REF)
qint8_ref = DType(types_pb2.DT_QINT8_REF)
quint8_ref = DType(types_pb2.DT_QUINT8_REF)
qint16_ref = DType(types_pb2.DT_QINT16_REF)
quint16_ref = DType(types_pb2.DT_QUINT16_REF)
qint32_ref = DType(types_pb2.DT_QINT32_REF)
bfloat16_ref = DType(types_pb2.DT_BFLOAT16_REF)
# Maintain an intern table so that we don't have to create a large
# number of small objects.
# Keyed by DataType enum value; as_dtype() returns these canonical instances.
_INTERN_TABLE = {
    types_pb2.DT_HALF: float16,
    types_pb2.DT_FLOAT: float32,
    types_pb2.DT_DOUBLE: float64,
    types_pb2.DT_INT32: int32,
    types_pb2.DT_UINT8: uint8,
    types_pb2.DT_UINT16: uint16,
    types_pb2.DT_INT16: int16,
    types_pb2.DT_INT8: int8,
    types_pb2.DT_STRING: string,
    types_pb2.DT_COMPLEX64: complex64,
    types_pb2.DT_COMPLEX128: complex128,
    types_pb2.DT_INT64: int64,
    types_pb2.DT_BOOL: bool,
    types_pb2.DT_QINT8: qint8,
    types_pb2.DT_QUINT8: quint8,
    types_pb2.DT_QINT16: qint16,
    types_pb2.DT_QUINT16: quint16,
    types_pb2.DT_QINT32: qint32,
    types_pb2.DT_BFLOAT16: bfloat16,
    types_pb2.DT_RESOURCE: resource,
    types_pb2.DT_HALF_REF: float16_ref,
    types_pb2.DT_FLOAT_REF: float32_ref,
    types_pb2.DT_DOUBLE_REF: float64_ref,
    types_pb2.DT_INT32_REF: int32_ref,
    types_pb2.DT_UINT8_REF: uint8_ref,
    types_pb2.DT_UINT16_REF: uint16_ref,
    types_pb2.DT_INT16_REF: int16_ref,
    types_pb2.DT_INT8_REF: int8_ref,
    types_pb2.DT_STRING_REF: string_ref,
    types_pb2.DT_COMPLEX64_REF: complex64_ref,
    types_pb2.DT_COMPLEX128_REF: complex128_ref,
    types_pb2.DT_INT64_REF: int64_ref,
    types_pb2.DT_BOOL_REF: bool_ref,
    types_pb2.DT_QINT8_REF: qint8_ref,
    types_pb2.DT_QUINT8_REF: quint8_ref,
    types_pb2.DT_QINT16_REF: qint16_ref,
    types_pb2.DT_QUINT16_REF: quint16_ref,
    types_pb2.DT_QINT32_REF: qint32_ref,
    types_pb2.DT_BFLOAT16_REF: bfloat16_ref,
    types_pb2.DT_RESOURCE_REF: resource_ref,
}
# Standard mappings between types_pb2.DataType values and string names.
# Backs DType.name; must cover exactly the same keys as _INTERN_TABLE.
_TYPE_TO_STRING = {
    types_pb2.DT_HALF: "float16",
    types_pb2.DT_FLOAT: "float32",
    types_pb2.DT_DOUBLE: "float64",
    types_pb2.DT_INT32: "int32",
    types_pb2.DT_UINT8: "uint8",
    types_pb2.DT_UINT16: "uint16",
    types_pb2.DT_INT16: "int16",
    types_pb2.DT_INT8: "int8",
    types_pb2.DT_STRING: "string",
    types_pb2.DT_COMPLEX64: "complex64",
    types_pb2.DT_COMPLEX128: "complex128",
    types_pb2.DT_INT64: "int64",
    types_pb2.DT_BOOL: "bool",
    types_pb2.DT_QINT8: "qint8",
    types_pb2.DT_QUINT8: "quint8",
    types_pb2.DT_QINT16: "qint16",
    types_pb2.DT_QUINT16: "quint16",
    types_pb2.DT_QINT32: "qint32",
    types_pb2.DT_BFLOAT16: "bfloat16",
    types_pb2.DT_RESOURCE: "resource",
    types_pb2.DT_HALF_REF: "float16_ref",
    types_pb2.DT_FLOAT_REF: "float32_ref",
    types_pb2.DT_DOUBLE_REF: "float64_ref",
    types_pb2.DT_INT32_REF: "int32_ref",
    types_pb2.DT_UINT8_REF: "uint8_ref",
    types_pb2.DT_UINT16_REF: "uint16_ref",
    types_pb2.DT_INT16_REF: "int16_ref",
    types_pb2.DT_INT8_REF: "int8_ref",
    types_pb2.DT_STRING_REF: "string_ref",
    types_pb2.DT_COMPLEX64_REF: "complex64_ref",
    types_pb2.DT_COMPLEX128_REF: "complex128_ref",
    types_pb2.DT_INT64_REF: "int64_ref",
    types_pb2.DT_BOOL_REF: "bool_ref",
    types_pb2.DT_QINT8_REF: "qint8_ref",
    types_pb2.DT_QUINT8_REF: "quint8_ref",
    types_pb2.DT_QINT16_REF: "qint16_ref",
    types_pb2.DT_QUINT16_REF: "quint16_ref",
    types_pb2.DT_QINT32_REF: "qint32_ref",
    types_pb2.DT_BFLOAT16_REF: "bfloat16_ref",
    types_pb2.DT_RESOURCE_REF: "resource_ref",
}
# Inverse view: canonical name -> interned DType (used by as_dtype(str)).
_STRING_TO_TF = {value: _INTERN_TABLE[key]
                 for key, value in _TYPE_TO_STRING.items()}
# Add non-canonical aliases.
_STRING_TO_TF["half"] = float16
_STRING_TO_TF["half_ref"] = float16_ref
_STRING_TO_TF["float"] = float32
_STRING_TO_TF["float_ref"] = float32_ref
_STRING_TO_TF["double"] = float64
_STRING_TO_TF["double_ref"] = float64_ref
# Numpy representation for quantized dtypes.
#
# These are magic strings that are used in the swig wrapper to identify
# quantized types.
# TODO(mrry,keveman): Investigate Numpy type registration to replace this
# hard-coding of names.
# NOTE(review): the single-field struct form `(name, base, 1)` uses a
# shape-1 tuple that newer numpy versions deprecate; confirm against the
# pinned numpy release.
_np_qint8 = np.dtype([("qint8", np.int8, 1)])
_np_quint8 = np.dtype([("quint8", np.uint8, 1)])
_np_qint16 = np.dtype([("qint16", np.int16, 1)])
_np_quint16 = np.dtype([("quint16", np.uint16, 1)])
_np_qint32 = np.dtype([("qint32", np.int32, 1)])
# Custom struct dtype for directly-fed ResourceHandles of supported type(s).
np_resource = np.dtype([("resource", np.ubyte, 1)])
# Standard mappings between types_pb2.DataType values and numpy.dtypes.
# (numpy type, DType) pairs scanned linearly by as_dtype() as a fallback.
# NOTE(review): np.object / np.bool are deprecated aliases removed in
# numpy >= 1.24; confirm the supported numpy range before upgrading.
_NP_TO_TF = frozenset([
    (np.float16, float16),
    (np.float32, float32),
    (np.float64, float64),
    (np.int32, int32),
    (np.int64, int64),
    (np.uint8, uint8),
    (np.uint16, uint16),
    (np.int16, int16),
    (np.int8, int8),
    (np.complex64, complex64),
    (np.complex128, complex128),
    (np.object, string),
    (np.bool, bool),
    (_np_qint8, qint8),
    (_np_quint8, quint8),
    (_np_qint16, qint16),
    (_np_quint16, quint16),
    (_np_qint32, qint32),
    # NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
])
# Reverse direction: DataType enum -> numpy type (backs DType.as_numpy_dtype).
_TF_TO_NP = {
    types_pb2.DT_HALF: np.float16,
    types_pb2.DT_FLOAT: np.float32,
    types_pb2.DT_DOUBLE: np.float64,
    types_pb2.DT_INT32: np.int32,
    types_pb2.DT_UINT8: np.uint8,
    types_pb2.DT_UINT16: np.uint16,
    types_pb2.DT_INT16: np.int16,
    types_pb2.DT_INT8: np.int8,
    # NOTE(touts): For strings we use np.object as it supports variable length
    # strings.
    types_pb2.DT_STRING: np.object,
    types_pb2.DT_COMPLEX64: np.complex64,
    types_pb2.DT_COMPLEX128: np.complex128,
    types_pb2.DT_INT64: np.int64,
    types_pb2.DT_BOOL: np.bool,
    types_pb2.DT_QINT8: _np_qint8,
    types_pb2.DT_QUINT8: _np_quint8,
    types_pb2.DT_QINT16: _np_qint16,
    types_pb2.DT_QUINT16: _np_quint16,
    types_pb2.DT_QINT32: _np_qint32,
    # bfloat16 has no native numpy type here; raw bits are carried as uint16.
    types_pb2.DT_BFLOAT16: np.uint16,
    # Ref types
    types_pb2.DT_HALF_REF: np.float16,
    types_pb2.DT_FLOAT_REF: np.float32,
    types_pb2.DT_DOUBLE_REF: np.float64,
    types_pb2.DT_INT32_REF: np.int32,
    types_pb2.DT_UINT8_REF: np.uint8,
    types_pb2.DT_UINT16_REF: np.uint16,
    types_pb2.DT_INT16_REF: np.int16,
    types_pb2.DT_INT8_REF: np.int8,
    types_pb2.DT_STRING_REF: np.object,
    types_pb2.DT_COMPLEX64_REF: np.complex64,
    types_pb2.DT_COMPLEX128_REF: np.complex128,
    types_pb2.DT_INT64_REF: np.int64,
    types_pb2.DT_BOOL_REF: np.bool,
    types_pb2.DT_QINT8_REF: _np_qint8,
    types_pb2.DT_QUINT8_REF: _np_quint8,
    types_pb2.DT_QINT16_REF: _np_qint16,
    types_pb2.DT_QUINT16_REF: _np_quint16,
    types_pb2.DT_QINT32_REF: _np_qint32,
    types_pb2.DT_BFLOAT16_REF: np.uint16,
}
# All quantized dtypes (value and ref variants).
QUANTIZED_DTYPES = frozenset(
    [qint8, quint8, qint16, quint16, qint32, qint8_ref, quint8_ref, qint16_ref,
     quint16_ref, qint32_ref])
def as_dtype(type_value):
  """Converts the given `type_value` to a `DType`.

  Args:
    type_value: A value that can be converted to a `tf.DType`
      object. This may currently be a `tf.DType` object, a
      [`DataType` enum](https://www.tensorflow.org/code/tensorflow/core/framework/types.proto),
      a string type name, or a `numpy.dtype`.

  Returns:
    A `DType` corresponding to `type_value`.

  Raises:
    TypeError: If `type_value` cannot be converted to a `DType`.
  """
  if isinstance(type_value, DType):
    return type_value
  # Fast paths: DataType enum values, then canonical/alias string names.
  for lookup_table in (_INTERN_TABLE, _STRING_TO_TF):
    try:
      return lookup_table[type_value]
    except KeyError:
      pass
  if isinstance(type_value, np.dtype):
    # The numpy dtype for strings is variable length. We can not compare
    # dtype with a single constant (np.string does not exist) to decide
    # dtype is a "string" type. We need to compare the dtype.type to be
    # sure it's a string type.
    if type_value.type in (np.string_, np.unicode_):
      return string
  # Fallback: linear scan of the (numpy type, DType) pairs; `==` on numpy
  # dtypes also matches the corresponding Python scalar type objects.
  for np_type, tf_type in _NP_TO_TF:
    try:
      if np_type == type_value:
        return tf_type
    except TypeError as e:
      raise TypeError("Cannot convert {} to a dtype. {}".format(type_value, e))
  raise TypeError(
      "Cannot convert value %r to a TensorFlow DType." % type_value)
| |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Indexed slices."""
# pylint: disable=g-bad-name
import collections
import warnings
import numpy as np
from tensorflow.python import tf2
from tensorflow.python.eager import context
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_conversion_registry
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import type_spec
from tensorflow.python.types import internal
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import tf_export
# Use LazyLoader to avoid circular dependencies.
#
# Note: these can all be changed to regular imports once all code has been
# updated to refer the symbols defined in this module directly, rather than
# using the backwards-compatible aliases in ops.py. (E.g.,
# "indexed_slices.IndexedSlices" rather than "ops.IndexedSlices".)
# Each LazyLoader behaves like the named module but defers the actual import
# until first attribute access, breaking the import cycles noted above.
math_ops = LazyLoader(
    "math_ops", globals(),
    "tensorflow.python.ops.math_ops")
ops = LazyLoader(
    "ops", globals(), "tensorflow.python.framework.ops")
tensor_spec = LazyLoader(
    "tensor_spec", globals(),
    "tensorflow.python.framework.tensor_spec")
tensor_util = LazyLoader(
    "tensor_util", globals(),
    "tensorflow.python.framework.tensor_util")
# TODO(mdan): Should IndexedSlices be a "tensor"?
@tf_export("IndexedSlices")
class IndexedSlices(internal.NativeObject, composite_tensor.CompositeTensor):
  """A sparse representation of a set of tensor slices at given indices.
  This class is a simple wrapper for a pair of `Tensor` objects:
  * `values`: A `Tensor` of any dtype with shape `[D0, D1, ..., Dn]`.
  * `indices`: A 1-D integer `Tensor` with shape `[D0]`.
  An `IndexedSlices` is typically used to represent a subset of a larger
  tensor `dense` of shape `[LARGE0, D1, .. , DN]` where `LARGE0 >> D0`.
  The values in `indices` are the indices in the first dimension of
  the slices that have been extracted from the larger tensor.
  The dense tensor `dense` represented by an `IndexedSlices` `slices` has
  ```python
  dense[slices.indices[i], :, :, :, ...] = slices.values[i, :, :, :, ...]
  ```
  The `IndexedSlices` class is used principally in the definition of
  gradients for operations that have sparse gradients
  (e.g. `tf.gather`).
  >>> v = tf.Variable([[0.,1, 2], [2, 3, 4], [4, 5, 6], [6, 7, 8]])
  >>> with tf.GradientTape() as tape:
  ...   r = tf.gather(v, [1,3])
  >>> index_slices = tape.gradient(r,v)
  >>> index_slices
  <...IndexedSlices object ...>
  >>> index_slices.indices.numpy()
  array([1, 3], dtype=int32)
  >>> index_slices.values.numpy()
  array([[1., 1., 1.],
         [1., 1., 1.]], dtype=float32)
  Contrast this representation with
  `tf.sparse.SparseTensor`,
  which uses multi-dimensional indices and scalar values.
  """
  def __init__(self, values, indices, dense_shape=None):
    """Creates an `IndexedSlices`.

    Args:
      values: A `Tensor` of shape `[D0, D1, ..., Dn]` holding the slice data.
      indices: A 1-D integer `Tensor` of shape `[D0]`; row `i` of `values`
        belongs at row `indices[i]` of the dense tensor.
      dense_shape: Optional 1-D `Tensor` giving the shape of the
        corresponding dense tensor.
    """
    self._values = values
    self._indices = indices
    self._dense_shape = dense_shape
  @property
  def values(self):
    """A `Tensor` containing the values of the slices."""
    return self._values
  @property
  def indices(self):
    """A 1-D `Tensor` containing the indices of the slices."""
    return self._indices
  @property
  def dense_shape(self):
    """A 1-D `Tensor` containing the shape of the corresponding dense tensor."""
    return self._dense_shape
  @property
  def shape(self):
    """Gets the `tf.TensorShape` representing the shape of the dense tensor.

    Returns:
      A `tf.TensorShape` object (unknown rank when no dense_shape was given).
    """
    if self._dense_shape is None:
      return tensor_shape.TensorShape(None)
    # Derive as much static shape information as possible from dense_shape.
    return tensor_util.constant_value_as_shape(self._dense_shape)
  @property
  def name(self):
    """The name of this `IndexedSlices`."""
    return self.values.name
  @property
  def device(self):
    """The name of the device on which `values` will be produced, or `None`."""
    return self.values.device
  @property
  def op(self):
    """The `Operation` that produces `values` as an output."""
    return self.values.op
  @property
  def dtype(self):
    """The `DType` of elements in this tensor."""
    return self.values.dtype
  @property
  def graph(self):
    """The `Graph` that contains the values, indices, and shape tensors."""
    return self._values.graph
  def __str__(self):
    # dense_shape is omitted from the repr when it was not provided.
    return "IndexedSlices(indices=%s, values=%s%s)" % (
        self._indices, self._values,
        (", dense_shape=%s" %
         (self._dense_shape,)) if self._dense_shape is not None else "")
  def __neg__(self):
    # Negation only touches the values; indices/shape are unchanged.
    return IndexedSlices(-self.values, self.indices, self.dense_shape)
  @property
  def _type_spec(self):
    # The indices count and the leading dimension of values both describe
    # the number of slices, so merge them for a tighter static shape.
    indices_shape = self._indices.shape.merge_with(self._values.shape[:1])
    # Dense shape: unknown in the sliced (first) dimension, values' shape
    # elsewhere.
    dense_shape = tensor_shape.TensorShape([None]).concatenate(
        self._values.shape[1:])
    if self._dense_shape is not None:
      dense_shape_dtype = self._dense_shape.dtype
      dense_shape = dense_shape.merge_with(
          tensor_util.constant_value_as_shape(self._dense_shape))
    else:
      dense_shape_dtype = None
    return IndexedSlicesSpec(dense_shape, self.dtype, self._indices.dtype,
                             dense_shape_dtype, indices_shape)
  def _shape_invariant_to_type_spec(self, shape):
    # From tf.while_loop docs: "If a loop variable is an IndexedSlices, the
    # shape invariant must be a shape invariant of the values tensor of the
    # IndexedSlices. It means the shapes of the three tensors of the
    # IndexedSlices are (shape, [shape[0]], [shape.ndims])."
    indices_shape = shape[:1]
    dense_shape = tensor_shape.TensorShape([None]).concatenate(shape[1:])
    if self._dense_shape is None:
      dense_shape_dtype = None
    else:
      dense_shape_dtype = self._dense_shape.dtype
    return IndexedSlicesSpec(dense_shape, self.dtype, self._indices.dtype,
                             dense_shape_dtype, indices_shape)
  def consumers(self):
    # `_consumers` is not defined in this class; it is inherited --
    # NOTE(review): presumably from composite_tensor.CompositeTensor, confirm.
    return self._consumers()
# Graph-free counterpart of IndexedSlices, used when the components are plain
# numpy arrays (see IndexedSlicesSpec._from_components).
IndexedSlicesValue = collections.namedtuple(
    "IndexedSlicesValue", ["values", "indices", "dense_shape"])
@tf_export("IndexedSlicesSpec")
class IndexedSlicesSpec(type_spec.TypeSpec):
  """Type specification for a `tf.IndexedSlices`."""
  __slots__ = ["_shape", "_values_dtype", "_indices_dtype",
               "_dense_shape_dtype", "_indices_shape"]
  # The kind of value this spec describes.
  value_type = property(lambda self: IndexedSlices)
  def __init__(self, shape=None, dtype=dtypes.float32,
               indices_dtype=dtypes.int64, dense_shape_dtype=None,
               indices_shape=None):
    """Constructs a type specification for a `tf.IndexedSlices`.

    Args:
      shape: The dense shape of the `IndexedSlices`, or `None` to allow any
        dense shape.
      dtype: `tf.DType` of values in the `IndexedSlices`.
      indices_dtype: `tf.DType` of the `indices` in the `IndexedSlices`. One
        of `tf.int32` or `tf.int64`.
      dense_shape_dtype: `tf.DType` of the `dense_shape` in the
        `IndexedSlices`. One of `tf.int32`, `tf.int64`, or `None` (if the
        `IndexedSlices` has no `dense_shape` tensor).
      indices_shape: The shape of the `indices` component, which indicates
        how many slices are in the `IndexedSlices`.
    """
    self._shape = tensor_shape.as_shape(shape)
    self._values_dtype = dtypes.as_dtype(dtype)
    self._indices_dtype = dtypes.as_dtype(indices_dtype)
    if dense_shape_dtype is None:
      self._dense_shape_dtype = None
    else:
      self._dense_shape_dtype = dtypes.as_dtype(dense_shape_dtype)
    # indices are always rank 1.
    self._indices_shape = tensor_shape.as_shape(indices_shape).with_rank(1)
  def _serialize(self):
    # Tuple of constructor args; used by TypeSpec equality/serialization.
    return (self._shape, self._values_dtype, self._indices_dtype,
            self._dense_shape_dtype, self._indices_shape)
  @property
  def _component_specs(self):
    # values carry the slice count as their leading dimension.
    value_shape = self._indices_shape.concatenate(self._shape[1:])
    specs = [
        tensor_spec.TensorSpec(value_shape, self._values_dtype),
        tensor_spec.TensorSpec(self._indices_shape, self._indices_dtype)]
    # dense_shape is an optional third component.
    if self._dense_shape_dtype is not None:
      specs.append(
          tensor_spec.TensorSpec([self._shape.ndims], self._dense_shape_dtype))
    return tuple(specs)
  def _to_components(self, value):
    if value.dense_shape is None:
      return (value.values, value.indices)
    else:
      return (value.values, value.indices, value.dense_shape)
  def _from_components(self, tensor_list):
    # In TF1 graph mode, plain numpy components are rebuilt as the
    # value-only namedtuple instead of a symbolic IndexedSlices.
    if (all(isinstance(t, np.ndarray) for t in tensor_list) and
        not tf2.enabled()):
      if len(tensor_list) == 2:
        return IndexedSlicesValue(tensor_list[0], tensor_list[1], None)
      else:
        return IndexedSlicesValue(*tensor_list)
    else:
      return IndexedSlices(*tensor_list)
@tf_export(v1=["convert_to_tensor_or_indexed_slices"])
def convert_to_tensor_or_indexed_slices(value, dtype=None, name=None):
  """Converts the given object to a `Tensor` or an `IndexedSlices`.

  If `value` is an `IndexedSlices` or `SparseTensor` it is returned
  unmodified. Otherwise, it is converted to a `Tensor` using
  `convert_to_tensor()`.

  Args:
    value: An `IndexedSlices`, `SparseTensor`, or an object that can be consumed
      by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` or
      `IndexedSlices`.
    name: (Optional.) A name to use if a new `Tensor` is created.

  Returns:
    A `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`.

  Raises:
    ValueError: If `dtype` does not match the element type of `value`.
  """
  # Public v1 endpoint: same as the internal helper, but never returns refs.
  return internal_convert_to_tensor_or_indexed_slices(
      value=value, dtype=dtype, name=name, as_ref=False)
def internal_convert_to_tensor_or_indexed_slices(value,
                                                 dtype=None,
                                                 name=None,
                                                 as_ref=False):
  """Converts the given object to a `Tensor` or an `IndexedSlices`.

  If `value` is an `IndexedSlices` or `SparseTensor` it is returned
  unmodified. Otherwise, it is converted to a `Tensor` using
  `convert_to_tensor()`.

  Args:
    value: An `IndexedSlices`, `SparseTensor`, or an object that can be
      consumed by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` or
      `IndexedSlices`.
    name: (Optional.) A name to use if a new `Tensor` is created.
    as_ref: True if the caller wants the results as ref tensors.

  Returns:
    A `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`.

  Raises:
    ValueError: If `dtype` does not match the element type of `value`.
  """
  # An EagerTensor inside a graph context must be captured as a graph tensor.
  if isinstance(value, ops.EagerTensor) and not context.executing_eagerly():
    return ops.convert_to_tensor(value, dtype=dtype, name=name, as_ref=as_ref)
  # TODO(mdan): Name says tensor_or_indexed_slices. So do explicitly just that?
  if isinstance(value, internal.NativeObject):
    # Pass composite values (IndexedSlices/SparseTensor/...) through, after
    # validating the requested dtype.
    if dtype and not dtypes.as_dtype(dtype).is_compatible_with(value.dtype):
      raise ValueError(
          "Incompatible tensor conversion requested to `dtype` "
          f"{dtypes.as_dtype(dtype).name} for `value` ({value}) with dtype"
          f" {value.dtype.name}.")
    return value
  return ops.convert_to_tensor(value, dtype=dtype, name=name, as_ref=as_ref)
def internal_convert_n_to_tensor_or_indexed_slices(values,
                                                   dtype=None,
                                                   name=None,
                                                   as_ref=False):
  """Converts `values` to a list of `Tensor` or `IndexedSlices` objects.

  Any `IndexedSlices` or `SparseTensor` objects in `values` are returned
  unmodified; `None` entries are preserved.

  Args:
    values: An iterable of `None`, `IndexedSlices`, `SparseTensor`, or objects
      that can be consumed by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor` or
      `IndexedSlices`.
    name: (Optional.) A name prefix to used when a new `Tensor` is created, in
      which case element `i` will be given the name `name + '_' + i`.
    as_ref: True if the caller wants the results as ref tensors.

  Returns:
    A list of `Tensor`, `IndexedSlices`, `SparseTensor` and/or `None` objects.

  Raises:
    TypeError: If no conversion function is registered for an element in
      `values`.
    RuntimeError: If a registered conversion function returns an invalid
      value.
  """
  if not isinstance(values, collections_abc.Iterable):
    raise TypeError("Argument `values` must be iterable.")
  converted = []
  for index, value in enumerate(values):
    if value is None:
      converted.append(None)
      continue
    # Give each created tensor a position-suffixed name.
    element_name = None if name is None else "%s_%d" % (name, index)
    converted.append(
        internal_convert_to_tensor_or_indexed_slices(
            value, dtype=dtype, name=element_name, as_ref=as_ref))
  return converted
def convert_n_to_tensor_or_indexed_slices(values, dtype=None, name=None):
  """Converts `values` to a list of `Tensor` or `IndexedSlices` objects.

  Any `IndexedSlices` or `SparseTensor` objects in `values` are returned
  unmodified.

  Args:
    values: A list of `None`, `IndexedSlices`, `SparseTensor`, or objects that
      can be consumed by `convert_to_tensor()`.
    dtype: (Optional.) The required `DType` of the returned `Tensor`
      `IndexedSlices`.
    name: (Optional.) A name prefix to used when a new `Tensor` is created, in
      which case element `i` will be given the name `name + '_' + i`.

  Returns:
    A list of `Tensor`, `IndexedSlices`, and/or `SparseTensor` objects.

  Raises:
    TypeError: If no conversion function is registered for an element in
      `values`.
    RuntimeError: If a registered conversion function returns an invalid
      value.
  """
  # Thin public wrapper; ref tensors are never requested here.
  return internal_convert_n_to_tensor_or_indexed_slices(
      values=values, dtype=dtype, name=name, as_ref=False)
# Warn the user if we convert a sparse representation to dense with at
# least this number of elements.
_LARGE_SPARSE_NUM_ELEMENTS = 100000000  # 1e8 elements
def _indexed_slices_to_tensor(value, dtype=None, name=None, as_ref=False):
  """Converts an IndexedSlices object `value` to a Tensor.

  NOTE(mrry): This function is potentially expensive.

  Args:
    value: An ops.IndexedSlices object.
    dtype: The dtype of the Tensor to be returned.
    name: Optional name to use for the returned Tensor.
    as_ref: True if a ref is requested (ignored; the result is never a ref).

  Returns:
    A dense Tensor representing the values in the given IndexedSlices.

  Raises:
    ValueError: If the IndexedSlices does not have the same dtype.
  """
  _ = as_ref
  if dtype and not dtype.is_compatible_with(value.dtype):
    raise ValueError(
        f"Incompatible tensor conversion requested to `dtype` {dtype.name} for "
        f"IndexedSlices ({value}) with dtype {value.dtype.name}")
  # Densification needs the target shape; without dense_shape we cannot build
  # the tensor.
  if value.dense_shape is None:
    raise ValueError(
        "Tensor conversion requested for IndexedSlices for argument `value` "
        f"without dense_shape: {value!s}")
  # TODO(mrry): Consider adding static shape information to
  # IndexedSlices, to avoid using numpy here.
  if not context.executing_eagerly():
    # Best-effort warning when the densified result would be very large.
    dense_shape_value = tensor_util.constant_value(value.dense_shape)
    if dense_shape_value is not None:
      num_elements = np.prod(dense_shape_value)
      if num_elements >= _LARGE_SPARSE_NUM_ELEMENTS:
        warnings.warn(
            "Converting sparse IndexedSlices to a dense Tensor with %d "
            "elements. This may consume a large amount of memory." %
            num_elements)
    else:
      if value.dense_shape.op.type != "VariableShape":
        # VariableShape may hide static shapes behind a resource handle
        # producing a warning that isn't that useful to users.
        warnings.warn(
            "Converting sparse IndexedSlices(%s) to a dense Tensor of unknown "
            "shape. This may consume a large amount of memory." % value)
  # Scatter-add the slices into a zero-initialized dense tensor.
  return math_ops.unsorted_segment_sum(
      value.values, value.indices, value.dense_shape[0], name=name)
# Make convert_to_tensor() densify IndexedSlices via the converter above.
tensor_conversion_registry.register_tensor_conversion_function(
    IndexedSlices, _indexed_slices_to_tensor)
| |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic Node base class for all workers that run on hosts."""
import errno
import logging as std_logging
import os
import random
import signal
import sys
import time
try:
# Importing just the symbol here because the io module does not
# exist in Python 2.6.
from io import UnsupportedOperation # noqa
except ImportError:
# Python 2.6
UnsupportedOperation = None
import eventlet
from eventlet import event
from oslo.config import cfg
from keystone.openstack.common import eventlet_backdoor
from keystone.openstack.common.gettextutils import _LE, _LI, _LW
from keystone.openstack.common import importutils
from keystone.openstack.common import log as logging
from keystone.openstack.common import systemd
from keystone.openstack.common import threadgroup
# `rpc` is None when the optional RPC module cannot be imported (checked
# before calling rpc.cleanup() below).
rpc = importutils.try_import('keystone.openstack.common.rpc')
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def _sighup_supported():
    """Return True when this platform exposes SIGHUP (POSIX; not Windows)."""
    return getattr(signal, 'SIGHUP', None) is not None
def _is_daemon():
    """Best-effort check whether this process runs in the background."""
    # The process group for a foreground process will match the
    # process group of the controlling terminal. If those values do
    # not match, or ioctl() fails on the stdout file handle, we assume
    # the process is running in the background as a daemon.
    # http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics
    try:
        is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno())
    except OSError as err:
        if err.errno == errno.ENOTTY:
            # Assume we are a daemon because there is no terminal.
            is_daemon = True
        else:
            raise
    # NOTE(review): on Python 2.6 UnsupportedOperation is None (see import
    # fallback at the top of this module), and `except None:` raises
    # TypeError if this clause is ever evaluated -- confirm whether the 2.6
    # path is still supported.
    except UnsupportedOperation:
        # Could not get the fileno for stdout, so we must be a daemon.
        is_daemon = True
    return is_daemon
def _is_sighup_and_daemon(signo):
    """Return True only for a SIGHUP received while running as a daemon."""
    if _sighup_supported() and signo == signal.SIGHUP:
        return _is_daemon()
    # Any other signal (or a platform without SIGHUP) never triggers reload.
    return False
def _signo_to_signame(signo):
    """Map a handled signal number to its human-readable name."""
    names = {signal.SIGTERM: 'SIGTERM',
             signal.SIGINT: 'SIGINT'}
    if _sighup_supported():
        names[signal.SIGHUP] = 'SIGHUP'
    # Unhandled signal numbers raise KeyError, as before.
    return names[signo]
def _set_signals_handler(handler):
    """Install *handler* for every termination/reload signal we manage."""
    managed = [signal.SIGTERM, signal.SIGINT]
    if _sighup_supported():
        managed.append(signal.SIGHUP)
    for signo in managed:
        signal.signal(signo, handler)
class Launcher(object):
    """Launch one or more services and wait for them to complete."""
    def __init__(self):
        """Initialize the service launcher.

        :returns: None
        """
        # NOTE(review): `Services` is defined elsewhere in this module (not
        # visible here) -- presumably a threadgroup-backed container.
        self.services = Services()
        # Port of the optional eventlet backdoor, or None when disabled.
        self.backdoor_port = eventlet_backdoor.initialize_if_enabled()
    def launch_service(self, service):
        """Load and start the given service.

        :param service: The service you would like to start.
        :returns: None
        """
        service.backdoor_port = self.backdoor_port
        self.services.add(service)
    def stop(self):
        """Stop all services which are currently running.

        :returns: None
        """
        self.services.stop()
    def wait(self):
        """Waits until all services have been stopped, and then returns.

        :returns: None
        """
        self.services.wait()
    def restart(self):
        """Reload config files and restart service.

        :returns: None
        """
        cfg.CONF.reload_config_files()
        self.services.restart()
class SignalExit(SystemExit):
    """SystemExit variant that records which signal triggered shutdown."""

    def __init__(self, signo, exccode=1):
        # Keep the originating signal so callers can log/report it; the
        # exit code travels through the normal SystemExit machinery.
        self.signo = signo
        super(SignalExit, self).__init__(exccode)
class ServiceLauncher(Launcher):
    """Launcher that runs services in-process and exits on signals."""
    def _handle_signal(self, signo, frame):
        # Allow the process to be killed again and die from natural causes
        _set_signals_handler(signal.SIG_DFL)
        # Unwind the main loop via an exception carrying the signal number.
        raise SignalExit(signo)
    def handle_signal(self):
        """Install this launcher's signal handlers."""
        _set_signals_handler(self._handle_signal)
    def _wait_for_exit_or_signal(self, ready_callback=None):
        """Block until the services finish or a signal arrives.

        Returns a (status, signo) pair; signo is 0 unless a signal caused
        the exit.
        """
        status = None
        signo = 0
        LOG.debug('Full set of CONF:')
        CONF.log_opt_values(LOG, std_logging.DEBUG)
        try:
            if ready_callback:
                ready_callback()
            super(ServiceLauncher, self).wait()
        except SignalExit as exc:
            signame = _signo_to_signame(exc.signo)
            LOG.info(_LI('Caught %s, exiting'), signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            status = exc.code
        finally:
            # Always stop services and clean up the optional RPC layer.
            self.stop()
            if rpc:
                try:
                    rpc.cleanup()
                except Exception:
                    # We're shutting down, so it doesn't matter at this point.
                    LOG.exception(_LE('Exception during rpc cleanup.'))
        return status, signo
    def wait(self, ready_callback=None):
        """Run until exit; SIGHUP (as a daemon) restarts instead of exiting."""
        systemd.notify_once()
        while True:
            self.handle_signal()
            status, signo = self._wait_for_exit_or_signal(ready_callback)
            if not _is_sighup_and_daemon(signo):
                return status
            self.restart()
class ServiceWrapper(object):
    """Bookkeeping for one launched service: target worker count, live
    child pids, and recent fork timestamps (for fork rate limiting)."""

    def __init__(self, service, workers):
        self.service = service      # the service instance each child runs
        self.workers = workers      # desired number of child processes
        self.children = set()       # pids of currently live children
        self.forktimes = []         # recent fork times, oldest first
class ProcessLauncher(object):
    def __init__(self, wait_interval=0.01):
        """Constructor.

        :param wait_interval: The interval to sleep for between checks
                              of child process exit.
        """
        self.children = {}       # pid -> ServiceWrapper for live children
        self.sigcaught = None    # last signal caught by the parent
        self.running = True
        self.wait_interval = wait_interval
        # Parent keeps the write end open; children read it and exit when it
        # closes, i.e. when the parent dies (see _pipe_watcher).
        rfd, self.writepipe = os.pipe()
        self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
        self.handle_signal()
    def handle_signal(self):
        """(Re)install the parent's SIGTERM/SIGINT (and SIGHUP) handlers."""
        _set_signals_handler(self._handle_signal)
    def _handle_signal(self, signo, frame):
        # Record the signal and ask the main loop to wind down.
        self.sigcaught = signo
        self.running = False
        # Allow the process to be killed again and die from natural causes
        _set_signals_handler(signal.SIG_DFL)
    def _pipe_watcher(self):
        """Child-side watchdog: exit when the parent's pipe end closes."""
        # This will block until the write end is closed when the parent
        # dies unexpectedly
        self.readpipe.read()
        LOG.info(_LI('Parent process has died unexpectedly, exiting'))
        sys.exit(1)
    def _child_process_handle_signal(self):
        """Install child-specific signal handlers (differ from the parent's)."""
        # Setup child signal handlers differently
        def _sigterm(*args):
            # Reset to default so a second SIGTERM kills us outright.
            signal.signal(signal.SIGTERM, signal.SIG_DFL)
            raise SignalExit(signal.SIGTERM)
        def _sighup(*args):
            signal.signal(signal.SIGHUP, signal.SIG_DFL)
            raise SignalExit(signal.SIGHUP)
        signal.signal(signal.SIGTERM, _sigterm)
        if _sighup_supported():
            signal.signal(signal.SIGHUP, _sighup)
        # Block SIGINT and let the parent send us a SIGTERM
        signal.signal(signal.SIGINT, signal.SIG_IGN)
    def _child_wait_for_exit_or_signal(self, launcher):
        """Wait on *launcher* in a child; return (exit status, signal number)."""
        status = 0
        signo = 0
        # NOTE(johannes): All exceptions are caught to ensure this
        # doesn't fallback into the loop spawning children. It would
        # be bad for a child to spawn more children.
        try:
            launcher.wait()
        except SignalExit as exc:
            signame = _signo_to_signame(exc.signo)
            LOG.info(_LI('Child caught %s, exiting'), signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            status = exc.code
        except BaseException:
            # Unexpected failure: log it and report a distinct exit status.
            LOG.exception(_LE('Unhandled exception'))
            status = 2
        finally:
            launcher.stop()
        return status, signo
    def _child_process(self, service):
        """Post-fork setup in the child; returns a Launcher running *service*."""
        self._child_process_handle_signal()
        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        eventlet.hubs.use_hub()
        # Close write to ensure only parent has it open
        os.close(self.writepipe)
        # Create greenthread to watch for parent to close pipe
        eventlet.spawn_n(self._pipe_watcher)
        # Reseed random number generator
        random.seed()
        launcher = Launcher()
        launcher.launch_service(service)
        return launcher
    def _start_child(self, wrap):
        """Fork one child for *wrap*'s service; returns the child pid.

        In the child this never returns: the child runs its service loop and
        exits via os._exit().
        """
        if len(wrap.forktimes) > wrap.workers:
            # Limit ourselves to one process a second (over the period of
            # number of workers * 1 second). This will allow workers to
            # start up quickly but ensure we don't fork off children that
            # die instantly too quickly.
            if time.time() - wrap.forktimes[0] < wrap.workers:
                LOG.info(_LI('Forking too fast, sleeping'))
                time.sleep(1)
            wrap.forktimes.pop(0)
        wrap.forktimes.append(time.time())
        pid = os.fork()
        if pid == 0:
            # Child: run the service, restarting on daemon SIGHUP, then exit
            # without running parent cleanup handlers.
            launcher = self._child_process(wrap.service)
            while True:
                self._child_process_handle_signal()
                status, signo = self._child_wait_for_exit_or_signal(launcher)
                if not _is_sighup_and_daemon(signo):
                    break
                launcher.restart()
            os._exit(status)
        # Parent: record the new child.
        LOG.info(_LI('Started child %d'), pid)
        wrap.children.add(pid)
        self.children[pid] = wrap
        return pid
def launch_service(self, service, workers=1):
wrap = ServiceWrapper(service, workers)
LOG.info(_LI('Starting %d workers'), wrap.workers)
while self.running and len(wrap.children) < wrap.workers:
self._start_child(wrap)
    def _wait_child(self):
        """Reap at most one exited child process.

        Returns the child's ServiceWrapper (with the pid removed from it),
        or None when no known child has exited.
        """
        try:
            # Don't block if no child processes have exited
            pid, status = os.waitpid(0, os.WNOHANG)
            if not pid:
                return None
        except OSError as exc:
            # EINTR: interrupted by a signal; ECHILD: no children remain.
            # Both are normal here; anything else is re-raised.
            if exc.errno not in (errno.EINTR, errno.ECHILD):
                raise
            return None
        if os.WIFSIGNALED(status):
            sig = os.WTERMSIG(status)
            LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'),
                     dict(pid=pid, sig=sig))
        else:
            code = os.WEXITSTATUS(status)
            LOG.info(_LI('Child %(pid)s exited with status %(code)d'),
                     dict(pid=pid, code=code))
        if pid not in self.children:
            LOG.warning(_LW('pid %d not in child list'), pid)
            return None
        wrap = self.children.pop(pid)
        wrap.children.remove(pid)
        return wrap
def _respawn_children(self):
while self.running:
wrap = self._wait_child()
if not wrap:
# Yield to other threads if no children have exited
# Sleep for a short time to avoid excessive CPU usage
# (see bug #1095346)
eventlet.greenthread.sleep(self.wait_interval)
continue
while self.running and len(wrap.children) < wrap.workers:
self._start_child(wrap)
    def wait(self):
        """Loop waiting on children to die and respawning as necessary."""
        systemd.notify_once()
        LOG.debug('Full set of CONF:')
        CONF.log_opt_values(LOG, std_logging.DEBUG)
        try:
            while True:
                self.handle_signal()
                self._respawn_children()
                # No signal means that stop was called. Don't clean up here.
                if not self.sigcaught:
                    return
                signame = _signo_to_signame(self.sigcaught)
                LOG.info(_LI('Caught %s, stopping children'), signame)
                if not _is_sighup_and_daemon(self.sigcaught):
                    break
                # SIGHUP while daemonized: forward it to every child and
                # loop again to keep supervising.
                for pid in self.children:
                    os.kill(pid, signal.SIGHUP)
                self.running = True
                self.sigcaught = None
        except eventlet.greenlet.GreenletExit:
            LOG.info(_LI("Wait called after thread killed. Cleaning up."))
        # Reached on non-SIGHUP signal or GreenletExit: terminate children.
        self.stop()
def stop(self):
"""Terminate child processes and wait on each."""
self.running = False
for pid in self.children:
try:
os.kill(pid, signal.SIGTERM)
except OSError as exc:
if exc.errno != errno.ESRCH:
raise
# Wait for children to die
if self.children:
LOG.info(_LI('Waiting on %d children to exit'), len(self.children))
while self.children:
self._wait_child()
class Service(object):
    """Service object for binaries running on hosts."""

    def __init__(self, threads=1000):
        self.tg = threadgroup.ThreadGroup(threads)
        # Event used to signal that the service has finished shutting
        # itself down.
        self._done = event.Event()

    def reset(self):
        """Make the service startable again after a stop."""
        # NOTE(Fengqian): docs for Event.reset() recommend against using
        # it, so replace the event wholesale instead.
        self._done = event.Event()

    def start(self):
        pass

    def stop(self):
        """Stop the thread group and flag the shutdown as complete."""
        self.tg.stop()
        self.tg.wait()
        if not self._done.ready():
            self._done.send()

    def wait(self):
        """Block until the service has been stopped."""
        self._done.wait()
class Services(object):
    """Runs a collection of Service objects in a single thread group."""

    def __init__(self):
        self.services = []
        self.tg = threadgroup.ThreadGroup()
        self.done = event.Event()

    def add(self, service):
        """Register *service* and start it on a wrapper thread."""
        self.services.append(service)
        self.tg.add_thread(self.run_service, service, self.done)

    def stop(self):
        """Gracefully stop every service, then reap the wrapper threads."""
        for svc in self.services:
            svc.stop()
            svc.wait()
        # All services have cleaned up; release the run_service wrappers
        # that are blocked on self.done so they can die.
        if not self.done.ready():
            self.done.send()
        # reap threads:
        self.tg.stop()

    def wait(self):
        """Block until every wrapper thread has finished."""
        self.tg.wait()

    def restart(self):
        """Stop everything, then relaunch each registered service."""
        self.stop()
        self.done = event.Event()
        for svc in self.services:
            svc.reset()
            self.tg.add_thread(self.run_service, svc, self.done)

    @staticmethod
    def run_service(service, done):
        """Service start wrapper.

        :param service: service to run
        :param done: event to wait on until a shutdown is triggered
        :returns: None
        """
        service.start()
        done.wait()
def launch(service, workers=1):
    """Launch *service*, forking worker processes when workers > 1.

    Returns the launcher that now owns the service.
    """
    if workers is not None and workers != 1:
        launcher = ProcessLauncher()
        launcher.launch_service(service, workers=workers)
    else:
        # None or 1 worker: run in-process without forking.
        launcher = ServiceLauncher()
        launcher.launch_service(service)
    return launcher
| |
"""Test file descriptor operations
Made for Jython.
"""
import errno
import os
import sys
import tempfile
import test.test_support as test_support
import unittest
class TestFilenoTestCase(unittest.TestCase):
    """Exercise os-level fd operations (ftruncate/lseek/read/write) on a
    descriptor obtained via fileno().  Python 2 / Jython code: os.read
    returns str and errno 9 (EBADF) is expected after close."""
    def setUp(self):
        # NOTE(review): mktemp is race-prone but matches the rest of this
        # suite; the fd belongs to a 'w+' file object.
        self.filename = tempfile.mktemp()
        self.fp = open(self.filename, 'w+')
        self.fd = self.fp.fileno()
    def tearDown(self):
        if self.fp:
            self.fp.close()
        os.remove(self.filename)
    def test_ftruncate(self):
        self.fp.write('jython filenos')
        self.fp.flush()
        os.fsync(self.fd)
        self.assertEqual(os.path.getsize(self.filename), 14)
        os.ftruncate(self.fd, 8)
        self.assertEqual(os.path.getsize(self.filename), 8)
        os.ftruncate(self.fd, 0)
        self.assertEqual(os.path.getsize(self.filename), 0)
        self.fp.close()
        # errno 9 == EBADF: the descriptor is dead once the file is closed
        raises(IOError, 9, os.ftruncate, self.fd, 0)
    def test_lseek(self):
        self.assertEqual(os.lseek(self.fd, 0, 1), 0)
        os.write(self.fd, 'jython filenos')
        os.lseek(self.fd, 7, 0)
        self.assertEqual(os.read(self.fd, 7), 'filenos')
        self.fp.close()
        raises(OSError, 9, os.lseek, self.fd, 0, 1)
    def test_read(self):
        self.fp.write('jython filenos')
        self.fp.flush()
        self.fp.seek(0)
        result = os.read(self.fd, 7)
        # Python 2 semantics: os.read returns str, not bytes
        self.assertTrue(isinstance(result, str))
        self.assertEqual(result, 'jython ')
        self.assertEqual(os.read(self.fd, 99), 'filenos')
        self.fp.close()
        raises(OSError, 9, os.read, self.fd, 1)
    def test_write(self):
        os.write(self.fd, 'jython filenos')
        self.fp.seek(0)
        self.assertEqual(self.fp.read(), 'jython filenos')
        self.fp.close()
        raises(OSError, 9, os.write, self.fd, 'The Larch')
class TestOsOpenTestCase(unittest.TestCase):
    """Exercise os.open flag combinations (O_CREAT/O_APPEND/O_TRUNC/
    O_EXCL/O_SYNC) and their fallback behaviour on Jython/Windows.
    errno 9 (EBADF) is expected when writing a read-only fd."""
    def setUp(self):
        self.filename = tempfile.mktemp()
        self.dir = None
        self.fd = None
    def tearDown(self):
        # Best-effort cleanup: tests may already have closed the fd.
        if self.fd:
            try:
                os.close(self.fd)
            except:
                pass
        if os.path.exists(self.filename):
            os.remove(self.filename)
        if self.dir:
            os.rmdir(self.dir)
    def test_open(self):
        # XXX: assert the mode of the file
        self.fd = os.open(self.filename, os.O_WRONLY | os.O_CREAT)
        self.assert_(os.path.exists(self.filename))
        os.write(self.fd, 'jython')
        os.close(self.fd)
        self.fd = os.open(self.filename, os.O_WRONLY | os.O_APPEND)
        os.write(self.fd, ' filenos')
        os.close(self.fd)
        fp = open(self.filename)
        self.assertEquals(fp.read(), 'jython filenos')
        fp.close()
        # falls back to read only without O_WRONLY/O_RDWR
        self.fd = os.open(self.filename, os.O_APPEND)
        raises(OSError, 9, os.write, self.fd, 'new')
        # Acts as append on windows (seeks to the end)
        os.lseek(self.fd, 0, 0)
        self.assertEquals(os.read(self.fd, len('jython filenos')), 'jython filenos')
        os.close(self.fd)
        # falls back to read only without O_WRONLY/O_RDWR
        self.fd = os.open(self.filename, os.O_CREAT)
        raises(OSError, 9, os.write, self.fd, 'new')
        self.assertEquals(os.read(self.fd, len('jython filenos')), 'jython filenos')
        os.close(self.fd)
        # interpreted as RDWR
        self.fd = os.open(self.filename, os.O_RDONLY | os.O_RDWR)
        os.write(self.fd, 'test')
        os.lseek(self.fd, 0, 0)
        self.assertEquals(os.read(self.fd, 4), 'test')
        os.close(self.fd)
    def test_open_truncate(self):
        """O_TRUNC empties the file regardless of read/write mode."""
        fp = open(self.filename, 'w')
        fp.write('hello')
        fp.close()
        self.assertEquals(os.path.getsize(self.filename), 5)
        self.fd = os.open(self.filename, os.O_TRUNC | os.O_RDWR)
        self.assertEquals(os.path.getsize(self.filename), 0)
        os.write(self.fd, 'truncated')
        os.lseek(self.fd, 0, 0)
        self.assertEquals(os.read(self.fd, len('truncated')), 'truncated')
        os.close(self.fd)
        self.fd = os.open(self.filename, os.O_TRUNC | os.O_WRONLY)
        self.assertEquals(os.path.getsize(self.filename), 0)
        os.write(self.fd, 'write only truncated')
        raises(OSError, 9, os.read, self.fd, 99)
        os.close(self.fd)
        fd = open(self.filename)
        self.assertEquals(fd.read(), 'write only truncated')
        fd.close()
        # Both fail on Windows, errno 22
        """
        # falls back to read only without O_WRONLY/O_RDWR, but truncates
        self.fd = os.open(self.filename, os.O_TRUNC)
        self.assertEquals(os.path.getsize(self.filename), 0)
        raises(OSError, 9, os.write, self.fd, 'new')
        self.assertEquals(os.read(self.fd, 99), '')
        os.close(self.fd)
        fp = open(self.filename, 'w')
        fp.write('and ')
        fp.close()
        self.assertEquals(os.path.getsize(self.filename), 4)
        # append with no write falls back to read, but still truncates
        self.fd = os.open(self.filename, os.O_TRUNC | os.O_APPEND)
        self.assertEquals(os.path.getsize(self.filename), 0)
        raises(OSError, 9, os.write, self.fd, 'new')
        os.close(self.fd)
        fp = open(self.filename, 'w')
        fp.write('and ')
        fp.close()
        self.assertEquals(os.path.getsize(self.filename), 4)
        """
    def test_open_exclusive(self):
        """O_EXCL only matters together with O_CREAT (errno 17 = EEXIST)."""
        self.assert_(not os.path.exists(self.filename))
        # fails without O_CREAT
        raises(OSError, (2, self.filename), os.open, self.filename, os.O_EXCL)
        self.assert_(not os.path.exists(self.filename))
        # creates, read only
        self.fd = os.open(self.filename, os.O_EXCL | os.O_CREAT)
        self.assert_(os.path.exists(self.filename))
        raises(OSError, 9, os.write, self.fd, 'jython')
        self.assertEquals(os.read(self.fd, 99), '')
        os.close(self.fd)
        # not exclusive unless creating
        os.close(os.open(self.filename, os.O_EXCL))
        raises(OSError, (17, self.filename),
               os.open, self.filename, os.O_CREAT | os.O_EXCL)
        raises(OSError, (17, self.filename),
               os.open, self.filename, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
        raises(OSError, (17, self.filename),
               os.open, self.filename, os.O_CREAT | os.O_RDWR | os.O_EXCL)
        os.remove(self.filename)
        self.fd = os.open(self.filename, os.O_EXCL | os.O_RDWR | os.O_CREAT)
        os.write(self.fd, 'exclusive')
        os.lseek(self.fd, 0, 0)
        self.assertEquals(os.read(self.fd, len('exclusive')), 'exclusive')
    def test_open_sync(self):
        # O_SYNC is not available on all platforms
        if not hasattr(os, 'O_SYNC'):
            return
        # Just ensure this works
        self.fd = os.open(self.filename, os.O_SYNC | os.O_WRONLY | os.O_CREAT)
        self.assert_(os.path.exists(self.filename))
        os.write(self.fd, 'jython')
        raises(OSError, 9, os.read, self.fd, 99)
        os.close(self.fd)
        os.remove(self.filename)
        self.fd = os.open(self.filename, os.O_SYNC | os.O_RDWR | os.O_CREAT)
        self.assert_(os.path.exists(self.filename))
        os.write(self.fd, 'jython')
        os.lseek(self.fd, 0, 0)
        self.assertEquals(os.read(self.fd, len('jython')), 'jython')
        os.close(self.fd)
    def test_open_sync_dir(self):
        if not hasattr(os, 'O_SYNC'):
            return
        self.dir = tempfile.mkdtemp()
        try:
            # Opening a directory read/write should fail with EISDIR
            self.fd = os.open(self.dir, os.O_SYNC | os.O_RDWR)
        except OSError, ose:
            assert ose.errno == errno.EISDIR, ose.errno
    def test_bad_open(self):
        # errno 2 (ENOENT): file missing and no O_CREAT requested.
        # NOTE(review): os.O_WRONLY appears twice; the first was probably
        # meant to be os.O_RDONLY -- confirm intent.
        for mode in (os.O_WRONLY, os.O_WRONLY, os.O_RDWR):
            raises(OSError, (2, self.filename), os.open, self.filename, mode)
        open(self.filename, 'w').close()
        # errno 22 (EINVAL): O_WRONLY and O_RDWR are mutually exclusive
        raises(OSError, (22, self.filename),
               os.open, self.filename, os.O_WRONLY | os.O_RDWR)
class TestOsFdopenTestCase(unittest.TestCase):
    """Exercise os.fdopen on fds borrowed from existing file objects.
    Expected messages are relaxed (None) where platforms disagree."""
    def setUp(self):
        self.filename = tempfile.mktemp()
        self.fd = None
    def tearDown(self):
        # Best-effort cleanup: the fd may already be closed by the test.
        if self.fd:
            try:
                os.close(self.fd)
            except:
                pass
        if os.path.exists(self.filename):
            os.remove(self.filename)
    def test_fdopen(self):
        origw_fp = open(self.filename, 'w')
        origw = origw_fp.fileno()
        origr_fp = open(self.filename, 'r')
        origr = origr_fp.fileno()
        # Mode must begin with rwa
        raises(ValueError, "invalid file mode 'b'",
               os.fdopen, origr, 'b')
        # Refuse modes the original file doesn't support
        # XXX: allowed on windows CPython
        """
        raises(OSError, '[Errno 22] Invalid argument',
               os.fdopen, origw, 'r')
        raises(OSError, '[Errno 22] Invalid argument',
               os.fdopen, origr, 'w')
        """
        fp = os.fdopen(origw, 'w')
        fp.write('fdopen')
        # Windows CPython doesn't raise an exception here
        #raises(IOError, '[Errno 9] Bad file descriptor',
        #       fp.read, 7)
        fp.close()
        fp = os.fdopen(origr)
        self.assertEquals(fp.read(), 'fdopen')
        # Windows CPython raises IOError [Errno 0] Error
        #raises(IOError, '[Errno 9] Bad file descriptor',
        #       fp.write, 'test')
        raises(IOError, None,
               fp.write, 'test')
        fp.close()
        # Windows CPython raises OSError [Errno 0] Error for both these
        #raises(OSError, '[Errno 9] Bad file descriptor',
        #       os.fdopen, origw, 'w')
        #raises(OSError, '[Errno 9] Bad file descriptor',
        #       os.fdopen, origr, 'r')
        raises(OSError, None,
               os.fdopen, origw, 'w')
        raises(OSError, None,
               os.fdopen, origr, 'r')
        # These all raise IO/OSErrors on FreeBSD
        try:
            origw_fp.close()
        except:
            pass
        try:
            origr_fp.close()
        except:
            pass
        try:
            os.close(origw)
        except:
            pass
        try:
            os.close(origr)
        except:
            pass
def raises(exc, expected, callable, *args):
"""Ensure the specified call raises exc.
expected is compared against the exception message if not None. It
can be a str, an errno or a 2 item tuple of errno/filename. The
latter two being for comparison against EnvironmentErrors.
"""
if expected:
if isinstance(expected, str):
msg = expected
else:
errno = expected[0] if isinstance(expected, tuple) else expected
msg = '[Errno %d] %s' % (errno, os.strerror(errno))
if isinstance(expected, tuple):
msg += ': %r' % expected[1]
try:
callable(*args)
except exc, val:
if expected and str(val) != msg:
raise test_support.TestFailed(
"Message %r, expected %r" % (str(val), msg))
else:
raise test_support.TestFailed("Expected %s" % exc)
def test_main():
    """Run every test case in this module."""
    test_support.run_unittest(
        TestFilenoTestCase,
        TestOsOpenTestCase,
        TestOsFdopenTestCase,
    )


if __name__ == '__main__':
    test_main()
| |
import webapp2
import cgi
import datetime
from datetime import datetime
import urllib
import jinja2
import os
import json
import models
from google.appengine.api import users
from models import *
def parseDateString(string):
    """To parse strings like this: 2013-04-12T10:29:00Z

    Only the date part (everything before the 'T') is used; the time
    component is discarded.
    """
    date_part = string.partition('T')[0]
    return datetime.strptime(date_part, "%Y-%m-%d")
# Template environment rooted at this file's directory so templates are
# resolved relative to the application source.
jinja_environment = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
class FavouriteHandler(webapp2.RequestHandler):
    """Favourites a series and queues all its current episodes for download."""

    def post(self):
        user = users.get_current_user()
        if not user:
            self.response.out.write("error: Not logged in")
            return
        seriesName = self.request.get('series')
        # Store fact that this series name is to be downloaded in future
        series = Series(user_key(user))
        series.name = seriesName
        series.blocked = False
        series.put()
        # And also download all current episodes matching this series,
        # writing them back in a single batch put.
        query = Episode.all().ancestor(user_key(user)).filter('seriesName =', seriesName)
        episodes = []
        for episode in query.run():
            episode.status = 'download'
            episodes.append(episode)
        db.put(episodes)
        self.response.out.write("ok: Marked %s episodes for download" % len(episodes))
class BlockHandler(webapp2.RequestHandler):
    """Blocks a series and flags all of its current episodes as ignored."""

    def post(self):
        user = users.get_current_user()
        if not user:
            self.response.out.write("error: Not logged in")
            return
        seriesName = self.request.get('series')
        # Remember that this series should be skipped from now on.
        series = Series(user_key(user))
        series.name = seriesName
        series.blocked = True
        series.put()
        # Flag every existing episode of the series as ignored, batched.
        query = Episode.all().ancestor(user_key(user)).filter('seriesName =', seriesName)
        episodes = []
        for episode in query.run():
            episode.status = 'ignore'
            episodes.append(episode)
        db.put(episodes)
        self.response.out.write("ok: Ignored %s episodes" % len(episodes))
class DownloadHandler(webapp2.RequestHandler):
    """Flags a single episode (looked up by pid) for download."""

    def post(self):
        user = users.get_current_user()
        if not user:
            self.response.out.write("error: Not logged in")
            return
        pid = self.request.get('pid')
        episode = Episode.all().ancestor(user_key(user)).filter('pid =', pid).get()
        if episode is None:
            self.response.out.write("error: Episode not found")
            return
        episode.status = 'download'
        episode.put()
        self.response.out.write("ok: Added to download queue")
class TrashHandler(webapp2.RequestHandler):
    """Marks a single episode (looked up by pid) as ignored."""

    def post(self):
        user = users.get_current_user()
        if not user:
            self.response.out.write("error: Not logged in")
            return
        pid = self.request.get('pid')
        episode = Episode.all().ancestor(user_key(user)).filter('pid =', pid).get()
        if episode is None:
            self.response.out.write("error: Episode not found")
            return
        episode.status = 'ignore'
        episode.put()
        self.response.out.write("ok: trashed")
class DownloadCompleteHandler(webapp2.RequestHandler):
    """Records that an episode (looked up by pid) finished downloading."""

    def post(self):
        user = users.get_current_user()
        if not user:
            self.response.out.write("error: Not logged in")
            return
        pid = self.request.get('pid')
        episode = Episode.all().ancestor(user_key(user)).filter('pid =', pid).get()
        if episode is None:
            self.response.out.write("error: Episode not found")
            return
        episode.status = 'downloaded'
        episode.put()
        self.response.out.write("ok: downloaded")
class MainPage(webapp2.RequestHandler):
    """Renders the main episode-listing page for the signed-in user."""

    def get(self):
        user = users.get_current_user()
        if not user:
            self.redirect(users.create_login_url(self.request.uri))
            return
        template_values = {
            'logout_url': users.create_logout_url(self.request.uri)
        }
        # Only expose the listings section when the user has episodes.
        if Episode.all().ancestor(user_key(user)).count() > 0:
            template_values['have_episodes'] = True
        template = jinja_environment.get_template('index.html')
        self.response.out.write(template.render(template_values))
class ReadyHandler(webapp2.RequestHandler):
    """Shows episodes that have been downloaded and are ready to watch."""

    def get(self):
        user = users.get_current_user()
        if not user:
            self.redirect(users.create_login_url(self.request.uri))
            return
        query = Episode.all().ancestor(user_key(user)).filter('status =', Episode.status.DOWNLOADED)
        template = jinja_environment.get_template('ready.html')
        self.response.out.write(template.render({
            'episodes': query,
            'logout_url': users.create_logout_url(self.request.uri)
        }))
class FavouritesHandler(webapp2.RequestHandler):
    """Lists the user's favourited (non-blocked) series."""

    def get(self):
        user = users.get_current_user()
        if not user:
            self.redirect(users.create_login_url(self.request.uri))
            return
        series = Series.all().ancestor(user_key(user)).filter('blocked =', False)
        template = jinja_environment.get_template('favourites.html')
        self.response.out.write(template.render({
            'logout_url': users.create_logout_url(self.request.uri),
            'series': series
        }))
class BlockedHandler(webapp2.RequestHandler):
    """Lists all series the user has blocked."""

    def get(self):
        user = users.get_current_user()
        if user:
            logout_url = users.create_logout_url(self.request.uri)
            # Use the same 'blocked =' filter spelling as FavouritesHandler
            # (the trailing space here was inconsistent; GAE tolerates
            # both, but consistency avoids confusion).
            series = Series.all().ancestor(user_key(user)).filter('blocked =', True)
            template_values = {
                'logout_url': logout_url,
                'series': series
            }
            template = jinja_environment.get_template('blocked.html')
            self.response.out.write(template.render(template_values))
        else:
            self.redirect(users.create_login_url(self.request.uri))
class EpisodesJsonHandler(webapp2.RequestHandler):
    """Returns main page of episode listings as json"""

    def get(self):
        user = users.get_current_user()
        if not user:
            self.response.out.write("error")
            return
        # Show all episodes that need a decision making on them,
        # soonest-to-expire first, capped at ten.
        episode_query = Episode.all().ancestor(user_key(user)).filter('status =', Episode.status.NEW).order('expiry')
        episodes = episode_query.fetch(10)
        self.response.out.write(json.dumps(episodes, cls=EpisodeEncoder))
class IncomingCountHandler(webapp2.RequestHandler):
    """Returns count of incoming episodes that need a decision"""

    def get(self):
        user = users.get_current_user()
        if not user:
            self.response.out.write("error")
            return
        pending = Episode.all().ancestor(user_key(user)).filter('status =', Episode.status.NEW)
        self.response.out.write(pending.count())
class DownloadQueueHandler(webapp2.RequestHandler):
    """Shows the episodes currently queued for download."""

    def get(self):
        user = users.get_current_user()
        if not user:
            self.redirect(users.create_login_url(self.request.uri))
            return
        query = Episode.all().ancestor(user_key(user)).filter('status =', Episode.status.DOWNLOAD)
        template = jinja_environment.get_template('download_queue.html')
        self.response.out.write(template.render({
            'episodes': query,
            'logout_url': users.create_logout_url(self.request.uri)
        }))
class DownloadQueueJsonHandler(webapp2.RequestHandler):
    """Returns download queue of episode ids as json"""

    def get(self):
        user = users.get_current_user()
        if user:
            query = Episode.all().ancestor(user_key(user)).filter('status =', Episode.status.DOWNLOAD)
            # Serialize just the pids (list comprehension replaces the
            # manual append loop); the downloader only needs identifiers.
            pids = [episode.pid for episode in query]
            self.response.out.write(json.dumps(pids))
        else:
            self.response.out.write("error")
class UploadHandler(webapp2.RequestHandler):
    """Receives new episode listings"""
    def get(self):
        # Render the manual upload form for signed-in users.
        if users.get_current_user():
            template = jinja_environment.get_template('new.html')
            self.response.out.write(template.render())
        else:
            self.redirect(users.create_login_url(self.request.uri))
    def post(self):
        """Create an Episode from the posted fields, de-duplicating on pid
        and auto-setting its status from the series' favourite/block state.

        Responds with the resulting status string, or 'duplicate'.
        """
        user = users.get_current_user()
        if user:
            # Check whether identifier already exists in db
            pid = self.request.get('pid')
            query = Episode.all().ancestor(user_key(user)).filter('pid =', pid)
            count = query.count(limit = 1)
            if count == 0:
                episode = Episode(user_key(user))
                episode.categories = self.request.get('categories')
                episode.description = self.request.get('description')
                # NOTE(review): int()/parseDateString will raise on
                # malformed input -- presumably the uploader is trusted.
                episode.duration = int(self.request.get('duration'))
                episode.expiry = parseDateString(self.request.get('expiry'))
                episode.firstBroadcast = parseDateString(self.request.get('firstBroadcast'))
                episode.pid = pid
                episode.thumbnailUrl = self.request.get('thumbnailUrl')
                seriesName = self.request.get('seriesName')
                episode.seriesName = seriesName
                episode.title = self.request.get('title')
                # Check whether episode is blocked or favourited.
                # If so, we can automatically flag for trash/download
                episode.status = 'new'
                series = Series.all().ancestor(user_key(user)).filter('name =', seriesName).get()
                if series:
                    if series.blocked:
                        episode.status = 'ignore'
                    else:
                        episode.status = 'download'
                episode.put()
                # Tell the uploader which status the episode ended up in.
                self.response.out.write(episode.status)
            else:
                self.response.out.write('duplicate')
        else:
            self.redirect(users.create_login_url(self.request.uri))
# URL routing table: maps each application path to its request handler.
app = webapp2.WSGIApplication([('/', MainPage),
                               ('/download_queue', DownloadQueueHandler),
                               ('/ready', ReadyHandler),
                               ('/favourites', FavouritesHandler),
                               ('/blocked', BlockedHandler),
                               ('/episodes.json', EpisodesJsonHandler),
                               ('/incoming_count', IncomingCountHandler),
                               ('/download_queue.json', DownloadQueueJsonHandler),
                               ('/favourite', FavouriteHandler),
                               ('/block', BlockHandler),
                               ('/download', DownloadHandler),
                               ('/downloaded', DownloadCompleteHandler),
                               ('/trash', TrashHandler),
                               ('/upload', UploadHandler)],
                              debug=True)
| |
# Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
import webob
import webob.exc
import nova.api.auth
from nova.i18n import _
from nova.openstack.common import jsonutils
from nova.openstack.common.middleware import request_id
from nova import test
CONF = cfg.CONF
class TestNovaKeystoneContextMiddleware(test.NoDBTestCase):
    """NovaKeystoneContext should turn keystone auth headers into a
    nova.context stored in the WSGI environ."""
    def setUp(self):
        super(TestNovaKeystoneContextMiddleware, self).setUp()
        @webob.dec.wsgify()
        def fake_app(req):
            # Capture the context the middleware produced for assertions.
            self.context = req.environ['nova.context']
            return webob.Response()
        self.context = None
        self.middleware = nova.api.auth.NovaKeystoneContext(fake_app)
        self.request = webob.Request.blank('/')
        self.request.headers['X_TENANT_ID'] = 'testtenantid'
        self.request.headers['X_AUTH_TOKEN'] = 'testauthtoken'
        self.request.headers['X_SERVICE_CATALOG'] = jsonutils.dumps({})
    def test_no_user_or_user_id(self):
        # Without X_USER/X_USER_ID the middleware must reject the request.
        response = self.request.get_response(self.middleware)
        self.assertEqual(response.status, '401 Unauthorized')
    def test_user_only(self):
        # NOTE(review): this sets X_USER_ID while test_user_id_only sets
        # X_USER -- the two test names look swapped; confirm upstream.
        self.request.headers['X_USER_ID'] = 'testuserid'
        response = self.request.get_response(self.middleware)
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(self.context.user_id, 'testuserid')
    def test_user_id_only(self):
        self.request.headers['X_USER'] = 'testuser'
        response = self.request.get_response(self.middleware)
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(self.context.user_id, 'testuser')
    def test_user_id_trumps_user(self):
        self.request.headers['X_USER_ID'] = 'testuserid'
        self.request.headers['X_USER'] = 'testuser'
        response = self.request.get_response(self.middleware)
        self.assertEqual(response.status, '200 OK')
        self.assertEqual(self.context.user_id, 'testuserid')
    def test_invalid_service_catalog(self):
        # Unparseable X_SERVICE_CATALOG must surface as a server error.
        self.request.headers['X_USER'] = 'testuser'
        self.request.headers['X_SERVICE_CATALOG'] = "bad json"
        response = self.request.get_response(self.middleware)
        self.assertEqual(response.status, '500 Internal Server Error')
    def test_request_id_extracted_from_env(self):
        # The request id placed in the environ by the request_id
        # middleware should be copied onto the context.
        req_id = 'dummy-request-id'
        self.request.headers['X_PROJECT_ID'] = 'testtenantid'
        self.request.headers['X_USER_ID'] = 'testuserid'
        self.request.environ[request_id.ENV_REQUEST_ID] = req_id
        self.request.get_response(self.middleware)
        self.assertEqual(req_id, self.context.request_id)
class TestKeystoneMiddlewareRoles(test.NoDBTestCase):
    """Role parsing: the newer X_ROLES header should take precedence over
    the deprecated X_ROLE header."""
    def setUp(self):
        super(TestKeystoneMiddlewareRoles, self).setUp()
        @webob.dec.wsgify()
        def role_check_app(req):
            # Inner app asserts on the roles the middleware parsed.
            context = req.environ['nova.context']
            if "knight" in context.roles and "bad" not in context.roles:
                return webob.Response(status="200 Role Match")
            elif context.roles == ['']:
                return webob.Response(status="200 No Roles")
            else:
                raise webob.exc.HTTPBadRequest(_("unexpected role header"))
        self.middleware = nova.api.auth.NovaKeystoneContext(role_check_app)
        self.request = webob.Request.blank('/')
        self.request.headers['X_USER'] = 'testuser'
        self.request.headers['X_TENANT_ID'] = 'testtenantid'
        self.request.headers['X_AUTH_TOKEN'] = 'testauthtoken'
        self.request.headers['X_SERVICE_CATALOG'] = jsonutils.dumps({})
        # NOTE(review): self.roles appears unused by the tests below.
        self.roles = "pawn, knight, rook"
    def test_roles(self):
        # Test that the newer style role header takes precedence.
        self.request.headers['X_ROLES'] = 'pawn,knight,rook'
        self.request.headers['X_ROLE'] = 'bad'
        response = self.request.get_response(self.middleware)
        self.assertEqual(response.status, '200 Role Match')
    def test_roles_empty(self):
        self.request.headers['X_ROLES'] = ''
        response = self.request.get_response(self.middleware)
        self.assertEqual(response.status, '200 No Roles')
    def test_deprecated_role(self):
        # Test fallback to older role header.
        self.request.headers['X_ROLE'] = 'pawn,knight,rook'
        response = self.request.get_response(self.middleware)
        self.assertEqual(response.status, '200 Role Match')
    def test_role_empty(self):
        self.request.headers['X_ROLE'] = ''
        response = self.request.get_response(self.middleware)
        self.assertEqual(response.status, '200 No Roles')
    def test_no_role_headers(self):
        # Test with no role headers set.
        response = self.request.get_response(self.middleware)
        self.assertEqual(response.status, '200 No Roles')
class TestPipeLineFactory(test.NoDBTestCase):
    """Tests for the paste pipeline factories in nova.api.auth."""
    class FakeFilter(object):
        # Records the app it wraps so the pipeline order can be verified.
        def __init__(self, name):
            self.name = name
            self.obj = None
        def __call__(self, obj):
            self.obj = obj
            return self
    class FakeApp(object):
        def __init__(self, name):
            self.name = name
    class FakeLoader():
        # Stands in for the paste loader: returns fakes keyed by name.
        def get_filter(self, name):
            return TestPipeLineFactory.FakeFilter(name)
        def get_app(self, name):
            return TestPipeLineFactory.FakeApp(name)
    def _test_pipeline(self, pipeline, app):
        """Assert *app* is the chain of filters/app named in *pipeline*.

        Every element but the last must be a FakeFilter wrapping the next
        one; the final element must be the FakeApp.
        """
        for p in pipeline.split()[:-1]:
            self.assertEqual(app.name, p)
            self.assertIsInstance(app, TestPipeLineFactory.FakeFilter)
            app = app.obj
        self.assertEqual(app.name, pipeline.split()[-1])
        self.assertIsInstance(app, TestPipeLineFactory.FakeApp)
    def test_pipeline_factory(self):
        fake_pipeline = 'test1 test2 test3'
        app = nova.api.auth.pipeline_factory(
            TestPipeLineFactory.FakeLoader(), None, noauth=fake_pipeline)
        self._test_pipeline(fake_pipeline, app)
    def test_pipeline_factory_v21(self):
        fake_pipeline = 'test1 test2 test3'
        app = nova.api.auth.pipeline_factory_v21(
            TestPipeLineFactory.FakeLoader(), None, noauth=fake_pipeline)
        self._test_pipeline(fake_pipeline, app)
    def test_pipeline_factory_with_rate_limits(self):
        CONF.set_override('api_rate_limit', True)
        CONF.set_override('auth_strategy', 'keystone')
        fake_pipeline = 'test1 test2 test3'
        app = nova.api.auth.pipeline_factory(
            TestPipeLineFactory.FakeLoader(), None, keystone=fake_pipeline)
        self._test_pipeline(fake_pipeline, app)
    def test_pipeline_factory_without_rate_limits(self):
        CONF.set_override('auth_strategy', 'keystone')
        fake_pipeline1 = 'test1 test2 test3'
        fake_pipeline2 = 'test4 test5 test6'
        # The nolimit pipeline should be chosen over the limited one.
        app = nova.api.auth.pipeline_factory(
            TestPipeLineFactory.FakeLoader(), None,
            keystone_nolimit=fake_pipeline1,
            keystone=fake_pipeline2)
        self._test_pipeline(fake_pipeline1, app)
    def test_pipeline_factory_missing_nolimits_pipeline(self):
        CONF.set_override('api_rate_limit', False)
        CONF.set_override('auth_strategy', 'keystone')
        fake_pipeline = 'test1 test2 test3'
        app = nova.api.auth.pipeline_factory(
            TestPipeLineFactory.FakeLoader(), None, keystone=fake_pipeline)
        self._test_pipeline(fake_pipeline, app)
    def test_pipeline_factory_compatibility_with_v3(self):
        CONF.set_override('api_rate_limit', True)
        CONF.set_override('auth_strategy', 'keystone')
        fake_pipeline = 'test1 ratelimit_v3 test3'
        app = nova.api.auth.pipeline_factory(
            TestPipeLineFactory.FakeLoader(), None, keystone=fake_pipeline)
        # 'ratelimit_v3' is expected to be dropped from the pipeline.
        self._test_pipeline('test1 test3', app)
| |
#!/usr/bin/env python
# -*- coding: utf-8-*-
class Config():
#default_db = 'db'
default_subject = 'eecs'
default_db = 'db' #'2016-db'
#ip_adress="172.16.14.82"
ip_adress="localhost:5000"
igon_authorized = True
engin_list_file = 'db/metadata/engin_list'
engin_type_file = 'db/metadata/engin_type'
engin_extension_file = 'db/metadata/engin_extension'
# enable urlFromServer enhancedLink(..., urlFromServer=True)
# engin query order smart_link_engin -> smart_engin_for_tag -> smart_engin_for_extension
smart_link_engin = 'glucky' #'google'
default_engin_searchbox = 'zhihu'
igon_log_for_modules = ['star', 'exclusive', 'content'] #dialog
more_button_for_history_module = True
smart_engin_lucky_mode_for_account = True
smart_engin_for_tag_batch_open = False
smart_engin_for_command_batch_open = ['twitter', 'baidu', 'amazon']
max_account_for_tag_batch_open = 10
recommend_engin = True
recommend_engin_num = 23
recommend_engin_num_dialog = 23
recommend_engin_type = 'star' #ref db/metadata/engin_type
recommend_engin_by_tag = False
#smart_engin_for_tag = {}
smart_engin_for_tag = {'weixin' : ['weixin.so', 'weixinla', 'chuansong', 'toutiao', 'weibo', 'qoofan.com', 'glucky'],\
'fb-pages' : ['fb-pages', 'glucky'],\
'baijiahao' : ['baijiahao'],\
'conference' : ['glucky', 'google', 'd:event']}
'''
smart_engin_for_tag = {'instructors' : ['twitter', 'youtube'],\
'university' : 'youtube',\
'professor' : ['phdtree', 'glucky'],\
'g-plus' : 'plus.google',\
'company' : 'glucky',\
'website' : 'glucky',\
'director' : ['twitter', 'glucky'],\
'job' : ['google', 'd:job']}
#'topic' : ''}
'''
# ---- smart-link / dialog engine settings ----
#smart_engin_for_extension = {'' : ''}
#smart_engin_for_dialog = ['google', 'youtube', 'twitter', 'baidu']
smart_engin_for_dialog = []
command_for_dialog = ['add2library', 'trace', 'kgraph', 'exclusive']
command_for_tag_dialog = ['tolist', 'merger']
recommend_engin_type_for_dialog = '' #'star' #ref db/metadata/engin_type
smart_link_max_text_len = 60
smart_link_br_len = 60
replace_with_smart_link = False
page_item_count = 100#63
# ---- start page / library menu ----
# NOTE(review): 'ip_adress' (sic) is defined elsewhere in this file; the
# misspelling is part of the public name and must not be "fixed" here.
start_library_title = 'add some record from here!'
start_library_url = 'http://' + ip_adress + '/?db=other/&key=degree-chart-mit2016&column=3'
menu_library_list = ['ai-library', 'multimedia-library', 'mind-library', 'neuro-library', 'gene-library', 'math-library', 'phys-library', 'chem-library', 'business-finance-library', 'engineering-library', 'product-library', 'political-library']
# Active default library; the commented lines below are kept as a
# quick-switch list of every known alternative value.
#default_library = ''
default_library = 'ai-library'
#default_library = 'engineering-library'
#default_library = 'multimedia-library'
#default_library = 'mind-library'
#default_library = 'neuro-library'
#default_library = 'gene-library'
#default_library = 'math-library'
#default_library = 'phys-library'
#default_library = 'chem-library'
#default_library = 'business-finance-library'
#default_library = 'medical-library'
#default_library = 'energy-library'
#default_library = 'aerospace-library'
#default_library = 'universe-library'
#default_library = 'earth-library'
#default_library = 'social-library'
#default_library = 'art-library'
#default_library = 'literature-library'
#default_library = 'political-library'
#default_library = 'thought-library'
#default_library = 'media-library'
#default_library = 'telecom-library'
#default_library = 'manufacture-library'
#default_library = 'traffic-library'
#default_library = 'retail-library'
#default_library = 'building-library'
#default_library = 'life-library'
#default_library = 'sport-library'
#default_library = 'entertainment-library'
#default_library = 'military-library'
#default_library = 'product-library'
#default_library = 'research-library'
# ---- UI display toggles and layout metrics ----
#show random preview when click nav link
track_mode = False
disable_default_engin = True
disable_nav_engins = True # generating the nav-engines HTML takes ~2s, so it is disabled
# NOTE(review): string "false", not the boolean False — consumers appear to
# expect a string here; confirm before changing the type.
disable_thumb = "false"
disable_icon = True
disable_star_engin = False
disable_reference_image = False
# NOTE(review): 'hiden_*' (sic, for "hidden") names are referenced elsewhere;
# keep the spelling.
hiden_record_id = True
hiden_record_id_commandline = False
hiden_parentid_record = True
hiden_engins = True
center_content = False
content_margin_left = '15px'
content_margin_top = '10px'
split_height = 2
title_font_size = 0
#do not show nav links, only show extension links
extension_mode = False
#handled by handleQueryNavTab of app.py first
default_tab = 'history' #'content'
second_default_tab = 'bookmark'#'figures'
default_width = "54" #"79"
column_num = "3"
# ---- cell/row sizing ----
custom_cell_len = 88
split_length = custom_cell_len + 15
custom_cell_row_list = [50, 40, 30]
cell_len = 89 # cell_len >= course_num_len + 1 + course_name_len + 3
course_name_len = 70
course_num_len = 10
# ---- output rendering options ----
color_index = 0
output_with_color = False
output_with_style = False
output_with_describe = False
output_navigation_links = False
merger_result = False
top_row = 0
old_top_row = 0
max_links_row = 10
max_nav_link_row = 11
max_nav_links_row = 7
default_links_row = 2
css_style_type = 0
plugins_mode = False
auto_library_cell_len = False
display_all_library = True
hiden_content_after_search = True
background_after_click = '#E9967A' ##CCEEFF
fontsize_after_click = ''
# Favourite links shown in the UI; commented entries are retained alternatives.
fav_links = { #'arxiv' : ip_adress + '/?db=eecs/papers/arxiv/&key=?',\
'civilization' : ip_adress + '/?db=other/&key=civilization2017&column=2',\
#'bioRxiv' : 'cshsymposium.com/biorxiv/chartdailydetail.php',\
#'rss' : ip_adress + '/?db=rss/&key=rss2016',\
#'disk' : ip_adress + '/?db=other/&key=disk2016',\
#'github' : ip_adress + '/?db=eecs/projects/github/&key=?',\
#'ipynb' : 'localhost:8888/tree',\
#'degree' : ip_adress + '/?db=other/&key=degree-chart-mit2016&column=3',\
#'members' : ip_adress + '/?db=rank/&key=members2016&column=2',\
'rank' : ip_adress + '/?db=rank/&key=?',\
#'paperbot' : 'https://web.paperbot.ai/',\
#'iris.ai' : 'https://the.iris.ai/explore',\
'frontier' : ip_adress + '/?db=other/&key=frontier2017'}
#'eecs' : ip_adress + '/?db=eecs/&key=?'}
#'library' : ip_adress + '/?db=library/&key=?'}
#'neuroscience' : ip_adress + '/?db=neuroscience/&key=?'}
distribution = False
# SECURITY(review): hard-coded API credential committed to source (split into
# parts, presumably to be joined at use).  Move to an environment variable or
# an untracked config file and rotate the token.
slack_token = ['xoxb', '156129958533', 'YdIXSA2syy7ipacDQo6cr03j']
delete_from_char = ''
delete_forward = True
# Maps file extensions to the macOS application used to open them;
# '*' is the fallback.  Paths are machine-specific (macOS only).
application_dict = {'.ppt' : '/Applications/Keynote.app/Contents/MacOS/Keynote',\
'.pptx' : '/Applications/Keynote.app/Contents/MacOS/Keynote',\
'*' : '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'}
# ==== extension ====
output_data_to_new_tab_path = 'db/other/'
# reference
reference_filter = ''
reference_contain = ''
reference_igon_case = True
reference_hiden_engin_section = True
reference_output_data_to_new_tab = False
reference_output_data_format = ''
# bookmark
# NOTE(review): absolute, user-specific path — breaks on any other machine.
bookmark_file_path = "/Users/zd/Downloads/chrome_bookmarks.json"
bookmark_hiden_engin_section = True
bookmark_output_data_to_new_tab = False
bookmark_output_data_format = ''
bookmark_page_item_count = [16, 14, 12]
#history
# NOTE(review): absolute, user-specific path — breaks on any other machine.
history_file_path = "/Users/zd/Downloads/chrome_history.json"
history_hiden_engin_section = True
# NOTE(review): 'hidtory' (sic) — the misspelled identifier may be referenced
# elsewhere; do not rename without checking callers.
hidtory_sort_type = 0 #0: click count 1: url 2: title
history_show_click_count = False
#exclusive
exclusive_crossref_path = ['db/library']
exclusive_local_db_path = 'db/' + default_subject
#filefinder
filefinder_dirs = ['~/Downloads', 'db']
filefinder_netdisk_engin = 'pan.baidu' #drive onedrive dropbox
filefinder_sort_by_count = True
#content
content_hiden_engin_section = True
# convert
# The triple-quoted block below is reference-only documentation of the
# default values; the active settings follow after it.
''' default config
convert_url_args = '' #'?start=' #'?start=0&tag='
convert_page_step = 0
convert_page_start = 0
convert_page_max = 600
convert_page_to_end = True
convert_page_custom_parse = False
convert_tag = 'tr' #"div#title" # tag#class or tag
convert_min_num = 0
convert_max_num = 1000
convert_filter = ""
convert_contain = ""
convert_start = 0
convert_split_column_number = 0
convert_hiden_engin_section = True
convert_output_data_to_new_tab = False
convert_output_data_format = ''
'''
#'''
# NOTE(review): unlike the reference defaults above, the active block does not
# set convert_page_custom_parse or convert_hiden_engin_section — confirm they
# are assigned elsewhere before relying on them.
convert_url_args = '/default.html?page=' #'?start=' #'?start=0&tag='
convert_page_step = 1
convert_page_start = 1
convert_page_max = 10
convert_page_to_end = False
convert_tag = 'a#PostTitle' #"div#title" # tag#class or tag
convert_min_num = 0
convert_max_num = 1000
convert_filter = ""
convert_contain = ""
convert_start = 0
convert_split_column_number = 0
convert_output_data_to_new_tab = False
convert_output_data_format = ''
#'''
#=====bk====
# `background` indexes into `backgrounds`; index 0 (empty string) means no
# background image.  All other entries are external image URLs.
background = 0
backgrounds = ['',\
'http://img.blog.csdn.net/20161213000422101',\
'https://datacdn.soyo.or.kr/wcont/uploads/2016/02/02164057/alphago_01.jpg',\
'http://img.blog.csdn.net/20150506120021512?watermark/2/text/aHR0cDovL2Jsb2cuY3Nkbi5uZXQvd293ZGQx/font/5a6L5L2T/fontsize/400/fill/I0JBQkFCMA==/dissolve/70/gravity/Center',\
'http://img.blog.csdn.net/20161227171638323',\
'http://images.njcw.com/upload/2011-5/201105071635561829723_w.jpg',\
'http://www.caneis.com.tw/link/info/Middle_East_info/Egypt/images/Cairo-007-1.jpg',\
'https://cdn-images-1.medium.com/max/2000/1*BTGKRLq55y8Hld9pyvarXg.png',\
'http://st.depositphotos.com/1007919/3724/i/950/depositphotos_37248955-stock-photo-binary-background.jpg',\
'http://amazingstuff.co.uk/wp-content/uploads/2012/02/scale_of_the_universe_2.png',\
'https://curiositando.files.wordpress.com/2014/12/cervello_destro1.jpg',\
'http://img1.voc.com.cn/UpLoadFile/2013/03/05/201303051655526838.jpg',\
'http://p1.pstatp.com/large/530000529b6125ce87c',\
'http://zdnet4.cbsistatic.com/hub/i/r/2016/06/01/8a90fae5-22f7-480b-9ea5-a4f6252e7ed0/resize/1170x878/d756410179b9c71086b1496f4b924556/001.jpg',\
'http://tc.sinaimg.cn/maxwidth.2048/tc.service.weibo.com/s9_rr_itc_cn/000254925dab8674da3fb790364ddcf0.png']
#=====icon====
enable_website_icon = True
# Maps a site keyword or file extension to an icon URL.  Keys appear to be
# matched against record URLs/types by the rendering code (defined elsewhere).
website_icons = {'.pdf' : 'https://cdn4.iconfinder.com/data/icons/CS5/128/ACP_PDF%202_file_document.png',\
'.dir' : 'http://cdn.osxdaily.com/wp-content/uploads/2014/05/users-folder-mac-osx.jpg',\
'homepage' : 'http://grupojvr.com.mx/web/wp-content/uploads/2014/08/Direcci%C3%B3n-azul.png',\
'url' : 'http://vintaytime.com/wp-content/uploads/2017/02/url-shortener-icon.png',\
'remark' : 'http://www.mystipendium.de/sites/all/themes/sti/images/coq/editxl.png',\
'youtube' : 'https://www.seeklogo.net/wp-content/uploads/2016/06/YouTube-icon.png',\
'amazon' : 'https://media.licdn.com/mpr/mpr/shrink_200_200/AAEAAQAAAAAAAAUqAAAAJGFmYjUxMmQ3LWUyNDUtNGJmMy04Nzc4LWRmYzE1YTExMDY2YQ.png',\
'csdn' : 'http://a2.mzstatic.com/us/r30/Purple71/v4/99/61/36/996136cc-f759-5c0c-4531-ee0c6fec786a/icon175x175.png',\
'coursera': 'http://techraze.com/wp-content/uploads/2015/06/Coursera-APK-1.png',\
'edx' : 'https://icon.apkmirrordownload.com/org.edx.mobile.png',\
'udacity' : 'https://www.uplabs.com/assets/integrations/udacity-92b3b2525603489c7c5f325491d0ff44652631210086bb2ab082b897b9b39da0.png',\
'github' : 'https://cdn2.iconfinder.com/data/icons/black-white-social-media/64/social_media_logo_github-128.png',\
'arxiv' : 'http://www.thetelegraphic.com/img/icon-arxiv.png',\
'khan' : 'http://academics.cehd.umn.edu/mobile/wp-content/uploads/2013/10/khan-academy-icon.png',\
'medium' : 'https://memoriaelectrika.files.wordpress.com/2015/10/mediumlogo002.png',\
'mit': 'https://1.bp.blogspot.com/-fhwcWQmSJk4/VsMJ_NzuasI/AAAAAAAAAAo/qoBFDEJLnwI/w800-h800/images.png',\
'stanford' : 'https://d9tyu2epg3boq.cloudfront.net/institutions/stanford.png',
'berkeley' : 'http://www.berkeley.edu/brand/img/seals/ucbseal_139_540.png',\
'cmu' : 'http://www.wholeren.com/wp-content/uploads/2015/04/Carnegie_Mellon_University_CMU_1015361.png',\
'harvard' : 'http://tusm.3daystartup.org/files/2013/03/harvard.png',\
'oxford' : 'http://cdn.shopify.com/s/files/1/0581/9089/products/neck_label_option_1.png?v=1456393100',\
'cambridge' : 'http://a5.mzstatic.com/us/r30/Purple1/v4/6a/cf/d8/6acfd890-9467-f907-5092-5198e091fe04/icon256.png',\
'wikipedia' : 'http://vignette3.wikia.nocookie.net/everythingmarioandluigi/images/e/e8/Wikipedia_icon.png/revision/latest?cb=20130709180530',\
'stackoverflow' : 'http://cdn.sstatic.net/Sites/stackoverflow/company/img/logos/so/so-icon.png?v=c78bd457575a',\
'quora' : 'https://cdn4.iconfinder.com/data/icons/miu-flat-social/60/quora-128.png',\
'reddit' : 'http://icons.iconarchive.com/icons/uiconstock/socialmedia/128/Reddit-icon.png',\
'zhihu' : 'http://a3.mzstatic.com/us/r30/Purple6/v4/6e/e3/2b/6ee32b96-56d5-27b8-ea7a-998dae663ce7/icon175x175.png',\
'videolectures' : 'http://ftp.acc.umu.se/mirror/addons.superrepo.org/v7/addons/plugin.video.videolectures.net/icon.png',\
'weixin' : 'http://img4.imgtn.bdimg.com/it/u=972460576,3713596294&fm=21&gp=0.jpg',\
'weibo' : 'http://img4.imgtn.bdimg.com/it/u=173132403,536146045&fm=21&gp=0.jpg',\
'twitter' : 'https://abs.twimg.com/icons/apple-touch-icon-192x192.png',\
'slack' : 'http://www.freeiconspng.com/uploads/slack-icon-10.png',\
'facebook' : 'http://img.25pp.com/uploadfile/app/icon/20160505/1462390862727305.jpg',\
'localhost' : 'https://publicportal.teamsupport.com/Images/file.png',\
'iqiyi' : 'https://images-na.ssl-images-amazon.com/images/I/71ABWNB-YML._SL500_AA300_.png',\
'linkedin' : 'https://blogs.cornell.edu/info2040/files/2016/09/LinkedinII-2f706bu.png',\
'v.qq' : 'https://ss3.bdstatic.com/70cFv8Sh_Q1YnxGkpoWK1HF6hhy/it/u=311155846,3382957541&fm=23&gp=0.jpg',\
'douyu' : 'https://ss3.bdstatic.com/70cFv8Sh_Q1YnxGkpoWK1HF6hhy/it/u=791058301,37936658&fm=23&gp=0.jpg',\
'pan.baidu' : 'http://img0.imgtn.bdimg.com/it/u=3595078885,1850864109&fm=23&gp=0.jpg',\
'youku' : 'https://ss1.bdstatic.com/70cFuXSh_Q1YnxGkpoWK1HF6hhy/it/u=1278976984,3400181597&fm=26&gp=0.jpg',\
'zeef' : 'https://zeef.io/image/24118/300/s?1432128680548',\
'discord' : 'http://www.nirthpanter.net/uploads/4/7/2/8/47284995/discord_3_orig.png',\
'twitch' : 'http://apps.friday.tw/news/wp-content/uploads/2015/03/twitchicon.png',\
'bilibili' : 'https://pbs.twimg.com/profile_images/813934430867759105/bGAicSr_.jpg',\
'slideshare' : 'http://expandedramblingscom-oxyllvbag8y7yalm1.stackpathdns.com/wp-content/uploads/2013/07/slideshare.jpg',\
'google' : 'http://images.dailytech.com/nimage/G_is_For_Google_New_Logo_Thumb.png',\
'flickr' : 'http://clave7.webcindario.com/logo_flickr_01.png',\
'jianshu' : 'http://cdn2.jianshu.io/assets/web/logo-58fd04f6f0de908401aa561cda6a0688.png',\
'archive.org' : 'http://richmondsfblog.com/wp-content/uploads/2016/11/internet-archive-squarelogo.png'}
| |
# -*- coding: utf-8 -*-
"""
pint.quantity
~~~~~~~~~~~~~
:copyright: 2013 by Pint Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import division, unicode_literals, print_function, absolute_import
import copy
import math
import operator
import functools
import bisect
from .formatting import remove_custom_flags
from .errors import (DimensionalityError, OffsetUnitCalculusError,
UndefinedUnitError)
from .definitions import UnitDefinition
from .compat import string_types, ndarray, np, _to_magnitude, long_type
from .util import (logger, UnitsContainer, SharedRegistryObject,
to_units_container, infer_base_unit)
def _eq(first, second, check_all):
"""Comparison of scalars and arrays
"""
out = first == second
if check_all and isinstance(out, ndarray):
return np.all(out)
return out
class _Exception(Exception):  # pragma: no cover
    """Exception wrapper that carries another exception instance."""
    def __init__(self, internal):
        # Keep the wrapped exception so callers can inspect/re-raise it.
        self.internal = internal
class _Quantity(SharedRegistryObject):
"""Implements a class to describe a physical quantity:
the product of a numerical value and a unit of measurement.
:param value: value of the physical quantity to be created.
:type value: str, Quantity or any numeric type.
:param units: units of the physical quantity to be created.
:type units: UnitsContainer, str or Quantity.
"""
#: Default formatting string.
default_format = ''
def __reduce__(self):
    """Support pickling: rebuild via the package-level _build_quantity helper."""
    from . import _build_quantity
    return _build_quantity, (self.magnitude, self._units)
def __new__(cls, value, units=None):
    """Build a Quantity from *value* and optional *units*.

    *value* may be a string expression (parsed by the registry), an
    existing Quantity (copied), or any magnitude accepted by
    :func:`_to_magnitude`.  *units* may be None, a
    UnitsContainer/UnitDefinition, a unit string, or another registry
    object (Unit or Quantity).
    """
    if units is None:
        if isinstance(value, string_types):
            if value == '':
                raise ValueError('Expression to parse as Quantity cannot '
                                 'be an empty string.')
            # Delegate parsing to the registry, then normalise the result
            # through this constructor again.
            inst = cls._REGISTRY.parse_expression(value)
            return cls.__new__(cls, inst)
        elif isinstance(value, cls):
            inst = copy.copy(value)
        else:
            inst = object.__new__(cls)
            inst._magnitude = _to_magnitude(value, inst.force_ndarray)
            inst._units = UnitsContainer()
    elif isinstance(units, (UnitsContainer, UnitDefinition)):
        inst = object.__new__(cls)
        inst._magnitude = _to_magnitude(value, inst.force_ndarray)
        inst._units = units
    elif isinstance(units, string_types):
        inst = object.__new__(cls)
        inst._magnitude = _to_magnitude(value, inst.force_ndarray)
        inst._units = inst._REGISTRY.parse_units(units)._units
    elif isinstance(units, SharedRegistryObject):
        if isinstance(units, _Quantity) and units.magnitude != 1:
            # Accepted but discouraged: using a non-unity Quantity as units.
            inst = copy.copy(units)
            logger.warning('Creating new Quantity using a non unity '
                           'Quantity as units.')
        else:
            inst = object.__new__(cls)
            inst._units = units._units
        inst._magnitude = _to_magnitude(value, inst.force_ndarray)
    else:
        raise TypeError('units must be of type str, Quantity or '
                        'UnitsContainer; not {0}.'.format(type(units)))
    # Private bookkeeping flags (name-mangled to _Quantity__used/__handling).
    inst.__used = False
    inst.__handling = None
    return inst
@property
def debug_used(self):
    """Debug accessor for the private __used flag set in __new__."""
    return self.__used
def __copy__(self):
    """Shallow copy: copies the magnitude, shares the units container."""
    ret = self.__class__(copy.copy(self._magnitude), self._units)
    # Preserve the private bookkeeping flag across copies.
    ret.__used = self.__used
    return ret
def __deepcopy__(self, memo):
    """Deep copy of both magnitude and units, honouring the memo dict."""
    ret = self.__class__(copy.deepcopy(self._magnitude, memo),
                         copy.deepcopy(self._units, memo))
    # Preserve the private bookkeeping flag across copies.
    ret.__used = self.__used
    return ret
def __str__(self):
    """Human-readable representation using the default format spec."""
    return self.__format__('')
def __repr__(self):
    """Unambiguous debug representation: <Quantity(magnitude, 'units')>."""
    return "<Quantity(%s, '%s')>" % (self._magnitude, self._units)
def __format__(self, spec):
    """Format as "<magnitude> <units>".

    An empty *spec* falls back to :attr:`default_format`.  A ``'#'``
    flag requests compact (human-readable) units via :meth:`to_compact`;
    the flag is stripped before the spec is forwarded.
    """
    effective_spec = spec or self.default_format
    compact = '#' in effective_spec
    if compact:
        effective_spec = effective_spec.replace('#', '')
    target = self.to_compact() if compact else self
    magnitude_str = format(target.magnitude, remove_custom_flags(effective_spec))
    units_str = format(target.units, effective_spec)
    return '{0} {1}'.format(magnitude_str, units_str)
# IPython related code
def _repr_html_(self):
    """IPython/Jupyter rich display hook: HTML via the 'H' format flag."""
    return self.__format__('H')
def _repr_latex_(self):
    """IPython/Jupyter rich display hook: LaTeX via the 'L' format flag."""
    return "$" + self.__format__('L') + "$"
@property
def magnitude(self):
    """Quantity's magnitude. Long form for `m`
    """
    return self._magnitude
@property
def m(self):
    """Quantity's magnitude. Short form for `magnitude`
    """
    return self._magnitude
def m_as(self, unit):
    """Quantity's magnitude expressed in particular units.

    :param unit: destination units.
    :type unit: Quantity, str or dict
    """
    # Dividing by the unit and converting to dimensionless yields the
    # magnitude in that unit.
    return (self / unit).to('').magnitude
@property
def units(self):
    """Quantity's units as a registry Unit object. Long form for `u`

    :rtype: UnitsContainer
    """
    return self._REGISTRY.Unit(self._units)
@property
def u(self):
    """Quantity's units as a registry Unit object. Short form for `units`

    :rtype: UnitsContainer
    """
    return self._REGISTRY.Unit(self._units)
@property
def unitless(self):
    """Return true if the quantity has no units after conversion to root units.
    """
    return not bool(self.to_root_units()._units)
@property
def dimensionless(self):
    """Return true if the quantity is dimensionless.
    """
    tmp = self.to_root_units()
    return not bool(tmp.dimensionality)
@property
def dimensionality(self):
    """Quantity's dimensionality (e.g. {length: 1, time: -1})

    Computed lazily from the registry and cached on the instance.
    """
    try:
        return self._dimensionality
    except AttributeError:
        # First access: compute and cache.
        self._dimensionality = self._REGISTRY._get_dimensionality(self._units)
    return self._dimensionality
def compatible_units(self, *contexts):
    """Return units compatible with this quantity, optionally within contexts."""
    if contexts:
        with self._REGISTRY.context(*contexts):
            return self._REGISTRY.get_compatible_units(self._units)
    return self._REGISTRY.get_compatible_units(self._units)
def _convert_magnitude_not_inplace(self, other, *contexts, **ctx_kwargs):
    """Return the magnitude converted to *other* units, never mutating self."""
    if contexts:
        with self._REGISTRY.context(*contexts, **ctx_kwargs):
            return self._REGISTRY.convert(self._magnitude, self._units, other)
    return self._REGISTRY.convert(self._magnitude, self._units, other)
def _convert_magnitude(self, other, *contexts, **ctx_kwargs):
    """Return the magnitude converted to *other* units.

    Converts in place only when the magnitude is an ndarray (and no
    contexts are given); scalar magnitudes are always converted out of
    place.
    """
    if contexts:
        with self._REGISTRY.context(*contexts, **ctx_kwargs):
            return self._REGISTRY.convert(self._magnitude, self._units, other)
    return self._REGISTRY.convert(self._magnitude, self._units, other,
                                  inplace=isinstance(self._magnitude, ndarray))
def ito(self, other=None, *contexts, **ctx_kwargs):
    """Inplace rescale to different units.  Returns None.

    :param other: destination units.
    :type other: Quantity, str or dict
    """
    other = to_units_container(other, self._REGISTRY)
    self._magnitude = self._convert_magnitude(other, *contexts,
                                              **ctx_kwargs)
    self._units = other
    return None
def to(self, other=None, *contexts, **ctx_kwargs):
    """Return a new Quantity rescaled to different units.

    :param other: destination units.
    :type other: Quantity, str or dict
    """
    other = to_units_container(other, self._REGISTRY)
    magnitude = self._convert_magnitude_not_inplace(other, *contexts, **ctx_kwargs)
    return self.__class__(magnitude, other)
def ito_root_units(self):
    """Rescale self in place to the registry's root units.  Returns None.
    """
    _, other = self._REGISTRY._get_root_units(self._units)
    self._magnitude = self._convert_magnitude(other)
    self._units = other
    return None
def to_root_units(self):
    """Return a new Quantity rescaled to the registry's root units.
    """
    _, other = self._REGISTRY._get_root_units(self._units)
    magnitude = self._convert_magnitude_not_inplace(other)
    return self.__class__(magnitude, other)
def ito_base_units(self):
    """Rescale self in place to base units.  Returns None.
    """
    _, other = self._REGISTRY._get_base_units(self._units)
    self._magnitude = self._convert_magnitude(other)
    self._units = other
    return None
def to_base_units(self):
    """Return a new Quantity rescaled to base units.
    """
    _, other = self._REGISTRY._get_base_units(self._units)
    magnitude = self._convert_magnitude_not_inplace(other)
    return self.__class__(magnitude, other)
def to_compact(self, unit=None):
    """Return Quantity rescaled to compact, human-readable units.

    To get output in terms of a different unit, use the unit parameter.

    >>> import pint
    >>> ureg = pint.UnitRegistry()
    >>> (200e-9*ureg.s).to_compact()
    <Quantity(200.0, 'nanosecond')>
    >>> (1e-2*ureg('kg m/s^2')).to_compact('N')
    <Quantity(10.0, 'millinewton')>
    """
    if self.unitless:
        return self
    # Collect the registry's SI prefixes keyed by their power of ten
    # (e.g. 3 -> 'kilo').
    SI_prefixes = {}
    for prefix in self._REGISTRY._prefixes.values():
        try:
            scale = prefix.converter.scale
            # Kludgy way to check if this is an SI prefix: the scale
            # must be an exact power of ten.
            log10_scale = int(math.log10(scale))
            if log10_scale == math.log10(scale):
                SI_prefixes[log10_scale] = prefix.name
        except Exception:
            # Was a bare `except:`; narrowed so KeyboardInterrupt and
            # SystemExit are no longer swallowed.  Non-numeric or
            # non-positive scales (math.log10 raises) map to "no prefix".
            SI_prefixes[0] = ''
    SI_prefixes = sorted(SI_prefixes.items())
    SI_powers = [item[0] for item in SI_prefixes]
    SI_bases = [item[1] for item in SI_prefixes]
    if unit is None:
        unit = infer_base_unit(self)
    q_base = self.to(unit)
    magnitude = q_base.magnitude
    # Only changes the prefix on the first unit in the UnitContainer.
    unit_str, unit_power = list(q_base._units.items())[0]
    if unit_power > 0:
        power = int(math.floor(math.log10(magnitude) / unit_power / 3)) * 3
    else:
        power = int(math.ceil(math.log10(magnitude) / unit_power / 3)) * 3
    # Binary search over the sorted prefix powers for the target power.
    prefix = SI_bases[bisect.bisect_left(SI_powers, power)]
    new_unit_str = prefix + unit_str
    new_unit_container = q_base._units.rename(unit_str, new_unit_str)
    return self.to(new_unit_container)
# Mathematical operations
def __int__(self):
    """Coerce a dimensionless quantity to int; raise otherwise."""
    if self.dimensionless:
        return int(self._convert_magnitude_not_inplace(UnitsContainer()))
    raise DimensionalityError(self._units, 'dimensionless')
def __long__(self):
    """Python 2 long() coercion for dimensionless quantities; raise otherwise."""
    if self.dimensionless:
        return long_type(self._convert_magnitude_not_inplace(UnitsContainer()))
    raise DimensionalityError(self._units, 'dimensionless')
def __float__(self):
    """Coerce a dimensionless quantity to float; raise otherwise."""
    if self.dimensionless:
        return float(self._convert_magnitude_not_inplace(UnitsContainer()))
    raise DimensionalityError(self._units, 'dimensionless')
def __complex__(self):
    """Coerce a dimensionless quantity to complex; raise otherwise."""
    if self.dimensionless:
        return complex(self._convert_magnitude_not_inplace(UnitsContainer()))
    raise DimensionalityError(self._units, 'dimensionless')
def _iadd_sub(self, other, op):
    """Perform addition or subtraction operation in-place and return the result.

    :param other: object to be added to / subtracted from self
    :type other: Quantity or any type accepted by :func:`_to_magnitude`
    :param op: operator function (e.g. operator.add, operator.isub)
    :type op: function
    """
    if not self._check(other):
        # other not from same Registry or not a Quantity
        try:
            other_magnitude = _to_magnitude(other, self.force_ndarray)
        except TypeError:
            return NotImplemented
        if _eq(other, 0, True):
            # If the other value is 0 (but not Quantity 0)
            # do the operation without checking units.
            # We do the calculation instead of just returning the same
            # value to enforce any shape checking and type casting due to
            # the operation.
            self._magnitude = op(self._magnitude, other_magnitude)
        elif self.dimensionless:
            self.ito(UnitsContainer())
            self._magnitude = op(self._magnitude, other_magnitude)
        else:
            raise DimensionalityError(self._units, 'dimensionless')
        return self
    if not self.dimensionality == other.dimensionality:
        raise DimensionalityError(self._units, other._units,
                                  self.dimensionality,
                                  other.dimensionality)
    # Next we define some variables to make if-clauses more readable.
    self_non_mul_units = self._get_non_multiplicative_units()
    is_self_multiplicative = len(self_non_mul_units) == 0
    if len(self_non_mul_units) == 1:
        self_non_mul_unit = self_non_mul_units[0]
    other_non_mul_units = other._get_non_multiplicative_units()
    is_other_multiplicative = len(other_non_mul_units) == 0
    if len(other_non_mul_units) == 1:
        other_non_mul_unit = other_non_mul_units[0]
    # Presence of non-multiplicative (offset) units gives rise to several
    # cases; each mutates self._magnitude (and possibly self._units).
    if is_self_multiplicative and is_other_multiplicative:
        if self._units == other._units:
            self._magnitude = op(self._magnitude, other._magnitude)
        # If only self has a delta unit, other determines unit of result.
        elif self._get_delta_units() and not other._get_delta_units():
            self._magnitude = op(self._convert_magnitude(other._units),
                                 other._magnitude)
            self._units = other._units
        else:
            self._magnitude = op(self._magnitude,
                                 other.to(self._units)._magnitude)
    elif (op == operator.isub and len(self_non_mul_units) == 1
            and self._units[self_non_mul_unit] == 1
            and not other._has_compatible_delta(self_non_mul_unit)):
        if self._units == other._units:
            self._magnitude = op(self._magnitude, other._magnitude)
        else:
            self._magnitude = op(self._magnitude,
                                 other.to(self._units)._magnitude)
        # Subtracting two offset quantities yields a delta unit.
        self._units = self._units.rename(self_non_mul_unit,
                                         'delta_' + self_non_mul_unit)
    elif (op == operator.isub and len(other_non_mul_units) == 1
            and other._units[other_non_mul_unit] == 1
            and not self._has_compatible_delta(other_non_mul_unit)):
        # we convert to self directly since it is multiplicative
        self._magnitude = op(self._magnitude,
                             other.to(self._units)._magnitude)
    elif (len(self_non_mul_units) == 1
            # order of the dimension of offset unit == 1 ?
            and self._units[self_non_mul_unit] == 1
            and other._has_compatible_delta(self_non_mul_unit)):
        # Replace offset unit in self by the corresponding delta unit.
        # This is done to prevent a shift by offset in the to()-call.
        tu = self._units.rename(self_non_mul_unit,
                                'delta_' + self_non_mul_unit)
        self._magnitude = op(self._magnitude, other.to(tu)._magnitude)
    elif (len(other_non_mul_units) == 1
            # order of the dimension of offset unit == 1 ?
            and other._units[other_non_mul_unit] == 1
            and self._has_compatible_delta(other_non_mul_unit)):
        # Replace offset unit in other by the corresponding delta unit.
        # This is done to prevent a shift by offset in the to()-call.
        tu = other._units.rename(other_non_mul_unit,
                                 'delta_' + other_non_mul_unit)
        self._magnitude = op(self._convert_magnitude(tu), other._magnitude)
        self._units = other._units
    else:
        raise OffsetUnitCalculusError(self._units, other._units)
    return self
def _add_sub(self, other, op):
    """Perform addition or subtraction operation and return the result.

    :param other: object to be added to / subtracted from self
    :type other: Quantity or any type accepted by :func:`_to_magnitude`
    :param op: operator function (e.g. operator.add, operator.isub)
    :type op: function
    """
    if not self._check(other):
        # other not from same Registry or not a Quantity
        if _eq(other, 0, True):
            # If the other value is 0 (but not Quantity 0)
            # do the operation without checking units.
            # We do the calculation instead of just returning the same
            # value to enforce any shape checking and type casting due to
            # the operation.
            units = self._units
            magnitude = op(self._magnitude,
                           _to_magnitude(other, self.force_ndarray))
        elif self.dimensionless:
            units = UnitsContainer()
            magnitude = op(self.to(units)._magnitude,
                           _to_magnitude(other, self.force_ndarray))
        else:
            raise DimensionalityError(self._units, 'dimensionless')
        return self.__class__(magnitude, units)
    if not self.dimensionality == other.dimensionality:
        raise DimensionalityError(self._units, other._units,
                                  self.dimensionality,
                                  other.dimensionality)
    # Next we define some variables to make if-clauses more readable.
    self_non_mul_units = self._get_non_multiplicative_units()
    is_self_multiplicative = len(self_non_mul_units) == 0
    if len(self_non_mul_units) == 1:
        self_non_mul_unit = self_non_mul_units[0]
    other_non_mul_units = other._get_non_multiplicative_units()
    is_other_multiplicative = len(other_non_mul_units) == 0
    if len(other_non_mul_units) == 1:
        other_non_mul_unit = other_non_mul_units[0]
    # Presence of non-multiplicative (offset) units gives rise to several
    # cases; each branch computes `magnitude` and `units` for the result.
    if is_self_multiplicative and is_other_multiplicative:
        if self._units == other._units:
            magnitude = op(self._magnitude, other._magnitude)
            units = self._units
        # If only self has a delta unit, other determines unit of result.
        elif self._get_delta_units() and not other._get_delta_units():
            magnitude = op(self._convert_magnitude(other._units),
                           other._magnitude)
            units = other._units
        else:
            units = self._units
            magnitude = op(self._magnitude,
                           other.to(self._units).magnitude)
    elif (op == operator.sub and len(self_non_mul_units) == 1
            and self._units[self_non_mul_unit] == 1
            and not other._has_compatible_delta(self_non_mul_unit)):
        if self._units == other._units:
            magnitude = op(self._magnitude, other._magnitude)
        else:
            magnitude = op(self._magnitude,
                           other.to(self._units)._magnitude)
        # Subtracting two offset quantities yields a delta unit.
        units = self._units.rename(self_non_mul_unit,
                                   'delta_' + self_non_mul_unit)
    elif (op == operator.sub and len(other_non_mul_units) == 1
            and other._units[other_non_mul_unit] == 1
            and not self._has_compatible_delta(other_non_mul_unit)):
        # we convert to self directly since it is multiplicative
        magnitude = op(self._magnitude,
                       other.to(self._units)._magnitude)
        units = self._units
    elif (len(self_non_mul_units) == 1
            # order of the dimension of offset unit == 1 ?
            and self._units[self_non_mul_unit] == 1
            and other._has_compatible_delta(self_non_mul_unit)):
        # Replace offset unit in self by the corresponding delta unit.
        # This is done to prevent a shift by offset in the to()-call.
        tu = self._units.rename(self_non_mul_unit,
                                'delta_' + self_non_mul_unit)
        magnitude = op(self._magnitude, other.to(tu).magnitude)
        units = self._units
    elif (len(other_non_mul_units) == 1
            # order of the dimension of offset unit == 1 ?
            and other._units[other_non_mul_unit] == 1
            and self._has_compatible_delta(other_non_mul_unit)):
        # Replace offset unit in other by the corresponding delta unit.
        # This is done to prevent a shift by offset in the to()-call.
        tu = other._units.rename(other_non_mul_unit,
                                 'delta_' + other_non_mul_unit)
        magnitude = op(self._convert_magnitude(tu), other._magnitude)
        units = other._units
    else:
        raise OffsetUnitCalculusError(self._units, other._units)
    return self.__class__(magnitude, units)
def __iadd__(self, other):
    """In-place addition.

    Only ndarray magnitudes can actually be mutated in place; scalar
    magnitudes are immutable, so those fall back to the out-of-place
    implementation.
    """
    if isinstance(self._magnitude, ndarray):
        return self._iadd_sub(other, operator.iadd)
    return self._add_sub(other, operator.add)
def __add__(self, other):
    """Addition; delegates to _add_sub with operator.add."""
    return self._add_sub(other, operator.add)
# Addition is commutative, so reflected addition reuses __add__.
__radd__ = __add__
def __isub__(self, other):
    """In-place subtraction.

    Only ndarray magnitudes can actually be mutated in place; scalar
    magnitudes fall back to the out-of-place implementation.
    """
    if isinstance(self._magnitude, ndarray):
        return self._iadd_sub(other, operator.isub)
    return self._add_sub(other, operator.sub)
def __sub__(self, other):
    """Subtraction; delegates to _add_sub with operator.sub."""
    return self._add_sub(other, operator.sub)
def __rsub__(self, other):
    """Reflected subtraction: other - self computed as -(self - other)."""
    return -self._add_sub(other, operator.sub)
def _imul_div(self, other, magnitude_op, units_op=None):
    """Perform multiplication or division operation in-place and return the
    result.

    :param other: object to be multiplied/divided with self
    :type other: Quantity or any type accepted by :func:`_to_magnitude`
    :param magnitude_op: operator function to perform on the magnitudes
                         (e.g. operator.mul)
    :type magnitude_op: function
    :param units_op: operator function to perform on the units; if None,
                     *magnitude_op* is used
    :type units_op: function or None
    """
    if units_op is None:
        units_op = magnitude_op
    offset_units_self = self._get_non_multiplicative_units()
    no_offset_units_self = len(offset_units_self)
    if not self._check(other):
        # other is not a registry object: treat it as a bare magnitude.
        if not self._ok_for_muldiv(no_offset_units_self):
            raise OffsetUnitCalculusError(self._units,
                                          getattr(other, 'units', ''))
        if len(offset_units_self) == 1:
            # A single offset unit is only allowed for plain multiplication
            # and only when its exponent is 1.
            if (self._units[offset_units_self[0]] != 1
                    or magnitude_op not in [operator.mul, operator.imul]):
                raise OffsetUnitCalculusError(self._units,
                                              getattr(other, 'units', ''))
        try:
            other_magnitude = _to_magnitude(other, self.force_ndarray)
        except TypeError:
            return NotImplemented
        self._magnitude = magnitude_op(self._magnitude, other_magnitude)
        self._units = units_op(self._units, UnitsContainer())
        return self
    if not isinstance(other, _Quantity):
        # other is a unit-like registry object; its magnitude is taken as 1.
        self._magnitude = magnitude_op(self._magnitude, 1)
        self._units = units_op(self._units, other._units)
        return self
    if not self._ok_for_muldiv(no_offset_units_self):
        raise OffsetUnitCalculusError(self._units, other._units)
    elif no_offset_units_self == 1 and len(self._units) == 1:
        # A single pure offset unit is converted to its root unit first.
        self.ito_root_units()
    no_offset_units_other = len(other._get_non_multiplicative_units())
    if not other._ok_for_muldiv(no_offset_units_other):
        raise OffsetUnitCalculusError(self._units, other._units)
    elif no_offset_units_other == 1 and len(other._units) == 1:
        other.ito_root_units()
    self._magnitude = magnitude_op(self._magnitude, other._magnitude)
    self._units = units_op(self._units, other._units)
    return self
def _mul_div(self, other, magnitude_op, units_op=None):
    """Perform multiplication or division operation and return the result.

    :param other: object to be multiplied/divided with self
    :type other: Quantity or any type accepted by :func:`_to_magnitude`
    :param magnitude_op: operator function to perform on the magnitudes
                         (e.g. operator.mul)
    :type magnitude_op: function
    :param units_op: operator function to perform on the units; if None,
                     *magnitude_op* is used
    :type units_op: function or None
    """
    if units_op is None:
        units_op = magnitude_op
    offset_units_self = self._get_non_multiplicative_units()
    no_offset_units_self = len(offset_units_self)
    if not self._check(other):
        # other is not a registry object: treat it as a bare magnitude.
        if not self._ok_for_muldiv(no_offset_units_self):
            raise OffsetUnitCalculusError(self._units,
                                          getattr(other, 'units', ''))
        if len(offset_units_self) == 1:
            # A single offset unit is only allowed for plain multiplication
            # and only when its exponent is 1.
            if (self._units[offset_units_self[0]] != 1
                    or magnitude_op not in [operator.mul, operator.imul]):
                raise OffsetUnitCalculusError(self._units,
                                              getattr(other, 'units', ''))
        try:
            other_magnitude = _to_magnitude(other, self.force_ndarray)
        except TypeError:
            return NotImplemented
        magnitude = magnitude_op(self._magnitude, other_magnitude)
        units = units_op(self._units, UnitsContainer())
        return self.__class__(magnitude, units)
    if not isinstance(other, _Quantity):
        # other is a unit-like registry object (implicit magnitude 1), so
        # only the units change.
        magnitude = self._magnitude
        units = units_op(self._units, other._units)
        return self.__class__(magnitude, units)
    new_self = self
    if not self._ok_for_muldiv(no_offset_units_self):
        raise OffsetUnitCalculusError(self._units, other._units)
    elif no_offset_units_self == 1 and len(self._units) == 1:
        # A single pure offset unit is converted to its root unit first.
        new_self = self.to_root_units()
    no_offset_units_other = len(other._get_non_multiplicative_units())
    if not other._ok_for_muldiv(no_offset_units_other):
        raise OffsetUnitCalculusError(self._units, other._units)
    elif no_offset_units_other == 1 and len(other._units) == 1:
        other = other.to_root_units()
    magnitude = magnitude_op(new_self._magnitude, other._magnitude)
    units = units_op(new_self._units, other._units)
    return self.__class__(magnitude, units)
def __imul__(self, other):
if not isinstance(self._magnitude, ndarray):
return self._mul_div(other, operator.mul)
else:
return self._imul_div(other, operator.imul)
    def __mul__(self, other):
        """Multiply by `other`, combining magnitudes and units."""
        return self._mul_div(other, operator.mul)
    # Multiplication commutes, so the reflected form reuses __mul__.
    __rmul__ = __mul__
def __itruediv__(self, other):
if not isinstance(self._magnitude, ndarray):
return self._mul_div(other, operator.truediv)
else:
return self._imul_div(other, operator.itruediv)
    def __truediv__(self, other):
        """Divide by `other`, dividing both magnitudes and units."""
        return self._mul_div(other, operator.truediv)
    def __ifloordiv__(self, other):
        """In-place floor division: magnitudes use //, units divide normally."""
        if not isinstance(self._magnitude, ndarray):
            return self._mul_div(other, operator.floordiv, units_op=operator.itruediv)
        else:
            return self._imul_div(other, operator.ifloordiv, units_op=operator.itruediv)
    def __floordiv__(self, other):
        """Floor-divide by `other`: magnitudes use //, units divide normally."""
        return self._mul_div(other, operator.floordiv, units_op=operator.truediv)
    def __rtruediv__(self, other):
        """Reflected true division: `other / self` for non-Quantity `other`."""
        try:
            other_magnitude = _to_magnitude(other, self.force_ndarray)
        except TypeError:
            return NotImplemented
        no_offset_units_self = len(self._get_non_multiplicative_units())
        if not self._ok_for_muldiv(no_offset_units_self):
            raise OffsetUnitCalculusError(self._units, '')
        elif no_offset_units_self == 1 and len(self._units) == 1:
            # A lone offset unit must be converted away before dividing.
            self = self.to_root_units()
        return self.__class__(other_magnitude / self._magnitude, 1 / self._units)
def __rfloordiv__(self, other):
try:
other_magnitude = _to_magnitude(other, self.force_ndarray)
except TypeError:
return NotImplemented
no_offset_units_self = len(self._get_non_multiplicative_units())
if not self._ok_for_muldiv(no_offset_units_self):
raise OffsetUnitCalculusError(self._units, '')
elif no_offset_units_self == 1 and len(self._units) == 1:
self = self.to_root_units()
return self.__class__(other_magnitude // self._magnitude, 1 / self._units)
__div__ = __truediv__
__rdiv__ = __rtruediv__
__idiv__ = __itruediv__
    def __ipow__(self, other):
        """In-place exponentiation; truly in-place only for ndarray magnitudes."""
        if not isinstance(self._magnitude, ndarray):
            return self.__pow__(other)
        try:
            # The conversion doubles as a type check; recomputed again below.
            other_magnitude = _to_magnitude(other, self.force_ndarray)
        except TypeError:
            return NotImplemented
        else:
            # NOTE(review): `_ok_for_muldiv` is not called here (missing
            # parentheses), so this guard never fires; offset units are
            # instead caught by the `_is_multiplicative` check below —
            # confirm intent.
            if not self._ok_for_muldiv:
                raise OffsetUnitCalculusError(self._units)
            if isinstance(getattr(other, '_magnitude', other), ndarray):
                # arrays are refused as exponent, because they would create
                # len(array) quantities of len(set(array)) different units
                if np.size(other) > 1:
                    raise DimensionalityError(self._units, 'dimensionless')
            if other == 1:
                return self
            elif other == 0:
                # Anything to the zeroth power is dimensionless.
                self._units = UnitsContainer()
            else:
                if not self._is_multiplicative:
                    if self._REGISTRY.autoconvert_offset_to_baseunit:
                        self.ito_base_units()
                    else:
                        raise OffsetUnitCalculusError(self._units)
                if getattr(other, 'dimensionless', False):
                    # A dimensionless Quantity exponent: use its base-unit magnitude.
                    other = other.to_base_units()
                    self._units **= other.magnitude
                elif not getattr(other, 'dimensionless', True):
                    raise DimensionalityError(self._units, 'dimensionless')
                else:
                    self._units **= other
            self._magnitude **= _to_magnitude(other, self.force_ndarray)
            return self
    def __pow__(self, other):
        """Raise to the power `other`; the exponent must be a dimensionless scalar."""
        try:
            # The conversion doubles as a type check; recomputed again below.
            other_magnitude = _to_magnitude(other, self.force_ndarray)
        except TypeError:
            return NotImplemented
        else:
            # NOTE(review): `_ok_for_muldiv` is not called (missing
            # parentheses), so this guard never fires; offset units are
            # handled by the `_is_multiplicative` check below — confirm intent.
            if not self._ok_for_muldiv:
                raise OffsetUnitCalculusError(self._units)
            if isinstance(getattr(other, '_magnitude', other), ndarray):
                # arrays are refused as exponent, because they would create
                # len(array) quantities of len(set(array)) different units
                if np.size(other) > 1:
                    raise DimensionalityError(self._units, 'dimensionless')
            new_self = self
            if other == 1:
                return self
            elif other == 0:
                # Anything to the zeroth power is dimensionless.
                units = UnitsContainer()
            else:
                if not self._is_multiplicative:
                    if self._REGISTRY.autoconvert_offset_to_baseunit:
                        new_self = self.to_root_units()
                    else:
                        raise OffsetUnitCalculusError(self._units)
                if getattr(other, 'dimensionless', False):
                    # A dimensionless Quantity exponent: use its root-unit magnitude.
                    units = new_self._units ** other.to_root_units().magnitude
                elif not getattr(other, 'dimensionless', True):
                    raise DimensionalityError(self._units, 'dimensionless')
                else:
                    units = new_self._units ** other
            magnitude = new_self._magnitude ** _to_magnitude(other, self.force_ndarray)
            return self.__class__(magnitude, units)
def __rpow__(self, other):
try:
other_magnitude = _to_magnitude(other, self.force_ndarray)
except TypeError:
return NotImplemented
else:
if not self.dimensionless:
raise DimensionalityError(self._units, 'dimensionless')
if isinstance(self._magnitude, ndarray):
if np.size(self._magnitude) > 1:
raise DimensionalityError(self._units, 'dimensionless')
new_self = self.to_root_units()
return other**new_self._magnitude
def __abs__(self):
return self.__class__(abs(self._magnitude), self._units)
def __round__(self, ndigits=0):
return self.__class__(round(self._magnitude, ndigits=ndigits), self._units)
def __pos__(self):
return self.__class__(operator.pos(self._magnitude), self._units)
def __neg__(self):
return self.__class__(operator.neg(self._magnitude), self._units)
    def __eq__(self, other):
        """Equality, converting `other`'s units to self's when needed."""
        # We compare to the base class of Quantity because
        # each Quantity class is unique.
        if not isinstance(other, _Quantity):
            # Bare numbers only compare equal to dimensionless quantities.
            return (self.dimensionless and
                    _eq(self._convert_magnitude(UnitsContainer()), other, False))
        # Two exact zeros are equal iff their dimensionality matches.
        if _eq(self._magnitude, 0, True) and _eq(other._magnitude, 0, True):
            return self.dimensionality == other.dimensionality
        if self._units == other._units:
            return _eq(self._magnitude, other._magnitude, False)
        try:
            return _eq(self._convert_magnitude_not_inplace(other._units),
                       other._magnitude, False)
        except DimensionalityError:
            # Incommensurable quantities are simply unequal.
            return False
def __ne__(self, other):
out = self.__eq__(other)
if isinstance(out, ndarray):
return np.logical_not(out)
return not out
    def compare(self, other, op):
        """Apply comparison operator `op` after reconciling units.

        :param other: Quantity (or a plain number if self is dimensionless)
        :param op: binary comparison function, e.g. operator.lt
        :raises ValueError: when comparing a dimensional Quantity to a number
        :raises DimensionalityError: when dimensionalities differ
        """
        if not isinstance(other, self.__class__):
            if self.dimensionless:
                return op(self._convert_magnitude_not_inplace(UnitsContainer()), other)
            else:
                raise ValueError('Cannot compare Quantity and {0}'.format(type(other)))
        if self._units == other._units:
            return op(self._magnitude, other._magnitude)
        if self.dimensionality != other.dimensionality:
            raise DimensionalityError(self._units, other._units,
                                      self.dimensionality, other.dimensionality)
        # Same dimensionality, different units: compare in root units.
        return op(self.to_root_units().magnitude,
                  other.to_root_units().magnitude)
__lt__ = lambda self, other: self.compare(other, op=operator.lt)
__le__ = lambda self, other: self.compare(other, op=operator.le)
__ge__ = lambda self, other: self.compare(other, op=operator.ge)
__gt__ = lambda self, other: self.compare(other, op=operator.gt)
    def __bool__(self):
        """Truthiness follows the magnitude; units are ignored."""
        return bool(self._magnitude)

    # Python 2 truth protocol.
    __nonzero__ = __bool__
# NumPy Support
__radian = 'radian'
__same_units = 'equal greater greater_equal less less_equal not_equal arctan2'.split()
#: Dictionary mapping ufunc/attributes names to the units that they
#: require (conversion will be tried).
__require_units = {'cumprod': '',
'arccos': '', 'arcsin': '', 'arctan': '',
'arccosh': '', 'arcsinh': '', 'arctanh': '',
'exp': '', 'expm1': '', 'exp2': '',
'log': '', 'log10': '', 'log1p': '', 'log2': '',
'sin': __radian, 'cos': __radian, 'tan': __radian,
'sinh': __radian, 'cosh': __radian, 'tanh': __radian,
'radians': 'degree', 'degrees': __radian,
'deg2rad': 'degree', 'rad2deg': __radian,
'logaddexp': '', 'logaddexp2': ''}
#: Dictionary mapping ufunc/attributes names to the units that they
#: will set on output.
__set_units = {'cos': '', 'sin': '', 'tan': '',
'cosh': '', 'sinh': '', 'tanh': '',
'arccos': __radian, 'arcsin': __radian,
'arctan': __radian, 'arctan2': __radian,
'arccosh': __radian, 'arcsinh': __radian,
'arctanh': __radian,
'degrees': 'degree', 'radians': __radian,
'expm1': '', 'cumprod': '',
'rad2deg': 'degree', 'deg2rad': __radian}
#: List of ufunc/attributes names in which units are copied from the
#: original.
__copy_units = 'compress conj conjugate copy cumsum diagonal flatten ' \
'max mean min ptp ravel repeat reshape round ' \
'squeeze std sum take trace transpose ' \
'ceil floor hypot rint ' \
'add subtract ' \
'copysign nextafter trunc ' \
'frexp ldexp modf modf__1 ' \
'absolute negative remainder fmod mod'.split()
#: Dictionary mapping ufunc/attributes names to the units that they will
#: set on output. The value is interpreted as the power to which the unit
#: will be raised.
__prod_units = {'var': 2, 'prod': 'size', 'multiply': 'mul',
'true_divide': 'div', 'divide': 'div', 'floor_divide': 'div',
'remainder': 'div',
'sqrt': .5, 'square': 2, 'reciprocal': -1}
__skip_other_args = 'ldexp multiply ' \
'true_divide divide floor_divide fmod mod ' \
'remainder'.split()
__handled = tuple(__same_units) + \
tuple(__require_units.keys()) + \
tuple(__prod_units.keys()) + \
tuple(__copy_units) + tuple(__skip_other_args)
    def clip(self, first=None, second=None, out=None, **kwargs):
        """Clip the magnitude between bounds, like ndarray.clip.

        Bounds may be Quantities (converted to self's units) or, when self
        is dimensionless, plain numbers. Supports both positional
        (first, second) and keyword ('min'/'max') spellings.
        """
        min = kwargs.get('min', first)
        max = kwargs.get('max', second)
        if min is None and max is None:
            raise TypeError('clip() takes at least 3 arguments (2 given)')
        # A single positional bound is interpreted as the *maximum*.
        if max is None and 'min' not in kwargs:
            min, max = max, min
        kwargs = {'out': out}
        if min is not None:
            if isinstance(min, self.__class__):
                kwargs['min'] = min.to(self).magnitude
            elif self.dimensionless:
                kwargs['min'] = min
            else:
                raise DimensionalityError('dimensionless', self._units)
        if max is not None:
            if isinstance(max, self.__class__):
                kwargs['max'] = max.to(self).magnitude
            elif self.dimensionless:
                kwargs['max'] = max
            else:
                raise DimensionalityError('dimensionless', self._units)
        return self.__class__(self.magnitude.clip(**kwargs), self._units)
    def fill(self, value):
        """Fill the underlying ndarray with `value`'s magnitude.

        NOTE(review): unlike ndarray.fill, this also overwrites self's
        units with `value`'s units — confirm this is intended.
        """
        self._units = value._units
        return self.magnitude.fill(value.magnitude)
    def put(self, indices, values, mode='raise'):
        """Set magnitude[indices] = values (ndarray.put), converting units."""
        if isinstance(values, self.__class__):
            values = values.to(self).magnitude
        elif self.dimensionless:
            # NOTE(review): this branch passes a Quantity (not a bare
            # magnitude) on to ndarray.put — confirm `.magnitude` was not
            # intended here, as in the searchsorted counterpart.
            values = self.__class__(values, '').to(self)
        else:
            raise DimensionalityError('dimensionless', self._units)
        self.magnitude.put(indices, values, mode)
@property
def real(self):
return self.__class__(self._magnitude.real, self._units)
@property
def imag(self):
return self.__class__(self._magnitude.imag, self._units)
@property
def T(self):
return self.__class__(self._magnitude.T, self._units)
def searchsorted(self, v, side='left'):
if isinstance(v, self.__class__):
v = v.to(self).magnitude
elif self.dimensionless:
v = self.__class__(v, '').to(self)
else:
raise DimensionalityError('dimensionless', self._units)
return self.magnitude.searchsorted(v, side)
def __ito_if_needed(self, to_units):
if self.unitless and to_units == 'radian':
return
self.ito(to_units)
    def __numpy_method_wrap(self, func, *args, **kwargs):
        """Convenience method to wrap on the fly numpy method taking
        care of the units.
        """
        # Convert self in place first if the method requires specific units.
        if func.__name__ in self.__require_units:
            self.__ito_if_needed(self.__require_units[func.__name__])
        value = func(*args, **kwargs)
        # Re-wrap the raw result with the appropriate output units.
        if func.__name__ in self.__copy_units:
            return self.__class__(value, self._units)
        if func.__name__ in self.__prod_units:
            tmp = self.__prod_units[func.__name__]
            if tmp == 'size':
                # e.g. prod(): units are raised to the number of elements.
                return self.__class__(value, self._units ** self._magnitude.size)
            return self.__class__(value, self._units ** tmp)
        return value
    def __len__(self):
        """Length of the underlying magnitude."""
        return len(self._magnitude)
def __iter__(self):
# Allow exception to propagate in case of non-iterable magnitude
it_mag = iter(self.magnitude)
return iter((self.__class__(mag, self._units) for mag in it_mag))
    def __getattr__(self, item):
        """Delegate unknown attributes to the magnitude, coercing it to an
        ndarray for NumPy-protocol attributes and handled ufunc names.
        """
        # Attributes starting with `__array_` are common attributes of NumPy ndarray.
        # They are requested by numpy functions.
        if item.startswith('__array_'):
            if isinstance(self._magnitude, ndarray):
                return getattr(self._magnitude, item)
            else:
                # If an `__array_` attributes is requested but the magnitude is not an ndarray,
                # we convert the magnitude to a numpy ndarray.
                self._magnitude = _to_magnitude(self._magnitude, force_ndarray=True)
                return getattr(self._magnitude, item)
        elif item in self.__handled:
            # Handled numpy methods are wrapped so units are converted on
            # input and restored on the output.
            if not isinstance(self._magnitude, ndarray):
                self._magnitude = _to_magnitude(self._magnitude, True)
            attr = getattr(self._magnitude, item)
            if callable(attr):
                return functools.partial(self.__numpy_method_wrap, attr)
            return attr
        try:
            return getattr(self._magnitude, item)
        except AttributeError as ex:
            # Re-raise with a message naming both the Quantity and magnitude.
            raise AttributeError("Neither Quantity object nor its magnitude ({0}) "
                                 "has attribute '{1}'".format(self._magnitude, item))
def __getitem__(self, key):
try:
value = self._magnitude[key]
return self.__class__(value, self._units)
except TypeError:
raise TypeError("Neither Quantity object nor its magnitude ({0})"
"supports indexing".format(self._magnitude))
    def __setitem__(self, key, value):
        """Assign `value`, converted to self's units, at magnitude[key]."""
        try:
            # NaN is accepted unconditionally, whatever the units.
            if math.isnan(value):
                self._magnitude[key] = value
                return
        except (TypeError, DimensionalityError):
            pass
        try:
            # `factor` carries value expressed relative to self's units; it
            # must come out dimensionless for the assignment to be valid.
            if isinstance(value, self.__class__):
                factor = self.__class__(value.magnitude, value._units / self._units).to_root_units()
            else:
                factor = self.__class__(value, self._units ** (-1)).to_root_units()
            if isinstance(factor, self.__class__):
                if not factor.dimensionless:
                    raise DimensionalityError(value, self.units,
                                              extra_msg='. Assign a quantity with the same dimensionality or '
                                              'access the magnitude directly as '
                                              '`obj.magnitude[%s] = %s`' % (key, value))
                self._magnitude[key] = factor.magnitude
            else:
                self._magnitude[key] = factor
        except TypeError:
            raise TypeError("Neither Quantity object nor its magnitude ({0})"
                            "supports indexing".format(self._magnitude))
def tolist(self):
units = self._units
return [self.__class__(value, units).tolist() if isinstance(value, list) else self.__class__(value, units)
for value in self._magnitude.tolist()]
    # Ensure numpy binary ops defer to Quantity rather than plain ndarray.
    __array_priority__ = 17

    def __array_prepare__(self, obj, context=None):
        """Record a handled ufunc invocation before numpy executes it."""
        # If this uf is handled by Pint, write it down in the handling dictionary.
        # name of the ufunc, argument of the ufunc, domain of the ufunc
        # In ufuncs with multiple outputs, domain indicates which output
        # is currently being prepared (eg. see modf).
        # In ufuncs with a single output, domain is 0
        uf, objs, huh = context
        if uf.__name__ in self.__handled and huh == 0:
            # Only one ufunc should be handled at a time.
            # If a ufunc is already being handled (and this is not another domain),
            # something is wrong..
            if self.__handling:
                raise Exception('Cannot handled nested ufuncs.\n'
                                'Current: {0}\n'
                                'New: {1}'.format(context, self.__handling))
            self.__handling = context
        return obj
    def __array_wrap__(self, obj, context=None):
        """Finish a handled ufunc: convert inputs, run it, set output units."""
        uf, objs, huh = context
        # if this ufunc is not handled by Pint, pass it to the magnitude.
        if uf.__name__ not in self.__handled:
            return self.magnitude.__array_wrap__(obj, context)
        try:
            # `huh` is the output index; secondary outputs get suffixed names.
            ufname = uf.__name__ if huh == 0 else '{0}__{1}'.format(uf.__name__, huh)
            # First, we check the units of the input arguments.
            if huh == 0:
                # Do this only when the wrap is called for the first ouput.
                # Store the destination units
                dst_units = None
                # List of magnitudes of Quantities with the right units
                # to be used as argument of the ufunc
                mobjs = None
                if uf.__name__ in self.__require_units:
                    # ufuncs in __require_units
                    # require specific units
                    # This is more complex that it should be due to automatic
                    # conversion between radians/dimensionless
                    # TODO: maybe could be simplified using Contexts
                    dst_units = self.__require_units[uf.__name__]
                    if dst_units == 'radian':
                        mobjs = []
                        for other in objs:
                            unt = getattr(other, '_units', '')
                            if unt == 'radian':
                                mobjs.append(getattr(other, 'magnitude', other))
                            else:
                                factor, units = self._REGISTRY._get_root_units(unt)
                                if units and units != UnitsContainer({'radian': 1}):
                                    raise DimensionalityError(units, dst_units)
                                mobjs.append(getattr(other, 'magnitude', other) * factor)
                        mobjs = tuple(mobjs)
                    else:
                        dst_units = self._REGISTRY.parse_expression(dst_units)._units
                elif len(objs) > 1 and uf.__name__ not in self.__skip_other_args:
                    # ufunc with multiple arguments require that all inputs have
                    # the same arguments unless they are in __skip_other_args
                    dst_units = objs[0]._units
                # Do the conversion (if needed) and extract the magnitude for each input.
                if mobjs is None:
                    if dst_units is not None:
                        mobjs = tuple(self._REGISTRY.convert(getattr(other, 'magnitude', other),
                                                             getattr(other, 'units', ''),
                                                             dst_units)
                                      for other in objs)
                    else:
                        mobjs = tuple(getattr(other, 'magnitude', other)
                                      for other in objs)
                # call the ufunc
                out = uf(*mobjs)
                # If there are multiple outputs,
                # store them in __handling (uf, objs, huh, out0, out1, ...)
                # and return the first
                if uf.nout > 1:
                    self.__handling += out
                    out = out[0]
            else:
                # If this is not the first output,
                # just grab the result that was previously calculated.
                out = self.__handling[3 + huh]
            # Second, we set the units of the output value.
            if ufname in self.__set_units:
                try:
                    out = self.__class__(out, self.__set_units[ufname])
                except:
                    # NOTE(review): bare except also catches system-exiting
                    # exceptions — consider `except Exception`.
                    raise _Exception(ValueError)
            elif ufname in self.__copy_units:
                try:
                    out = self.__class__(out, self._units)
                except:
                    # NOTE(review): bare except, see above.
                    raise _Exception(ValueError)
            elif ufname in self.__prod_units:
                tmp = self.__prod_units[ufname]
                if tmp == 'size':
                    out = self.__class__(out, self._units ** self._magnitude.size)
                elif tmp == 'div':
                    units1 = objs[0]._units if isinstance(objs[0], self.__class__) else UnitsContainer()
                    units2 = objs[1]._units if isinstance(objs[1], self.__class__) else UnitsContainer()
                    out = self.__class__(out, units1 / units2)
                elif tmp == 'mul':
                    units1 = objs[0]._units if isinstance(objs[0], self.__class__) else UnitsContainer()
                    units2 = objs[1]._units if isinstance(objs[1], self.__class__) else UnitsContainer()
                    out = self.__class__(out, units1 * units2)
                else:
                    out = self.__class__(out, self._units ** tmp)
            return out
        except (DimensionalityError, UndefinedUnitError) as ex:
            raise ex
        except _Exception as ex:
            raise ex.internal
        except Exception as ex:
            # NOTE(review): other errors are printed and silently recovered
            # from by falling through to the magnitude's wrap — confirm intent.
            print(ex)
        finally:
            # If this is the last output argument for the ufunc,
            # we are done handling this ufunc.
            if uf.nout == huh + 1:
                self.__handling = None
        return self.magnitude.__array_wrap__(obj, context)
    # Measurement support
    def plus_minus(self, error, relative=False):
        """Return a Measurement pairing self with the given uncertainty.

        :param error: absolute error (Quantity or number) or, when
            `relative` is True, a relative error (fraction of magnitude)
        :param relative: interpret `error` as a relative error
        :raises ValueError: a Quantity error cannot be relative
        """
        if isinstance(error, self.__class__):
            if relative:
                raise ValueError('{} is not a valid relative error.'.format(error))
            error = error.to(self._units).magnitude
        else:
            if relative:
                error = error * abs(self.magnitude)
        return self._REGISTRY.Measurement(copy.copy(self.magnitude), error, self._units)
# methods/properties that help for math operations with offset units
@property
def _is_multiplicative(self):
"""Check if the Quantity object has only multiplicative units.
"""
return not self._get_non_multiplicative_units()
def _get_non_multiplicative_units(self):
"""Return a list of the of non-multiplicative units of the Quantity object
"""
offset_units = [unit for unit in self._units.keys()
if not self._REGISTRY._units[unit].is_multiplicative]
return offset_units
def _get_delta_units(self):
"""Return list of delta units ot the Quantity object
"""
delta_units = [u for u in self._units.keys() if u.startswith("delta_")]
return delta_units
def _has_compatible_delta(self, unit):
""""Check if Quantity object has a delta_unit that is compatible with unit
"""
deltas = self._get_delta_units()
if 'delta_' + unit in deltas:
return True
else: # Look for delta units with same dimension as the offset unit
offset_unit_dim = self._REGISTRY._units[unit].reference
for d in deltas:
if self._REGISTRY._units[d].reference == offset_unit_dim:
return True
return False
    def _ok_for_muldiv(self, no_offset_units=None):
        """Checks if Quantity object can be multiplied or divided

        :q: quantity object that is checked
        :no_offset_units: number of offset units in q
        """
        is_ok = True
        if no_offset_units is None:
            no_offset_units = len(self._get_non_multiplicative_units())
        # More than one offset unit can never be combined safely.
        if no_offset_units > 1:
            is_ok = False
        if no_offset_units == 1:
            # An offset unit mixed with other units is ambiguous.
            if len(self._units) > 1:
                is_ok = False
            if (len(self._units) == 1
                and not self._REGISTRY.autoconvert_offset_to_baseunit):
                is_ok = False
            # The offset unit's exponent must be exactly 1.
            if next(iter(self._units.values())) != 1:
                is_ok = False
        return is_ok
| |
###################################################
# SE2017/views.py: Consists of all the valid methods of faculty module of SAMS-IIITS
#__authors__ = "Vagdevi Kommineni", "Swathi Reddy", "Sonia","Indrojyothi Mondal"
#__copyright__ = "Copyright 2017, SE2017 Course"
#__Team__ = ["Vagdevi Kommineni", "Swathi Reddy", "Sonia","Indrojyothi Mondal"]
#__license__ = "MIT"
#__version__ = "1.2"
#__maintainer__ = "Vagdevi"
#__email__ = "vagdevi.k15@iiits.in"
#__status__ = "Development"
####################################################
from __future__ import unicode_literals
from django.contrib.auth.decorators import login_required
import json
from django.http import *
from django.shortcuts import *
from django.template import *
from home.models import *
from home.serializers import *
from django.utils import *
import datetime
from dateutil.parser import parse
@login_required
def index(request):
    """Display a timetable calendar.

    :param request: the request to be processed
    :returns: 'fullcalendar/calendar.html' rendered with the events as JSON
    """
    all_events = Events.objects.all()
    serializer = EventsSerializer(all_events, many=True)
    # FullCalendar expects a list of {title, start, allDay} objects.
    # (A leftover debug `print` of the serialized data was removed.)
    events = [{"title": item["Event_Name"],
               "start": item["Event_Date"],
               "allDay": True}
              for item in serializer.data]
    return render(request, 'fullcalendar/calendar.html',
                  {"Events": json.dumps(events)})
@login_required
def ViewProfs(request):
    """Display a dropdown of courses offered by the logged-in faculty.

    :param request: the request to be processed
    :returns: 'prof.html' rendered with flag, course list, username and hint
    """
    CourseList = []
    # `hint` selects which help message the template shows; default is 2.
    hint = request.GET.get('hint') or 2
    if request.user.personnel.Role.Role_name == 'Faculty':
        request.session['Prof_Name'] = request.user.username
        person_id = request.user.personnel.Person_ID
        # Collect the courses taught by the logged-in instructor.
        for ic in Instructors_Courses.objects.all():
            if person_id == ic.Inst_ID.Person_ID:
                CourseList.append(ic.Course_ID.Course_Name)
    flag = 1 if CourseList else 0
    template = loader.get_template('prof.html')
    context = {'flag': flag, 'Courses': CourseList,
               'Prof_Name': request.session['Prof_Name'], 'hint': hint}
    return HttpResponse(template.render(context, request))
@login_required
def CoursePage(request):
    """Show and save the course-description form.

    :param request: the request to be processed
    :returns: 'prof1.html' rendered with the course object and course name
    """
    saving = request.POST.get('action') == 'Save'
    if saving:
        # Persist the edited description for the course in the session.
        course = Courses.objects.get(Course_Name=request.session['course'])
        course.Course_description = request.POST.get('coursedes')
        course.save()
    else:
        # A new course was picked from the dropdown; remember it.
        request.session['course'] = request.POST.get('dropdown')
        course = get_object_or_404(Courses, Course_Name=request.session['course'])
    context = {'Course': course, 'CourseName': request.session['course']}
    return HttpResponse(loader.get_template('prof1.html').render(context, request))
@login_required
def AddAssignment(request):
    """Display and process the assignment-upload form.

    :param request: the request to be processed
    :returns: 'forms.html' rendered with the course name and a success flag
        (0 = nothing submitted, 1 = saved, 2 = deadline already past), or a
        redirect to the course-selection page when no course is chosen.
    """
    if request.method == 'POST':
        now = datetime.now()
        # Reject deadlines that are already in the past.
        if parse(request.POST.get('enddate')) >= now:
            course = Courses.objects.get(Course_Name=request.session['course'])
            Assignment(Course_ID=course,
                       Assignment_File=request.FILES['file'],
                       End_Time=request.POST.get('enddate')).save()
            success = 1
        else:
            success = 2
        return render(request, 'forms.html',
                      {'CourseName': request.session['course'],
                       'success': success})
    # GET: make sure a course is selected before showing the form.
    person_id = request.user.personnel.Person_ID
    CourseList = [ic.Course_ID.Course_Name
                  for ic in Instructors_Courses.objects.all()
                  if person_id == ic.Inst_ID.Person_ID]
    if 'course' in request.session:
        # BUG FIX: the flag was previously passed under the key 's', so the
        # template (which reads 'success') never received it.
        return render(request, 'forms.html',
                      {'CourseName': request.session['course'], 'success': 0})
    if not CourseList:
        return redirect(reverse('faculty:ViewProfs') + "?flag=0&hint=1")
    return redirect(reverse('faculty:ViewProfs') + "?hint=0")
@login_required
def ViewAssignment(request):
    """Display a table of assignments and their deadlines.

    :param request: the request to be processed
    :returns: 'assignment.html' rendered with the course's assignments, or a
        redirect to the course-selection page when no course is chosen.
    """
    person_id = request.user.personnel.Person_ID
    CourseList = [ic.Course_ID.Course_Name
                  for ic in Instructors_Courses.objects.all()
                  if person_id == ic.Inst_ID.Person_ID]
    # BUG FIX: the session checks used to live *inside* the per-assignment
    # loop, so with no assignments and no selected course the view fell
    # through and crashed with a KeyError on request.session['course'].
    if 'course' not in request.session:
        if not CourseList:
            return redirect(reverse('faculty:ViewProfs') + "?flag=0&hint=1")
        return redirect(reverse('faculty:ViewProfs') + "?hint=0")
    # Library files use the 1900-01-01 sentinel End_Time; exclude them here.
    sentinel = datetime.strptime('1900-01-01', "%Y-%m-%d").date()
    asslist = [a for a in Assignment.objects.all()
               if a.Course_ID.Course_Name == request.session['course']
               and a.End_Time.date() != sentinel]
    return render(request, 'assignment.html',
                  {'Assignments': asslist,
                   'CourseName': request.session['course']})
@login_required
def OfferCourses(request):
    """Display and process the course-offering form.

    On POST, registers the logged-in instructor for the selected courses and
    redirects back here; on GET, shows the courses not yet claimed.

    :param request: the request to be processed
    :returns: redirect (POST) or 'reg.html' rendered with the available
        courses, a JSON id/name list, all offerings and the username.
    """
    if request.method == 'POST':
        person = Personnel.objects.get(
            Person_ID=request.user.personnel.Person_ID)
        for cid in request.POST.getlist('courses[]'):
            corse = Courses.objects.get(Course_ID=cid)
            Instructors_Courses(Course_ID=corse, Inst_ID=person,
                                Start_Date='2017-1-1',
                                End_Date='2017-1-1').save()
        return redirect(reverse('faculty:OfferCourses'))
    IC = Instructors_Courses.objects.all()
    taken = [ic.Course_ID for ic in IC]
    courses = Courses.objects.all()
    # Courses that no instructor has claimed yet.
    available = [course for course in courses if course not in taken]
    # Flat [id, name, id, name, ...] list consumed by the template's JS.
    # (An unused `person_id` local in this branch was removed.)
    courselist = []
    for course in courses:
        courselist.append(course.Course_ID)
        courselist.append(course.Course_Name)
    template = loader.get_template('reg.html')
    context = {'Courses': available, 'Courses1': json.dumps(courselist),
               'IC': IC, 'Prof_Name': request.user.username}
    return HttpResponse(template.render(context, request))
@login_required
def ViewAttendance(request):
    """Display per-session attendance counts for the session course.

    :param request: the request to be processed
    :returns: 'attendance.html' rendered with {session_id: [date, count]}
        and the course name, or a redirect when no course is chosen.
    """
    person_id = request.user.personnel.Person_ID
    CourseList = [ic.Course_ID.Course_Name
                  for ic in Instructors_Courses.objects.all()
                  if person_id == ic.Inst_ID.Person_ID]
    # BUG FIX: the session checks used to live *inside* the per-session
    # loop, so with no sessions and no selected course the view fell through
    # and crashed with a KeyError on request.session['course'].
    if 'course' not in request.session:
        if not CourseList:
            return redirect(reverse('faculty:ViewProfs') + "?flag=0&hint=1")
        return redirect(reverse('faculty:ViewProfs') + "?hint=0")
    # Map session id -> [session date, number of students marked present].
    sessionlist = {}
    for session in Attendance_Session.objects.all():
        if session.Course_Slot.Course_ID.Course_Name == request.session['course']:
            sessionlist[session.Session_ID] = [session.Date_time.date, 0]
    # Single pass over attendance rows instead of the old nested loops.
    for student in Attendance.objects.all():
        sid = student.ASession_ID.Session_ID
        if sid in sessionlist and student.Marked == 'P':
            sessionlist[sid][1] += 1
    template = loader.get_template('attendance.html')
    context = {'sessions': sessionlist,
               'CourseName': request.session['course']}
    return HttpResponse(template.render(context, request))
@login_required
def ViewAttendanceDetails(request):
    """Display the per-student attendance records for one session.

    :param request: the request to be processed; the ``id`` query parameter
        identifies the attendance session
    :returns: 'details.html' rendered with the students, course name and date
    """
    slotid = request.GET.get('id')
    session = Attendance_Session.objects.get(Session_ID=slotid)
    # Keep only the attendance rows belonging to the requested session.
    studentlist = [record for record in Attendance.objects.all()
                   if str(record.ASession_ID.Session_ID) == str(slotid)]
    context = {'students': studentlist,
               'CourseName': request.session['course'],
               'date': session.Date_time.date}
    return HttpResponse(loader.get_template('details.html').render(context, request))
@login_required
def MyLibrary(request):
    """Upload course files and list the course's library files.

    On POST, saves each uploaded file as a library entry (the sentinel
    End_Time 1900-01-01 distinguishes library files from assignments), then
    re-renders the table. On GET, just renders the table.

    :param request: the request to be processed
    :returns: 'lib.html' rendered with MyLibList, CourseName and success
    """
    success = 0
    course_name = request.session['course']
    if request.method == 'POST':
        # Fetch the course directly instead of scanning all courses.
        course = Courses.objects.get(Course_Name=course_name)
        for libfile in request.FILES.getlist("files"):
            Assignment(Course_ID=course, Assignment_File=libfile,
                       Start_Time=datetime.now(),
                       End_Time='1900-01-01').save()
        success = 1
    # Library files are assignments whose End_Time is the sentinel date.
    # (This list was previously built twice, once per branch.)
    sentinel = datetime.strptime('1900-01-01', "%Y-%m-%d").date()
    asslist = [a for a in Assignment.objects.all()
               if a.Course_ID.Course_Name == course_name
               and a.End_Time.date() == sentinel]
    return render(request, 'lib.html',
                  {'MyLibList': asslist, 'CourseName': course_name,
                   'success': success})
| |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Helper functions for mip-NeRF."""
from jax import lax
from jax import random
import jax.numpy as jnp
from internal import math
def pos_enc(x, min_deg, max_deg, append_identity=True):
  """The positional encoding used by the original NeRF paper.

  Args:
    x: jnp.ndarray, [..., d] input coordinates.
    min_deg: int, smallest power-of-two frequency used (inclusive).
    max_deg: int, largest power-of-two frequency used (exclusive).
    append_identity: bool, if True prepend `x` itself to the features.

  Returns:
    [..., d*(max_deg-min_deg)*2 (+ d)] array of sin/cos features.
  """
  scales = jnp.array([2**i for i in range(min_deg, max_deg)])
  xb = jnp.reshape((x[..., None, :] * scales[:, None]),
                   list(x.shape[:-1]) + [-1])
  # sin(y + pi/2) == cos(y): one concatenated sin() covers both halves.
  four_feat = jnp.sin(jnp.concatenate([xb, xb + 0.5 * jnp.pi], axis=-1))
  if append_identity:
    # Idiom fix: concatenate the two arrays directly instead of building
    # throwaway single-element lists ([x] + [four_feat]).
    return jnp.concatenate([x, four_feat], axis=-1)
  else:
    return four_feat
def expected_sin(x, x_var):
  """Estimates mean and variance of sin(z), z ~ N(x, var)."""
  # When the variance is wide, E[sin(z)] shrinks toward zero.
  mean = jnp.exp(-0.5 * x_var) * math.safe_sin(x)
  # Var[sin(z)], clamped at zero to guard against round-off.
  variance = 0.5 * (1 - jnp.exp(-2 * x_var) * math.safe_cos(2 * x)) - mean**2
  return mean, jnp.maximum(0, variance)
def lift_gaussian(d, t_mean, t_var, r_var, diag):
  """Lift a Gaussian defined along a ray to 3D coordinates."""
  mean = d[..., None, :] * t_mean[..., None]
  # Guard against zero-length direction vectors.
  d_mag_sq = jnp.maximum(1e-10, jnp.sum(d**2, axis=-1, keepdims=True))
  if diag:
    d_outer_diag = d**2
    null_outer_diag = 1 - d_outer_diag / d_mag_sq
    # Diagonal covariance: along-ray term plus perpendicular term.
    cov_diag = (t_var[..., None] * d_outer_diag[..., None, :] +
                r_var[..., None] * null_outer_diag[..., None, :])
    return mean, cov_diag
  d_outer = d[..., :, None] * d[..., None, :]
  # Projector onto the plane perpendicular to d.
  null_outer = (jnp.eye(d.shape[-1]) -
                d[..., :, None] * (d / d_mag_sq)[..., None, :])
  cov = (t_var[..., None, None] * d_outer[..., None, :, :] +
         r_var[..., None, None] * null_outer[..., None, :, :])
  return mean, cov
def conical_frustum_to_gaussian(d, t0, t1, base_radius, diag, stable=True):
  """Approximate a conical frustum as a Gaussian distribution (mean+cov).

  Assumes the ray is originating from the origin, and base_radius is the
  radius at dist=1. Doesn't assume `d` is normalized.

  Args:
    d: jnp.float32 3-vector, the axis of the cone
    t0: float, the starting distance of the frustum.
    t1: float, the ending distance of the frustum.
    base_radius: float, the scale of the radius as a function of distance.
    diag: boolean, whether or the Gaussian will be diagonal or full-covariance.
    stable: boolean, whether or not to use the stable computation described in
      the paper (setting this to False will cause catastrophic failure).

  Returns:
    a Gaussian (mean and covariance).
  """
  if stable:
    # Reparameterize by midpoint mu and half-width hw; this algebraically
    # equivalent form avoids the catastrophic cancellation that the direct
    # moment formulas below suffer when t0 ~ t1 (hw -> 0).
    mu = (t0 + t1) / 2
    hw = (t1 - t0) / 2
    t_mean = mu + (2 * mu * hw**2) / (3 * mu**2 + hw**2)
    t_var = (hw**2) / 3 - (4 / 15) * ((hw**4 * (12 * mu**2 - hw**2)) /
                                      (3 * mu**2 + hw**2)**2)
    r_var = base_radius**2 * ((mu**2) / 4 + (5 / 12) * hw**2 - 4 / 15 *
                              (hw**4) / (3 * mu**2 + hw**2))
  else:
    # Direct moments of the frustum: E[t], the radial second moment, and
    # E[t^2] (t_mosq), from which the axial variance follows.
    t_mean = (3 * (t1**4 - t0**4)) / (4 * (t1**3 - t0**3))
    r_var = base_radius**2 * (3 / 20 * (t1**5 - t0**5) / (t1**3 - t0**3))
    t_mosq = 3 / 5 * (t1**5 - t0**5) / (t1**3 - t0**3)
    t_var = t_mosq - t_mean**2
  return lift_gaussian(d, t_mean, t_var, r_var, diag)
def cylinder_to_gaussian(d, t0, t1, radius, diag):
  """Approximate a cylinder as a Gaussian distribution (mean+cov).

  Assumes the ray is originating from the origin, and radius is the
  radius. Does not renormalize `d`.

  Args:
    d: jnp.float32 3-vector, the axis of the cylinder
    t0: float, the starting distance of the cylinder.
    t1: float, the ending distance of the cylinder.
    radius: float, the radius of the cylinder
    diag: boolean, whether or the Gaussian will be diagonal or full-covariance.

  Returns:
    a Gaussian (mean and covariance).
  """
  # Moments of a uniform distribution on [t0, t1] along the axis, and of a
  # uniform disk of the given radius perpendicular to it.
  axial_mean = (t0 + t1) / 2
  radial_var = radius**2 / 4
  axial_var = (t1 - t0)**2 / 12
  return lift_gaussian(d, axial_mean, axial_var, radial_var, diag)
def cast_rays(t_vals, origins, directions, radii, ray_shape, diag=True):
  """Cast rays (cone- or cylinder-shaped) and featurize sections of it.

  Args:
    t_vals: float array, the "fencepost" distances along the ray.
    origins: float array, the ray origin coordinates.
    directions: float array, the ray direction vectors.
    radii: float array, the radii (base radii for cones) of the rays.
    ray_shape: string, the shape of the ray, must be 'cone' or 'cylinder'.
    diag: boolean, whether or not the covariance matrices should be diagonal.

  Returns:
    a tuple of arrays of means and covariances.
  """
  gaussian_fns = {
      'cone': conical_frustum_to_gaussian,
      'cylinder': cylinder_to_gaussian,
  }
  assert ray_shape in gaussian_fns
  # Consecutive fenceposts bound each ray segment.
  t0, t1 = t_vals[..., :-1], t_vals[..., 1:]
  means, covs = gaussian_fns[ray_shape](directions, t0, t1, radii, diag)
  # Shift the per-segment means from the origin-centered frame to world space.
  means = means + origins[..., None, :]
  return means, covs
def integrated_pos_enc(x_coord, min_deg, max_deg, diag=True):
  """Encode `x` with sinusoids scaled by 2^[min_deg:max_deg-1].

  Args:
    x_coord: a tuple containing: x, jnp.ndarray, variables to be encoded. Should
      be in [-pi, pi]. x_cov, jnp.ndarray, covariance matrices for `x`.
    min_deg: int, the min degree of the encoding.
    max_deg: int, the max degree of the encoding.
    diag: bool, if true, expects input covariances to be diagonal (full
      otherwise).

  Returns:
    encoded: jnp.ndarray, encoded variables.
  """
  if diag:
    x, x_cov_diag = x_coord
    scales = jnp.array([2**i for i in range(min_deg, max_deg)])
    flat_shape = list(x.shape[:-1]) + [-1]
    # Scale every coordinate by every frequency, then flatten (freq, dim).
    y = jnp.reshape(x[..., None, :] * scales[:, None], flat_shape)
    y_var = jnp.reshape(x_cov_diag[..., None, :] * scales[:, None]**2,
                        flat_shape)
  else:
    x, x_cov = x_coord
    num_dims = x.shape[-1]
    # Scaled identity blocks stacked side by side: one block per frequency.
    basis = jnp.concatenate(
        [2**i * jnp.eye(num_dims) for i in range(min_deg, max_deg)], 1)
    y = math.matmul(x, basis)
    # Get the diagonal of a covariance matrix (ie, variance). This is
    # equivalent to jax.vmap(jnp.diag)((basis.T @ covs) @ basis).
    y_var = jnp.sum((math.matmul(x_cov, basis)) * basis, -2)
  # sin features plus a pi/2 phase shift (cos), each with matching variance.
  y_pair = jnp.concatenate([y, y + 0.5 * jnp.pi], axis=-1)
  var_pair = jnp.concatenate([y_var, y_var], axis=-1)
  return expected_sin(y_pair, var_pair)[0]
def volumetric_rendering(rgb, density, t_vals, dirs, white_bkgd):
  """Volumetric Rendering Function.

  Args:
    rgb: jnp.ndarray(float32), color, [batch_size, num_samples, 3]
    density: jnp.ndarray(float32), density, [batch_size, num_samples, 1].
    t_vals: jnp.ndarray(float32), [batch_size, num_samples].
    dirs: jnp.ndarray(float32), [batch_size, 3].
    white_bkgd: bool.

  Returns:
    comp_rgb: jnp.ndarray(float32), [batch_size, 3].
    distance: jnp.ndarray(float32), [batch_size].
    acc: jnp.ndarray(float32), [batch_size].
    weights: jnp.ndarray(float32), [batch_size, num_samples]
  """
  t_mids = 0.5 * (t_vals[..., :-1] + t_vals[..., 1:])
  t_dists = t_vals[..., 1:] - t_vals[..., :-1]
  # Convert parametric distances to metric lengths along each (unnormalized) ray.
  delta = t_dists * jnp.linalg.norm(dirs[..., None, :], axis=-1)
  # Note that we're quietly turning density from [..., 0] to [...].
  density_delta = density[..., 0] * delta
  alpha = 1 - jnp.exp(-density_delta)
  # Transmittance: probability the ray reaches each segment unoccluded.
  trans = jnp.exp(-jnp.concatenate([
      jnp.zeros_like(density_delta[..., :1]),
      jnp.cumsum(density_delta[..., :-1], axis=-1)
  ],
                                   axis=-1))
  weights = alpha * trans
  comp_rgb = (weights[..., None] * rgb).sum(axis=-2)
  acc = weights.sum(axis=-1)
  distance = (weights * t_mids).sum(axis=-1) / acc
  # BUGFIX: `nan` must be passed by keyword. Previously `jnp.inf` was bound to
  # nan_to_num's second positional parameter (`copy`), so zero-weight rays
  # (acc == 0, distance == NaN) were replaced with 0 and clipped to the *near*
  # plane; the intent is to replace NaN with +inf and clip to the far plane.
  distance = jnp.clip(
      jnp.nan_to_num(distance, nan=jnp.inf), t_vals[:, 0], t_vals[:, -1])
  if white_bkgd:
    comp_rgb = comp_rgb + (1. - acc[..., None])
  return comp_rgb, distance, acc, weights
def sample_along_rays(key, origins, directions, radii, num_samples, near, far,
                      randomized, lindisp, ray_shape):
  """Stratified sampling along the rays.

  Args:
    key: jnp.ndarray, random generator key.
    origins: jnp.ndarray(float32), [batch_size, 3], ray origins.
    directions: jnp.ndarray(float32), [batch_size, 3], ray directions.
    radii: jnp.ndarray(float32), [batch_size, 3], ray radii.
    num_samples: int.
    near: jnp.ndarray, [batch_size, 1], near clip.
    far: jnp.ndarray, [batch_size, 1], far clip.
    randomized: bool, use randomized stratified sampling.
    lindisp: bool, sampling linearly in disparity rather than depth.
    ray_shape: string, which shape ray to assume.

  Returns:
    t_vals: jnp.ndarray, [batch_size, num_samples], sampled z values.
    means: jnp.ndarray, [batch_size, num_samples, 3], sampled means.
    covs: jnp.ndarray, [batch_size, num_samples, 3, 3], sampled covariances.
  """
  batch_size = origins.shape[0]
  # Evenly spaced fenceposts in [0, 1], then warped into [near, far].
  fractions = jnp.linspace(0., 1., num_samples + 1)
  if lindisp:
    # Linear in inverse depth (disparity).
    t_vals = 1. / (1. / near * (1. - fractions) + 1. / far * fractions)
  else:
    t_vals = near * (1. - fractions) + far * fractions
  if randomized:
    # Jitter each fencepost uniformly within its stratum.
    mids = 0.5 * (t_vals[..., 1:] + t_vals[..., :-1])
    upper = jnp.concatenate([mids, t_vals[..., -1:]], -1)
    lower = jnp.concatenate([t_vals[..., :1], mids], -1)
    jitter = random.uniform(key, [batch_size, num_samples + 1])
    t_vals = lower + (upper - lower) * jitter
  else:
    # Broadcast t_vals to make the returned shape consistent.
    t_vals = jnp.broadcast_to(t_vals, [batch_size, num_samples + 1])
  means, covs = cast_rays(t_vals, origins, directions, radii, ray_shape)
  return t_vals, (means, covs)
def resample_along_rays(key, origins, directions, radii, t_vals, weights,
                        randomized, ray_shape, stop_grad, resample_padding):
  """Resampling.

  Args:
    key: jnp.ndarray(float32), [2,], random number generator.
    origins: jnp.ndarray(float32), [batch_size, 3], ray origins.
    directions: jnp.ndarray(float32), [batch_size, 3], ray directions.
    radii: jnp.ndarray(float32), [batch_size, 3], ray radii.
    t_vals: jnp.ndarray(float32), [batch_size, num_samples+1].
    weights: jnp.array(float32), weights for t_vals
    randomized: bool, use randomized samples.
    ray_shape: string, which kind of shape to assume for the ray.
    stop_grad: bool, whether or not to backprop through sampling.
    resample_padding: float, added to the weights before normalizing.

  Returns:
    t_vals: jnp.ndarray(float32), [batch_size, num_samples+1].
    points: jnp.ndarray(float32), [batch_size, num_samples, 3].
  """
  # Blurpool: edge-pad, max-filter adjacent pairs, then average them.
  padded = jnp.concatenate(
      [weights[..., :1], weights, weights[..., -1:]], axis=-1)
  maxes = jnp.maximum(padded[..., :-1], padded[..., 1:])
  blurred = 0.5 * (maxes[..., :-1] + maxes[..., 1:])
  # Add in a constant (the sampling function will renormalize the PDF).
  pdf_weights = blurred + resample_padding
  new_t_vals = math.sorted_piecewise_constant_pdf(
      key,
      t_vals,
      pdf_weights,
      t_vals.shape[-1],
      randomized,
  )
  if stop_grad:
    new_t_vals = lax.stop_gradient(new_t_vals)
  means, covs = cast_rays(new_t_vals, origins, directions, radii, ray_shape)
  return new_t_vals, (means, covs)
| |
#!/usr/bin/python
#
# occi_api.py - common functions, classes, and variables for Vcycle
#
# Andrew McNab, University of Manchester.
# Luis Villazon Esteban, CERN.
# Copyright (c) 2013-5. All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# o Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
# o Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Contacts: Andrew.McNab@cern.ch http://www.gridpp.ac.uk/vcycle/
# Luis.Villazon.Esteban@cern.ch
#
import requests
import time
import base64
import vcycle.vacutils
class OcciError(Exception):
  """Error raised by OCCI-specific Vcycle operations."""
  pass
# Concatenated CA bundle used to verify OCCI/Keystone endpoints; built and
# refreshed from /etc/grid-security/certificates by OcciSpace._create_ca_file.
ca_path = '/etc/grid-security/occi.ca-certs'
class OcciSpace(vcycle.BaseSpace):
  """Vcycle space implementation talking to an OCCI cloud endpoint."""

  def __init__(self, api, spaceName, parser, spaceSectionName):
    """Initialize data structures from configuration files."""

    # Generic initialization
    vcycle.BaseSpace.__init__(self, api, spaceName, parser, spaceSectionName)

    # OCCI-specific initialization
    try:
      self.tenancy_name = parser.get(spaceSectionName, 'tenancy_name')
    except Exception as e:
      raise OcciError('tenancy_name is required in Occi [space ' + spaceName + '] (' + str(e) + ')')

    try:
      self.queryURL = parser.get(spaceSectionName, 'url')
    except Exception as e:
      raise OcciError('url is required in Occi [space ' + spaceName + '] (' + str(e) + ')')

    if self.queryURL.endswith('/'):
      self.queryURL = self.queryURL[:-1]

    # BUGFIX: make sure these always exist, since _get_token() tests
    # self.userkey and requests calls pass cert=self.usercert even when the
    # username/password branch below is taken.
    self.userkey = getattr(self, 'userkey', None)
    self.usercert = getattr(self, 'usercert', None)

    # Check if proxy is in the configuration; if not, use username-password
    if parser.has_option(spaceSectionName, 'proxy'):
      self.userkey = parser.get(spaceSectionName, 'proxy')
      self.usercert = parser.get(spaceSectionName, 'proxy')
    else:
      # Check username and password are defined
      if not parser.has_option(spaceSectionName, 'username'):
        raise OcciError('username is required in Occi [space %s]' % spaceName)
      if not parser.has_option(spaceSectionName, 'password'):
        raise OcciError('password is required in Occi [space %s]' % spaceName)
      self.username = parser.get(spaceSectionName, 'username')
      # Passwords are stored obfuscated: every character code shifted up by one
      self.password = ''.join([chr(ord(c) - 1) for c in parser.get(spaceSectionName, 'password')])

    self._create_ca_file()

  def connect(self):
    """Connect to the OCCI service and prepare an authenticated session."""
    self.session = requests.Session()
    self.session.mount(self.queryURL, requests.adapters.HTTPAdapter(pool_connections=20))

    # Retrieve token
    keystone_url = self._get_keystone()
    if keystone_url is not None:
      vcycle.vacutils.logLine("Found Keystone URL %s" % keystone_url)
      self.token = self._get_token(keystone_url)
      self.session.headers.clear()
      self.session.headers.update({"X-Auth-Token": self.token})
      self.session.cert = self.usercert
      self.session.verify = ca_path
    self._get_definitions()
    self.computeURL = "%s/compute/" % (self.queryURL)
    vcycle.vacutils.logLine("Connected to %s for space %s" % (self.queryURL, self.spaceName))

  def scanMachines(self):
    """Query OCCI compute service for details of machines in this space"""
    headers = {'Accept': 'application/occi+json',
               'Content-Type': 'application/occi+json'}

    try:
      response = self.session.get(self.computeURL)
    except Exception as e:
      raise OcciError('Cannot connect to ' + self.computeURL + ' (' + str(e) + ')')

    # First line of the listing is a header; every other line ends in an ID
    for machineID in [line[line.rfind('/') + 1:] for line in response.text.split("\n")[1:]]:
      try:
        response = self.session.get("%s/%s" % (self.computeURL, machineID), headers=headers)
      except Exception as e:
        raise OcciError('Cannot connect to %s/%s (%s)' % (self.computeURL, machineID, str(e)))

      response = response.json()
      machineName = response['attributes']['occi.compute.hostname']
      occiState = response['attributes']['org.openstack.compute.state'].lower()
      uuidStr = response['attributes']['occi.core.id']

      try:
        ip = response['links'][0]['attributes']['occi.networkinterface.address']
      except Exception:
        ip = '0.0.0.0'

      # Just in case other VMs are in this space
      if machineName[:7] != 'vcycle-':
        # Still count VMs that we didn't create and won't manage, to avoid going above space limit
        self.totalMachines += 1
        continue

      # With OCCI will have to use our file datestamps to get transition times
      # NOTE(review): all three timestamps read the 'started' file; presumably
      # separate 'created'/'updated' stamps are not kept for OCCI - confirm.
      try:
        createdTime = int(open('/var/lib/vcycle/machines/' + machineName + '/started', 'r').read().strip())
        updatedTime = int(open('/var/lib/vcycle/machines/' + machineName + '/started', 'r').read().strip())
        startedTime = int(open('/var/lib/vcycle/machines/' + machineName + '/started', 'r').read().strip())
      except Exception:
        createdTime = None
        updatedTime = None
        startedTime = None

      if occiState == 'active':
        state = vcycle.MachineState.running
      elif occiState == 'inactive':
        state = vcycle.MachineState.shutdown
      else:
        state = vcycle.MachineState.unknown

      self.machines[machineName] = vcycle.shared.Machine(name=machineName,
                                                         spaceName=self.spaceName,
                                                         state=state,
                                                         ip=ip,
                                                         createdTime=createdTime,
                                                         startedTime=startedTime,
                                                         updatedTime=updatedTime,
                                                         uuidStr=uuidStr,
                                                         machinetypeName=None)

  def createMachine(self, machineName, machinetypeName):
    """Create one VM via the OCCI compute endpoint and record it locally.

    :param machineName: name of the new VM (also becomes its hostname)
    :param machinetypeName: machinetype describing image/flavor/public key
    :return: machineName
    """
    # OCCI-specific machine creation steps

    #    'metadata'  : { 'cern-services'   : 'false',
    #                    'machinefeatures' : 'http://' + os.uname()[1] + '/' + machineName + '/machinefeatures',
    #                    'jobfeatures'     : 'http://' + os.uname()[1] + '/' + machineName + '/jobfeatures',
    #                    'machineoutputs'  : 'https://' + os.uname()[1] + '/' + machineName + '/machineoutputs' }
    import uuid
    headers = {'X-Auth-Token': self.token,
               'Accept': 'text/plain,text/occi',
               'Content-Type': 'text/plain,text/occi',
               'Connection': 'close'
               }

    # root_image is expected as 'image:<name>'; strip the 6-character prefix
    image = self.machinetypes[machinetypeName].root_image[6:].strip()

    data = 'Category: compute;scheme="http://schemas.ogf.org/occi/infrastructure#";class="kind";location="/compute/";title="Compute Resource"\n'
    data += 'Category: %s;%s;class="mixin";location="/%s"\n' % (image, self.categories[image]['scheme'], image)
    data += 'Category: %s;%s;class="mixin";location="/%s"\n' % (self.machinetypes[machinetypeName].flavor_name, self.categories[self.machinetypes[machinetypeName].flavor_name]['scheme'], self.machinetypes[machinetypeName].flavor_name)
    data += 'Category: user_data;"%s";class="mixin";location="%s";title="OS contextualization mixin"\n' % (self.categories['user_data']['scheme'], self.categories['user_data']['location'])
    data += 'X-OCCI-Attribute: occi.core.id="%s"\n' % str(uuid.uuid4())
    data += 'X-OCCI-Attribute: occi.core.title="%s"\n' % machineName
    data += 'X-OCCI-Attribute: occi.compute.hostname="%s"\n' % machineName
    # BUGFIX: terminate this attribute line; previously the publickey
    # attribute below was appended onto the same line as user_data.
    data += 'X-OCCI-Attribute: org.openstack.compute.user_data="%s"\n' % base64.b64encode(open('/var/lib/vcycle/machines/' + machineName + '/user_data', 'r').read())

    if self.machinetypes[machinetypeName].root_public_key:
      if self.machinetypes[machinetypeName].root_public_key[0] == '/':
        try:
          f = open(self.machinetypes[machinetypeName].root_public_key, 'r')
        except Exception as e:
          # BUGFIX: the OcciError was previously created but never raised
          raise OcciError('Cannot open ' + self.machinetypes[machinetypeName].root_public_key)
      else:
        try:
          # BUGFIX: previously used undefined attribute self.machinetypeName
          f = open('/var/lib/vcycle/' + self.spaceName + '/' + machinetypeName + '/' + self.machinetypes[machinetypeName].root_public_key, 'r')
        except Exception as e:
          raise OcciError('Cannot open ' + self.spaceName + '/' + machinetypeName + '/' + self.machinetypes[machinetypeName].root_public_key)

      # BUGFIX: f.read() returns the whole file, so the old while-loop either
      # matched on the first read or spun forever on ''. Scan lines instead.
      sshPublicKey = None
      for line in f.read().splitlines():
        if line[:8] == 'ssh-rsa ':
          sshPublicKey = line.split(' ')[1]
          break
      f.close()
      if sshPublicKey is None:
        raise OcciError('Cannot find ssh-rsa public key line in ' + self.machinetypes[machinetypeName].root_public_key)
      data += 'X-OCCI-Attribute: org.openstack.credentials.publickey.data="ssh-rsa ' + sshPublicKey + ' vcycle"'

    try:
      response = self.session.post(self.computeURL, data=data, headers=headers)
      if response.status_code not in [200, 201]:
        raise OcciError(response.text)
    except Exception as e:
      raise OcciError('Cannot connect to ' + self.computeURL + ' (' + str(e) + ')')

    vcycle.vacutils.logLine('Created ' + machineName + ' for ' + machinetypeName + ' within ' + self.spaceName)

    self.machines[machineName] = vcycle.shared.Machine(name=machineName,
                                                       spaceName=self.spaceName,
                                                       state=vcycle.MachineState.starting,
                                                       ip='0.0.0.0',
                                                       createdTime=int(time.time()),
                                                       startedTime=int(time.time()),
                                                       updatedTime=int(time.time()),
                                                       uuidStr=None,
                                                       machinetypeName=machinetypeName)
    return machineName

  def deleteOneMachine(self, machineName):
    """Deletes a VM from the provider

    :param machineName: vm identifier
    """
    try:
      # computeURL already ends in '/', so append the bare uuid
      self.session.delete("%s%s" % (self.computeURL, self.machines[machineName].uuidStr))
    except Exception as e:
      raise vcycle.shared.VcycleError('Cannot delete ' + machineName + ' via ' + self.computeURL + ' (' + str(e) + ')')

  def _get_definitions(self):
    """Store the schema definitions (categories) needed to create VMs"""
    headers = {'X-Auth-Token': self.token,
               'Accept': 'text/plain,text/occi'}
    response = requests.get("%s/-/" % self.queryURL,
                            headers=headers,
                            cert=self.usercert,
                            verify=ca_path)
    self.categories = {}
    # First line of the listing is a header; each remaining line is a category
    categories = response.text.split("\n")[1:]
    for category in categories:
      values = category.split(";")
      cat = values[0][values[0].find(":") + 1:].strip()
      self.categories[cat] = {}
      for prop in values:
        if prop.find("scheme=") >= 0:
          self.categories[cat]["scheme"] = prop.strip()
        if prop.find("class=") >= 0:
          self.categories[cat]["class"] = prop.strip()
        if prop.find("title=") >= 0:
          self.categories[cat]["title"] = prop.strip()
        if prop.find("location=") >= 0:
          # Reduce absolute URLs to a path, keeping the location="... form
          aux = prop.strip()
          aux = aux.replace("https://", "")
          aux = aux.replace("http://", "")
          aux = aux[aux.find("/"):]
          self.categories[cat]["location"] = 'location="' + aux

  def _get_keystone(self):
    """ Returns The authorization token to retrieve the OCCI token

    :return: The keystone url, or None if no www-authenticate header is given
    """
    try:
      result = requests.head(self.queryURL + '/-/',
                             headers={"Content-Type": "application/json"},
                             cert=self.usercert,
                             verify=ca_path
                             )
    except Exception as e:
      raise OcciError('Cannot connect to ' + self.queryURL + ' (' + str(e) + ')')

    # This is implicitly only for Keystone authentication
    if result.status_code != 401 or result.headers is None:
      raise OcciError('Do not recognise response when connecting to ' + self.queryURL)

    if 'www-authenticate' not in result.headers:
      return None

    # Explicitly check for Keystone using hard-coded string index values for now
    if not result.headers['www-authenticate'].startswith("Keystone uri="):
      raise OcciError('Only Keystone authentication is currently supported (instead got "%s")' %
                      result.headers['www-authenticate'])

    try:
      # Header looks like: Keystone uri='https://host:5000/v2.0'
      keystoneURL = result.headers['www-authenticate'][14:-1]
      keystoneURL = keystoneURL.replace("/v2.0", '')
    except Exception:
      raise OcciError("Failed to find Keystone URL in %s" % result.headers['www-authenticate'])

    return keystoneURL

  def _get_token(self, keystone_url):
    """ Returns The token to request OCCI site

    :param keystone_url: URL to do the request
    :return: The token
    """
    import json
    if self.userkey is not None:
      auth = {'auth': {'voms': True}}
    else:
      auth = {'auth': {
                'passwordCredentials': {
                  'username': self.username,
                  'password': self.password
                }
              }
             }
    try:
      # BUGFIX: previously the request body was hard-coded to the VOMS form,
      # so the passwordCredentials branch above was silently ignored.
      result = {'response': requests.post(keystone_url + "/v2.0/tokens",
                                          data=json.dumps(auth),
                                          headers={"Content-Type": "application/json"},
                                          cert=self.usercert, verify=ca_path).json()}
    except Exception as e:
      raise OcciError('Cannot connect to ' + keystone_url + ' (' + str(e) + ')')
    token = str(result['response']['access']['token']['id'])
    tenants = self._get_tenants(keystone_url, token)
    return self.__auth_in_tenant(keystone_url, token, tenants)

  def _get_tenants(self, keystone_url, temporal_token):
    """ Returns all the tenants available in the provider

    :param temporal_token: unscoped authorization token
    :return: The name of all tenants
    """
    result = {'response': requests.get("%s/v2.0/tenants/" % keystone_url,
                                       headers={"Content-Type": "application/json",
                                                "X-Auth-Token": temporal_token},
                                       cert=self.usercert,
                                       verify=ca_path).json()}
    return [tenant['name'] for tenant in result['response']['tenants']]

  def __auth_in_tenant(self, keystone_url, token, tenants):
    """ Returns the token linked to the tenant

    Loop all tenants, trying to authorize the user with each tenant; as soon
    as a tenant accepts, its scoped token is returned.

    :param token: System token
    :param tenants: list of tenants
    :return: scoped token id, or None if no tenant accepted
    """
    import json
    for tenant in tenants:
      data = {'auth': {'voms': True, 'tenantName': tenant}}
      headers = {
        'Accept': 'application/json',
        'X-Auth-Token': token,
        'User-Agent': 'Vcycle ' + vcycle.shared.vcycleVersion + ' ( OCCI/1.1 )',
        'Content-Type': 'application/json',
        'Content-Length': len(json.dumps(data))
      }
      try:
        result = {'response': requests.post("%s/v2.0/tokens" % keystone_url,
                                            data=json.dumps(data),
                                            headers=headers,
                                            cert=self.usercert,
                                            verify=ca_path).json()}
      except Exception as e:
        # BUGFIX: previously the error was only printed (py2 print statement)
        # and the unbound 'result' was then read, raising NameError; log and
        # try the next tenant instead.
        vcycle.vacutils.logLine('Scoped auth against tenant ' + tenant + ' failed (' + str(e) + ')')
        continue
      if 'access' in result['response']:
        return result['response']['access']['token']['id']

  def _create_ca_file(self):
    """(Re)build the concatenated CA bundle used to verify the endpoint.

    The bundle is rebuilt if it does not exist, or if any certificate in
    /etc/grid-security/certificates/ is newer than the existing bundle.
    """
    import subprocess
    import os.path
    if not os.path.exists(ca_path):
      subprocess.call('cat `ls /etc/grid-security/certificates/*.pem` > %s' % ca_path,
                      shell=True)
    else:
      modification_time = os.lstat(ca_path).st_mtime
      for cert_file in os.listdir('/etc/grid-security/certificates/'):
        if os.lstat('/etc/grid-security/certificates/%s' % cert_file).st_mtime > modification_time:
          subprocess.call('cat `ls /etc/grid-security/certificates/*.pem` > %s' % ca_path,
                          shell=True)
          # One rebuild covers every newer certificate
          break
    return
| |
"""Base class for sparse matrix formats using compressed storage."""
from __future__ import division, print_function, absolute_import
__all__ = []
from warnings import warn
import operator
import numpy as np
from scipy._lib.six import zip as izip
from scipy._lib._util import _prune_array
from .base import spmatrix, isspmatrix, SparseEfficiencyWarning
from .data import _data_matrix, _minmax_mixin
from .dia import dia_matrix
from . import _sparsetools
from .sputils import (upcast, upcast_char, to_native, isdense, isshape,
getdtype, isscalarlike, IndexMixin, get_index_dtype,
downcast_intp_index, get_sum_dtype)
class _cs_matrix(_data_matrix, _minmax_mixin, IndexMixin):
"""base matrix class for compressed row and column oriented matrices"""
    def __init__(self, arg1, shape=None, dtype=None, copy=False):
        """Build a compressed (CSR/CSC) matrix.

        `arg1` may be another sparse matrix, a dense array-like, a shape
        tuple ``(M, N)``, or one of the tuples ``(data, ij)`` /
        ``(data, indices, indptr)``.
        """
        _data_matrix.__init__(self)
        if isspmatrix(arg1):
            # Another sparse matrix: copy or convert it into this format.
            if arg1.format == self.format and copy:
                arg1 = arg1.copy()
            else:
                arg1 = arg1.asformat(self.format)
            self._set_self(arg1)
        elif isinstance(arg1, tuple):
            if isshape(arg1):
                # It's a tuple of matrix dimensions (M, N)
                # create empty matrix
                self.shape = arg1   # spmatrix checks for errors here
                M, N = self.shape
                # Select index dtype large enough to pass array and
                # scalar parameters to sparsetools
                idx_dtype = get_index_dtype(maxval=max(M,N))
                self.data = np.zeros(0, getdtype(dtype, default=float))
                self.indices = np.zeros(0, idx_dtype)
                # One indptr entry per major-axis line, plus the final sentinel.
                self.indptr = np.zeros(self._swap((M,N))[0] + 1, dtype=idx_dtype)
            else:
                if len(arg1) == 2:
                    # (data, ij) format: route through COO for the conversion.
                    from .coo import coo_matrix
                    other = self.__class__(coo_matrix(arg1, shape=shape))
                    self._set_self(other)
                elif len(arg1) == 3:
                    # (data, indices, indptr) format
                    (data, indices, indptr) = arg1
                    # Select index dtype large enough to pass array and
                    # scalar parameters to sparsetools
                    maxval = None
                    if shape is not None:
                        maxval = max(shape)
                    idx_dtype = get_index_dtype((indices, indptr), maxval=maxval, check_contents=True)
                    self.indices = np.array(indices, copy=copy, dtype=idx_dtype)
                    self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype)
                    self.data = np.array(data, copy=copy, dtype=dtype)
                else:
                    raise ValueError("unrecognized %s_matrix constructor usage" %
                                     self.format)
        else:
            # must be dense
            try:
                arg1 = np.asarray(arg1)
            except:
                raise ValueError("unrecognized %s_matrix constructor usage" %
                                 self.format)
            from .coo import coo_matrix
            self._set_self(self.__class__(coo_matrix(arg1, dtype=dtype)))
        # Read matrix dimensions given, if any
        if shape is not None:
            self.shape = shape   # spmatrix will check for errors
        else:
            if self.shape is None:
                # shape not already set, try to infer dimensions
                try:
                    major_dim = len(self.indptr) - 1
                    minor_dim = self.indices.max() + 1
                except:
                    raise ValueError('unable to infer matrix dimensions')
                else:
                    self.shape = self._swap((major_dim,minor_dim))
        if dtype is not None:
            # Honor an explicit dtype even when arg1 supplied its own.
            self.data = np.asarray(self.data, dtype=dtype)
        # Cheap structural validation (O(1) checks only).
        self.check_format(full_check=False)
def getnnz(self, axis=None):
if axis is None:
return int(self.indptr[-1])
else:
if axis < 0:
axis += 2
axis, _ = self._swap((axis, 1 - axis))
_, N = self._swap(self.shape)
if axis == 0:
return np.bincount(downcast_intp_index(self.indices),
minlength=N)
elif axis == 1:
return np.diff(self.indptr)
raise ValueError('axis out of bounds')
getnnz.__doc__ = spmatrix.getnnz.__doc__
def _set_self(self, other, copy=False):
"""take the member variables of other and assign them to self"""
if copy:
other = other.copy()
self.data = other.data
self.indices = other.indices
self.indptr = other.indptr
self.shape = other.shape
    def check_format(self, full_check=True):
        """check whether the matrix format is valid

        Parameters
        ----------
        full_check : bool, optional
            If `True`, rigorous check, O(N) operations. Otherwise
            basic check, O(1) operations (default True).
        """
        # use _swap to determine proper bounds
        major_name,minor_name = self._swap(('row','column'))
        major_dim,minor_dim = self._swap(self.shape)
        # index arrays should have integer data types
        if self.indptr.dtype.kind != 'i':
            warn("indptr array has non-integer dtype (%s)"
                 % self.indptr.dtype.name)
        if self.indices.dtype.kind != 'i':
            warn("indices array has non-integer dtype (%s)"
                 % self.indices.dtype.name)
        # NOTE: this coerces indptr/indices to a common index dtype and data
        # to native byte order, mutating the matrix in place as a side effect.
        idx_dtype = get_index_dtype((self.indptr, self.indices))
        self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
        self.indices = np.asarray(self.indices, dtype=idx_dtype)
        self.data = to_native(self.data)
        # check array shapes
        if self.data.ndim != 1 or self.indices.ndim != 1 or self.indptr.ndim != 1:
            raise ValueError('data, indices, and indptr should be 1-D')
        # check index pointer: one entry per major-axis line, plus a sentinel
        if (len(self.indptr) != major_dim + 1):
            raise ValueError("index pointer size (%d) should be (%d)" %
                             (len(self.indptr), major_dim + 1))
        if (self.indptr[0] != 0):
            raise ValueError("index pointer should start with 0")
        # check index and data arrays
        if (len(self.indices) != len(self.data)):
            raise ValueError("indices and data should have the same size")
        if (self.indptr[-1] > len(self.indices)):
            raise ValueError("Last value of index pointer should be less than "
                             "the size of index and data arrays")
        # drop any excess storage beyond indptr[-1]
        self.prune()
        if full_check:
            # check format validity (more expensive)
            if self.nnz > 0:
                if self.indices.max() >= minor_dim:
                    raise ValueError("%s index values must be < %d" %
                                     (minor_name,minor_dim))
                if self.indices.min() < 0:
                    raise ValueError("%s index values must be >= 0" %
                                     minor_name)
                if np.diff(self.indptr).min() < 0:
                    raise ValueError("index pointer values must form a "
                                     "non-decreasing sequence")
        # if not self.has_sorted_indices():
        #    warn('Indices were not in sorted order. Sorting indices.')
        #    self.sort_indices()
        #    assert(self.has_sorted_indices())
        # TODO check for duplicates?
#######################
# Boolean comparisons #
#######################
def _scalar_binopt(self, other, op):
"""Scalar version of self._binopt, for cases in which no new nonzeros
are added. Produces a new spmatrix in canonical form.
"""
self.sum_duplicates()
res = self._with_data(op(self.data, other), copy=True)
res.eliminate_zeros()
return res
def __eq__(self, other):
# Scalar other.
if isscalarlike(other):
if np.isnan(other):
return self.__class__(self.shape, dtype=np.bool_)
if other == 0:
warn("Comparing a sparse matrix with 0 using == is inefficient"
", try using != instead.", SparseEfficiencyWarning)
all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
inv = self._scalar_binopt(other, operator.ne)
return all_true - inv
else:
return self._scalar_binopt(other, operator.eq)
# Dense other.
elif isdense(other):
return self.todense() == other
# Sparse other.
elif isspmatrix(other):
warn("Comparing sparse matrices using == is inefficient, try using"
" != instead.", SparseEfficiencyWarning)
#TODO sparse broadcasting
if self.shape != other.shape:
return False
elif self.format != other.format:
other = other.asformat(self.format)
res = self._binopt(other,'_ne_')
all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
return all_true - res
else:
return False
def __ne__(self, other):
# Scalar other.
if isscalarlike(other):
if np.isnan(other):
warn("Comparing a sparse matrix with nan using != is inefficient",
SparseEfficiencyWarning)
all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
return all_true
elif other != 0:
warn("Comparing a sparse matrix with a nonzero scalar using !="
" is inefficient, try using == instead.", SparseEfficiencyWarning)
all_true = self.__class__(np.ones(self.shape), dtype=np.bool_)
inv = self._scalar_binopt(other, operator.eq)
return all_true - inv
else:
return self._scalar_binopt(other, operator.ne)
# Dense other.
elif isdense(other):
return self.todense() != other
# Sparse other.
elif isspmatrix(other):
#TODO sparse broadcasting
if self.shape != other.shape:
return True
elif self.format != other.format:
other = other.asformat(self.format)
return self._binopt(other,'_ne_')
else:
return True
    def _inequality(self, other, op, op_name, bad_scalar_msg):
        """Shared implementation behind <, <=, >, >=.

        `op` is the Python operator (e.g. operator.lt), `op_name` the
        matching C-kernel name (e.g. '_lt_'), and `bad_scalar_msg` the
        warning used when a scalar comparison would densify the result.
        """
        # Scalar other.
        if isscalarlike(other):
            if 0 == other and op_name in ('_le_', '_ge_'):
                raise NotImplementedError(" >= and <= don't work with 0.")
            elif op(0, other):
                # The implicit zeros would all compare True, so the result is
                # effectively dense: build a dense operand and warn.
                warn(bad_scalar_msg, SparseEfficiencyWarning)
                other_arr = np.empty(self.shape, dtype=np.result_type(other))
                other_arr.fill(other)
                other_arr = self.__class__(other_arr)
                return self._binopt(other_arr, op_name)
            else:
                return self._scalar_binopt(other, op)
        # Dense other.
        elif isdense(other):
            return op(self.todense(), other)
        # Sparse other.
        elif isspmatrix(other):
            #TODO sparse broadcasting
            if self.shape != other.shape:
                raise ValueError("inconsistent shapes")
            elif self.format != other.format:
                other = other.asformat(self.format)
            if op_name not in ('_ge_', '_le_'):
                return self._binopt(other, op_name)
            # >= / <= are True wherever both operands are zero, so compute
            # the complement of the strict opposite comparison instead.
            warn("Comparing sparse matrices using >= and <= is inefficient, "
                 "using <, >, or !=, instead.", SparseEfficiencyWarning)
            all_true = self.__class__(np.ones(self.shape))
            res = self._binopt(other, '_gt_' if op_name == '_le_' else '_lt_')
            return all_true - res
        else:
            raise ValueError("Operands could not be compared.")
    def __lt__(self, other):
        """Elementwise 'less than' comparison (delegates to _inequality)."""
        return self._inequality(other, operator.lt, '_lt_',
                                "Comparing a sparse matrix with a scalar "
                                "greater than zero using < is inefficient, "
                                "try using >= instead.")
    def __gt__(self, other):
        """Elementwise 'greater than' comparison (delegates to _inequality)."""
        return self._inequality(other, operator.gt, '_gt_',
                                "Comparing a sparse matrix with a scalar "
                                "less than zero using > is inefficient, "
                                "try using <= instead.")
    def __le__(self, other):
        """Elementwise 'less or equal' comparison (delegates to _inequality)."""
        return self._inequality(other, operator.le, '_le_',
                                "Comparing a sparse matrix with a scalar "
                                "greater than zero using <= is inefficient, "
                                "try using > instead.")
    def __ge__(self,other):
        """Element-wise self >= other; delegates to _inequality."""
        return self._inequality(other, operator.ge, '_ge_',
                                "Comparing a sparse matrix with a scalar "
                                "less than zero using >= is inefficient, "
                                "try using < instead.")
    #################################
    # Arithmetic operator overrides #
    #################################
    def _add_dense(self, other):
        """Add a dense array/matrix of the same shape; returns np.matrix."""
        if other.shape != self.shape:
            raise ValueError('Incompatible shapes.')
        dtype = upcast_char(self.dtype.char, other.dtype.char)
        # Lay the result out along this format's major axis.
        order = self._swap('CF')[0]
        result = np.array(other, dtype=dtype, order=order, copy=True)
        M, N = self._swap(self.shape)
        # csr_todense needs C-contiguous data; for a Fortran-ordered result
        # operate on the transposed view instead.
        y = result if result.flags.c_contiguous else result.T
        _sparsetools.csr_todense(M, N, self.indptr, self.indices, self.data, y)
        return np.matrix(result, copy=False)
    def _add_sparse(self, other):
        """Sparse + sparse via the compiled '_plus_' kernel."""
        return self._binopt(other, '_plus_')
    def _sub_sparse(self, other):
        """Sparse - sparse via the compiled '_minus_' kernel."""
        return self._binopt(other, '_minus_')
    def multiply(self, other):
        """Point-wise multiplication by another matrix, vector, or
        scalar.
        Broadcasting between a sparse row/column vector and a matrix is
        supported; broadcast results are returned as COO matrices.
        """
        # Scalar multiplication.
        if isscalarlike(other):
            return self._mul_scalar(other)
        # Sparse matrix or vector.
        if isspmatrix(other):
            if self.shape == other.shape:
                other = self.__class__(other)
                return self._binopt(other, '_elmul_')
            # Single element.
            elif other.shape == (1,1):
                return self._mul_scalar(other.toarray()[0, 0])
            elif self.shape == (1,1):
                return other._mul_scalar(self.toarray()[0, 0])
            # A row times a column.
            elif self.shape[1] == 1 and other.shape[0] == 1:
                return self._mul_sparse_matrix(other.tocsc())
            elif self.shape[0] == 1 and other.shape[1] == 1:
                return other._mul_sparse_matrix(self.tocsc())
            # Row vector times matrix. other is a row.
            elif other.shape[0] == 1 and self.shape[1] == other.shape[1]:
                # Scaling columns == right-multiplying by a diagonal matrix.
                other = dia_matrix((other.toarray().ravel(), [0]),
                                   shape=(other.shape[1], other.shape[1]))
                return self._mul_sparse_matrix(other)
            # self is a row.
            elif self.shape[0] == 1 and self.shape[1] == other.shape[1]:
                copy = dia_matrix((self.toarray().ravel(), [0]),
                                  shape=(self.shape[1], self.shape[1]))
                return other._mul_sparse_matrix(copy)
            # Column vector times matrix. other is a column.
            elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:
                # Scaling rows == left-multiplying by a diagonal matrix.
                other = dia_matrix((other.toarray().ravel(), [0]),
                                   shape=(other.shape[0], other.shape[0]))
                return other._mul_sparse_matrix(self)
            # self is a column.
            elif self.shape[1] == 1 and self.shape[0] == other.shape[0]:
                copy = dia_matrix((self.toarray().ravel(), [0]),
                                  shape=(self.shape[0], self.shape[0]))
                return copy._mul_sparse_matrix(other)
            else:
                raise ValueError("inconsistent shapes")
        # Assume other is a dense matrix/array, which produces a single-item
        # object array if other isn't convertible to ndarray.
        other = np.atleast_2d(other)
        if other.ndim != 2:
            return np.multiply(self.toarray(), other)
        # Single element / wrapped object.
        if other.size == 1:
            return self._mul_scalar(other.flat[0])
        # Fast case for trivial sparse matrix.
        elif self.shape == (1, 1):
            return np.multiply(self.toarray()[0,0], other)
        from .coo import coo_matrix
        ret = self.tocoo()
        # Matching shapes.
        if self.shape == other.shape:
            data = np.multiply(ret.data, other[ret.row, ret.col])
        # Sparse row vector times...
        elif self.shape[0] == 1:
            if other.shape[1] == 1:  # Dense column vector.
                data = np.multiply(ret.data, other)
            elif other.shape[1] == self.shape[1]:  # Dense matrix.
                data = np.multiply(ret.data, other[:, ret.col])
            else:
                raise ValueError("inconsistent shapes")
            # Broadcast the row's sparsity across each output row.
            row = np.repeat(np.arange(other.shape[0]), len(ret.row))
            col = np.tile(ret.col, other.shape[0])
            return coo_matrix((data.view(np.ndarray).ravel(), (row, col)),
                              shape=(other.shape[0], self.shape[1]),
                              copy=False)
        # Sparse column vector times...
        elif self.shape[1] == 1:
            if other.shape[0] == 1:  # Dense row vector.
                data = np.multiply(ret.data[:, None], other)
            elif other.shape[0] == self.shape[0]:  # Dense matrix.
                data = np.multiply(ret.data[:, None], other[ret.row])
            else:
                raise ValueError("inconsistent shapes")
            # Broadcast the column's sparsity across each output column.
            row = np.repeat(ret.row, other.shape[1])
            col = np.tile(np.arange(other.shape[1]), len(ret.col))
            return coo_matrix((data.view(np.ndarray).ravel(), (row, col)),
                              shape=(self.shape[0], other.shape[1]),
                              copy=False)
        # Sparse matrix times dense row vector.
        elif other.shape[0] == 1 and self.shape[1] == other.shape[1]:
            data = np.multiply(ret.data, other[:, ret.col].ravel())
        # Sparse matrix times dense column vector.
        elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:
            data = np.multiply(ret.data, other[ret.row].ravel())
        else:
            raise ValueError("inconsistent shapes")
        ret.data = data.view(np.ndarray).ravel()
        return ret
###########################
# Multiplication handlers #
###########################
    def _mul_vector(self, other):
        """Matrix-vector product with a dense 1-d vector."""
        M,N = self.shape
        # output array
        result = np.zeros(M, dtype=upcast_char(self.dtype.char,
                                               other.dtype.char))
        # csr_matvec or csc_matvec
        fn = getattr(_sparsetools,self.format + '_matvec')
        fn(M, N, self.indptr, self.indices, self.data, other, result)
        return result
    def _mul_multivector(self, other):
        """Matrix product with a dense 2-d array of column vectors."""
        M,N = self.shape
        n_vecs = other.shape[1]  # number of column vectors
        result = np.zeros((M,n_vecs), dtype=upcast_char(self.dtype.char,
                                                        other.dtype.char))
        # csr_matvecs or csc_matvecs
        fn = getattr(_sparsetools,self.format + '_matvecs')
        fn(M, N, n_vecs, self.indptr, self.indices, self.data, other.ravel(), result.ravel())
        return result
    def _mul_sparse_matrix(self, other):
        """Sparse @ sparse product using the two-pass matmat kernels."""
        M, K1 = self.shape
        K2, N = other.shape
        major_axis = self._swap((M,N))[0]
        other = self.__class__(other)  # convert to this format
        idx_dtype = get_index_dtype((self.indptr, self.indices,
                                     other.indptr, other.indices),
                                    maxval=M*N)
        indptr = np.empty(major_axis + 1, dtype=idx_dtype)
        # Pass 1: count the nonzeros of the product (fills indptr only).
        fn = getattr(_sparsetools, self.format + '_matmat_pass1')
        fn(M, N,
           np.asarray(self.indptr, dtype=idx_dtype),
           np.asarray(self.indices, dtype=idx_dtype),
           np.asarray(other.indptr, dtype=idx_dtype),
           np.asarray(other.indices, dtype=idx_dtype),
           indptr)
        nnz = indptr[-1]
        # Re-pick the index dtype now that the true nnz is known.
        idx_dtype = get_index_dtype((self.indptr, self.indices,
                                     other.indptr, other.indices),
                                    maxval=nnz)
        indptr = np.asarray(indptr, dtype=idx_dtype)
        indices = np.empty(nnz, dtype=idx_dtype)
        data = np.empty(nnz, dtype=upcast(self.dtype, other.dtype))
        # Pass 2: compute the actual indices and values.
        fn = getattr(_sparsetools, self.format + '_matmat_pass2')
        fn(M, N, np.asarray(self.indptr, dtype=idx_dtype),
           np.asarray(self.indices, dtype=idx_dtype),
           self.data,
           np.asarray(other.indptr, dtype=idx_dtype),
           np.asarray(other.indices, dtype=idx_dtype),
           other.data,
           indptr, indices, data)
        return self.__class__((data,indices,indptr),shape=(M,N))
    def diagonal(self):
        """Returns the main diagonal of the matrix
        """
        # TODO support k-th diagonal
        fn = getattr(_sparsetools, self.format + "_diagonal")
        # The diagonal has min(M, N) entries.
        y = np.empty(min(self.shape), dtype=upcast(self.dtype))
        fn(self.shape[0], self.shape[1], self.indptr, self.indices, self.data, y)
        return y
#####################
# Other binary ops #
#####################
    def _maximum_minimum(self, other, npop, op_name, dense_check):
        """Shared implementation of maximum() and minimum().
        Parameters
        ----------
        other : scalar, dense, or sparse operand.
        npop : callable
            NumPy ufunc (np.maximum or np.minimum) for scalar/dense paths.
        op_name : str
            Name of the compiled kernel ('_maximum_' / '_minimum_').
        dense_check : callable
            Predicate on a scalar; True means the result would be dense.
        """
        if isscalarlike(other):
            if dense_check(other):
                # Implicit zeros lose to the scalar, so the result is dense:
                # materialize the scalar and use the sparse kernel.
                warn("Taking maximum (minimum) with > 0 (< 0) number results to "
                     "a dense matrix.",
                     SparseEfficiencyWarning)
                other_arr = np.empty(self.shape, dtype=np.asarray(other).dtype)
                other_arr.fill(other)
                other_arr = self.__class__(other_arr)
                return self._binopt(other_arr, op_name)
            else:
                # Result stays sparse: apply the ufunc to stored values only.
                self.sum_duplicates()
                new_data = npop(self.data, np.asarray(other))
                mat = self.__class__((new_data, self.indices, self.indptr),
                                     dtype=new_data.dtype, shape=self.shape)
                return mat
        elif isdense(other):
            return npop(self.todense(), other)
        elif isspmatrix(other):
            return self._binopt(other, op_name)
        else:
            raise ValueError("Operands not compatible.")
    def maximum(self, other):
        # A positive scalar beats every implicit zero -> dense result.
        return self._maximum_minimum(other, np.maximum, '_maximum_', lambda x: np.asarray(x) > 0)
    # Inherit the user-facing docstring from the spmatrix base class.
    maximum.__doc__ = spmatrix.maximum.__doc__
    def minimum(self, other):
        # A negative scalar beats every implicit zero -> dense result.
        return self._maximum_minimum(other, np.minimum, '_minimum_', lambda x: np.asarray(x) < 0)
    # Inherit the user-facing docstring from the spmatrix base class.
    minimum.__doc__ = spmatrix.minimum.__doc__
#####################
# Reduce operations #
#####################
    def sum(self, axis=None, dtype=None, out=None):
        """Sum the matrix over the given axis. If the axis is None, sum
        over both rows and columns, returning a scalar.
        """
        # The spmatrix base class already does axis=0 and axis=1 efficiently
        # so we only do the case axis=None here
        if (not hasattr(self, 'blocksize') and
                axis in self._swap(((1, -1), (0, 2)))[0]):
            # faster than multiplication for large minor axis in CSC/CSR
            res_dtype = get_sum_dtype(self.dtype)
            ret = np.zeros(len(self.indptr) - 1, dtype=res_dtype)
            # Sum each major slice directly over the stored values.
            major_index, value = self._minor_reduce(np.add)
            ret[major_index] = value
            ret = np.asmatrix(ret)
            if axis % 2 == 1:
                # axis is 1 or -1: the result is a column vector.
                ret = ret.T
            if out is not None and out.shape != ret.shape:
                raise ValueError('dimensions do not match')
            # sum over an empty axis tuple is a no-op that still honors
            # the dtype and out arguments.
            return ret.sum(axis=(), dtype=dtype, out=out)
        # spmatrix will handle the remaining situations when axis
        # is in {None, -1, 0, 1}
        else:
            return spmatrix.sum(self, axis=axis, dtype=dtype, out=out)
    sum.__doc__ = spmatrix.sum.__doc__
    def _minor_reduce(self, ufunc):
        """Reduce nonzeros with a ufunc over the minor axis when non-empty
        Warning: this does not call sum_duplicates()
        Returns
        -------
        major_index : array of ints
            Major indices where nonzero
        value : array of self.dtype
            Reduce result for nonzeros in each major_index
        """
        # Major slices with at least one stored entry.
        major_index = np.flatnonzero(np.diff(self.indptr))
        # reduceat applies the ufunc within each [indptr[i], indptr[i+1]) run.
        value = ufunc.reduceat(self.data,
                               downcast_intp_index(self.indptr[major_index]))
        return major_index, value
#######################
# Getting and Setting #
#######################
    def __setitem__(self, index, x):
        """Assign scalar, dense, or sparse values at the given index."""
        # Process arrays from IndexMixin
        i, j = self._unpack_index(index)
        i, j = self._index_to_arrays(i, j)
        if isspmatrix(x):
            # A 1-wide sparse x may broadcast along a row or column axis.
            broadcast_row = x.shape[0] == 1 and i.shape[0] != 1
            broadcast_col = x.shape[1] == 1 and i.shape[1] != 1
            if not ((broadcast_row or x.shape[0] == i.shape[0]) and
                    (broadcast_col or x.shape[1] == i.shape[1])):
                raise ValueError("shape mismatch in assignment")
            # clear entries that will be overwritten
            ci, cj = self._swap((i.ravel(), j.ravel()))
            self._zero_many(ci, cj)
            x = x.tocoo()
            r, c = x.row, x.col
            x = np.asarray(x.data, dtype=self.dtype)
            if broadcast_row:
                r = np.repeat(np.arange(i.shape[0]), len(r))
                c = np.tile(c, i.shape[0])
                x = np.tile(x, i.shape[0])
            if broadcast_col:
                r = np.repeat(r, i.shape[1])
                c = np.tile(np.arange(i.shape[1]), len(c))
                x = np.repeat(x, i.shape[1])
            # only assign entries in the new sparsity structure
            i = i[r, c]
            j = j[r, c]
        else:
            # Make x and i into the same shape
            x = np.asarray(x, dtype=self.dtype)
            x, _ = np.broadcast_arrays(x, i)
            if x.shape != i.shape:
                raise ValueError("shape mismatch in assignment")
        if np.size(x) == 0:
            # Nothing to assign (empty selection).
            return
        i, j = self._swap((i.ravel(), j.ravel()))
        self._set_many(i, j, x.ravel())
    def _setdiag(self, values, k):
        """Set the k-th diagonal to `values` (scalar broadcasts)."""
        if 0 in self.shape:
            return
        M, N = self.shape
        # A 0-d values array means one scalar for the whole diagonal.
        broadcast = (values.ndim == 0)
        if k < 0:
            if broadcast:
                max_index = min(M + k, N)
            else:
                max_index = min(M + k, N, len(values))
            i = np.arange(max_index, dtype=self.indices.dtype)
            j = np.arange(max_index, dtype=self.indices.dtype)
            # Sub-diagonal: shift the row indices down by |k|.
            i -= k
        else:
            if broadcast:
                max_index = min(M, N - k)
            else:
                max_index = min(M, N - k, len(values))
            i = np.arange(max_index, dtype=self.indices.dtype)
            j = np.arange(max_index, dtype=self.indices.dtype)
            # Super-diagonal: shift the column indices right by k.
            j += k
        if not broadcast:
            values = values[:len(i)]
        self[i, j] = values
def _prepare_indices(self, i, j):
M, N = self._swap(self.shape)
def check_bounds(indices, bound):
idx = indices.max()
if idx >= bound:
raise IndexError('index (%d) out of range (>= %d)' %
(idx, bound))
idx = indices.min()
if idx < -bound:
raise IndexError('index (%d) out of range (< -%d)' %
(idx, bound))
check_bounds(i, M)
check_bounds(j, N)
i = np.asarray(i, dtype=self.indices.dtype)
j = np.asarray(j, dtype=self.indices.dtype)
return i, j, M, N
    def _set_many(self, i, j, x):
        """Sets value at each (i, j) to x
        Here (i,j) index major and minor respectively.
        """
        i, j, M, N = self._prepare_indices(i, j)
        n_samples = len(x)
        # Offset of each (i, j) into self.data, or -1 if not stored.
        offsets = np.empty(n_samples, dtype=self.indices.dtype)
        ret = _sparsetools.csr_sample_offsets(M, N, self.indptr, self.indices,
                                              n_samples, i, j, offsets)
        if ret == 1:
            # rinse and repeat
            # NOTE(review): nonzero return presumably signals duplicate
            # entries; summing them makes the offsets unambiguous.
            self.sum_duplicates()
            _sparsetools.csr_sample_offsets(M, N, self.indptr,
                                            self.indices, n_samples, i, j,
                                            offsets)
        if -1 not in offsets:
            # only affects existing non-zero cells
            self.data[offsets] = x
            return
        else:
            warn("Changing the sparsity structure of a %s_matrix is expensive. "
                 "lil_matrix is more efficient." % self.format,
                 SparseEfficiencyWarning)
            # replace where possible
            mask = offsets > -1
            self.data[offsets[mask]] = x[mask]
            # only insertions remain
            mask = ~mask
            i = i[mask]
            i[i < 0] += M
            j = j[mask]
            j[j < 0] += N
            self._insert_many(i, j, x[mask])
    def _zero_many(self, i, j):
        """Sets value at each (i, j) to zero, preserving sparsity structure.
        Here (i,j) index major and minor respectively.
        """
        i, j, M, N = self._prepare_indices(i, j)
        n_samples = len(i)
        # Offset of each (i, j) into self.data, or -1 if not stored.
        offsets = np.empty(n_samples, dtype=self.indices.dtype)
        ret = _sparsetools.csr_sample_offsets(M, N, self.indptr, self.indices,
                                              n_samples, i, j, offsets)
        if ret == 1:
            # rinse and repeat
            # NOTE(review): nonzero return presumably signals duplicate
            # entries; summing them makes the offsets unambiguous.
            self.sum_duplicates()
            _sparsetools.csr_sample_offsets(M, N, self.indptr,
                                            self.indices, n_samples, i, j,
                                            offsets)
        # only assign zeros to the existing sparsity structure
        self.data[offsets[offsets > -1]] = 0
    def _insert_many(self, i, j, x):
        """Inserts new nonzero at each (i, j) with value x
        Here (i,j) index major and minor respectively.
        i, j and x must be non-empty, 1d arrays.
        Inserts each major group (e.g. all entries per row) at a time.
        Maintains has_sorted_indices property.
        Modifies i, j, x in place.
        """
        # Group insertions by major index; mergesort keeps duplicates in
        # their original order so "last write wins" below is well defined.
        order = np.argsort(i, kind='mergesort')  # stable for duplicates
        i = i.take(order, mode='clip')
        j = j.take(order, mode='clip')
        x = x.take(order, mode='clip')
        do_sort = self.has_sorted_indices
        # Update index data type
        idx_dtype = get_index_dtype((self.indices, self.indptr),
                                    maxval=(self.indptr[-1] + x.size))
        self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
        self.indices = np.asarray(self.indices, dtype=idx_dtype)
        i = np.asarray(i, dtype=idx_dtype)
        j = np.asarray(j, dtype=idx_dtype)
        # Collate old and new in chunks by major index
        indices_parts = []
        data_parts = []
        # ui: distinct major indices receiving insertions;
        # ui_indptr: start of each group within the sorted i/j/x arrays.
        ui, ui_indptr = np.unique(i, return_index=True)
        ui_indptr = np.append(ui_indptr, len(j))
        new_nnzs = np.diff(ui_indptr)
        prev = 0
        for c, (ii, js, je) in enumerate(izip(ui, ui_indptr, ui_indptr[1:])):
            # old entries
            start = self.indptr[prev]
            stop = self.indptr[ii]
            indices_parts.append(self.indices[start:stop])
            data_parts.append(self.data[start:stop])
            # handle duplicate j: keep last setting
            uj, uj_indptr = np.unique(j[js:je][::-1], return_index=True)
            if len(uj) == je - js:
                indices_parts.append(j[js:je])
                data_parts.append(x[js:je])
            else:
                indices_parts.append(j[js:je][::-1][uj_indptr])
                data_parts.append(x[js:je][::-1][uj_indptr])
                new_nnzs[c] = len(uj)
            prev = ii
        # remaining old entries
        start = self.indptr[ii]
        indices_parts.append(self.indices[start:])
        data_parts.append(self.data[start:])
        # update attributes
        self.indices = np.concatenate(indices_parts)
        self.data = np.concatenate(data_parts)
        # Rebuild indptr: per-slice counts plus the inserted counts.
        nnzs = np.asarray(np.ediff1d(self.indptr, to_begin=0), dtype=idx_dtype)
        nnzs[1:][ui] += new_nnzs
        self.indptr = np.cumsum(nnzs, out=nnzs)
        if do_sort:
            # TODO: only sort where necessary
            self.has_sorted_indices = False
            self.sort_indices()
        self.check_format(full_check=False)
    def _get_single_element(self,row,col):
        """Return the (row, col) entry as a scalar (duplicates summed)."""
        M, N = self.shape
        # Normalize negative indices.
        if (row < 0):
            row += M
        if (col < 0):
            col += N
        if not (0 <= row < M) or not (0 <= col < N):
            raise IndexError("index out of bounds")
        major_index, minor_index = self._swap((row,col))
        # TODO make use of sorted indices (if present)
        start = self.indptr[major_index]
        end = self.indptr[major_index+1]
        # can use np.add(..., where) from numpy 1.7
        # Summing matches (zero when absent) also handles duplicate entries.
        return np.compress(minor_index == self.indices[start:end],
                           self.data[start:end]).sum(dtype=self.dtype)
    def _get_submatrix(self, slice0, slice1):
        """Return a submatrix of this matrix (new matrix is created)."""
        slice0, slice1 = self._swap((slice0,slice1))
        shape0, shape1 = self._swap(self.shape)
        def _process_slice(sl, num):
            # Convert a slice / scalar / pair into a (start, stop) pair
            # with negative indices resolved against `num`.
            if isinstance(sl, slice):
                i0, i1 = sl.start, sl.stop
                if i0 is None:
                    i0 = 0
                elif i0 < 0:
                    i0 = num + i0
                if i1 is None:
                    i1 = num
                elif i1 < 0:
                    i1 = num + i1
                return i0, i1
            elif np.isscalar(sl):
                if sl < 0:
                    sl += num
                return sl, sl + 1
            else:
                return sl[0], sl[1]
        def _in_bounds(i0, i1, num):
            # Reject empty or out-of-range spans.
            if not (0 <= i0 < num) or not (0 < i1 <= num) or not (i0 < i1):
                raise IndexError("index out of bounds: 0<=%d<%d, 0<=%d<%d, %d<%d" %
                                 (i0, num, i1, num, i0, i1))
        i0, i1 = _process_slice(slice0, shape0)
        j0, j1 = _process_slice(slice1, shape1)
        _in_bounds(i0, i1, shape0)
        _in_bounds(j0, j1, shape1)
        aux = _sparsetools.get_csr_submatrix(shape0, shape1,
                                             self.indptr, self.indices,
                                             self.data,
                                             i0, i1, j0, j1)
        # aux is (indptr, indices, data) for the extracted block.
        data, indices, indptr = aux[2], aux[1], aux[0]
        shape = self._swap((i1 - i0, j1 - j0))
        return self.__class__((data, indices, indptr), shape=shape)
######################
# Conversion methods #
######################
    def tocoo(self, copy=True):
        major_dim, minor_dim = self._swap(self.shape)
        minor_indices = self.indices
        # Expand the compressed indptr into one major index per entry.
        major_indices = np.empty(len(minor_indices), dtype=self.indices.dtype)
        _sparsetools.expandptr(major_dim, self.indptr, major_indices)
        row, col = self._swap((major_indices, minor_indices))
        from .coo import coo_matrix
        return coo_matrix((self.data, (row, col)), self.shape, copy=copy,
                          dtype=self.dtype)
    tocoo.__doc__ = spmatrix.tocoo.__doc__
    def toarray(self, order=None, out=None):
        if out is None and order is None:
            # Default to the memory order natural for this format.
            order = self._swap('cf')[0]
        out = self._process_toarray_args(order, out)
        if not (out.flags.c_contiguous or out.flags.f_contiguous):
            raise ValueError('Output array must be C or F contiguous')
        # align ideal order with output array order
        if out.flags.c_contiguous:
            x = self.tocsr()
            y = out
        else:
            x = self.tocsc()
            y = out.T
        M, N = x._swap(x.shape)
        _sparsetools.csr_todense(M, N, x.indptr, x.indices, x.data, y)
        return out
    toarray.__doc__ = spmatrix.toarray.__doc__
##############################################################
# methods that examine or modify the internal data structure #
##############################################################
    def eliminate_zeros(self):
        """Remove zero entries from the matrix
        This is an *in place* operation
        """
        M, N = self._swap(self.shape)
        _sparsetools.csr_eliminate_zeros(M, N, self.indptr, self.indices,
                                         self.data)
        self.prune()  # nnz may have changed
    def __get_has_canonical_format(self):
        """Determine whether the matrix has sorted indices and no duplicates
        Returns
            - True: if the above applies
            - False: otherwise
        has_canonical_format implies has_sorted_indices, so if the latter flag
        is False, so will the former be; if the former is found True, the
        latter flag is also set.
        """
        # first check to see if result was cached
        if not getattr(self, '_has_sorted_indices', True):
            # not sorted => not canonical
            self._has_canonical_format = False
        elif not hasattr(self, '_has_canonical_format'):
            # Assign through the property so has_sorted_indices is kept
            # consistent when the check succeeds.
            self.has_canonical_format = _sparsetools.csr_has_canonical_format(
                len(self.indptr) - 1, self.indptr, self.indices)
        return self._has_canonical_format
    def __set_has_canonical_format(self, val):
        # Setting True also implies sorted indices (see getter docstring).
        self._has_canonical_format = bool(val)
        if val:
            self.has_sorted_indices = True
    has_canonical_format = property(fget=__get_has_canonical_format,
                                    fset=__set_has_canonical_format)
    def sum_duplicates(self):
        """Eliminate duplicate matrix entries by adding them together
        This is an *in place* operation
        """
        if self.has_canonical_format:
            return
        self.sort_indices()
        M, N = self._swap(self.shape)
        _sparsetools.csr_sum_duplicates(M, N, self.indptr, self.indices,
                                        self.data)
        self.prune()  # nnz may have changed
        self.has_canonical_format = True
    def __get_sorted(self):
        """Determine whether the matrix has sorted indices
        Returns
            - True: if the indices of the matrix are in sorted order
            - False: otherwise
        """
        # first check to see if result was cached
        if not hasattr(self,'_has_sorted_indices'):
            self._has_sorted_indices = _sparsetools.csr_has_sorted_indices(
                len(self.indptr) - 1, self.indptr, self.indices)
        return self._has_sorted_indices
    def __set_sorted(self, val):
        # Cache the flag; sort_indices() relies on this to skip re-sorting.
        self._has_sorted_indices = bool(val)
    has_sorted_indices = property(fget=__get_sorted, fset=__set_sorted)
    def sorted_indices(self):
        """Return a copy of this matrix with sorted indices
        """
        A = self.copy()
        A.sort_indices()
        return A
        # an alternative that has linear complexity is the following
        # although the previous option is typically faster
        # return self.toother().toother()
    def sort_indices(self):
        """Sort the indices of this matrix *in place*
        """
        # Skip the work entirely when the cached flag says already sorted.
        if not self.has_sorted_indices:
            _sparsetools.csr_sort_indices(len(self.indptr) - 1, self.indptr,
                                          self.indices, self.data)
            self.has_sorted_indices = True
    def prune(self):
        """Remove empty space after all non-zero elements.
        """
        major_dim = self._swap(self.shape)[0]
        # Sanity-check internal consistency before truncating.
        if len(self.indptr) != major_dim + 1:
            raise ValueError('index pointer has invalid length')
        if len(self.indices) < self.nnz:
            raise ValueError('indices array has fewer than nnz elements')
        if len(self.data) < self.nnz:
            raise ValueError('data array has fewer than nnz elements')
        # Truncate the storage arrays to exactly nnz entries.
        self.indices = _prune_array(self.indices[:self.nnz])
        self.data = _prune_array(self.data[:self.nnz])
###################
# utility methods #
###################
# needed by _data_matrix
def _with_data(self,data,copy=True):
"""Returns a matrix with the same sparsity structure as self,
but with different data. By default the structure arrays
(i.e. .indptr and .indices) are copied.
"""
if copy:
return self.__class__((data,self.indices.copy(),self.indptr.copy()),
shape=self.shape,dtype=data.dtype)
else:
return self.__class__((data,self.indices,self.indptr),
shape=self.shape,dtype=data.dtype)
    def _binopt(self, other, op):
        """apply the binary operation fn to two sparse matrices."""
        other = self.__class__(other)
        # e.g. csr_plus_csr, csr_minus_csr, etc.
        fn = getattr(_sparsetools, self.format + op + self.format)
        # Worst-case size of the result (no cancellation/overlap).
        maxnnz = self.nnz + other.nnz
        idx_dtype = get_index_dtype((self.indptr, self.indices,
                                     other.indptr, other.indices),
                                    maxval=maxnnz)
        indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
        indices = np.empty(maxnnz, dtype=idx_dtype)
        # Comparison kernels produce boolean data.
        bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']
        if op in bool_ops:
            data = np.empty(maxnnz, dtype=np.bool_)
        else:
            data = np.empty(maxnnz, dtype=upcast(self.dtype, other.dtype))
        fn(self.shape[0], self.shape[1],
           np.asarray(self.indptr, dtype=idx_dtype),
           np.asarray(self.indices, dtype=idx_dtype),
           self.data,
           np.asarray(other.indptr, dtype=idx_dtype),
           np.asarray(other.indices, dtype=idx_dtype),
           other.data,
           indptr, indices, data)
        A = self.__class__((data, indices, indptr), shape=self.shape)
        # Drop the over-allocated tail.
        A.prune()
        return A
    def _divide_sparse(self, other):
        """
        Divide this matrix by a second sparse matrix.
        """
        if other.shape != self.shape:
            raise ValueError('inconsistent shapes')
        r = self._binopt(other, '_eldiv_')
        if np.issubdtype(r.dtype, np.inexact):
            # Eldiv leaves entries outside the combined sparsity
            # pattern empty, so they must be filled manually.
            # Everything outside of other's sparsity is NaN, and everything
            # inside it is either zero or defined by eldiv.
            out = np.empty(self.shape, dtype=self.dtype)
            out.fill(np.nan)
            row, col = other.nonzero()
            out[row, col] = 0
            r = r.tocoo()
            out[r.row, r.col] = r.data
            # Return a dense matrix since most entries are NaN.
            out = np.matrix(out)
        else:
            # integers types go with nan <-> 0
            out = r
        return out
| |
"""
A simple VTK input file for PyQt, the qt bindings for python.
See http://www.trolltech.com for qt documentation, and
http://www.river-bank.demon.co.uk or http://www.thekompany.com
for the qt python bindings.
This class is based on the vtkGenericRenderWindowInteractor and is
therefore fairly powerful. It should also play nicely with the
vtk3DWidget code.
Created by Prabhu Ramachandran, May 2002
Based on David Gobbi's QVTKRenderWidget.py
Changes by Gerard Vermeulen Feb. 2003
Win32 support.
Changes by Gerard Vermeulen, May 2003
Bug fixes and better integration with the Qt framework.
"""
"""
This class works with the UNIX and Win32 versions of Qt.
Depending on the OpenGL graphics drivers, it may not
be possible to have more than one QVTKRenderWidget
per application.
In short, this class is experimental.
"""
# To do for Win32:
# 1. More testing to assure that the widget is always cleaned up
# properly and does not crash the application.
import qt
import vtk
class QVTKRenderWindowInteractor(qt.QWidget):
    """ A QVTKRenderWindowInteractor for Python and Qt. Uses a
    vtkGenericRenderWindowInteractor to handle the interactions. Use
    GetRenderWindow() to get the vtkRenderWindow. Create with the
    keyword stereo=1 in order to generate a stereo-capable window.
    The user interface is summarized in vtkInteractorStyle.h:
    - Keypress j / Keypress t: toggle between joystick (position
    sensitive) and trackball (motion sensitive) styles. In joystick
    style, motion occurs continuously as long as a mouse button is
    pressed. In trackball style, motion occurs when the mouse button
    is pressed and the mouse pointer moves.
    - Keypress c / Keypress o: toggle between camera and object
    (actor) modes. In camera mode, mouse events affect the camera
    position and focal point. In object mode, mouse events affect
    the actor that is under the mouse pointer.
    - Button 1: rotate the camera around its focal point (if camera
    mode) or rotate the actor around its origin (if actor mode). The
    rotation is in the direction defined from the center of the
    renderer's viewport towards the mouse position. In joystick mode,
    the magnitude of the rotation is determined by the distance the
    mouse is from the center of the render window.
    - Button 2: pan the camera (if camera mode) or translate the actor
    (if object mode). In joystick mode, the direction of pan or
    translation is from the center of the viewport towards the mouse
    position. In trackball mode, the direction of motion is the
    direction the mouse moves. (Note: with 2-button mice, pan is
    defined as <Shift>-Button 1.)
    - Button 3: zoom the camera (if camera mode) or scale the actor
    (if object mode). Zoom in/increase scale if the mouse position is
    in the top half of the viewport; zoom out/decrease scale if the
    mouse position is in the bottom half. In joystick mode, the amount
    of zoom is controlled by the distance of the mouse pointer from
    the horizontal centerline of the window.
    - Keypress 3: toggle the render window into and out of stereo
    mode. By default, red-blue stereo pairs are created. Some systems
    support Crystal Eyes LCD stereo glasses; you have to invoke
    SetStereoTypeToCrystalEyes() on the rendering window. Note: to
    use stereo you also need to pass a stereo=1 keyword argument to
    the constructor.
    - Keypress e: exit the application.
    - Keypress f: fly to the picked point
    - Keypress p: perform a pick operation. The render window interactor
    has an internal instance of vtkCellPicker that it uses to pick.
    - Keypress r: reset the camera view along the current view
    direction. Centers the actors and moves the camera so that all actors
    are visible.
    - Keypress s: modify the representation of all actors so that they
    are surfaces.
    - Keypress u: invoke the user-defined function. Typically, this
    keypress will bring up an interactor that you can type commands in.
    - Keypress w: modify the representation of all actors so that they
    are wireframe.
    """

    def __init__(self, parent=None, name=None, *args, **kw):
        # the current button
        self._ActiveButton = 0
        # private attributes
        self.__oldFocus = None
        self.__saveX = 0
        self.__saveY = 0
        self.__saveState = 0
        self.__connected = 0  # is QT->VTK connection done?
        # do special handling of some keywords:
        # stereo, rw
        stereo = 0
        # dict.has_key() was removed in Python 3; the `in` operator works
        # in both Python 2 and 3.
        if 'stereo' in kw:
            if kw['stereo']:
                stereo = 1
            del kw['stereo']
        rw = None
        if 'rw' in kw:
            rw = kw['rw']
            del kw['rw']
        # create qt-level widget
        # You cannot pass kw anymore, you'll get a TypeError: keyword
        # arguments are not supported
        # http://goldenspud.com/webrog/archives/2004/07/20/pyqt-platform-inconsistencies/
        # NOTE: apply() no longer exists in Python 3; call the unbound
        # base-class __init__ directly with argument unpacking instead.
        qt.QWidget.__init__(self, parent, name, *args)
        if rw:  # user-supplied render window
            self._RenderWindow = rw
        else:
            self._RenderWindow = vtk.vtkRenderWindow()
        if stereo:  # stereo mode
            self._RenderWindow.StereoCapableWindowOn()
            self._RenderWindow.SetStereoTypeToCrystalEyes()
        self._Iren = vtk.vtkGenericRenderWindowInteractor()
        self._Iren.SetRenderWindow(self._RenderWindow)
        # do all the necessary qt setup
        self.setBackgroundMode(qt.Qt.NoBackground)
        self.setMouseTracking(1)  # get all mouse events
        self.setFocusPolicy(qt.QWidget.ClickFocus)
        # `is None` is the idiomatic (and safe) identity test; `== None`
        # invokes user-defined comparison operators.
        if parent is None:
            self.show()
        self._Timer = qt.QTimer(self, 'timer handler')
        self.connect(self._Timer, qt.SIGNAL('timeout()'),
                     self.TimerEvent)
        self._Iren.AddObserver('CreateTimerEvent', self.CreateTimer)
        self._Iren.AddObserver('DestroyTimerEvent', self.DestroyTimer)

    def __getattr__(self, attr):
        """Makes the object behave like a
        vtkGenericRenderWindowInteractor"""
        if attr == '__vtk__':
            return lambda t=self._Iren: t
        elif hasattr(self._Iren, attr):
            return getattr(self._Iren, attr)
        elif hasattr(qt.QWidget, attr):
            return getattr(self.sipThis, attr)
        else:
            # `raise Exception, value` is Python-2-only syntax; the call
            # form below is valid in both Python 2 and 3.
            raise AttributeError(self.__class__.__name__ +
                                 " has no attribute named " + attr)

    def CreateTimer(self, obj, evt):
        self._Timer.start(10)

    def DestroyTimer(self, obj, evt):
        self._Timer.stop()
        return 1

    def TimerEvent(self):
        self._Iren.TimerEvent()

    def polish(self):
        """Final initialization just before the widget is displayed."""
        size = self.size()
        self._Iren.SetSize(size.width(), size.height())
        self._RenderWindow.SetWindowInfo(str(int(self.winId())))
        self._Iren.ConfigureEvent()
        self.__connected = 1

    def show(self):
        qt.QWidget.show(self)
        self.update()  # needed for initial contents display on Win32

    def paintEvent(self,ev):
        if self.__connected:
            self.Render()

    def resizeEvent(self,ev):
        size = self.size()
        self._Iren.SetSize(size.width(), size.height())
        self._Iren.ConfigureEvent()
        self.update()

    def _GetCtrlShift(self, ev):
        """Extract (ctrl, shift) modifier flags from a Qt event, falling
        back to the state saved by the last mouse-move event."""
        ctrl, shift = 0, 0
        if hasattr(ev, 'state'):
            if (ev.state() & 8):
                shift = 1
            if (ev.state() & 16):
                ctrl = 1
        elif self.__saveState:
            if (self.__saveState & 8):
                shift = 1
            if (self.__saveState & 16):
                ctrl = 1
        return ctrl, shift

    def enterEvent(self,ev):
        if not self.hasFocus():
            self.__oldFocus = self.focusWidget()
            self.setFocus()
        ctrl, shift = self._GetCtrlShift(ev)
        self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
                                            ctrl, shift, chr(0), 0, None)
        self._Iren.EnterEvent()

    def leaveEvent(self,ev):
        # Restore the previously focused widget if no button is down.
        if (self.__saveState & 0x7) == 0 and self.__oldFocus:
            self.__oldFocus.setFocus()
            self.__oldFocus = None
        ctrl, shift = self._GetCtrlShift(ev)
        self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
                                            ctrl, shift, chr(0), 0, None)
        self._Iren.LeaveEvent()

    def mousePressEvent(self,ev):
        ctrl, shift = self._GetCtrlShift(ev)
        repeat = 0
        if ev.type() == qt.QEvent.MouseButtonDblClick:
            repeat = 1
        self._Iren.SetEventInformationFlipY(ev.x(), ev.y(),
                                            ctrl, shift, chr(0), repeat, None)
        self._ActiveButton = 0
        if ev.button() == 1:
            self._Iren.LeftButtonPressEvent()
            self._ActiveButton = 'Left'
        elif ev.button() == 2:
            self._Iren.RightButtonPressEvent()
            self._ActiveButton = 'Right'
        elif ev.button() == 4:
            self._Iren.MiddleButtonPressEvent()
            self._ActiveButton = 'Middle'

    def mouseReleaseEvent(self,ev):
        ctrl, shift = self._GetCtrlShift(ev)
        self._Iren.SetEventInformationFlipY(ev.x(), ev.y(),
                                            ctrl, shift, chr(0), 0, None)
        if self._ActiveButton == 'Right':
            self._Iren.RightButtonReleaseEvent()
        elif self._ActiveButton == 'Left':
            self._Iren.LeftButtonReleaseEvent()
        elif self._ActiveButton == 'Middle':
            self._Iren.MiddleButtonReleaseEvent()

    def mouseMoveEvent(self,ev):
        # Remember position/state for events that carry no coordinates
        # (enter/leave/key events).
        self.__saveState = ev.state()
        self.__saveX = ev.x()
        self.__saveY = ev.y()
        ctrl, shift = self._GetCtrlShift(ev)
        self._Iren.SetEventInformationFlipY(ev.x(), ev.y(),
                                            ctrl, shift, chr(0), 0, None)
        self._Iren.MouseMoveEvent()

    def keyPressEvent(self,ev):
        ctrl, shift = self._GetCtrlShift(ev)
        key = chr(0)
        if ev.key() < 256:
            key = str(ev.text())
        self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
                                            ctrl, shift, key, 0, None)
        self._Iren.KeyPressEvent()
        self._Iren.CharEvent()

    def keyReleaseEvent(self,ev):
        ctrl, shift = self._GetCtrlShift(ev)
        key = chr(0)
        if ev.key() < 256:
            key = chr(ev.key())
        self._Iren.SetEventInformationFlipY(self.__saveX, self.__saveY,
                                            ctrl, shift, key, 0, None)
        self._Iren.KeyReleaseEvent()

    def GetRenderWindow(self):
        return self._RenderWindow

    def Render(self):
        self._RenderWindow.Render()
#-----------------------------------------------------------------------
def QVTKRenderWidgetConeExample():
    """A simple example that uses the QVTKRenderWindowInteractor
    class to display a cone inside a Qt application. """
    # every QT app needs an app
    app = qt.QApplication(['QVTKRenderWindowInteractor'])
    # create the widget; Initialize/Start set up the VTK interactor before
    # any rendering happens
    widget = QVTKRenderWindowInteractor()
    widget.Initialize()
    widget.Start()
    # if you don't want the 'q' key to exit comment this.
    widget.AddObserver("ExitEvent", lambda o, e, a=app: a.quit())
    ren = vtk.vtkRenderer()
    widget.GetRenderWindow().AddRenderer(ren)
    # standard VTK pipeline: source -> mapper -> actor -> renderer
    cone = vtk.vtkConeSource()
    cone.SetResolution(8)
    coneMapper = vtk.vtkPolyDataMapper()
    coneMapper.SetInputConnection(cone.GetOutputPort())
    coneActor = vtk.vtkActor()
    coneActor.SetMapper(coneMapper)
    ren.AddActor(coneActor)
    # show the widget
    widget.show()
    # close the application when window is closed
    # NOTE(review): setMainWidget/exec_loop are Qt 3 era APIs — this example
    # presumably targets the old "qt" bindings, not PyQt4+; confirm.
    app.setMainWidget(widget)
    # start event processing
    app.exec_loop()
# Run the demonstration when this module is executed as a script.
if __name__ == "__main__":
    QVTKRenderWidgetConeExample()
| |
# -*- coding: utf-8 -*-
"""Dpkg build files generator."""
from __future__ import unicode_literals
import logging
import os
import shutil
import stat
import time
class DPKGBuildConfiguration(object):
  """Dpkg build configuration.

  Attributes:
    has_bin_directory (bool): True if the Python module creates
        a /usr/bin directory.
    has_egg_info_directory (bool): True if the Python module has
        an .egg_info directory in the dist-packages directory.
    has_egg_info_file (bool): True if the Python module has
        an .egg_info file in the dist-packages directory.
    has_module_source_files (bool): True if the Python module has
        one or more source (*.py) files in the dist-packages directory.
    has_module_shared_object (bool): True if the Python module has
        one or more shared object (*.so) files in the dist-packages directory.
    module_directories (list[str]): module directories in the dist-packages
        directory.
  """

  def __init__(self):
    """Initializes a dpkg build configuration."""
    super(DPKGBuildConfiguration, self).__init__()
    # All feature flags start out cleared; the build process flips them as
    # the corresponding artifacts are discovered.
    for flag_name in (
        'has_bin_directory', 'has_egg_info_directory', 'has_egg_info_file',
        'has_module_source_files', 'has_module_shared_object'):
      setattr(self, flag_name, False)
    self.module_directories = []
class DPKGBuildFilesGenerator(object):
"""Dpkg build files generator."""
_EMAIL_ADDRESS = (
'log2timeline development team <log2timeline-dev@googlegroups.com>')
_CHANGELOG_TEMPLATE = '\n'.join([
'{source_package_name:s} ({project_version!s}-1) unstable; urgency=low',
'',
' * Auto-generated',
'',
' -- {maintainer_email_address:s} {date_time:s}',
''])
_CLEAN_TEMPLATE_PYTHON = '\n'.join([
'{setup_name:s}/*.pyc',
'*.pyc',
''])
_COMPAT_TEMPLATE = '\n'.join([
'9',
''])
_CONTROL_TEMPLATE_CONFIGURE_MAKE = [
'Source: {source_package_name:s}',
'Section: libs',
'Priority: extra',
'Maintainer: {upstream_maintainer:s}',
('Build-Depends: debhelper (>= 9){build_depends:s}'),
'Standards-Version: 4.1.4',
'Homepage: {upstream_homepage:s}',
'',
'Package: {package_name:s}',
'Architecture: {architecture:s}',
'Depends: {depends:s}',
'Description: {description_short:s}',
' {description_long:s}',
'']
_CONTROL_TEMPLATE_SETUP_PY_PYTHON3_ONLY = [
'Source: {source_package_name:s}',
'Section: python',
'Priority: extra',
'Maintainer: {upstream_maintainer:s}',
'Build-Depends: debhelper (>= 9){build_depends:s}',
'Standards-Version: 4.1.4',
'X-Python3-Version: >= 3.5',
'Homepage: {upstream_homepage:s}',
'',
'Package: {python3_package_name:s}',
'Architecture: {architecture:s}',
'Depends: {python3_depends:s}',
'Description: {description_short:s}',
' {description_long:s}',
'']
_CONTROL_TEMPLATE_SETUP_PY_TOOLS = [
'Package: {source_package_name:s}-tools',
'Architecture: all',
('Depends: {python3_package_name:s} (>= ${{binary:Version}}), '
'python3 (>= 3.5~), ${{python3:Depends}}, ${{misc:Depends}}'),
'Description: Tools of {description_name:s}',
' {description_long:s}',
'']
_COPYRIGHT_TEMPLATE = '\n'.join([
''])
_INSTALL_TEMPLATE_PYTHON_DATA = '\n'.join([
'data/* usr/share/{package_name:s}',
''])
_INSTALL_TEMPLATE_PYTHON3 = '\n'.join([
'usr/lib/python3*/dist-packages/{package_name:s}/',
'usr/lib/python3*/dist-packages/{package_name:s}*.egg-info/*',
''])
_INSTALL_TEMPLATE_PYTHON_TOOLS = '\n'.join([
'usr/bin',
''])
_RULES_TEMPLATE_CONFIGURE_MAKE = '\n'.join([
'#!/usr/bin/make -f',
'',
'# Uncomment this to turn on verbose mode.',
'# export DH_VERBOSE=1',
'',
'# This has to be exported to make some magic below work.',
'export DH_OPTIONS',
'',
'%:',
'\tdh $@ {build_system:s}{with_quilt:s}',
'',
'.PHONY: override_dh_auto_configure',
'override_dh_auto_configure:',
'\tdh_auto_configure -- {configure_options:s} CFLAGS="-g"',
'',
'.PHONY: override_dh_auto_test',
'override_dh_auto_test:',
'',
'.PHONY: override_dh_install',
'override_dh_install:',
'\t# Create the {package_name:s} package.',
'\tdh_install',
'',
'.PHONY: override_dh_strip',
'override_dh_strip:',
'ifeq (,$(filter nostrip,$(DEB_BUILD_OPTIONS)))',
' dh_strip -p{package_name:s} --dbg-package={package_name:s}-dbg',
'endif',
'',
'.PHONY: override_dh_shlibdeps',
'override_dh_shlibdeps:',
'\tdh_shlibdeps -L{package_name:s} -l${{CURDIR}}/debian/tmp/usr/lib',
''])
# Force the build system to setup.py here in case the package ships
# a Makefile or equivalent.
_RULES_TEMPLATE_SETUP_PY = '\n'.join([
'#!/usr/bin/make -f',
'',
'%:',
'\tdh $@ --buildsystem=pybuild --with=python3{with_quilt:s}',
'',
'.PHONY: override_dh_auto_test',
'override_dh_auto_test:',
'',
''])
_SOURCE_FORMAT_TEMPLATE = '\n'.join([
'3.0 (quilt)',
''])
_SOURCE_OPTIONS_TEMPLATE = '\n'.join([
('extend-diff-ignore = "(^|/)(\\.eggs|config\\.h|config\\.log|'
'config\\.status|.*\\.egg-info|.*\\.egg-info/.*|Makefile)$"'),
''])
  def __init__(
      self, project_definition, project_version, data_path,
      dependency_definitions, build_configuration=None):
    """Initializes a dpkg build files generator.

    Args:
      project_definition (ProjectDefinition): project definition.
      project_version (str): version of the project.
      data_path (str): path to the data directory which contains the dpkg
          templates and patches sub directories.
      dependency_definitions (dict[str, ProjectDefinition]): definitions of all
          projects, which is used to determine the properties of dependencies.
      build_configuration (Optional[DPKGBuildConfiguration]): the dpkg build
          configuration.
    """
    super(DPKGBuildFilesGenerator, self).__init__()
    # The generator is effectively immutable after construction; all state
    # lives on these private attributes.
    self._build_configuration = build_configuration
    self._data_path = data_path
    self._dependency_definitions = dependency_definitions
    self._project_definition = project_definition
    self._project_version = project_version
def _GenerateFile(
self, template_filename, template_data, template_values, output_filename):
"""Generates a file based on a template.
Args:
template_filename (str): template filename or None if not defined.
If not defined template_data is used.
template_data (str): template data.
template_values (dict[str, str]): template values or None if not defined.
output_filename (str): name of the resulting file.
"""
if template_filename:
template_file_path = os.path.join(
self._data_path, 'dpkg_templates', template_filename)
with open(template_file_path, 'rb') as file_object:
template_data = file_object.read()
template_data = template_data.decode('utf-8')
if template_values:
template_data = template_data.format(**template_values)
template_data = template_data.encode('utf-8')
with open(output_filename, 'wb') as file_object:
file_object.write(template_data)
def _GenerateChangelogFile(self, dpkg_path):
"""Generates the dpkg build changelog file.
Args:
dpkg_path (str): path to the dpkg files.
"""
source_package_name = self._GetSourcePackageName()
timezone_minutes, _ = divmod(time.timezone, 60)
timezone_hours, timezone_minutes = divmod(timezone_minutes, 60)
# If timezone_hours is -1 {0:02d} will format as -1 instead of -01
# hence we detect the sign and force a leading zero.
if timezone_hours < 0:
timezone_string = '-{0:02d}{1:02d}'.format(
-timezone_hours, timezone_minutes)
else:
timezone_string = '+{0:02d}{1:02d}'.format(
timezone_hours, timezone_minutes)
date_time_string = '{0:s} {1:s}'.format(
time.strftime('%a, %d %b %Y %H:%M:%S'), timezone_string)
template_values = {
'date_time': date_time_string,
'maintainer_email_address': self._EMAIL_ADDRESS,
'project_version': self._project_version,
'source_package_name': source_package_name}
output_filename = os.path.join(dpkg_path, 'changelog')
self._GenerateFile(
None, self._CHANGELOG_TEMPLATE, template_values, output_filename)
def _GenerateCleanFile(self, dpkg_path):
"""Generates the dpkg build clean file.
Args:
dpkg_path (str): path to the dpkg files.
"""
# TODO: add support for configure_make
if self._project_definition.build_system == 'setup_py':
setup_name = self._GetPythonSetupName()
template_values = {
'setup_name': setup_name}
output_filename = os.path.join(dpkg_path, 'clean')
self._GenerateFile(
None, self._CLEAN_TEMPLATE_PYTHON, template_values, output_filename)
  def _GenerateCompatFile(self, dpkg_path):
    """Generates the dpkg build compat file.

    The compat file pins the debhelper compatibility level (9).

    Args:
      dpkg_path (str): path to the dpkg files.
    """
    output_filename = os.path.join(dpkg_path, 'compat')
    self._GenerateFile(None, self._COMPAT_TEMPLATE, None, output_filename)
  def _GenerateControlFile(self, dpkg_path):
    """Generates the dpkg build control file.

    Selects the control template matching the project's build system,
    rewrites Python 2 dependency names to their python3- equivalents and
    fills in the template values.

    Args:
      dpkg_path (str): path to the dpkg files.
    """
    source_package_name = self._GetSourcePackageName()
    package_name = self._GetPackageName(self._project_definition)
    python3_package_name = self._GetPython3PackageName()
    architecture = self._GetArchitecture()
    build_depends = []
    python3_build_depends = []
    # Patched projects need quilt at build time to apply the patch series.
    if self._project_definition.patches:
      build_depends.append('quilt')
    if self._project_definition.build_system == 'configure_make':
      build_depends.append('autotools-dev')
    elif self._project_definition.build_system == 'setup_py':
      build_depends.append('dh-python')
      python3_build_depends.append('python3-all (>= 3.5~)')
      python3_build_depends.append('python3-setuptools')
      if self._project_definition.architecture_dependent:
        python3_build_depends.append('python3-all-dev')
    # Only Python 3 packages are produced, so python- and python2- build
    # dependencies are rewritten as python3- equivalents.
    for dependency in self._project_definition.dpkg_build_dependencies:
      if self._project_definition.build_system == 'setup_py':
        if dependency.startswith('python-'):
          dependency = 'python3-{0:s}'.format(dependency[7:])
          python3_build_depends.append(dependency)
          continue
        if (dependency.startswith('python2-') or
            dependency.startswith('python3-')):
          dependency = 'python3-{0:s}'.format(dependency[8:])
          python3_build_depends.append(dependency)
          continue
      build_depends.append(dependency)
    if self._project_definition.build_system == 'setup_py':
      build_depends.extend(python3_build_depends)
    # Rebound from list to str: either ', ' + joined dependencies (the
    # templates put it directly after 'debhelper (>= 9)') or empty.
    if build_depends:
      build_depends = ', {0:s}'.format(', '.join(build_depends))
    else:
      build_depends = ''
    # description short needs to be a single line.
    description_short = self._project_definition.description_short
    description_short = ' '.join(description_short.split('\n'))
    # description long needs a space at the start of every line after
    # the first.
    description_long = self._project_definition.description_long
    description_long = '\n '.join(description_long.split('\n'))
    depends = []
    python3_depends = []
    # Same python3- rewrite for the runtime dependencies.
    for dependency in self._project_definition.dpkg_dependencies:
      if dependency.startswith('python-'):
        python3_depends.append('python3-{0:s}'.format(dependency[7:]))
      elif (dependency.startswith('python2-') or
            dependency.startswith('python3-')):
        python3_depends.append('python3-{0:s}'.format(dependency[8:]))
      else:
        depends.append(dependency)
    # ${shlibs:Depends}/${misc:Depends}/${python3:Depends} are substituted
    # by debhelper at package build time.
    depends.append('${shlibs:Depends}')
    depends.append('${misc:Depends}')
    depends = ', '.join(depends)
    python3_depends.append('${python3:Depends}')
    python3_depends.append('${misc:Depends}')
    python3_depends = ', '.join(python3_depends)
    template_values = {
        'architecture': architecture,
        'build_depends': build_depends,
        'depends': depends,
        'description_long': description_long,
        'description_name': self._project_definition.name,
        'description_short': description_short,
        'package_name': package_name,
        'python3_depends': python3_depends,
        'python3_package_name': python3_package_name,
        'source_package_name': source_package_name,
        'upstream_homepage': self._project_definition.homepage_url,
        'upstream_maintainer': self._project_definition.maintainer}
    control_template = []
    if self._project_definition.build_system == 'configure_make':
      control_template.extend(self._CONTROL_TEMPLATE_CONFIGURE_MAKE)
    elif self._project_definition.build_system == 'setup_py':
      control_template.extend(self._CONTROL_TEMPLATE_SETUP_PY_PYTHON3_ONLY)
      # TODO: add configuration setting to indicate tools should be packaged.
      if package_name not in ('idna', 'mock', 'psutil'):
        if (self._build_configuration and
            self._build_configuration.has_bin_directory):
          control_template.extend(self._CONTROL_TEMPLATE_SETUP_PY_TOOLS)
    control_template = '\n'.join(control_template)
    output_filename = os.path.join(dpkg_path, 'control')
    self._GenerateFile(
        self._project_definition.dpkg_template_control, control_template,
        template_values, output_filename)
def _GenerateCopyrightFile(self, dpkg_path):
"""Generates the dpkg build copyright file.
Args:
dpkg_path (str): path to the dpkg files.
"""
license_file = os.path.dirname(__file__)
license_file = os.path.dirname(license_file)
license_file = os.path.join(
license_file, 'data', 'licenses', 'LICENSE.{0:s}'.format(
self._project_definition.name))
filename = os.path.join(dpkg_path, 'copyright')
if os.path.exists(license_file):
shutil.copy(license_file, filename)
else:
logging.warning('Missing license file: {0:s}'.format(license_file))
with open(filename, 'wb') as file_object:
file_object.write(b'\n')
  def _GeneratePython3ModuleInstallFile(self, dpkg_path, template_values):
    """Generates the dpkg build Python 3 module .install file.

    When project specific install templates are defined they are used as-is;
    otherwise a default .install file is derived from the build
    configuration (or a generic template when none is available).

    Args:
      dpkg_path (str): path to the dpkg files.
      template_values (dict[str, str]): template values or None if not defined.
    """
    python3_package_name = self._GetPython3PackageName()
    # [None] makes the loop below run once with the generated default.
    template_files = (
        self._project_definition.dpkg_template_install_python3 or [None])
    for template_file in template_files:
      if template_file:
        output_filename = template_file
        template_data = None
      else:
        output_filename = '{0:s}.install'.format(python3_package_name)
        if not self._build_configuration:
          template_data = self._INSTALL_TEMPLATE_PYTHON3
        else:
          # Build the install globs from what the build actually produced.
          template_data = []
          if self._build_configuration.has_module_source_files:
            template_data.append('usr/lib/python3*/dist-packages/*.py')
          if self._build_configuration.has_module_shared_object:
            template_data.append('usr/lib/python3*/dist-packages/*.so')
          module_directories = self._build_configuration.module_directories
          template_data.extend([
              'usr/lib/python3*/dist-packages/{0:s}'.format(
                  module_directory)
              for module_directory in module_directories])
          # An .egg-info directory and an .egg-info file need different
          # globs; a directory's contents are installed, a file itself is.
          if self._build_configuration.has_egg_info_directory:
            template_data.append(
                'usr/lib/python3*/dist-packages/*.egg-info/*')
          elif self._build_configuration.has_egg_info_file:
            template_data.append(
                'usr/lib/python3*/dist-packages/*.egg-info')
          template_data = '\n'.join(template_data)
      output_filename = os.path.join(dpkg_path, output_filename)
      self._GenerateFile(
          template_file, template_data, template_values, output_filename)
def _GenerateRulesFile(self, dpkg_path):
"""Generates the dpkg build rules file.
Args:
dpkg_path (str): path to the dpkg files.
"""
if self._project_definition.build_system == 'configure_make':
self._GenerateConfigureMakeRulesFile(dpkg_path)
elif self._project_definition.build_system == 'setup_py':
self._GenerateSetupPyRulesFile(dpkg_path)
filename = os.path.join(dpkg_path, 'rules')
stat_info = os.stat(filename)
os.chmod(filename, stat_info.st_mode | stat.S_IEXEC)
def _GenerateConfigureMakeRulesFile(self, dpkg_path):
"""Generates the dpkg build rules file.
Args:
dpkg_path (str): path to the dpkg files.
"""
package_name = self._GetPackageName(self._project_definition)
build_system = '--buildsystem=autoconf'
if self._project_definition.patches:
with_quilt = ' --with quilt'
else:
with_quilt = ''
configure_options = ''
if self._project_definition.dpkg_configure_options:
configure_options = ' '.join(
self._project_definition.dpkg_configure_options)
elif self._project_definition.configure_options:
configure_options = ' '.join(
self._project_definition.configure_options)
template_values = {
'build_system': build_system,
'configure_options': configure_options,
'package_name': package_name,
'with_quilt': with_quilt}
output_filename = os.path.join(dpkg_path, 'rules')
self._GenerateFile(
self._project_definition.dpkg_template_rules,
self._RULES_TEMPLATE_CONFIGURE_MAKE, template_values, output_filename)
def _GenerateSetupPyRulesFile(self, dpkg_path):
"""Generates the dpkg build rules file.
Args:
dpkg_path (str): path to the dpkg files.
"""
setup_name = self._GetPythonSetupName()
if self._project_definition.patches:
with_quilt = ' --with quilt'
else:
with_quilt = ''
template_values = {
'setup_name': setup_name,
'with_quilt': with_quilt}
rules_template = self._RULES_TEMPLATE_SETUP_PY
# TODO: replace manual write of rules file by call to _GenerateFile.
template_filename = self._project_definition.dpkg_template_rules
if template_filename:
template_file_path = os.path.join(
self._data_path, 'dpkg_templates', template_filename)
with open(template_file_path, 'rb') as file_object:
rules_template = file_object.read()
rules_template = rules_template.decode('utf-8')
output_filename = os.path.join(dpkg_path, 'rules')
with open(output_filename, 'wb') as file_object:
data = rules_template.format(**template_values)
file_object.write(data.encode('utf-8'))
def _GenerateSourceFormatFile(self, dpkg_path):
"""Generates the dpkg build source/format file.
Args:
dpkg_path (str): path to the dpkg files.
"""
template_file = self._SOURCE_FORMAT_TEMPLATE
output_filename = os.path.join(dpkg_path, 'source', 'format')
self._GenerateFile(None, template_file, None, output_filename)
def _GenerateSourceOptionsFile(self, dpkg_path):
"""Generates the dpkg build source/options file.
Args:
dpkg_path (str): path to the dpkg files.
"""
template_file = self._SOURCE_OPTIONS_TEMPLATE
output_filename = os.path.join(dpkg_path, 'source', 'options')
self._GenerateFile(None, template_file, None, output_filename)
def _GetArchitecture(self):
"""Retrieves the architecture.
Returns:
str: architecture.
"""
if not self._project_definition.architecture_dependent:
return 'all'
return 'any'
def _GetPackageName(self, project_definition):
"""Retrieves the package name.
Args:
project_definition (ProjectDefinition): project definition.
Returns:
str: package name.
"""
if project_definition.dpkg_name:
package_name = project_definition.dpkg_name
else:
package_name = project_definition.name
if package_name.startswith('python-'):
package_name = package_name[7:]
elif (package_name.startswith('python2-') or
package_name.startswith('python3-')):
package_name = package_name[8:]
return package_name
def _GetPython3PackageName(self):
"""Retrieves the Python 3 package name.
Returns:
str: Python 3 package name.
"""
package_name = self._GetPackageName(self._project_definition)
return 'python3-{0:s}'.format(package_name)
def _GetPythonSetupName(self):
"""Retrieves the Python setup.py name.
Returns:
str: setup.py name.
"""
if self._project_definition.setup_name:
return self._project_definition.setup_name
return self._project_definition.name
def _GetSourcePackageName(self):
"""Retrieves the source package name.
Returns:
str: source package name.
"""
if self._project_definition.dpkg_source_name:
return self._project_definition.dpkg_source_name
return self._project_definition.name
  def GenerateFiles(self, dpkg_path):
    """Generates the dpkg build files.

    Creates the debian directory with the changelog, clean, compat, control,
    copyright, rules, source/* and (if the project defines patches) the
    patches sub directory with its series file.

    Args:
      dpkg_path (str): path to the dpkg files.
    """
    os.mkdir(dpkg_path)
    self._GenerateChangelogFile(dpkg_path)
    self._GenerateCleanFile(dpkg_path)
    self._GenerateCompatFile(dpkg_path)
    self._GenerateControlFile(dpkg_path)
    self._GenerateCopyrightFile(dpkg_path)
    self._GenerateRulesFile(dpkg_path)
    # Additional project specific template files are rendered with no
    # template values, effectively copying them as-is.
    for filename in self._project_definition.dpkg_template_additional:
      output_filename = os.path.join(dpkg_path, filename)
      self._GenerateFile(filename, '', None, output_filename)
    os.mkdir(os.path.join(dpkg_path, 'source'))
    self._GenerateSourceFormatFile(dpkg_path)
    self._GenerateSourceOptionsFile(dpkg_path)
    if self._project_definition.patches:
      patches_directory = os.path.join(dpkg_path, 'patches')
      os.mkdir(patches_directory)
      # NOTE(review): os.chdir changes process-wide state; an exception
      # between the two chdir calls would leave the process in
      # patches_directory — consider a try/finally.
      current_path = os.getcwd()
      os.chdir(patches_directory)
      patch_filenames = []
      for patch_filename in self._project_definition.patches:
        filename = os.path.join(self._data_path, 'patches', patch_filename)
        # A missing patch file is logged and skipped, not fatal.
        if not os.path.exists(filename):
          logging.warning('Missing patch file: {0:s}'.format(filename))
          continue
        shutil.copy(filename, patch_filename)
        patch_filenames.append(patch_filename)
      os.chdir(current_path)
      # The series file lists the patches in application order for quilt.
      filename = os.path.join(dpkg_path, 'patches', 'series')
      with open(filename, 'wb') as file_object:
        data = '\n'.join(patch_filenames)
        file_object.write(data.encode('utf-8'))
| |
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.apigee import instance_pb2
from google3.cloud.graphite.mmv2.services.google.apigee import instance_pb2_grpc
from typing import List
class Instance(object):
    def __init__(
        self,
        name: str = None,
        location: str = None,
        peering_cidr_range: str = None,
        host: str = None,
        port: str = None,
        description: str = None,
        display_name: str = None,
        created_at: int = None,
        last_modified_at: int = None,
        disk_encryption_key_name: str = None,
        state: str = None,
        apigee_organization: str = None,
        service_account_file: str = "",
    ):
        """Initializes an Apigee Instance resource client.

        Only the configurable fields are stored; host, port, created_at,
        last_modified_at and state are accepted but not assigned here —
        presumably they are server-populated output-only fields set by
        apply() from the RPC response (generated code; confirm upstream).
        """
        # Ensure the shared gRPC channel exists before any RPC is made.
        channel.initialize()
        self.name = name
        self.location = location
        self.peering_cidr_range = peering_cidr_range
        self.description = description
        self.display_name = display_name
        self.disk_encryption_key_name = disk_encryption_key_name
        self.apigee_organization = apigee_organization
        self.service_account_file = service_account_file
    def apply(self):
        """Creates or updates the instance and refreshes local fields.

        Builds an Apply request from the configured fields (skipping unset
        ones), sends it over the shared channel and copies every field of
        the response — including the server-populated output-only fields —
        back onto this object.
        """
        stub = instance_pb2_grpc.ApigeeBetaInstanceServiceStub(channel.Channel())
        request = instance_pb2.ApplyApigeeBetaInstanceRequest()
        # Only set fields with non-empty values on the request proto.
        if Primitive.to_proto(self.name):
            request.resource.name = Primitive.to_proto(self.name)

        if Primitive.to_proto(self.location):
            request.resource.location = Primitive.to_proto(self.location)

        if InstancePeeringCidrRangeEnum.to_proto(self.peering_cidr_range):
            request.resource.peering_cidr_range = InstancePeeringCidrRangeEnum.to_proto(
                self.peering_cidr_range
            )

        if Primitive.to_proto(self.description):
            request.resource.description = Primitive.to_proto(self.description)

        if Primitive.to_proto(self.display_name):
            request.resource.display_name = Primitive.to_proto(self.display_name)

        if Primitive.to_proto(self.disk_encryption_key_name):
            request.resource.disk_encryption_key_name = Primitive.to_proto(
                self.disk_encryption_key_name
            )

        if Primitive.to_proto(self.apigee_organization):
            request.resource.apigee_organization = Primitive.to_proto(
                self.apigee_organization
            )

        request.service_account_file = self.service_account_file

        response = stub.ApplyApigeeBetaInstance(request)

        # Mirror the full server state locally, including output-only fields.
        self.name = Primitive.from_proto(response.name)
        self.location = Primitive.from_proto(response.location)
        self.peering_cidr_range = InstancePeeringCidrRangeEnum.from_proto(
            response.peering_cidr_range
        )
        self.host = Primitive.from_proto(response.host)
        self.port = Primitive.from_proto(response.port)
        self.description = Primitive.from_proto(response.description)
        self.display_name = Primitive.from_proto(response.display_name)
        self.created_at = Primitive.from_proto(response.created_at)
        self.last_modified_at = Primitive.from_proto(response.last_modified_at)
        self.disk_encryption_key_name = Primitive.from_proto(
            response.disk_encryption_key_name
        )
        self.state = InstanceStateEnum.from_proto(response.state)
        self.apigee_organization = Primitive.from_proto(response.apigee_organization)
    def delete(self):
        """Deletes the instance identified by the configured fields.

        The response is intentionally discarded; no local state is updated.
        """
        stub = instance_pb2_grpc.ApigeeBetaInstanceServiceStub(channel.Channel())
        request = instance_pb2.DeleteApigeeBetaInstanceRequest()
        request.service_account_file = self.service_account_file
        # Only set fields with non-empty values on the request proto.
        if Primitive.to_proto(self.name):
            request.resource.name = Primitive.to_proto(self.name)

        if Primitive.to_proto(self.location):
            request.resource.location = Primitive.to_proto(self.location)

        if InstancePeeringCidrRangeEnum.to_proto(self.peering_cidr_range):
            request.resource.peering_cidr_range = InstancePeeringCidrRangeEnum.to_proto(
                self.peering_cidr_range
            )

        if Primitive.to_proto(self.description):
            request.resource.description = Primitive.to_proto(self.description)

        if Primitive.to_proto(self.display_name):
            request.resource.display_name = Primitive.to_proto(self.display_name)

        if Primitive.to_proto(self.disk_encryption_key_name):
            request.resource.disk_encryption_key_name = Primitive.to_proto(
                self.disk_encryption_key_name
            )

        if Primitive.to_proto(self.apigee_organization):
            request.resource.apigee_organization = Primitive.to_proto(
                self.apigee_organization
            )

        response = stub.DeleteApigeeBetaInstance(request)
    @classmethod
    def list(self, apigeeOrganization, service_account_file=""):
        """Lists the instances of the given Apigee organization.

        Returns the ``items`` field of the list response.
        """
        stub = instance_pb2_grpc.ApigeeBetaInstanceServiceStub(channel.Channel())
        request = instance_pb2.ListApigeeBetaInstanceRequest()
        request.service_account_file = service_account_file
        request.ApigeeOrganization = apigeeOrganization

        return stub.ListApigeeBetaInstance(request).items
    def to_proto(self):
        """Converts this object into an ApigeeBetaInstance proto message.

        Only fields with non-empty values are set on the resulting message.
        """
        resource = instance_pb2.ApigeeBetaInstance()
        if Primitive.to_proto(self.name):
            resource.name = Primitive.to_proto(self.name)
        if Primitive.to_proto(self.location):
            resource.location = Primitive.to_proto(self.location)
        if InstancePeeringCidrRangeEnum.to_proto(self.peering_cidr_range):
            resource.peering_cidr_range = InstancePeeringCidrRangeEnum.to_proto(
                self.peering_cidr_range
            )
        if Primitive.to_proto(self.description):
            resource.description = Primitive.to_proto(self.description)
        if Primitive.to_proto(self.display_name):
            resource.display_name = Primitive.to_proto(self.display_name)
        if Primitive.to_proto(self.disk_encryption_key_name):
            resource.disk_encryption_key_name = Primitive.to_proto(
                self.disk_encryption_key_name
            )
        if Primitive.to_proto(self.apigee_organization):
            resource.apigee_organization = Primitive.to_proto(self.apigee_organization)
        return resource
class InstancePeeringCidrRangeEnum(object):
    """Converts peering CIDR range enum values to and from proto form."""

    @classmethod
    def to_proto(self, resource):
        """Maps a short enum name to its wire enum value; falsy passes through."""
        if not resource:
            return resource
        qualified_name = "ApigeeBetaInstancePeeringCidrRangeEnum{0}".format(
            resource)
        return instance_pb2.ApigeeBetaInstancePeeringCidrRangeEnum.Value(
            qualified_name)

    @classmethod
    def from_proto(self, resource):
        """Maps a wire enum value back to its short name; falsy passes through."""
        if not resource:
            return resource
        qualified_name = instance_pb2.ApigeeBetaInstancePeeringCidrRangeEnum.Name(
            resource)
        return qualified_name[len("ApigeeBetaInstancePeeringCidrRangeEnum"):]
class InstanceStateEnum(object):
    """Converts instance state enum values to and from proto form."""

    @classmethod
    def to_proto(self, resource):
        """Maps a short enum name to its wire enum value; falsy passes through."""
        if not resource:
            return resource
        qualified_name = "ApigeeBetaInstanceStateEnum{0}".format(resource)
        return instance_pb2.ApigeeBetaInstanceStateEnum.Value(qualified_name)

    @classmethod
    def from_proto(self, resource):
        """Maps a wire enum value back to its short name; falsy passes through."""
        if not resource:
            return resource
        qualified_name = instance_pb2.ApigeeBetaInstanceStateEnum.Name(resource)
        return qualified_name[len("ApigeeBetaInstanceStateEnum"):]
class Primitive(object):
    """Conversion helpers for primitive proto fields."""

    @classmethod
    def to_proto(self, s):
        """Maps a falsy value to the proto string default, else passes through."""
        return s if s else ""

    @classmethod
    def from_proto(self, s):
        """Returns the proto value unchanged."""
        return s
| |
"""Forms for OAuth2 applications."""
from django import forms
from django.core.exceptions import ValidationError
from django.forms import widgets
from django.utils.translation import ugettext, ugettext_lazy as _
from djblets.forms.widgets import CopyableTextInput, ListEditWidget
from oauth2_provider.generators import (generate_client_id,
generate_client_secret)
from oauth2_provider.validators import URIValidator
from reviewboard.admin.form_widgets import RelatedUserWidget
from reviewboard.oauth.models import Application
from reviewboard.oauth.widgets import OAuthSecretInputWidget
from reviewboard.site.models import LocalSite
from reviewboard.site.urlresolvers import local_site_reverse
class ApplicationChangeForm(forms.ModelForm):
"""A form for updating an Application.
This form is intended to be used by the admin site.
"""
DISABLED_FOR_SECURITY_ERROR = _(
'This Application has been disabled to keep your server secure. '
'It cannot be re-enabled until its client secret changes.'
)
client_id = forms.CharField(
label=_('Client ID'),
help_text=_(
'The client ID. Your application will use this in OAuth2 '
'authentication to identify itself.',
),
widget=CopyableTextInput(attrs={
'readonly': True,
'size': 100,
}),
required=False,
)
    def __init__(self, data=None, initial=None, instance=None):
        """Initialize the form:

        Args:
            data (dict, optional):
                The provided form data.

            initial (dict, optional):
                The initial form values.

            instance (Application, optional):
                The application to edit.
        """
        super(ApplicationChangeForm, self).__init__(data=data,
                                                    initial=initial,
                                                    instance=instance)

        if instance and instance.pk:
            # If we are creating an application (as the
            # ApplicationCreationForm is a subclass of this class), the
            # client_secret won't be present so we don't have to initialize
            # the widget.
            #
            # The OAuthSecretInputWidget lets the secret be regenerated
            # through the API endpoint for this specific application.
            client_secret = self.fields['client_secret']
            client_secret.widget = OAuthSecretInputWidget(
                attrs=client_secret.widget.attrs,
                api_url=local_site_reverse('oauth-app-resource',
                                           local_site=instance.local_site,
                                           kwargs={'app_id': instance.pk}),
            )
def clean_extra_data(self):
"""Prevent ``extra_data`` from being an empty string.
Returns:
unicode:
Either a non-zero length string of JSON-encoded data or ``None``.
"""
return self.cleaned_data['extra_data'] or None
def clean_redirect_uris(self):
"""Clean the ``redirect_uris`` field.
This method will ensure that all the URIs are valid by validating
each of them, as well as removing unnecessary whitespace.
Returns:
unicode:
A space-separated list of URIs.
Raises:
django.core.exceptions.ValidationError:
Raised when one or more URIs are invalid.
"""
validator = URIValidator()
redirect_uris = self.cleaned_data.get('redirect_uris', '').split()
errors = []
for uri in redirect_uris:
try:
validator(uri)
except ValidationError as e:
errors.append(e)
if errors:
raise ValidationError(errors)
# We join the list instead of returning the initial value because the
# the original value may have had multiple adjacent whitespace
# characters.
return ' '.join(redirect_uris)
    def clean(self):
        """Validate the form.

        This will validate the relationship between the
        ``authorization_grant_type`` and ``redirect_uris`` fields to ensure the
        values are compatible.

        This method is very similar to
        :py:func:`Application.clean
        <oauth2_provider.models.AbstractApplication.clean>`, but the data will
        be verified by the form instead of the model to allow error messages to
        be usable by consumers of the form.

        This method does not raise an exception upon failing validation.
        Instead, it sets errors internally so that they are related to the
        pertinent field instead of the form as a whole.

        Returns:
            dict:
            The cleaned form data.
        """
        super(ApplicationChangeForm, self).clean()

        grant_type = self.cleaned_data.get('authorization_grant_type')

        # redirect_uris will not be present in cleaned_data if validation
        # failed.
        redirect_uris = self.cleaned_data.get('redirect_uris')

        # Authorization-code and implicit grants require at least one
        # redirect URI; attach the error to that field rather than the form.
        if (redirect_uris is not None and
            len(redirect_uris) == 0 and
            grant_type in (Application.GRANT_AUTHORIZATION_CODE,
                           Application.GRANT_IMPLICIT)):
            # This is unfortunately not publicly exposed in Django 1.6, but it
            # is exposed in later versions (as add_error).
            self._errors['redirect_uris'] = self.error_class([
                ugettext(
                    'The "redirect_uris" field may not be blank when '
                    '"authorization_grant_type" is "%s"'
                )
                % grant_type
            ])

            self.cleaned_data.pop('redirect_uris')

        # A security-disabled application may not be re-enabled until its
        # client secret has been changed.
        if (self.instance and
            self.instance.pk and
            self.instance.is_disabled_for_security and
            self.cleaned_data['enabled']):
            raise ValidationError(self.DISABLED_FOR_SECURITY_ERROR)

        # client_id and client_secret are read-only in this form; drop them
        # so user-submitted values can never overwrite the stored ones.
        if 'client_id' in self.cleaned_data:
            del self.cleaned_data['client_id']

        if 'client_secret' in self.cleaned_data:
            del self.cleaned_data['client_secret']

        return self.cleaned_data
    class Meta:
        """Form metadata: model binding, help text, widgets, and labels."""

        model = Application
        fields = '__all__'

        # Help text rendered beneath each field in the admin form.
        help_texts = {
            'authorization_grant_type': _(
                'How the authorization is granted to the application.'
            ),
            'client_secret': _(
                'The client secret. This should only be known to Review Board '
                'and your application.'
            ),
            'client_type': _(
                "The type of client. Confidential clients must be able to "
                "keep users' passwords secure."
            ),
            'name': _(
                'The application name.'
            ),
            # NOTE(review): the trailing comma inside this call is stray
            # (still a single-argument call); harmless but looks unintended.
            'redirect_uris': _(
                'A list of allowed URIs to redirect to.',
            ),
            'skip_authorization': _(
                'Whether or not users will be prompted for authentication. '
                'This should most likely be unchecked.'
            ),
            'user': _(
                'The user who created the application. The selected user will '
                'be able to change these settings from their account settings.'
            ),
        }

        # Custom widgets: the secret is rendered read-only but copyable;
        # user fields use the searchable related-user widget.
        widgets = {
            'client_secret': CopyableTextInput(attrs={
                'readonly': True,
                'size': 100,
            }),
            'name': widgets.TextInput(attrs={'size': 60}),
            'redirect_uris': ListEditWidget(attrs={'size': 60}, sep=' '),
            'user': RelatedUserWidget(multivalued=False),
            'original_user': RelatedUserWidget(multivalued=False),
        }

        # Human-readable field labels.
        labels = {
            'authorization_grant_type': _('Authorization Grant Type'),
            'client_secret': _('Client Secret'),
            'client_type': _('Client Type'),
            'name': _('Name'),
            'redirect_uris': _('Redirect URIs'),
            'skip_authorization': _('Skip Authorization'),
            'user': _('User'),
        }
class ApplicationCreationForm(ApplicationChangeForm):
    """A form for creating an Application.

    This is meant to be used by the admin site.
    """

    def save(self, commit=True):
        """Save the form.

        This method will generate the ``client_id`` and ``client_secret``
        fields.

        Args:
            commit (bool, optional):
                Whether or not the Application should be saved to the database.

        Returns:
            reviewboard.oauth.models.Application:
                The created Application.
        """
        application = super(ApplicationCreationForm, self).save(commit=False)

        # Newly-created applications always receive freshly-generated
        # credentials; these fields are excluded from the form below.
        application.client_id = generate_client_id()
        application.client_secret = generate_client_secret()

        if commit:
            application.save()

        return application

    class Meta(ApplicationChangeForm.Meta):
        exclude = (
            'client_id',
            'client_secret',
        )
class UserApplicationChangeForm(ApplicationChangeForm):
    """A form for an end user to change an Application."""

    def __init__(self, user, data=None, initial=None, instance=None):
        """Initialize the form.

        Args:
            user (django.contrib.auth.models.User):
                The user changing the form. Ignored, but included to match
                :py:meth:`UserApplicationCreationForm.__init__`.

            data (dict):
                The provided data.

            initial (dict, optional):
                The initial form values.

            instance (reviewboard.oauth.models.Application):
                The Application that is to be edited.
        """
        super(UserApplicationChangeForm, self).__init__(data=data,
                                                        initial=initial,
                                                        instance=instance)

        # Limit the Local Site choices to sites the user belongs to, and
        # render the field as disabled in the browser.
        field = self.fields['local_site']
        field.queryset = LocalSite.objects.filter(users=user)
        field.widget.attrs['disabled'] = True

    def clean(self):
        """Clean the form data.

        This method will ensure that the ``local_site`` field cannot be changed
        via form submission.

        Returns:
            dict:
                A dictionary of the cleaned form data.
        """
        super(UserApplicationChangeForm, self).clean()

        # Drop any submitted local_site value; the field is not editable
        # through this form.
        self.cleaned_data.pop('local_site', None)

        return self.cleaned_data

    class Meta(ApplicationChangeForm.Meta):
        exclude = (
            'extra_data',
            'original_user',
            'skip_authorization',
            'user',
        )

        labels = dict(ApplicationChangeForm.Meta.labels)
        labels['local_site'] = _('Restrict To')

        help_texts = dict(ApplicationChangeForm.Meta.help_texts)
        help_texts['local_site'] = _('If this application is not restricted, '
                                     'it will be available to all users.'
                                     '<br><br>This cannot be changed once '
                                     'set.')
class UserApplicationCreationForm(ApplicationCreationForm):
    """A form for an end user to create an Application."""

    def __init__(self, user, data, initial=None, instance=None):
        """Initialize the form.

        Args:
            user (django.contrib.auth.models.User):
                The user creating the application.

            data (dict):
                The provided data.

            initial (dict, optional):
                The initial form values.

            instance (reviewboard.oauth.models.Application, optional):
                The Application that is to be edited.

                This should always be ``None``.
        """
        assert instance is None

        super(UserApplicationCreationForm, self).__init__(data=data,
                                                          initial=initial,
                                                          instance=instance)

        self.user = user

        # Only offer Local Sites the creating user is a member of.
        self.fields['local_site'].queryset = \
            LocalSite.objects.filter(users=user)

    def save(self, commit=True):
        """Save the form.

        This method will associate the user creating the application as its
        owner.

        Args:
            commit (bool, optional):
                Whether or not the Application should be saved to the database.

        Returns:
            reviewboard.oauth.models.Application:
                The created Application.
        """
        application = super(UserApplicationCreationForm, self).save(
            commit=False)
        application.user = self.user

        if commit:
            application.save()

        return application

    class Meta(ApplicationCreationForm.Meta):
        exclude = (ApplicationCreationForm.Meta.exclude +
                   UserApplicationChangeForm.Meta.exclude)

        labels = dict(ApplicationCreationForm.Meta.labels)
        labels['local_site'] = \
            UserApplicationChangeForm.Meta.labels['local_site']

        help_texts = dict(ApplicationCreationForm.Meta.help_texts)
        help_texts['local_site'] = \
            UserApplicationChangeForm.Meta.help_texts['local_site']
| |
#!/usr/bin/env python
import argparse
import urllib
import datetime
import inspect
import sqlite3
import os
import requests
from models import Track, TimeRange
class ApiUsageLimitException(Exception):
    """Raised when the per-run API request budget has been used up."""
    pass
class LastFmApiException(Exception):
    """Raised when the Last.fm API returns an HTTP or API-level error."""
    pass
class UserTracks(object):
    """Fetch and store a Last.fm user's recently-played tracks.

    Tracks, together with the time ranges of history still missing, are
    persisted in a per-user SQLite database so fetching can resume across
    runs.

    NOTE(review): this module targets Python 2 (``urllib.urlencode``,
    and ``main`` below uses a print statement).
    """

    # Maximum number of API requests issued per run.
    api_request_limit = 5
    last_fm_url = "http://ws.audioscrobbler.com/2.0/"
    # Directory holding the per-user SQLite databases.
    db_location = "dbs"

    def __init__(self, username, api_key):
        self.username = username
        self.api_key = api_key
        self.request_counter = 0
        self.conn = self._get_db_connection()
        self.cur = self.conn.cursor()
        self._prepare_tables()

    def _get_db_connection(self):
        """Open (creating directories/files as needed) the user's database."""
        db_name = u"{}.db".format(self.username)

        if not os.path.exists(self.db_location):
            os.makedirs(self.db_location)

        path_to_db = os.path.join(self.db_location, db_name)
        conn = sqlite3.connect(path_to_db)
        # Autocommit: every statement is committed immediately.
        conn.isolation_level = None
        return conn

    def _prepare_tables(self):
        """Create the backing tables if they do not exist yet."""
        TimeRange.create_table(self.cur)
        Track.create_table(self.cur)

    @staticmethod
    def _utc_timestamp_now():
        """Return the current time as an integer UTC Unix timestamp.

        Uses calendar.timegm() instead of strftime("%s"): "%s" is a
        non-standard platform extension, and it interprets the naive UTC
        datetime in *local* time, producing a skewed timestamp whenever
        the process timezone is not UTC.
        """
        import calendar

        dt_now = datetime.datetime.utcnow()
        return calendar.timegm(dt_now.utctimetuple())

    def request_tracks(self, timestamp_from, timestamp_to):
        """
        Execute API call and return Track objects.

        Raises ApiUsageLimitException once the per-run request budget is
        spent, and LastFmApiException on HTTP or API-level errors.
        """
        if self.request_counter >= self.api_request_limit:
            raise ApiUsageLimitException()

        query = {
            "method": "user.getRecentTracks",
            "format": "json",
            "api_key": self.api_key,
            "user": self.username,
            "from": timestamp_from,
            "to": timestamp_to,
        }
        query_str = urllib.urlencode(query)
        url = "{}?{}".format(self.last_fm_url, query_str)

        response = requests.get(url)
        self.request_counter += 1

        if not response.ok:
            msg = "Error while requesting data: {!r}".format(response.reason)
            raise LastFmApiException(msg)

        data = response.json()

        if "error" in data:
            msg = "Error from Last.fm: {!r}".format(data.get("message", data))
            raise LastFmApiException(msg)

        tracks = Track.many_from_json(data)
        return tracks

    def next_time_range(self):
        """
        Calculate the next missing range of data.

        Returns a (timestamp_from, timestamp_to) tuple, or None when no
        known gap remains for this run.
        """
        # All recorded gaps are filled; nothing more to do this run.
        if self.request_counter > 0 and TimeRange.table_empty(self.cur):
            return None

        # First request of the run: fetch everything newer than the most
        # recent stored track (or the full history if none is stored).
        if self.request_counter == 0:
            if Track.table_empty(self.cur):
                timestamp_from = 0
            else:
                timestamp_from = Track.latest(self.cur).timestamp

            return (timestamp_from, self._utc_timestamp_now())

        # Otherwise continue filling the most recently recorded gap.
        latest = TimeRange.latest(self.cur)
        return latest.timestamp_from, latest.timestamp_to

    def _update_time_ranges(self, tr_query, tr_got):
        """Replace the queried gap with whatever part remains unfetched.

        ``tr_query`` is the range that was requested; ``tr_got`` is the
        range actually covered by the returned tracks.
        """
        TimeRange(*tr_query).remove_from_db(self.cur)

        # A remainder of at most one second that is bounded by a stored
        # track on either side is treated as closed.
        short_range = tr_got[0] - tr_query[0] <= 1

        if short_range and (
            Track(None, None, timestamp=tr_query[0]).is_in_db(self.cur) or
            Track(None, None, timestamp=tr_got[0]).is_in_db(self.cur)
        ):
            return

        remaining_time_range = TimeRange(tr_query[0], tr_got[0])
        TimeRange.add_to_db(self.cur, remaining_time_range)

    def process(self):
        """
        Main execution loop.

        Once the user reaches the limit of API requests, or there is no
        more data to fetch for now, it stops.
        """
        while True:
            try:
                more_possible = self.update_user_tracks()
            except ApiUsageLimitException:
                break

            if not more_possible:
                break

    def update_user_tracks(self):
        """
        Execute one cycle of requesting data from the API.

        Stores the returned tracks and updates which time ranges are
        still missing. Returns True when another cycle may yield more
        data, False otherwise.
        """
        time_range = self.next_time_range()

        if not time_range:
            return False

        tracks = self.request_tracks(*time_range)

        if not tracks:
            # Nothing in this range: the gap is fully explored.
            TimeRange(*time_range).remove_from_db(self.cur)
            return not TimeRange.table_empty(self.cur)

        Track.add_to_db(self.cur, tracks)

        # Tracks arrive newest-first.
        newest_ts = tracks[0].timestamp
        oldest_ts = tracks[-1].timestamp
        new_time_range = (oldest_ts, newest_ts)

        self._update_time_ranges(time_range, new_time_range)
        return True

    def stats(self):
        """
        Produce printable stats.
        """
        data = {
            "username": self.username,
            "count": Track.count(self.cur),
            "top_artists": u", ".join(Track.favourite_artists(self.cur)),
            "most_active_day_of_week": Track.most_active_day_of_week(self.cur),
            "average_tracks_per_day": Track.average_tracks_per_day(self.cur),
        }
        # Typo fixed: "for far" -> "so far".
        msg = u"""
        Stats for user '{username}':
        - listened to a total of {count} tracks.
        - top 5 favorite artists: {top_artists}.
        - listen to an average of {average_tracks_per_day} tracks a day.
        - most active day is {most_active_day_of_week}.
        All stats based on data fetched so far
        """.format(**data)
        return inspect.cleandoc(msg)
def main():
    """Command-line entry point.

    Parses the username and API key, fetches as much listening history as
    the per-run API budget allows, and prints the resulting stats.
    """
    description = """
    Last.fm user track analysis.
    Builds up history of users tracks and produces stats like:
    - Number of tracks fetched
    - Top 5 artists
    - Average number of tracks per day
    - Most active day
    """
    parser = argparse.ArgumentParser(
        description=inspect.cleandoc(description),
        epilog="Created by Karol Duleba",
        formatter_class=argparse.RawTextHelpFormatter
    )
    parser.add_argument('username', help="Name of Last.fm user")
    parser.add_argument('api_key', help="Last.fm api key")
    args = parser.parse_args()

    # NOTE: a hard-coded API key that used to live in a comment here was
    # removed -- credentials must not be committed, even commented out.
    ut = UserTracks(username=args.username, api_key=args.api_key)
    ut.process()

    stats = ut.stats()
    # Parenthesized form prints identically under Python 2 and 3.
    print(stats)


if __name__ == '__main__':
    main()
| |
from django.utils.translation import ugettext
from livesettings import values
from livesettings.models import SettingNotSet
from livesettings.utils import is_string_like
import logging
log = logging.getLogger('configuration')
_NOTSET = object()
class ConfigurationSettings(object):
    """A singleton manager for ConfigurationSettings.

    All instances share one ``__impl`` object; attribute and item access
    on the facade is delegated to it.

    Fixes over the previous revision (all also valid on Python 2.6+):
    ``raise AttributeError, key`` and ``dict.has_key()`` were Python-2-only
    syntax/APIs, and the bare ``except:`` clauses have been narrowed to
    ``except Exception:`` so they no longer swallow KeyboardInterrupt.
    """

    class __impl(object):
        """The single shared implementation behind the singleton facade."""

        def __init__(self):
            self.settings = values.SortedDotDict()
            # Choices registered before their value exists, keyed by
            # (group key, value key).
            self.prereg = {}

        def __getitem__(self, key):
            """Get an element either by ConfigurationGroup object or by its key"""
            key = self._resolve_key(key)
            return self.settings.get(key)

        def __getattr__(self, key):
            """Get an element either by ConfigurationGroup object or by its key"""
            try:
                return self[key]
            except Exception:
                raise AttributeError(key)

        def __iter__(self):
            for v in self.groups():
                yield v

        def __len__(self):
            return len(self.settings)

        def __contains__(self, key):
            try:
                key = self._resolve_key(key)
                return key in self.settings
            except Exception:
                return False

        def _resolve_key(self, raw):
            """Normalize a string, ConfigurationGroup, or index to a group key."""
            if is_string_like(raw):
                key = raw
            elif isinstance(raw, values.ConfigurationGroup):
                key = raw.key
            else:
                # Fall back to treating ``raw`` as an index into the
                # ordered group list.
                group = self.groups()[raw]
                key = group.key

            return key

        def get_config(self, group, key):
            """Return the value object for (group, key) or raise SettingNotSet."""
            try:
                if isinstance(group, values.ConfigurationGroup):
                    group = group.key

                cg = self.settings.get(group, None)

                if not cg:
                    raise SettingNotSet('%s config group does not exist' % group)
                else:
                    return cg[key]
            except KeyError:
                raise SettingNotSet('%s.%s' % (group, key))

        def groups(self):
            """Return ordered list"""
            return self.settings.values()

        def has_config(self, group, key):
            """Return True when (group, key) is registered."""
            if isinstance(group, values.ConfigurationGroup):
                group = group.key

            cfg = self.settings.get(group, None)
            return bool(cfg and key in cfg)

        def preregister_choice(self, group, key, choice):
            """Setup a choice for a group/key which hasn't been instantiated yet."""
            self.prereg.setdefault((group, key), []).append(choice)

        def register(self, value):
            """Register a value, attaching any preregistered choices."""
            g = value.group

            if not isinstance(g, values.ConfigurationGroup):
                raise ValueError('value.group should be an instance of ConfigurationGroup')

            groupkey = g.key
            valuekey = value.key

            for choice in self.prereg.get((groupkey, valuekey), []):
                value.add_choice(choice)

            if groupkey not in self.settings:
                self.settings[groupkey] = g

            self.settings[groupkey][valuekey] = value

            return value

    __instance = None

    def __init__(self):
        if ConfigurationSettings.__instance is None:
            ConfigurationSettings.__instance = ConfigurationSettings.__impl()

        # Write through __dict__ to avoid __setattr__, which delegates to
        # the shared implementation.
        self.__dict__['_ConfigurationSettings__instance'] = ConfigurationSettings.__instance

    def __getattr__(self, attr):
        """ Delegate access to implementation """
        return getattr(self.__instance, attr)

    def __getitem__(self, key):
        return self.__instance[key]

    def __len__(self):
        return len(self.__instance)

    def __setattr__(self, attr, value):
        """ Delegate access to implementation """
        return setattr(self.__instance, attr, value)

    def __unicode__(self):
        # Python 2 only: ``unicode`` does not exist on Python 3.
        return u"ConfigurationSettings: " + unicode(self.groups())
def config_exists(group, key):
    """Test to see if a setting has been registered.

    Returns True when (group, key) is known to the registry.
    """
    return ConfigurationSettings().has_config(group, key)
def config_get(group, key):
    """Get a configuration setting.

    Raises SettingNotSet (after logging at debug level) when the setting
    has not been registered.
    """
    try:
        return ConfigurationSettings().get_config(group, key)
    except SettingNotSet:
        log.debug('SettingNotSet: %s.%s', group, key)
        raise
def config_get_group(group):
    """Get a ConfigurationGroup by key, group object, or ordinal index."""
    return ConfigurationSettings()[group]
def config_collect_values(group, groupkey, key, unique=True, skip_missing=True):
    """Look up (group, groupkey) from config, then take the values returned and
    use them as groups for a second-stage lookup.

    For example:

    config_collect_values(PAYMENT, MODULES, CREDITCHOICES)

    Stage 1: ['PAYMENT_GOOGLE', 'PAYMENT_AUTHORIZENET']
    Stage 2: config_value('PAYMENT_GOOGLE', 'CREDITCHOICES')
           + config_value('PAYMENT_AUTHORIZENET', 'CREDITCHOICES')
    Stage 3: (if unique is true) remove dupes
    """
    groups = config_value(group, groupkey)

    ret = []

    for g in groups:
        try:
            ret.append(config_value(g, key))
        except KeyError:
            # NOTE(review): config_value raises SettingNotSet, not KeyError,
            # for missing settings -- confirm whether SettingNotSet derives
            # from KeyError; otherwise skip_missing never takes effect here.
            if not skip_missing:
                raise SettingNotSet('No config %s.%s' % (g, key))

    if unique:
        # Deduplicate while preserving order. Values may be unhashable
        # (e.g. lists), so list membership is used instead of a set.
        out = []

        for x in ret:
            if x not in out:
                out.append(x)

        ret = out

    return ret
def config_register(value):
    """Register a value or values.

    Parameters:
        -A Value

    Returns the registered value.
    """
    return ConfigurationSettings().register(value)
def config_register_list(*args):
    """Register each of the given values with the configuration system."""
    for value in args:
        config_register(value)
def config_value(group, key, default=_NOTSET):
    """Get a value from the configuration system.

    When ``default`` is supplied it is returned for a missing setting;
    otherwise SettingNotSet propagates to the caller.
    """
    try:
        return config_get(group, key).value
    except SettingNotSet:
        # Identity test: _NOTSET is a sentinel object, and ``!=`` could be
        # hijacked by a default value's custom __eq__.
        if default is not _NOTSET:
            return default

        raise
def config_value_safe(group, key, default_value):
    """Get a config value with a default fallback, safe for use during SyncDB."""
    raw = default_value

    try:
        raw = config_value(group, key)
    except SettingNotSet:
        pass
    except ImportError:
        # Settings may not be importable yet while the database is being
        # built; fall back to the default.
        log.warning("Error getting %s.%s, OK if you are in SyncDB.", group, key)

    return raw
def config_choice_values(group, key, skip_missing=True, translate=False):
    """Get pairs of key, label from the setting.

    Missing settings yield an empty list when ``skip_missing`` is set;
    otherwise SettingNotSet is raised. With ``translate``, labels are run
    through ugettext.
    """
    try:
        choices = config_get(group, key).choice_values
    except SettingNotSet:
        if not skip_missing:
            raise SettingNotSet('%s.%s' % (group, key))
        return []

    if translate:
        return [(k, ugettext(v)) for k, v in choices]

    return choices
def config_add_choice(group, key, choice):
    """Add a choice to a value.

    If the value has not been registered yet, the choice is queued and
    attached when the value is eventually registered.
    """
    if config_exists(group, key):
        cfg = config_get(group, key)
        cfg.add_choice(choice)
    else:
        ConfigurationSettings().preregister_choice(group, key, choice)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from typing import TYPE_CHECKING
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Iterable, Optional, Union
from azure.core.paging import ItemPaged
class WebSiteManagementClientOperationsMixin(object):
    def check_name_availability(
        self,
        name,  # type: str
        type,  # type: Union[str, "_models.CheckNameResourceTypes"]
        is_fqdn=None,  # type: Optional[bool]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.ResourceNameAvailability"
        """Check if a resource name is available.

        Description for Check if a resource name is available.

        :param name: Resource name to verify.
        :type name: str
        :param type: Resource type used for verification.
        :type type: str or ~azure.mgmt.web.v2021_03_01.models.CheckNameResourceTypes
        :param is_fqdn: Is fully qualified domain name.
        :type is_fqdn: bool
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ResourceNameAvailability, or the result of cls(response)
        :rtype: ~azure.mgmt.web.v2021_03_01.models.ResourceNameAvailability
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Select the versioned operations class matching this profile's
        # API version; each version module is imported lazily so only the
        # one needed is loaded.
        api_version = self._get_api_version('check_name_availability')
        if api_version == '2016-03-01':
            from .v2016_03_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2018-02-01':
            from .v2018_02_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2019-08-01':
            from .v2019_08_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2020-06-01':
            from .v2020_06_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2020-09-01':
            from .v2020_09_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2020-12-01':
            from .v2020_12_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2021-01-01':
            from .v2021_01_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2021-01-15':
            from .v2021_01_15.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2021-03-01':
            from .v2021_03_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        else:
            raise ValueError("API version {} does not have operation 'check_name_availability'".format(api_version))
        # Build a transient mixin wired to this client's pipeline, with
        # serializers bound to the chosen version's models. Client-side
        # validation of request models is disabled.
        mixin_instance = OperationClass()
        mixin_instance._client = self._client
        mixin_instance._config = self._config
        mixin_instance._serialize = Serializer(self._models_dict(api_version))
        mixin_instance._serialize.client_side_validation = False
        mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
        return mixin_instance.check_name_availability(name, type, is_fqdn, **kwargs)
    def get_publishing_user(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.User"
        """Gets publishing user.

        Description for Gets publishing user.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: User, or the result of cls(response)
        :rtype: ~azure.mgmt.web.v2021_03_01.models.User
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Select the versioned operations class matching this profile's
        # API version; each version module is imported lazily.
        api_version = self._get_api_version('get_publishing_user')
        if api_version == '2016-03-01':
            from .v2016_03_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2018-02-01':
            from .v2018_02_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2019-08-01':
            from .v2019_08_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2020-06-01':
            from .v2020_06_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2020-09-01':
            from .v2020_09_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2020-12-01':
            from .v2020_12_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2021-01-01':
            from .v2021_01_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2021-01-15':
            from .v2021_01_15.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2021-03-01':
            from .v2021_03_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        else:
            raise ValueError("API version {} does not have operation 'get_publishing_user'".format(api_version))
        # Build a transient mixin wired to this client's pipeline, with
        # serializers bound to the chosen version's models.
        mixin_instance = OperationClass()
        mixin_instance._client = self._client
        mixin_instance._config = self._config
        mixin_instance._serialize = Serializer(self._models_dict(api_version))
        mixin_instance._serialize.client_side_validation = False
        mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
        return mixin_instance.get_publishing_user(**kwargs)
    def get_source_control(
        self,
        source_control_type,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.SourceControl"
        """Gets source control token.

        Description for Gets source control token.

        :param source_control_type: Type of source control.
        :type source_control_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: SourceControl, or the result of cls(response)
        :rtype: ~azure.mgmt.web.v2021_03_01.models.SourceControl
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Select the versioned operations class matching this profile's
        # API version; each version module is imported lazily.
        api_version = self._get_api_version('get_source_control')
        if api_version == '2016-03-01':
            from .v2016_03_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2018-02-01':
            from .v2018_02_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2019-08-01':
            from .v2019_08_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2020-06-01':
            from .v2020_06_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2020-09-01':
            from .v2020_09_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2020-12-01':
            from .v2020_12_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2021-01-01':
            from .v2021_01_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2021-01-15':
            from .v2021_01_15.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2021-03-01':
            from .v2021_03_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        else:
            raise ValueError("API version {} does not have operation 'get_source_control'".format(api_version))
        # Build a transient mixin wired to this client's pipeline, with
        # serializers bound to the chosen version's models.
        mixin_instance = OperationClass()
        mixin_instance._client = self._client
        mixin_instance._config = self._config
        mixin_instance._serialize = Serializer(self._models_dict(api_version))
        mixin_instance._serialize.client_side_validation = False
        mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
        return mixin_instance.get_source_control(source_control_type, **kwargs)
    def get_subscription_deployment_locations(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.DeploymentLocations"
        """Gets list of available geo regions plus ministamps.

        Description for Gets list of available geo regions plus ministamps.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DeploymentLocations, or the result of cls(response)
        :rtype: ~azure.mgmt.web.v2021_03_01.models.DeploymentLocations
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Select the versioned operations class matching this profile's
        # API version; each version module is imported lazily.
        api_version = self._get_api_version('get_subscription_deployment_locations')
        if api_version == '2016-03-01':
            from .v2016_03_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2018-02-01':
            from .v2018_02_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2019-08-01':
            from .v2019_08_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2020-06-01':
            from .v2020_06_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2020-09-01':
            from .v2020_09_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2020-12-01':
            from .v2020_12_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2021-01-01':
            from .v2021_01_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2021-01-15':
            from .v2021_01_15.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2021-03-01':
            from .v2021_03_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        else:
            raise ValueError("API version {} does not have operation 'get_subscription_deployment_locations'".format(api_version))
        # Build a transient mixin wired to this client's pipeline, with
        # serializers bound to the chosen version's models.
        mixin_instance = OperationClass()
        mixin_instance._client = self._client
        mixin_instance._config = self._config
        mixin_instance._serialize = Serializer(self._models_dict(api_version))
        mixin_instance._serialize.client_side_validation = False
        mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
        return mixin_instance.get_subscription_deployment_locations(**kwargs)
    def list_billing_meters(
        self,
        billing_location=None,  # type: Optional[str]
        os_type=None,  # type: Optional[str]
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.BillingMeterCollection"]
        """Gets a list of meters for a given location.

        Description for Gets a list of meters for a given location.

        :param billing_location: Azure Location of billable resource.
        :type billing_location: str
        :param os_type: App Service OS type meters used for.
        :type os_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either BillingMeterCollection or the result of
         cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2021_03_01.models.BillingMeterCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Select the versioned operations class matching this profile's
        # API version; this operation first appeared in 2018-02-01.
        api_version = self._get_api_version('list_billing_meters')
        if api_version == '2018-02-01':
            from .v2018_02_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2019-08-01':
            from .v2019_08_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2020-06-01':
            from .v2020_06_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2020-09-01':
            from .v2020_09_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2020-12-01':
            from .v2020_12_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2021-01-01':
            from .v2021_01_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2021-01-15':
            from .v2021_01_15.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2021-03-01':
            from .v2021_03_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        else:
            raise ValueError("API version {} does not have operation 'list_billing_meters'".format(api_version))
        # Build a transient mixin wired to this client's pipeline, with
        # serializers bound to the chosen version's models.
        mixin_instance = OperationClass()
        mixin_instance._client = self._client
        mixin_instance._config = self._config
        mixin_instance._serialize = Serializer(self._models_dict(api_version))
        mixin_instance._serialize.client_side_validation = False
        mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
        return mixin_instance.list_billing_meters(billing_location, os_type, **kwargs)
    def list_custom_host_name_sites(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.CustomHostnameSitesCollection"]
        """Get custom hostnames under this subscription.

        Get custom hostnames under this subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either CustomHostnameSitesCollection or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2021_03_01.models.CustomHostnameSitesCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # This operation only exists in the 2021-03-01 API version.
        api_version = self._get_api_version('list_custom_host_name_sites')
        if api_version == '2021-03-01':
            from .v2021_03_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        else:
            raise ValueError("API version {} does not have operation 'list_custom_host_name_sites'".format(api_version))
        # Build a transient mixin wired to this client's pipeline, with
        # serializers bound to the chosen version's models.
        mixin_instance = OperationClass()
        mixin_instance._client = self._client
        mixin_instance._config = self._config
        mixin_instance._serialize = Serializer(self._models_dict(api_version))
        mixin_instance._serialize.client_side_validation = False
        mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
        return mixin_instance.list_custom_host_name_sites(**kwargs)
    def list_geo_regions(
        self,
        sku=None,  # type: Optional[Union[str, "_models.SkuName"]]
        linux_workers_enabled=None,  # type: Optional[bool]
        xenon_workers_enabled=None,  # type: Optional[bool]
        linux_dynamic_workers_enabled=None,  # type: Optional[bool]
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.GeoRegionCollection"]
        """Get a list of available geographical regions.

        Description for Get a list of available geographical regions.

        :param sku: Name of SKU used to filter the regions.
        :type sku: str or ~azure.mgmt.web.v2021_03_01.models.SkuName
        :param linux_workers_enabled: Specify :code:`<code>true</code>` if you want to filter to only
         regions that support Linux workers.
        :type linux_workers_enabled: bool
        :param xenon_workers_enabled: Specify :code:`<code>true</code>` if you want to filter to only
         regions that support Xenon workers.
        :type xenon_workers_enabled: bool
        :param linux_dynamic_workers_enabled: Specify :code:`<code>true</code>` if you want to filter
         to only regions that support Linux Consumption Workers.
        :type linux_dynamic_workers_enabled: bool
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either GeoRegionCollection or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2021_03_01.models.GeoRegionCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Select the versioned operations class matching this profile's
        # API version; each version module is imported lazily.
        api_version = self._get_api_version('list_geo_regions')
        if api_version == '2016-03-01':
            from .v2016_03_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2018-02-01':
            from .v2018_02_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2019-08-01':
            from .v2019_08_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2020-06-01':
            from .v2020_06_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2020-09-01':
            from .v2020_09_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2020-12-01':
            from .v2020_12_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2021-01-01':
            from .v2021_01_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2021-01-15':
            from .v2021_01_15.operations import WebSiteManagementClientOperationsMixin as OperationClass
        elif api_version == '2021-03-01':
            from .v2021_03_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
        else:
            raise ValueError("API version {} does not have operation 'list_geo_regions'".format(api_version))
        # Build a transient mixin wired to this client's pipeline, with
        # serializers bound to the chosen version's models.
        mixin_instance = OperationClass()
        mixin_instance._client = self._client
        mixin_instance._config = self._config
        mixin_instance._serialize = Serializer(self._models_dict(api_version))
        mixin_instance._serialize.client_side_validation = False
        mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
        return mixin_instance.list_geo_regions(sku, linux_workers_enabled, xenon_workers_enabled, linux_dynamic_workers_enabled, **kwargs)
def list_premier_add_on_offers(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.PremierAddOnOfferCollection"]
"""List all premier add-on offers.
Description for List all premier add-on offers.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PremierAddOnOfferCollection or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.web.v2021_03_01.models.PremierAddOnOfferCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = self._get_api_version('list_premier_add_on_offers')
if api_version == '2016-03-01':
from .v2016_03_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2018-02-01':
from .v2018_02_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2019-08-01':
from .v2019_08_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2020-06-01':
from .v2020_06_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2020-09-01':
from .v2020_09_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2020-12-01':
from .v2020_12_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2021-01-01':
from .v2021_01_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2021-01-15':
from .v2021_01_15.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2021-03-01':
from .v2021_03_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
else:
raise ValueError("API version {} does not have operation 'list_premier_add_on_offers'".format(api_version))
mixin_instance = OperationClass()
mixin_instance._client = self._client
mixin_instance._config = self._config
mixin_instance._serialize = Serializer(self._models_dict(api_version))
mixin_instance._serialize.client_side_validation = False
mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
return mixin_instance.list_premier_add_on_offers(**kwargs)
def list_site_identifiers_assigned_to_host_name(
self,
name_identifier, # type: "_models.NameIdentifier"
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.IdentifierCollection"]
"""List all apps that are assigned to a hostname.
Description for List all apps that are assigned to a hostname.
:param name_identifier: Hostname information.
:type name_identifier: ~azure.mgmt.web.v2021_03_01.models.NameIdentifier
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IdentifierCollection or the result of
cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.web.v2021_03_01.models.IdentifierCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = self._get_api_version('list_site_identifiers_assigned_to_host_name')
if api_version == '2016-03-01':
from .v2016_03_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2018-02-01':
from .v2018_02_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2019-08-01':
from .v2019_08_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2020-06-01':
from .v2020_06_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2020-09-01':
from .v2020_09_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2020-12-01':
from .v2020_12_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2021-01-01':
from .v2021_01_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2021-01-15':
from .v2021_01_15.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2021-03-01':
from .v2021_03_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
else:
raise ValueError("API version {} does not have operation 'list_site_identifiers_assigned_to_host_name'".format(api_version))
mixin_instance = OperationClass()
mixin_instance._client = self._client
mixin_instance._config = self._config
mixin_instance._serialize = Serializer(self._models_dict(api_version))
mixin_instance._serialize.client_side_validation = False
mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
return mixin_instance.list_site_identifiers_assigned_to_host_name(name_identifier, **kwargs)
def list_skus(
self,
**kwargs # type: Any
):
# type: (...) -> "_models.SkuInfos"
"""List all SKUs.
Description for List all SKUs.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SkuInfos, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2021_03_01.models.SkuInfos
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = self._get_api_version('list_skus')
if api_version == '2016-03-01':
from .v2016_03_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2018-02-01':
from .v2018_02_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2019-08-01':
from .v2019_08_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2020-06-01':
from .v2020_06_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2020-09-01':
from .v2020_09_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2020-12-01':
from .v2020_12_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2021-01-01':
from .v2021_01_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2021-01-15':
from .v2021_01_15.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2021-03-01':
from .v2021_03_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
else:
raise ValueError("API version {} does not have operation 'list_skus'".format(api_version))
mixin_instance = OperationClass()
mixin_instance._client = self._client
mixin_instance._config = self._config
mixin_instance._serialize = Serializer(self._models_dict(api_version))
mixin_instance._serialize.client_side_validation = False
mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
return mixin_instance.list_skus(**kwargs)
def list_source_controls(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.SourceControlCollection"]
"""Gets the source controls available for Azure websites.
Description for Gets the source controls available for Azure websites.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SourceControlCollection or the result of
cls(response)
:rtype:
~azure.core.paging.ItemPaged[~azure.mgmt.web.v2021_03_01.models.SourceControlCollection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = self._get_api_version('list_source_controls')
if api_version == '2016-03-01':
from .v2016_03_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2018-02-01':
from .v2018_02_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2019-08-01':
from .v2019_08_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2020-06-01':
from .v2020_06_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2020-09-01':
from .v2020_09_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2020-12-01':
from .v2020_12_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2021-01-01':
from .v2021_01_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2021-01-15':
from .v2021_01_15.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2021-03-01':
from .v2021_03_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
else:
raise ValueError("API version {} does not have operation 'list_source_controls'".format(api_version))
mixin_instance = OperationClass()
mixin_instance._client = self._client
mixin_instance._config = self._config
mixin_instance._serialize = Serializer(self._models_dict(api_version))
mixin_instance._serialize.client_side_validation = False
mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
return mixin_instance.list_source_controls(**kwargs)
def move(
self,
resource_group_name, # type: str
move_resource_envelope, # type: "_models.CsmMoveResourceEnvelope"
**kwargs # type: Any
):
# type: (...) -> None
"""Move resources between resource groups.
Description for Move resources between resource groups.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param move_resource_envelope: Object that represents the resource to move.
:type move_resource_envelope: ~azure.mgmt.web.v2021_03_01.models.CsmMoveResourceEnvelope
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = self._get_api_version('move')
if api_version == '2016-03-01':
from .v2016_03_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2018-02-01':
from .v2018_02_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2019-08-01':
from .v2019_08_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2020-06-01':
from .v2020_06_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2020-09-01':
from .v2020_09_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2020-12-01':
from .v2020_12_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2021-01-01':
from .v2021_01_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2021-01-15':
from .v2021_01_15.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2021-03-01':
from .v2021_03_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
else:
raise ValueError("API version {} does not have operation 'move'".format(api_version))
mixin_instance = OperationClass()
mixin_instance._client = self._client
mixin_instance._config = self._config
mixin_instance._serialize = Serializer(self._models_dict(api_version))
mixin_instance._serialize.client_side_validation = False
mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
return mixin_instance.move(resource_group_name, move_resource_envelope, **kwargs)
def update_publishing_user(
self,
user_details, # type: "_models.User"
**kwargs # type: Any
):
# type: (...) -> "_models.User"
"""Updates publishing user.
Description for Updates publishing user.
:param user_details: Details of publishing user.
:type user_details: ~azure.mgmt.web.v2021_03_01.models.User
:keyword callable cls: A custom type or function that will be passed the direct response
:return: User, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2021_03_01.models.User
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = self._get_api_version('update_publishing_user')
if api_version == '2016-03-01':
from .v2016_03_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2018-02-01':
from .v2018_02_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2019-08-01':
from .v2019_08_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2020-06-01':
from .v2020_06_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2020-09-01':
from .v2020_09_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2020-12-01':
from .v2020_12_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2021-01-01':
from .v2021_01_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2021-01-15':
from .v2021_01_15.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2021-03-01':
from .v2021_03_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
else:
raise ValueError("API version {} does not have operation 'update_publishing_user'".format(api_version))
mixin_instance = OperationClass()
mixin_instance._client = self._client
mixin_instance._config = self._config
mixin_instance._serialize = Serializer(self._models_dict(api_version))
mixin_instance._serialize.client_side_validation = False
mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
return mixin_instance.update_publishing_user(user_details, **kwargs)
def update_source_control(
self,
source_control_type, # type: str
request_message, # type: "_models.SourceControl"
**kwargs # type: Any
):
# type: (...) -> "_models.SourceControl"
"""Updates source control token.
Description for Updates source control token.
:param source_control_type: Type of source control.
:type source_control_type: str
:param request_message: Source control token information.
:type request_message: ~azure.mgmt.web.v2021_03_01.models.SourceControl
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SourceControl, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2021_03_01.models.SourceControl
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = self._get_api_version('update_source_control')
if api_version == '2016-03-01':
from .v2016_03_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2018-02-01':
from .v2018_02_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2019-08-01':
from .v2019_08_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2020-06-01':
from .v2020_06_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2020-09-01':
from .v2020_09_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2020-12-01':
from .v2020_12_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2021-01-01':
from .v2021_01_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2021-01-15':
from .v2021_01_15.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2021-03-01':
from .v2021_03_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
else:
raise ValueError("API version {} does not have operation 'update_source_control'".format(api_version))
mixin_instance = OperationClass()
mixin_instance._client = self._client
mixin_instance._config = self._config
mixin_instance._serialize = Serializer(self._models_dict(api_version))
mixin_instance._serialize.client_side_validation = False
mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
return mixin_instance.update_source_control(source_control_type, request_message, **kwargs)
def validate(
self,
resource_group_name, # type: str
validate_request, # type: "_models.ValidateRequest"
**kwargs # type: Any
):
# type: (...) -> "_models.ValidateResponse"
"""Validate if a resource can be created.
Description for Validate if a resource can be created.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param validate_request: Request with the resources to validate.
:type validate_request: ~azure.mgmt.web.v2021_03_01.models.ValidateRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ValidateResponse, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2021_03_01.models.ValidateResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = self._get_api_version('validate')
if api_version == '2016-03-01':
from .v2016_03_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2018-02-01':
from .v2018_02_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2019-08-01':
from .v2019_08_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2020-06-01':
from .v2020_06_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2020-09-01':
from .v2020_09_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2020-12-01':
from .v2020_12_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2021-01-01':
from .v2021_01_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2021-01-15':
from .v2021_01_15.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2021-03-01':
from .v2021_03_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
else:
raise ValueError("API version {} does not have operation 'validate'".format(api_version))
mixin_instance = OperationClass()
mixin_instance._client = self._client
mixin_instance._config = self._config
mixin_instance._serialize = Serializer(self._models_dict(api_version))
mixin_instance._serialize.client_side_validation = False
mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
return mixin_instance.validate(resource_group_name, validate_request, **kwargs)
def validate_container_settings(
self,
resource_group_name, # type: str
validate_container_settings_request, # type: "_models.ValidateContainerSettingsRequest"
**kwargs # type: Any
):
# type: (...) -> Any
"""Validate if the container settings are correct.
Validate if the container settings are correct.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param validate_container_settings_request:
:type validate_container_settings_request:
~azure.mgmt.web.v2018_02_01.models.ValidateContainerSettingsRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: any, or the result of cls(response)
:rtype: any
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = self._get_api_version('validate_container_settings')
if api_version == '2018-02-01':
from .v2018_02_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
else:
raise ValueError("API version {} does not have operation 'validate_container_settings'".format(api_version))
mixin_instance = OperationClass()
mixin_instance._client = self._client
mixin_instance._config = self._config
mixin_instance._serialize = Serializer(self._models_dict(api_version))
mixin_instance._serialize.client_side_validation = False
mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
return mixin_instance.validate_container_settings(resource_group_name, validate_container_settings_request, **kwargs)
def validate_move(
self,
resource_group_name, # type: str
move_resource_envelope, # type: "_models.CsmMoveResourceEnvelope"
**kwargs # type: Any
):
# type: (...) -> None
"""Validate whether a resource can be moved.
Description for Validate whether a resource can be moved.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param move_resource_envelope: Object that represents the resource to move.
:type move_resource_envelope: ~azure.mgmt.web.v2021_03_01.models.CsmMoveResourceEnvelope
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = self._get_api_version('validate_move')
if api_version == '2016-03-01':
from .v2016_03_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2018-02-01':
from .v2018_02_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2019-08-01':
from .v2019_08_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2020-06-01':
from .v2020_06_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2020-09-01':
from .v2020_09_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2020-12-01':
from .v2020_12_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2021-01-01':
from .v2021_01_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2021-01-15':
from .v2021_01_15.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2021-03-01':
from .v2021_03_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
else:
raise ValueError("API version {} does not have operation 'validate_move'".format(api_version))
mixin_instance = OperationClass()
mixin_instance._client = self._client
mixin_instance._config = self._config
mixin_instance._serialize = Serializer(self._models_dict(api_version))
mixin_instance._serialize.client_side_validation = False
mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
return mixin_instance.validate_move(resource_group_name, move_resource_envelope, **kwargs)
def verify_hosting_environment_vnet(
self,
parameters, # type: "_models.VnetParameters"
**kwargs # type: Any
):
# type: (...) -> "_models.VnetValidationFailureDetails"
"""Verifies if this VNET is compatible with an App Service Environment by analyzing the Network
Security Group rules.
Description for Verifies if this VNET is compatible with an App Service Environment by
analyzing the Network Security Group rules.
:param parameters: VNET information.
:type parameters: ~azure.mgmt.web.v2021_03_01.models.VnetParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VnetValidationFailureDetails, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2021_03_01.models.VnetValidationFailureDetails
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = self._get_api_version('verify_hosting_environment_vnet')
if api_version == '2016-03-01':
from .v2016_03_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2018-02-01':
from .v2018_02_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2019-08-01':
from .v2019_08_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2020-06-01':
from .v2020_06_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2020-09-01':
from .v2020_09_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2020-12-01':
from .v2020_12_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2021-01-01':
from .v2021_01_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2021-01-15':
from .v2021_01_15.operations import WebSiteManagementClientOperationsMixin as OperationClass
elif api_version == '2021-03-01':
from .v2021_03_01.operations import WebSiteManagementClientOperationsMixin as OperationClass
else:
raise ValueError("API version {} does not have operation 'verify_hosting_environment_vnet'".format(api_version))
mixin_instance = OperationClass()
mixin_instance._client = self._client
mixin_instance._config = self._config
mixin_instance._serialize = Serializer(self._models_dict(api_version))
mixin_instance._serialize.client_side_validation = False
mixin_instance._deserialize = Deserializer(self._models_dict(api_version))
return mixin_instance.verify_hosting_environment_vnet(parameters, **kwargs)
| |
from __future__ import division
from __future__ import unicode_literals
import numpy as np
from rdkit import Chem
import deepchem as dc
from deepchem.feat import Featurizer
from deepchem.feat.atomic_coordinates import ComplexNeighborListFragmentAtomicCoordinates
from deepchem.feat.mol_graphs import ConvMol, WeaveMol
from deepchem.data import DiskDataset
import multiprocessing
import logging
def _featurize_complex(featurizer, mol_pdb_file, protein_pdb_file, log_message):
logging.info(log_message)
return featurizer._featurize_complex(mol_pdb_file, protein_pdb_file)
def one_of_k_encoding(x, allowable_set):
  """One-hot encode ``x`` against ``allowable_set``.

  Parameters
  ----------
  x: object
    Value to encode; must be a member of ``allowable_set``.
  allowable_set: list
    Ordered vocabulary of allowed values.

  Returns
  -------
  list of bool with exactly one True entry, at ``x``'s position.

  Raises
  ------
  ValueError
    If ``x`` is not in ``allowable_set``.
  """
  if x not in allowable_set:
    # ValueError (a subclass of Exception) is the idiomatic type for a bad
    # argument value; the original raised a bare Exception with a garbled
    # message ("set{1}:").
    raise ValueError(
        "input {0} not in allowable set {1}".format(x, allowable_set))
  return [x == s for s in allowable_set]
def one_of_k_encoding_unk(x, allowable_set):
  """One-hot encode ``x``; unknown values map to the LAST vocabulary entry.

  Returns a list of bools the same length as ``allowable_set`` with exactly
  one True entry (the final slot doubles as the "unknown" bucket).
  """
  key = x if x in allowable_set else allowable_set[-1]
  return [key == member for member in allowable_set]
def get_intervals(l):
  """For a list of lists, return the cumulative products of (length + 1).

  Entry 0 is always 1; entry k is (len(l[k]) + 1) * entry k-1. These act as
  positional "place values" for packing per-category feature indices into a
  single integer id (see features_to_id).
  """
  intervals = [0 for _ in l]
  intervals[0] = 1
  running = 1
  for k in range(1, len(l)):
    # +1 leaves room for the out-of-vocabulary index produced by safe_index.
    running *= len(l[k]) + 1
    intervals[k] = running
  return intervals
def safe_index(l, e):
  """Return the index of ``e`` in ``l``, or ``len(l)`` if ``e`` is absent.

  The one-past-the-end value acts as an "unknown" bucket for the
  feature-id encoding built on top of this helper.
  """
  try:
    return l.index(e)
  except ValueError:
    # list.index raises ValueError for a missing element; the original bare
    # `except:` also swallowed unrelated errors (even KeyboardInterrupt).
    return len(l)
# Vocabularies for the id-based atom featurization. Values outside a list map
# to index len(list) via safe_index (the "unknown" bucket).
# NOTE(review): 'Mc' is not a symbol produced by RDKit's common organic set --
# possibly a typo for another element (e.g. 'Mo'); confirm upstream.
possible_atom_list = [
    'C', 'N', 'O', 'S', 'F', 'P', 'Cl', 'Mg', 'Na', 'Br', 'Fe', 'Ca', 'Cu',
    'Mc', 'Pd', 'Pb', 'K', 'I', 'Al', 'Ni', 'Mn'
]
# Possible total hydrogen counts on an atom.
possible_numH_list = [0, 1, 2, 3, 4]
# Possible implicit valences.
possible_valence_list = [0, 1, 2, 3, 4, 5, 6]
# Possible formal charges.
possible_formal_charge_list = [-3, -2, -1, 0, 1, 2, 3]
# RDKit hybridization states considered.
possible_hybridization_list = [
    Chem.rdchem.HybridizationType.SP, Chem.rdchem.HybridizationType.SP2,
    Chem.rdchem.HybridizationType.SP3, Chem.rdchem.HybridizationType.SP3D,
    Chem.rdchem.HybridizationType.SP3D2
]
# Possible radical electron counts.
possible_number_radical_e_list = [0, 1, 2]
# CIP chirality codes.
possible_chirality_list = ['R', 'S']
# Order matters: it fixes the positional "place values" computed below.
reference_lists = [
    possible_atom_list, possible_numH_list, possible_valence_list,
    possible_formal_charge_list, possible_number_radical_e_list,
    possible_hybridization_list, possible_chirality_list
]
# Place values for packing per-category indices into one integer id.
intervals = get_intervals(reference_lists)
def get_feature_list(atom):
  """Return the six raw category indices describing an RDKit atom.

  Each entry is safe_index of one atom property into the corresponding
  module-level vocabulary (out-of-vocabulary values map to the list length).
  Chirality is intentionally not included here.
  """
  return [
      safe_index(possible_atom_list, atom.GetSymbol()),
      safe_index(possible_numH_list, atom.GetTotalNumHs()),
      safe_index(possible_valence_list, atom.GetImplicitValence()),
      safe_index(possible_formal_charge_list, atom.GetFormalCharge()),
      safe_index(possible_number_radical_e_list,
                 atom.GetNumRadicalElectrons()),
      safe_index(possible_hybridization_list, atom.GetHybridization()),
  ]
def features_to_id(features, intervals):
  """Convert a list of feature indices into a single integer id.

  Each feature index is weighted by the matching "place value" in
  ``intervals``; the +1 offset reserves id 0 for the null molecule.

  Iterating with zip (instead of the original ``range(len(intervals))``)
  avoids an IndexError when ``intervals`` is longer than ``features`` --
  which it is for the module-level ``intervals`` (7 entries, including
  chirality) versus get_feature_list's 6 features.
  """
  # The original accumulator was named `id`, shadowing the builtin.
  packed = sum(f * place for f, place in zip(features, intervals))
  # Allow 0 index to correspond to null molecule 1
  return packed + 1
def id_to_features(id, intervals):
  """Inverse of features_to_id: unpack an id into six feature indices.

  Performs mixed-radix decomposition using ``intervals`` as place values,
  from the most significant position (index 5) down to index 0.
  """
  features = [0] * 6
  remainder = id - 1  # undo the +1 null-molecule offset
  for pos in range(5, 0, -1):
    features[pos] = remainder // intervals[pos]
    remainder -= features[pos] * intervals[pos]
  # Whatever is left is the least-significant (atom-symbol) index.
  features[0] = remainder
  return features
def atom_to_id(atom):
  """Return a single integer id encoding the atom's feature combination."""
  return features_to_id(get_feature_list(atom), intervals)
def atom_features(atom,
                  bool_id_feat=False,
                  explicit_H=False,
                  use_chirality=False):
  """Featurize a single RDKit atom into a numpy array.

  With ``bool_id_feat`` True, returns a length-1 array holding the packed
  integer id from atom_to_id. Otherwise returns a boolean/numeric vector
  concatenating: symbol one-hot (44 slots incl. 'Unknown'), degree one-hot
  (0-10), implicit-valence one-hot (0-6, unk), formal charge, radical
  electron count, hybridization one-hot (5 slots, unk), aromaticity flag,
  and -- unless ``explicit_H`` -- a total-H-count one-hot (0-4, unk).
  ``use_chirality`` appends an R/S one-hot plus a chirality-possible flag.
  """
  if bool_id_feat:
    return np.array([atom_to_id(atom)])
  else:
    # Local import shadows the module-level Chem import; kept byte-identical.
    from rdkit import Chem
    results = one_of_k_encoding_unk(
      atom.GetSymbol(),
      [
        'C',
        'N',
        'O',
        'S',
        'F',
        'Si',
        'P',
        'Cl',
        'Br',
        'Mg',
        'Na',
        'Ca',
        'Fe',
        'As',
        'Al',
        'I',
        'B',
        'V',
        'K',
        'Tl',
        'Yb',
        'Sb',
        'Sn',
        'Ag',
        'Pd',
        'Co',
        'Se',
        'Ti',
        'Zn',
        'H',  # H?
        'Li',
        'Ge',
        'Cu',
        'Au',
        'Ni',
        'Cd',
        'In',
        'Mn',
        'Zr',
        'Cr',
        'Pt',
        'Hg',
        'Pb',
        'Unknown'
      ]) + one_of_k_encoding(atom.GetDegree(),
                             [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + \
              one_of_k_encoding_unk(atom.GetImplicitValence(), [0, 1, 2, 3, 4, 5, 6]) + \
              [atom.GetFormalCharge(), atom.GetNumRadicalElectrons()] + \
              one_of_k_encoding_unk(atom.GetHybridization(), [
                Chem.rdchem.HybridizationType.SP, Chem.rdchem.HybridizationType.SP2,
                Chem.rdchem.HybridizationType.SP3, Chem.rdchem.HybridizationType.
                SP3D, Chem.rdchem.HybridizationType.SP3D2
              ]) + [atom.GetIsAromatic()]
    # In case of explicit hydrogen(QM8, QM9), avoid calling `GetTotalNumHs`
    if not explicit_H:
      results = results + one_of_k_encoding_unk(atom.GetTotalNumHs(),
                                                [0, 1, 2, 3, 4])
    if use_chirality:
      try:
        # GetProp('_CIPCode') raises when the atom has no assigned CIP code.
        results = results + one_of_k_encoding_unk(
            atom.GetProp('_CIPCode'),
            ['R', 'S']) + [atom.HasProp('_ChiralityPossible')]
      # NOTE(review): bare except also hides unrelated errors; presumably
      # only the missing-property error is intended -- confirm and narrow.
      except:
        results = results + [False, False
                            ] + [atom.HasProp('_ChiralityPossible')]
    return np.array(results)
def bond_features(bond, use_chirality=False):
  """Featurize an RDKit bond into a numpy array.

  The base vector is [is-single, is-double, is-triple, is-aromatic,
  is-conjugated, is-in-ring]. With ``use_chirality`` a 4-way stereo one-hot
  (unknown values bucketed into "STEREOE") is appended.
  """
  from rdkit import Chem
  bond_type = bond.GetBondType()
  feats = [
      bond_type == Chem.rdchem.BondType.SINGLE,
      bond_type == Chem.rdchem.BondType.DOUBLE,
      bond_type == Chem.rdchem.BondType.TRIPLE,
      bond_type == Chem.rdchem.BondType.AROMATIC,
      bond.GetIsConjugated(),
      bond.IsInRing(),
  ]
  if use_chirality:
    feats = feats + one_of_k_encoding_unk(
        str(bond.GetStereo()),
        ["STEREONONE", "STEREOANY", "STEREOZ", "STEREOE"])
  return np.array(feats)
def pair_features(mol, edge_list, canon_adj_list, bt_len=6,
                  graph_distance=True):
  """Build the (N, N, bt_len + max_distance + 1) pair-feature tensor for a mol.

  Per atom pair: the first ``bt_len`` channels are bond features for bonded
  pairs (zeros otherwise), channel ``bt_len`` flags same-ring membership, and
  the remaining channels encode either a one-hot graph distance (up to 7
  bonds) or, when ``graph_distance`` is False, the Euclidean distance from
  conformer 0 in the last channel.

  ``edge_list`` is assumed to map sorted atom-index pairs to bond feature
  vectors of length ``bt_len``, and ``canon_adj_list[i]`` to list the
  neighbors of atom i -- confirm against the caller.
  """
  if graph_distance:
    max_distance = 7
  else:
    max_distance = 1
  N = mol.GetNumAtoms()
  features = np.zeros((N, N, bt_len + max_distance + 1))
  num_atoms = mol.GetNumAtoms()
  rings = mol.GetRingInfo().AtomRings()
  for a1 in range(num_atoms):
    for a2 in canon_adj_list[a1]:
      # first `bt_len` features are bond features(if applicable)
      features[a1, a2, :bt_len] = np.asarray(
          edge_list[tuple(sorted((a1, a2)))], dtype=float)
    for ring in rings:
      if a1 in ring:
        # `bt_len`-th feature is if the pair of atoms are in the same ring
        features[a1, ring, bt_len] = 1
        # an atom is never "paired with itself" in a ring
        features[a1, a1, bt_len] = 0.
    # graph distance between two atoms
    if graph_distance:
      distance = find_distance(
          a1, num_atoms, canon_adj_list, max_distance=max_distance)
      features[a1, :, bt_len + 1:] = distance
  # Euclidean distance between atoms
  if not graph_distance:
    coords = np.zeros((N, 3))
    for atom in range(N):
      pos = mol.GetConformer(0).GetAtomPosition(atom)
      coords[atom, :] = [pos.x, pos.y, pos.z]
    # pairwise L2 norms via broadcasting two stacked copies of the coords
    features[:, :, -1] = np.sqrt(np.sum(np.square(
      np.stack([coords] * N, axis=1) - \
      np.stack([coords] * N, axis=0)), axis=2))
  return features
def find_distance(a1, num_atoms, canon_adj_list, max_distance=7):
  """One-hot graph-distance encoding from atom ``a1`` to every other atom.

  Breadth-first search over the adjacency lists: distance[j, r] == 1 when
  atom j is exactly r + 1 bonds from ``a1``. Rows for ``a1`` itself and for
  atoms farther than ``max_distance`` bonds (or unreachable) stay all zero.
  """
  distance = np.zeros((num_atoms, max_distance))
  frontier = set(canon_adj_list[a1])  # atoms exactly (radial + 1) bonds away
  visited = {a1}                      # atoms at most radial bonds away
  for radial in range(max_distance):
    distance[list(frontier), radial] = 1
    visited |= frontier
    # Expand one bond outward, then drop anything already seen.
    reachable = set()
    for node in frontier:
      reachable.update(canon_adj_list[node])
    frontier = reachable - visited
  return distance
class ConvMolFeaturizer(Featurizer):
  """Featurizes RDKit molecules as ConvMol graph objects for graph convolutions."""
  name = ['conv_mol']

  def __init__(self, master_atom=False, use_chirality=False,
               atom_properties=None):
    """
    Parameters
    ----------
    master_atom: Boolean
      if true create a fake atom with bonds to every other atom.
      the initialization is the mean of the other atom features in
      the molecule. This technique is briefly discussed in
      Neural Message Passing for Quantum Chemistry
      https://arxiv.org/pdf/1704.01212.pdf
    use_chirality: Boolean
      if true then make the resulting atom features aware of the
      chirality of the molecules in question
    atom_properties: list of string or None
      properties in the RDKit Mol object to use as additional
      atom-level features in the larger molecular feature. If None,
      then no atom-level properties are used. Properties in the
      RDKit mol object should be in the form
      atom XXXXXXXX NAME
      where XXXXXXXX is a zero-padded 8 digit number corresponding to the
      zero-indexed atom index of each atom and NAME is the name of the property
      provided in atom_properties. So "atom 00000000 sasa" would be the
      name of the molecule level property in mol where the solvent
      accessible surface area of atom 0 would be stored.

    Since ConvMol is an object and not a numpy array, need to set dtype to
    object.
    """
    self.dtype = object
    self.master_atom = master_atom
    self.use_chirality = use_chirality
    # Default of None (instead of a mutable [] default argument) avoids the
    # shared-mutable-default pitfall; behavior for callers passing a list is
    # unchanged because the list is copied either way.
    self.atom_properties = list(atom_properties) if atom_properties is not None else []

  def _get_atom_properties(self, atom):
    """
    For a given input RDKit atom return the values of the properties
    requested when initializing the featurizer. See the __init__ of the
    class for a full description of the names of the properties

    Parameters
    ----------
    atom: RDKit.rdchem.Atom
      Atom to get the properties of

    returns a numpy lists of floats of the same size as self.atom_properties
    """
    values = []
    for prop in self.atom_properties:
      mol_prop_name = str("atom %08d %s" % (atom.GetIdx(), prop))
      try:
        values.append(float(atom.GetOwningMol().GetProp(mol_prop_name)))
      except KeyError:
        raise KeyError("No property %s found in %s in %s" %
                       (mol_prop_name, atom.GetOwningMol(), self))
    return np.array(values)

  def _featurize(self, mol):
    """Encodes mol as a ConvMol object."""
    # Get the node features
    idx_nodes = [(a.GetIdx(),
                  np.concatenate((atom_features(
                      a, use_chirality=self.use_chirality),
                                  self._get_atom_properties(a))))
                 for a in mol.GetAtoms()]
    idx_nodes.sort()  # Sort by ind to ensure same order as rd_kit
    idx, nodes = list(zip(*idx_nodes))
    # Stack nodes into an array
    nodes = np.vstack(nodes)
    if self.master_atom:
      # The master atom's features are the mean of all real atoms' features.
      master_atom_features = np.expand_dims(np.mean(nodes, axis=0), axis=0)
      nodes = np.concatenate([nodes, master_atom_features], axis=0)
    # Get bond lists with reverse edges included
    edge_list = [
        (b.GetBeginAtomIdx(), b.GetEndAtomIdx()) for b in mol.GetBonds()
    ]
    # Get canonical adjacency list
    canon_adj_list = [[] for mol_id in range(len(nodes))]
    for edge in edge_list:
      canon_adj_list[edge[0]].append(edge[1])
      canon_adj_list[edge[1]].append(edge[0])
    if self.master_atom:
      # Connect every real atom to the master atom (last node index).
      fake_atom_index = len(nodes) - 1
      for index in range(len(nodes) - 1):
        canon_adj_list[index].append(fake_atom_index)
    return ConvMol(nodes, canon_adj_list)

  def feature_length(self):
    """Length of the per-atom feature vector (75 base features + extras)."""
    return 75 + len(self.atom_properties)

  def __hash__(self):
    atom_properties = tuple(self.atom_properties)
    return hash((self.master_atom, self.use_chirality, atom_properties))

  def __eq__(self, other):
    # NOTE(review): the isinstance arguments are reversed relative to the
    # conventional `isinstance(other, self.__class__)`; kept as-is to
    # preserve existing subclass-comparison behavior.
    if not isinstance(self, other.__class__):
      return False
    return self.master_atom == other.master_atom and \
           self.use_chirality == other.use_chirality and \
           tuple(self.atom_properties) == tuple(other.atom_properties)
class WeaveFeaturizer(Featurizer):
  """Featurizes RDKit molecules as WeaveMol objects (atom + pair features)."""
  name = ['weave_mol']

  def __init__(self, graph_distance=True, explicit_H=False,
               use_chirality=False):
    """Configure the featurizer.

    graph_distance: True for graph distance in pair features, False for
    Euclidean distance (the latter only supports datasets providing
    Cartesian coordinates).
    """
    self.graph_distance = graph_distance
    # WeaveMol instances are Python objects, not numeric arrays.
    self.dtype = object
    # Whether explicit hydrogens are included in atom features.
    self.explicit_H = explicit_H
    # Whether atom/bond features encode chirality.
    self.use_chirality = use_chirality

  def _featurize(self, mol):
    """Encodes mol as a WeaveMol object."""
    # Per-atom features, ordered by RDKit atom index.
    indexed_feats = [(atom.GetIdx(),
                      atom_features(
                          atom,
                          explicit_H=self.explicit_H,
                          use_chirality=self.use_chirality))
                     for atom in mol.GetAtoms()]
    indexed_feats.sort()  # same ordering as RDKit's atom indices
    nodes = np.vstack([feats for _, feats in indexed_feats])
    # Bond features keyed by the sorted (begin, end) atom-index pair.
    edge_list = {}
    for bond in mol.GetBonds():
      pair = tuple(sorted([bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()]))
      edge_list[pair] = bond_features(bond, use_chirality=self.use_chirality)
    # Symmetric adjacency list over atom indices.
    canon_adj_list = [[] for _ in range(len(nodes))]
    for begin, end in edge_list.keys():
      canon_adj_list[begin].append(end)
      canon_adj_list[end].append(begin)
    # Pairwise features (bond features, ring membership, distances).
    pairs = pair_features(
        mol,
        edge_list,
        canon_adj_list,
        bt_len=6,
        graph_distance=self.graph_distance)
    return WeaveMol(nodes, pairs)
class AtomicConvFeaturizer(ComplexNeighborListFragmentAtomicCoordinates):
  """This class computes the Atomic Convolution features"""
  # TODO (VIGS25): Complete the description
  name = ['atomic_conv']
  # NOTE(review): the list defaults below (atom_types, radial, layer_sizes)
  # are mutable default arguments; they are only read here, but callers
  # should not mutate them in place.
  def __init__(self,
               labels,
               neighbor_cutoff,
               frag1_num_atoms=70,
               frag2_num_atoms=634,
               complex_num_atoms=701,
               max_num_neighbors=12,
               batch_size=24,
               atom_types=[
                   6, 7., 8., 9., 11., 12., 15., 16., 17., 20., 25., 30., 35.,
                   53., -1.
               ],
               radial=[[
                   1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0, 6.5, 7.0,
                   7.5, 8.0, 8.5, 9.0, 9.5, 10.0, 10.5, 11.0, 11.5, 12.0
               ], [0.0, 4.0, 8.0], [0.4]],
               layer_sizes=[32, 32, 16],
               strip_hydrogens=True,
               learning_rate=0.001,
               epochs=10):
    """
    Parameters
    ----------
    labels: numpy.ndarray
      Labels which we want to predict using the model
    neighbor_cutoff: int
      TODO (VIGS25): Add description
    frag1_num_atoms: int
      Number of atoms in first fragment
    frag2_num_atoms: int
      Number of atoms in second fragment
    complex_num_atoms: int
      TODO (VIGS25) : Add description
    max_num_neighbors: int
      Maximum number of neighbors possible for an atom
    batch_size: int
      Batch size used for training and evaluation
    atom_types: list
      List of atoms recognized by model. Atoms are indicated by their
      nuclear numbers.
    radial: list
      TODO (VIGS25): Add description
    layer_sizes: list
      List of layer sizes for the AtomicConvolutional Network
    strip_hydrogens: bool
      Whether to remove hydrogens while computing neighbor features
    learning_rate: float
      Learning rate for training the model
    epochs: int
      Number of epochs to train the model for
    """
    # The underlying model that is fit inside featurize_complexes().
    self.atomic_conv_model = dc.models.tensorgraph.models.atomic_conv.AtomicConvModel(
        frag1_num_atoms=frag1_num_atoms,
        frag2_num_atoms=frag2_num_atoms,
        complex_num_atoms=complex_num_atoms,
        max_num_neighbors=max_num_neighbors,
        batch_size=batch_size,
        atom_types=atom_types,
        radial=radial,
        layer_sizes=layer_sizes,
        learning_rate=learning_rate)
    super(AtomicConvFeaturizer, self).__init__(
        frag1_num_atoms=frag1_num_atoms,
        frag2_num_atoms=frag2_num_atoms,
        complex_num_atoms=complex_num_atoms,
        max_num_neighbors=max_num_neighbors,
        neighbor_cutoff=neighbor_cutoff,
        strip_hydrogens=strip_hydrogens)
    self.epochs = epochs
    self.labels = labels
  def featurize_complexes(self, mol_files, protein_files):
    """Featurize ligand/protein pairs; returns (features, failure indices).

    Fits the atomic conv model on the featurized complexes, then extracts
    the activations of its AtomicConvolution layers as features.
    """
    # Featurize each (ligand, protein) pair in a worker process.
    pool = multiprocessing.Pool()
    results = []
    for i, (mol_file, protein_pdb) in enumerate(zip(mol_files, protein_files)):
      log_message = "Featurizing %d / %d" % (i, len(mol_files))
      results.append(
          pool.apply_async(_featurize_complex,
                           (self, mol_file, protein_pdb, log_message)))
    pool.close()
    features = []
    failures = []
    for ind, result in enumerate(results):
      new_features = result.get()
      # Handle loading failures which return None
      if new_features is not None:
        features.append(new_features)
      else:
        failures.append(ind)
    features = np.asarray(features)
    # Drop labels for complexes that failed to featurize.
    labels = np.delete(self.labels, failures)
    dataset = DiskDataset.from_numpy(features, labels)
    # Fit atomic conv model
    self.atomic_conv_model.fit(dataset, nb_epoch=self.epochs)
    # Add the Atomic Convolution layers to fetches
    layers_to_fetch = list()
    for layer in self.atomic_conv_model.layers.values():
      if isinstance(layer,
                    dc.models.tensorgraph.models.atomic_conv.AtomicConvolution):
        layers_to_fetch.append(layer)
    # Extract the atomic convolution features
    atomic_conv_features = list()
    feed_dict_generator = self.atomic_conv_model.default_generator(
        dataset=dataset, epochs=1)
    for feed_dict in self.atomic_conv_model._create_feed_dicts(
        feed_dict_generator, training=False):
      frag1_conv, frag2_conv, complex_conv = self.atomic_conv_model._run_graph(
          outputs=layers_to_fetch, feed_dict=feed_dict, training=False)
      concatenated = np.concatenate(
          [frag1_conv, frag2_conv, complex_conv], axis=1)
      atomic_conv_features.append(concatenated)
    # num_to_skip = number of padding entries added to fill the last batch.
    batch_size = self.atomic_conv_model.batch_size
    if len(features) % batch_size != 0:
      num_batches = (len(features) // batch_size) + 1
      num_to_skip = num_batches * batch_size - len(features)
    else:
      num_to_skip = 0
    atomic_conv_features = np.asarray(atomic_conv_features)
    # NOTE(review): this slice looks inverted — `[-num_to_skip:]` KEEPS only
    # the last num_to_skip entries (the padding) instead of dropping them;
    # dropping would be `[:-num_to_skip]` guarded by num_to_skip > 0. Also
    # unclear whether the leading axis here is batches or samples — confirm
    # against the model's generator before changing.
    atomic_conv_features = atomic_conv_features[-num_to_skip:]
    atomic_conv_features = np.squeeze(atomic_conv_features)
    return atomic_conv_features, failures
| |
import tensorflow as tf
import prettytensor as pt
import numpy as np
from zutils.py_utils import *
import zutils.tf_graph_utils as tgu
import zutils.tf_math_funcs as tmf
class NeuralNetworkTrainer:
    """Drives a TF1-style training loop for a single-device graph.

    Wraps a tf.train Optimizer, tracks iteration/position counters as graph
    variables (so training can resume), emits scalar summaries, and
    optionally runs periodic snapshot/test callbacks and switchable-average
    variable updates (e.g. for batch normalization statistics).
    """

    def __init__(
            self, data_module, loss_tensor, solver_type,
            solver_kwargs=None, disp_tensor_dict=None,
            minimizer_kwargs=None, update_ops=None,
            max_epochs=None, disp_time_interval=2, disp_prefix=None,
            learning_rate=None, global_step=None,
            snapshot_func=None, snapshot_interval=7200, snapshot_sharing=None,
            permanent_snapshot_step_list=None, snapshot_step_list=None,
            test_func=None, test_steps=10000, logger=None, scope=None,
            extra_output_tensors=None
    ):
        """Build the train op, counters, summaries and periodic runners.

        data_module: data source exposing epoch()/pos()/set_pos(), or None.
        loss_tensor: scalar loss to minimize.
        solver_type: name prefix of a tf.train optimizer, e.g. "Adam".
        learning_rate: overrides solver_kwargs["learning_rate"] when given.
        snapshot_sharing: another trainer whose snapshot runner is reused.
        """
        if solver_kwargs is None:
            solver_kwargs = dict()
        if minimizer_kwargs is None:
            minimizer_kwargs = dict()
        if disp_tensor_dict is None:
            disp_tensor_dict = dict()
        # Copy before mutating: we insert "var_list" below.
        minimizer_kwargs = copy(minimizer_kwargs)
        if learning_rate is not None:
            solver_kwargs["learning_rate"] = learning_rate
        else:
            # Fix: the original used hasattr() on a dict, which never checks
            # keys and therefore always failed; key membership is intended.
            assert "learning_rate" in solver_kwargs, "learning rate is not set"
        self.learning_rate_tensor = solver_kwargs["learning_rate"]
        if not tmf.is_tf_data(self.learning_rate_tensor):
            self.learning_rate_tensor = tf.constant(self.learning_rate_tensor)
        # Filled in with the fetched value on every step().
        self.learning_rate = None
        if scope is None:
            scope = "trainer"
        self.data_module = data_module
        optimizer_func = getattr(tf.train, solver_type + "Optimizer")
        self.optimizer = optimizer_func(**solver_kwargs)
        # figure out subiters
        if "var_list" in minimizer_kwargs:
            var_list = minimizer_kwargs["var_list"]
        else:
            var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        self.update_shared = tgu.update_shared_vars(var_list)
        var_list = list(set(var_list) - set(tgu.get_freeze_collection()))  # remove freeze variables
        self.var_list = var_list
        minimizer_kwargs["var_list"] = var_list
        # cache variables so we can compute the set created by this trainer
        old_variable_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        # define training iters (kept on CPU so they survive device changes)
        with tf.device("/cpu:0"), tf.variable_scope(scope):
            self.iter_variable = tf.Variable(
                0, trainable=False, dtype=tf.int64, name="trainer_step")
            self.pos_variable = tf.Variable(
                0, trainable=False, dtype=tf.int64, name="trainer_pos")

        # function for handling update ops
        def attach_updates_to_train_op(train_op_without_updates):
            # add update ops (mainly for batch normalization)
            if update_ops is None:
                train_op = pt.with_update_ops(train_op_without_updates)
            else:
                assert isinstance(update_ops, list), "update_ops must be a list"
                if update_ops:
                    train_op = tf.group(train_op_without_updates, *update_ops)
                else:
                    train_op = train_op_without_updates
            return train_op

        # define minimizer
        self.gradient_tensors = OrderedDict()
        is_single_device = tmf.is_tf_data(loss_tensor)
        assert is_single_device, \
            "ERROR: this code does not support multiple devices. Use CUDA_VISIBLE_DEVICES=... to specify the GPU."
        raw_gradient_tensor = self.optimizer.compute_gradients(loss_tensor, **minimizer_kwargs)
        # disp and extra variables
        self.loss_tensor = loss_tensor
        self.disp_tensor_dict = flatten_str_dict(disp_tensor_dict)
        self.extra_output_tensors = extra_output_tensors
        new_variable_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
        # apply per-variable learning-rate multipliers, then build the train op
        self.gradient_tensor = []
        for g, v in raw_gradient_tensor:
            if hasattr(v, "lr_mult"):
                g *= v.lr_mult
            self.gradient_tensor.append((g, v))
        self.train_op_without_updates = self.optimizer.apply_gradients(self.gradient_tensor)
        self.train_op = attach_updates_to_train_op(self.train_op_without_updates)
        with tf.control_dependencies([self.update_shared]):
            self.train_op = tf.group(self.train_op)
        # sanity check: "extra" is the reserved key for extra outputs in step()
        assert "extra" not in disp_tensor_dict, \
            "extra is reserved for extra outputs"
        # saveable variables: only those created by this trainer
        self.saveable_variables = list(set(new_variable_list) - set(old_variable_list))
        # helper for extra outputs: flatten arbitrary nesting into a tensor
        # list plus a function that re-wraps fetched values in the original shape
        self.extra_output_tensors_flattened, self.extra_output_tensors_wrapfunc = \
            recursive_flatten_with_wrap_func(tmf.is_tf_data, self.extra_output_tensors)
        # setup loss summaries
        self.loss_summaries = []
        with tf.name_scope('trainer_summaries'):
            if disp_prefix is not None:
                summary_prefix = disp_prefix + "_"
            else:
                summary_prefix = ""
            self.loss_summaries.append(
                tf.summary.scalar(summary_prefix + "Loss", self.loss_tensor))
            for k, v in self.disp_tensor_dict.items():
                self.loss_summaries.append(tf.summary.scalar(summary_prefix + k, v))
            self.loss_summaries.append(tf.summary.scalar(summary_prefix + "learning_rate", self.learning_rate_tensor))
        self.merged_loss_summaries = tf.summary.merge([self.loss_summaries])
        self.logger = logger  # do not do anything with it just set it up if possible
        # set up variables for the training stage
        self.max_epochs = max_epochs
        self.disp_time_interval = disp_time_interval
        self.disp_prefix = disp_prefix
        self.total_iter = np.uint64(0)
        self.total_pos = np.uint64(0)
        if global_step is None:
            global_step = tf.train.get_or_create_global_step()
        self.global_step = global_step
        with tf.device("/cpu:0"):
            self.iter_variable_inc = tf.assign_add(self.iter_variable, 1)
            self.global_step_inc = tf.assign_add(self.global_step, 1)
            self.pos_assign_placeholder = tf.placeholder(tf.int64, shape=[], name="trainer_pos_assign")
            self.pos_variable_assign = tf.assign(self.pos_variable, self.pos_assign_placeholder)
        # set up variables for run_init
        self.all_output_tensors = None
        self.all_output_names = None
        self.tmp_training_losses = None
        self.disp_countdown = None
        self.outside_timestamp = None
        self.tmp_iter_start = None
        # set up snapshot saver
        if permanent_snapshot_step_list is None:
            permanent_snapshot_step_list = []
        if snapshot_step_list is None:
            snapshot_step_list = []
        if snapshot_sharing is None:
            self.snapshot_runner_shared = False
            if snapshot_func is None:
                snapshot_interval = None
            else:
                if snapshot_interval is not None:
                    print(" - snapshot in every %d sec" % snapshot_interval)
            self.permanent_snapshot_condition = \
                ArgsSepFunc(lambda the_step: the_step in permanent_snapshot_step_list)
            self.snapshot_condition = \
                ArgsSepFunc(lambda the_step: the_step in snapshot_step_list)
            _snapshot_periodic_runner = PeriodicRun(
                snapshot_interval, snapshot_func
            )
            # snapshot also when an explicit step is listed or training ends
            _snapshot_periodic_runner.add_extra_true_condition(
                self.snapshot_condition
            )
            _snapshot_periodic_runner.add_extra_true_condition(
                self.need_stop
            )
            # permanent snapshots are preserved (not rotated away)
            _snapshot_periodic_runner.add_extra_true_condition(
                self.permanent_snapshot_condition,
                extra_func=lambda sess, step: snapshot_func(sess, step, preserve=True)
            )
            self.snapshot_periodic_runner = _snapshot_periodic_runner
        else:
            # share another trainer's snapshot machinery
            self.snapshot_runner_shared = True
            self.snapshot_periodic_runner = snapshot_sharing.snapshot_periodic_runner
            self.permanent_snapshot_condition = snapshot_sharing.permanent_snapshot_condition
            self.snapshot_condition = snapshot_sharing.snapshot_condition
        # set up test func
        self.test_func = test_func
        self.test_steps = test_steps
        # set up variables for avg update (switchable-average vars, e.g. BN)
        self.avg_var_list = None
        self.avg_var_forward_steps = None
        self.avg_var_minimum_update_steps = None
        self.avg_var_update_num = None
        self.avg_var_exact_mode = None
        self.avg_var_running_mode = None

    def set_logger(self, logger):
        """Attach a summary writer used by step() to record loss summaries."""
        self.logger = logger

    def run_init(self):
        """Lazy one-time setup of fetch lists and display/snapshot timers."""
        self.all_output_tensors = \
            [self.loss_tensor] + list(self.disp_tensor_dict.values()) + self.extra_output_tensors_flattened
        self.all_output_names = ["Loss"] + list(self.disp_tensor_dict.keys())
        self.tmp_training_losses = np.asarray([0.0] * len(self.all_output_names), dtype=np.float64)
        # remark: higher precision for being more accurate
        self.disp_countdown = IfTimeout(self.disp_time_interval)
        self.tmp_iter_start = 0
        if not self.snapshot_runner_shared:
            self.snapshot_periodic_runner.reset()

    def need_stop(self):
        """True when the data module has reached max_epochs."""
        return self.data_module is not None and self.data_module.epoch() >= self.max_epochs

    def step(self, sess):
        """Run one training iteration; returns an OrderedDict of losses
        (plus "extra" outputs), or None when training should stop."""
        if self.all_output_tensors is None:
            self.run_init()
            tf.train.start_queue_runners(sess=sess)
        if self.need_stop():
            return None
        # don't count time spent outside step() toward the display interval
        if self.outside_timestamp is not None:
            outside_time = time.time() - self.outside_timestamp
            self.disp_countdown.add_ignored_time(time_amount=outside_time)
        if self.total_iter == 0:
            # resume data module stats (currently only support "resumable_wrapper")
            self.total_iter, self.total_pos = sess.run([self.iter_variable, self.pos_variable])
            if self.data_module is not None:
                self.data_module.set_pos(self.total_pos)
        run_outputs = sess.run(
            [self.train_op, self.merged_loss_summaries] + self.all_output_tensors +
            [self.learning_rate_tensor, self.iter_variable_inc, self.global_step_inc], {})
        # unpack: trailing fetches are lr / iter counter / global step
        global_step = run_outputs[-1]
        self.total_iter = run_outputs[-2]
        self.learning_rate = run_outputs[-3]
        loss_summaries = run_outputs[1]
        main_outputs = run_outputs[2:-3]
        losses = main_outputs[:len(self.all_output_names)]
        extra_outputs = self.extra_output_tensors_wrapfunc(main_outputs[len(self.all_output_names):])
        losses = np.asarray(losses, dtype=np.float64)  # remove the output from train object
        self.tmp_training_losses += losses
        output_dict = OrderedDict(zip(self.all_output_names, losses))
        output_dict["extra"] = extra_outputs
        if self.logger is not None:
            self.logger.add_summary(loss_summaries, global_step)
        if self.data_module is not None:
            self.total_pos = sess.run(self.pos_variable_assign, {self.pos_assign_placeholder: self.data_module.pos()})
        if self.disp_countdown.is_timeout():
            # periodically print averaged losses and throughput
            actual_time_interval = self.disp_countdown.interval
            iter_interval = self.total_iter - self.tmp_iter_start
            self.tmp_training_losses /= iter_interval
            iter_per_sec = iter_interval / actual_time_interval
            results = OrderedDict(zip(self.all_output_names, self.tmp_training_losses.tolist()))
            results_str = " \t".join(["%s: %.5g" % (k, v) for k, v in results.items()])
            timestamp_str = time_stamp_str()
            if self.disp_prefix is not None:
                timestamp_str = timestamp_str + "[" + self.disp_prefix + "]"
            if self.data_module is None:
                print("%s[lr:%g] step %d, iter %d (%g iter/sec):\t%s" % (
                    timestamp_str,
                    self.learning_rate,
                    global_step,
                    self.total_iter,
                    iter_per_sec,
                    results_str
                ))
            else:
                epoch_percentage = self.data_module.num_samples_finished() / \
                    self.data_module.num_samples() * 100
                print("%s[lr:%g] step %d, iter %d (%4.1f%%, epoch %d, %g iter/sec):\t%s" %
                      (timestamp_str,
                       self.learning_rate,
                       global_step,
                       self.total_iter, epoch_percentage,
                       self.data_module.epoch(),
                       iter_per_sec,
                       results_str))
            self.tmp_training_losses[:] = 0.0
            self.tmp_iter_start = self.total_iter
            self.disp_countdown = IfTimeout(self.disp_time_interval)
        # snapshots: update averaged variables first when a snapshot fires
        self.permanent_snapshot_condition.set_args(global_step)
        self.snapshot_condition.set_args(global_step)
        is_snapshotted, _ = self.snapshot_periodic_runner.run_if_timeout_with_prefixfunc(
            lambda: self.update_avgvar_if_necessary(sess=sess), sess, global_step
        )
        self.outside_timestamp = time.time()
        if self.test_func is not None:
            if is_snapshotted or (self.test_steps is not None and global_step % self.test_steps == 0):
                if not is_snapshotted:
                    self.update_avgvar_if_necessary(sess)
                _ = self.test_func(sess, global_step)
        if self.need_stop():
            self.update_avgvar(sess=sess)
        return output_dict

    def forward_step(self, sess):
        """Run a forward pass only (no parameter update); returns the loss."""
        return sess.run(self.loss_tensor)

    def run(self, sess):
        """Run step() until the configured max_epochs is reached."""
        # Fix: the original message said "with", inverting the meaning.
        assert self.max_epochs is not None, \
            "cannot run without a particular max_epochs"
        while self.step(sess) is not None:
            pass

    # for batch normalization
    def setup_avgvar_update(
            self, full_var_list=None, forward_steps=0, minimum_update_steps=0
    ):
        """Register switchable-average variables to be recomputed with
        `forward_steps` forward passes (exact mode) before snapshots/tests."""
        self.avg_var_list = None
        self.avg_var_forward_steps = None
        self.avg_var_minimum_update_steps = None
        if full_var_list is None or not full_var_list:
            return
        if forward_steps <= 0:
            return
        the_var_list = []
        for v in full_var_list:
            if hasattr(v, "is_switchable_avg") and v.is_switchable_avg:
                the_var_list.append(v)
        if not the_var_list:
            return
        the_var_list = list(set(the_var_list))
        the_update_nums = []
        the_exact_avg_modes = []
        the_running_avg_modes = []
        for v in the_var_list:
            the_update_nums.append(tf.reshape(v.num_updates, [1]))
            the_exact_avg_modes.append(v.exact_avg_mode)
            the_running_avg_modes.append(v.running_avg_mode)
        self.avg_var_list = the_var_list
        self.avg_var_forward_steps = forward_steps
        self.avg_var_minimum_update_steps = minimum_update_steps
        self.avg_var_update_num = tf.reduce_max(tf.concat(the_update_nums, axis=0))
        self.avg_var_exact_mode = tf.group(*the_exact_avg_modes)
        self.avg_var_running_mode = tf.group(*the_running_avg_modes)

    def update_avgvar_if_necessary(self, sess):
        """Run update_avgvar() once enough running updates have accumulated."""
        if self.avg_var_list is None:
            return
        if (
                self.avg_var_minimum_update_steps <= 0 or
                sess.run(self.avg_var_update_num) > self.avg_var_minimum_update_steps
        ):
            self.update_avgvar(sess)

    def update_avgvar(self, sess, forward_steps=None):
        """Recompute averaged variables exactly via repeated forward passes,
        then switch back to running-average mode."""
        if self.avg_var_list is None:
            return
        if forward_steps is None:
            forward_steps = self.avg_var_forward_steps
        assert forward_steps is not None, "must set forward_steps"
        print("%s START UPDATE AVERAGE VARIABLES ======" % time_stamp_str())
        sess.run([self.avg_var_exact_mode])
        the_disp_countdown = IfTimeout(self.disp_time_interval)
        for k in range(forward_steps):
            if the_disp_countdown.is_timeout():
                print("%s update average variables: %d / %d" % (time_stamp_str(), k + 1, forward_steps))
                the_disp_countdown = IfTimeout(self.disp_time_interval)
            self.forward_step(sess)
        sess.run([self.avg_var_running_mode])
        print("%s END UPDATE AVERAGE VARIABLES ======" % time_stamp_str())
# handle multiple devices (remark: only support a single device in this code) -------------------------------------
class TrainingNetOutput:
    """Container for the outputs of a training net built on one device."""

    def __init__(self):
        # Scalar loss tensor (or list/dict of losses).
        self.loss = None
        # Tensors to display during training.
        self.display_outputs = None
        # Per-device outputs keyed by device name; "ps" is the parameter server.
        self.device_outputs = OrderedDict(ps=None)
        # Shortcut to the parameter-server device outputs.
        self.ps_device_outputs = None
def _single_device_training_net(data_tensors, train_net_func, default_reuse=None):
    """Build the training net on the default device and collect its outputs.

    Returns (TrainingNetOutput, list of all GLOBAL_VARIABLES after building).
    """
    # single default device cases
    print(" - training net on default device")
    loss, display_outputs, ps_device_outputs = train_net_func(data_tensors, default_reuse)
    result = TrainingNetOutput()
    result.loss = loss
    result.display_outputs = display_outputs
    result.device_outputs["ps"] = ps_device_outputs
    result.ps_device_outputs = ps_device_outputs
    all_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
    return result, all_variables
def single_device_training_net(data_tensors, train_net_func):
    """ generate training nets for multiple devices
    :param data_tensors: [ [batch_size, ...], [batch_size, ...], ... ]
    :param train_net_func: loss, display_outputs, first_device_output = train_net_func(data_tensors, default_reuse)
        Remark: loss can be a list or a dict, then the return of this function can be organized accordingly
    :return output_entry: a class instance with fields for a single device
    :return unique_variable_list: variable_list on the first/only device
    """
    # Snapshot globals before building so we can isolate the new variables.
    variables_before = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
    output_struct, variables_after = _single_device_training_net(data_tensors, train_net_func)
    unique_variable_list = list(set(variables_after) - set(variables_before))
    return output_struct, unique_variable_list
| |
"""Test script for the dbm.open function based on testdumbdbm.py"""
import unittest
import glob
from test.support import import_helper
from test.support import os_helper
# Skip tests if dbm module doesn't exist.
dbm = import_helper.import_module('dbm')
try:
from dbm import ndbm
except ImportError:
ndbm = None
_fname = os_helper.TESTFN
#
# Iterates over every database module supported by dbm currently available,
# setting dbm to use each in turn, and yielding that module
#
def dbm_iterator():
    """Yield each importable dbm backend module, registering it with dbm."""
    for name in dbm._names:
        try:
            module = __import__(name, fromlist=['open'])
        except ImportError:
            # backend not built on this platform; skip it
            continue
        dbm._modules[name] = module
        yield module
#
# Clean up all scratch databases we might have created during testing
#
def delete_files():
    """Remove every scratch database file created during testing."""
    # The backing store's exact filename(s) depend on the backend in use,
    # so glob for everything that starts with the test filename.
    for path in glob.glob(glob.escape(_fname) + "*"):
        os_helper.unlink(path)
class AnyDBMTestCase:
    """Backend-independent tests for the dbm.open() interface.

    This class is not itself a TestCase: load_tests() mixes it with
    unittest.TestCase once per available dbm backend, setting the
    ``module`` attribute to the backend under test.
    """

    # Reference data written by init_db() and verified by read_helper().
    _dict = {'a': b'Python:',
             'b': b'Programming',
             'c': b'the',
             'd': b'way',
             'f': b'Guido',
             'g': b'intended',
             }

    def init_db(self):
        """Create a fresh database populated with self._dict."""
        f = dbm.open(_fname, 'n')
        for k in self._dict:
            f[k.encode("ascii")] = self._dict[k]
        f.close()

    def keys_helper(self, f):
        """Assert f's keys match self._dict's; return them decoded and sorted."""
        keys = sorted(k.decode("ascii") for k in f.keys())
        dkeys = sorted(self._dict.keys())
        self.assertEqual(keys, dkeys)
        return keys

    def test_error(self):
        self.assertTrue(issubclass(self.module.error, OSError))

    def test_anydbm_not_existing(self):
        self.assertRaises(dbm.error, dbm.open, _fname)

    def test_anydbm_creation(self):
        f = dbm.open(_fname, 'c')
        self.assertEqual(list(f.keys()), [])
        for key in self._dict:
            f[key.encode("ascii")] = self._dict[key]
        self.read_helper(f)
        f.close()

    def test_anydbm_creation_n_file_exists_with_invalid_contents(self):
        # create an empty file
        os_helper.create_empty_file(_fname)
        with dbm.open(_fname, 'n') as f:
            self.assertEqual(len(f), 0)

    def test_anydbm_modification(self):
        self.init_db()
        f = dbm.open(_fname, 'c')
        self._dict['g'] = f[b'g'] = b"indented"
        self.read_helper(f)
        # setdefault() works as in the dict interface
        self.assertEqual(f.setdefault(b'xxx', b'foo'), b'foo')
        self.assertEqual(f[b'xxx'], b'foo')
        f.close()

    def test_anydbm_read(self):
        self.init_db()
        f = dbm.open(_fname, 'r')
        self.read_helper(f)
        # get() works as in the dict interface
        self.assertEqual(f.get(b'a'), self._dict['a'])
        self.assertEqual(f.get(b'xxx', b'foo'), b'foo')
        self.assertIsNone(f.get(b'xxx'))
        with self.assertRaises(KeyError):
            f[b'xxx']
        f.close()

    def test_anydbm_keys(self):
        self.init_db()
        f = dbm.open(_fname, 'r')
        keys = self.keys_helper(f)
        f.close()

    def test_empty_value(self):
        if getattr(dbm._defaultmod, 'library', None) == 'Berkeley DB':
            self.skipTest("Berkeley DB doesn't distinguish the empty value "
                          "from the absent one")
        f = dbm.open(_fname, 'c')
        self.assertEqual(f.keys(), [])
        f[b'empty'] = b''
        self.assertEqual(f.keys(), [b'empty'])
        self.assertIn(b'empty', f)
        self.assertEqual(f[b'empty'], b'')
        self.assertEqual(f.get(b'empty'), b'')
        self.assertEqual(f.setdefault(b'empty'), b'')
        f.close()

    def test_anydbm_access(self):
        self.init_db()
        f = dbm.open(_fname, 'r')
        key = "a".encode("ascii")
        self.assertIn(key, f)
        # Use a unittest assertion rather than a bare `assert`: bare asserts
        # are stripped when Python runs with -O and give poor failure output.
        self.assertEqual(f[key], b"Python:")
        f.close()

    def read_helper(self, f):
        """Assert every value in self._dict is readable from f."""
        keys = self.keys_helper(f)
        for key in self._dict:
            self.assertEqual(self._dict[key], f[key.encode("ascii")])

    def tearDown(self):
        delete_files()

    def setUp(self):
        # Force dbm.open() to use the backend under test.
        dbm._defaultmod = self.module
        delete_files()
class WhichDBTestCase(unittest.TestCase):
    # Tests for dbm.whichdb()'s database-format detection.
    def test_whichdb(self):
        for module in dbm_iterator():
            # Check whether whichdb correctly guesses module name
            # for databases opened with "module" module.
            # Try with empty files first
            name = module.__name__
            if name == 'dbm.dumb':
                continue   # whichdb can't support dbm.dumb
            delete_files()
            f = module.open(_fname, 'c')
            f.close()
            self.assertEqual(name, self.dbm.whichdb(_fname))
            # Now add a key
            f = module.open(_fname, 'w')
            f[b"1"] = b"1"
            # and test that we can find it
            self.assertIn(b"1", f)
            # and read it
            self.assertEqual(f[b"1"], b"1")
            f.close()
            self.assertEqual(name, self.dbm.whichdb(_fname))
    @unittest.skipUnless(ndbm, reason='Test requires ndbm')
    def test_whichdb_ndbm(self):
        # Issue 17198: check that ndbm which is referenced in whichdb is defined
        db_file = '{}_ndbm.db'.format(_fname)
        with open(db_file, 'w'):
            self.addCleanup(os_helper.unlink, db_file)
        # a bare file with a .db suffix but invalid contents is "unknown"
        self.assertIsNone(self.dbm.whichdb(db_file[:-3]))
    def tearDown(self):
        delete_files()
    def setUp(self):
        delete_files()
        self.filename = os_helper.TESTFN
        self.d = dbm.open(self.filename, 'c')
        self.d.close()
        # use a fresh dbm module so whichdb's cache starts empty
        self.dbm = import_helper.import_fresh_module('dbm')
    def test_keys(self):
        self.d = dbm.open(self.filename, 'c')
        self.assertEqual(self.d.keys(), [])
        a = [(b'a', b'b'), (b'12345678910', b'019237410982340912840198242')]
        for k, v in a:
            self.d[k] = v
        self.assertEqual(sorted(self.d.keys()), sorted(k for (k, v) in a))
        for k, v in a:
            self.assertIn(k, self.d)
            self.assertEqual(self.d[k], v)
        self.assertNotIn(b'xxx', self.d)
        self.assertRaises(KeyError, lambda: self.d[b'xxx'])
        self.d.close()
def load_tests(loader, tests, pattern):
    """unittest load_tests protocol: build one AnyDBMTestCase subclass per
    available dbm backend and add its tests to the suite."""
    classes = []
    for mod in dbm_iterator():
        classes.append(type("TestCase-" + mod.__name__,
                            (AnyDBMTestCase, unittest.TestCase),
                            {'module': mod}))
    # unittest.makeSuite() is deprecated since 3.11 and removed in 3.13;
    # use the loader the protocol hands us instead.
    suites = [loader.loadTestsFromTestCase(c) for c in classes]
    tests.addTests(suites)
    return tests
# Allow running this test module directly; load_tests() above assembles the
# per-backend suites via the unittest load_tests protocol.
if __name__ == "__main__":
    unittest.main()
| |
'''
utils.py
Functions that don't fit anywhere else.
'''
from io import BytesIO
import os
import glob
import socket
import zipfile
import itertools
from PIL import Image
import numpy as np
'''
IMAGES
'''
def scale(im, size=128):
    '''
    accepts: PIL image, size of square sides
    returns: PIL image scaled (in place) so both sides fit within `size`,
             preserving aspect ratio
    '''
    size = (size, size)
    # Image.ANTIALIAS was an alias of LANCZOS and was removed in Pillow 10;
    # LANCZOS is the same filter and works on all Pillow versions since 2.7.
    im.thumbnail(size, Image.LANCZOS)
    return im
def img_to_binary(img):
    '''
    accepts: PIL image
    returns: bytes of the image encoded as JPEG (used to save to database)
    '''
    buf = BytesIO()
    img.save(buf, format='jpeg')
    return buf.getvalue()
def arr_to_binary(arr):
    '''
    accepts: numpy array with shape (Height, Width, Channels)
    returns: binary stream (used to save to database)
    '''
    # Convert to a PIL image first, then encode it.
    return img_to_binary(arr_to_img(arr))
def arr_to_img(arr):
    '''
    accepts: numpy array with shape (Height, Width, Channels)
    returns: PIL image built from the uint8-converted array
    '''
    arr = np.uint8(arr)
    img = Image.fromarray(arr)
    return img
def img_to_arr(img):
    '''
    accepts: PIL image
    returns: numpy array with shape (Height, Width, Channels)
    '''
    return np.array(img)
def binary_to_img(binary):
    '''
    accepts: bytes of an encoded image (e.g. from img_to_binary)
    returns: PIL image
    '''
    buf = BytesIO(binary)
    return Image.open(buf)
def norm_img(img):
    # NOTE(review): operator precedence looks wrong here — this computes
    # (img - mean/std) / 255 rather than the conventional (img - mean)/std.
    # Models trained against the current behavior would change if this were
    # "fixed", so confirm intent before altering it.
    return (img - img.mean() / np.std(img))/255.0
def create_video(img_dir_path, output_video_path):
    # Render the frame_*.png images in img_dir_path into an H.264 video via
    # ffmpeg (invoked through the `envoy` subprocess wrapper).
    import envoy
    # Setup path to the images with telemetry.
    full_path = os.path.join(img_dir_path, 'frame_*.png')
    # Run ffmpeg.
    command = ("""ffmpeg
               -framerate 30/1
               -pattern_type glob -i '%s'
               -c:v libx264
               -r 15
               -pix_fmt yuv420p
               -y
               %s""" % (full_path, output_video_path))
    # NOTE(review): the envoy result (exit code / stderr) is never checked
    # or returned, so ffmpeg failures are silent.
    response = envoy.run(command)
'''
FILES
'''
def most_recent_file(dir_path, ext=''):
    '''
    return the most recent file given a directory path and extension
    '''
    query = dir_path + '/*' + ext
    # Fix: max() by creation time yields the NEWEST file; the original used
    # min(), which returned the oldest file instead.
    newest = max(glob.iglob(query), key=os.path.getctime)
    return newest
def make_dir(path):
    '''
    Expand '~' in path, create the directory if missing, and return the
    expanded path.
    '''
    expanded = os.path.expanduser(path)
    if not os.path.exists(expanded):
        os.makedirs(expanded)
    return expanded
def zip_dir(dir_path, zip_path):
    """
    Create and save a zipfile of a one level directory
    """
    dir_name = os.path.basename(dir_path)
    archive = zipfile.ZipFile(zip_path, 'w')
    # Store each file under "<dir_name>/<file_name>" inside the archive.
    for file_path in glob.glob(dir_path + "/*"):
        entry_name = os.path.join(dir_name, os.path.basename(file_path))
        archive.write(file_path, arcname=entry_name)
    archive.close()
    return zip_path
'''
BINNING
functions to help converte between floating point numbers and categories.
'''
def linear_bin(a):
    '''
    Convert a value in [-1, 1] to a one-hot numpy array over 15 bins.
    '''
    shifted = a + 1
    # Bin width is 2/14 so that -1 maps to bin 0 and +1 maps to bin 14.
    bucket = round(shifted / (2 / 14))
    one_hot = np.zeros(15)
    one_hot[int(bucket)] = 1
    return one_hot
def linear_unbin(arr):
    '''
    Map a 15-bin one-hot array back to a value in [-1, 1]
    (inverse of linear_bin).
    '''
    bucket = np.argmax(arr)
    return bucket * (2 / 14) - 1
def bin_Y(Y):
    '''
    Convert an iterable of values in [-1, 1] to an array of 15-bin one-hots.
    '''
    # Fix: linear_bin() already returns the one-hot row. The original did
    # `arr[linear_bin(y)] = 1`, using that float array as an index — which
    # raises IndexError on modern numpy and never produced the intended
    # one-hot encoding.
    return np.array([linear_bin(y) for y in Y])
def unbin_Y(Y):
    '''
    Convert an array of 15-bin one-hot rows back to scalar values in [-1, 1].
    '''
    return np.array([linear_unbin(row) for row in Y])
def map_range(x, X_min, X_max, Y_min, Y_max):
    '''
    Linear mapping between two ranges of values
    '''
    x_span = X_max - X_min
    y_span = Y_max - Y_min
    ratio = x_span / y_span
    # Map x into the Y range, then floor to an integer.
    mapped = (x - X_min) / ratio + Y_min
    return int(mapped // 1)
'''
NETWORKING
'''
def my_ip():
    '''
    Return this machine's outbound IP address by connecting a UDP socket
    toward an arbitrary external address (UDP connect() sends no packets)
    and reading the local endpoint.
    '''
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect(('192.0.0.8', 1027))
        return s.getsockname()[0]
    finally:
        # Fix: the original never closed the socket, leaking a descriptor
        # on every call.
        s.close()
'''
OTHER
'''
def merge_two_dicts(x, y):
    """Given two dicts, merge them into a new dict as a shallow copy.

    Keys present in both take their value from ``y``; neither input is
    modified.
    """
    combined = x.copy()
    combined.update(y)
    return combined
def param_gen(params):
    '''
    Accepts a dictionary of parameter options and yields one dictionary
    per combination (Cartesian product) of the parameter values.
    '''
    keys = list(params.keys())
    for combo in itertools.product(*params.values()):
        yield dict(zip(keys, combo))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.