| code (stringlengths, 2–1.05M) | repo_name (stringlengths, 5–104) | path (stringlengths, 4–251) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int32, 2–1.05M) |
|---|---|---|---|---|---|
"""
sentry.interfaces.exception
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
__all__ = ('Exception',)
from django.conf import settings
from sentry.interfaces.base import Interface, InterfaceValidationError
from sentry.interfaces.stacktrace import Stacktrace, slim_frame_data
from sentry.utils import json
from sentry.utils.safe import trim
class SingleException(Interface):
"""
A standard exception with a ``type`` and value argument, and an optional
``module`` argument describing the exception class type and
module namespace. Either ``type`` or ``value`` must be present.
You can also optionally bind a stacktrace interface to an exception. The
spec is identical to ``sentry.interfaces.Stacktrace``.
>>> {
>>> "type": "ValueError",
>>> "value": "My exception value",
>>> "module": "__builtins__"
>>> "stacktrace": {
>>> # see sentry.interfaces.Stacktrace
>>> }
>>> }
"""
score = 900
display_score = 1200
@classmethod
def to_python(cls, data, has_system_frames=None, slim_frames=True):
if not (data.get('type') or data.get('value')):
raise InterfaceValidationError("No 'type' or 'value' present")
if data.get('stacktrace') and data['stacktrace'].get('frames'):
stacktrace = Stacktrace.to_python(
data['stacktrace'],
has_system_frames=has_system_frames,
slim_frames=slim_frames,
)
else:
stacktrace = None
type = data.get('type')
value = data.get('value')
if not type and ':' in value.split(' ', 1)[0]:
type, value = value.split(':', 1)
# in case of TypeError: foo (no space)
value = value.strip()
if value is not None and not isinstance(value, basestring):
value = json.dumps(value)
value = trim(value, 4096)
kwargs = {
'type': trim(type, 128),
'value': value,
'module': trim(data.get('module'), 128),
'stacktrace': stacktrace,
}
return cls(**kwargs)
def to_json(self):
if self.stacktrace:
stacktrace = self.stacktrace.to_json()
else:
stacktrace = None
return {
'type': self.type,
'value': self.value,
'module': self.module,
'stacktrace': stacktrace,
}
def get_api_context(self, is_public=False):
if self.stacktrace:
stacktrace = self.stacktrace.get_api_context(is_public=is_public)
else:
stacktrace = None
return {
'type': self.type,
'value': unicode(self.value) if self.value else None,
'module': self.module,
'stacktrace': stacktrace,
}
def get_alias(self):
return 'exception'
def get_path(self):
return 'sentry.interfaces.Exception'
def get_hash(self):
output = None
if self.stacktrace:
output = self.stacktrace.get_hash()
if output and self.type:
output.append(self.type)
if not output:
output = filter(bool, [self.type, self.value])
return output
class Exception(Interface):
"""
An exception consists of a list of values. In most cases, this list
contains a single exception, with an optional stacktrace interface.
Each exception has a mandatory ``value`` argument and optional ``type`` and
``module`` arguments describing the exception class type and module
namespace.
You can also optionally bind a stacktrace interface to an exception. The
spec is identical to ``sentry.interfaces.Stacktrace``.
>>> {
>>> "values": [{
>>> "type": "ValueError",
>>> "value": "My exception value",
>>> "module": "__builtins__"
>>> "stacktrace": {
>>> # see sentry.interfaces.Stacktrace
>>> }
>>> }]
>>> }
Values should be sent oldest to newest; this includes both the stacktrace
and the exception itself.
.. note:: This interface can be passed as the 'exception' key in addition
to the full interface path.
"""
score = 2000
def __getitem__(self, key):
return self.values[key]
def __iter__(self):
return iter(self.values)
def __len__(self):
return len(self.values)
@classmethod
def to_python(cls, data):
if 'values' not in data:
data = {'values': [data]}
if not data['values']:
raise InterfaceValidationError("No 'values' present")
has_system_frames = cls.data_has_system_frames(data)
kwargs = {
'values': [
SingleException.to_python(
v,
has_system_frames=has_system_frames,
slim_frames=False,
)
for v in data['values']
],
}
if data.get('exc_omitted'):
if len(data['exc_omitted']) != 2:
raise InterfaceValidationError("Invalid value for 'exc_omitted'")
kwargs['exc_omitted'] = data['exc_omitted']
else:
kwargs['exc_omitted'] = None
instance = cls(**kwargs)
# we want to wait to slim things til we've reconciled in_app
slim_exception_data(instance)
return instance
@classmethod
def data_has_system_frames(cls, data):
system_frames = 0
app_frames = 0
for exc in data['values']:
if not exc.get('stacktrace'):
continue
for frame in exc['stacktrace'].get('frames', []):
# XXX(dcramer): handle PHP sending an empty array for a frame
if not isinstance(frame, dict):
continue
if frame.get('in_app') is True:
app_frames += 1
else:
system_frames += 1
# if there is a mix of frame styles then we indicate that system frames
# are present and should be represented as a split
return bool(app_frames and system_frames)
def to_json(self):
return {
'values': [v.to_json() for v in self.values],
'exc_omitted': self.exc_omitted,
}
def get_alias(self):
return 'exception'
def get_path(self):
return 'sentry.interfaces.Exception'
def compute_hashes(self, platform):
system_hash = self.get_hash(system_frames=True)
if not system_hash:
return []
app_hash = self.get_hash(system_frames=False)
if system_hash == app_hash or not app_hash:
return [system_hash]
return [system_hash, app_hash]
def get_hash(self, system_frames=True):
# optimize around the fact that some exceptions might have stacktraces
# while others may not and we ALWAYS want stacktraces over values
output = []
for value in self.values:
if not value.stacktrace:
continue
stack_hash = value.stacktrace.get_hash(
system_frames=system_frames,
)
if stack_hash:
output.extend(stack_hash)
output.append(value.type)
if not output:
for value in self.values:
output.extend(value.get_hash())
return output
def get_api_context(self, is_public=False):
return {
'values': [
v.get_api_context(is_public=is_public)
for v in self.values
],
'hasSystemFrames': any(
v.stacktrace.has_system_frames
for v in self.values
if v.stacktrace
),
'excOmitted': self.exc_omitted,
}
def to_string(self, event, is_public=False, **kwargs):
if not self.values:
return ''
output = []
for exc in self.values:
output.append(u'{0}: {1}\n'.format(exc.type, exc.value))
if exc.stacktrace:
output.append(exc.stacktrace.get_stacktrace(
event, system_frames=False, max_frames=5,
header=False) + '\n\n')
return (''.join(output)).strip()
def get_stacktrace(self, *args, **kwargs):
exc = self.values[0]
if exc.stacktrace:
return exc.stacktrace.get_stacktrace(*args, **kwargs)
return ''
def slim_exception_data(instance, frame_allowance=settings.SENTRY_MAX_STACKTRACE_FRAMES):
"""
Removes various excess metadata from middle frames which go beyond
``frame_allowance``.
"""
# TODO(dcramer): it probably makes sense to prioritize a certain exception
# rather than distributing allowance among all exceptions
frames = []
for exception in instance.values:
if not exception.stacktrace:
continue
frames.extend(exception.stacktrace.frames)
slim_frame_data(frames, frame_allowance)
| nicholasserra/sentry | src/sentry/interfaces/exception.py | Python | bsd-3-clause | 9,328 |
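The docstrings above define the JSON payload accepted by the exception interface and the normalization performed in ``to_python``. Below is a minimal, self-contained sketch (plain Python, not Sentry itself; ``split_type_value`` is a made-up helper name) of the type/value splitting rule applied when only a combined string such as ``"ValueError: message"`` is supplied:

```python
# Standalone sketch of the normalization rule in SingleException.to_python:
# when no explicit type is given and the first token of the value looks like
# "SomeError:", the type is split off the value.
def split_type_value(type_, value):
    if not type_ and ':' in value.split(' ', 1)[0]:
        type_, value = value.split(':', 1)
        value = value.strip()
    return type_, value

print(split_type_value(None, "ValueError: My exception value"))
# -> ('ValueError', 'My exception value')
print(split_type_value("TypeError", "foo"))   # already typed: left untouched
# -> ('TypeError', 'foo')
```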
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
class ParserException(Exception):
pass
| dergraaf/xpcc | tools/system_design/xmlparser/parser_exception.py | Python | bsd-3-clause | 88 |
# This is an example on how to use complex columns
import tables as tb
class Particle(tb.IsDescription):
name = tb.StringCol(16, pos=1) # 16-character String
lati = tb.ComplexCol(itemsize=16, pos=2)
longi = tb.ComplexCol(itemsize=8, pos=3)
vector = tb.ComplexCol(itemsize=8, shape=(2,), pos=4)
matrix2D = tb.ComplexCol(itemsize=16, shape=(2, 2), pos=5)
# Open a file in "w"rite mode
fileh = tb.open_file("table3.h5", mode="w")
table = fileh.create_table(fileh.root, 'table', Particle, "A table")
# Append several rows in only one call
table.append([
("Particle: 10", 10j, 0, (10 * 9 + 1j, 1), [[10 ** 2j, 11 * 3]] * 2),
("Particle: 11", 11j, -1, (11 * 10 + 2j, 2), [[11 ** 2j, 10 * 3]] * 2),
("Particle: 12", 12j, -2, (12 * 11 + 3j, 3), [[12 ** 2j, 9 * 3]] * 2),
("Particle: 13", 13j, -3, (13 * 11 + 4j, 4), [[13 ** 2j, 8 * 3]] * 2),
("Particle: 14", 14j, -4, (14 * 11 + 5j, 5), [[14 ** 2j, 7 * 3]] * 2)
])
print("str(Cols)-->", table.cols)
print("repr(Cols)-->", repr(table.cols))
print("Column handlers:")
for name in table.colnames:
print(table.cols._f_col(name))
print("Select table.cols.name[1]-->", table.cols.name[1])
print("Select table.cols.name[1:2]-->", table.cols.name[1:2])
print("Select table.cols.name[:]-->", table.cols.name[:])
print("Select table.cols._f_col('name')[:]-->", table.cols._f_col('name')[:])
print("Select table.cols.lati[1]-->", table.cols.lati[1])
print("Select table.cols.lati[1:2]-->", table.cols.lati[1:2])
print("Select table.cols.vector[:]-->", table.cols.vector[:])
print("Select table.cols['matrix2D'][:]-->", table.cols.matrix2D[:])
fileh.close()
| avalentino/PyTables | examples/table3.py | Python | bsd-3-clause | 1,660 |
"""A single source for all units in known ATS variables (WIP!)"""
native_units = { 'pressure' : 'Pa',
'temperature' : 'K',
'ponded_depth' : 'm',
'saturation_liquid' : '-',
'saturation_gas' : '-',
'saturation_ice' : '-',
'snow-depth' : 'm',
}
def get_units(varname):
if varname in native_units:
return native_units[varname]
if len(varname.split('-')) == 2:
return get_units(varname.split('-')[1])
| amanzi/ats-dev | tools/utils/ats_units.py | Python | bsd-3-clause | 517 |
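A short usage sketch for the lookup above, assuming the module is importable as ``ats_units`` (it lives at ``tools/utils/ats_units.py``) and that the fallback strips a ``domain-`` prefix before retrying; the variable names are illustrative:

```python
# Usage sketch for the ATS units lookup (module path is an assumption).
from ats_units import get_units

print(get_units('temperature'))       # -> 'K' (direct hit in the table)
print(get_units('surface-pressure'))  # -> 'Pa' (domain prefix stripped, then looked up)
print(get_units('mystery'))           # -> None (unknown names fall through)
```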
from __future__ import print_function
import unittest2
from lldbsuite.test.decorators import *
from lldbsuite.test.concurrent_base import ConcurrentEventsBase
from lldbsuite.test.lldbtest import TestBase
@skipIfWindows
class ConcurrentTwoBreakpointsOneSignal(ConcurrentEventsBase):
mydir = ConcurrentEventsBase.compute_mydir(__file__)
@skipIfFreeBSD # timing out on buildbot
# Atomic sequences are not supported yet for MIPS in LLDB.
@skipIf(triple='^mips')
def test(self):
"""Test two threads that trigger a breakpoint and one signal thread. """
self.build(dictionary=self.getBuildFlags())
self.do_thread_actions(num_breakpoint_threads=2, num_signal_threads=1)
| youtube/cobalt | third_party/llvm-project/lldb/packages/Python/lldbsuite/test/functionalities/thread/concurrent_events/TestConcurrentTwoBreakpointsOneSignal.py | Python | bsd-3-clause | 713 |
# License: BSD 3 clause
import sys
from warnings import warn
import numpy as np
from tick.base_model import LOSS_AND_GRAD
from tick.hawkes.model.build.hawkes_model import (
ModelHawkesSumExpKernLeastSq as _ModelHawkesSumExpKernLeastSq)
from .base import ModelHawkes
class ModelHawkesSumExpKernLeastSq(ModelHawkes):
"""Hawkes process model for sum-exponential kernels with fixed and
given decays.
It is modeled with least square loss:
.. math::
\\sum_{i=1}^{D} \\left(
\\int_0^T \\lambda_i(t)^2 dt
- 2 \\int_0^T \\lambda_i(t) dN_i(t)
\\right)
where :math:`\\lambda_i` is the intensity:
.. math::
\\forall i \\in [1 \\dots D], \\quad
\\lambda_i(t) = \\mu_i(t) + \\sum_{j=1}^D
\\sum_{t_k^j < t} \\phi_{ij}(t - t_k^j)
where
* :math:`D` is the number of nodes
* :math:`\mu_i(t)` are the baseline intensities
* :math:`\phi_{ij}` are the kernels
* :math:`t_k^j` are the timestamps of all events of node :math:`j`
and with a sum-exponential parametrisation of the kernels
.. math::
\phi_{ij}(t) = \sum_{u=1}^{U} \\alpha^u_{ij} \\beta^u
\exp (- \\beta^u t) 1_{t > 0}
In our implementation we denote:
* Integer :math:`D` by the attribute `n_nodes`
* Integer :math:`U` by the attribute `n_decays`
* Vector :math:`\\beta \in \mathbb{R}^{U}` by the
parameter `decays`. This parameter is given to the model
Parameters
----------
decays : `numpy.ndarray`, shape=(n_decays, )
An array giving the different decays of the exponentials kernels.
n_baselines : `int`, default=1
In this model baseline is supposed to be either constant or piecewise
constant. If `n_baselines > 1` then piecewise constant setting is
enabled. In this case :math:`\\mu_i(t)` is piecewise constant on
intervals of size `period_length / n_baselines` and periodic.
period_length : `float`, default=None
In piecewise constant setting this denotes the period of the
piecewise constant baseline function.
approx : `int`, default=0 (read-only)
Level of approximation used for computing exponential functions
* if 0: no approximation
* if 1: a fast approximated exponential function is used
n_threads : `int`, default=-1 (read-only)
Number of threads used for parallel computation.
* if ``int <= 0``: the number of threads available on
the CPU
* otherwise the desired number of threads
Attributes
----------
n_nodes : `int` (read-only)
Number of components, or dimension of the Hawkes model
n_decays : `int` (read-only)
Number of decays used in the sum-exponential kernel
baseline_intervals : `np.ndarray`, shape=(n_baselines)
Start time of each interval on which baseline is piecewise constant.
data : `list` of `numpy.array` (read-only)
The events given to the model through `fit` method.
Note that data given through `incremental_fit` is not stored
"""
# In the Hawkes case, getting the value and gradient at the same time needs
# only one pass over the data
pass_per_operation = \
{k: v for d in [ModelHawkes.pass_per_operation,
{LOSS_AND_GRAD: 2}] for k, v in d.items()}
_attrinfos = {
"decays": {
"writable": True,
"cpp_setter": "set_decays"
},
"n_baselines": {
"writable": True,
"cpp_setter": "set_n_baselines"
},
"_period_length": {
"writable": False,
},
}
def __init__(self, decays: np.ndarray, n_baselines=1, period_length=None,
approx: int = 0, n_threads: int = 1):
ModelHawkes.__init__(self, approx=approx, n_threads=n_threads)
self._end_times = None
if n_baselines <= 0:
raise ValueError('n_baselines must be positive')
if n_baselines > 1 and period_length is None:
raise ValueError('period_length must be given if multiple '
'baselines are used')
if period_length is not None and n_baselines == 1:
warn('period_length has no effect when using a constant baseline')
if isinstance(decays, list):
decays = np.array(decays, dtype=float)
elif decays.dtype != float:
decays = decays.astype(float)
self.decays = decays.copy()
self.n_baselines = n_baselines
self.period_length = period_length
self._model = _ModelHawkesSumExpKernLeastSq(
self.decays, self.n_baselines, self.cast_period_length(),
self.n_threads, self.approx)
@property
def n_decays(self):
return self._model.get_n_decays()
@property
def _epoch_size(self):
# This gives the typical size of an epoch when using a
# stochastic optimization algorithm
return self.n_nodes
@property
def _rand_max(self):
# This allows to obtain the range of the random sampling when
# using a stochastic optimization algorithm
return self.n_nodes
@property
def period_length(self):
return self._period_length
@period_length.setter
def period_length(self, val):
self._set("_period_length", val)
if hasattr(self, '_model') and self._model is not None:
self._model.set_period_length(self.cast_period_length())
def cast_period_length(self):
if self.period_length is None:
return sys.float_info.max
else:
return self.period_length
@property
def baseline_intervals(self):
return np.arange(self.n_baselines) * (
self._model.get_period_length() / self.n_baselines)
| X-DataInitiative/tick | tick/hawkes/model/model_hawkes_sumexpkern_leastsq.py | Python | bsd-3-clause | 5,875 |
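A hedged usage sketch for the model documented above. It assumes the `tick` package is installed and that the class is re-exported as ``tick.hawkes.ModelHawkesSumExpKernLeastSq`` (the public import path is not shown in this file); the event timestamps are illustrative only, and ``fit`` is the method the class docstring says receives the events.

```python
import numpy as np
from tick.hawkes import ModelHawkesSumExpKernLeastSq  # assumed public import path

decays = np.array([0.5, 2.0])                 # U = 2 exponential decay rates (beta^u)
events = [np.array([1.0, 2.5, 4.0]),          # timestamps of node 0
          np.array([0.7, 3.1, 3.9])]          # timestamps of node 1

model = ModelHawkesSumExpKernLeastSq(decays=decays)
model.fit(events)                             # fit() is provided by the ModelHawkes base class
print(model.n_nodes, model.n_decays)          # -> 2 2
```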
# -*- coding: utf-8 -*-
#
# Briefcase documentation build configuration file, created by
# sphinx-quickstart on Sat Jul 27 14:58:42 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Briefcase'
copyright = u'2013, Russell Keith-Magee'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'briefcasedoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'briefcase.tex', u'Briefcase Documentation',
u'Russell Keith-Magee', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'briefcase', u'Briefcase Documentation',
[u'Russell Keith-Magee'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'briefcase', u'Briefcase Documentation',
u'Russell Keith-Magee', 'Briefcase', 'Tools to support development of Python on mobile platforms.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| pombredanne/briefcase | docs/conf.py | Python | bsd-3-clause | 7,797 |
"""
A Canarydrop ties a canarytoken to an alerting mechanism,
and records accounting information about the Canarytoken.
Maps to the object stored in Redis.
"""
import datetime
import random
import md5
from constants import OUTPUT_CHANNEL_EMAIL, OUTPUT_CHANNEL_TWILIO_SMS
from queries import get_all_canary_sites, get_all_canary_path_elements,\
get_all_canary_pages, get_all_canary_domains, get_all_canary_nxdomains,\
load_user
from tokens import Canarytoken
from users import User, AnonymousUser
from exception import NoUser, NoCanarytokenPresent, UnknownAttribute
class Canarydrop(object):
allowed_attrs = ['alert_email_enabled', 'alert_email_recipient',\
'alert_sms_enabled', 'alert_sms_recipient', 'canarytoken',\
'triggered_count', 'triggered_list','memo', 'generated_url',\
'generated_email', 'generated_hostname','timestamp', 'user',
'imgur_token' ,'imgur', 'auth']
def __init__(self, generate=False, **kwargs):
self._drop = {}
for k, v in kwargs.iteritems():
if k not in self.allowed_attrs:
raise UnknownAttribute(attribute=k)
self._drop[k] = v
if 'canarytoken' not in self._drop:
raise NoCanarytokenPresent()
if 'timestamp' not in self._drop:
self._drop['timestamp'] = datetime.datetime.utcnow()\
.strftime("%s.%f")
if 'imgur_token' in self._drop and not self._drop['imgur_token']['id']:
raise Exception('Missing imgur_token from Canarydrop')
if 'user' not in self._drop or self._drop['user'] in ('None', 'Anonymous'):
self._drop['user'] = AnonymousUser()
else:
self._drop['user'] = load_user(self._drop['user'])
if not self._drop['user']:
raise NoUser()
if 'auth' not in self._drop:
self._drop['auth'] = md5.md5(str(random.SystemRandom()\
.randrange(1,2**128))).hexdigest()
if self._drop.get('alert_email_enabled', '') in ('True', True):
self._drop['alert_email_enabled'] = True
else:
self._drop['alert_email_enabled'] = False
if self._drop.get('alert_sms_enabled', '') in ('True', True):
self._drop['alert_sms_enabled'] = True
else:
self._drop['alert_sms_enabled'] = False
if generate:
self.generate_random_url()
self.generate_random_hostname()
def generate_random_url(self,):
"""Return a URL generated at random with the saved Canarytoken.
The random URL is also saved into the Canarydrop."""
sites = get_all_canary_sites()
path_elements = get_all_canary_path_elements()
pages = get_all_canary_pages()
generated_url = sites[random.randint(0,len(sites)-1)]+'/'
path = []
for count in range(0,random.randint(1,4)):
if len(path_elements) == 0:
break
elem = path_elements[random.randint(0,len(path_elements)-1)]
path.append(elem)
path_elements.remove(elem)
path.append(self._drop['canarytoken'])
path.append(pages[random.randint(0,len(pages)-1)])
generated_url += '/'.join(path)
self._drop['generated_url'] = generated_url
return self._drop['generated_url']
def get_url(self,):
if 'generated_url' in self._drop:
return self._drop['generated_url']
return self.generate_random_url()
def generate_random_hostname(self, with_random=False, nxdomain=False):
"""Return a hostname generated at random with the saved Canarytoken.
The random hostname is also saved into the Canarydrop."""
if nxdomain:
domains = get_all_canary_nxdomains()
else:
domains = get_all_canary_domains()
if with_random:
generated_hostname = str(random.randint(1,2**24))+'.'
else:
generated_hostname = ''
generated_hostname += self._drop['canarytoken']+'.'+\
domains[random.randint(0,len(domains)-1)]
return generated_hostname
def get_hostname(self, with_random=False, nxdomain=False):
if nxdomain:
if 'generated_nx_hostname' not in self._drop:
self._drop['generated_nx_hostname'] = \
self.generate_random_hostname(with_random=with_random, nxdomain=True)
return self._drop['generated_nx_hostname']
else:
if 'generated_hostname' not in self._drop:
self._drop['generated_hostname'] = \
self.generate_random_hostname(with_random=with_random, nxdomain=False)
return self._drop['generated_hostname']
def get_requested_output_channels(self,):
"""Return a list containing the output channels configured in this
Canarydrop."""
channels = []
if (self._drop.get('alert_email_enabled', False) and
self._drop.get('alert_email_recipient', None)):
channels.append(OUTPUT_CHANNEL_EMAIL)
if (self._drop.get('alert_sms_enabled', False) and
self._drop.get('alert_sms_recipient', None)):
channels.append(OUTPUT_CHANNEL_TWILIO_SMS)
return channels
@property
def canarytoken(self):
"""Return the Canarydrop's Canarytoken object."""
return Canarytoken(value=self._drop['canarytoken'])
@property
def memo(self):
"""Return the Canarydrop's memo."""
return self._drop['memo']
@property
def user(self):
return self._drop['user']
@property
def imgur_token(self):
return self._drop['imgur_token']
@imgur_token.setter
def imgur_token(self, value):
self._drop['imgur_token'] = value
def serialize(self,):
"""Return a representation of this Canarydrop suitable for saving
into redis."""
serialized = self._drop.copy()
if serialized['user']:
serialized['user'] = serialized['user'].username
return serialized
def alertable(self,):
if self.user.can_send_alert(canarydrop=self):
return True
else:
return False
def alerting(self,):
self.user.do_accounting(canarydrop=self)
def __getitem__(self, key):
return self._drop[key]
def __setitem__(self, key, value):
self._drop[key] = value
| tdr130/canarytokens | canarydrop.py | Python | bsd-3-clause | 6,563 |
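A hedged sketch of creating a drop with the class above. It assumes a configured canarytokens deployment (Redis reachable, canary sites/domains loaded), the Python 2 runtime this code targets, and a previously minted token value; the token, memo, and recipient are made up.

```python
# Hypothetical values throughout; requires a working canarytokens/Redis setup.
from canarydrop import Canarydrop

drop = Canarydrop(generate=True,                      # also mints a random URL and hostname
                  canarytoken='k6jyatxkbmg1eqz7',     # hypothetical token value
                  memo='Token embedded in finance share',
                  alert_email_enabled='True',
                  alert_email_recipient='admin@example.com')

print(drop.get_url())                        # the generated canary URL
print(drop.get_hostname())                   # the generated canary hostname
print(drop.get_requested_output_channels())  # -> [OUTPUT_CHANNEL_EMAIL]
```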
"""
The Response class in REST framework is similar to HTTPResponse, except that
it is initialized with unrendered data, instead of a pre-rendered string.
The appropriate renderer is called during Django's template response rendering.
"""
from __future__ import unicode_literals
import django
from django.core.handlers.wsgi import STATUS_CODE_TEXT
from django.template.response import SimpleTemplateResponse
from rest_framework.compat import six
class Response(SimpleTemplateResponse):
"""
An HttpResponse that allows its data to be rendered into
arbitrary media types.
"""
# TODO: remove that once Django 1.3 isn't supported
if django.VERSION >= (1, 4):
rendering_attrs = SimpleTemplateResponse.rendering_attrs + ['_closable_objects']
def __init__(self, data=None, status=200,
template_name=None, headers=None,
exception=False, content_type=None):
"""
Alters the init arguments slightly.
For example, drop 'template_name', and instead use 'data'.
Setting 'renderer' and 'media_type' will typically be deferred,
For example being set automatically by the `APIView`.
"""
super(Response, self).__init__(None, status=status)
self.data = data
self.template_name = template_name
self.exception = exception
self.content_type = content_type
if headers:
for name, value in six.iteritems(headers):
self[name] = value
@property
def rendered_content(self):
renderer = getattr(self, 'accepted_renderer', None)
media_type = getattr(self, 'accepted_media_type', None)
context = getattr(self, 'renderer_context', None)
assert renderer, ".accepted_renderer not set on Response"
assert media_type, ".accepted_media_type not set on Response"
assert context, ".renderer_context not set on Response"
context['response'] = self
charset = renderer.charset
content_type = self.content_type
if content_type is None and charset is not None:
content_type = "{0}; charset={1}".format(media_type, charset)
elif content_type is None:
content_type = media_type
self['Content-Type'] = content_type
ret = renderer.render(self.data, media_type, context)
if isinstance(ret, six.text_type):
assert charset, 'renderer returned unicode, and did not specify ' \
'a charset value.'
return bytes(ret.encode(charset))
if not ret:
del self['Content-Type']
return ret
@property
def status_text(self):
"""
Returns reason text corresponding to our HTTP response status code.
Provided for convenience.
"""
# TODO: Deprecate and use a template tag instead
# TODO: Status code text for RFC 6585 status codes
return STATUS_CODE_TEXT.get(self.status_code, '')
def __getstate__(self):
"""
Remove attributes from the response that shouldn't be cached
"""
state = super(Response, self).__getstate__()
for key in ('accepted_renderer', 'renderer_context', 'data'):
if key in state:
del state[key]
return state
| gminds/rapidnewsng | rest_framework/response.py | Python | bsd-3-clause | 3,316 |
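A hedged usage sketch: the ``Response`` above is normally returned from an ``APIView``, which sets ``accepted_renderer``, ``accepted_media_type``, and ``renderer_context`` during content negotiation before the deferred rendering runs (assumes Django REST framework is installed and configured; ``PingView`` is a made-up view).

```python
from rest_framework.views import APIView
from rest_framework.response import Response

class PingView(APIView):
    def get(self, request):
        # Unrendered data; the negotiated renderer turns it into JSON, HTML, etc.
        return Response({'ping': 'pong'}, status=200)
```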
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2015, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Functions for configuring Bokeh output.
'''
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
# Stdlib imports
import logging
logger = logging.getLogger(__name__)
import io
import json
import os
import warnings
# Third-party imports
# Bokeh imports
from .core.state import State
from .document import Document
from .embed import notebook_div, standalone_html_page_for_models, autoload_server
from .models.layouts import LayoutDOM, Row, Column, WidgetBox, VBoxForm
from .layouts import gridplot
from .model import _ModelInDocument
from .util.deprecate import deprecated
from .util.notebook import load_notebook, publish_display_data, get_comms
from .util.string import decode_utf8
from .util.serialization import make_id
import bokeh.util.browser as browserlib # full import needed for test mocking to work
from .client import DEFAULT_SESSION_ID, push_session, show_session
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
_new_param = {'tab': 2, 'window': 1}
_state = State()
#-----------------------------------------------------------------------------
# Local utilities
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class _CommsHandle(object):
_json = {}
def __init__(self, comms, doc, json):
self._cellno = None
try:
from IPython import get_ipython
ip = get_ipython()
hm = ip.history_manager
p_prompt = list(hm.get_tail(1, include_latest=True))[0][1]
self._cellno = p_prompt
except Exception as e:
logger.debug("Could not get Notebook cell number, reason: %s", e)
self._comms = comms
self._doc = doc
self._json[doc] = json
def _repr_html_(self):
if self._cellno is not None:
return "<p><code><Bokeh Notebook handle for <strong>In[%s]</strong>></code></p>" % str(self._cellno)
else:
return "<p><code><Bokeh Notebook handle></code></p>"
@property
def comms(self):
return self._comms
@property
def doc(self):
return self._doc
@property
def json(self):
return self._json[self._doc]
def update(self, doc, json):
self._doc = doc
self._json[doc] = json
def output_file(filename, title="Bokeh Plot", autosave=False, mode="cdn", root_dir=None):
'''Configure the default output state to generate output saved
to a file when :func:`show` is called.
Does not change the current Document from curdoc(). File,
server, and notebook output may be active at the same time, so
this does not clear the effects of output_server() or
output_notebook().
Args:
filename (str) : a filename for saving the HTML document
title (str, optional) : a title for the HTML document (default: "Bokeh Plot")
autosave (bool, optional) : whether to automatically save (default: False)
If True, then Bokeh plotting APIs may opt to automatically
save the file more frequently (e.g., after any plotting
command). If False, then the file is only saved upon calling
:func:`show` or :func:`save`.
mode (str, optional) : how to include BokehJS (default: ``'cdn'``)
One of: ``'inline'``, ``'cdn'``, ``'relative(-dev)'`` or
``'absolute(-dev)'``. See :class:`bokeh.resources.Resources` for more details.
root_dir (str, optional) : root directory to use for 'absolute' resources. (default: None)
This value is ignored for other resource types, e.g. ``INLINE`` or
``CDN``.
Returns:
None
.. note::
Generally, this should be called at the beginning of an interactive
session or the top of a script.
.. warning::
This output file will be overwritten on every save, e.g., each time
show() or save() is invoked, or any time a Bokeh plotting API
causes a save, if ``autosave`` is True.
'''
_state.output_file(
filename,
title=title,
autosave=autosave,
mode=mode,
root_dir=root_dir
)
def output_notebook(resources=None, verbose=False, hide_banner=False):
''' Configure the default output state to generate output in
Jupyter/IPython notebook cells when :func:`show` is called.
If output_server() has also been called, the notebook cells
are loaded from the configured server; otherwise, Bokeh pushes
HTML to the notebook directly.
Args:
resources (Resource, optional) :
How and where to load BokehJS from (default: CDN)
verbose (bool, optional) :
whether to display detailed BokehJS banner (default: False)
hide_banner (bool, optional):
whether to hide the Bokeh banner (default: False)
Returns:
None
.. note::
Generally, this should be called at the beginning of an interactive
session or the top of a script.
'''
load_notebook(resources, verbose, hide_banner)
_state.output_notebook()
# usually we default session_id to "generate a random one" but
# here we default to a hardcoded one. This is to support local
# usage e.g. with a notebook.
def output_server(session_id=DEFAULT_SESSION_ID, url="default", app_path="/", autopush=False):
""" Configure the default output state to push its document to a
session on a Bokeh server.
Sessions are in-memory and not persisted to disk; in a typical
production deployment, you would have a fresh session ID for each
browser tab. If different users share the same session ID, it will
create security and scalability problems.
``output_server()`` defaults to always using the
``session_id`` ``"default"``, which is useful for running
local demos or notebooks. However, if you are creating
production sessions, you'll need to set ``session_id`` to None
(to generate a fresh ID) or to a session ID generated elsewhere.
File, server, and notebook output may be active at the same
time, so output_server() does not clear the effects of
output_file() or output_notebook(). output_server() changes
the behavior of output_notebook(), so the notebook will load
output cells from the server rather than receiving them as
inline HTML.
Args:
session_id (str, optional) : Name of session to push on Bokeh server (default: "default")
Any existing session with the same name will be overwritten.
url (str, optional) : base URL of the Bokeh server (default: "default")
If "default" use the default localhost URL.
app_path (str, optional) : relative path of the app on the Bokeh server (default: "/")
autopush (bool, optional) : whether to automatically push (default: False)
If True, then Bokeh plotting APIs may opt to automatically
push the document more frequently (e.g., after any plotting
command). If False, then the document is only pushed upon calling
:func:`show` or :func:`push`.
Returns:
None
.. warning::
Calling this function will replace any existing server-side document in the named session.
"""
_state.output_server(session_id=session_id, url=url, app_path=app_path, autopush=autopush)
def set_curdoc(doc):
'''Configure the current document (returned by curdoc()).
This is the document we will save or push according to
output_file(), output_server(), etc. configuration.
Args:
doc (Document) : Document we will output.
Returns:
None
.. note::
Generally, this should be called at the beginning of an interactive
session or the top of a script.
.. warning::
Calling this function will replace any existing document.
'''
_state.document = doc
def curdoc():
''' Return the document for the current default state.
Returns:
doc : the current default document object.
'''
return _state.document
def curstate():
''' Return the current State object
Returns:
state : the current default State object
'''
return _state
def show(obj, browser=None, new="tab"):
''' Immediately display a plot object.
In an IPython/Jupyter notebook, the output is displayed in an output
cell. Otherwise, a browser window or tab is autoraised to display the
plot object.
If both a server session and notebook output have been configured on
the default output state then the notebook output will be generated to
load the plot from that server session.
Args:
obj (LayoutDOM object) : a Layout (Row/Column), Plot or Widget object to display
browser (str, optional) : browser to show with (default: None)
For systems that support it, the **browser** argument allows
specifying which browser to display in, e.g. "safari", "firefox",
"opera", "windows-default" (see the ``webbrowser`` module
documentation in the standard lib for more details).
new (str, optional) : new file output mode (default: "tab")
For file-based output, opens or raises the browser window
showing the current output file. If **new** is 'tab', then
opens a new tab. If **new** is 'window', then opens a new window.
Returns:
when in a Jupyter notebook (with ``output_notebook`` enabled), returns
a handle that can be used by ``push_notebook``, None otherwise.
.. note::
The ``browser`` and ``new`` parameters are ignored when showing in
an IPython/Jupyter notebook.
'''
if obj not in _state.document.roots:
_state.document.add_root(obj)
return _show_with_state(obj, _state, browser, new)
def _show_with_state(obj, state, browser, new):
controller = browserlib.get_browser_controller(browser=browser)
comms_handle = None
shown = False
if state.notebook:
comms_handle = _show_notebook_with_state(obj, state)
shown = True
elif state.server_enabled:
_show_server_with_state(obj, state, new, controller)
shown = True
if state.file or not shown:
_show_file_with_state(obj, state, new, controller)
return comms_handle
def _show_file_with_state(obj, state, new, controller):
filename = save(obj, state=state)
controller.open("file://" + filename, new=_new_param[new])
def _show_notebook_with_state(obj, state):
if state.server_enabled:
push(state=state)
snippet = autoload_server(obj, session_id=state.session_id_allowing_none, url=state.url, app_path=state.app_path)
publish_display_data({'text/html': snippet})
else:
comms_target = make_id()
publish_display_data({'text/html': notebook_div(obj, comms_target)})
handle = _CommsHandle(get_comms(comms_target), state.document, state.document.to_json())
state.last_comms_handle = handle
return handle
def _show_server_with_state(obj, state, new, controller):
push(state=state)
show_session(session_id=state.session_id_allowing_none, url=state.url, app_path=state.app_path,
new=new, controller=controller)
def save(obj, filename=None, resources=None, title=None, state=None, validate=True):
''' Save an HTML file with the data for the current document.
Will fall back to the default output state (or an explicitly provided
:class:`State` object) for ``filename``, ``resources``, or ``title`` if they
are not provided. If the filename is not given and not provided via output state,
it is derived from the script name (e.g. ``/foo/myplot.py`` will create
``/foo/myplot.html``)
Args:
obj (Document or model object) : a plot object to save
filename (str, optional) : filename to save document under (default: None)
If None, use the default state configuration, otherwise raise a
``RuntimeError``.
resources (Resources, optional) : A Resources config to use (default: None)
If None, use the default state configuration, if there is one.
otherwise use ``resources.INLINE``.
title (str, optional) : a title for the HTML document (default: None)
If None, use the default state title value, if there is one.
Otherwise, use "Bokeh Plot"
validate (bool, optional) : True to check integrity of the models
Returns:
filename (str) : the filename where the HTML file is saved.
Raises:
RuntimeError
'''
if state is None:
state = _state
filename, resources, title = _get_save_args(state, filename, resources, title)
_save_helper(obj, filename, resources, title, validate)
return os.path.abspath(filename)
def _detect_filename(ext):
""" Detect filename from the name of the script being run. Returns
None if the script could not be found (e.g. interactive mode).
"""
import inspect
from os.path import isfile, dirname, basename, splitext, join
from inspect import currentframe
frame = inspect.currentframe()
while frame.f_back and frame.f_globals.get('__name__') != '__main__':
frame = frame.f_back
filename = frame.f_globals.get('__file__')
if filename and isfile(filename):
name, _ = splitext(basename(filename))
return join(dirname(filename), name + "." + ext)
def _get_save_args(state, filename, resources, title):
warn = True
if filename is None and state.file:
filename = state.file['filename']
if filename is None:
warn = False
filename = _detect_filename("html")
if filename is None:
raise RuntimeError("save() called but no filename was supplied or detected, and output_file(...) was never called, nothing saved")
if resources is None and state.file:
resources = state.file['resources']
if resources is None:
if warn:
warnings.warn("save() called but no resources were supplied and output_file(...) was never called, defaulting to resources.CDN")
from .resources import CDN
resources = CDN
if title is None and state.file:
title = state.file['title']
if title is None:
if warn:
warnings.warn("save() called but no title was supplied and output_file(...) was never called, using default title 'Bokeh Plot'")
title = "Bokeh Plot"
return filename, resources, title
def _save_helper(obj, filename, resources, title, validate):
with _ModelInDocument(obj):
if isinstance(obj, LayoutDOM):
doc = obj.document
elif isinstance(obj, Document):
doc = obj
else:
raise RuntimeError("Unable to save object of type '%s'" % type(obj))
if validate:
doc.validate()
html = standalone_html_page_for_models(obj, resources, title)
with io.open(filename, "w", encoding="utf-8") as f:
f.write(decode_utf8(html))
# this function exists mostly to be mocked in tests
def _push_to_server(session_id, url, app_path, document, io_loop):
session = push_session(document, session_id=session_id, url=url, app_path=app_path, io_loop=io_loop)
session.close()
session.loop_until_closed()
def push(session_id=None, url=None, app_path=None, document=None, state=None, io_loop=None, validate=True):
''' Update the server with the data for the current document.
Will fall back to the default output state (or an explicitly
provided :class:`State` object) for ``session_id``, ``url``,
``app_path``, or ``document`` if they are not provided.
Args:
session_id (str, optional) : a Bokeh server session ID to push objects to
url (str, optional) : a Bokeh server URL to push objects to
app_path (str, optional) : Relative application path to push objects to
document (Document, optional) : A :class:`bokeh.document.Document` to use
state (State, optional) : A state to use for any output_server() configuration of session or url
io_loop (tornado.ioloop.IOLoop, optional) : Tornado IOLoop to use for connecting to server
validate (bool, optional) : True to check integrity of the document we are pushing
Returns:
None
'''
if state is None:
state = _state
if not session_id:
session_id = state.session_id_allowing_none
if not url:
url = state.url
if not app_path:
app_path = state.app_path
# State is supposed to ensure these are set
assert session_id is not None
assert url is not None
assert app_path is not None
if not document:
document = state.document
if not document:
warnings.warn("No document to push")
if validate:
document.validate()
_push_to_server(session_id=session_id, url=url, app_path=app_path,
document=document, io_loop=io_loop)
def push_notebook(document=None, state=None, handle=None):
''' Update the last-shown plot in a Jupyter notebook with the new data
or property values.
Args:
document (Document, optional) :
A :class:`~bokeh.document.Document` to push from. If None,
uses ``curdoc()``.
state (State, optional) :
A Bokeh State object
Returns:
None
Examples:
Typical usage is similar to this:
.. code-block:: python
from bokeh.io import push_notebook
# code to create a plot
show(plot)
plot.title = "New Title"
# This will cause the title to update
push_notebook()
'''
if state is None:
state = _state
if state.server_enabled:
raise RuntimeError("output_server() has been called, use push() to push to server")
if not document:
document = state.document
if not document:
warnings.warn("No document to push")
return
if handle is None:
handle = state.last_comms_handle
if not handle:
warnings.warn("Cannot find a last shown plot to update. Call output_notebook() and show() before push_notebook()")
return
to_json = document.to_json()
if handle.doc is not document:
msg = dict(doc=to_json)
else:
msg = Document._compute_patch_between_json(handle.json, to_json)
handle.comms.send(json.dumps(msg))
handle.update(document, to_json)
def reset_output(state=None):
''' Clear the default state of all output modes.
Returns:
None
'''
_state.reset()
def _remove_roots(subplots):
doc = _state.document
for sub in subplots:
if sub in doc.roots:
doc.remove_root(sub)
def _push_or_save(obj):
if _state.server_enabled and _state.autopush:
push()
if _state.file and _state.autosave:
save(obj)
@deprecated("Bokeh 0.12.0", "bokeh.models.layouts.Row")
def hplot(*children, **kwargs):
layout = Row(children=list(children), **kwargs)
return layout
@deprecated("Bokeh 0.12.0", "bokeh.models.layouts.Column")
def vplot(*children, **kwargs):
layout = Column(children=list(children), **kwargs)
return layout
@deprecated("Bokeh 0.12.0", "bokeh.models.layouts.WidgetBox")
def vform(*children, **kwargs):
# Returning a VBoxForm, because it has helpers so that
# Bokeh deprecates gracefully.
return VBoxForm(*children, **kwargs)
| clairetang6/bokeh | bokeh/io.py | Python | bsd-3-clause | 20,374 |
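A hedged sketch of the file-output path described by ``output_file``/``save``/``show`` above (this is the Bokeh 0.11/0.12-era API that the module belongs to; ``figure`` comes from ``bokeh.plotting``, not from this module, and the filename and data are illustrative).

```python
from bokeh.plotting import figure
from bokeh.io import output_file, save, show

output_file("lines.html", title="Demo", mode="cdn")   # configure the file output state
p = figure(title="y = x^2")
p.line([0, 1, 2, 3], [0, 1, 4, 9])

saved_path = save(p)   # writes lines.html and returns its absolute path
# show(p)              # would also open the saved file in a browser tab
```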
# -*- coding: utf-8 -*-
"""
flaskbb.message.models
~~~~~~~~~~~~~~~~~~~~~~
The models for the conversations and messages are located here.
:copyright: (c) 2014 by the FlaskBB Team.
:license: BSD, see LICENSE for more details.
"""
from sqlalchemy_utils import UUIDType
from flaskbb.extensions import db
from flaskbb.utils.helpers import time_utcnow
from flaskbb.utils.database import CRUDMixin, UTCDateTime
class Conversation(db.Model, CRUDMixin):
__tablename__ = "conversations"
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=False)
from_user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
to_user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
shared_id = db.Column(UUIDType, nullable=False)
subject = db.Column(db.String(255))
date_created = db.Column(UTCDateTime(timezone=True), default=time_utcnow)
trash = db.Column(db.Boolean, nullable=False, default=False)
draft = db.Column(db.Boolean, nullable=False, default=False)
unread = db.Column(db.Boolean, nullable=False, default=True)
messages = db.relationship(
"Message", lazy="joined", backref="conversation",
primaryjoin="Message.conversation_id == Conversation.id",
order_by="asc(Message.id)",
cascade="all, delete-orphan"
)
# this is actually the users message box
user = db.relationship("User", lazy="joined", foreign_keys=[user_id])
# the user to whom the conversation is addressed
to_user = db.relationship("User", lazy="joined", foreign_keys=[to_user_id])
# the user who sent the message
from_user = db.relationship("User", lazy="joined",
foreign_keys=[from_user_id])
@property
def first_message(self):
"""Returns the first message object."""
return self.messages[0]
@property
def last_message(self):
"""Returns the last message object."""
return self.messages[-1]
def save(self, message=None):
"""Saves a conversation and returns the saved conversation object.
:param message: If given, it will also save the message for the
conversation. It expects a Message object.
"""
if message is not None:
# create the conversation
self.date_created = time_utcnow()
db.session.add(self)
db.session.commit()
# create the actual message for the conversation
message.save(self)
return self
db.session.add(self)
db.session.commit()
return self
class Message(db.Model, CRUDMixin):
__tablename__ = "messages"
id = db.Column(db.Integer, primary_key=True)
conversation_id = db.Column(db.Integer, db.ForeignKey("conversations.id"),
nullable=False)
# the user who wrote the message
user_id = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=False)
message = db.Column(db.Text, nullable=False)
date_created = db.Column(UTCDateTime(timezone=True), default=time_utcnow)
user = db.relationship("User", lazy="joined")
def save(self, conversation=None):
"""Saves a private message.
:param conversation: The conversation to which the message
belongs.
"""
if conversation is not None:
self.conversation_id = conversation.id
self.date_created = time_utcnow()
db.session.add(self)
db.session.commit()
return self
| realityone/flaskbb | flaskbb/message/models.py | Python | bsd-3-clause | 3,612 |
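A hedged sketch of starting a conversation with the models above (assumes an application context for a configured FlaskBB instance and two existing users with ids 1 and 2; the subject and message text are made up).

```python
import uuid

from flaskbb.message.models import Conversation, Message

conv = Conversation(user_id=1, from_user_id=1, to_user_id=2,
                    shared_id=uuid.uuid4(), subject="Hello")
msg = Message(user_id=1, message="Hi there!")
conv.save(message=msg)   # persists the conversation first, then the message
```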
import inspect
from chatterbot import languages
from unittest import TestCase
class LanguageClassTests(TestCase):
def test_classes_have_correct_attributes(self):
language_classes = languages.get_language_classes()
for name, obj in language_classes:
self.assertTrue(inspect.isclass(obj))
self.assertTrue(hasattr(obj, 'ISO_639'))
self.assertTrue(hasattr(obj, 'ISO_639_1'))
self.assertTrue(hasattr(obj, 'ENGLISH_NAME'))
self.assertEqual(name, obj.ISO_639.upper())
self.assertEqual(len(language_classes), 402)
| vkosuri/ChatterBot | tests/test_languages.py | Python | bsd-3-clause | 599 |
from django.test import SimpleTestCase
from couchexport.transforms import couch_to_excel_datetime
class ExportTransformTest(SimpleTestCase):
def test_couch_to_excel_datetime_current_fmt(self):
self.assertEqual('2015-05-14 13:03:06', couch_to_excel_datetime('2015-05-14T13:03:06.455000Z', {}))
def test_couch_to_excel_datetime_old_fmt(self):
self.assertEqual('2014-10-07 12:27:15', couch_to_excel_datetime('2014-10-07T12:27:15Z', {}))
| dimagi/commcare-hq | corehq/ex-submodules/couchexport/tests/test_transforms.py | Python | bsd-3-clause | 462 |
# Copyright (c) 2021-2022 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
from inspect import signature
class FormatterDecorator(object):
"""
Base class for :class:`Formatter` decorators.
"""
def init(self, cls, obj, **kwargs):
"""
Initialize ``obj`` of type ``cls``. The default operation is to call the
``__init__`` method of the original version of the formatter class.
Sub-classes of :class:`FormatterDecorator` may override this method if
customization of the initialization is needed. Usually, the overridden
method has to call the original ``__init__`` at some point, which can
be performed either by ``super().init(cls, obj, **kwargs)`` (which will
call this method, and then transitively the original ``__init__``) or by
``super(cls, obj).__init__(**kwargs)`` (which calls the original
``__init__`` directly).
:param cls: The decorated version of the formatter class, as returned by
:meth:`__call__`.
:param obj: The issue formatter instance to initialize.
"""
super(cls, obj).__init__(**kwargs)
def call(self, cls, obj, *, issue):
"""
Call ``obj`` of type ``cls``. The default operation is to call the
``__call__`` method of the original version of the formatter class and
return its result.
Sub-classes of :class:`FormatterDecorator` may override this method if
customization of calling the formatter is needed. Usually, the
overridden method has to call the original ``__call__`` at some point,
which can be performed either by ``super().call(cls, obj, issue=issue)``
(which will call this method, and then transitively the original
``__call__``) or by ``super(cls, obj).__call__(issue=issue)`` (which
calls the original ``__call__`` directly).
:param cls: The decorated version of the formatter class, as returned by
:meth:`__call__`.
:param obj: The issue formatter instance to invoke.
:param issue: The issue to be formatted, as defined by
:meth:`Formatter.__call__`.
:return: The string representation of the issue, as defined by
:meth:`Formatter.__call__`.
"""
return super(cls, obj).__call__(issue=issue)
def summary(self, cls, obj, *, issue):
"""
Call ``summary`` of ``obj`` of type ``cls``. The default operation is to
call the ``summary`` method of the original version of the formatter
class and return its result.
Sub-classes of :class:`FormatterDecorator` may override this method if
customization of the issue summary is needed. Usually, the overridden
method has to call the original ``summary`` at some point, which can be
performed either by ``super().summary(cls, obj, issue=issue)`` (which
will call this method, and then transitively the original ``summary``)
or by ``super(cls, obj).summary(issue=issue)`` (which calls the original
``summary`` directly).
:param cls: The decorated version of the formatter class, as returned by
:meth:`__call__`.
:param obj: The issue formatter instance to invoke.
:param issue: The issue to be formatted, as defined by
:meth:`Formatter.summary`.
:return: The summary description of the issue, as defined by
:meth:`Formatter.summary`.
"""
return super(cls, obj).summary(issue=issue)
def __call__(self, formatter_class):
"""
Return a decorated version of ``formatter_class``. Create a sub-class of
``formatter_class`` that transfers control to :meth:`init`,
:meth:`call`, or :meth:`summary` when its ``__init__``, ``__call__``, or
``summary`` methods are invoked.
:param formatter_class: The issue formatter class to decorate.
:return: The decorated version of the issue formatter class.
"""
decorator = self
class DecoratedFormatter(formatter_class):
def __init__(self, **kwargs):
signature(self.__init__).bind(**kwargs)
decorator.init(DecoratedFormatter, self, **kwargs)
__init__.__signature__ = signature(formatter_class.__init__)
def __call__(self, *, issue):
return decorator.call(DecoratedFormatter, self, issue=issue)
def summary(self, *, issue):
return decorator.summary(DecoratedFormatter, self, issue=issue)
return DecoratedFormatter
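# A minimal illustrative sketch: a hypothetical decorator that appends a fixed
# suffix to every formatted issue, following the override pattern described in
# the docstrings above. The class name and the ``suffix`` parameter are
# assumptions made for this example only.
class AppendSuffixDecorator(FormatterDecorator):

    def __init__(self, suffix=''):
        self.suffix = suffix

    def call(self, cls, obj, *, issue):
        # Delegate to the original __call__ of the decorated formatter class,
        # then post-process its result.
        return super().call(cls, obj, issue=issue) + self.suffix

# Hypothetical usage, applied to some Formatter subclass:
#
#     @AppendSuffixDecorator(suffix=' [triaged]')
#     class MyFormatter(Formatter):
#         ...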
|
renatahodovan/fuzzinator
|
fuzzinator/formatter/formatter_decorator.py
|
Python
|
bsd-3-clause
| 4,802
|
import numpy
from numpy.distutils.misc_util import Configuration
def configuration(parent_package="", top_path=None):
config = Configuration("tree", parent_package, top_path)
config.add_extension("_tree",
sources=["_tree.c"],
include_dirs=[numpy.get_include()])
config.add_subpackage("tests")
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration().todict())
|
cdegroc/scikit-learn
|
sklearn/tree/setup.py
|
Python
|
bsd-3-clause
| 486
|
from uuid import uuid4
from celery import shared_task, current_app, Task
from django.utils.timezone import now
from django_celery_fulldbresult import serialization
from django_celery_fulldbresult.errors import SchedulingStopPublishing
from django_celery_fulldbresult.models import (
TaskResultMeta, SCHEDULED, SCHEDULED_SENT)
class ScheduledTask(Task):
abstract = True
def apply_async(self, *args, **kwargs):
try:
return super(ScheduledTask, self).apply_async(*args, **kwargs)
except SchedulingStopPublishing as exc:
# There was an ETA and the task was not sent to the broker.
# A scheduled task was created instead.
return self.AsyncResult(exc.task_id)
@shared_task(ignore_result=True)
def send_scheduled_task():
"""Task that sends due scheduled tasks for execution.
Each DB operation must be in its own commit so that even if
send_scheduled_task is called multiple times concurrently, a task is
ensured to only be sent **at most once**. If a crash occurs while sending a
task, the task will stay indefinitely in the SCHEDULED state while having a
schedule id.
1. Tasks due to be executed are marked with a schedule id. This prevents
the task from being sent for execution twice.
    2. Each task marked by the schedule id is sent for execution without an
    ETA.
    3. We change the status from SCHEDULED to SCHEDULED_SENT after a task has
    been sent for execution.
    **IMPORTANT:** Never call this task inside an atomic block or you could
end up sending tasks more than once. Always use autocommit (each SQL query
executed in its own transaction).
"""
limit = now()
schedule_id = uuid4().hex
# Mark tasks ready to be scheduled
TaskResultMeta.objects.filter(
scheduled_id__isnull=True, eta__lt=limit,
status=SCHEDULED).update(
scheduled_id=schedule_id)
# Fetch and apply by removing eta
for task in TaskResultMeta.objects.filter(
scheduled_id=schedule_id).all():
task_name = task.task
task_args = serialization.loads(task.args)
task_kwargs = serialization.loads(task.kwargs)
result = current_app.send_task(
task_name, args=task_args, kwargs=task_kwargs)
task.status = SCHEDULED_SENT
task.result = {"new_task_id": result.task_id}
task.save()
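# Minimal illustrative sketch (assumed setup, not taken from this project's
# docs): send_scheduled_task is meant to run periodically, for example via
# celery beat. The schedule name and the one-minute interval are arbitrary
# example values.
#
# from datetime import timedelta
#
# CELERYBEAT_SCHEDULE = {
#     'send-scheduled-tasks': {
#         'task': 'django_celery_fulldbresult.tasks.send_scheduled_task',
#         'schedule': timedelta(minutes=1),
#     },
# }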
|
resulto/django-celery-fulldbresult
|
django_celery_fulldbresult/tasks.py
|
Python
|
bsd-3-clause
| 2,426
|
# -*- coding: utf-8 -*-
"""
force_rekey.py forces a VSD keyserver to reissue Seed, SEKs, and other primitives.
--- Author ---
Roman Dodin <dodin.roman@gmail.com>
--- Usage ---
python force_rekey.py
--- Documentation ---
https://github.com/nuagenetworks/vspk-examples/blob/master/python/force_rekey.md
"""
from __future__ import print_function
import time
from vspk import v6 as vspk
# Login variables
n_username = 'csproot'
n_password = 'csproot'
n_org = 'csp'
api_url = 'https://localhost:8443'
# script variables
org_name = '521_CATS_FIXED'
job_timeout = 600 # in seconds
def is_job_ready(job, timeout=600):
"""
    Waits for the job to finish; returns True on success, False on failure,
    and None if it times out.
"""
timeout_start = time.time()
while time.time() < timeout_start + timeout:
job.fetch()
if job.status == 'SUCCESS':
print('SUCCESS :: Re-keying Job succeeded!')
return True
if job.status == 'FAILED':
return False
time.sleep(1)
print('ERROR :: Job {} failed to return its status in {}sec interval'.format(
job.command, timeout))
nuage_session = vspk.NUVSDSession(
username=n_username,
password=n_password,
enterprise=n_org,
api_url=api_url)
me = nuage_session.start().user
# get parent for the re-key job object
org = me.enterprises.get_first(filter='name == "{}"'.format(org_name))
# create job object
job = vspk.NUJob(command='FORCE_KEYSERVER_UPDATE')
print('Starting {} job for the {} Organization'.format(
'FORCE_KEYSERVER_UPDATE',
org.name))
# run job object under its parent
org.create_child(job)
# wait for object to finish
is_job_ready(job, timeout=job_timeout)
|
nuagenetworks/vspk-examples
|
python/force_rekey.py
|
Python
|
bsd-3-clause
| 1,673
|
#!/usr/bin/env python
import argparse
import os.path as osp
import instance_occlsegm_lib
if __name__ == '__main__':
here = osp.dirname(osp.abspath(__file__))
default_logs_dir = osp.join(here, 'logs')
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('logs_dir', default=default_logs_dir, nargs='?',
help='logs dir')
parser.add_argument('--sort', '-s', nargs='+', help='sort key (ex. name)')
parser.add_argument('--show-range', '-r', action='store_true',
help='show value range')
parser.add_argument('--as-df', action='store_true', help='as df')
args = parser.parse_args()
print('# logs_dir = %s' % args.logs_dir)
keys = [
'name',
'last_time',
'dataset',
'hostname',
'git_hash',
'model',
'epoch',
'iteration',
'validation/main/msq/vis',
'validation/main/msq/occ',
'validation/main/msq',
'validation/main/mdq',
'validation/main/map',
'validation/main/mpq',
]
key_remap = {
key: key[len('validation/main/'):]
for key in keys
if key.startswith('validation/main/')
}
df = instance_occlsegm_lib.utils.summarize_logs(
args.logs_dir,
keys,
target_key=keys[-1],
objective='max',
sort=args.sort,
show_range=args.show_range,
as_df=args.as_df,
key_remap=key_remap,
)
|
start-jsk/jsk_apc
|
demos/instance_occlsegm/examples/instance_occlsegm/instance_occlusion_segmentation/summarize_logs.py
|
Python
|
bsd-3-clause
| 1,538
|
"""
Tests for FormGroups
"""
from django import forms
from unittest import TestCase
from rebar.group import formgroup_factory, FormGroup
class FormGroupTests(TestCase):
def test_factory(self):
FormGroupClass = formgroup_factory([])
self.assert_(issubclass(FormGroupClass, FormGroup))
def test_creation(self):
"""A FormGroup can hold Forms or FormSets."""
def test_form_access(self):
"""You can access individual Forms as properties."""
FormGroupClass = formgroup_factory([
(TestForm, 'test1'),
(TestForm2, 'test2'),
])
fg = FormGroupClass(instance=CallSentinel())
        self.assertEqual(len(fg), 2)
self.assert_(isinstance(fg.test1, TestForm))
self.assert_(isinstance(fg.test2, TestForm2))
self.assert_(fg.test1 == fg.forms[0])
self.assert_(fg.test2 == fg.forms[1])
def test_form_prefixes(self):
FormGroupClass = formgroup_factory([
(TestForm, 'test1'),
(TestForm2, 'test2'),
])
instance = CallSentinel()
fg = FormGroupClass(instance=instance)
# members have different prefixes
self.assert_(fg.forms[0].prefix != fg.forms[1].prefix)
# the prefixes all start with the same string
self.assert_(fg.forms[0].prefix.find(fg.prefix) == 0)
self.assert_(fg.forms[1].prefix.find(fg.prefix) == 0)
def test_save(self):
"""Calling .save() calls save on all elements."""
FormGroupClass = formgroup_factory([
(TestForm, 'test1'),
(TestForm2, 'test2'),
])
instance = CallSentinel()
fg = FormGroupClass(instance=instance)
        # assert our save sentinel values are False to start with
self.assertFalse(instance.called.get('save', False))
self.assertFalse(fg.forms[0].called.get('save', False))
self.assertFalse(fg.forms[1].called.get('save', False))
# calling .save() will call .save() on both Forms, flipping the flag
fg.save()
self.assert_(fg.forms[0].called.get('save', False))
self.assert_(fg.forms[1].called.get('save', False))
# this also calls save() on the instance
self.assert_(instance.called.get('save', False))
def test_validation(self):
FormGroupClass = formgroup_factory([
(TestForm, 'test1'),
(TestForm2, 'test2'),
])
# create some form data -- missing a required field
data = {
'group-test1-name' : '',
'group-test2-name' : 'Anita Man',
}
fg = FormGroupClass(data, instance=CallSentinel())
self.assertFalse(fg.is_valid())
self.assert_(fg.forms[0].called.get('is_valid', False))
self.assert_(fg.forms[1].called.get('is_valid', False))
# formgroup.errors is a dict of error dicts
# -- TestForm2 is valid
self.assertFalse(fg.errors[1])
# -- TestForm is not valid
self.assert_(fg.errors[0])
# create some form data that passes validation
data = {
'group-test1-name' : 'Anita Man',
'group-test2-name' : 'Mike Rotch',
}
fg = FormGroupClass(data, instance=CallSentinel())
self.assert_(fg.is_valid())
self.assert_(fg.forms[0].called.get('is_valid', False))
self.assert_(fg.forms[1].called.get('is_valid', False))
# Support objects for testing FormGroups --
class CallSentinel(object):
def __init__(self, *args, **kwargs):
super(CallSentinel, self).__init__(*args, **kwargs)
self.called = {}
def save(self, *args, **kwargs):
self.called['save'] = True
def is_valid(self):
self.called['is_valid'] = True
return super(CallSentinel, self).is_valid()
class TestForm(CallSentinel, forms.Form):
name = forms.CharField(required=True)
def __init__(self, *args, **kwargs):
self.instance = kwargs.pop('instance')
super(TestForm, self).__init__(*args, **kwargs)
class TestForm2(TestForm):
pass
|
dawalama/rebar
|
src/rebar/tests/test_formgroup.py
|
Python
|
bsd-3-clause
| 4,134
|
import os
import pkgutil
import re
import shutil
import subprocess
import sys
from distutils.version import LooseVersion
from distutils import log
from sphinx.setup_command import BuildDoc as SphinxBuildDoc
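# Python source executed in a subprocess by AstropyBuildDocs.run(): it changes
# into the documentation source directory, prepends the freshly built package
# (and astropy_helpers) to sys.path, and then invokes Sphinx's build_main once
# per requested builder, exiting with the first non-zero return code.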
SUBPROCESS_TEMPLATE = """
import os
import sys
{build_main}
os.chdir({srcdir!r})
{sys_path_inserts}
for builder in {builders!r}:
retcode = build_main(argv={argv!r} + ['-b', builder, '.', os.path.join({output_dir!r}, builder)])
if retcode != 0:
sys.exit(retcode)
"""
def ensure_sphinx_astropy_installed():
"""
Make sure that sphinx-astropy is available.
"""
try:
from sphinx_astropy import __version__ as sphinx_astropy_version # noqa
except ImportError:
sphinx_astropy_version = None
if (sphinx_astropy_version is None
or LooseVersion(sphinx_astropy_version) < LooseVersion('1.2')):
raise ImportError("sphinx-astropy 1.2 or later needs to be installed to build "
"the documentation.")
class AstropyBuildDocs(SphinxBuildDoc):
"""
A version of the ``build_docs`` command that uses the version of Astropy
that is built by the setup ``build`` command, rather than whatever is
installed on the system. To build docs against the installed version, run
``make html`` in the ``astropy/docs`` directory.
"""
description = 'Build Sphinx documentation for Astropy environment'
user_options = SphinxBuildDoc.user_options[:]
user_options.append(
('warnings-returncode', 'w',
'Parses the sphinx output and sets the return code to 1 if there '
'are any warnings. Note that this will cause the sphinx log to '
'only update when it completes, rather than continuously as is '
'normally the case.'))
user_options.append(
('clean-docs', 'l',
'Completely clean previous builds, including '
'automodapi-generated files before building new ones'))
user_options.append(
('no-intersphinx', 'n',
'Skip intersphinx, even if conf.py says to use it'))
user_options.append(
('open-docs-in-browser', 'o',
'Open the docs in a browser (using the webbrowser module) if the '
'build finishes successfully.'))
user_options.append(
('parallel=', 'j',
'Build the docs in parallel on the specified number of '
'processes. If "auto", all the cores on the machine will be '
'used.'))
boolean_options = SphinxBuildDoc.boolean_options[:]
boolean_options.append('warnings-returncode')
boolean_options.append('clean-docs')
boolean_options.append('no-intersphinx')
boolean_options.append('open-docs-in-browser')
_self_iden_rex = re.compile(r"self\.([^\d\W][\w]+)", re.UNICODE)
def initialize_options(self):
SphinxBuildDoc.initialize_options(self)
self.clean_docs = False
self.no_intersphinx = False
self.open_docs_in_browser = False
self.warnings_returncode = False
self.traceback = False
self.parallel = None
def finalize_options(self):
# This has to happen before we call the parent class's finalize_options
if self.build_dir is None:
self.build_dir = 'docs/_build'
SphinxBuildDoc.finalize_options(self)
# Clear out previous sphinx builds, if requested
if self.clean_docs:
dirstorm = [os.path.join(self.source_dir, 'api'),
os.path.join(self.source_dir, 'generated')]
dirstorm.append(self.build_dir)
for d in dirstorm:
if os.path.isdir(d):
log.info('Cleaning directory ' + d)
shutil.rmtree(d)
else:
log.info('Not cleaning directory ' + d + ' because '
'not present or not a directory')
def run(self):
# TODO: Break this method up into a few more subroutines and
# document them better
import webbrowser
from urllib.request import pathname2url
# This is used at the very end of `run` to decide if sys.exit should
# be called. If it's None, it won't be.
retcode = None
# Now make sure Astropy is built and determine where it was built
build_cmd = self.reinitialize_command('build')
build_cmd.inplace = 0
self.run_command('build')
build_cmd = self.get_finalized_command('build')
build_cmd_path = os.path.abspath(build_cmd.build_lib)
ah_importer = pkgutil.get_importer('astropy_helpers')
if ah_importer is None:
ah_path = '.'
else:
ah_path = os.path.abspath(ah_importer.path)
build_main = 'from sphinx.cmd.build import build_main'
# We need to make sure sphinx-astropy is installed
ensure_sphinx_astropy_installed()
sys_path_inserts = [build_cmd_path, ah_path]
sys_path_inserts = os.linesep.join(['sys.path.insert(0, {0!r})'.format(path) for path in sys_path_inserts])
argv = []
if self.warnings_returncode:
argv.append('-W')
if self.no_intersphinx:
argv.extend(['-D', 'disable_intersphinx=1'])
# We now need to adjust the flags based on the parent class's options
if self.fresh_env:
argv.append('-E')
if self.all_files:
argv.append('-a')
if getattr(self, 'pdb', False):
argv.append('-P')
if getattr(self, 'nitpicky', False):
argv.append('-n')
if self.traceback:
argv.append('-T')
# The default verbosity level is 1, so in that case we just don't add a flag
if self.verbose == 0:
argv.append('-q')
elif self.verbose > 1:
argv.append('-v')
if self.parallel is not None:
argv.append(f'-j={self.parallel}')
if isinstance(self.builder, str):
builders = [self.builder]
else:
builders = self.builder
subproccode = SUBPROCESS_TEMPLATE.format(build_main=build_main,
srcdir=self.source_dir,
sys_path_inserts=sys_path_inserts,
builders=builders,
argv=argv,
output_dir=os.path.abspath(self.build_dir))
log.debug('Starting subprocess of {0} with python code:\n{1}\n'
'[CODE END])'.format(sys.executable, subproccode))
proc = subprocess.Popen([sys.executable], stdin=subprocess.PIPE)
proc.communicate(subproccode.encode('utf-8'))
if proc.returncode != 0:
retcode = proc.returncode
if retcode is None:
if self.open_docs_in_browser:
if self.builder == 'html':
absdir = os.path.abspath(self.builder_target_dir)
index_path = os.path.join(absdir, 'index.html')
fileurl = 'file://' + pathname2url(index_path)
webbrowser.open(fileurl)
else:
log.warn('open-docs-in-browser option was given, but '
'the builder is not html! Ignoring.')
# Here we explicitly check proc.returncode since we only want to output
# this for cases where the return code really wasn't 0.
if proc.returncode:
log.warn('Sphinx Documentation subprocess failed with return '
'code ' + str(proc.returncode))
if retcode is not None:
# this is potentially dangerous in that there might be something
# after the call to `setup` in `setup.py`, and exiting here will
# prevent that from running. But there's no other apparent way
# to signal what the return code should be.
sys.exit(retcode)
class AstropyBuildSphinx(AstropyBuildDocs): # pragma: no cover
def run(self):
AstropyBuildDocs.run(self)
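# Illustrative usage sketch (assumed invocation, not from the original file):
# when a project's setup.py registers this command, e.g. with
# cmdclass={'build_docs': AstropyBuildDocs}, the options declared above can be
# passed on the command line:
#
#     python setup.py build_docs --clean-docs --warnings-returncode --parallel=auto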
|
astropy/astropy-helpers
|
astropy_helpers/commands/build_sphinx.py
|
Python
|
bsd-3-clause
| 8,176
|
# ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
from __future__ import absolute_import, division, print_function
import logging
import numpy as np
from numpy.testing import assert_equal, assert_array_almost_equal
import pytest
import skbeam.core.utils as utils
from skbeam.core.correlation import (multi_tau_auto_corr,
auto_corr_scat_factor,
lazy_one_time,
lazy_two_time, two_time_corr,
two_time_state_to_results,
one_time_from_two_time,
CrossCorrelator)
from skbeam.core.mask import bad_to_nan_gen
from skbeam.core.roi import ring_edges, segmented_rings
logger = logging.getLogger(__name__)
def setup():
global num_levels, num_bufs, xdim, ydim, stack_size, img_stack, rois
num_levels = 6
num_bufs = 4 # must be even
xdim = 256
ydim = 512
stack_size = 100
img_stack = np.random.randint(1, 3, (stack_size, xdim, ydim))
rois = np.zeros_like(img_stack[0])
# make sure that the ROIs can be any integers greater than 1.
# They do not have to start at 1 and be continuous
rois[0:xdim//10, 0:ydim//10] = 5
rois[xdim//10:xdim//5, ydim//10:ydim//5] = 3
def test_lazy_vs_original():
setup()
# run the correlation on the full stack
full_gen_one = lazy_one_time(
img_stack, num_levels, num_bufs, rois)
for gen_state_one in full_gen_one:
pass
g2, lag_steps = multi_tau_auto_corr(num_levels, num_bufs,
rois, img_stack)
assert np.all(g2 == gen_state_one.g2)
assert np.all(lag_steps == gen_state_one.lag_steps)
full_gen_two = lazy_two_time(rois, img_stack, stack_size,
num_bufs, num_levels)
for gen_state_two in full_gen_two:
pass
final_gen_result_two = two_time_state_to_results(gen_state_two)
two_time = two_time_corr(rois, img_stack, stack_size,
num_bufs, num_levels)
assert np.all(two_time[0] == final_gen_result_two.g2)
assert np.all(two_time[1] == final_gen_result_two.lag_steps)
def test_lazy_two_time():
setup()
# run the correlation on the full stack
full_gen = lazy_two_time(rois, img_stack, stack_size,
stack_size, 1)
for full_state in full_gen:
pass
final_result = two_time_state_to_results(full_state)
# make sure we have essentially zero correlation in the images,
# since they are random integers
assert np.average(final_result.g2-1) < 0.01
# run the correlation on the first half
gen_first_half = lazy_two_time(rois, img_stack[:stack_size//2], stack_size,
num_bufs=stack_size, num_levels=1)
for first_half_state in gen_first_half:
pass
# run the correlation on the second half by passing in the state from the
# first half
gen_second_half = lazy_two_time(rois, img_stack[stack_size//2:],
stack_size, num_bufs=stack_size,
num_levels=1,
two_time_internal_state=first_half_state)
for second_half_state in gen_second_half:
pass
result = two_time_state_to_results(second_half_state)
assert np.all(full_state.g2 == result.g2)
def test_lazy_one_time():
setup()
# run the correlation on the full stack
full_gen = lazy_one_time(img_stack, num_levels, num_bufs, rois)
for full_result in full_gen:
pass
# make sure we have essentially zero correlation in the images,
# since they are random integers
assert np.average(full_result.g2-1) < 0.01
# run the correlation on the first half
gen_first_half = lazy_one_time(
img_stack[:stack_size//2], num_levels, num_bufs, rois)
for first_half_result in gen_first_half:
pass
# run the correlation on the second half by passing in the state from the
# first half
gen_second_half = lazy_one_time(
img_stack[stack_size//2:], num_levels, num_bufs, rois,
internal_state=first_half_result.internal_state
)
for second_half_result in gen_second_half:
pass
assert np.all(full_result.g2 ==
second_half_result.g2)
def test_two_time_corr():
setup()
y = []
for i in range(50):
y.append(img_stack[0])
two_time = two_time_corr(rois, np.asarray(y), 50,
num_bufs=50, num_levels=1)
assert np.all(two_time[0])
# check the number of buffers are even
with pytest.raises(ValueError):
two_time_corr(rois, np.asarray(y), 50, num_bufs=25, num_levels=1)
def test_auto_corr_scat_factor():
num_levels, num_bufs = 3, 4
tot_channels, lags, dict_lags = utils.multi_tau_lags(num_levels, num_bufs)
beta = 0.5
relaxation_rate = 10.0
baseline = 1.0
g2 = auto_corr_scat_factor(lags, beta, relaxation_rate, baseline)
assert_array_almost_equal(g2, np.array([1.5, 1.0, 1.0, 1.0, 1.0,
1.0, 1.0, 1.0]), decimal=8)
def test_bad_images():
setup()
g2, lag_steps = multi_tau_auto_corr(4, num_bufs,
rois, img_stack)
# introduce bad images
bad_img_list = [3, 21, 35, 48]
# convert each bad image to np.nan array
images = bad_to_nan_gen(img_stack, bad_img_list)
# then use new images (including bad images)
g2_n, lag_steps_n = multi_tau_auto_corr(4, num_bufs,
rois, images)
assert_array_almost_equal(g2[:, 0], g2_n[:, 0], decimal=3)
assert_array_almost_equal(g2[:, 1], g2_n[:, 1], decimal=3)
def test_one_time_from_two_time():
num_lev = 1
num_buf = 10 # must be even
x_dim = 10
y_dim = 10
stack = 10
imgs = np.random.randint(1, 3, (stack, x_dim, y_dim))
roi = np.zeros_like(imgs[0])
# make sure that the ROIs can be any integers greater than 1.
# They do not have to start at 1 and be continuous
roi[0:x_dim//10, 0:y_dim//10] = 5
roi[x_dim//10:x_dim//5, y_dim//10:y_dim//5] = 3
g2, lag_steps, _state = two_time_corr(roi, imgs, stack,
num_buf, num_lev)
one_time = one_time_from_two_time(g2)
assert_array_almost_equal(one_time[0, :], np.array([1.0, 0.9, 0.8, 0.7,
0.6, 0.5, 0.4, 0.3,
0.2, 0.1]))
@pytest.mark.skipif(int(np.__version__.split('.')[1]) > 14, reason="Test is numerically unstable")
def test_CrossCorrelator1d():
    ''' Test the 1d version of the cross correlator with these settings:
        - normalization='regular', no mask
        - normalization='regular', masked
        - normalization='symavg', no mask
        - normalization='symavg', masked
    '''
np.random.seed(123)
# test 1D data
sigma = .1
Npoints = 100
x = np.linspace(-10, 10, Npoints)
sigma = .2
# purposely have sparsely filled values (with lots of zeros)
peak_positions = (np.random.random(10)-.5)*20
y = np.zeros_like(x)
for peak_position in peak_positions:
y += np.exp(-(x-peak_position)**2/2./sigma**2)
mask_1D = np.ones_like(y)
mask_1D[10:20] = 0
mask_1D[60:90] = 0
mask_1D[111:137] = 0
mask_1D[211:237] = 0
mask_1D[411:537] = 0
mask_1D *= mask_1D[::-1]
cc1D = CrossCorrelator(mask_1D.shape)
cc1D_symavg = CrossCorrelator(mask_1D.shape, normalization='symavg')
cc1D_masked = CrossCorrelator(mask_1D.shape, mask=mask_1D)
cc1D_masked_symavg = CrossCorrelator(mask_1D.shape, mask=mask_1D,
normalization='symavg')
assert_equal(cc1D.nids, 1)
ycorr_1D = cc1D(y)
ycorr_1D_masked = cc1D_masked(y*mask_1D)
ycorr_1D_symavg = cc1D_symavg(y)
ycorr_1D_masked_symavg = cc1D_masked_symavg(y*mask_1D)
assert_array_almost_equal(ycorr_1D[::20],
np.array([-1.155123e-14, 6.750373e-03,
6.221636e-01, 7.105527e-01,
1.187275e+00, 2.984563e+00,
1.092725e+00, 1.198341e+00,
1.045922e-01, 5.451511e-06]))
assert_array_almost_equal(ycorr_1D_masked[::20],
np.array([-5.172377e-16, np.nan, 7.481473e-01,
6.066887e-02, 4.470989e-04,
2.330335e+00, np.nan, 7.109758e-01,
np.nan, 2.275846e-14]))
assert_array_almost_equal(ycorr_1D_symavg[::20],
np.array([-5.3002753, 1.54268227, 0.86220476,
0.57715207, 0.86503802, 2.94383202,
0.7587901, 0.99763715, 0.16800951,
1.23506293]))
assert_array_almost_equal(ycorr_1D_masked_symavg[::20][:-1],
np.array([-5.30027530e-01, np.nan,
1.99940257e+00, 7.33127871e-02,
1.00000000e+00, 2.15887870e+00,
np.nan, 9.12832602e-01,
np.nan]))
def test_CrossCorrelator2d():
''' Test the 2D case of the cross correlator.
With non-binary labels.
'''
np.random.seed(123)
# test 2D data
Npoints2 = 10
x2 = np.linspace(-10, 10, Npoints2)
X, Y = np.meshgrid(x2, x2)
Z = np.random.random((Npoints2, Npoints2))
np.random.seed(123)
sigma = .2
# purposely have sparsely filled values (with lots of zeros)
# place peaks in random positions
peak_positions = (np.random.random((2, 10))-.5)*20
for peak_position in peak_positions:
Z += np.exp(-((X - peak_position[0])**2 +
(Y - peak_position[1])**2)/2./sigma**2)
mask_2D = np.ones_like(Z)
mask_2D[1:2, 1:2] = 0
mask_2D[7:9, 4:6] = 0
mask_2D[1:2, 9:] = 0
# Compute with segmented rings
edges = ring_edges(1, 3, num_rings=2)
segments = 5
x0, y0 = np.array(mask_2D.shape)//2
maskids = segmented_rings(edges, segments, (y0, x0), mask_2D.shape)
cc2D_ids = CrossCorrelator(mask_2D.shape, mask=maskids)
cc2D_ids_symavg = CrossCorrelator(mask_2D.shape, mask=maskids,
normalization='symavg')
# 10 ids
assert_equal(cc2D_ids.nids, 10)
ycorr_ids_2D = cc2D_ids(Z)
ycorr_ids_2D_symavg = cc2D_ids_symavg(Z)
index = 0
ycorr_ids_2D[index][ycorr_ids_2D[index].shape[0]//2]
assert_array_almost_equal(ycorr_ids_2D[index]
[ycorr_ids_2D[index].shape[0]//2],
np.array([1.22195059, 1.08685771,
1.43246508, 1.08685771, 1.22195059
])
)
index = 1
ycorr_ids_2D[index][ycorr_ids_2D[index].shape[0]//2]
assert_array_almost_equal(ycorr_ids_2D[index]
[ycorr_ids_2D[index].shape[0]//2],
np.array([1.24324268, 0.80748997,
1.35790022, 0.80748997, 1.24324268
])
)
index = 0
ycorr_ids_2D_symavg[index][ycorr_ids_2D[index].shape[0]//2]
assert_array_almost_equal(ycorr_ids_2D_symavg[index]
[ycorr_ids_2D[index].shape[0]//2],
np.array([0.84532695, 1.16405848, 1.43246508,
1.16405848, 0.84532695])
)
index = 1
ycorr_ids_2D_symavg[index][ycorr_ids_2D[index].shape[0]//2]
assert_array_almost_equal(ycorr_ids_2D_symavg[index]
[ycorr_ids_2D[index].shape[0]//2],
np.array([0.94823482, 0.8629459, 1.35790022,
0.8629459, 0.94823482])
)
def test_CrossCorrelator_badinputs():
with pytest.raises(ValueError):
CrossCorrelator((1, 1, 1))
with pytest.raises(ValueError):
cc = CrossCorrelator((10, 10))
a = np.ones((10, 11))
cc(a)
with pytest.raises(ValueError):
cc = CrossCorrelator((10, 10))
a = np.ones((10, 10))
a2 = np.ones((10, 11))
cc(a, a2)
|
tacaswell/scikit-xray
|
skbeam/core/tests/test_correlation.py
|
Python
|
bsd-3-clause
| 15,145
|
#!/usr/bin/env python
import os
import sys
import numpy as np
from ..galaxymakers import ExpNGMixGalaxyMaker
from ..medsmakers import MemoryMEDSMaker
import fitsio
import copy
class BlendedPairMEDSMaker(object):
def __init__(self,Np,Npsf=25,**kwargs):
if 'seed' in kwargs:
self.rs = np.random.RandomState(seed=kwargs['seed'])
else:
self.rs = np.random.RandomState()
if 'noise_obj' not in kwargs:
kwargs['noise_obj'] = 1e-4
gm_pars = {}
gm_pars.update(kwargs)
if 'noise_obj' in gm_pars:
del gm_pars['noise_obj']
gm_pars['noise_obj'] = 0.0
if 'noise_psf' in gm_pars:
del gm_pars['noise_psf']
gm_pars['noise_psf'] = 0.0
self.gm = ExpNGMixGalaxyMaker()
self.gm.set_params(**gm_pars)
self.Npsf = Npsf
self.Np = Np
self.Nobj = Np*2
self.noise_obj = kwargs['noise_obj']
if 'minoff' not in kwargs:
kwargs['minoff'] = 4
if 'maxoff' not in kwargs:
kwargs['maxoff'] = 4
self.minoff = kwargs['minoff']
self.maxoff = kwargs['maxoff']
self.tail = 'data'
def make_data(self):
self.make_psfs()
self.make_gals()
self.make_nbrs_fofs()
self.do_blends()
def write_data(self):
fitsio.write('psf_%s.fits'%self.tail,self.psf_data,clobber=True)
fitsio.write('obj_%s.fits'%self.tail,self.obj_data,clobber=True)
fitsio.write('nbrs_%s.fits'%self.tail,self.nbrs,clobber=True)
fitsio.write('fof_%s.fits'%self.tail,self.fofs,clobber=True)
self.mm.write('meds_%s.fits' % self.tail)
self.mm.fpack()
os.remove('meds_%s.fits' % self.tail)
self.bmm.write('blended_meds_%s.fits' % self.tail)
self.bmm.fpack()
os.remove('blended_meds_%s.fits' % self.tail)
def do_blends(self):
# do blends
noise_obj = self.noise_obj
self.mm = MemoryMEDSMaker(extra_percutout_data=[('ind_psf','i8')])
self.bmm = MemoryMEDSMaker(extra_percutout_data=[('ind_psf','i8')])
for i in xrange(self.Np):
g1 = self.gals[i*2]
g2 = self.gals[i*2+1]
maxoff = self.maxoff
minoff = self.minoff
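            # Draw a random pixel offset along each axis, force its magnitude to
            # be at least minoff, and flip its sign with 50% probability so the
            # second galaxy lands at a random position relative to the first.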
off_x = self.rs.choice(maxoff,size=1,replace=True)[0]
off_y = self.rs.choice(maxoff,size=1,replace=True)[0]
if off_x < minoff:
off_x = minoff
if off_y < minoff:
off_y = minoff
if self.rs.uniform() > 0.5:
off_x *= -1
if self.rs.uniform() > 0.5:
off_y *= -1
minx = min([0,off_x])
maxx = max([g1.image.shape[0],g2.image.shape[0]+off_x])
miny = min([0,off_y])
maxy = max([g1.image.shape[1],g2.image.shape[1]+off_y])
sze = max([maxx-minx,maxy-miny])
imtot = np.zeros((sze,sze))
if off_x < 0:
xl1 = abs(off_x)
else:
xl1 = 0
if off_y < 0:
yl1 = abs(off_y)
else:
yl1 = 0
imtot[xl1:xl1+g1.image.shape[0],yl1:yl1+g1.image.shape[1]] = g1.image
if off_x < 0:
xl2 = 0
else:
xl2 = off_x
if off_y < 0:
yl2 = 0
else:
yl2 = off_y
imtot[xl2:xl2+g2.image.shape[0],yl2:yl2+g2.image.shape[1]] += g2.image
nse = self.rs.normal(size=imtot.shape)*self.noise_obj
imtot += nse
cens = [[xl1+g1.image.shape[0]/2.0,yl1+g1.image.shape[1]/2.0],
[xl2+g2.image.shape[0]/2.0,yl2+g2.image.shape[1]/2.0]]
nums = [i*2+1,i*2+2]
seg = get_seg(imtot,self.noise_obj,10.0,cens,nums)
fitsio.write('images%d_%s.fits' % (i,self.tail),imtot,clobber=True)
fitsio.write('images%d_%s.fits' % (i,self.tail),seg)
bobjinfo1 = dict(id=i*2,number=i*2+1,
orig_row=xl1+g1.image.shape[0]/2.0,
orig_col=yl1+g1.image.shape[1]/2.0,
orig_start_row=xl1,
orig_start_col=yl1,
dudrow=1.0,
dudcol=0.0,
dvdrow=0.0,
dvdcol=1.0,
cutout_row=g1.image.shape[0]/2.0,
cutout_col=g1.image.shape[1]/2.0,
ind_psf=g1.meta['ind_psf'])
self.bmm.add_object(bobjinfo1,[imtot[xl1:xl1+g1.image.shape[0],yl1:yl1+g1.image.shape[1]]],
[g1.image*0.0 + 1.0/noise_obj/noise_obj],
[seg[xl1:xl1+g1.image.shape[0],yl1:yl1+g1.image.shape[1]]])
objinfo1 = dict(id=i*2,number=i*2+1,
orig_row=g1.image.shape[0]/2.0,
orig_col=g1.image.shape[1]/2.0,
orig_start_row=0,
orig_start_col=0,
dudrow=1.0,
dudcol=0.0,
dvdrow=0.0,
dvdcol=1.0,
cutout_row=g1.image.shape[0]/2.0,
cutout_col=g1.image.shape[1]/2.0,
ind_psf=g1.meta['ind_psf'])
self.mm.add_object(objinfo1,[g1.image+nse[xl1:xl1+g1.image.shape[0],yl1:yl1+g1.image.shape[1]]],
[g1.image*0.0 + 1.0/noise_obj/noise_obj],
[np.zeros(g1.image.shape,dtype='i4')+i*2+1])
bobjinfo2 = dict(id=i*2+1,number=i*2+1+1,
orig_row=xl2+g2.image.shape[0]/2.0,
orig_col=yl2+g2.image.shape[1]/2.0,
orig_start_row=xl2,
orig_start_col=yl2,
dudrow=1.0,
dudcol=0.0,
dvdrow=0.0,
dvdcol=1.0,
cutout_row=g2.image.shape[0]/2.0,
cutout_col=g2.image.shape[1]/2.0,
ind_psf=g2.meta['ind_psf'])
self.bmm.add_object(bobjinfo2,[imtot[xl2:xl2+g2.image.shape[0],yl2:yl2+g2.image.shape[1]]],
[g2.image*0.0 + 1.0/noise_obj/noise_obj],
[seg[xl2:xl2+g2.image.shape[0],yl2:yl2+g2.image.shape[1]]])
objinfo2 = dict(id=i*2+1,number=i*2+1+1,
orig_row=g2.image.shape[0]/2.0,
orig_col=g2.image.shape[1]/2.0,
orig_start_row=0,
orig_start_col=0,
dudrow=1.0,
dudcol=0.0,
dvdrow=0.0,
dvdcol=1.0,
cutout_row=g2.image.shape[0]/2.0,
cutout_col=g2.image.shape[1]/2.0,
ind_psf=g2.meta['ind_psf'])
self.mm.add_object(objinfo2,[g2.image+nse[xl2:xl2+g2.image.shape[0],yl2:yl2+g2.image.shape[1]]],
[g2.image*0.0 + 1.0/noise_obj/noise_obj],
[np.zeros(g2.image.shape,dtype='i4')+i*2+1+1])
def make_gals(self,verbose=False):
gals = []
d =[]
if verbose:
import progressbar
bar = progressbar.ProgressBar(maxval=self.Nobj,widgets=[progressbar.Bar(marker='|',left='doing work: |',right=''),' ',progressbar.Percentage(),' ',progressbar.AdaptiveETA()])
bar.start()
for i in xrange(self.Nobj):
if verbose:
bar.update(i+1)
psf_ind = self.rs.choice(self.Npsf,size=1,replace=True)
obs = self.gm.get_galaxy(pars_psf=self.psf_data['pars'][psf_ind[0]])
meta = dict(ind_psf=psf_ind)
obs.update_meta_data(meta)
gals.append(obs)
d.append((i,i+1,obs.meta['pars_obj']))
if verbose:
bar.finish()
d = np.array(d,dtype=[('id','i8'),('number','i4'),('pars','f8',len(obs.meta['pars_obj']))])
self.obj_data = d
self.gals = gals
def make_nbrs_fofs(self):
fofs = []
nbrs = []
for i in xrange(self.Np):
fofs.append((i,i*2+1))
fofs.append((i,i*2+2))
nbrs.append((i*2+1,i*2+2))
nbrs.append((i*2+2,i*2+1))
self.fofs = np.array(fofs,dtype=[('fofid','i8'),('number','i4')])
self.nbrs = np.array(nbrs,dtype=[('number','i4'),('nbr_number','i4')])
def make_psfs(self):
ms = -np.inf
psf_data = []
for i in xrange(self.Npsf):
obs = self.gm.get_galaxy()
im_psf = obs.get_psf().image
psf_data.append((im_psf,obs.meta['pars_psf']))
if im_psf.shape[0] > ms:
ms = im_psf.shape[0]
assert im_psf.shape[0] == ms
psf_data = np.array(psf_data,dtype=[('im','f8',(ms,ms)),('pars','f8',len(obs.meta['pars_psf']))])
self.psf_data = psf_data
def get_seg(im,sigma,nsigma,cens,nums):
"""
    Simple code to make a pseudo-seg map: each pixel above sigma*nsigma is
    assigned the number (from ``nums``) of the nearest center in ``cens``.
"""
xc = []
yc = []
for ceni in cens:
xc.append(ceni[0])
yc.append(ceni[1])
xc = np.array(xc)
yc = np.array(yc)
seg = np.zeros_like(im,dtype='i4')
qx,qy = np.where(im > sigma*nsigma)
for i,j in zip(qx,qy):
d2 = (i*1.0-xc)**2 + (j*1.0-yc)**2.0
q = np.argmin(d2)
seg[i,j] = nums[q]
return seg
|
esheldon/egret
|
egret/blendmakers/medspairblendmaker.py
|
Python
|
bsd-3-clause
| 9,993
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
import itertools
import warnings
from django.conf import settings
from django.contrib import admin
from django.contrib.auth.models import Permission
from django.contrib.messages.storage import default_storage
from django.core.cache import cache
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.db import models
from django.http import HttpResponseForbidden, HttpResponse
from django.template import TemplateSyntaxError, Template
from django.template.context import Context, RequestContext
from django.test import TestCase
from django.utils.numberformat import format
from djangocms_link.cms_plugins import LinkPlugin
from djangocms_text_ckeditor.cms_plugins import TextPlugin
from djangocms_text_ckeditor.models import Text
from sekizai.context import SekizaiContext
from cms import constants
from cms.admin.placeholderadmin import PlaceholderAdmin, PlaceholderAdminMixin
from cms.api import add_plugin, create_page, create_title
from cms.exceptions import DuplicatePlaceholderWarning
from cms.models.fields import PlaceholderField
from cms.models.placeholdermodel import Placeholder
from cms.plugin_pool import plugin_pool
from cms.plugin_rendering import render_placeholder
from cms.test_utils.fixtures.fakemlng import FakemlngFixtures
from cms.test_utils.project.fakemlng.models import Translations
from cms.test_utils.project.objectpermissionsapp.models import UserObjectPermission
from cms.test_utils.project.placeholderapp.models import (
Example1,
TwoPlaceholderExample,
DynamicPlaceholderSlotExample,
MultilingualExample1
)
from cms.test_utils.testcases import CMSTestCase
from cms.test_utils.util.context_managers import (SettingsOverride, UserLoginContext)
from cms.test_utils.util.mock import AttributeObject
from cms.toolbar.toolbar import CMSToolbar
from cms.utils.compat.dj import force_unicode, get_user_model
from cms.utils.compat.tests import UnittestCompatMixin
from cms.utils.conf import get_cms_setting
from cms.utils.placeholder import PlaceholderNoAction, MLNGPlaceholderActions, get_placeholder_conf
from cms.utils.plugins import get_placeholders, assign_plugins
class PlaceholderTestCase(CMSTestCase, UnittestCompatMixin):
def setUp(self):
u = self._create_user("test", True, True)
self._login_context = self.login_user_context(u)
self._login_context.__enter__()
def tearDown(self):
self._login_context.__exit__(None, None, None)
def test_placeholder_scanning_extend(self):
placeholders = get_placeholders('placeholder_tests/test_one.html')
self.assertEqual(sorted(placeholders), sorted([u'new_one', u'two', u'three']))
def test_placeholder_scanning_sekizai_extend(self):
placeholders = get_placeholders('placeholder_tests/test_one_sekizai.html')
self.assertEqual(sorted(placeholders), sorted([u'new_one', u'two', u'three']))
def test_placeholder_scanning_include(self):
placeholders = get_placeholders('placeholder_tests/test_two.html')
self.assertEqual(sorted(placeholders), sorted([u'child', u'three']))
def test_placeholder_scanning_double_extend(self):
placeholders = get_placeholders('placeholder_tests/test_three.html')
self.assertEqual(sorted(placeholders), sorted([u'new_one', u'two', u'new_three']))
def test_placeholder_scanning_sekizai_double_extend(self):
placeholders = get_placeholders('placeholder_tests/test_three_sekizai.html')
self.assertEqual(sorted(placeholders), sorted([u'new_one', u'two', u'new_three']))
def test_placeholder_scanning_complex(self):
placeholders = get_placeholders('placeholder_tests/test_four.html')
self.assertEqual(sorted(placeholders), sorted([u'new_one', u'child', u'four']))
def test_placeholder_scanning_super(self):
placeholders = get_placeholders('placeholder_tests/test_five.html')
self.assertEqual(sorted(placeholders), sorted([u'one', u'extra_one', u'two', u'three']))
def test_placeholder_scanning_nested(self):
placeholders = get_placeholders('placeholder_tests/test_six.html')
self.assertEqual(sorted(placeholders), sorted([u'new_one', u'new_two', u'new_three']))
def test_placeholder_scanning_duplicate(self):
placeholders = self.assertWarns(DuplicatePlaceholderWarning,
'Duplicate {% placeholder "one" %} in template placeholder_tests/test_seven.html.',
get_placeholders, 'placeholder_tests/test_seven.html')
self.assertEqual(sorted(placeholders), sorted([u'one']))
def test_placeholder_scanning_extend_outside_block(self):
placeholders = get_placeholders('placeholder_tests/outside.html')
self.assertEqual(sorted(placeholders), sorted([u'new_one', u'two', u'base_outside']))
def test_placeholder_scanning_sekizai_extend_outside_block(self):
placeholders = get_placeholders('placeholder_tests/outside_sekizai.html')
self.assertEqual(sorted(placeholders), sorted([u'new_one', u'two', u'base_outside']))
def test_placeholder_scanning_extend_outside_block_nested(self):
placeholders = get_placeholders('placeholder_tests/outside_nested.html')
self.assertEqual(sorted(placeholders), sorted([u'new_one', u'two', u'base_outside']))
def test_placeholder_scanning_sekizai_extend_outside_block_nested(self):
placeholders = get_placeholders('placeholder_tests/outside_nested_sekizai.html')
self.assertEqual(sorted(placeholders), sorted([u'new_one', u'two', u'base_outside']))
def test_fieldsets_requests(self):
response = self.client.get(reverse('admin:placeholderapp_example1_add'))
self.assertEqual(response.status_code, 200)
response = self.client.get(reverse('admin:placeholderapp_twoplaceholderexample_add'))
self.assertEqual(response.status_code, 200)
def test_page_only_plugins(self):
ex = Example1(
char_1='one',
char_2='two',
char_3='tree',
char_4='four'
)
ex.save()
response = self.client.get(reverse('admin:placeholderapp_example1_change', args=(ex.pk,)))
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, 'InheritPagePlaceholderPlugin')
def test_inter_placeholder_plugin_move(self):
ex = TwoPlaceholderExample(
char_1='one',
char_2='two',
char_3='tree',
char_4='four'
)
ex.save()
ph1 = ex.placeholder_1
ph2 = ex.placeholder_2
ph1_pl1 = add_plugin(ph1, TextPlugin, 'en', body='ph1 plugin1').cmsplugin_ptr
ph1_pl2 = add_plugin(ph1, TextPlugin, 'en', body='ph1 plugin2').cmsplugin_ptr
ph1_pl3 = add_plugin(ph1, TextPlugin, 'en', body='ph1 plugin3').cmsplugin_ptr
ph2_pl1 = add_plugin(ph2, TextPlugin, 'en', body='ph2 plugin1').cmsplugin_ptr
ph2_pl2 = add_plugin(ph2, TextPlugin, 'en', body='ph2 plugin2').cmsplugin_ptr
ph2_pl3 = add_plugin(ph2, TextPlugin, 'en', body='ph2 plugin3').cmsplugin_ptr
response = self.client.post(reverse('admin:placeholderapp_twoplaceholderexample_move_plugin'), {
'placeholder_id': str(ph2.pk),
'plugin_id': str(ph1_pl2.pk),
'plugin_order[]': [str(p.pk) for p in [ph2_pl3, ph2_pl1, ph2_pl2, ph1_pl2]]
})
self.assertEqual(response.status_code, 200)
self.assertEqual([ph1_pl1, ph1_pl3], list(ph1.cmsplugin_set.order_by('position')))
self.assertEqual([ph2_pl3, ph2_pl1, ph2_pl2, ph1_pl2, ], list(ph2.cmsplugin_set.order_by('position')))
def test_nested_plugin_escapejs(self):
"""
Checks #1366 error condition.
When adding/editing a plugin whose icon_src() method returns a URL
        containing a hyphen, the hyphen is escaped by django's escapejs, resulting
        in an incorrect URL
"""
with SettingsOverride(CMS_PERMISSION=False):
ex = Example1(
char_1='one',
char_2='two',
char_3='tree',
char_4='four'
)
ex.save()
ph1 = ex.placeholder
###
# add the test plugin
###
test_plugin = add_plugin(ph1, u"EmptyPlugin", u"en")
test_plugin.save()
pl_url = "%sedit-plugin/%s/" % (
reverse('admin:placeholderapp_example1_change', args=(ex.pk,)),
test_plugin.pk)
response = self.client.post(pl_url, {})
self.assertContains(response, "CMS.API.Helpers.reloadBrowser")
def test_nested_plugin_escapejs_page(self):
"""
Sibling test of the above, on a page.
        #1366 does not apply to placeholders defined in a page
"""
with SettingsOverride(CMS_PERMISSION=False):
page = create_page('page', 'col_two.html', 'en')
ph1 = page.placeholders.get(slot='col_left')
###
# add the test plugin
###
test_plugin = add_plugin(ph1, u"EmptyPlugin", u"en")
test_plugin.save()
pl_url = "%sedit-plugin/%s/" % (
reverse('admin:cms_page_change', args=(page.pk,)),
test_plugin.pk)
response = self.client.post(pl_url, {})
self.assertContains(response, "CMS.API.Helpers.reloadBrowser")
def test_placeholder_scanning_fail(self):
self.assertRaises(TemplateSyntaxError, get_placeholders, 'placeholder_tests/test_eleven.html')
def test_placeholder_tag(self):
template = Template("{% load cms_tags %}{% render_placeholder placeholder %}")
ctx = Context()
self.assertEqual(template.render(ctx), "")
request = self.get_request('/')
rctx = RequestContext(request)
self.assertEqual(template.render(rctx), "")
placeholder = Placeholder.objects.create(slot="test")
rctx['placeholder'] = placeholder
self.assertEqual(template.render(rctx), "")
self.assertEqual(placeholder.cmsplugin_set.count(), 0)
add_plugin(placeholder, "TextPlugin", settings.LANGUAGES[0][0], body="test")
self.assertEqual(placeholder.cmsplugin_set.count(), 1)
rctx = RequestContext(request)
placeholder = self.reload(placeholder)
rctx['placeholder'] = placeholder
self.assertEqual(template.render(rctx).strip(), "test")
def test_placeholder_tag_language(self):
template = Template("{% load cms_tags %}{% render_placeholder placeholder language language %}")
placeholder = Placeholder.objects.create(slot="test")
add_plugin(placeholder, "TextPlugin", 'en', body="English")
add_plugin(placeholder, "TextPlugin", 'de', body="Deutsch")
request = self.get_request('/')
rctx = RequestContext(request)
rctx['placeholder'] = placeholder
rctx['language'] = 'en'
self.assertEqual(template.render(rctx).strip(), "English")
del placeholder._plugins_cache
rctx['language'] = 'de'
self.assertEqual(template.render(rctx).strip(), "Deutsch")
def test_get_placeholder_conf(self):
TEST_CONF = {
'main': {
'name': 'main content',
'plugins': ['TextPlugin', 'LinkPlugin'],
'default_plugins':[
{
'plugin_type':'TextPlugin',
'values':{
'body':'<p>Some default text</p>'
},
},
],
},
'layout/home.html main': {
'name': u'main content with FilerImagePlugin and limit',
'plugins': ['TextPlugin', 'FilerImagePlugin', 'LinkPlugin',],
'inherit':'main',
'limits': {'global': 1,},
},
'layout/other.html main': {
'name': u'main content with FilerImagePlugin and no limit',
'inherit':'layout/home.html main',
'limits': {},
},
}
with SettingsOverride(CMS_PLACEHOLDER_CONF=TEST_CONF):
#test no inheritance
returned = get_placeholder_conf('plugins', 'main')
self.assertEqual(returned, TEST_CONF['main']['plugins'])
#test no inherited value with inheritance enabled
returned = get_placeholder_conf('plugins', 'main', 'layout/home.html')
self.assertEqual(returned, TEST_CONF['layout/home.html main']['plugins'])
#test direct inherited value
returned = get_placeholder_conf('plugins', 'main', 'layout/other.html')
self.assertEqual(returned, TEST_CONF['layout/home.html main']['plugins'])
#test grandparent inherited value
returned = get_placeholder_conf('default_plugins', 'main', 'layout/other.html')
self.assertEqual(returned, TEST_CONF['main']['default_plugins'])
def test_placeholder_context_leaking(self):
TEST_CONF = {'test': {'extra_context': {'width': 10}}}
ph = Placeholder.objects.create(slot='test')
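        # A Context whose push/pop are no-ops, so variables added while rendering
        # the placeholder stay visible to the assertions below.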
class NoPushPopContext(Context):
def push(self):
pass
pop = push
context = NoPushPopContext()
context['request'] = self.get_request()
with SettingsOverride(CMS_PLACEHOLDER_CONF=TEST_CONF):
render_placeholder(ph, context)
self.assertTrue('width' in context)
self.assertEqual(context['width'], 10)
ph.render(context, None)
self.assertTrue('width' in context)
self.assertEqual(context['width'], 10)
def test_placeholder_scanning_nested_super(self):
placeholders = get_placeholders('placeholder_tests/nested_super_level1.html')
self.assertEqual(sorted(placeholders), sorted([u'level1', u'level2', u'level3', u'level4']))
def test_placeholder_field_no_related_name(self):
self.assertRaises(ValueError, PlaceholderField, 'placeholder', related_name='+')
def test_placeholder_field_valid_slotname(self):
self.assertRaises(ImproperlyConfigured, PlaceholderField, 10)
def test_placeholder_field_dynamic_slot_generation(self):
instance = DynamicPlaceholderSlotExample.objects.create(char_1='slot1', char_2='slot2')
self.assertEqual(instance.char_1, instance.placeholder_1.slot)
self.assertEqual(instance.char_2, instance.placeholder_2.slot)
def test_placeholder_field_dynamic_slot_update(self):
instance = DynamicPlaceholderSlotExample.objects.create(char_1='slot1', char_2='slot2')
# Plugin counts
old_placeholder_1_plugin_count = len(instance.placeholder_1.get_plugins())
old_placeholder_2_plugin_count = len(instance.placeholder_2.get_plugins())
# Switch around the slot names
instance.char_1, instance.char_2 = instance.char_2, instance.char_1
# Store the ids before save, to test that a new placeholder is NOT created.
placeholder_1_id = instance.placeholder_1.pk
placeholder_2_id = instance.placeholder_2.pk
# Save instance
instance.save()
current_placeholder_1_plugin_count = len(instance.placeholder_1.get_plugins())
current_placeholder_2_plugin_count = len(instance.placeholder_2.get_plugins())
# Now test that the placeholder slots have changed
self.assertEqual(instance.char_2, 'slot1')
self.assertEqual(instance.char_1, 'slot2')
# Test that a new placeholder was never created
self.assertEqual(instance.placeholder_1.pk, placeholder_1_id)
self.assertEqual(instance.placeholder_2.pk, placeholder_2_id)
# And test the plugin counts remain the same
self.assertEqual(old_placeholder_1_plugin_count, current_placeholder_1_plugin_count)
self.assertEqual(old_placeholder_2_plugin_count, current_placeholder_2_plugin_count)
def test_plugins_language_fallback(self):
""" Tests language_fallback placeholder configuration """
page_en = create_page('page_en', 'col_two.html', 'en')
title_de = create_title("de", "page_de", page_en)
placeholder_en = page_en.placeholders.get(slot='col_left')
placeholder_de = title_de.page.placeholders.get(slot='col_left')
add_plugin(placeholder_en, TextPlugin, 'en', body='en body')
class NoPushPopContext(SekizaiContext):
def push(self):
pass
pop = push
context_en = NoPushPopContext()
context_en['request'] = self.get_request(language="en", page=page_en)
context_de = NoPushPopContext()
context_de['request'] = self.get_request(language="de", page=page_en)
        # First test the default (non-fallback) behavior
## English page should have the text plugin
content_en = render_placeholder(placeholder_en, context_en)
self.assertRegexpMatches(content_en, "^en body$")
## Deutsch page should have no text
content_de = render_placeholder(placeholder_de, context_de)
self.assertNotRegex(content_de, "^en body$")
self.assertEqual(len(content_de), 0)
conf = {
'col_left': {
'language_fallback': True,
},
}
with SettingsOverride(CMS_PLACEHOLDER_CONF=conf):
## Deutsch page should have no text
del(placeholder_de._plugins_cache)
cache.clear()
content_de = render_placeholder(placeholder_de, context_de)
self.assertRegexpMatches(content_de, "^en body$")
context_de2 = NoPushPopContext()
request = self.get_request(language="de", page=page_en)
request.user = self.get_superuser()
request.toolbar = CMSToolbar(request)
request.toolbar.edit_mode = True
context_de2['request'] = request
del(placeholder_de._plugins_cache)
cache.clear()
content_de2 = render_placeholder(placeholder_de, context_de2)
self.assertFalse("en body" in content_de2)
# remove the cached plugins instances
del(placeholder_de._plugins_cache)
cache.clear()
# Then we add a plugin to check for proper rendering
add_plugin(placeholder_de, TextPlugin, 'de', body='de body')
content_de = render_placeholder(placeholder_de, context_de)
self.assertRegexpMatches(content_de, "^de body$")
def test_plugins_non_default_language_fallback(self):
""" Tests language_fallback placeholder configuration """
page_en = create_page('page_en', 'col_two.html', 'en')
create_title("de", "page_de", page_en)
placeholder_en = page_en.placeholders.get(slot='col_left')
placeholder_de = page_en.placeholders.get(slot='col_left')
add_plugin(placeholder_de, TextPlugin, 'de', body='de body')
class NoPushPopContext(Context):
def push(self):
pass
pop = push
context_en = NoPushPopContext()
context_en['request'] = self.get_request(language="en", page=page_en)
context_de = NoPushPopContext()
context_de['request'] = self.get_request(language="de", page=page_en)
        # First test the default (non-fallback) behavior
## Deutsch page should have the text plugin
content_de = render_placeholder(placeholder_en, context_de)
self.assertRegexpMatches(content_de, "^de body$")
del(placeholder_en._plugins_cache)
cache.clear()
## English page should have no text
content_en = render_placeholder(placeholder_en, context_en)
self.assertNotRegex(content_en, "^de body$")
self.assertEqual(len(content_en), 0)
del(placeholder_en._plugins_cache)
cache.clear()
conf = {
'col_left': {
'language_fallback': True,
},
}
with SettingsOverride(CMS_PLACEHOLDER_CONF=conf):
## English page should have deutsch text
content_en = render_placeholder(placeholder_en, context_en)
self.assertRegexpMatches(content_en, "^de body$")
# remove the cached plugins instances
del(placeholder_en._plugins_cache)
cache.clear()
# Then we add a plugin to check for proper rendering
add_plugin(placeholder_en, TextPlugin, 'en', body='en body')
content_en = render_placeholder(placeholder_en, context_en)
self.assertRegexpMatches(content_en, "^en body$")
def test_plugins_discarded_with_language_fallback(self):
"""
        Tests a side effect of language fallback: if a fallback-enabled
        placeholder existed, it discarded all other existing plugins
"""
page_en = create_page('page_en', 'col_two.html', 'en')
create_title("de", "page_de", page_en)
placeholder_sidebar_en = page_en.placeholders.get(slot='col_sidebar')
placeholder_en = page_en.placeholders.get(slot='col_left')
add_plugin(placeholder_sidebar_en, TextPlugin, 'en', body='en body')
class NoPushPopContext(Context):
def push(self):
pass
pop = push
context_en = NoPushPopContext()
context_en['request'] = self.get_request(language="en", page=page_en)
conf = {
'col_left': {
'language_fallback': True,
},
}
with SettingsOverride(CMS_PLACEHOLDER_CONF=conf):
# call assign plugins first, as this is what is done in real cms life
# for all placeholders in a page at once
assign_plugins(context_en['request'],
[placeholder_sidebar_en, placeholder_en], 'col_two.html')
            # check that the normal, non-fallback-enabled placeholder still has content
content_en = render_placeholder(placeholder_sidebar_en, context_en)
self.assertRegexpMatches(content_en, "^en body$")
# remove the cached plugins instances
del(placeholder_sidebar_en._plugins_cache)
cache.clear()
def test_plugins_prepopulate(self):
""" Tests prepopulate placeholder configuration """
class NoPushPopContext(Context):
def push(self):
pass
pop = push
conf = {
'col_left': {
'default_plugins' : [
{
'plugin_type':'TextPlugin',
'values':{'body':'<p>en default body 1</p>'},
},
{
'plugin_type':'TextPlugin',
'values':{'body':'<p>en default body 2</p>'},
},
]
},
}
with SettingsOverride(CMS_PLACEHOLDER_CONF=conf):
page = create_page('page_en', 'col_two.html', 'en')
placeholder = page.placeholders.get(slot='col_left')
context = NoPushPopContext()
context['request'] = self.get_request(language="en", page=page)
# Our page should have "en default body 1" AND "en default body 2"
content = render_placeholder(placeholder, context)
self.assertRegexpMatches(content, "^<p>en default body 1</p>\s*<p>en default body 2</p>$")
def test_plugins_children_prepopulate(self):
"""
Validate a default textplugin with a nested default link plugin
"""
class NoPushPopContext(Context):
def push(self):
pass
pop = push
conf = {
'col_left': {
'default_plugins': [
{
'plugin_type': 'TextPlugin',
'values': {
'body': '<p>body %(_tag_child_1)s and %(_tag_child_2)s</p>'
},
'children': [
{
'plugin_type': 'LinkPlugin',
'values': {
'name': 'django',
'url': 'https://www.djangoproject.com/'
},
},
{
'plugin_type': 'LinkPlugin',
'values': {
'name': 'django-cms',
'url': 'https://www.django-cms.org'
},
},
]
},
]
},
}
with SettingsOverride(CMS_PLACEHOLDER_CONF=conf):
page = create_page('page_en', 'col_two.html', 'en')
placeholder = page.placeholders.get(slot='col_left')
context = NoPushPopContext()
context['request'] = self.get_request(language="en", page=page)
render_placeholder(placeholder, context)
plugins = placeholder.get_plugins_list()
self.assertEqual(len(plugins), 3)
self.assertEqual(plugins[0].plugin_type, 'TextPlugin')
self.assertEqual(plugins[1].plugin_type, 'LinkPlugin')
self.assertEqual(plugins[2].plugin_type, 'LinkPlugin')
self.assertTrue(plugins[1].parent == plugins[2].parent and plugins[1].parent == plugins[0])
def test_placeholder_pk_thousands_format(self):
page = create_page("page", "nav_playground.html", "en", published=True)
for placeholder in page.placeholders.all():
page.placeholders.remove(placeholder)
placeholder.pk += 1000
placeholder.save()
page.placeholders.add(placeholder)
page.reload()
for placeholder in page.placeholders.all():
add_plugin(placeholder, "TextPlugin", "en", body="body",
id=placeholder.pk)
with SettingsOverride(USE_THOUSAND_SEPARATOR=True, USE_L10N=True):
# Superuser
user = self.get_superuser()
self.client.login(username=getattr(user, get_user_model().USERNAME_FIELD),
password=getattr(user, get_user_model().USERNAME_FIELD))
response = self.client.get("/en/?%s" % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON'))
for placeholder in page.placeholders.all():
self.assertContains(
response, "'placeholder_id': '%s'" % placeholder.pk)
self.assertNotContains(
response, "'placeholder_id': '%s'" % format(
placeholder.pk, ".", grouping=3, thousand_sep=","))
self.assertNotContains(
response, "'plugin_id': '%s'" % format(
placeholder.pk, ".", grouping=3, thousand_sep=","))
self.assertNotContains(
response, "'clipboard': '%s'" % format(
response.context['request'].toolbar.clipboard.pk, ".",
grouping=3, thousand_sep=","))
def test_placeholder_languages_model(self):
"""
Checks the retrieval of filled languages for a placeholder in a django
model
"""
avail_langs = set([u'en', u'de', u'fr'])
# Setup instance
ex = Example1(
char_1='one',
char_2='two',
char_3='tree',
char_4='four'
)
ex.save()
###
# add the test plugin
###
for lang in avail_langs:
add_plugin(ex.placeholder, u"EmptyPlugin", lang)
# reload instance from database
ex = Example1.objects.get(pk=ex.pk)
#get languages
langs = [lang['code'] for lang in ex.placeholder.get_filled_languages()]
self.assertEqual(avail_langs, set(langs))
def test_placeholder_languages_page(self):
"""
Checks the retrieval of filled languages for a placeholder in a django
model
"""
avail_langs = set([u'en', u'de', u'fr'])
# Setup instances
page = create_page('test page', 'col_two.html', u'en')
for lang in avail_langs:
if lang != u'en':
create_title(lang, 'test page %s' % lang, page)
placeholder = page.placeholders.get(slot='col_sidebar')
###
# add the test plugin
###
for lang in avail_langs:
add_plugin(placeholder, u"EmptyPlugin", lang)
# reload placeholder from database
placeholder = page.placeholders.get(slot='col_sidebar')
# get languages
langs = [lang['code'] for lang in placeholder.get_filled_languages()]
self.assertEqual(avail_langs, set(langs))
def test_deprecated_PlaceholderAdmin(self):
admin_site = admin.sites.AdminSite()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
pa = PlaceholderAdmin(Placeholder, admin_site)
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, DeprecationWarning))
self.assertTrue("PlaceholderAdminMixin with admin.ModelAdmin" in str(w[-1].message))
self.assertIsInstance(pa, admin.ModelAdmin, 'PlaceholderAdmin not admin.ModelAdmin')
self.assertIsInstance(pa, PlaceholderAdminMixin, 'PlaceholderAdmin not PlaceholderAdminMixin')
class PlaceholderActionTests(FakemlngFixtures, CMSTestCase):
def test_placeholder_no_action(self):
actions = PlaceholderNoAction()
self.assertEqual(actions.get_copy_languages(), [])
self.assertFalse(actions.copy())
def test_mlng_placeholder_actions_get_copy_languages(self):
actions = MLNGPlaceholderActions()
fr = Translations.objects.get(language_code='fr')
de = Translations.objects.get(language_code='de')
en = Translations.objects.get(language_code='en')
fieldname = 'placeholder'
fr_copy_languages = actions.get_copy_languages(
fr.placeholder, Translations, fieldname
)
de_copy_languages = actions.get_copy_languages(
de.placeholder, Translations, fieldname
)
en_copy_languages = actions.get_copy_languages(
en.placeholder, Translations, fieldname
)
EN = ('en', 'English')
FR = ('fr', 'French')
self.assertEqual(set(fr_copy_languages), set([EN]))
self.assertEqual(set(de_copy_languages), set([EN, FR]))
self.assertEqual(set(en_copy_languages), set([FR]))
def test_mlng_placeholder_actions_copy(self):
actions = MLNGPlaceholderActions()
fr = Translations.objects.get(language_code='fr')
de = Translations.objects.get(language_code='de')
self.assertEqual(fr.placeholder.cmsplugin_set.count(), 1)
self.assertEqual(de.placeholder.cmsplugin_set.count(), 0)
new_plugins = actions.copy(de.placeholder, 'fr', 'placeholder', Translations, 'de')
self.assertEqual(len(new_plugins), 1)
de = self.reload(de)
fr = self.reload(fr)
self.assertEqual(fr.placeholder.cmsplugin_set.count(), 1)
self.assertEqual(de.placeholder.cmsplugin_set.count(), 1)
def test_mlng_placeholder_actions_empty_copy(self):
actions = MLNGPlaceholderActions()
fr = Translations.objects.get(language_code='fr')
de = Translations.objects.get(language_code='de')
self.assertEqual(fr.placeholder.cmsplugin_set.count(), 1)
self.assertEqual(de.placeholder.cmsplugin_set.count(), 0)
new_plugins = actions.copy(fr.placeholder, 'de', 'placeholder', Translations, 'fr')
self.assertEqual(len(new_plugins), 0)
de = self.reload(de)
fr = self.reload(fr)
self.assertEqual(fr.placeholder.cmsplugin_set.count(), 1)
self.assertEqual(de.placeholder.cmsplugin_set.count(), 0)
def test_mlng_placeholder_actions_no_placeholder(self):
actions = MLNGPlaceholderActions()
Translations.objects.filter(language_code='nl').update(placeholder=None)
de = Translations.objects.get(language_code='de')
nl = Translations.objects.get(language_code='nl')
self.assertEqual(nl.placeholder, None)
self.assertEqual(de.placeholder.cmsplugin_set.count(), 0)
okay = actions.copy(de.placeholder, 'nl', 'placeholder', Translations, 'de')
self.assertEqual(okay, False)
de = self.reload(de)
nl = self.reload(nl)
nl = Translations.objects.get(language_code='nl')
de = Translations.objects.get(language_code='de')
class PlaceholderModelTests(CMSTestCase):
def get_mock_user(self, superuser):
return AttributeObject(
is_superuser=superuser,
has_perm=lambda string: False,
)
def get_mock_request(self, superuser=True):
return AttributeObject(
superuser=superuser,
user=self.get_mock_user(superuser)
)
def test_check_placeholder_permissions_ok_for_superuser(self):
ph = Placeholder.objects.create(slot='test', default_width=300)
result = ph.has_change_permission(self.get_mock_request(True))
self.assertTrue(result)
def test_check_placeholder_permissions_nok_for_user(self):
ph = Placeholder.objects.create(slot='test', default_width=300)
result = ph.has_change_permission(self.get_mock_request(False))
self.assertFalse(result)
def test_check_unicode_rendering(self):
ph = Placeholder.objects.create(slot='test', default_width=300)
result = force_unicode(ph)
self.assertEqual(result, u'test')
    def test_exercise_get_attached_model(self):
ph = Placeholder.objects.create(slot='test', default_width=300)
result = ph._get_attached_model()
self.assertEqual(result, None) # Simple PH - no model
    def test_exercise_get_attached_field_name(self):
ph = Placeholder.objects.create(slot='test', default_width=300)
result = ph._get_attached_field_name()
self.assertEqual(result, None) # Simple PH - no field name
    def test_exercise_get_attached_models_notplugins(self):
ex = Example1(
char_1='one',
char_2='two',
char_3='tree',
char_4='four'
)
ex.save()
ph = ex.placeholder
result = list(ph._get_attached_models())
self.assertEqual(result, [Example1]) # Simple PH - Example1 model
add_plugin(ph, TextPlugin, 'en', body='en body')
result = list(ph._get_attached_models())
self.assertEqual(result, [Example1]) # Simple PH still one Example1 model
    def test_exercise_get_attached_fields_notplugins(self):
ex = Example1(
char_1='one',
char_2='two',
char_3='tree',
char_4='four',
)
ex.save()
ph = ex.placeholder
result = [f.name for f in list(ph._get_attached_fields())]
self.assertEqual(result, ['placeholder']) # Simple PH - placeholder field name
add_plugin(ph, TextPlugin, 'en', body='en body')
result = [f.name for f in list(ph._get_attached_fields())]
self.assertEqual(result, ['placeholder']) # Simple PH - still one placeholder field name
class PlaceholderAdminTestBase(CMSTestCase):
def get_placeholder(self):
return Placeholder.objects.create(slot='test')
def get_admin(self):
admin.autodiscover()
return admin.site._registry[Example1]
def get_post_request(self, data):
return self.get_request(post_data=data)
class PlaceholderAdminTest(PlaceholderAdminTestBase):
placeholderconf = {'test': {
'limits': {
'global': 2,
'TextPlugin': 1,
}
}
}
def test_global_limit(self):
placeholder = self.get_placeholder()
admin_instance = self.get_admin()
data = {
'plugin_type': 'LinkPlugin',
'placeholder_id': placeholder.pk,
'plugin_language': 'en',
}
superuser = self.get_superuser()
with UserLoginContext(self, superuser):
with SettingsOverride(CMS_PLACEHOLDER_CONF=self.placeholderconf):
request = self.get_post_request(data)
response = admin_instance.add_plugin(request) # first
self.assertEqual(response.status_code, 200)
response = admin_instance.add_plugin(request) # second
self.assertEqual(response.status_code, 200)
response = admin_instance.add_plugin(request) # third
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content, b"This placeholder already has the maximum number of plugins (2).")
def test_type_limit(self):
placeholder = self.get_placeholder()
admin_instance = self.get_admin()
data = {
'plugin_type': 'TextPlugin',
'placeholder_id': placeholder.pk,
'plugin_language': 'en',
}
superuser = self.get_superuser()
with UserLoginContext(self, superuser):
with SettingsOverride(CMS_PLACEHOLDER_CONF=self.placeholderconf):
request = self.get_post_request(data)
response = admin_instance.add_plugin(request) # first
self.assertEqual(response.status_code, 200)
response = admin_instance.add_plugin(request) # second
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content,
b"This placeholder already has the maximum number (1) of allowed Text plugins.")
def test_global_limit_on_plugin_move(self):
admin_instance = self.get_admin()
superuser = self.get_superuser()
source_placeholder = Placeholder.objects.create(slot='source')
target_placeholder = self.get_placeholder()
data = {
'placeholder': source_placeholder,
'plugin_type': 'LinkPlugin',
'language': 'en',
}
plugin_1 = add_plugin(**data)
plugin_2 = add_plugin(**data)
plugin_3 = add_plugin(**data)
with UserLoginContext(self, superuser):
with SettingsOverride(CMS_PLACEHOLDER_CONF=self.placeholderconf):
request = self.get_post_request({'placeholder_id': target_placeholder.pk, 'plugin_id': plugin_1.pk})
response = admin_instance.move_plugin(request) # first
self.assertEqual(response.status_code, 200)
request = self.get_post_request({'placeholder_id': target_placeholder.pk, 'plugin_id': plugin_2.pk})
response = admin_instance.move_plugin(request) # second
self.assertEqual(response.status_code, 200)
request = self.get_post_request({'placeholder_id': target_placeholder.pk, 'plugin_id': plugin_3.pk})
response = admin_instance.move_plugin(request) # third
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content, b"This placeholder already has the maximum number of plugins (2).")
def test_type_limit_on_plugin_move(self):
admin_instance = self.get_admin()
superuser = self.get_superuser()
source_placeholder = Placeholder.objects.create(slot='source')
target_placeholder = self.get_placeholder()
data = {
'placeholder': source_placeholder,
'plugin_type': 'TextPlugin',
'language': 'en',
}
plugin_1 = add_plugin(**data)
plugin_2 = add_plugin(**data)
with UserLoginContext(self, superuser):
with SettingsOverride(CMS_PLACEHOLDER_CONF=self.placeholderconf):
request = self.get_post_request({'placeholder_id': target_placeholder.pk, 'plugin_id': plugin_1.pk})
response = admin_instance.move_plugin(request) # first
self.assertEqual(response.status_code, 200)
request = self.get_post_request({'placeholder_id': target_placeholder.pk, 'plugin_id': plugin_2.pk})
response = admin_instance.move_plugin(request) # second
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content,
b"This placeholder already has the maximum number (1) of allowed Text plugins.")
def test_no_limit_check_same_placeholder_move(self):
admin_instance = self.get_admin()
superuser = self.get_superuser()
source_placeholder = self.get_placeholder()
data = {
'placeholder': source_placeholder,
'plugin_type': 'LinkPlugin',
'language': 'en',
}
plugin_1 = add_plugin(**data)
add_plugin(**data)
with UserLoginContext(self, superuser):
with SettingsOverride(CMS_PLACEHOLDER_CONF=self.placeholderconf):
request = self.get_post_request({'placeholder_id': source_placeholder.pk, 'plugin_id': plugin_1.pk,
'plugin_order': 1, })
response = admin_instance.move_plugin(request) # first
self.assertEqual(response.status_code, 200)
def test_edit_plugin_and_cancel(self):
placeholder = self.get_placeholder()
admin_instance = self.get_admin()
data = {
'plugin_type': 'TextPlugin',
'placeholder_id': placeholder.pk,
'plugin_language': 'en',
}
superuser = self.get_superuser()
with UserLoginContext(self, superuser):
with SettingsOverride(CMS_PLACEHOLDER_CONF=self.placeholderconf):
request = self.get_post_request(data)
response = admin_instance.add_plugin(request)
self.assertEqual(response.status_code, 200)
plugin_id = int(str(response.content).split('edit-plugin/')[1].split("/")[0])
data = {
'body': 'Hello World',
}
request = self.get_post_request(data)
response = admin_instance.edit_plugin(request, plugin_id)
self.assertEqual(response.status_code, 200)
text_plugin = Text.objects.get(pk=plugin_id)
self.assertEqual('Hello World', text_plugin.body)
# edit again, but this time press cancel
data = {
'body': 'Hello World!!',
'_cancel': True,
}
request = self.get_post_request(data)
response = admin_instance.edit_plugin(request, plugin_id)
self.assertEqual(response.status_code, 200)
text_plugin = Text.objects.get(pk=plugin_id)
self.assertEqual('Hello World', text_plugin.body)
class PlaceholderPluginPermissionTests(PlaceholderAdminTestBase):
def _testuser(self):
User = get_user_model()
u = User(is_staff=True, is_active=True, is_superuser=False)
setattr(u, u.USERNAME_FIELD, "test")
u.set_password("test")
u.save()
return u
def _create_example(self):
ex = Example1(
char_1='one',
char_2='two',
char_3='tree',
char_4='four'
)
ex.save()
self._placeholder = ex.placeholder
self.example_object = ex
def _create_plugin(self):
self._plugin = add_plugin(self._placeholder, 'TextPlugin', 'en')
def _give_permission(self, user, model, permission_type, save=True):
codename = '%s_%s' % (permission_type, model._meta.object_name.lower())
user.user_permissions.add(Permission.objects.get(codename=codename))
def _delete_permission(self, user, model, permission_type, save=True):
codename = '%s_%s' % (permission_type, model._meta.object_name.lower())
user.user_permissions.remove(Permission.objects.get(codename=codename))
def _give_object_permission(self, user, object, permission_type, save=True):
codename = '%s_%s' % (permission_type, object.__class__._meta.object_name.lower())
UserObjectPermission.objects.assign_perm(codename, user=user, obj=object)
def _delete_object_permission(self, user, object, permission_type, save=True):
codename = '%s_%s' % (permission_type, object.__class__._meta.object_name.lower())
UserObjectPermission.objects.remove_perm(codename, user=user, obj=object)
def _post_request(self, user):
data = {
'plugin_type': 'TextPlugin',
'placeholder_id': self._placeholder.pk,
'plugin_language': 'en',
}
request = self.get_post_request(data)
request.user = self.reload(user)
request._messages = default_storage(request)
return request
def test_plugin_add_requires_permissions(self):
"""User wants to add a plugin to the example app placeholder but has no permissions"""
self._test_plugin_action_requires_permissions('add')
def test_plugin_edit_requires_permissions(self):
"""User wants to edit a plugin to the example app placeholder but has no permissions"""
self._test_plugin_action_requires_permissions('change')
def _test_plugin_action_requires_permissions(self, key):
self._create_example()
if key == 'change':
self._create_plugin()
normal_guy = self._testuser()
admin_instance = self.get_admin()
# check all combinations of plugin, app and object permission
for perms in itertools.product(*[[False, True]]*3):
self._set_perms(normal_guy, [Text, Example1, self.example_object], perms, key)
request = self._post_request(normal_guy)
if key == 'add':
response = admin_instance.add_plugin(request)
elif key == 'change':
response = admin_instance.edit_plugin(request, self._plugin.id)
should_pass = perms[0] and (perms[1] or perms[2])
expected_status_code = HttpResponse.status_code if should_pass else HttpResponseForbidden.status_code
self.assertEqual(response.status_code, expected_status_code)
# cleanup
self._set_perms(normal_guy, [Text, Example1, self.example_object], (False,)*3, key)
def _set_perms(self, user, objects, perms, key):
for obj, perm in zip(objects, perms):
action = 'give' if perm else 'delete'
object_key = '_object' if isinstance(obj, models.Model) else ''
method_name = '_%s%s_permission' % (action, object_key)
getattr(self, method_name)(user, obj, key)
class PlaceholderConfTests(TestCase):
def test_get_all_plugins_single_page(self):
page = create_page('page', 'col_two.html', 'en')
placeholder = page.placeholders.get(slot='col_left')
conf = {
'col_two': {
'plugins': ['TextPlugin', 'LinkPlugin'],
},
'col_two.html col_left': {
'plugins': ['LinkPlugin'],
},
}
with SettingsOverride(CMS_PLACEHOLDER_CONF=conf):
plugins = plugin_pool.get_all_plugins(placeholder, page)
self.assertEqual(len(plugins), 1, plugins)
self.assertEqual(plugins[0], LinkPlugin)
def test_get_all_plugins_inherit(self):
parent = create_page('parent', 'col_two.html', 'en')
page = create_page('page', constants.TEMPLATE_INHERITANCE_MAGIC, 'en', parent=parent)
placeholder = page.placeholders.get(slot='col_left')
conf = {
'col_two': {
'plugins': ['TextPlugin', 'LinkPlugin'],
},
'col_two.html col_left': {
'plugins': ['LinkPlugin'],
},
}
with SettingsOverride(CMS_PLACEHOLDER_CONF=conf):
plugins = plugin_pool.get_all_plugins(placeholder, page)
self.assertEqual(len(plugins), 1, plugins)
self.assertEqual(plugins[0], LinkPlugin)
class PlaceholderI18NTest(CMSTestCase):
def _testuser(self):
User = get_user_model()
u = User(is_staff=True, is_active=True, is_superuser=True)
setattr(u, u.USERNAME_FIELD, "test")
u.set_password("test")
u.save()
return u
def test_hvad_tabs(self):
ex = MultilingualExample1(
char_1='one',
char_2='two',
)
ex.save()
self._testuser()
self.client.login(username='test', password='test')
response = self.client.get('/de/admin/placeholderapp/multilingualexample1/%d/' % ex.pk)
self.assertContains(response, '<input type="hidden" class="language_button selected" name="de" />')
def test_no_tabs(self):
ex = Example1(
char_1='one',
char_2='two',
char_3='one',
char_4='two',
)
ex.save()
self._testuser()
self.client.login(username='test', password='test')
response = self.client.get('/de/admin/placeholderapp/example1/%d/' % ex.pk)
self.assertNotContains(response, '<input type="hidden" class="language_button selected" name="de" />')
def test_placeholder_tabs(self):
ex = TwoPlaceholderExample(
char_1='one',
char_2='two',
char_3='one',
char_4='two',
)
ex.save()
self._testuser()
self.client.login(username='test', password='test')
response = self.client.get('/de/admin/placeholderapp/twoplaceholderexample/%d/' % ex.pk)
self.assertNotContains(response,
"""<input type="button" onclick="trigger_lang_button(this,'./?language=en');" class="language_button selected" id="debutton" name="en" value="English">""")
|
jrief/django-cms
|
cms/tests/placeholder.py
|
Python
|
bsd-3-clause
| 50,452
|
"""
human_curl.async
~~~~~~~~~~~~~~~~
Async module
:copyright: (c) 2011 - 2012 by Alexandr Lispython (alex@obout.ru).
:license: BSD, see LICENSE for more details.
"""
from logging import getLogger
from types import FunctionType
try:
import pycurl2 as pycurl
except ImportError:
import pycurl
# Lib imports
from . import get_version
from .core import Request
from .exceptions import InterfaceError, CurlError
__all__ = ("AsyncClient", "map", "async_client", "get", "head", "post", "put", "options", "delete")
logger = getLogger('human_curl.async')
DEFAULT_MAX_OPENERS = 1000
DEFAULT_SLEEP_TIMEOUT = 2.0
DEFAULT_INFO_READ_RETRIES_MAX = 10
class AsyncClient(object):
"""Client to create async requests
.. versionadded:: 0.0.5
"""
def __init__(self, size=DEFAULT_MAX_OPENERS,
success_callback=None, fail_callback=None,
process_func=None,
sleep_timeout=DEFAULT_SLEEP_TIMEOUT,
info_read_retries_max=DEFAULT_INFO_READ_RETRIES_MAX, **kwargs):
"""Create `AsyncClient`
:param size: openers count
        :param success_callback: default success callback function
        :param fail_callback: default fail callback function
        :param sleep_timeout: timeout used when waiting between perform cycles
        :param \*\*kwargs: global request parameters
"""
self.success_callback = success_callback
self.fail_callback = fail_callback
self._remaining = 0
self._openers_pool = None
self._num_conn = size
self._data_queue = []
self._num_urls = 0
self._sleep_timeout = sleep_timeout
self.num_processed = 0
self._process_func = process_func
self._free_openers = []
self.responses = []
self._default_user_agent = None
self._default_params = kwargs
self._finished = False
@property
def user_agent(self):
"""Setup user agent
"""
if not self._default_user_agent:
self._default_user_agent = "Mozilla/5.0 (compatible; human_curl.async; {0}; +http://h.wrttn.me/human_curl)".format(get_version())
return self._default_user_agent
def add_handler(self, **params):
"""Add request params to data queue
        :param \*\*kwargs: Optional arguments that are passed to `Request`.
"""
# Check callback functions
if ('success_callback' not in params and not self.success_callback) or \
('fail_callback' not in params and not self.fail_callback):
raise InterfaceError("You must specify success_calback or fail_callback")
self._data_queue.append(params)
self._remaining += 1
self._num_urls = self._remaining
@property
def connections_count(self):
"""Calculace and return number of connections
:return: number of connections
"""
return min(self._num_conn, self._remaining)
def build_pool(self):
"""Make openers pool
        :return: a new :class:`pycurl.CurlMulti` object.
"""
self._openers_pool = pycurl.CurlMulti()
self._openers_pool.handles = []
# Get calculated connections count
num_openers = self.connections_count
for i in xrange(num_openers):
self._openers_pool.handles.append(self.get_opener())
logger.info("Created {0} openers".format(num_openers))
return self._openers_pool
@staticmethod
def get_opener():
"""Make `pycurl.Curl` objcet
:return opener: :class:`pycurl.Curl` object
"""
opener = pycurl.Curl()
opener.fp = None
opener.setopt(pycurl.NOSIGNAL, 1)
opener.dirty = False
return opener
def perform_pool(self):
"""Perform openers in pool
"""
while True:
ret, num_handles = self._openers_pool.perform()
if ret != pycurl.E_CALL_MULTI_PERFORM:
break
def start(self, process_func=None):
"""Start workers poll
:param process_func: function to call in process
"""
if process_func and isinstance(process_func, FunctionType):
self._process_func = process_func
elif process_func:
raise InterfaceError("process_func must be function")
if not self._openers_pool:
self._openers_pool = self.build_pool()
self._free_openers = self._openers_pool.handles[:]
while self._remaining:
self.process_raw_data()
self.perform_pool()
self.process_pending_requests()
logger.info("Processed {0} from {1} items".format(
self.num_processed, self._num_urls))
# Sleep timeout
self._openers_pool.select(self._sleep_timeout)
self.cleanup_pool()
def configure_opener(self, opener, data):
"""Make and configure `Request` from data
:param opener: :class:`pycurl.Curl` instance
:param data: `Request` params as dict
"""
opener = self.reset_opener(opener)
if 'user_agent' not in data:
data['user_agent'] = self.user_agent
mixed_data = self._default_params
mixed_data.update(data)
data = mixed_data
request = Request(**data)
request.build_opener(request.url, opener)
        # Attach the request and callbacks to the opener
opener.request = request
opener.success_callback = data.pop('success_callback', None) or \
self.success_callback
opener.fail_callback = data.get('fail_callback', None) or \
self.fail_callback
return opener
def reset_opener(self, opener):
"""Reset opener settings to defaults
:param opener: :class:`pycurl.Curl` object
"""
opener.success_callback = None
opener.fail_callback = None
opener.request = None
if getattr(opener, "dirty", False) is True:
            # After applying this method curl raises the error
# Unable to fetch curl handle from curl object
opener.reset()
        # Maybe cookies need to be deleted here?
return opener
def make_response(self, opener):
"""Make response from successed request
:param opener: :class:`pycurl.Curl` object
:return response: :class:`Response` object
"""
response = opener.request.make_response()
return response
def process_raw_data(self):
"""Load data from queue, make request instance and add handler
"""
while self._data_queue and self._free_openers:
request_data = self._data_queue.pop()
opener = self._free_openers.pop()
# Create request object
self.configure_opener(opener, request_data)
# Add configured opener to handles pool
self._openers_pool.add_handle(opener)
def process_pending_requests(self):
"""Process any requests that were completed by the last
call to multi.socket_action.
"""
while True:
try:
num_queued, success_list, error_list = self._openers_pool.info_read()
except Exception, e:
logger.warn(e)
raise CurlError(e[0], e[1])
for opener in success_list:
opener.fp = None
self._openers_pool.remove_handle(opener)
# Make `Response` object from opener
response = self.make_response(opener)
opener.success_callback(response=response,
async_client=self, opener=opener)
## FIXME: after pycurl.MultiCurl reset error
## opener.dirty = True
self._free_openers.append(opener)
for opener, errno, errmsg in error_list:
opener.fp = None
self._openers_pool.remove_handle(opener)
opener.fail_callback(errno=errno, errmsg=errmsg,
async_client=self, opener=opener,
request=opener.request)
## FIXME: after pycurl.MultiCurl reset error
## opener.dirty = True
self._free_openers.append(opener)
success_len = len(success_list)
error_len = len(error_list)
self.num_processed = self.num_processed + success_len + error_len
self._remaining -= success_len + error_len
if self._process_func:
self._process_func(num_processed=self.num_processed, remaining=self._remaining,
num_urls=self._num_urls, success_len=success_len,
error_len=error_len)
if num_queued == 0:
break
def cleanup_pool(self):
"""Close all fp, clean objects
:param openers_pool:
"""
if not self._openers_pool:
return None
for opener in self._openers_pool.handles:
if opener.fp is not None:
opener.fp.close()
opener.fp = None
opener.close()
self._openers_pool.close()
def method(self, method, **kwargs):
"""Added request params to data_queue
:param method: request method
:return self: :class:`AsyncClient` object
"""
if 'url' not in kwargs:
raise InterfaceError("You need specify url param")
self.add_handler(method=method, **kwargs)
# Return self to make chain calls
return self
def get(self, url, **kwargs):
return self.method("get", url=url, **kwargs)
def post(self, url, data='', **kwargs):
return self.method("post", url=url, data=data, **kwargs)
def head(self, url, **kwargs):
return self.method("head", url=url, **kwargs)
def options(self, url, **kwargs):
return self.method("options", url=url, **kwargs)
def put(self, url, **kwargs):
return self.method("put", url=url, **kwargs)
def delete(self, url, **kwargs):
return self.method("delete", url=url, **kwargs)
def __del__(self):
""" Close deascriptors after object delete
"""
self.cleanup_pool()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
logger.debug((exc_type, exc_value, traceback))
self.start()
def default_success_callback(response, async_client, opener, **kwargs):
"""Default callback for collect `Response` objects
:param response: :class:`Response` object
:param async_client: :class:`AsyncClient` object
:param opener: :class:`pycurl.Curl` object
"""
async_client.responses.append(response)
def default_fail_callback(request, errno, errmsg, async_client, opener):
"""Default callback for collect fails
:param request: :class:`Request` object
:param errno: error number code
:param errmsg: error message
:param async_client: :class:`AsyncClient` object
:param opener: :class:`pycurl.Curl` object
"""
async_client = AsyncClient(success_callback=default_success_callback,
fail_callback=default_fail_callback)
def map(requests):
"""
    :param requests: an iterable of queued requests
"""
if not requests:
return []
requests = [request for request in requests]
async_client.start()
return async_client.responses
# Make aliases
get = async_client.get
put = async_client.put
post = async_client.post
delete = async_client.delete
head = async_client.head
options = async_client.options
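# Hedged usage sketch (not part of the upstream module): queue a couple of
# GET requests through the module-level aliases above, perform them with
# map(), and print the collected responses. The URLs are placeholders and
# the ``status_code`` attribute on `Response` is assumed.
if __name__ == "__main__":
    queued = [get("http://example.com/"), get("http://example.org/")]
    for response in map(queued):
        print(response.status_code)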
|
andrewleech/script.module.human_curl
|
lib/human_curl/async.py
|
Python
|
bsd-3-clause
| 11,756
|
def func():
return 'something'
class Integer(object):
def __init__(self, value=None):
self.value = value
def add_value(self, value):
self.value = value
def convert_val_to_str(self):
self.value = self.__str__()
def con_val_to_str(self):
self.value = str(self.value)
def con_to_str(self):
return str(self.value)
intgr = Integer(10)
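# Hedged usage sketch (not part of the original lesson file): exercise the
# methods defined above on the module-level instance.
intgr.add_value(42)          # replace the stored value
print(intgr.con_to_str())    # -> '42'
intgr.con_val_to_str()       # convert the stored value to a string in place
print(intgr.value)           # -> '42'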
|
noisebridge/PythonClass
|
instructors/need-rework/15_classy_OOP/reimp.py
|
Python
|
mit
| 360
|
# -*- test-case-name: twisted.application.test.test_service -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Service architecture for Twisted.
Services are arranged in a hierarchy. At the leaves of the hierarchy,
the services which actually interact with the outside world are started.
Services can be named or anonymous -- usually, they will be named if
there is need to access them through the hierarchy (from a parent or
a sibling).
Maintainer: Moshe Zadka
"""
from __future__ import absolute_import, division
from zope.interface import implementer, Interface, Attribute
from twisted.persisted import sob
from twisted.python.reflect import namedAny
from twisted.python import components
from twisted.python._oldstyle import _oldStyle
from twisted.internet import defer
from twisted.plugin import IPlugin
class IServiceMaker(Interface):
"""
An object which can be used to construct services in a flexible
way.
This interface should most often be implemented along with
L{twisted.plugin.IPlugin}, and will most often be used by the
'twistd' command.
"""
tapname = Attribute(
"A short string naming this Twisted plugin, for example 'web' or "
"'pencil'. This name will be used as the subcommand of 'twistd'.")
description = Attribute(
"A brief summary of the features provided by this "
"Twisted application plugin.")
options = Attribute(
"A C{twisted.python.usage.Options} subclass defining the "
"configuration options for this application.")
def makeService(options):
"""
Create and return an object providing
L{twisted.application.service.IService}.
@param options: A mapping (typically a C{dict} or
L{twisted.python.usage.Options} instance) of configuration
options to desired configuration values.
"""
@implementer(IPlugin, IServiceMaker)
class ServiceMaker(object):
"""
Utility class to simplify the definition of L{IServiceMaker} plugins.
"""
def __init__(self, name, module, description, tapname):
self.name = name
self.module = module
self.description = description
self.tapname = tapname
def options():
def get(self):
return namedAny(self.module).Options
return get,
options = property(*options())
def makeService():
def get(self):
return namedAny(self.module).makeService
return get,
makeService = property(*makeService())
class IService(Interface):
"""
A service.
Run start-up and shut-down code at the appropriate times.
@type name: C{string}
@ivar name: The name of the service (or None)
@type running: C{boolean}
@ivar running: Whether the service is running.
"""
def setName(name):
"""
Set the name of the service.
@type name: C{str}
@raise RuntimeError: Raised if the service already has a parent.
"""
def setServiceParent(parent):
"""
Set the parent of the service. This method is responsible for setting
the C{parent} attribute on this service (the child service).
@type parent: L{IServiceCollection}
@raise RuntimeError: Raised if the service already has a parent
or if the service has a name and the parent already has a child
by that name.
"""
def disownServiceParent():
"""
Use this API to remove an L{IService} from an L{IServiceCollection}.
This method is used symmetrically with L{setServiceParent} in that it
sets the C{parent} attribute on the child.
@rtype: L{Deferred<defer.Deferred>}
@return: a L{Deferred<defer.Deferred>} which is triggered when the
service has finished shutting down. If shutting down is immediate,
a value can be returned (usually, L{None}).
"""
def startService():
"""
Start the service.
"""
def stopService():
"""
Stop the service.
@rtype: L{Deferred<defer.Deferred>}
@return: a L{Deferred<defer.Deferred>} which is triggered when the
service has finished shutting down. If shutting down is immediate,
a value can be returned (usually, L{None}).
"""
def privilegedStartService():
"""
Do preparation work for starting the service.
Here things which should be done before changing directory,
root or shedding privileges are done.
"""
@implementer(IService)
class Service(object):
"""
Base class for services.
Most services should inherit from this class. It handles the
book-keeping responsibilities of starting and stopping, as well
as not serializing this book-keeping information.
"""
running = 0
name = None
parent = None
def __getstate__(self):
dict = self.__dict__.copy()
if "running" in dict:
del dict['running']
return dict
def setName(self, name):
if self.parent is not None:
raise RuntimeError("cannot change name when parent exists")
self.name = name
def setServiceParent(self, parent):
if self.parent is not None:
self.disownServiceParent()
parent = IServiceCollection(parent, parent)
self.parent = parent
self.parent.addService(self)
def disownServiceParent(self):
d = self.parent.removeService(self)
self.parent = None
return d
def privilegedStartService(self):
pass
def startService(self):
self.running = 1
def stopService(self):
self.running = 0
class IServiceCollection(Interface):
"""
Collection of services.
Contain several services, and manage their start-up/shut-down.
Services can be accessed by name if they have a name, and it
is always possible to iterate over them.
"""
def getServiceNamed(name):
"""
Get the child service with a given name.
@type name: C{str}
@rtype: L{IService}
@raise KeyError: Raised if the service has no child with the
given name.
"""
def __iter__():
"""
Get an iterator over all child services.
"""
def addService(service):
"""
Add a child service.
Only implementations of L{IService.setServiceParent} should use this
method.
@type service: L{IService}
@raise RuntimeError: Raised if the service has a child with
the given name.
"""
def removeService(service):
"""
Remove a child service.
Only implementations of L{IService.disownServiceParent} should
use this method.
@type service: L{IService}
@raise ValueError: Raised if the given service is not a child.
@rtype: L{Deferred<defer.Deferred>}
@return: a L{Deferred<defer.Deferred>} which is triggered when the
service has finished shutting down. If shutting down is immediate,
a value can be returned (usually, L{None}).
"""
@implementer(IServiceCollection)
class MultiService(Service):
"""
Straightforward Service Container.
Hold a collection of services, and manage them in a simplistic
way. No service will wait for another, but this object itself
    will not finish shutting down until all of its child services
    have finished.
"""
def __init__(self):
self.services = []
self.namedServices = {}
self.parent = None
def privilegedStartService(self):
Service.privilegedStartService(self)
for service in self:
service.privilegedStartService()
def startService(self):
Service.startService(self)
for service in self:
service.startService()
def stopService(self):
Service.stopService(self)
l = []
services = list(self)
services.reverse()
for service in services:
l.append(defer.maybeDeferred(service.stopService))
return defer.DeferredList(l)
def getServiceNamed(self, name):
return self.namedServices[name]
def __iter__(self):
return iter(self.services)
def addService(self, service):
if service.name is not None:
if service.name in self.namedServices:
raise RuntimeError("cannot have two services with same name"
" '%s'" % service.name)
self.namedServices[service.name] = service
self.services.append(service)
if self.running:
# It may be too late for that, but we will do our best
service.privilegedStartService()
service.startService()
def removeService(self, service):
if service.name:
del self.namedServices[service.name]
self.services.remove(service)
if self.running:
# Returning this so as not to lose information from the
# MultiService.stopService deferred.
return service.stopService()
else:
return None
class IProcess(Interface):
"""
Process running parameters.
Represents parameters for how processes should be run.
"""
processName = Attribute(
"""
A C{str} giving the name the process should have in ps (or L{None}
to leave the name alone).
""")
uid = Attribute(
"""
An C{int} giving the user id as which the process should run (or
L{None} to leave the UID alone).
""")
gid = Attribute(
"""
An C{int} giving the group id as which the process should run (or
L{None} to leave the GID alone).
""")
@implementer(IProcess)
@_oldStyle
class Process:
"""
Process running parameters.
Sets up uid/gid in the constructor, and has a default
of L{None} as C{processName}.
"""
processName = None
def __init__(self, uid=None, gid=None):
"""
Set uid and gid.
@param uid: The user ID as whom to execute the process. If
this is L{None}, no attempt will be made to change the UID.
@param gid: The group ID as whom to execute the process. If
this is L{None}, no attempt will be made to change the GID.
"""
self.uid = uid
self.gid = gid
def Application(name, uid=None, gid=None):
"""
Return a compound class.
Return an object supporting the L{IService}, L{IServiceCollection},
L{IProcess} and L{sob.IPersistable} interfaces, with the given
parameters. Always access the return value by explicit casting to
one of the interfaces.
"""
ret = components.Componentized()
availableComponents = [MultiService(), Process(uid, gid),
sob.Persistent(ret, name)]
for comp in availableComponents:
ret.addComponent(comp, ignoreClass=1)
IService(ret).setName(name)
return ret
def loadApplication(filename, kind, passphrase=None):
"""
Load Application from a given file.
The serialization format it was saved in should be given as
C{kind}, and is one of C{pickle}, C{source}, C{xml} or C{python}. If
C{passphrase} is given, the application was encrypted with the
given passphrase.
@type filename: C{str}
@type kind: C{str}
@type passphrase: C{str}
"""
if kind == 'python':
application = sob.loadValueFromFile(filename, 'application')
else:
application = sob.load(filename, kind)
return application
__all__ = ['IServiceMaker', 'IService', 'Service',
'IServiceCollection', 'MultiService',
'IProcess', 'Process', 'Application', 'loadApplication']
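if __name__ == '__main__':
    # Hedged usage sketch (not part of Twisted itself): build a tiny service
    # hierarchy with the classes defined above. ``EchoService`` is a
    # hypothetical subclass used only for illustration.
    class EchoService(Service):
        def startService(self):
            Service.startService(self)
            print("echo service started")

    root = MultiService()
    child = EchoService()
    child.setName("echo")
    child.setServiceParent(root)
    root.startService()                     # starts every child service
    assert root.getServiceNamed("echo") is child
    root.stopService()                      # DeferredList of child shutdowns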
|
whitehorse-io/encarnia
|
pyenv/lib/python2.7/site-packages/twisted/application/service.py
|
Python
|
mit
| 11,938
|
from toolchain import Recipe, shprint
from os.path import join
import sh
import os
class pkg_resources(Recipe):
depends = ["hostpython", "python"]
archs = ['i386']
url = ""
def prebuild_arch(self, arch):
sh.cp("pkg_resources.py", join(self.ctx.dist_dir, "root", "python", "lib", "python2.7", "site-packages", "pkg_resources.py"))
recipe = pkg_resources()
|
rnixx/kivy-ios
|
recipes/pkgresources/__init__.py
|
Python
|
mit
| 385
|
"""
Example sentences to test spaCy and its language models.
>>> from spacy.lang.he.examples import sentences
>>> docs = nlp.pipe(sentences)
"""
sentences = [
"סין מקימה קרן של 440 מיליון דולר להשקעה בהייטק בישראל",
'רה"מ הודיע כי יחרים טקס בחסותו',
"הכנסת צפויה לאשר איכון אוטומטי של שיחות למוקד 100",
"תוכנית לאומית תהפוך את ישראל למעצמה דיגיטלית",
"סע לשלום, המפתחות בפנים.",
"מלצר, פעמיים טורקי!",
"ואהבת לרעך כמוך.",
"היום נעשה משהו בלתי נשכח.",
"איפה הילד?",
"מיהו נשיא צרפת?",
"מהי בירת ארצות הברית?",
"איך קוראים בעברית לצ'ופצ'יק של הקומקום?",
"מה הייתה הדקה?",
"מי אומר שלום ראשון, זה שעולה או זה שיורד?",
]
|
explosion/spaCy
|
spacy/lang/he/examples.py
|
Python
|
mit
| 994
|
#!/usr/bin/env python
#Copyright (C) 2011 by Benedict Paten (benedictpaten@gmail.com)
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
import sys
import marshal
import types
from sonLib.bioio import system
import importlib
class Target(object):
"""Each job wrapper extends this class.
"""
def __init__(self, time=sys.maxint, memory=sys.maxint, cpu=sys.maxint):
"""This method must be called by any overiding constructor.
"""
self.__followOn = None
self.__children = []
self.__childCommands = []
self.__memory = memory
self.__time = time #This parameter is no longer used by the batch system.
self.__cpu = cpu
self.globalTempDir = None
if self.__module__ == "__main__":
raise RuntimeError("The module name of class %s is __main__, which prevents us from serialising it properly, \
please ensure you re-import targets defined in main" % self.__class__.__name__)
self.importStrings = set((".".join((self.__module__, self.__class__.__name__)),))
self.loggingMessages = []
def run(self):
"""Do user stuff here, including creating any follow on jobs.
This function must not re-pickle the pickle file, which is an input file.
"""
pass
def setFollowOnTarget(self, followOn):
"""Set the follow on target.
Will complain if follow on already set.
"""
assert self.__followOn == None
self.__followOn = followOn
def setFollowOnFn(self, fn, args=(), kwargs={}, time=sys.maxint, memory=sys.maxint, cpu=sys.maxint):
"""Sets a follow on target fn. See FunctionWrappingTarget.
"""
self.setFollowOnTarget(FunctionWrappingTarget(fn=fn, args=args, kwargs=kwargs, time=time, memory=memory, cpu=cpu))
def setFollowOnTargetFn(self, fn, args=(), kwargs={}, time=sys.maxint, memory=sys.maxint, cpu=sys.maxint):
"""Sets a follow on target fn. See TargetFunctionWrappingTarget.
"""
self.setFollowOnTarget(TargetFunctionWrappingTarget(fn=fn, args=args, kwargs=kwargs, time=time, memory=memory, cpu=cpu))
def addChildTarget(self, childTarget):
"""Adds the child target to be run as child of this target.
"""
self.__children.append(childTarget)
def addChildFn(self, fn, args=(), kwargs={}, time=sys.maxint, memory=sys.maxint, cpu=sys.maxint):
"""Adds a child fn. See FunctionWrappingTarget.
"""
self.addChildTarget(FunctionWrappingTarget(fn=fn, args=args, kwargs=kwargs, time=time, memory=memory, cpu=cpu))
def addChildTargetFn(self, fn, args=(), kwargs={}, time=sys.maxint, memory=sys.maxint, cpu=sys.maxint):
"""Adds a child target fn. See TargetFunctionWrappingTarget.
"""
self.addChildTarget(TargetFunctionWrappingTarget(fn=fn, args=args, kwargs=kwargs, time=time, memory=memory, cpu=cpu))
def addChildCommand(self, childCommand, runTime=sys.maxint):
"""A command to be run as child of the job tree.
"""
self.__childCommands.append((str(childCommand), float(runTime)))
def getRunTime(self):
"""Get the time the target is anticipated to run.
"""
return self.__time
def getGlobalTempDir(self):
"""Get the global temporary directory.
"""
#Check if we have initialised the global temp dir - doing this
#just in time prevents us from creating temp directories unless we have to.
if self.globalTempDir == None:
self.globalTempDir = self.stack.getGlobalTempDir()
return self.globalTempDir
def getLocalTempDir(self):
"""Get the local temporary directory.
"""
return self.stack.getLocalTempDir()
def getMemory(self):
"""Returns the number of bytes of memory that were requested by the job.
"""
return self.__memory
def getCpu(self):
"""Returns the number of cpus requested by the job.
"""
return self.__cpu
def getFollowOn(self):
"""Get the follow on target.
"""
return self.__followOn
def getChildren(self):
"""Get the child targets.
"""
return self.__children[:]
def getChildCommands(self):
"""Gets the child commands, as a list of tuples of strings and floats, representing the run times.
"""
return self.__childCommands[:]
def logToMaster(self, string):
"""Send a logging message to the master. Will only reported if logging is set to INFO level in the master.
"""
self.loggingMessages.append(str(string))
@staticmethod
def makeTargetFn(fn, args=(), kwargs={}, time=sys.maxint, memory=sys.maxint, cpu=sys.maxint):
"""Makes a Target out of a target function!
In a target function, the first argument to the function will be a reference to the wrapping target, allowing
the function to create children/follow ons.
Convenience function for constructor of TargetFunctionWrappingTarget
"""
return TargetFunctionWrappingTarget(fn=fn, args=args, kwargs=kwargs, time=time, memory=memory, cpu=cpu)
####
#Private functions
####
def setGlobalTempDir(self, globalTempDir):
"""Sets the global temp dir.
"""
self.globalTempDir = globalTempDir
def isGlobalTempDirSet(self):
return self.globalTempDir != None
def setStack(self, stack):
"""Sets the stack object that is calling the target.
"""
self.stack = stack
def getMasterLoggingMessages(self):
return self.loggingMessages[:]
class FunctionWrappingTarget(Target):
"""Target used to wrap a function.
    Function can not currently be a nested function or a class method.
"""
def __init__(self, fn, args=(), kwargs={}, time=sys.maxint, memory=sys.maxint, cpu=sys.maxint):
        Target.__init__(self, time=time, memory=memory, cpu=cpu)
self.fnModule = str(fn.__module__) #Module of function
self.fnName = str(fn.__name__) #Name of function
self.args=args
self.kwargs=kwargs
def run(self):
func = getattr(importlib.import_module(self.fnModule), self.fnName)
func(*self.args, **self.kwargs)
class TargetFunctionWrappingTarget(FunctionWrappingTarget):
"""Target used to wrap a function.
A target function is a function which takes as its first argument a reference
to the wrapping target.
    The target function can not be a closure.
"""
def run(self):
func = getattr(importlib.import_module(self.fnModule), self.fnName)
func(*((self,) + tuple(self.args)), **self.kwargs)
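if __name__ == "__main__":
    # Hedged usage sketch (not part of jobTree): wrap a plain function as a
    # target with Target.makeTargetFn; the first argument a target function
    # receives is the wrapping target itself. Note that Target.__init__ above
    # refuses classes whose module is __main__, so running this file directly
    # only demonstrates that guard -- real targets live in importable modules.
    def greet(target, word):
        target.logToMaster("greeting scheduled: %s" % word)
    try:
        t = Target.makeTargetFn(greet, args=("hello",))
        t.run()
        print(t.getMasterLoggingMessages())
    except RuntimeError as exc:
        print("Target construction refused under __main__: %s" % exc)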
|
cooketho/jobTree
|
scriptTree/target.py
|
Python
|
mit
| 7,809
|
import pg
import random
PIPES = 8
SIZE = 16
TURN_PROBABILITY = 0.3
UPDATE_RATE = 0.05
RESTART_RATE = 30
DIRECTIONS = [
(0, -1, 0, 0), (0, 1, 0, 0),
(1, 0, -1, 0), (1, 0, 1, 0),
(2, 0, 0, -1), (2, 0, 0, 1),
]
COLORS = [
0x1f77b4, 0xaec7e8, 0xff7f0e, 0xffbb78, 0x2ca02c, 0x98df8a,
0xd62728, 0xff9896, 0x9467bd, 0xc5b0d5, 0x8c564b, 0xc49c94,
0xe377c2, 0xf7b6d2, 0x7f7f7f, 0xc7c7c7, 0x17becf, 0x9edae5,
]
SPHERE = pg.Sphere(2, 0.25, (0, 0, 0))
CYLINDERS = [
pg.Cylinder((-0.5, 0, 0), (0.5, 0, 0), 0.25, 16, True),
pg.Cylinder((0, -0.5, 0), (0, 0.5, 0), 0.25, 16, True),
pg.Cylinder((0, 0, -0.5), (0, 0, 0.5), 0.25, 16, True),
]
class Pipe(object):
def __init__(self, occupied):
self.occupied = occupied
self.context = pg.Context(pg.DirectionalLightProgram())
self.context.object_color = pg.hex_color(random.choice(COLORS))
self.context.position = self.positions = pg.VertexBuffer()
self.context.normal = self.normals = pg.VertexBuffer()
self.restart()
def add_cylinder(self, position, axis):
mesh = pg.Matrix().translate(position) * CYLINDERS[axis]
self.positions.extend(mesh.positions)
self.normals.extend(mesh.normals)
def add_sphere(self, position):
mesh = pg.Matrix().translate(position) * SPHERE
self.positions.extend(mesh.positions)
self.normals.extend(mesh.normals)
def restart(self):
while True:
x = random.randint(-SIZE, SIZE)
y = random.randint(-SIZE, SIZE)
z = random.randint(-SIZE, SIZE)
if (x, y, z) not in self.occupied:
break
self.position = (x, y, z)
self.direction = random.choice(DIRECTIONS)
self.occupied.add(self.position)
self.add_sphere(self.position)
def update(self):
x, y, z = self.position
directions = list(DIRECTIONS)
random.shuffle(directions)
if random.random() > TURN_PROBABILITY:
directions.remove(self.direction)
directions.insert(0, self.direction)
for direction in directions:
axis, dx, dy, dz = direction
nx, ny, nz = x + dx, y + dy, z + dz
if (nx, ny, nz) in self.occupied:
continue
if any(n < -SIZE or n > SIZE for n in (nx, ny, nz)):
continue
self.position = (nx, ny, nz)
self.occupied.add(self.position)
mx, my, mz = x + dx / 2.0, y + dy / 2.0, z + dz / 2.0
self.add_cylinder((mx, my, mz), axis)
if direction != self.direction:
self.add_sphere((x, y, z))
self.direction = direction
return
self.add_sphere((x, y, z))
self.restart()
class Window(pg.Window):
def setup(self):
self.wasd = pg.WASD(self, speed=10)
self.wasd.look_at((SIZE + 10, 0, 0), (0, 0, 0))
self.pipes = []
self.restart()
self.last_update = 0
self.last_restart = 0
def restart(self):
for pipe in self.pipes:
pipe.positions.delete()
pipe.normals.delete()
occupied = set()
self.pipes = [Pipe(occupied) for _ in xrange(PIPES)]
def update(self, t, dt):
if t - self.last_restart >= RESTART_RATE:
self.last_restart += RESTART_RATE
self.restart()
if t - self.last_update >= UPDATE_RATE:
self.last_update += UPDATE_RATE
for pipe in self.pipes:
pipe.update()
def draw(self):
matrix = self.wasd.get_matrix()
matrix = matrix.perspective(65, self.aspect, 0.1, 1000)
self.clear()
for pipe in self.pipes:
pipe.context.matrix = matrix
pipe.context.camera_position = self.wasd.position
pipe.context.draw()
if __name__ == "__main__":
pg.run(Window)
|
stuaxo/pg
|
examples/pipes.py
|
Python
|
mit
| 3,922
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
VERSION = "unknown"
class KeyVaultClientConfiguration(Configuration):
"""Configuration for KeyVaultClient.
Note that all parameters used to create this instance are saved as instance
attributes.
"""
def __init__(
self,
**kwargs # type: Any
):
# type: (...) -> None
super(KeyVaultClientConfiguration, self).__init__(**kwargs)
self.api_version = "7.2"
kwargs.setdefault('sdk_moniker', 'keyvault/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
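if __name__ == "__main__":
    # Hedged usage sketch (not part of the generated code): the configuration
    # is normally built by the generated KeyVaultClient, but it can also be
    # constructed directly; an ``authentication_policy`` (e.g. a bearer-token
    # policy wrapping an Azure credential) would usually be passed in as well.
    config = KeyVaultClientConfiguration()
    print(config.api_version)  # "7.2"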
|
Azure/azure-sdk-for-python
|
sdk/keyvault/azure-keyvault-secrets/azure/keyvault/secrets/_generated/v7_2/_configuration.py
|
Python
|
mit
| 2,192
|
import logging
from autotest.client.shared import error
from virttest import utils_misc
def kdump_enable(vm, vm_name, crash_kernel_prob_cmd,
kernel_param_cmd, kdump_enable_cmd, timeout):
"""
Check, configure and enable the kdump
:param vm_name: vm name
    :param crash_kernel_prob_cmd: command that checks whether the kdump crash kernel is loaded
    :param kernel_param_cmd: the param added to the kernel command line for kdump
:param kdump_enable_cmd: enable kdump command
:param timeout: Timeout in seconds
"""
error.context("Try to log into guest '%s'." % vm_name, logging.info)
session = vm.wait_for_login(timeout=timeout)
error.context("Checking the existence of crash kernel in %s" %
vm_name, logging.info)
try:
session.cmd(crash_kernel_prob_cmd)
except Exception:
error.context("Crash kernel is not loaded. Trying to load it",
logging.info)
session.cmd(kernel_param_cmd)
session = vm.reboot(session, timeout=timeout)
if vm.params.get("kdump_config"):
error.context("Configuring the Core Collector", logging.info)
config_file = "/etc/kdump.conf"
for config_line in vm.params.get("kdump_config").split(";"):
config_cmd = "grep '^%s$' %s || echo -e '%s' >> %s "
config_con = config_line.strip()
session.cmd(config_cmd % ((config_con, config_file) * 2))
error.context("Enabling kdump service...", logging.info)
# the initrd may be rebuilt here so we need to wait a little more
session.cmd(kdump_enable_cmd, timeout=120)
return session
def crash_test(vm, vcpu, crash_cmd, timeout):
"""
Trigger a crash dump through sysrq-trigger
:param vcpu: vcpu which is used to trigger a crash
:param crash_cmd: crash_cmd which is triggered crash command
:param timeout: Timeout in seconds
"""
session = vm.wait_for_login(timeout=timeout)
logging.info("Delete the vmcore file.")
session.cmd_output("rm -rf /var/crash/*")
if crash_cmd == "nmi":
logging.info("Triggering crash with 'nmi' interrupt")
session.cmd("echo 1 > /proc/sys/kernel/unknown_nmi_panic")
vm.monitor.nmi()
else:
logging.info("Triggering crash on vcpu %d ...", vcpu)
session.sendline("taskset -c %d %s" % (vcpu, crash_cmd))
def check_vmcore(vm, session, timeout):
"""
Check the vmcore file after triggering a crash
:param session: A shell session object or None.
:param timeout: Timeout in seconds
"""
if not utils_misc.wait_for(lambda: not session.is_responsive(), 240, 0,
1):
raise error.TestFail("Could not trigger crash")
error.context("Waiting for kernel crash dump to complete",
logging.info)
session = vm.wait_for_login(timeout=timeout)
error.context("Probing vmcore file...", logging.info)
try:
session.cmd("ls -R /var/crash | grep vmcore")
except Exception:
raise error.TestFail("Could not found vmcore file.")
logging.info("Found vmcore.")
@error.context_aware
def run(test, params, env):
"""
KVM kdump test:
1) Log into the guest(s)
2) Check, configure and enable the kdump
3) Trigger a crash by 'sysrq-trigger' and check the vmcore for each vcpu,
or only trigger one crash with 'nmi' interrupt and check vmcore.
:param test: kvm test object
:param params: Dictionary with the test parameters
:param env: Dictionary with test environment.
"""
timeout = float(params.get("login_timeout", 240))
crash_timeout = float(params.get("crash_timeout", 360))
def_kernel_param_cmd = ("grubby --update-kernel=`grubby --default-kernel`"
" --args=crashkernel=128M@16M")
kernel_param_cmd = params.get("kernel_param_cmd", def_kernel_param_cmd)
def_kdump_enable_cmd = "chkconfig kdump on && service kdump restart"
kdump_enable_cmd = params.get("kdump_enable_cmd", def_kdump_enable_cmd)
def_crash_kernel_prob_cmd = "grep -q 1 /sys/kernel/kexec_crash_loaded"
crash_kernel_prob_cmd = params.get("crash_kernel_prob_cmd",
def_crash_kernel_prob_cmd)
vms = params.get("vms", "vm1 vm2").split()
vm_list = []
session_list = []
for vm_name in vms:
vm = env.get_vm(vm_name)
vm.verify_alive()
vm_list.append(vm)
session = kdump_enable(vm, vm_name, crash_kernel_prob_cmd,
kernel_param_cmd, kdump_enable_cmd, timeout)
session_list.append(session)
for vm in vm_list:
error.context("Kdump Testing, force the Linux kernel to crash",
logging.info)
crash_cmd = params.get("crash_cmd", "echo c > /proc/sysrq-trigger")
if crash_cmd == "nmi":
crash_test(vm, None, crash_cmd, timeout)
else:
# trigger crash for each vcpu
nvcpu = int(params.get("smp", 1))
for i in range(nvcpu):
crash_test(vm, i, crash_cmd, timeout)
for i in range(len(vm_list)):
error.context("Check the vmcore file after triggering a crash",
logging.info)
check_vmcore(vm_list[i], session_list[i], crash_timeout)
|
ypu/tp-qemu
|
generic/tests/kdump.py
|
Python
|
gpl-2.0
| 5,293
|
# -*- coding: utf-8 -*-
"""
werkzeug.testsuite.cache
~~~~~~~~~~~~~~~~~~~~~~~~
Tests the cache system
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import time
import unittest
import tempfile
import shutil
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.contrib import cache
try:
import redis
try:
from redis.exceptions import ConnectionError as RedisConnectionError
cache.RedisCache(key_prefix='werkzeug-test-case:')._client.set('test','connection')
except RedisConnectionError:
redis = None
except ImportError:
redis = None
class SimpleCacheTestCase(WerkzeugTestCase):
def test_get_dict(self):
c = cache.SimpleCache()
c.set('a', 'a')
c.set('b', 'b')
d = c.get_dict('a', 'b')
assert 'a' in d
assert 'a' == d['a']
assert 'b' in d
assert 'b' == d['b']
def test_set_many(self):
c = cache.SimpleCache()
c.set_many({0: 0, 1: 1, 2: 4})
assert c.get(2) == 4
c.set_many((i, i*i) for i in range(3))
assert c.get(2) == 4
class FileSystemCacheTestCase(WerkzeugTestCase):
def test_set_get(self):
tmp_dir = tempfile.mkdtemp()
try:
c = cache.FileSystemCache(cache_dir=tmp_dir)
for i in range(3):
c.set(str(i), i * i)
for i in range(3):
result = c.get(str(i))
assert result == i * i
finally:
shutil.rmtree(tmp_dir)
def test_filesystemcache_prune(self):
THRESHOLD = 13
tmp_dir = tempfile.mkdtemp()
c = cache.FileSystemCache(cache_dir=tmp_dir, threshold=THRESHOLD)
for i in range(2 * THRESHOLD):
c.set(str(i), i)
cache_files = os.listdir(tmp_dir)
shutil.rmtree(tmp_dir)
assert len(cache_files) <= THRESHOLD
def test_filesystemcache_clear(self):
tmp_dir = tempfile.mkdtemp()
c = cache.FileSystemCache(cache_dir=tmp_dir)
c.set('foo', 'bar')
cache_files = os.listdir(tmp_dir)
assert len(cache_files) == 1
c.clear()
cache_files = os.listdir(tmp_dir)
assert len(cache_files) == 0
shutil.rmtree(tmp_dir)
class RedisCacheTestCase(WerkzeugTestCase):
def make_cache(self):
return cache.RedisCache(key_prefix='werkzeug-test-case:')
def teardown(self):
self.make_cache().clear()
def test_compat(self):
c = self.make_cache()
c._client.set(c.key_prefix + 'foo', 'Awesome')
self.assert_equal(c.get('foo'), 'Awesome')
c._client.set(c.key_prefix + 'foo', '42')
self.assert_equal(c.get('foo'), 42)
def test_get_set(self):
c = self.make_cache()
c.set('foo', ['bar'])
assert c.get('foo') == ['bar']
def test_get_many(self):
c = self.make_cache()
c.set('foo', ['bar'])
c.set('spam', 'eggs')
assert c.get_many('foo', 'spam') == [['bar'], 'eggs']
def test_set_many(self):
c = self.make_cache()
c.set_many({'foo': 'bar', 'spam': ['eggs']})
assert c.get('foo') == 'bar'
assert c.get('spam') == ['eggs']
def test_expire(self):
c = self.make_cache()
c.set('foo', 'bar', 1)
time.sleep(2)
assert c.get('foo') is None
def test_add(self):
c = self.make_cache()
# sanity check that add() works like set()
c.add('foo', 'bar')
assert c.get('foo') == 'bar'
c.add('foo', 'qux')
assert c.get('foo') == 'bar'
def test_delete(self):
c = self.make_cache()
c.add('foo', 'bar')
assert c.get('foo') == 'bar'
c.delete('foo')
assert c.get('foo') is None
def test_delete_many(self):
c = self.make_cache()
c.add('foo', 'bar')
c.add('spam', 'eggs')
c.delete_many('foo', 'spam')
assert c.get('foo') is None
assert c.get('spam') is None
def test_inc_dec(self):
c = self.make_cache()
c.set('foo', 1)
assert c.inc('foo') == 2
assert c.dec('foo') == 1
c.delete('foo')
def test_true_false(self):
c = self.make_cache()
c.set('foo', True)
assert c.get('foo') == True
c.set('bar', False)
assert c.get('bar') == False
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(SimpleCacheTestCase))
suite.addTest(unittest.makeSuite(FileSystemCacheTestCase))
if redis is not None:
suite.addTest(unittest.makeSuite(RedisCacheTestCase))
return suite
|
jackTheRipper/iotrussia
|
web_server/lib/werkzeug-master/werkzeug/testsuite/contrib/cache.py
|
Python
|
gpl-2.0
| 4,699
|
from debug_toolbar.panels import Panel
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
class MisagoACLDebugPanel(Panel):
name = 'MisagoACL'
has_content = True
def nav_title(self):
return _('Misago ACL')
def title(self):
return _('Misago User ACL')
def url(self):
return ''
def process_request(self, request):
self.request = request
def content(self):
if self.request.heartbeat:
self.has_content = False
else:
context = self.context.copy()
try:
context['acl'] = self.request.acl
except AttributeError:
context['acl'] = {}
return render_to_string('debug_toolbar/panels/acl.html', context)
|
Maronato/aosalunos
|
misago/acl/panels.py
|
Python
|
gpl-2.0
| 821
|
# This file is part of Invenio.
# Copyright (C) 2009, 2010, 2011, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Receive OAI-PMH 2.0 requests and responds"""
__revision__ = "$Id$"
from six.moves import cPickle
import os
import re
import time
import tempfile
import sys
import datetime
if sys.hexversion < 0x2050000:
from glob import glob as iglob
else:
from glob import iglob
from flask import url_for, abort
from flask_login import current_user
from intbitset import intbitset
from six import iteritems
from invenio.config import \
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG, \
CFG_CACHEDIR, \
CFG_CERN_SITE, \
CFG_OAI_DELETED_POLICY, \
CFG_OAI_EXPIRE, \
CFG_OAI_FRIENDS, \
CFG_OAI_IDENTIFY_DESCRIPTION, \
CFG_OAI_ID_FIELD, \
CFG_OAI_ID_PREFIX, \
CFG_OAI_LOAD, \
CFG_OAI_METADATA_FORMATS, \
CFG_OAI_PREVIOUS_SET_FIELD, \
CFG_OAI_PROVENANCE_ALTERED_SUBFIELD, \
CFG_OAI_PROVENANCE_BASEURL_SUBFIELD, \
CFG_OAI_PROVENANCE_DATESTAMP_SUBFIELD, \
CFG_OAI_PROVENANCE_HARVESTDATE_SUBFIELD, \
CFG_OAI_PROVENANCE_METADATANAMESPACE_SUBFIELD, \
CFG_OAI_PROVENANCE_ORIGINDESCRIPTION_SUBFIELD, \
CFG_OAI_SAMPLE_IDENTIFIER, \
CFG_OAI_SET_FIELD, \
CFG_SITE_NAME, \
CFG_SITE_SUPPORT_EMAIL, \
CFG_SITE_URL, \
CFG_WEBSTYLE_HTTP_USE_COMPRESSION
from invenio.base.globals import cfg
from invenio.ext.logging import register_exception
from invenio.legacy.bibrecord import record_get_field_instances
from invenio.legacy.dbquery import run_sql, wash_table_column_name
from invenio.legacy.oairepository.config import CFG_OAI_REPOSITORY_GLOBAL_SET_SPEC
from invenio.legacy.search_engine import record_exists, get_all_restricted_recids, \
search_unit_in_bibxxx, get_record, search_pattern
from invenio.modules.formatter import format_record
from invenio.modules.search.utils import get_records_that_can_be_displayed
from invenio.utils.date import localtime_to_utc, utc_to_localtime
from invenio.utils.html import X, EscapedXMLString
CFG_VERBS = {
'GetRecord' : ['identifier', 'metadataPrefix'],
'Identify' : [],
'ListIdentifiers' : ['from', 'until',
'metadataPrefix',
'set',
'resumptionToken'],
'ListMetadataFormats': ['identifier'],
'ListRecords' : ['from', 'until',
'metadataPrefix',
'set',
'resumptionToken'],
'ListSets' : ['resumptionToken']
}
CFG_ERRORS = {
"badArgument": "The request includes illegal arguments, is missing required arguments, includes a repeated argument, or values for arguments have an illegal syntax:",
"badResumptionToken": "The value of the resumptionToken argument is invalid or expired:",
"badVerb": "Value of the verb argument is not a legal OAI-PMH verb, the verb argument is missing, or the verb argument is repeated:",
"cannotDisseminateFormat": "The metadata format identified by the value given for the metadataPrefix argument is not supported by the item or by the repository:",
"idDoesNotExist": "The value of the identifier argument is unknown or illegal in this repository:",
"noRecordsMatch": "The combination of the values of the from, until, set and metadataPrefix arguments results in an empty list:",
"noMetadataFormats": "There are no metadata formats available for the specified item:",
"noSetHierarchy": "The repository does not support sets:"
}
CFG_MIN_DATE = "1970-01-01T00:00:00Z"
CFG_MAX_DATE = "9999-12-31T23:59:59Z"
def get_all_field_values(tag):
"""
Return all existing values stored for a given tag.
@param tag: the full tag, e.g. 909C0b
@type tag: string
@return: the list of values
@rtype: list of strings
"""
table = 'bib%02dx' % int(tag[:2])
return [row[0] for row in run_sql("SELECT DISTINCT(value) FROM %s WHERE tag=%%s" % table, (tag, ))]
def oai_error(argd, errors):
"""
Return a well-formatted OAI-PMH error
"""
out = """<?xml version="1.0" encoding="UTF-8"?>
<OAI-PMH xmlns="http://www.openarchives.org/OAI/2.0/"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/
http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd">"""
out += X.responseDate()(get_utc_now())
for error_code, error_msg in errors:
assert(error_code in CFG_ERRORS)
if error_code in ("badArgument", "badVerb"):
out += X.request()(oai_get_request_url())
break
else:
## There are no badArgument or badVerb errors so we can
## return the whole request information
out += X.request(**argd)(oai_get_request_url())
for error_code, error_msg in errors:
if error_msg is None:
error_msg = CFG_ERRORS[error_code]
else:
error_msg = "%s %s" % (CFG_ERRORS[error_code], error_msg)
out += X.error(code=error_code)(error_msg)
out += "</OAI-PMH>"
return out
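## Illustrative sketch (not part of the original module): for
## oai_error({'verb': 'GetRecord'}, [("badArgument", None)]) the returned
## document contains, between the responseDate/request elements and the
## closing </OAI-PMH>, a single element of the form
## <error code="badArgument">The request includes illegal arguments, ...</error>
## where the text is the generic CFG_ERRORS["badArgument"] message.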
def oai_header(argd, verb):
"""
Return OAI header
"""
out = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>" + "\n"
out += "<?xml-stylesheet type=\"text/xsl\" href=\"%s\" ?>\n" % (
url_for('oairepository.static',
filename='xsl/oairepository/oai2.xsl.v1.0'))
out += "<OAI-PMH xmlns=\"http://www.openarchives.org/OAI/2.0/\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://www.openarchives.org/OAI/2.0/ http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd\">\n"
#out += "<responseDate>%s</responseDate>" % get_utc_now()
out += X.responseDate()(get_utc_now())
if verb:
out += X.request(**argd)(oai_get_request_url())
out += "<%s>\n" % verb
else:
out += X.request()(oai_get_request_url())
return out
def oai_footer(verb):
"""
@return: the OAI footer.
"""
out = ""
if verb:
out += "</%s>\n" % (verb)
out += "</OAI-PMH>\n"
return out
def get_field(recid, field):
"""
Gets list of field 'field' for the record with 'recid' system number.
"""
digit = field[0:2]
bibbx = "bib%sx" % digit
bibx = "bibrec_bib%sx" % digit
query = "SELECT bx.value FROM %s AS bx, %s AS bibx WHERE bibx.id_bibrec=%%s AND bx.id=bibx.id_bibxxx AND bx.tag=%%s" % (wash_table_column_name(bibbx), wash_table_column_name(bibx))
return [row[0] for row in run_sql(query, (recid, field))]
def get_modification_date(recid):
"""Returns the date of last modification for the record 'recid'.
Return empty string if no record or modification date in UTC.
"""
out = ""
res = run_sql("SELECT DATE_FORMAT(modification_date,'%%Y-%%m-%%d %%H:%%i:%%s') FROM bibrec WHERE id=%s", (recid,), 1)
if res and res[0][0]:
out = localtime_to_utc(res[0][0])
return out
def get_earliest_datestamp():
"""Get earliest datestamp in the database
Return empty string if no records or earliest datestamp in UTC.
"""
out = CFG_MIN_DATE
res = run_sql("SELECT DATE_FORMAT(MIN(creation_date),'%Y-%m-%d %H:%i:%s') FROM bibrec", n=1)
if res and res[0][0]:
out = localtime_to_utc(res[0][0])
return out
def get_latest_datestamp():
"""Get latest datestamp in the database
Return empty string if no records or latest datestamp in UTC.
"""
out = CFG_MAX_DATE
res = run_sql("SELECT DATE_FORMAT(MAX(modification_date),'%Y-%m-%d %H:%i:%s') FROM bibrec", n=1)
if res and res[0][0]:
out = localtime_to_utc(res[0][0])
return out
def check_date(date):
"""Check if given date has a correct format, complying to "Complete date" or
"Complete date plus hours, minutes and seconds" formats defined in ISO8601."""
if(re.match("\d\d\d\d-\d\d-\d\d(T\d\d:\d\d:\d\dZ)?\Z", date) is not None):
return date
else:
return ""
def normalize_date(date, dtime="T00:00:00Z"):
"""
Normalize the given date to the
"Complete date plus hours, minutes and seconds" format defined in ISO8601
(If "hours, minutes and seconds" part is missing, append 'dtime' to date).
'date' must be checked before with check_date(..).
Returns empty string if cannot be normalized
"""
if len(date) == 10:
date = date + dtime
elif len(date) != 20:
date = ""
return date
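## Illustrative sketch (not part of the original module): normalize_date()
## appends 'dtime' when only the date part is given, e.g.
## normalize_date("2014-01-01") -> "2014-01-01T00:00:00Z"
## normalize_date("2014-01-01", "T23:59:59Z") -> "2014-01-01T23:59:59Z"
## normalize_date("2014-01-01T12:00:00Z") -> "2014-01-01T12:00:00Z" (unchanged)
## normalize_date("bogus") -> ""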
def get_record_provenance(recid):
"""
Return the provenance XML representation of a record, suitable to be put
in the about tag.
"""
record = get_record(recid)
provenances = record_get_field_instances(record, CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[4])
out = ""
for provenance in provenances:
base_url = identifier = datestamp = metadata_namespace = origin_description = harvest_date = altered = ""
for (code, value) in provenance[0]:
if code == CFG_OAI_PROVENANCE_BASEURL_SUBFIELD:
base_url = value
elif code == CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[5]:
identifier = value
elif code == CFG_OAI_PROVENANCE_DATESTAMP_SUBFIELD:
datestamp = value
elif code == CFG_OAI_PROVENANCE_METADATANAMESPACE_SUBFIELD:
metadata_namespace = value
elif code == CFG_OAI_PROVENANCE_ORIGINDESCRIPTION_SUBFIELD:
origin_description = value
elif code == CFG_OAI_PROVENANCE_HARVESTDATE_SUBFIELD:
harvest_date = value
elif code == CFG_OAI_PROVENANCE_ALTERED_SUBFIELD:
altered = value
if base_url:
out += """<provenance xmlns="http://www.openarchives.org/OAI/2.0/provenance" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/provenance http://www.openarchives.org/OAI/2.0/provenance.xsd">"""
out += X.originDescription(harvestDate=harvest_date, altered=altered)(
X.baseURL()(base_url),
X.identifier()(identifier),
X.datestamp()(datestamp),
X.metadataNamespace()(metadata_namespace),
origin_description and X.originDescription(origin_description) or '' ## This is already XML
)
out += """</provenance>"""
return out
def get_record_rights(dummy):
"""
Return the record rights parts, suitable to be put in the about tag.
"""
return ""
## FIXME: This need to be thought in a good way. What shall we really
## put in the rights parts?
#record = get_record(recid)
#rights = record_get_field_instances(record, CFG_OAI_RIGHTS_FIELD[:3], CFG_OAI_RIGHTS_FIELD[3], CFG_OAI_RIGHTS_FIELD[4])
#license = record_get_field_instances(record, CFG_OAI_LICENSE_FIELD[:3], CFG_OAI_LICENSE_FIELD[3], CFG_OAI_LICENSE_FIELD[4])
#holder = date = rights_uri = contact = statement = terms = publisher = license_uri = ''
#if rights:
#for code, value in rights[0][0]:
#if code == CFG_OAI_RIGHTS_HOLDER_SUBFIELD:
#holder = value
#elif code == CFG_OAI_RIGHTS_DATE_SUBFIELD:
#date = value
#elif code == CFG_OAI_RIGHTS_URI_SUBFIELD:
#rights_uri = value
#elif code == CFG_OAI_RIGHTS_CONTACT_SUBFIELD:
#contact = value
#elif CFG_OAI_RIGHTS_STATEMENT_SUBFIELD:
#statement = value
#if license:
#for code, value in license[0][0]:
#if code == CFG_OAI_LICENSE_TERMS_SUBFIELD:
#terms = value
#elif code == CFG_OAI_LICENSE_PUBLISHER_SUBFIELD:
#publisher = value
#elif code == CFG_OAI_LICENSE_URI_SUBFIELD:
#license_uri = value
def print_record(recid, prefix='marcxml', verb='ListRecords', set_spec=None, set_last_updated=None):
"""Prints record 'recid' formatted according to 'prefix'.
- if record does not exist, return nothing.
- if record has been deleted and CFG_OAI_DELETED_POLICY is
'transient' or 'deleted', then return only header, with status
'deleted'.
- if record has been deleted and CFG_OAI_DELETED_POLICY is 'no',
then return nothing.
"""
record_exists_result = record_exists(recid) == 1
if record_exists_result:
sets = get_field(recid, CFG_OAI_SET_FIELD)
if set_spec is not None and not set_spec in sets and not [set_ for set_ in sets if set_.startswith("%s:" % set_spec)]:
## the record is not in the requested set, and is not
## in any subset
record_exists_result = False
if record_exists_result:
status = None
else:
status = 'deleted'
if not record_exists_result and CFG_OAI_DELETED_POLICY not in ('persistent', 'transient'):
return ""
idents = get_field(recid, CFG_OAI_ID_FIELD)
if not idents:
return ""
## FIXME: Move these checks in a bibtask
#try:
#assert idents, "No OAI ID for record %s, please do your checks!" % recid
#except AssertionError as err:
#register_exception(alert_admin=True)
#return ""
#try:
#assert len(idents) == 1, "More than OAI ID found for recid %s. Considering only the first one, but please do your checks: %s" % (recid, idents)
#except AssertionError as err:
#register_exception(alert_admin=True)
ident = idents[0]
header_body = EscapedXMLString('')
header_body += X.identifier()(ident)
if set_last_updated:
header_body += X.datestamp()(max(get_modification_date(recid), set_last_updated))
else:
header_body += X.datestamp()(get_modification_date(recid))
for set_spec in get_field(recid, CFG_OAI_SET_FIELD):
if set_spec and set_spec != CFG_OAI_REPOSITORY_GLOBAL_SET_SPEC:
# Print only if field not empty
header_body += X.setSpec()(set_spec)
header = X.header(status=status)(header_body)
if verb == 'ListIdentifiers':
return header
else:
if record_exists_result:
metadata_body = format_record(recid, CFG_OAI_METADATA_FORMATS[prefix][0])
metadata = X.metadata(body=metadata_body)
provenance_body = get_record_provenance(recid)
if provenance_body:
provenance = X.about(body=provenance_body)
else:
provenance = ''
rights_body = get_record_rights(recid)
if rights_body:
rights = X.about(body=rights_body)
else:
rights = ''
else:
metadata = ''
provenance = ''
rights = ''
return X.record()(header, metadata, provenance, rights)
def oai_list_metadata_formats(argd):
"""Generates response to oai_list_metadata_formats verb."""
if argd.get('identifier'):
recid = oai_get_recid(argd['identifier'])
_record_exists = record_exists(recid)
if _record_exists != 1 and (_record_exists != -1 or CFG_OAI_DELETED_POLICY == "no"):
return oai_error(argd, [("idDoesNotExist", "invalid record Identifier: %s" % argd['identifier'])])
out = ""
for prefix, (dummy, schema, namespace) in CFG_OAI_METADATA_FORMATS.items():
out += X.metadataFormat()(
X.metadataPrefix(prefix),
X.schema(schema),
X.metadataNamespace(namespace)
)
return oai_header(argd, "ListMetadataFormats") + out + oai_footer("ListMetadataFormats")
def oai_list_records_or_identifiers(req, argd):
"""Generates response to oai_list_records verb."""
verb = argd['verb']
resumption_token_was_specified = False
# check if the resumption_token did not expire
if argd.get('resumptionToken'):
resumption_token_was_specified = True
try:
cache = oai_cache_load(argd['resumptionToken'])
last_recid = cache['last_recid']
argd = cache['argd']
complete_list = cache['complete_list']
complete_list = filter_out_based_on_date_range(complete_list, argd.get('from', ''), argd.get('until', ''))
except Exception as e:
# Ignore cache not found errors
if not isinstance(e, IOError) or e.errno != 2:
register_exception(alert_admin=True)
req.write(oai_error(argd, [("badResumptionToken", "ResumptionToken expired or invalid: %s" % argd['resumptionToken'])]))
return
else:
last_recid = 0
complete_list = oai_get_recid_list(argd.get('set', ""), argd.get('from', ""), argd.get('until', ""))
if not complete_list: # noRecordsMatch error
req.write(oai_error(argd, [("noRecordsMatch", "no records correspond to the request")]))
return
cursor = 0
for cursor, recid in enumerate(complete_list):
## Let's fast-forward the cursor to point after the last recid that was
## disseminated successfully
if recid > last_recid:
break
set_last_updated = get_set_last_update(argd.get('set', ""))
req.write(oai_header(argd, verb))
for recid in list(complete_list)[cursor:cursor+CFG_OAI_LOAD]:
req.write(print_record(recid, argd['metadataPrefix'], verb=verb, set_spec=argd.get('set'), set_last_updated=set_last_updated))
if list(complete_list)[cursor+CFG_OAI_LOAD:]:
resumption_token = oai_generate_resumption_token(argd.get('set', ''))
cache = {
'argd': argd,
'last_recid': recid,
# FIXME introduce IP check if you use fireroles for guests
'id_user': current_user.get_id(),
'complete_list': complete_list.fastdump(),
}
oai_cache_dump(resumption_token, cache)
expdate = oai_get_response_date(CFG_OAI_EXPIRE)
req.write(X.resumptionToken(expirationDate=expdate, cursor=cursor, completeListSize=len(complete_list))(resumption_token))
elif resumption_token_was_specified:
## Since a resumptionToken was used we shall put a last empty resumptionToken
req.write(X.resumptionToken(cursor=cursor, completeListSize=len(complete_list))(""))
req.write(oai_footer(verb))
oai_cache_gc()
def oai_list_sets(argd):
"""
Lists available sets for OAI metadata harvesting.
"""
out = ""
# note: no flow control in ListSets
sets = get_all_sets().values()
if not sets:
return oai_error(argd, [("noSetHierarchy", "No sets have been configured for this repository")])
for set_ in sets:
out += " <set>\n"
out += X.setSpec()(set_[0]) + X.setName()(set_[1])
if set_[2]:
out += X.setDescription()(set_[2])
out = out + " </set>\n"
return oai_header(argd, "ListSets") + out + oai_footer("ListSets")
def oai_get_record(argd):
"""Returns record 'identifier' according to 'metadataPrefix' format for OAI metadata harvesting.
- if record does not exist, return oai_error 'idDoesNotExist'.
- if record has been deleted and CFG_OAI_DELETED_POLICY is
'transient' or 'deleted', then return only header, with status
'deleted'.
- if record has been deleted and CFG_OAI_DELETED_POLICY is 'no',
then return oai_error 'idDoesNotExist'.
"""
recid = oai_get_recid(argd['identifier'])
_record_exists = record_exists(recid)
if _record_exists == 1 or \
(_record_exists == -1 and CFG_OAI_DELETED_POLICY != 'no'):
out = print_record(recid, argd['metadataPrefix'], verb='GetRecord')
out = oai_header(argd, "GetRecord") + out + oai_footer("GetRecord")
else:
return oai_error(argd, [("idDoesNotExist", "invalid record Identifier: %s" % argd['identifier'])])
return out
def oai_identify(argd):
"""Generates a response to oai_identify verb.
script_url - *str* URL of the script used to access the
service. This is made necessary since the gateway
can be accessed either via /oai2d or /oai2d/ (or for
backward compatibility: oai2d.py or oai2d.py/), and
that the base URL must be returned in the Identify
response
"""
out = X.repositoryName()(CFG_SITE_NAME)
out += X.baseURL()(CFG_SITE_URL + '/oai2d')
out += X.protocolVersion()("2.0")
out += X.adminEmail()(CFG_SITE_SUPPORT_EMAIL)
out += X.earliestDatestamp()(get_earliest_datestamp())
out += X.deletedRecord()(CFG_OAI_DELETED_POLICY)
out += X.granularity()("YYYY-MM-DDThh:mm:ssZ")
if CFG_WEBSTYLE_HTTP_USE_COMPRESSION:
out += X.compression()('deflate')
out += X.description("""<oai-identifier xmlns="http://www.openarchives.org/OAI/2.0/oai-identifier"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai-identifier
http://www.openarchives.org/OAI/2.0/oai-identifier.xsd">""" +
X.scheme()("oai") +
X.repositoryIdentifier()(CFG_OAI_ID_PREFIX) +
X.delimiter()(":") +
X.sampleIdentifier()(CFG_OAI_SAMPLE_IDENTIFIER) +
"""</oai-identifier>""")
out += CFG_OAI_IDENTIFY_DESCRIPTION % {'CFG_SITE_URL': EscapedXMLString(CFG_SITE_URL)}
if CFG_OAI_FRIENDS:
friends = """<friends xmlns="http://www.openarchives.org/OAI/2.0/friends/"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/friends/
http://www.openarchives.org/OAI/2.0/friends.xsd">"""
for baseurl in CFG_OAI_FRIENDS:
friends += X.baseURL()(baseurl)
friends += """</friends>"""
out += X.description(friends)
out = oai_header(argd, "Identify") + out + oai_footer("Identify")
return out
def get_utc_now():
"""
Return current UTC time in the OAI-PMH format.
"""
return datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
def oai_build_request_element(argd=None):
"""
Build the request tag.
"""
if argd is None:
argd = {}
return X.responseDate()(get_utc_now()) + X.request(**argd)("%s/oai2d" % CFG_SITE_URL)
def oai_get_request_url():
"""Generates requesturl tag for OAI."""
requesturl = CFG_SITE_URL + "/oai2d"
return requesturl
def oai_get_response_date(delay=0):
"""Generates responseDate tag for OAI."""
return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(time.time() + delay))
def oai_get_recid(identifier):
"""Returns the recid corresponding to the OAI identifier. Prefer a non deleted
record if multiple recids matches but some of them are deleted (e.g. in
case of merging). Returns None if no record matches."""
if identifier:
recids = search_pattern(p=identifier, f=CFG_OAI_ID_FIELD, m='e', ap=-9)
if recids:
displayable_recids = get_records_that_can_be_displayed(
current_user.get('precached_permitted_restricted_collections', []),
recids
)
for recid in displayable_recids:
if record_exists(recid) > 0:
return recid
return None
def get_set_last_update(set_spec=""):
"""
Returns the last_update of a given set (or of all sets) in UTC
"""
if set_spec:
last_update = run_sql("SELECT DATE_FORMAT(MAX(last_updated),'%%Y-%%m-%%d %%H:%%i:%%s') FROM oaiREPOSITORY WHERE setSpec=%s", (set_spec, ))[0][0]
else:
last_update = run_sql("SELECT DATE_FORMAT(MAX(last_updated),'%Y-%m-%d %H:%i:%s') FROM oaiREPOSITORY")[0][0]
if last_update:
return localtime_to_utc(last_update)
else:
return None
def filter_out_based_on_date_range(recids, fromdate="", untildate="", set_spec=None):
""" Filter out recids based on date range."""
if fromdate:
fromdate = normalize_date(fromdate, "T00:00:00Z")
else:
fromdate = get_earliest_datestamp()
fromdate = utc_to_localtime(fromdate)
if untildate:
untildate = normalize_date(untildate, "T23:59:59Z")
else:
untildate = get_latest_datestamp()
untildate = utc_to_localtime(untildate)
if set_spec is not None: ## either it has a value or it is empty, thus meaning all records
last_updated = get_set_last_update(set_spec)
if last_updated is not None:
last_updated = utc_to_localtime(last_updated)
if last_updated > fromdate:
fromdate = utc_to_localtime(get_earliest_datestamp())
recids = intbitset(recids) ## Let's clone :-)
if fromdate and untildate:
recids &= intbitset(run_sql("SELECT id FROM bibrec WHERE modification_date BETWEEN %s AND %s", (fromdate, untildate)))
elif fromdate:
recids &= intbitset(run_sql("SELECT id FROM bibrec WHERE modification_date >= %s", (fromdate, )))
elif untildate:
recids &= intbitset(run_sql("SELECT id FROM bibrec WHERE modification_date <= %s", (untildate, )))
if cfg.get('CFG_OAI_FILTER_RESTRICTED_RECORDS', True):
recids = recids - get_all_restricted_recids()
return recids
def oai_get_recid_list(set_spec="", fromdate="", untildate=""):
"""
Returns list of recids for the OAI set 'set', modified from 'fromdate' until 'untildate'.
"""
ret = intbitset()
if not set_spec:
ret |= search_unit_in_bibxxx(p='*', f=CFG_OAI_SET_FIELD, m='e')
if CFG_OAI_DELETED_POLICY != 'no':
ret |= search_unit_in_bibxxx(p='*', f=CFG_OAI_PREVIOUS_SET_FIELD, m='e')
else:
ret |= search_unit_in_bibxxx(p=set_spec, f=CFG_OAI_SET_FIELD, m='e')
ret |= search_unit_in_bibxxx(p='%s:*' % set_spec, f=CFG_OAI_SET_FIELD, m='e')
if CFG_OAI_DELETED_POLICY != 'no':
ret |= search_unit_in_bibxxx(p=set_spec, f=CFG_OAI_PREVIOUS_SET_FIELD, m='e')
ret |= search_unit_in_bibxxx(p='%s:*' % set_spec, f=CFG_OAI_PREVIOUS_SET_FIELD, m='e')
if CFG_OAI_DELETED_POLICY == 'no':
ret -= search_unit_in_bibxxx(p='DELETED', f='980__%', m='e')
if CFG_CERN_SITE:
ret -= search_unit_in_bibxxx(p='DUMMY', f='980__%', m='e')
return filter_out_based_on_date_range(ret, fromdate, untildate, set_spec)
def oai_generate_resumption_token(set_spec):
"""Generates unique ID for resumption token management."""
fd, name = tempfile.mkstemp(dir=os.path.join(CFG_CACHEDIR, 'RTdata'), prefix='%s___' % set_spec)
os.close(fd)
return os.path.basename(name)
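## Illustrative note (not part of the original module): the resumption token
## is simply the basename of a scratch file created under CFG_CACHEDIR/RTdata,
## so for a hypothetical set_spec such as "cern:theses" the token looks like
## "cern:theses___XXXXXX", where the suffix is the random part chosen by
## tempfile.mkstemp().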
def oai_delete_resumption_tokens_for_set(set_spec):
"""
In case a set is modified by the admin interface, this will delete
any resumption token that is now invalid.
"""
aset = set_spec
while aset:
## delete the tokens of this set, then walk up through its supersets
for name in iglob(os.path.join(CFG_CACHEDIR, 'RTdata', '%s___*' % aset)):
os.remove(name)
if ":" not in aset:
break
aset = aset.rsplit(":", 1)[0]
for name in iglob(os.path.join(CFG_CACHEDIR, 'RTdata', '___*')):
os.remove(name)
def oai_cache_dump(resumption_token, cache):
"""
Given a resumption_token and the cache, stores the cache.
"""
cPickle.dump(cache, open(os.path.join(CFG_CACHEDIR, 'RTdata', resumption_token), 'w'), -1)
def oai_cache_load(resumption_token):
"""Restore the cache from the resumption_token."""
fullpath = os.path.join(CFG_CACHEDIR, 'RTdata', resumption_token)
if os.path.dirname(os.path.abspath(fullpath)) != os.path.abspath(
os.path.join(CFG_CACHEDIR, 'RTdata')):
raise ValueError("Invalid path")
cache = cPickle.load(open(fullpath))
if cache.get('id_user', 0) == current_user.get_id():
return cache
abort(401)
def oai_cache_gc():
"""
OAI Cache Garbage Collector.
"""
cache_dir = os.path.join(CFG_CACHEDIR, 'RTdata')
if not os.path.isdir(cache_dir):
os.makedirs(cache_dir)
for file_ in os.listdir(cache_dir):
filename = os.path.join(cache_dir, file_)
# cache entry expires when not modified during a specified period of time
if ((time.time() - os.path.getmtime(filename)) > CFG_OAI_EXPIRE):
try:
os.remove(filename)
except OSError as e:
# Most probably the cache was already deleted
pass
def get_all_sets():
"""
Return all the sets.
"""
res = run_sql("SELECT setSpec, setName, setDescription FROM oaiREPOSITORY")
ret = {}
for row in res:
ret[row[0]] = row
## Let's expand with all the sets that exist in the DB
for a_set in get_all_field_values(CFG_OAI_SET_FIELD):
if a_set not in ret:
ret[a_set] = (a_set, a_set, '')
## Let's expand with all the supersets
for a_set in ret.keys():
while ':' in a_set:
try:
a_set = a_set.rsplit(":", 1)[0]
except AttributeError:
a_set = ':'.join(a_set.split(":")[:-1])
if a_set not in ret:
ret[a_set] = (a_set, a_set, '')
if CFG_OAI_REPOSITORY_GLOBAL_SET_SPEC in ret:
## Let's remove the special global set
del ret[CFG_OAI_REPOSITORY_GLOBAL_SET_SPEC]
if '' in ret:
## '' is not a valid setSpec but might be in the MARC
del ret['']
return ret
def check_argd(argd):
"""
Check OAI arguments.
Also transform them from lists to strings.
"""
errors = []
## the same argument must not be given several times
bad_arguments_error = False
for param, value in iteritems(argd):
if len(value) > 1 and not bad_arguments_error:
errors.append(("badArgument", "More than one value specified for the %s argument: %s" % (param, value)))
bad_arguments_error = True ## This is needed only once
if len(value) > 0:
argd[param] = value[0]
else:
argd[param] = ''
## principal argument required
if argd['verb'] not in CFG_VERBS:
errors.append(("badVerb", "Illegal OAI verb: %s" % argd['verb']))
## only arguments defined for the given verb are allowed
for param in argd.keys():
if not param in CFG_VERBS.get(argd['verb'], []) and param != 'verb' \
and not bad_arguments_error:
errors.append(("badArgument", "The request includes illegal arguments for the given verb: %s" % param))
bad_arguments_error = True
break # Indicate only once
## resumptionToken exclusive
if argd.get('resumptionToken', '') != "" and \
len(argd.keys()) != 2 and not bad_arguments_error:
errors.append(("badArgument", "The resumptionToken was specified together with other arguments"))
bad_arguments_error = True
if argd.get('resumptionToken', None) == '':
errors.append(("badResumptionToken", "ResumptionToken invalid: %s" % argd.get('resumptionToken', None)))
## datestamp formats
if 'from' in argd and \
'from' in CFG_VERBS.get(argd['verb'], []):
from_length = len(argd['from'])
if check_date(argd['from']) == "":
errors.append(("badArgument", "Bad datestamp format in from: %s" % argd['from']))
else:
from_length = 0
if 'until' in argd and \
'until' in CFG_VERBS.get(argd['verb'], []):
until_length = len(argd['until'])
if check_date(argd['until']) == "":
errors.append(("badArgument", "Bad datestamp format in until: %s" % argd['until']))
else:
until_length = 0
if from_length != 0:
if until_length != 0:
if from_length != until_length:
errors.append(("badArgument", "From and until have two different formats: %s Vs. %s" % (from_length, until_length)))
if 'from' in argd and 'until' in argd \
and argd['from'] > argd['until'] and \
'from' in CFG_VERBS.get(argd['verb'], []) and \
'until' in CFG_VERBS.get(argd['verb'], []):
errors.append(("badArgument", "from argument comes after until argument: %s > %s" % (argd['from'], argd['until'])))
## Identify exclusive
if argd['verb'] == "Identify" and \
len(argd.keys()) != 1:
if not bad_arguments_error: # Do not repeat this error
errors.append(("badArgument", "The request includes illegal arguments"))
bad_arguments_error = True
## parameters for GetRecord
if argd['verb'] == "GetRecord" and \
'identifier' not in argd:
errors.append(("badArgument", "Record identifier missing"))
if argd['verb'] == "GetRecord" and \
'metadataPrefix' not in argd:
errors.append(("badArgument", "Missing metadataPrefix"))
## parameters for ListRecords and ListIdentifiers
if (argd['verb'] == "ListRecords" or argd['verb'] == "ListIdentifiers") and \
('resumptionToken' not in argd and 'metadataPrefix' not in argd):
errors.append(("badArgument", "Missing metadataPrefix"))
## Metadata prefix defined and valid
if 'metadataPrefix' in argd and \
not argd['metadataPrefix'] in CFG_OAI_METADATA_FORMATS:
errors.append(("cannotDisseminateFormat", "Chosen format is not supported. Valid formats are: %s" % ', '.join(CFG_OAI_METADATA_FORMATS.keys())))
return errors
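## Illustrative sketch (not part of the original module): check_argd() mutates
## argd in place, e.g. {'verb': ['Identify'], 'foo': ['x', 'y']} becomes
## {'verb': 'Identify', 'foo': 'x'}, and the returned list then holds a single
## ("badArgument", ...) entry for the repeated 'foo' value (further
## badArgument checks are suppressed once one such error is recorded).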
def oai_profile():
"""
Runs a benchmark
"""
from six import StringIO
oai_list_records_or_identifiers(StringIO(), argd={"metadataPrefix": "oai_dc", "verb": "ListRecords"})
oai_list_records_or_identifiers(StringIO(), argd={"metadataPrefix": "marcxml", "verb" :"ListRecords"})
oai_list_records_or_identifiers(StringIO(), argd={"metadataPrefix": "oai_dc", "verb": "ListIdentifiers"})
return
if __name__ == "__main__":
import profile
import pstats
profile.run('oai_profile()', "oai_profile")
p = pstats.Stats("oai_profile")
p.strip_dirs().sort_stats("cumulative").print_stats()
|
chokribr/invenio
|
invenio/legacy/oairepository/server.py
|
Python
|
gpl-2.0
| 34,702
|
#!/usr/bin/python2.7
#coding:utf-8
import hashlib
import requests
from dummy import *
info = {
'NAME':'Wordpress Reflect XSS',
'AUTHOR':'owlinrye,yangbh',
'TIME':'20140811',
'WEB':'',
'DESCRIPTION':'CVE-2012-3414'
}
opts = {
'url':'http://testasp.vulnweb.com', #'target ip'
}
# opts = [
# ['url','http://testasp.vulnweb.com','target url'],
# ]
def Assign(services):
if services.has_key('url') and services.has_key('cms'):
if services['cms'] == 'Wordpress':
return True
return False
def Audit(services):
url = services['url'] + '/wp-includes/js/swfupload/swfupload.swf'
try:
rqu = requests.get(url)
if rqu.status_code == 200 and validate(rqu.text):
security_note(url)
except:
pass
def validate(res):
val_hash = '3a1c6cc728dddc258091a601f28a9c12'
res_md5 = hashlib.md5(res.encode('utf-8'))
if val_hash == res_md5.hexdigest():
return True
else:
return False
# ----------------------------------------------------------------------------------------------------
# not tested yet
# ----------------------------------------------------------------------------------------------------
if __name__=='__main__':
services = {'url':'http://www.eguan.cn'}
pprint(Audit(services))
pprint(services)
|
GHubgenius/Hammer
|
plugins/Web_Applications/wordpress_reflect_xss.py
|
Python
|
gpl-2.0
| 1,188
|
from navmazing import NavigateToSibling
from widgetastic.widget import View
from widgetastic_patternfly import Accordion
from widgetastic_patternfly import Dropdown
from cfme.base import Server
from cfme.common import BaseLoggedInPage
from cfme.utils.appliance.implementations.ui import CFMENavigateStep
from cfme.utils.appliance.implementations.ui import navigator
from widgetastic_manageiq import ManageIQTree
class ControlExplorerView(BaseLoggedInPage):
@property
def in_control_explorer(self):
return (
self.logged_in_as_current_user and
self.navigation.currently_selected == ['Control', 'Explorer'])
@property
def is_displayed(self):
return self.in_control_explorer
@View.nested
class policy_profiles(Accordion): # noqa
ACCORDION_NAME = "Policy Profiles"
tree = ManageIQTree()
@View.nested
class policies(Accordion): # noqa
tree = ManageIQTree()
@View.nested
class events(Accordion): # noqa
tree = ManageIQTree()
@View.nested
class conditions(Accordion): # noqa
tree = ManageIQTree()
@View.nested
class actions(Accordion): # noqa
tree = ManageIQTree()
@View.nested
class alert_profiles(Accordion): # noqa
ACCORDION_NAME = "Alert Profiles"
tree = ManageIQTree()
@View.nested
class alerts(Accordion): # noqa
tree = ManageIQTree()
configuration = Dropdown("Configuration")
@navigator.register(Server)
class ControlExplorer(CFMENavigateStep):
VIEW = ControlExplorerView
prerequisite = NavigateToSibling("LoggedIn")
def step(self, *args, **kwargs):
self.view.navigation.select("Control", "Explorer")
|
nachandr/cfme_tests
|
cfme/control/explorer/__init__.py
|
Python
|
gpl-2.0
| 1,733
|
#! /usr/bin/env python
#
# This file is part of the LibreOffice project.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
import os
import unittest
from collections import deque
import unohelper
from org.libreoffice.unotest import UnoInProcess
class Fdo84315(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._uno = UnoInProcess()
cls._uno.setUp()
workdir = os.environ[ "WORKDIR_FOR_BUILD" ]
cls._xDoc = cls._uno.openDoc(workdir + "/CppunitTest/fdo84315.odb")
@classmethod
def tearDownClass(cls):
cls._uno.tearDown()
def __test_Query(self, column_name, expected_type, xResultset):
self.assertTrue(xResultset)
xMeta = xResultset.MetaData
self.assertEqual(xMeta.ColumnCount, 1)
self.assertEqual(xResultset.findColumn(column_name), 1)
self.assertEqual(xMeta.getColumnName(1), column_name)
self.assertEqual(xMeta.getColumnType(1), expected_type)
return xMeta
def __test_ResultSetInteger(self, xResultset, expected_values):
while xResultset.next():
self.assertEqual(xResultset.getInt(1), expected_values.popleft())
self.assertEqual(len(expected_values), 0)
def __test_ResultSetString(self, xResultset, expected_values):
while xResultset.next():
self.assertEqual(xResultset.getString(1), expected_values.popleft())
self.assertEqual(len(expected_values), 0)
def test_fdo84315(self):
xDoc = self.__class__._xDoc
xDataSource = xDoc.DataSource
xCon = xDataSource.getConnection('','')
xStatement = xCon.createStatement()
NUMERIC = 2
VAR_CHAR = 12
INTEGER = 4
xResultset = xStatement.executeQuery('SELECT "count" FROM "test_table"')
expected_values = deque([42, 4711])
xMeta = self.__test_Query('count', NUMERIC, xResultset)
self.__test_ResultSetInteger(xResultset, expected_values)
xResultset = xStatement.executeQuery('SELECT "name" FROM "test_table"')
expected_values = deque(['foo', 'bar'])
xMeta = self.__test_Query('name', VAR_CHAR, xResultset)
self.__test_ResultSetString(xResultset, expected_values)
xResultset = xStatement.executeQuery('SELECT "id" FROM "test_table"')
expected_values = deque([0, 1])
xMeta = self.__test_Query('id', INTEGER, xResultset)
self.__test_ResultSetInteger(xResultset, expected_values)
xCon.dispose()
if __name__ == '__main__':
unittest.main()
|
beppec56/core
|
dbaccess/qa/python/fdo84315.py
|
Python
|
gpl-3.0
| 2,691
|
#!/usr/bin/python
#====================================================================#
# Script to get the eigenvalues from an abinit _EIG.nc netcdf file #
#====================================================================#
#########
#IMPORTS#
#########
import numpy as N
import matplotlib.pyplot as P
import netCDF4 as nc
import sys
import os
import argparse
import time
#############
##VARIABLES##
#############
class VariableContainer:pass
#Constants
csts = VariableContainer()
csts.hartree2ev = N.float(27.211396132)
csts.ev2hartree = N.float(1/csts.hartree2ev)
csts.sqrtpi = N.float(N.sqrt(N.pi))
csts.invsqrtpi = N.float(1/csts.sqrtpi)
csts.TOLKPTS = N.float(0.00001)
csts.fig_width = 8
csts.fig_height = 6
csts.markersize = 10
csts.markeredgewidth = 2
###########
##METHODS##
###########
#Get the coefficients of the line going through 2 points of xi,yi and xj,yj. By default, begin and end points
#If the indices are given (2 integers), the line must go through these two points
def line_ab(x,y,indices=None):
if len(N.shape(x)) != 1 or len(N.shape(y)) != 1:
print 'ERROR: the array x and/or y is not 1D ... exit'
sys.exit()
if len(x) != len(y):
print 'ERROR: x and y arrays have different lengths ... exit'
sys.exit()
if indices == None:
indices = N.array([0,len(x)-1])
else:
if indices[0] < 0 or indices[1] >= len(x) or indices[0] == indices[1]:
print 'ERROR: indices (0 <= indices[0]=%s < indices[1]=%s < len(x)=%s) are wrong ... exit' %(indices[0],indices[1],len(x))
sys.exit()
a = (y[indices[0]]-y[indices[1]])/(x[indices[0]]-x[indices[1]])
b = (y[indices[0]]*x[indices[1]]-x[indices[0]]*y[indices[1]])/(x[indices[1]]-x[indices[0]])
return N.array([a,b],N.float)
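#Illustrative example (not part of the original script): for the two points
#(0,1) and (1,3) the slope is 2 and the intercept is 1, i.e.
# line_ab(N.array([0.,1.]),N.array([1.,3.])) -> array([ 2., 1.])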
#Get the coefficients of the polynom of degree 2 going through 2 points xi,yi and xj,yj and using a least squares procedure for the other points
#If the indices are given (2 integers), the polynom must go through these two points
def poly2d_ab(x,y,indices=None):
if len(N.shape(x)) != 1 or len(N.shape(y)) != 1:
print 'ERROR: the array x and/or y is not 1D ... exit'
sys.exit()
if len(x) != len(y):
print 'ERROR: x and y arrays have different lengths ... exit'
sys.exit()
if indices == None:
indices = N.array([0,len(x)-1])
else:
if indices[0] < 0 or indices[1] >= len(x) or indices[0] == indices[1]:
print 'ERROR: indices (0 <= indices[0]=%s < indices[1]=%s < len(x)=%s) are wrong ... exit' %(indices[0],indices[1],len(x))
sys.exit()
x1 = x[indices[0]]
x2 = x[indices[1]]
y1 = y[indices[0]]
y2 = y[indices[1]]
x3 = (x1+x2)/2
newy = y - y1*(x-x2)*(x-x3)/((x1-x2)*(x1-x3)) - y2*(x-x1)*(x-x3)/((x2-x1)*(x2-x3))
A = N.vstack([4*(x*x-(x1+x2)*x+x1*x2)/(2*x1*x2-x2*x2-x1*x1)]).T
y3 = N.linalg.lstsq(A,newy)[0]
pp = N.polyfit(N.array([x1,x2,x3]),N.array([y1,y2,y3]),2)
return pp
#Get the coefficients of the polynom of degree "degree" going through 2 points xi,yi and xj,yj and using a least squares procedure for the other points
#If the indices are given (2 integers), the polynom must go through these two points
def polynd_ab(x,y,degree,indices=None):
if degree < 1:
print 'ERROR: cannot find a polynomial going through two points with this degree (%s) ... exit' %degree
sys.exit()
if len(N.shape(x)) != 1 or len(N.shape(y)) != 1:
print 'ERROR: the array x and/or y is not 1D ... exit'
sys.exit()
if len(x) != len(y):
print 'ERROR: x and y arrays have different lengths ... exit'
sys.exit()
if indices == None:
indices = N.array([0,len(x)-1])
else:
if indices[0] < 0 or indices[1] >= len(x) or indices[0] == indices[1]:
print 'ERROR: indices (0 <= indices[0]=%s < indices[1]=%s < len(x)=%s) are wrong ... exit' %(indices[0],indices[1],len(x))
sys.exit()
if degree == 1:
pp = N.polyfit(N.array([x[indices[0]],x[indices[1]]]),N.array([y[indices[0]],y[indices[1]]]),degree)
return pp
x1 = x[indices[0]]
x2 = x[indices[1]]
y1 = y[indices[0]]
y2 = y[indices[1]]
xm = N.linspace(N.min(x),N.max(x),degree,endpoint=False)[1:]
prod1 = (x-x2)/(x1-x2)*y1
prod2 = (x-x1)/(x2-x1)*y2
coeff_list = list()
for ii in range(len(xm)):
prod1 = prod1*(x-xm[ii])/(x1-xm[ii])
prod2 = prod2*(x-xm[ii])/(x2-xm[ii])
prod_ii = (x-x2)*(x-x1)/((xm[ii]-x2)*(xm[ii]-x1))
for jj in range(len(xm)):
if ii != jj:
prod_ii = prod_ii*(x-xm[jj])/(xm[ii]-xm[jj])
coeff_list.append(prod_ii)
p1 = prod1 + prod2
newy = y - p1
A = N.vstack(N.array(coeff_list)).T
ym = N.linalg.lstsq(A,newy)[0]
xx = N.array([x1])
yy = N.array([y1])
for ii in range(len(xm)):
xx = N.append(xx,[xm[ii]])
yy = N.append(yy,[ym[ii]])
xx = N.append(xx,[x2])
yy = N.append(yy,[y2])
pp = N.polyfit(xx,yy,degree)
return pp
#Get the coefficients of the polynom of degree "degree" going through 1 point xi,yi and using a least squares procedure for the other points
#If the index is given (1 integer), the polynom must go through this point; otherwise, it takes the first point in the list by default
def polynd_a(x,y,degree,indices=None):
if degree < 1:
print 'ERROR: cannot find a polynomial going through one point with this degree (%s) ... exit' %degree
sys.exit()
if len(N.shape(x)) != 1 or len(N.shape(y)) != 1:
print 'ERROR: the array x and/or y is not 1D ... exit'
sys.exit()
if len(x) != len(y):
print 'ERROR: x and y arrays have different lengths ... exit'
sys.exit()
if indices == None:
indices = N.array([0])
elif len(indices) != 1:
print 'ERROR: there should be only one index of a point through which the polynom has to go ... exit'
sys.exit()
else:
if indices[0] < 0 or indices[0] >= len(x):
print 'ERROR: index (0 <= indices[0]=%s < len(x)=%s) is wrong ... exit' %(indices[0],len(x))
sys.exit()
x1 = x[indices[0]]
y1 = y[indices[0]]
xm = N.linspace(N.min(x),N.max(x),degree+1,endpoint=True)[1:]
prod1 = y1
coeff_list = list()
for ii in range(len(xm)):
prod1 = prod1*(x-xm[ii])/(x1-xm[ii])
prod_ii = (x-x1)/(xm[ii]-x1)
for jj in range(len(xm)):
if ii != jj:
prod_ii = prod_ii*(x-xm[jj])/(xm[ii]-xm[jj])
coeff_list.append(prod_ii)
newy = y - prod1
A = N.vstack(N.array(coeff_list)).T
ym = N.linalg.lstsq(A,newy)[0]
xx = N.array([x1])
yy = N.array([y1])
for ii in range(len(xm)):
xx = N.append(xx,[xm[ii]])
yy = N.append(yy,[ym[ii]])
pp = N.polyfit(xx,yy,degree)
return pp
#Given the polyfit_list and energy_pivots, find the 2nd-degree polynom whose slope goes to zero (at a point separated from the starting energy by delta_energy)
#starting from a given energy (the derivative is continuous at this point). Returns the polynom and the value at the vertex of the polynom (maximum or minimum)
def smoothend(energy_pivots,polyfit_list,energy,delta_energy_ev=None):
method = 2
if delta_energy_ev == None:
if method == 1:
delta_energy_ev = 0.05
elif method == 2:
delta_energy_ev = 1.00
if method == 1:
xi = energy
if xi < energy_pivots[0]:
print 'Error: energy should be larger than the first energy pivot ...'
sys.exit()
if xi > energy_pivots[-1]:
ii = len(energy_pivots)
else:
ii = N.argwhere(energy_pivots>xi)[0]
fpi = N.polyval(N.polyder(polyfit_list[ii]),xi)
fi = N.polyval(polyfit_list[ii],xi)
if fpi == 0:
print 'TODO, the derivative is 0 ... easy but lazy :-)'
return
elif fpi > 0:
fv = fi + delta_energy_ev
elif fpi < 0:
fv = fi - delta_energy_ev
aa = fpi**2/(4*(fi-fv))
bb = fpi - 2*aa*xi
cc = fi - aa*xi**2 - bb*xi
xv = -bb/(2*aa)
new_energy_pivots = N.zeros(ii+2,N.float)
for jj in N.arange(ii):
new_energy_pivots[jj] = energy_pivots[jj]
new_energy_pivots[-2] = energy
new_energy_pivots[-1] = xv
new_polyfit_list = list()
for jj in N.arange(ii+1):
new_polyfit_list.append(polyfit_list[jj])
new_polyfit_list.append([aa,bb,cc])
new_polyfit_list.append([fv])
return new_energy_pivots,new_polyfit_list
if method == 2:
xi = energy
if xi < energy_pivots[0]:
print 'Error: energy should be larger than the first energy pivot ...'
sys.exit()
if xi > energy_pivots[-1]:
ii = len(energy_pivots)
else:
ii = N.argwhere(energy_pivots>xi)[0]
fpi = N.polyval(N.polyder(polyfit_list[ii]),xi)
fi = N.polyval(polyfit_list[ii],xi)
if fpi == 0:
new_energy_pivots = N.zeros(ii+1,N.float)
for jj in N.arange(ii):
new_energy_pivots[jj] = energy_pivots[jj]
new_energy_pivots[-1] = energy
new_polyfit_list = list()
for jj in N.arange(ii+1):
new_polyfit_list.append(polyfit_list[jj])
new_polyfit_list.append([fi])
return new_energy_pivots,new_polyfit_list
else:
xv = xi + delta_energy_ev
bb = fpi/(1.0-xi/xv)
aa = -bb/(2.0*xv)
cc = fi - aa*xi**2 - bb*xi
pp = [aa,bb,cc]
fv = N.polyval(pp,xv)
new_energy_pivots = N.zeros(ii+2,N.float)
for jj in N.arange(ii):
new_energy_pivots[jj] = energy_pivots[jj]
new_energy_pivots[-2] = xi
new_energy_pivots[-1] = xv
new_polyfit_list = list()
for jj in N.arange(ii+1):
new_polyfit_list.append(polyfit_list[jj])
new_polyfit_list.append(pp)
new_polyfit_list.append([fv])
if N.abs(fv-fi) > 0.05:
print 'WARNING : the last energy pivot is more than 0.05 eV from the constant correction'
else:
print 'COMMENT : smoothing the end of the graph starting at energy {0: 8.8f} eV'.format(xi)
print ' => constant correction for higher states : fv = {0: 8.8f} eV'.format(fv)
print ' => last energy pivot : fi = {0: 8.8f} eV'.format(fi)
print ' => fv - fi = {0: 8.8f} eV'.format(fv-fi)
return new_energy_pivots,new_polyfit_list
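#Illustrative note (not part of the original script): with method 2 the
#parabola [aa,bb,cc] is built so that its slope matches fpi at xi
#(2*aa*xi+bb = bb*(1-xi/xv) = fpi) and vanishes at the vertex xv = xi +
#delta_energy_ev (2*aa*xv+bb = -bb+bb = 0), which is what makes the
#correction flatten out smoothly after the last energy pivot.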
def write_polyfit(filename,energypivots,polyfitlist,energypivots_2=None,polyfitlist_2=None):
writer = open(filename,'w')
if energypivots_2 == None:
writer.write('nsppol 1\n')
else:
print 'write_polyfit not yet implemented for nsppol = 2 ... returning'
writer.write('NOT IMPLEMENTED\n')
writer.close()
return
writer.write('%s\n' %len(polyfitlist))
for ie in range(len(energypivots)):
writer.write('%s ' %energypivots[ie])
writer.write('\n')
for ip in range(len(polyfitlist)):
pfit = polyfitlist[ip]
for ic in range(len(pfit)):
writer.write('%s ' %pfit[ic])
writer.write('\n')
writer.close()
def read_polyfit(filename):
reader = open(filename,'r')
data = reader.readlines()
if data[0][:8] != 'nsppol 1':
print data[0]
print data[0][:8]
print 'read_polyfit not yet implemented for nsppol != 1 ... returning'
reader.close()
return
npfit = N.int(data[1])
energypivots = N.zeros(npfit-1,N.float)
polyfitlist = list()
for ie, ep in enumerate(data[2].split()):
energypivots[ie] = N.float(ep)
for ip in range(npfit):
sp = data[3+ip].split()
tmp = N.zeros(len(sp),N.float)
for ic, cc in enumerate(sp):
tmp[ic] = N.float(cc)
polyfitlist.append(tmp)
return energypivots,polyfitlist
###########
##CLASSES##
###########
class EigenvalueContainer(object):
nsppol = None
nkpt = None
mband = None
eigenvalues = None
units = None
wtk = None
filename = None
filefullpath = None
bd_indices = None
eigenvalue_type = None
kpoints = None
GROUP_BANDS_BS_TOL_EV = N.float(0.01)
GROUP_BANDS_BS_TOL = GROUP_BANDS_BS_TOL_EV*csts.ev2hartree
GROUP_BANDS_TOL_EV = N.float(0.2)
GROUP_BANDS_TOL = GROUP_BANDS_TOL_EV*csts.ev2hartree
#kpoint_sampling_type: can be Monkhorst-Pack or Bandstructure
KPT_W90_TOL = N.float(1.0e-6)
KPT_DFT_TOL = N.float(1.0e-8)
kpoint_sampling_type = 'Monkhorst-Pack'
inputgvectors = None
gvectors = None
special_kpoints = None
special_kpoints_names = None
special_kpoints_indices = None
kpoint_path_values = None
kpoint_reduced_path_values = None
kpoint_path_length = None
#reduced_norm = None
norm_paths = None
norm_reduced_paths = None
def __init__(self,directory=None,filename=None):
if filename == None:return
if directory == None:directory='.'
self.filename = filename
self.filefullpath = '%s/%s' %(directory,filename)
self.file_open(self.filefullpath)
def file_open(self,filefullpath):
if filefullpath[-3:] == '_GW':
self.gw_file_open(filefullpath)
else:
self.nc_eig_open(filefullpath)
def set_kpoint_sampling_type(self,kpoint_sampling_type):
if kpoint_sampling_type != 'Monkhorst-Pack' and kpoint_sampling_type != 'Bandstructure':
print 'ERROR: kpoint_sampling_type "%s" does not exist' %kpoint_sampling_type
print ' it should be "Monkhorst-Pack" or "Bandstructure" ... exit'
sys.exit()
self.kpoint_sampling_type = kpoint_sampling_type
def find_band_groups(self,bandstructure_file=None,tolerance_ev=None,spinchoice=None):
if self.nsppol > 1:
print 'WARNING: find_band_groups is carefully checked only for nsppol = 1'
if spinchoice == None:
print 'COMMENT: find_band_groups handles spins up and down on equal footing'
spinchoice = 'common'
elif spinchoice == 'common':
print 'COMMENT: find_band_groups handles spins up and down on equal footing'
elif spinchoice == 'separate':
print 'COMMENT: find_band_groups handles spins up and down as 2 different band structures'
if bandstructure_file != None:
ec_bs = EigenvalueContainer(filename=bandstructure_file)
eigenvalues = ec_bs.eigenvalues
nkpt = ec_bs.nkpt
nband = ec_bs.mband
nsppol = ec_bs.nsppol
else:
eigenvalues = self.eigenvalues
nkpt = self.nkpt
nband = self.mband
nsppol = self.nsppol
if tolerance_ev == None:
if bandstructure_file == None:
tolerance = self.GROUP_BANDS_TOL
else:
tolerance = self.GROUP_BANDS_BS_TOL
else:
tolerance = tolerance_ev*csts.ev2hartree
if spinchoice == 'common':
energy_pivots_list = list()
band = eigenvalues[:,:,0]
for iband in range(1,nband):
if N.min(eigenvalues[:,:,iband]) - N.max(band) > tolerance:
energy_pivots_list.append((N.min(eigenvalues[:,:,iband]) + N.max(band))/2)
band = eigenvalues[:,:,iband]
return N.array(energy_pivots_list)
elif spinchoice == 'separate':
energy_pivots_list_up = list()
energy_pivots_list_down = list()
bandup = eigenvalues[0,:,0]
banddown = eigenvalues[1,:,0]
for iband in range(1,nband):
if N.min(eigenvalues[0,:,iband]) - N.max(bandup) > tolerance:
energy_pivots_list_up.append((N.min(eigenvalues[0,:,iband]) + N.max(bandup))/2)
bandup = eigenvalues[0,:,iband]
if N.min(eigenvalues[1,:,iband]) - N.max(banddown) > tolerance:
energy_pivots_list_down.append((N.min(eigenvalues[1,:,iband]) + N.max(banddown))/2)
banddown = eigenvalues[1,:,iband]
return N.array(energy_pivots_list_up),N.array(energy_pivots_list_down)
def correct_kpt(self,kpoint,tolerance=N.float(1.0e-6)):
kpt_correct = N.array(kpoint,N.float)
changed = False
for ii in range(3):
if N.allclose(kpoint[ii],N.float(1.0/3.0),atol=tolerance):
kpt_correct[ii] = N.float(1.0/3.0)
changed = True
elif N.allclose(kpoint[ii],N.float(1.0/6.0),atol=tolerance):
kpt_correct[ii] = N.float(1.0/6.0)
changed = True
elif N.allclose(kpoint[ii],N.float(-1.0/6.0),atol=tolerance):
kpt_correct[ii] = N.float(-1.0/6.0)
changed = True
elif N.allclose(kpoint[ii],N.float(-1.0/3.0),atol=tolerance):
kpt_correct[ii] = N.float(-1.0/3.0)
changed = True
if changed:
print 'COMMENT: kpoint %15.12f %15.12f %15.12f has been changed to %15.12f %15.12f %15.12f' %(kpoint[0],kpoint[1],kpoint[2],kpt_correct[0],kpt_correct[1],kpt_correct[2])
return kpt_correct
def find_special_kpoints(self,gvectors=None):
if self.kpoint_sampling_type != 'Bandstructure':
print 'ERROR: special kpoints are useful only for bandstructures ... returning find_special_kpoints'
return
if self.eigenvalue_type == 'W90':
correct_kpt_tolerance = N.float(1.0e-4)
KPT_TOL = self.KPT_W90_TOL
elif self.eigenvalue_type == 'DFT':
correct_kpt_tolerance = N.float(1.0e-6)
KPT_TOL = self.KPT_DFT_TOL
else:
print 'ERROR: eigenvalue_type is "%s" while it should be "W90" or "DFT" ... returning find_special_kpoints' %self.eigenvalue_type
return
if gvectors == None:
self.inputgvectors = False
self.gvectors = N.identity(3,N.float)
else:
if N.shape(gvectors) != (3, 3):
print 'ERROR: wrong gvectors ... exiting now'
sys.exit()
self.inputgvectors = True
self.gvectors = gvectors
full_kpoints = N.zeros((self.nkpt,3),N.float)
for ikpt in range(self.nkpt):
full_kpoints[ikpt,:] = self.kpoints[ikpt,0]*self.gvectors[0,:]+self.kpoints[ikpt,1]*self.gvectors[1,:]+self.kpoints[ikpt,2]*self.gvectors[2,:]
delta_kpt = full_kpoints[1,:]-full_kpoints[0,:]
self.special_kpoints_indices = list()
self.special_kpoints = list()
self.special_kpoints_indices.append(0)
self.special_kpoints.append(self.correct_kpt(self.kpoints[0,:],tolerance=correct_kpt_tolerance))
for ikpt in range(1,self.nkpt-1):
thisdelta = full_kpoints[ikpt+1,:]-full_kpoints[ikpt,:]
if not N.allclose(thisdelta,delta_kpt,atol=KPT_TOL):
delta_kpt = thisdelta
self.special_kpoints_indices.append(ikpt)
self.special_kpoints.append(self.correct_kpt(self.kpoints[ikpt,:],tolerance=correct_kpt_tolerance))
self.special_kpoints_indices.append(N.shape(self.kpoints)[0]-1)
self.special_kpoints.append(self.correct_kpt(self.kpoints[-1,:],tolerance=correct_kpt_tolerance))
print 'Special Kpoints : '
print ' {0:d} : {1[0]: 8.8f} {1[1]: 8.8f} {1[2]: 8.8f}'.format(1,self.kpoints[0,:])
self.norm_paths = N.zeros((N.shape(self.special_kpoints_indices)[0]-1),N.float)
self.norm_reduced_paths = N.zeros((N.shape(self.special_kpoints_indices)[0]-1),N.float)
for ispkpt in range(1,N.shape(self.special_kpoints_indices)[0]):
self.norm_paths[ispkpt-1] = N.linalg.norm(full_kpoints[self.special_kpoints_indices[ispkpt]]-full_kpoints[self.special_kpoints_indices[ispkpt-1]])
self.norm_reduced_paths[ispkpt-1] = N.linalg.norm(self.special_kpoints[ispkpt]-self.special_kpoints[ispkpt-1])
print ' {2:d}-{3:d} path length : {0: 8.8f} | reduced path length : {1: 8.8f}'.\
format(self.norm_paths[ispkpt-1],self.norm_reduced_paths[ispkpt-1],ispkpt,ispkpt+1)
print ' {0:d} : {1[0]: 8.8f} {1[1]: 8.8f} {1[2]: 8.8f}'.format(ispkpt+1,self.kpoints[self.special_kpoints_indices[ispkpt],:])
self.kpoint_path_length = N.sum(self.norm_paths)
self.kpoint_reduced_path_length = N.sum(self.norm_reduced_paths)
self.normalized_kpoint_path_norm = self.norm_paths/self.kpoint_path_length
self.normalized_kpoint_reduced_path_norm = self.norm_reduced_paths/self.kpoint_reduced_path_length
kptredpathval = list()
kptpathval = list()
kptredpathval.append(N.float(0.0))
kptpathval.append(N.float(0.0))
curlen = N.float(0.0)
redcurlen = N.float(0.0)
for ispkpt in range(1,N.shape(self.special_kpoints_indices)[0]):
kptredpathval.extend(N.linspace(redcurlen,redcurlen+self.norm_reduced_paths[ispkpt-1],self.special_kpoints_indices[ispkpt]-self.special_kpoints_indices[ispkpt-1]+1)[1:])
kptpathval.extend(N.linspace(curlen,curlen+self.norm_paths[ispkpt-1],self.special_kpoints_indices[ispkpt]-self.special_kpoints_indices[ispkpt-1]+1)[1:])
redcurlen = redcurlen + self.norm_reduced_paths[ispkpt-1]
curlen = curlen + self.norm_paths[ispkpt-1]
self.kpoint_path_values = N.array(kptpathval,N.float)
self.kpoint_reduced_path_values = N.array(kptredpathval,N.float)
self.normalized_kpoint_path_values = self.kpoint_path_values/self.kpoint_path_length
self.normalized_kpoint_reduced_path_values = self.kpoint_reduced_path_values/self.kpoint_reduced_path_length
self.special_kpoints = N.array(self.special_kpoints,N.float)
def has_eigenvalue(self,nsppol,isppol,kpoint,iband):
if self.nsppol != nsppol:
return False
for ikpt in range(self.nkpt):
if N.absolute(self.kpoints[ikpt,0]-kpoint[0]) < csts.TOLKPTS and \
N.absolute(self.kpoints[ikpt,1]-kpoint[1]) < csts.TOLKPTS and \
N.absolute(self.kpoints[ikpt,2]-kpoint[2]) < csts.TOLKPTS:
if iband >= self.bd_indices[isppol,ikpt,0]-1 and iband < self.bd_indices[isppol,ikpt,1]:
return True
return False
return False
def get_eigenvalue(self,nsppol,isppol,kpoint,iband):
for ikpt in range(self.nkpt):
if N.absolute(self.kpoints[ikpt,0]-kpoint[0]) < csts.TOLKPTS and \
N.absolute(self.kpoints[ikpt,1]-kpoint[1]) < csts.TOLKPTS and \
N.absolute(self.kpoints[ikpt,2]-kpoint[2]) < csts.TOLKPTS:
return self.eigenvalues[isppol,ikpt,iband]
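    # gw_file_open: reads a _GW eigenvalue file. As parsed here, the first line holds
    # "nkpt nsppol"; then, for each k-point and spin, one line with the reduced
    # k-point, one line with the number of bands, and one line per band starting with
    # the band index followed by the eigenvalue in eV. Values are stored in Hartree.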
def gw_file_open(self,filefullpath):
if not (os.path.isfile(filefullpath)):
            print 'ERROR : file "%s" does not exist' %filefullpath
print '... exiting now ...'
sys.exit()
self.eigenvalue_type = 'GW'
self.nsppol = None
self.nkpt = None
self.mband = None
self.eigenvalues = None
self.units = None
self.filefullpath = filefullpath
reader = open(self.filefullpath,'r')
filedata = reader.readlines()
reader.close()
self.nkpt = N.int(filedata[0].split()[0])
self.kpoints = N.ones([self.nkpt,3],N.float)
self.nsppol = N.int(filedata[0].split()[1])
self.bd_indices = N.zeros((self.nsppol,self.nkpt,2),N.int)
icur = 1
nbd_kpt = N.zeros([self.nsppol,self.nkpt],N.int)
for ikpt in range(self.nkpt):
for isppol in range(self.nsppol):
self.kpoints[ikpt,:] = N.array(filedata[icur].split()[:],N.float)
icur = icur + 1
nbd_kpt[isppol,ikpt] = N.int(filedata[icur])
self.bd_indices[isppol,ikpt,0] = N.int(filedata[icur+1].split()[0])
self.bd_indices[isppol,ikpt,1] = N.int(filedata[icur+nbd_kpt[isppol,ikpt]].split()[0])
icur = icur + nbd_kpt[isppol,ikpt] + 1
self.mband = N.max(self.bd_indices[:,:,1])
self.eigenvalues = N.zeros([self.nsppol,self.nkpt,self.mband],N.float)
self.eigenvalues[:,:,:] = N.nan
ii = 3
for ikpt in range(self.nkpt):
for isppol in range(self.nsppol):
for iband in range(self.bd_indices[isppol,ikpt,0]-1,self.bd_indices[isppol,ikpt,1]):
self.eigenvalues[isppol,ikpt,iband] = N.float(filedata[ii].split()[1])
ii = ii + 1
ii = ii + 2
self.eigenvalues = csts.ev2hartree*self.eigenvalues
self.units = 'Hartree'
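    # pfit_gw_eigenvalues_ha: applies the piecewise polynomial GW correction to the
    # DFT eigenvalues of this container and returns the result in Hartree. For each
    # eigenvalue E (in eV), delta = polyval(fit_i, E) where i is the first interval
    # with E <= energy_pivots[i] (the last fit is used above the last pivot), and the
    # corrected value is E + delta. When the *_down arguments are given, the up/down
    # fits are applied to isppol 0/1; bands outside the fitted window keep their
    # uncorrected DFT value.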
def pfit_gw_eigenvalues_ha(self,polyfitlist_up,energy_pivots_up=None,nband=None,polyfitlist_down=None,energy_pivots_down=None,ecgw=None):
if polyfitlist_down == None and energy_pivots_down != None:
print 'ERROR: list of polyfits and energy pivots are not coherent ... exit'
sys.exit()
if polyfitlist_down != None and energy_pivots_down == None:
print 'ERROR: list of polyfits and energy pivots are not coherent ... exit'
sys.exit()
if nband == None:
mband = N.shape(self.eigenvalues)[2]
else:
mband = nband
pfit_eigenvalues = csts.hartree2ev*N.array(self.eigenvalues)
if polyfitlist_down == None:
for ikpt in range(self.nkpt):
for isppol in range(self.nsppol):
if ecgw == None:
ibdmin = 0
ibdmax = mband
else:
ibdmin = ecgw.bd_indices[isppol,ikpt,0]-1
ibdmax = ecgw.bd_indices[isppol,ikpt,1]
for iband in range(ibdmin,ibdmax):
delta = N.polyval(polyfitlist_up[-1],csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband])
for ipivot in range(len(energy_pivots_up)):
if csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband] <= energy_pivots_up[ipivot]:
delta = N.polyval(polyfitlist_up[ipivot],csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband])
break
pfit_eigenvalues[isppol,ikpt,iband] = self.eigenvalues[isppol,ikpt,iband]*csts.hartree2ev + delta
return pfit_eigenvalues*csts.ev2hartree
else:
for ikpt in range(self.nkpt):
isppol = 0
if ecgw == None:
ibdmin = 0
ibdmax = mband
else:
print ecgw.bd_indices
ibdmin = ecgw.bd_indices[isppol,0,0]-1
ibdmax = ecgw.bd_indices[isppol,0,1]
for iband in range(ibdmin,ibdmax):
delta = N.polyval(polyfitlist_up[-1],csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband])
for ipivot in range(len(energy_pivots_up)):
if polyfitlist_up[ipivot] != None:
if csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband] <= energy_pivots_up[ipivot]:
delta = N.polyval(polyfitlist_up[ipivot],csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband])
break
pfit_eigenvalues[isppol,ikpt,iband] = self.eigenvalues[isppol,ikpt,iband]*csts.hartree2ev + delta
isppol = 1
if ecgw == None:
ibdmin = 0
ibdmax = mband
else:
ibdmin = ecgw.bd_indices[isppol,0,0]-1
ibdmax = ecgw.bd_indices[isppol,0,1]
for iband in range(ibdmin,ibdmax):
delta = N.polyval(polyfitlist_down[-1],csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband])
for ipivot in range(len(energy_pivots_down)):
if polyfitlist_down[ipivot] != None:
if csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband] <= energy_pivots_down[ipivot]:
delta = N.polyval(polyfitlist_down[ipivot],csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband])
break
pfit_eigenvalues[isppol,ikpt,iband] = self.eigenvalues[isppol,ikpt,iband]*csts.hartree2ev + delta
return pfit_eigenvalues*csts.ev2hartree
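    # pfit_gw_eigenvalues: same interval-by-interval correction as
    # pfit_gw_eigenvalues_ha for a single list of fits, but the corrected
    # eigenvalues are returned in eV.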
def pfit_gw_eigenvalues(self,polyfitlist,energy_pivots=None,nband=None):
if nband == None:
mband = N.shape(self.eigenvalues)[2]
else:
mband = nband
pfit_eigenvalues = N.zeros((self.nsppol,self.nkpt,mband))
for ikpt in range(self.nkpt):
for isppol in range(self.nsppol):
for iband in range(mband):
delta = N.polyval(polyfitlist[-1],csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband])
for ipivot in range(len(energy_pivots)):
if csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband] <= energy_pivots[ipivot]:
delta = N.polyval(polyfitlist[ipivot],csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband])
break
pfit_eigenvalues[isppol,ikpt,iband] = self.eigenvalues[isppol,ikpt,iband]*csts.hartree2ev + delta
return pfit_eigenvalues
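    # pfit_gw_file_write: writes an interpolated _GW file (same layout as read by
    # gw_file_open) for the band window bdgw, asking before overwriting an existing
    # file. When a GW container (gwec) is supplied, its ab initio values are kept
    # wherever available and the polynomial fit only fills the missing
    # (k-point, band) entries.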
def pfit_gw_file_write(self,polyfitlist,directory=None,filename=None,bdgw=None,energy_pivots=None,gwec=None):
if filename == None:return
if directory == None:directory='.'
filefullpath = '%s/%s' %(directory,filename)
if (os.path.isfile(filefullpath)):
user_input = raw_input('WARNING : file "%s" exists, do you want to overwrite it ? (y/n)' %filefullpath)
if not (user_input == 'y' or user_input == 'Y'):
return
writer = open(filefullpath,'w')
writer.write('%12s%12s\n' %(self.nkpt,self.nsppol))
if gwec == None:
for ikpt in range(self.nkpt):
for isppol in range(self.nsppol):
writer.write('%10.6f%10.6f%10.6f\n' %(self.kpoints[ikpt,0],self.kpoints[ikpt,1],self.kpoints[ikpt,2]))
writer.write('%4i\n' %(bdgw[1]-bdgw[0]+1))
for iband in range(bdgw[0]-1,bdgw[1]):
delta = N.polyval(polyfitlist[-1],csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband])
for ipivot in range(len(energy_pivots)):
if csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband] <= energy_pivots[ipivot]:
delta = N.polyval(polyfitlist[ipivot],csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband])
break
writer.write('%6i%9.4f%9.4f%9.4f\n' %(iband+1,csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband]+delta,delta,0.0))
else:
for ikpt in range(self.nkpt):
for isppol in range(self.nsppol):
writer.write('%10.6f%10.6f%10.6f\n' %(self.kpoints[ikpt,0],self.kpoints[ikpt,1],self.kpoints[ikpt,2]))
writer.write('%4i\n' %(bdgw[1]-bdgw[0]+1))
for iband in range(bdgw[0]-1,bdgw[1]):
if gwec.has_eigenvalue(self.nsppol,isppol,self.kpoints[ikpt],iband):
gw_eig = gwec.get_eigenvalue(self.nsppol,isppol,self.kpoints[ikpt],iband)
writer.write('%6i%9.4f%9.4f%9.4f\n' %(iband+1,csts.hartree2ev*gw_eig,csts.hartree2ev*(gw_eig-self.eigenvalues[isppol,ikpt,iband]),0.0))
else:
delta = N.polyval(polyfitlist[-1],csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband])
for ipivot in range(len(energy_pivots)):
if csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband] <= energy_pivots[ipivot]:
delta = N.polyval(polyfitlist[ipivot],csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband])
break
writer.write('%6i%9.4f%9.4f%9.4f\n' %(iband+1,csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband]+delta,delta,0.0))
writer.close()
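    # pfit_dft_to_gw_bs_write: writes one line per k-point containing the fitted GW
    # eigenvalues (in eV) for the band window bdgw; combining with an explicit GW
    # container is not supported by this routine.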
def pfit_dft_to_gw_bs_write(self,polyfitlist,directory=None,filename=None,bdgw=None,energy_pivots=None,gwec=None):
if filename == None:return
if directory == None:directory='.'
filefullpath = '%s/%s' %(directory,filename)
if (os.path.isfile(filefullpath)):
user_input = raw_input('WARNING : file "%s" exists, do you want to overwrite it ? (y/n)' %filefullpath)
if not (user_input == 'y' or user_input == 'Y'):
return
writer = open(filefullpath,'w')
if gwec == None:
for ikpt in range(self.nkpt):
writer.write('%s' %ikpt)
for isppol in range(self.nsppol):
for iband in range(bdgw[0]-1,bdgw[1]):
delta = N.polyval(polyfitlist[-1],csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband])
for ipivot in range(len(energy_pivots)):
if csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband] <= energy_pivots[ipivot]:
delta = N.polyval(polyfitlist[ipivot],csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband])
break
writer.write(' %s' %(csts.hartree2ev*self.eigenvalues[isppol,ikpt,iband]+delta))
writer.write('\n')
else:
print 'NOT SUPPORTED YET'
sys.exit()
writer.close()
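    # nc_eig_open: reads DFT eigenvalues from a netCDF EIG.nc-type file (dimensions
    # nsppol/nkpt/mband, variables 'Eigenvalues' and 'Kptns'); a missing "units"
    # attribute is assumed to mean Hartree and any other unit aborts the script.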
def nc_eig_open(self,filefullpath):
if not (os.path.isfile(filefullpath)):
            print 'ERROR : file "%s" does not exist' %filefullpath
print '... exiting now ...'
sys.exit()
ncdata = nc.Dataset(filefullpath)
self.eigenvalue_type = 'DFT'
self.nsppol = None
self.nkpt = None
self.mband = None
self.eigenvalues = None
self.units = None
self.filefullpath = filefullpath
for dimname,dimobj in ncdata.dimensions.iteritems():
if dimname == 'nsppol':self.nsppol = N.int(len(dimobj))
if dimname == 'nkpt':self.nkpt = N.int(len(dimobj))
if dimname == 'mband':self.mband = N.int(len(dimobj))
for varname in ncdata.variables:
if varname == 'Eigenvalues':
varobj = ncdata.variables[varname]
varshape = N.shape(varobj[:])
self.units = None
for attrname in varobj.ncattrs():
if attrname == 'units':
self.units = varobj.getncattr(attrname)
if self.units == None:
print 'WARNING : units are not specified'
print '... assuming "Hartree" units ...'
self.units = 'Hartree'
elif self.units != 'Hartree':
print 'ERROR : units are unknown : "%s"' %self.units
print '... exiting now ...'
sys.exit()
self.eigenvalues = N.reshape(N.array(varobj,N.float),varshape)
self.nsppol = varshape[0]
self.nkpt = varshape[1]
self.kpoints = -1*N.ones((self.nkpt,3),N.float)
self.mband = varshape[2]
self.bd_indices = N.zeros((self.nsppol,self.nkpt,2),N.int)
self.bd_indices[:,:,0] = 1
self.bd_indices[:,:,1] = self.mband
break
for varname in ncdata.variables:
if varname == 'Kptns':
varobj = ncdata.variables[varname]
varshape = N.shape(varobj[:])
self.kpoints = N.reshape(N.array(varobj,N.float),varshape)
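    # write_bandstructure_to_file: dumps the band structure to a plain-text file: a
    # commented header (gvectors, special k-points and their names, path lengths),
    # then one "path_value eigenvalue" pair per k-point for every spin and band
    # between #BEGIN and #END, followed by a #KPT_LIST section. option_kpts selects
    # the abscissa: bohrm1_units, reduced, bohrm1_units_normalized or
    # reduced_normalized.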
def write_bandstructure_to_file(self,filename,option_kpts='bohrm1_units'):
#if option_kpts is set to 'normalized', the path of the bandstructure will be normalized to 1 (and special k-points correctly chosen)
if self.kpoint_sampling_type != 'Bandstructure':
print 'ERROR: kpoint_sampling_type is not "Bandstructure" ... returning from write_bandstructure_to_file'
return
if self.nsppol > 1:
            print 'WARNING: number of spins is more than 1, this is not fully tested ... use with care !'
writer = open(filename,'w')
writer.write('# BANDSTRUCTURE FILE FROM DAVID\'S SCRIPT\n')
writer.write('# nsppol = %s\n' %self.nsppol)
writer.write('# nband = %s\n' %self.mband)
writer.write('# eigenvalue_type = %s\n' %self.eigenvalue_type)
if self.inputgvectors:
writer.write('# inputgvectors = 1 (%s)\n' %self.inputgvectors)
else:
writer.write('# inputgvectors = 0 (%s)\n' %self.inputgvectors)
writer.write('# gvectors(1) = %20.17f %20.17f %20.17f \n' %(self.gvectors[0,0],self.gvectors[0,1],self.gvectors[0,2]))
writer.write('# gvectors(2) = %20.17f %20.17f %20.17f \n' %(self.gvectors[1,0],self.gvectors[1,1],self.gvectors[1,2]))
writer.write('# gvectors(3) = %20.17f %20.17f %20.17f \n' %(self.gvectors[2,0],self.gvectors[2,1],self.gvectors[2,2]))
writer.write('# special_kpoints_number = %s\n' %(len(self.special_kpoints_indices)))
writer.write('# list of special kpoints : (given in reduced coordinates, value_path is in Bohr^-1, value_red_path has its total path normalized to 1)\n')
for ii in range(len(self.special_kpoints_indices)):
ispkpt = self.special_kpoints_indices[ii]
spkpt = self.special_kpoints[ii]
writer.write('# special_kpt_index %5s : %20.17f %20.17f %20.17f (value_path = %20.17f | value_red_path = %20.17f)\n' %(ispkpt,spkpt[0],spkpt[1],spkpt[2],self.kpoint_path_values[ispkpt],self.kpoint_reduced_path_values[ispkpt]))
writer.write('# special_kpoints_names :\n')
for ii in range(len(self.special_kpoints_indices)):
ispkpt = self.special_kpoints_indices[ii]
spkpt = self.special_kpoints[ii]
writer.write('# special_kpt_name %3s : "%s" : %20.17f %20.17f %20.17f\n' %(ii+1,self.special_kpoints_names[ii],spkpt[0],spkpt[1],spkpt[2]))
writer.write('# kpoint_path_length = %20.17f \n' %(self.kpoint_path_length))
writer.write('# kpoint_path_number = %s \n' %(self.nkpt))
if self.inputgvectors:
writer.write('# kpoint_path_units = %s\n' %(option_kpts))
else:
writer.write('# kpoint_path_units = %s (!!! CONSIDERING UNITARY GVECTORS MATRIX !!!)\n' %(option_kpts))
writer.write('#BEGIN\n')
if option_kpts == 'bohrm1_units':
values_path = self.kpoint_path_values
elif option_kpts == 'reduced':
values_path = self.kpoint_reduced_path_values
elif option_kpts == 'bohrm1_units_normalized':
values_path = self.normalized_kpoint_path_values
elif option_kpts == 'reduced_normalized':
values_path = self.normalized_kpoint_reduced_path_values
else:
print 'ERROR: wrong option_kpts ... exit'
writer.write('... CANCELLED (wrong option_kpts)')
writer.close()
sys.exit()
for isppol in range(self.nsppol):
writer.write('#isppol %s\n' %isppol)
for iband in range(self.mband):
writer.write('#iband %5s (band number %s)\n' %(iband,iband+1))
for ikpt in range(self.nkpt):
writer.write('%20.17f %20.17f\n' %(values_path[ikpt],self.eigenvalues[isppol,ikpt,iband]))
writer.write('\n')
writer.write('#END\n')
writer.write('\n#KPT_LIST\n')
for ikpt in range(self.nkpt):
writer.write('# %6d : %20.17f %20.17f %20.17f\n' %(ikpt,self.kpoints[ikpt,0],self.kpoints[ikpt,1],self.kpoints[ikpt,2]))
writer.close()
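    # read_bandstructure_from_file: parses a file produced by
    # write_bandstructure_to_file, restoring nsppol, mband, eigenvalue_type, the
    # gvectors, the special k-points (indices, coordinates and names), the k-point
    # path values in whatever units were written, and the eigenvalues array.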
def read_bandstructure_from_file(self,filename):
reader = open(filename,'r')
bs_data = reader.readlines()
reader.close()
self.gvectors = N.identity(3,N.float)
self.kpoint_sampling_type = 'Bandstructure'
self.special_kpoints_indices = list()
self.special_kpoints = list()
for ii in range(len(bs_data)):
if bs_data[ii] == '#BEGIN\n':
ibegin = ii
break
elif bs_data[ii][:10] == '# nsppol =':
self.nsppol = N.int(bs_data[ii][10:])
elif bs_data[ii][:9] == '# nband =':
self.mband = N.int(bs_data[ii][9:])
elif bs_data[ii][:19] == '# eigenvalue_type =':
self.eigenvalue_type = bs_data[ii][19:].strip()
elif bs_data[ii][:17] == '# inputgvectors =':
tt = N.int(bs_data[ii][18])
if tt == 1:
self.inputgvectors = True
elif tt == 0:
self.inputgvectors = False
else:
print 'ERROR: reading inputgvectors ... exit'
sys.exit()
elif bs_data[ii][:15] == '# gvectors(1) =':
sp = bs_data[ii][15:].split()
self.gvectors[0,0] = N.float(sp[0])
self.gvectors[0,1] = N.float(sp[1])
self.gvectors[0,2] = N.float(sp[2])
elif bs_data[ii][:15] == '# gvectors(2) =':
sp = bs_data[ii][15:].split()
self.gvectors[1,0] = N.float(sp[0])
self.gvectors[1,1] = N.float(sp[1])
self.gvectors[1,2] = N.float(sp[2])
elif bs_data[ii][:15] == '# gvectors(3) =':
sp = bs_data[ii][15:].split()
self.gvectors[2,0] = N.float(sp[0])
self.gvectors[2,1] = N.float(sp[1])
self.gvectors[2,2] = N.float(sp[2])
elif bs_data[ii][:26] == '# special_kpoints_number =':
special_kpoints_number = N.int(bs_data[ii][26:])
self.special_kpoints_names = ['']*special_kpoints_number
            elif bs_data[ii][:19] == '# special_kpt_index':
                sp = bs_data[ii][19:].split()
self.special_kpoints_indices.append(N.int(sp[0]))
self.special_kpoints.append(N.array([sp[2],sp[3],sp[4]]))
            elif bs_data[ii][:18] == '# special_kpt_name':
                sp = bs_data[ii][18:].split()
ispkpt = N.int(sp[0])-1
self.special_kpoints_names[ispkpt] = sp[2][1:-1]
elif bs_data[ii][:22] == '# kpoint_path_length =':
self.kpoint_path_length = N.float(bs_data[ii][22:])
elif bs_data[ii][:22] == '# kpoint_path_number =':
self.nkpt = N.int(bs_data[ii][22:])
elif bs_data[ii][:21] == '# kpoint_path_units =':
kpoint_path_units = bs_data[ii][21:].strip()
self.special_kpoints_indices = N.array(self.special_kpoints_indices,N.int)
self.special_kpoints = N.array(self.special_kpoints,N.float)
if len(self.special_kpoints_indices) != special_kpoints_number or len(self.special_kpoints) != special_kpoints_number:
print 'ERROR: reading the special kpoints ... exit'
sys.exit()
self.kpoint_path_values = N.zeros([self.nkpt],N.float)
self.kpoint_reduced_path_values = N.zeros([self.nkpt],N.float)
if kpoint_path_units == 'bohrm1_units':
jj = 0
for ii in range(ibegin+1,len(bs_data)):
if bs_data[ii][:7] == '#isppol' or bs_data[ii][:6] == '#iband':continue
if bs_data[ii] == '\n':
break
self.kpoint_path_values[jj] = N.float(bs_data[ii].split()[0])
jj = jj + 1
if jj != self.nkpt:
print 'ERROR: reading bandstructure file ... exit'
sys.exit()
self.normalized_kpoint_path_values = self.kpoint_path_values/self.kpoint_path_length
if kpoint_path_units == 'bohrm1_units_normalized':
jj = 0
for ii in range(ibegin+1,len(bs_data)):
if bs_data[ii][:7] == '#isppol' or bs_data[ii][:6] == '#iband':continue
if bs_data[ii] == '\n':
break
self.normalized_kpoint_path_values[jj] = N.float(bs_data[ii].split()[0])
jj = jj + 1
if jj != self.nkpt:
print 'ERROR: reading bandstructure file ... exit'
sys.exit()
self.kpoint_path_values = self.normalized_kpoint_path_values*self.kpoint_path_length
elif kpoint_path_units == 'reduced_normalized':
jj = 0
for ii in range(ibegin+1,len(bs_data)):
if bs_data[ii][:7] == '#isppol' or bs_data[ii][:6] == '#iband':continue
if bs_data[ii] == '\n':
break
self.normalized_kpoint_reduced_path_values[jj] = N.float(bs_data[ii].split()[0])
jj = jj + 1
if jj != self.nkpt:
print 'ERROR: reading bandstructure file ... exit'
sys.exit()
            self.kpoint_reduced_path_values = self.normalized_kpoint_reduced_path_values*self.kpoint_reduced_path_length
elif kpoint_path_units == 'reduced':
jj = 0
for ii in range(ibegin+1,len(bs_data)):
if bs_data[ii][:7] == '#isppol' or bs_data[ii][:6] == '#iband':continue
if bs_data[ii] == '\n':
break
self.kpoint_reduced_path_values[jj] = N.float(bs_data[ii].split()[0])
jj = jj + 1
if jj != self.nkpt:
print 'ERROR: reading bandstructure file ... exit'
sys.exit()
self.normalized_kpoint_reduced_path_values = self.kpoint_reduced_path_values/self.kpoint_reduced_path_length
self.eigenvalues = N.zeros([self.nsppol,self.nkpt,self.mband],N.float)
check_nband = 0
for ii in range(ibegin+1,len(bs_data)):
if bs_data[ii][:7] == '#isppol':
isppol = N.int(bs_data[ii][7:])
elif bs_data[ii][:6] == '#iband':
iband = N.int(bs_data[ii][6:].split()[0])
ikpt = 0
elif bs_data[ii][:4] == '#END':
break
elif bs_data[ii] == '\n':
check_nband = check_nband + 1
else:
self.eigenvalues[isppol,ikpt,iband] = N.float(bs_data[ii].split()[1])
ikpt = ikpt + 1
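# check_gw_vs_dft_parameters: sanity check that the two containers hold GW and DFT
# eigenvalues respectively and share the same number of spins and the same k-point
# list (within csts.TOLKPTS); exits on any mismatch.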
def check_gw_vs_dft_parameters(dftec,gwec):
if gwec.eigenvalue_type != 'GW' or dftec.eigenvalue_type != 'DFT':
print 'ERROR: eigenvalue files do not contain GW and DFT eigenvalues ... exiting now'
sys.exit()
if dftec.nsppol != gwec.nsppol or dftec.nkpt != gwec.nkpt:
print 'ERROR: the number of spins/kpoints is not the same in the GW and DFT files used to make the interpolation ... exiting now'
sys.exit()
for ikpt in range(dftec.nkpt):
if N.absolute(dftec.kpoints[ikpt,0]-gwec.kpoints[ikpt,0]) > csts.TOLKPTS or \
N.absolute(dftec.kpoints[ikpt,1]-gwec.kpoints[ikpt,1]) > csts.TOLKPTS or \
N.absolute(dftec.kpoints[ikpt,2]-gwec.kpoints[ikpt,2]) > csts.TOLKPTS:
print 'ERROR: the kpoints are not the same in the GW and DFT files used to make the interpolation ... exiting now'
sys.exit()
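# classify_eigenvalues: splits eigarray (and, when given, the matching eig2array)
# into len(energy_pivots)+1 sublists, one per energy interval delimited by the
# pivots; values exactly equal to a pivot end up in both neighbouring intervals.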
def classify_eigenvalues(eigarray,energy_pivots,eig2array=None):
eigarray_list = list()
if eig2array != None:
eig2array_list = list()
for iinterval in range(len(energy_pivots)+1):
print iinterval,' : '
tmpeigarray = N.array([],N.float)
tmpeig2array = N.array([],N.float)
if iinterval == 0:
emin = None
emax = energy_pivots[0]
print emin,emax
for ii in range(len(eigarray)):
if eigarray[ii] <= emax:
tmpeigarray = N.append(tmpeigarray,[eigarray[ii]])
tmpeig2array = N.append(tmpeig2array,[eig2array[ii]])
elif iinterval == len(energy_pivots):
emin = energy_pivots[-1]
emax = None
print emin,emax
for ii in range(len(eigarray)):
if eigarray[ii] >= emin:
tmpeigarray = N.append(tmpeigarray,[eigarray[ii]])
tmpeig2array = N.append(tmpeig2array,[eig2array[ii]])
else:
emin = energy_pivots[iinterval-1]
emax = energy_pivots[iinterval]
print emin,emax
for ii in range(len(eigarray)):
if eigarray[ii] >= emin and eigarray[ii] <= emax:
tmpeigarray = N.append(tmpeigarray,[eigarray[ii]])
tmpeig2array = N.append(tmpeig2array,[eig2array[ii]])
eigarray_list.append(tmpeigarray)
eig2array_list.append(tmpeig2array)
return eigarray_list,eig2array_list
else:
for iinterval in range(len(energy_pivots)+1):
tmpeigarray = N.array([],N.float)
if iinterval == 0:
emin = None
emax = energy_pivots[0]
for ii in range(len(eigarray)):
if eigarray[ii] <= emax:
tmpeigarray = N.append(tmpeigarray,[eigarray[ii]])
elif iinterval == len(energy_pivots):
emin = energy_pivots[-1]
emax = None
for ii in range(len(eigarray)):
if eigarray[ii] >= emin:
tmpeigarray = N.append(tmpeigarray,[eigarray[ii]])
else:
emin = energy_pivots[iinterval-1]
emax = energy_pivots[iinterval]
for ii in range(len(eigarray)):
if eigarray[ii] >= emin and eigarray[ii] <= emax:
tmpeigarray = N.append(tmpeigarray,[eigarray[ii]])
eigarray_list.append(tmpeigarray)
return eigarray_list
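# plot_gw_vs_dft_eig: builds piecewise polynomial fits of the GW correction
# (GW - DFT, in eV) as a function of the DFT eigenvalue. With no energy pivots it
# only plots the raw GW-vs-DFT data; otherwise each interval is fitted according to
# polyfit_degrees and the "limitpoints" strategy (plain least squares, or fits
# constrained at the interval end points through polynd_a/polynd_ab), the
# high-energy end is optionally smoothed, and the (possibly updated) pivots and
# fits are returned, per spin when spinchoice is 'separate'.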
def plot_gw_vs_dft_eig(dftec,gwec,vbm_index,energy_pivots_up=None,energy_pivots_down=None,polyfit_degrees_up=None,polyfit_degrees_down=None,limitpoints=None,spinchoice=None,smooth_end=True,smooth_energy=None,smooth_delta_energy=None):
DELTA_ENERGY_END = 2.0
if gwec.eigenvalue_type != 'GW' or dftec.eigenvalue_type != 'DFT':
print 'ERROR: eigenvalue containers do not contain GW and DFT eigenvalues ... exiting now'
sys.exit()
if dftec.nsppol != gwec.nsppol or dftec.nkpt != gwec.nkpt:
print 'ERROR: the number of spins/kpoints is not the same in the GW and DFT containers ... exiting now'
sys.exit()
if dftec.nsppol == 1:
spinchoice = 'common'
valdftarray = N.array([],N.float)
conddftarray = N.array([],N.float)
valgwarray = N.array([],N.float)
condgwarray = N.array([],N.float)
if dftec.nsppol == 2 and spinchoice == 'separate':
upvaldftarray = N.array([],N.float)
upconddftarray = N.array([],N.float)
upvalgwarray = N.array([],N.float)
upcondgwarray = N.array([],N.float)
downvaldftarray = N.array([],N.float)
downconddftarray = N.array([],N.float)
downvalgwarray = N.array([],N.float)
downcondgwarray = N.array([],N.float)
if spinchoice == None or spinchoice == 'common':
for ikpt in range(dftec.nkpt):
for isppol in range(dftec.nsppol):
ibdmin = N.max([dftec.bd_indices[isppol,ikpt,0],gwec.bd_indices[isppol,ikpt,0]])-1
ibdmax = N.min([dftec.bd_indices[isppol,ikpt,1],gwec.bd_indices[isppol,ikpt,1]])-1
valdftarray = N.append(valdftarray,csts.hartree2ev*dftec.eigenvalues[isppol,ikpt,ibdmin:vbm_index])
valgwarray = N.append(valgwarray,csts.hartree2ev*gwec.eigenvalues[isppol,ikpt,ibdmin:vbm_index])
conddftarray = N.append(conddftarray,csts.hartree2ev*dftec.eigenvalues[isppol,ikpt,vbm_index:ibdmax+1])
condgwarray = N.append(condgwarray,csts.hartree2ev*gwec.eigenvalues[isppol,ikpt,vbm_index:ibdmax+1])
elif spinchoice == 'separate':
for ikpt in range(dftec.nkpt):
isppol = 0
ibdmin = N.max([dftec.bd_indices[isppol,ikpt,0],gwec.bd_indices[isppol,ikpt,0]])-1
ibdmax = N.min([dftec.bd_indices[isppol,ikpt,1],gwec.bd_indices[isppol,ikpt,1]])-1
upvaldftarray = N.append(upvaldftarray,csts.hartree2ev*dftec.eigenvalues[isppol,ikpt,ibdmin:vbm_index])
upvalgwarray = N.append(upvalgwarray,csts.hartree2ev*gwec.eigenvalues[isppol,ikpt,ibdmin:vbm_index])
upconddftarray = N.append(upconddftarray,csts.hartree2ev*dftec.eigenvalues[isppol,ikpt,vbm_index:ibdmax+1])
upcondgwarray = N.append(upcondgwarray,csts.hartree2ev*gwec.eigenvalues[isppol,ikpt,vbm_index:ibdmax+1])
isppol = 1
ibdmin = N.max([dftec.bd_indices[isppol,ikpt,0],gwec.bd_indices[isppol,ikpt,0]])-1
ibdmax = N.min([dftec.bd_indices[isppol,ikpt,1],gwec.bd_indices[isppol,ikpt,1]])-1
downvaldftarray = N.append(downvaldftarray,csts.hartree2ev*dftec.eigenvalues[isppol,ikpt,ibdmin:vbm_index])
downvalgwarray = N.append(downvalgwarray,csts.hartree2ev*gwec.eigenvalues[isppol,ikpt,ibdmin:vbm_index])
downconddftarray = N.append(downconddftarray,csts.hartree2ev*dftec.eigenvalues[isppol,ikpt,vbm_index:ibdmax+1])
downcondgwarray = N.append(downcondgwarray,csts.hartree2ev*gwec.eigenvalues[isppol,ikpt,vbm_index:ibdmax+1])
if energy_pivots_up == None:
if plot_figures == 1:
if dftec.nsppol == 2:
P.figure(1,figsize=(csts.fig_width,csts.fig_height))
P.hold(True)
P.grid(True)
P.plot(upvaldftarray,upvalgwarray,'bx',markersize=csts.markersize,markeredgewidth=csts.markeredgewidth)
P.plot(upconddftarray,upcondgwarray,'rx',markersize=csts.markersize,markeredgewidth=csts.markeredgewidth)
P.xlabel('DFT eigenvalues - spin UP (in eV)')
P.ylabel('GW eigenvalues (in eV)')
P.figure(2,figsize=(csts.fig_width,csts.fig_height))
P.hold(True)
P.grid(True)
P.plot(downvaldftarray,downvalgwarray,'bo',markersize=csts.markersize,markeredgewidth=csts.markeredgewidth)
P.plot(downconddftarray,downcondgwarray,'ro',markersize=csts.markersize,markeredgewidth=csts.markeredgewidth)
P.xlabel('DFT eigenvalues - spin UP (in eV)')
P.ylabel('GW eigenvalues (in eV)')
else:
P.figure(1,figsize=(csts.fig_width,csts.fig_height))
P.hold(True)
P.grid(True)
P.plot(valdftarray,valgwarray,'bx',markersize=csts.markersize,markeredgewidth=csts.markeredgewidth)
P.plot(conddftarray,condgwarray,'rx',markersize=csts.markersize,markeredgewidth=csts.markeredgewidth)
P.xlabel('DFT eigenvalues (in eV)')
P.ylabel('GW eigenvalues (in eV)')
if dftec.nsppol == 2:
P.figure(3,figsize=(csts.fig_width,csts.fig_height))
P.hold(True)
P.grid(True)
P.plot(upvaldftarray,upvalgwarray-upvaldftarray,'bx',markersize=csts.markersize,markeredgewidth=csts.markeredgewidth)
P.plot(upconddftarray,upcondgwarray-upconddftarray,'rx',markersize=csts.markersize,markeredgewidth=csts.markeredgewidth)
P.xlabel('DFT eigenvalues - spin UP (in eV)')
P.ylabel('GW correction to the DFT eigenvalues (in eV)')
P.figure(4,figsize=(csts.fig_width,csts.fig_height))
P.hold(True)
P.grid(True)
P.plot(downvaldftarray,downvalgwarray-downvaldftarray,'bo',markersize=csts.markersize,markeredgewidth=csts.markeredgewidth)
P.plot(downconddftarray,downcondgwarray-downconddftarray,'ro',markersize=csts.markersize,markeredgewidth=csts.markeredgewidth)
P.xlabel('DFT eigenvalues - spin DOWN (in eV)')
P.ylabel('GW correction to the DFT eigenvalues (in eV)')
else:
P.figure(2,figsize=(csts.fig_width,csts.fig_height))
P.hold(True)
P.grid(True)
P.plot(valdftarray,valgwarray-valdftarray,'bx',markersize=csts.markersize,markeredgewidth=csts.markeredgewidth)
P.plot(conddftarray,condgwarray-conddftarray,'rx',markersize=csts.markersize,markeredgewidth=csts.markeredgewidth)
P.xlabel('DFT eigenvalues(in eV)')
P.ylabel('GW correction to the DFT eigenvalues (in eV)')
P.show()
return
if spinchoice == None or spinchoice == 'common':
polyfitlist = list()
if len(polyfit_degrees_up) == 1:
print 'ERROR: making a fit with only one interval is not allowed ... exiting now'
sys.exit()
dftarray = N.append(valdftarray,conddftarray)
gwarray = N.append(valgwarray,condgwarray)
dftarray_list,gwarray_list = classify_eigenvalues(dftarray,energy_pivots_up,gwarray)
for iinterval in range(len(polyfit_degrees_up)):
tmpdftarray = dftarray_list[iinterval]
tmpgwarray = gwarray_list[iinterval]
if len(tmpdftarray) > 0:
if limitpoints == 'least-squares' or (polyfit_degrees_up[iinterval] <= 0 and iinterval != len(polyfit_degrees_up)-1):
pfit = N.polyfit(tmpdftarray,tmpgwarray-tmpdftarray,N.abs(polyfit_degrees_up[iinterval]))
elif limitpoints == 'least-squares_last-fixed':
if iinterval == len(polyfit_degrees_up)-1:
idftmin = N.argmin(tmpdftarray)
idftmax = N.argmax(tmpdftarray)
igwmin = N.argmin(tmpgwarray)
igwmax = N.argmax(tmpgwarray)
if idftmin == igwmin:
myimin = idftmin
else:
print 'COMMENT: the minimum for DFT and GW are not the same for band group #%s' %(iinterval+1)
print ' => the gw minimum is taken'
myimin = igwmin
pfit = polynd_a(tmpdftarray,tmpgwarray-tmpdftarray,polyfit_degrees_up[iinterval],indices=[myimin])
else:
pfit = N.polyfit(tmpdftarray,tmpgwarray-tmpdftarray,N.abs(polyfit_degrees_up[iinterval]))
elif limitpoints == 'endpoints-fixed' or limitpoints == 'endpoints-fixed_last-flat':
idftmin = N.argmin(tmpdftarray)
idftmax = N.argmax(tmpdftarray)
igwmin = N.argmin(tmpgwarray)
igwmax = N.argmax(tmpgwarray)
if idftmin == igwmin:
myimin = idftmin
else:
print 'COMMENT: the minimum for DFT and GW are not the same for band group #%s' %(iinterval+1)
print ' => the gw minimum is taken'
myimin = igwmin
if iinterval == len(polyfit_degrees_up)-1:
if limitpoints == 'endpoints-fixed':
pfit = polynd_a(tmpdftarray,tmpgwarray-tmpdftarray,polyfit_degrees_up[iinterval],indices=[myimin])
elif limitpoints == 'endpoints-fixed_last-flat':
pfit = [N.polyval(polyfitlist[-1],energy_pivots_up[-1])]
else:
if idftmax == igwmax:
myimax = idftmax
else:
print 'COMMENT: the maximum for DFT and GW are not the same for band group #%s' %(iinterval+1)
print ' => the gw maximum is taken'
myimax = igwmax
pfit = polynd_ab(tmpdftarray,tmpgwarray-tmpdftarray,polyfit_degrees_up[iinterval],indices=[myimin,myimax])
else:
pfit = None
polyfitlist.append(pfit)
if smooth_end:
if smooth_energy == None:
smoothenergy = N.max(dftarray)
else:
smoothenergy = smooth_energy
smoothdeltaenergy = None
if smooth_delta_energy != None:
smoothdeltaenergy = smooth_delta_energy
oldpolyfitlist = list(polyfitlist)
oldenergypivots = N.array(energy_pivots_up)
energy_pivots_up,polyfitlist = smoothend(energy_pivots_up,polyfitlist,smoothenergy,delta_energy_ev=smoothdeltaenergy)
dftarray_list,gwarray_list = classify_eigenvalues(dftarray,energy_pivots_up,gwarray)
if plot_figures == 1:
linspace_npoints = 200
valpoly_x = N.linspace(N.min(valdftarray),N.max(valdftarray),linspace_npoints)
condpoly_x = N.linspace(N.min(conddftarray),N.max(conddftarray),linspace_npoints)
P.figure(3,figsize=(csts.fig_width,csts.fig_height))
P.hold(True)
P.grid(True)
P.plot(valdftarray,valgwarray-valdftarray,'bx',markersize=csts.markersize,markeredgewidth=csts.markeredgewidth)
P.plot(conddftarray,condgwarray-conddftarray,'rx',markersize=csts.markersize,markeredgewidth=csts.markeredgewidth)
[x_min,x_max] = P.xlim()
[y_min,y_max] = P.ylim()
if smooth_end:
x_max = energy_pivots_up[-1]+DELTA_ENERGY_END
for iinterval in range(len(polyfitlist)):
if iinterval == 0:
tmppoly_x = N.linspace(x_min,energy_pivots_up[iinterval],linspace_npoints)
elif iinterval == len(polyfitlist)-1:
tmppoly_x = N.linspace(energy_pivots_up[iinterval-1],x_max,linspace_npoints)
else:
tmppoly_x = N.linspace(energy_pivots_up[iinterval-1],energy_pivots_up[iinterval],linspace_npoints)
if polyfitlist[iinterval] != None:
P.plot(tmppoly_x,N.polyval(polyfitlist[iinterval],tmppoly_x),'k',markersize=csts.markersize,markeredgewidth=csts.markeredgewidth)
for ipivot in range(len(energy_pivots_up)):
en = energy_pivots_up[ipivot]
if polyfitlist[ipivot] != None and polyfitlist[ipivot+1] != None:
P.plot([en,en],[N.polyval(polyfitlist[ipivot],en),N.polyval(polyfitlist[ipivot+1],en)],'k-.',markersize=csts.markersize,markeredgewidth=csts.markeredgewidth)
P.xlabel('DFT eigenvalues (in eV)')
P.ylabel('GW correction to the DFT eigenvalues (in eV)')
P.ylim([y_min,y_max])
P.figure(4,figsize=(csts.fig_width,csts.fig_height))
P.hold(True)
P.grid(True)
for iinterval in range(len(polyfitlist)):
if polyfitlist[iinterval] != None:
P.plot(dftarray_list[iinterval],gwarray_list[iinterval]-dftarray_list[iinterval]-N.polyval(polyfitlist[iinterval],dftarray_list[iinterval]),'bx',markersize=csts.markersize,markeredgewidth=csts.markeredgewidth)
[x_min,x_max] = P.xlim()
P.plot([x_min,x_max],[0,0],'k-')
P.xlabel('DFT eigenvalues (in eV)')
P.ylabel('Error in the fit (in eV)')
P.show()
return energy_pivots_up,polyfitlist
elif spinchoice == 'separate':
polyfitlist_up = list()
polyfitlist_down = list()
if len(polyfit_degrees_up) == 1 or len(polyfit_degrees_down) == 1:
print 'ERROR: making a fit with only one interval is not allowed ... exiting now'
sys.exit()
updftarray = N.append(upvaldftarray,upconddftarray)
upgwarray = N.append(upvalgwarray,upcondgwarray)
downdftarray = N.append(downvaldftarray,downconddftarray)
downgwarray = N.append(downvalgwarray,downcondgwarray)
updftarray_list,upgwarray_list = classify_eigenvalues(updftarray,energy_pivots_up,upgwarray)
downdftarray_list,downgwarray_list = classify_eigenvalues(downdftarray,energy_pivots_down,downgwarray)
for iinterval in range(len(polyfit_degrees_up)):
tmpdftarray = updftarray_list[iinterval]
tmpgwarray = upgwarray_list[iinterval]
if len(tmpdftarray) > 0:
if limitpoints == 'least-squares' or polyfit_degrees_up[iinterval] <= 0:
pfit = N.polyfit(tmpdftarray,tmpgwarray-tmpdftarray,N.abs(polyfit_degrees_up[iinterval]))
elif limitpoints == 'endpoints-fixed':
idftmin = N.argmin(tmpdftarray)
idftmax = N.argmax(tmpdftarray)
igwmin = N.argmin(tmpgwarray)
igwmax = N.argmax(tmpgwarray)
if idftmin == igwmin:
myimin = idftmin
else:
print 'COMMENT: the minimum for DFT and GW are not the same for band group #%s' %(iinterval+1)
print ' => the gw minimum is taken'
myimin = igwmin
if iinterval == len(polyfit_degrees_up)-1:
pfit = polynd_a(tmpdftarray,tmpgwarray-tmpdftarray,polyfit_degrees_up[iinterval],indices=[myimin])
else:
if idftmax == igwmax:
myimax = idftmax
else:
print 'COMMENT: the maximum for DFT and GW are not the same for band group #%s' %(iinterval+1)
print ' => the gw maximum is taken'
myimax = igwmax
pfit = polynd_ab(tmpdftarray,tmpgwarray-tmpdftarray,polyfit_degrees_up[iinterval],indices=[myimin,myimax])
else:
pfit = None
polyfitlist_up.append(pfit)
if smooth_end:
if smooth_energy == None:
                smoothenergy = N.max(updftarray)
else:
smoothenergy = smooth_energy
smoothdeltaenergy = None
if smooth_delta_energy != None:
smoothdeltaenergy = smooth_delta_energy
oldpolyfitlist_up = list(polyfitlist_up)
oldenergypivots_up = N.array(energy_pivots_up)
energy_pivots_up,polyfitlist_up = smoothend(energy_pivots_up,polyfitlist_up,smoothenergy,delta_energy_ev=smoothdeltaenergy)
updftarray_list,upgwarray_list = classify_eigenvalues(updftarray,energy_pivots_up,upgwarray)
for iinterval in range(len(polyfit_degrees_down)):
tmpdftarray = downdftarray_list[iinterval]
tmpgwarray = downgwarray_list[iinterval]
if len(tmpdftarray) > 0:
if limitpoints == 'least-squares' or polyfit_degrees_down[iinterval] <= 0:
pfit = N.polyfit(tmpdftarray,tmpgwarray-tmpdftarray,N.abs(polyfit_degrees_down[iinterval]))
elif limitpoints == 'endpoints-fixed':
idftmin = N.argmin(tmpdftarray)
idftmax = N.argmax(tmpdftarray)
igwmin = N.argmin(tmpgwarray)
igwmax = N.argmax(tmpgwarray)
if idftmin == igwmin:
myimin = idftmin
else:
print 'COMMENT: the minimum for DFT and GW are not the same for band group #%s' %(iinterval+1)
print ' => the gw minimum is taken'
myimin = igwmin
if iinterval == len(polyfit_degrees_down)-1:
pfit = polynd_a(tmpdftarray,tmpgwarray-tmpdftarray,polyfit_degrees_down[iinterval],indices=[myimin])
else:
if idftmax == igwmax:
myimax = idftmax
else:
print 'COMMENT: the maximum for DFT and GW are not the same for band group #%s' %(iinterval+1)
print ' => the gw maximum is taken'
myimax = igwmax
pfit = polynd_ab(tmpdftarray,tmpgwarray-tmpdftarray,polyfit_degrees_down[iinterval],indices=[myimin,myimax])
else:
pfit = None
polyfitlist_down.append(pfit)
if smooth_end:
if smooth_energy == None:
                smoothenergy = N.max(downdftarray)
else:
smoothenergy = smooth_energy
smoothdeltaenergy = None
if smooth_delta_energy != None:
smoothdeltaenergy = smooth_delta_energy
oldpolyfitlist_down = list(polyfitlist_down)
oldenergypivots_down = N.array(energy_pivots_down)
energy_pivots_down,polyfitlist_down = smoothend(energy_pivots_down,polyfitlist_down,smoothenergy,delta_energy_ev=smoothdeltaenergy)
downdftarray_list,downgwarray_list = classify_eigenvalues(downdftarray,energy_pivots_down,downgwarray)
if plot_figures == 1:
linspace_npoints = 200
upvalpoly_x = N.linspace(N.min(upvaldftarray),N.max(upvaldftarray),linspace_npoints)
upcondpoly_x = N.linspace(N.min(upconddftarray),N.max(upconddftarray),linspace_npoints)
downvalpoly_x = N.linspace(N.min(downvaldftarray),N.max(downvaldftarray),linspace_npoints)
downcondpoly_x = N.linspace(N.min(downconddftarray),N.max(downconddftarray),linspace_npoints)
P.figure(3,figsize=(csts.fig_width,csts.fig_height))
P.hold(True)
P.grid(True)
P.plot(upvaldftarray,upvalgwarray-upvaldftarray,'bx',markersize=csts.markersize,markeredgewidth=csts.markeredgewidth)
P.plot(upconddftarray,upcondgwarray-upconddftarray,'rx',markersize=csts.markersize,markeredgewidth=csts.markeredgewidth)
[x_min,x_max] = P.xlim()
[y_min,y_max] = P.ylim()
for iinterval in range(len(polyfitlist_up)):
if iinterval == 0:
tmppoly_x = N.linspace(x_min,energy_pivots_up[iinterval],linspace_npoints)
elif iinterval == len(polyfitlist_up)-1:
tmppoly_x = N.linspace(energy_pivots_up[iinterval-1],x_max,linspace_npoints)
else:
tmppoly_x = N.linspace(energy_pivots_up[iinterval-1],energy_pivots_up[iinterval],linspace_npoints)
if polyfitlist_up[iinterval] != None:
P.plot(tmppoly_x,N.polyval(polyfitlist_up[iinterval],tmppoly_x),'k',markersize=csts.markersize,markeredgewidth=csts.markeredgewidth)
for ipivot in range(len(energy_pivots_up)):
en = energy_pivots_up[ipivot]
if polyfitlist_up[ipivot] != None and polyfitlist_up[ipivot+1] != None:
P.plot([en,en],[N.polyval(polyfitlist_up[ipivot],en),N.polyval(polyfitlist_up[ipivot+1],en)],'k-.',markersize=csts.markersize,markeredgewidth=csts.markeredgewidth)
P.xlabel('DFT eigenvalues (in eV) - spin UP')
P.ylabel('GW correction to the DFT eigenvalues (in eV)')
P.ylim([y_min,y_max])
P.figure(4,figsize=(csts.fig_width,csts.fig_height))
P.hold(True)
P.grid(True)
P.plot(downvaldftarray,downvalgwarray-downvaldftarray,'bo',markersize=csts.markersize,markeredgewidth=csts.markeredgewidth)
P.plot(downconddftarray,downcondgwarray-downconddftarray,'ro',markersize=csts.markersize,markeredgewidth=csts.markeredgewidth)
[x_min,x_max] = P.xlim()
[y_min,y_max] = P.ylim()
for iinterval in range(len(polyfitlist_down)):
if iinterval == 0:
tmppoly_x = N.linspace(x_min,energy_pivots_down[iinterval],linspace_npoints)
elif iinterval == len(polyfitlist_down)-1:
tmppoly_x = N.linspace(energy_pivots_down[iinterval-1],x_max,linspace_npoints)
else:
tmppoly_x = N.linspace(energy_pivots_down[iinterval-1],energy_pivots_down[iinterval],linspace_npoints)
if polyfitlist_down[iinterval] != None:
P.plot(tmppoly_x,N.polyval(polyfitlist_down[iinterval],tmppoly_x),'k',markersize=csts.markersize,markeredgewidth=csts.markeredgewidth)
for ipivot in range(len(energy_pivots_down)):
en = energy_pivots_down[ipivot]
if polyfitlist_down[ipivot] != None and polyfitlist_down[ipivot+1] != None:
P.plot([en,en],[N.polyval(polyfitlist_down[ipivot],en),N.polyval(polyfitlist_down[ipivot+1],en)],'k-.',markersize=csts.markersize,markeredgewidth=csts.markeredgewidth)
P.xlabel('DFT eigenvalues (in eV) - spin DOWN')
P.ylabel('GW correction to the DFT eigenvalues (in eV)')
P.ylim([y_min,y_max])
P.figure(5,figsize=(csts.fig_width,csts.fig_height))
P.hold(True)
P.grid(True)
for iinterval in range(len(polyfitlist_up)):
if polyfitlist_up[iinterval] != None:
P.plot(updftarray_list[iinterval],upgwarray_list[iinterval]-updftarray_list[iinterval]-N.polyval(polyfitlist_up[iinterval],updftarray_list[iinterval]),'bx',markersize=csts.markersize,markeredgewidth=csts.markeredgewidth)
[x_min,x_max] = P.xlim()
P.plot([x_min,x_max],[0,0],'k-')
P.xlabel('DFT eigenvalues (in eV) - spin UP')
P.ylabel('Error in the fit (in eV)')
P.figure(6,figsize=(csts.fig_width,csts.fig_height))
P.hold(True)
P.grid(True)
for iinterval in range(len(polyfitlist_down)):
if polyfitlist_down[iinterval] != None:
P.plot(downdftarray_list[iinterval],downgwarray_list[iinterval]-downdftarray_list[iinterval]-N.polyval(polyfitlist_down[iinterval],downdftarray_list[iinterval]),'bx',markersize=csts.markersize,markeredgewidth=csts.markeredgewidth)
[x_min,x_max] = P.xlim()
P.plot([x_min,x_max],[0,0],'k-')
P.xlabel('DFT eigenvalues (in eV) - spin DOWN')
P.ylabel('Error in the fit (in eV)')
P.show()
return energy_pivots_up,energy_pivots_down,polyfitlist_up,polyfitlist_down
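# get_gvectors: returns the reciprocal-space primitive vectors, either read from the
# hidden file ".gvectors.bsinfo" (three lines of three floats) or entered
# interactively, with the option of (re)writing that file; returns None when the
# user provides no gvectors.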
def get_gvectors():
if os.path.isfile('.gvectors.bsinfo'):
print 'File ".gvectors.bsinfo found with the following gvectors information :"'
try:
gvectors_reader = open('.gvectors.bsinfo','r')
gvectors_data = gvectors_reader.readlines()
gvectors_reader.close()
trial_gvectors = N.identity(3,N.float)
trial_gvectors[0,0] = N.float(gvectors_data[0].split()[0])
trial_gvectors[0,1] = N.float(gvectors_data[0].split()[1])
trial_gvectors[0,2] = N.float(gvectors_data[0].split()[2])
trial_gvectors[1,0] = N.float(gvectors_data[1].split()[0])
trial_gvectors[1,1] = N.float(gvectors_data[1].split()[1])
trial_gvectors[1,2] = N.float(gvectors_data[1].split()[2])
trial_gvectors[2,0] = N.float(gvectors_data[2].split()[0])
trial_gvectors[2,1] = N.float(gvectors_data[2].split()[1])
trial_gvectors[2,2] = N.float(gvectors_data[2].split()[2])
print ' gvectors(1) = [ %20.17f %20.17f %20.17f ]' %(trial_gvectors[0,0],trial_gvectors[0,1],trial_gvectors[0,2])
print ' gvectors(2) = [ %20.17f %20.17f %20.17f ]' %(trial_gvectors[1,0],trial_gvectors[1,1],trial_gvectors[1,2])
print ' gvectors(3) = [ %20.17f %20.17f %20.17f ]' %(trial_gvectors[2,0],trial_gvectors[2,1],trial_gvectors[2,2])
except:
print 'ERROR: file ".gvectors.bsinfo" might be corrupted (empty or not formatted correctly ...)'
print ' you should remove the file and start again or check the file ... exit'
sys.exit()
test = raw_input('Press <ENTER> to use these gvectors (any other character to enter manually other gvectors)\n')
if test == '':
gvectors = trial_gvectors
else:
gvectors = N.identity(3,N.float)
test = raw_input('Enter G1 (example : "0.153 0 0") : \n')
gvectors[0,0] = N.float(test.split()[0])
gvectors[0,1] = N.float(test.split()[1])
gvectors[0,2] = N.float(test.split()[2])
test = raw_input('Enter G2 (example : "0.042 1.023 0") : \n')
gvectors[1,0] = N.float(test.split()[0])
gvectors[1,1] = N.float(test.split()[1])
gvectors[1,2] = N.float(test.split()[2])
test = raw_input('Enter G3 (example : "0 0 1.432") : \n')
gvectors[2,0] = N.float(test.split()[0])
gvectors[2,1] = N.float(test.split()[1])
gvectors[2,2] = N.float(test.split()[2])
test = raw_input('Do you want to overwrite the gvectors contained in the file ".gvectors.bsinfo" ? (<ENTER> for yes, anything else for no)\n')
if test == '':
print 'Writing gvectors to file ".gvectors.bsinfo" ...'
gvectors_writer = open('.gvectors.bsinfo','w')
gvectors_writer.write('%20.17f %20.17f %20.17f\n' %(trial_gvectors[0,0],trial_gvectors[0,1],trial_gvectors[0,2]))
gvectors_writer.write('%20.17f %20.17f %20.17f\n' %(trial_gvectors[1,0],trial_gvectors[1,1],trial_gvectors[1,2]))
gvectors_writer.write('%20.17f %20.17f %20.17f\n' %(trial_gvectors[2,0],trial_gvectors[2,1],trial_gvectors[2,2]))
gvectors_writer.close()
print '... done'
else:
        test = raw_input('Do you want to enter the reciprocal space primitive vectors (y/n)\n')
if test == 'y':
gvectors = N.identity(3,N.float)
test = raw_input('Enter G1 (example : "0.153 0 0") : ')
gvectors[0,0] = N.float(test.split()[0])
gvectors[0,1] = N.float(test.split()[1])
gvectors[0,2] = N.float(test.split()[2])
test = raw_input('Enter G2 (example : "0.042 1.023 0") : ')
gvectors[1,0] = N.float(test.split()[0])
gvectors[1,1] = N.float(test.split()[1])
gvectors[1,2] = N.float(test.split()[2])
test = raw_input('Enter G3 (example : "0 0 1.432") : ')
gvectors[2,0] = N.float(test.split()[0])
gvectors[2,1] = N.float(test.split()[1])
gvectors[2,2] = N.float(test.split()[2])
test = raw_input('Do you want to write the gvectors to file ".gvectors.bsinfo" ? (<ENTER> for yes, anything else for no)\n')
if test == '':
print 'Writing gvectors to file ".gvectors.bsinfo" ...'
gvectors_writer = open('.gvectors.bsinfo','w')
gvectors_writer.write('%20.17f %20.17f %20.17f\n' %(gvectors[0,0],gvectors[0,1],gvectors[0,2]))
gvectors_writer.write('%20.17f %20.17f %20.17f\n' %(gvectors[1,0],gvectors[1,1],gvectors[1,2]))
gvectors_writer.write('%20.17f %20.17f %20.17f\n' %(gvectors[2,0],gvectors[2,1],gvectors[2,2]))
gvectors_writer.close()
print '... done'
else:
gvectors = None
return gvectors
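# Command-line driver. Typical invocation (the script name is whatever this file is
# saved as, the file names below are only placeholders):
#     python this_script.py -c my_calc_EIG.nc my_calc_GW
# The two positional files may be given in either order; their roles are detected
# from the EIG.nc/_GW suffixes. The -g/--graphical flag is accepted by the parser
# but no graphical interface is wired in: both branches below leave use_gui False,
# so the command-line workflow is always used.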
# Parse the command line options
parser = argparse.ArgumentParser(description='Tool for eigenvalue analysis')
parser.add_argument('-g','--graphical',help='use the graphical user interface',action='store_true')
parser.add_argument('-c','--command_line',help='use the command line interface',action='store_true')
parser.add_argument('files',help='files to be opened',nargs=2)
args = parser.parse_args()
args_dict = vars(args)
if args_dict['command_line'] and args_dict['graphical']:
raise StandardError('Use either "-g/--graphical" or "-c/--command_line"')
elif args_dict['command_line']:
use_gui = False
else:
use_gui = False
if not use_gui:
if args_dict['files']:
if len(args_dict['files']) != 2:
print 'ERROR: you should provide EIG.nc and _GW files ! exiting now ...'
sys.exit()
file_1 = args_dict['files'][0]
file_2 = args_dict['files'][1]
if file_1[-6:] == 'EIG.nc':
eig_file = file_1
if file_2[-3:] == '_GW':
gw_file = file_2
else:
print 'ERROR: you should provide 1 _GW file with your EIG.nc file ! exiting now ...'
sys.exit()
elif file_1[-3:] == '_GW':
gw_file = file_1
if file_2[-6:] == 'EIG.nc':
eig_file = file_2
else:
print 'ERROR: you should provide 1 EIG.nc file with your _GW file ! exiting now ...'
sys.exit()
else:
print 'ERROR: you should provide 1 EIG.nc and 1 _GW files ! exiting now ...'
sys.exit()
else:
print 'ERROR: you should provide EIG.nc and _GW files ! exiting now ...'
sys.exit()
ec_dft = EigenvalueContainer(directory='.',filename=eig_file)
ec_gw = EigenvalueContainer(directory='.',filename=gw_file)
check_gw_vs_dft_parameters(ec_dft,ec_gw)
user_input = raw_input('Do you want to plot the figures ? (y/n)\n')
if user_input == 'y' or user_input == 'Y':
plot_figures = 1
else:
plot_figures = 0
user_input = raw_input('Enter the index of the valence band maximum :\n')
vbm_index = N.int(user_input)
user_input = raw_input('Do you want the script to automatically find groups of bands (y/n) ?\n')
if user_input == 'y':
user_input = raw_input('Enter the name of the bandstructure file used to find groups of bands\n(<ENTER> for finding groups of bands on the regular grid -- file "%s" ... not recommended)\n' %eig_file)
if user_input == '':
if ec_dft.nsppol > 1:
energy_pivots_up_ha,energy_pivots_down_ha = ec_dft.find_band_groups(spinchoice='separate')
energy_pivots_up = csts.hartree2ev*energy_pivots_up_ha
energy_pivots_down = csts.hartree2ev*energy_pivots_down_ha
else:
energy_pivots = csts.hartree2ev*ec_dft.find_band_groups()
else:
if ec_dft.nsppol > 1:
energy_pivots_up_ha,energy_pivots_down_ha = ec_dft.find_band_groups(bandstructure_file=user_input,spinchoice='separate')
energy_pivots_up = csts.hartree2ev*energy_pivots_up_ha
energy_pivots_down = csts.hartree2ev*energy_pivots_down_ha
else:
energy_pivots = csts.hartree2ev*ec_dft.find_band_groups(bandstructure_file=user_input)
if ec_dft.nsppol > 1:
nfittingintervals_up = len(energy_pivots_up)+1
nfittingintervals_down = len(energy_pivots_down)+1
else:
nfittingintervals = len(energy_pivots)+1
else:
if plot_figures == 1:
plot_gw_vs_dft_eig(ec_dft,ec_gw,vbm_index,spinchoice='separate')
if ec_dft.nsppol == 1:
user_input = raw_input('How many fitting intervals do you want ? (default is 2 : valence/conduction => press <ENTER>)\n')
if user_input == '':
nfittingintervals = 2
energy_pivots = N.zeros(nfittingintervals-1,N.float)
energy_pivots[0] = csts.hartree2ev*(N.min(ec_dft.eigenvalues[:,:,vbm_index])+N.max(ec_dft.eigenvalues[:,:,vbm_index-1]))/2
else:
nfittingintervals = N.int(user_input)
energy_pivots = N.zeros(nfittingintervals-1,N.float)
user_input = raw_input('Enter the %s energy "pivots" that splits the dft eigenvalues in %s fitting intervals (in eV) :\n' %(nfittingintervals-1,nfittingintervals))
energy_pivots = N.array(user_input.split(),N.float)
if len(energy_pivots) != nfittingintervals-1:
print 'ERROR: you asked %s fitting intervals and provided %s energy "pivots".' %(nfittingintervals,len(energy_pivots))
print ' you should provide %s energy "pivots" ... exiting now' %(nfittingintervals-1)
sys.exit()
for ienergy in range(1,len(energy_pivots)):
if energy_pivots[ienergy] <= energy_pivots[ienergy-1]:
print 'ERROR: the energy pivots have to be entered increasingly'
print ' you should provide energy "pivots" with increasing energies ... exiting now'
sys.exit()
elif ec_dft.nsppol == 2:
user_input = raw_input('How many fitting intervals do you want for spin up ? (default is 2 : valence/conduction => press <ENTER>)\n')
if user_input == '':
nfittingintervals_up = 2
energy_pivots_up = N.zeros(nfittingintervals_up-1,N.float)
energy_pivots_up[0] = csts.hartree2ev*(N.min(ec_dft.eigenvalues[0,:,vbm_index])+N.max(ec_dft.eigenvalues[0,:,vbm_index-1]))/2
else:
nfittingintervals_up = N.int(user_input)
energy_pivots_up = N.zeros(nfittingintervals_up-1,N.float)
user_input = raw_input('Enter the %s energy "pivots" that splits the dft eigenvalues (spin up) in %s fitting intervals (in eV) :\n' %(nfittingintervals_up-1,nfittingintervals_up))
energy_pivots_up = N.array(user_input.split(),N.float)
if len(energy_pivots_up) != nfittingintervals_up-1:
print 'ERROR: you asked %s fitting intervals and provided %s energy "pivots".' %(nfittingintervals_up,len(energy_pivots_up))
print ' you should provide %s energy "pivots" ... exiting now' %(nfittingintervals_up-1)
sys.exit()
for ienergy in range(1,len(energy_pivots_up)):
if energy_pivots_up[ienergy] <= energy_pivots_up[ienergy-1]:
print 'ERROR: the energy pivots have to be entered increasingly'
print ' you should provide energy "pivots" with increasing energies ... exiting now'
sys.exit()
user_input = raw_input('How many fitting intervals do you want for spin down ? (default is 2 : valence/conduction => press <ENTER>)\n')
if user_input == '':
nfittingintervals_down = 2
energy_pivots_down = N.zeros(nfittingintervals_down-1,N.float)
energy_pivots_down[0] = csts.hartree2ev*(N.min(ec_dft.eigenvalues[0,:,vbm_index])+N.max(ec_dft.eigenvalues[0,:,vbm_index-1]))/2
else:
nfittingintervals_down = N.int(user_input)
energy_pivots_down = N.zeros(nfittingintervals_down-1,N.float)
user_input = raw_input('Enter the %s energy "pivots" that splits the dft eigenvalues (spin down) in %s fitting intervals (in eV) :\n' %(nfittingintervals_down-1,nfittingintervals_down))
energy_pivots_down = N.array(user_input.split(),N.float)
if len(energy_pivots_down) != nfittingintervals_down-1:
print 'ERROR: you asked %s fitting intervals and provided %s energy "pivots".' %(nfittingintervals_down,len(energy_pivots_down))
print ' you should provide %s energy "pivots" ... exiting now' %(nfittingintervals_down-1)
sys.exit()
for ienergy in range(1,len(energy_pivots_down)):
if energy_pivots_down[ienergy] <= energy_pivots_down[ienergy-1]:
print 'ERROR: the energy pivots have to be entered increasingly'
print ' you should provide energy "pivots" with increasing energies ... exiting now'
sys.exit()
if ec_dft.nsppol > 1:
print 'Script will use the following energy pivots for the interpolation (spin up)'
print energy_pivots_up
user_input = raw_input('Enter the degree of polynomials used to fit the GW corrections (spin up) \nfor each interval (%s values, default is 3rd order polynomials with "fixed points" for each group of bands => press <ENTER>)\n or enter "options" to enter specific options' %nfittingintervals_up)
if user_input == '':
polyfit_degrees_up = 3*N.ones(nfittingintervals_up,N.int)
option_limit_points = 'endpoints-fixed'
elif user_input == 'options':
print 'ERROR: this option is not yet coded ... exit'
sys.exit()
else:
polyfit_degrees_up = N.array(user_input.split(),N.int)
option_limit_points = 'endpoints-fixed'
print 'Script will use the following energy pivots for the interpolation (spin down)'
print energy_pivots_down
user_input = raw_input('Enter the degree of polynomials used to fit the GW corrections (spin down) \nfor each interval (%s values, default is 3rd order polynomials with "fixed points" for each group of bands => press <ENTER>)\n or enter "options" to enter specific options' %nfittingintervals_down)
if user_input == '':
polyfit_degrees_down = 3*N.ones(nfittingintervals_down,N.int)
option_limit_points = 'endpoints-fixed'
elif user_input == 'options':
print 'ERROR: this option is not yet coded ... exit'
sys.exit()
else:
polyfit_degrees_down = N.array(user_input.split(),N.int)
option_limit_points = 'endpoints-fixed'
new_energy_pivots_up,new_energy_pivots_down,polyfit_list_up,polyfit_list_down = plot_gw_vs_dft_eig(ec_dft,ec_gw,vbm_index,energy_pivots_up=energy_pivots_up,energy_pivots_down=energy_pivots_down,polyfit_degrees_up=polyfit_degrees_up,polyfit_degrees_down=polyfit_degrees_down,limitpoints=option_limit_points,spinchoice='separate')
else:
print 'Script will use the following energy pivots for the interpolation (same for all spins)'
print energy_pivots
user_input = raw_input('Enter the degree of polynomials used to fit the GW corrections \nfor each interval (%s values, default is 3rd order polynomials with "fixed points" for each group of bands => press <ENTER>)\n or enter "options" to enter specific options' %nfittingintervals)
if user_input == '':
polyfit_degrees = 3*N.ones(nfittingintervals,N.int)
option_limit_points = 'endpoints-fixed'
elif user_input == 'options':
print 'ERROR: this option is not yet coded ... exit'
sys.exit()
else:
if user_input.split()[-1] == 'x':
tmp = user_input.split()
tmp[-1] = '0'
polyfit_degrees = N.array(tmp,N.int)
option_limit_points = 'endpoints-fixed_last-flat'
else:
polyfit_degrees = N.array(user_input.split(),N.int)
option_limit_points = 'endpoints-fixed'
user_input = raw_input('Enter specific options for the end of the polyfit ? (y/n) [<ENTER> to continue without entering specific options]')
if user_input == 'y':
user_input = raw_input('Enter the end smooth energy (to be documented ...) : ')
smoothenergy = N.float(user_input)
user_input = raw_input('Enter the end smooth delta energy (to be documented ...) [<ENTER> for default]: ')
if user_input == '':
smoothdeltaenergy = None
else:
smoothdeltaenergy = N.float(user_input)
new_energypivots,polyfit_list = plot_gw_vs_dft_eig(ec_dft,ec_gw,vbm_index,energy_pivots_up=energy_pivots,polyfit_degrees_up=polyfit_degrees,limitpoints=option_limit_points,smooth_end=True,smooth_energy=smoothenergy,smooth_delta_energy=smoothdeltaenergy)
else:
new_energypivots,polyfit_list = plot_gw_vs_dft_eig(ec_dft,ec_gw,vbm_index,energy_pivots_up=energy_pivots,polyfit_degrees_up=polyfit_degrees,limitpoints=option_limit_points)
if ec_dft.nsppol > 1:
print polyfit_list_up
print polyfit_list_down
else:
print polyfit_list
write_polyfit('mytest.pfitlist',new_energypivots,polyfit_list)
gw_interpolate = False
user_input = raw_input('Do you want to make an interpolated _GW file ? (y/n)\n')
if user_input == 'y' or user_input == 'Y':
gw_interpolate = True
if gw_interpolate:
nc_eig_file = raw_input('Enter the name of the EIG.nc file you want to extrapolate to GW :\n')
new_ec_dft = EigenvalueContainer(directory='.',filename=nc_eig_file)
user_input = raw_input('For which "bdgw"\'s do you want the interpolation (indices of \nthe smallest \
valence and largest conduction bands) ? bdgw(1)<=vbm_index<bdgw(2)<=%s \
(usually "1 something")\n' %N.min(new_ec_dft.bd_indices[:,:,1]))
if user_input == '':
bdgw_interpolated = N.array([1,N.min(new_ec_dft.bd_indices[:,:,1])])
else:
bdgw_interpolated = N.array(user_input.split(),N.int)
filename = '%s_polyfit_GW' %(nc_eig_file)
new_ec_dft.pfit_gw_file_write(polyfit_list,filename=filename,bdgw=bdgw_interpolated,energy_pivots=new_energypivots,gwec=ec_gw)
user_input = raw_input('Do you want to make an interpolated bandstructure file ? (y/n)\n')
if user_input == 'y' or user_input == 'Y':
nc_eig_file = raw_input('Enter the name of the bandstructure EIG.nc file you want to extrapolate to GW :\n')
new_ec_dft = EigenvalueContainer(directory='.',filename=nc_eig_file)
gvectors = get_gvectors()
if ec_dft.nsppol > 1:
gw_eigenvalues = new_ec_dft.pfit_gw_eigenvalues_ha(polyfit_list_up,energy_pivots_up=energy_pivots_up,polyfitlist_down=polyfit_list_down,energy_pivots_down=energy_pivots_down,ecgw=ec_gw)
new_ec_dft.eigenvalues = gw_eigenvalues
else:
#gw_eigenvalues = new_ec_dft.pfit_gw_eigenvalues_ha(polyfit_list,energy_pivots_up=energy_pivots,ecgw=ec_gw)
gw_eigenvalues = new_ec_dft.pfit_gw_eigenvalues_ha(polyfit_list,energy_pivots_up=new_energypivots,ecgw=None)
new_ec_dft.eigenvalues = gw_eigenvalues
new_ec_dft.set_kpoint_sampling_type('Bandstructure')
new_ec_dft.find_special_kpoints(gvectors)
print 'Number of bands in the file : %s' %(N.shape(new_ec_dft.eigenvalues)[2])
test = raw_input('Enter the number of bands to be plotted (<ENTER> : %s) : \n' %(N.shape(new_ec_dft.eigenvalues)[2]))
if test == '':
nbd_plot = N.shape(new_ec_dft.eigenvalues)[2]
else:
nbd_plot = N.int(test)
if nbd_plot > N.shape(new_ec_dft.eigenvalues)[2]:
print 'ERROR: the number of bands to be plotted is larger than the number available ... exit'
sys.exit()
new_ec_dft.special_kpoints_names = ['']*len(new_ec_dft.special_kpoints_indices)
for ii in range(len(new_ec_dft.special_kpoints_indices)):
new_ec_dft.special_kpoints_names[ii] = 'k%s' %(ii+1)
print 'List of special kpoints :'
for ii in range(len(new_ec_dft.special_kpoints_indices)):
spkpt = new_ec_dft.kpoints[new_ec_dft.special_kpoints_indices[ii]]
print ' Kpoint %s : %s %s %s' %(ii+1,spkpt[0],spkpt[1],spkpt[2])
print 'Enter the name of the %s special k-points :' %(len(new_ec_dft.special_kpoints_indices))
test = raw_input('')
if len(test.split()) == len(new_ec_dft.special_kpoints_indices):
for ii in range(len(new_ec_dft.special_kpoints_indices)):
new_ec_dft.special_kpoints_names[ii] = test.split()[ii]
test = raw_input('Enter base name for bandstructure file : \n')
new_ec_dft.write_bandstructure_to_file('%s.bandstructure' %test)
P.figure(1,figsize=(3.464,5))
P.hold('on')
P.grid('on')
P.xticks(N.take(new_ec_dft.kpoint_reduced_path_values,N.array(new_ec_dft.special_kpoints_indices,N.int)),new_ec_dft.special_kpoints_names)
for iband in range(nbd_plot):
if new_ec_dft.nsppol == 1:
P.plot(new_ec_dft.kpoint_reduced_path_values,new_ec_dft.eigenvalues[0,:,iband]*csts.hartree2ev,'k-',linewidth=2)
else:
P.plot(new_ec_dft.kpoint_reduced_path_values,new_ec_dft.eigenvalues[0,:,iband]*csts.hartree2ev,'k-',linewidth=2)
P.plot(new_ec_dft.kpoint_reduced_path_values,new_ec_dft.eigenvalues[1,:,iband]*csts.hartree2ev,'r-',linewidth=2)
P.show()
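# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the prompts above ask
# for energy "pivots" and a polynomial degree per interval.  A minimal,
# self-contained version of that per-interval fit could look like the helper
# below.  The argument names (dft_ev, gw_corrections_ev) and the helper itself
# are assumptions for illustration; the real script delegates this work to
# plot_gw_vs_dft_eig.
def _sketch_fit_gw_corrections(dft_ev, gw_corrections_ev, pivots_ev, degrees):
    """Fit one polynomial of GW correction vs DFT eigenvalue per interval.

    dft_ev, gw_corrections_ev : 1D arrays of the same length (in eV)
    pivots_ev : increasing energies splitting the eigenvalues into len(degrees) intervals
    degrees   : polynomial degree to use in each interval
    """
    edges = [-N.inf] + list(pivots_ev) + [N.inf]
    fits = []
    for ii in range(len(degrees)):
        mask = (dft_ev >= edges[ii]) & (dft_ev < edges[ii+1])
        # N.polyfit returns the coefficients of the fit, highest power first
        fits.append(N.polyfit(dft_ev[mask], gw_corrections_ev[mask], degrees[ii]))
    return fits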
|
jmbeuken/abinit
|
scripts/post_processing/ab_extended_scissors_guess.py
|
Python
|
gpl-3.0
| 101,073
|
#!/usr/bin/env python
"""Translation service using Google Translate"""
import re
import simplejson
from madcow.util import strip_html
from madcow.util.http import geturl
from madcow.util import Module
class BabelError(Exception):
"""Raised to stop translation due to internal error"""
class Main(Module):
"""Translation service using Google Translate"""
pattern = re.compile(r'^\s*(tr(?:ans(?:late)?)?(.+?)|list\s+lang(s|uages))\s*$', re.I)
help = '\n'.join([
'translate: <text> - auto-detect language and convert to english',
'translate: from <lang> to <lang> [to <lang> ...]: text - specify translation',
'list languages - list all available languages',
])
default_lang = 'english'
url = 'http://ajax.googleapis.com/ajax/services/language/translate'
langs = {'auto': '',
'afrikaans': 'af',
'albanian': 'sq',
'amharic': 'am',
'arabic': 'ar',
'armenian': 'hy',
'azerbaijani': 'az',
'basque': 'eu',
'belarusian': 'be',
'bengali': 'bn',
'bihari': 'bh',
'bulgarian': 'bg',
'burmese': 'my',
'catalan': 'ca',
'cherokee': 'chr',
'chinese': 'zh',
'chinese_simplified': 'zh-CN',
'chinese_traditional': 'zh-TW',
'croatian': 'hr',
'czech': 'cs',
'danish': 'da',
'dhivehi': 'dv',
'dutch': 'nl',
'english': 'en',
'esperanto': 'eo',
'estonian': 'et',
'filipino': 'tl',
'finnish': 'fi',
'french': 'fr',
'galician': 'gl',
'georgian': 'ka',
'german': 'de',
'greek': 'el',
'guarani': 'gn',
'gujarati': 'gu',
'hebrew': 'iw',
'hindi': 'hi',
'hungarian': 'hu',
'icelandic': 'is',
'indonesian': 'id',
'inuktitut': 'iu',
'irish': 'ga',
'italian': 'it',
'japanese': 'ja',
'kannada': 'kn',
'kazakh': 'kk',
'khmer': 'km',
'korean': 'ko',
'kurdish': 'ku',
'kyrgyz': 'ky',
'laothian': 'lo',
'latvian': 'lv',
'lithuanian': 'lt',
'macedonian': 'mk',
'malay': 'ms',
'malayalam': 'ml',
'maltese': 'mt',
'marathi': 'mr',
'mongolian': 'mn',
'nepali': 'ne',
'norwegian': 'no',
'oriya': 'or',
'pashto': 'ps',
'persian': 'fa',
'polish': 'pl',
'portuguese': 'pt-PT',
'punjabi': 'pa',
'romanian': 'ro',
'russian': 'ru',
'sanskrit': 'sa',
'serbian': 'sr',
'sindhi': 'sd',
'sinhalese': 'si',
'slovak': 'sk',
'slovenian': 'sl',
'spanish': 'es',
'swahili': 'sw',
'swedish': 'sv',
'tagalog': 'tl',
'tajik': 'tg',
'tamil': 'ta',
'telugu': 'te',
'thai': 'th',
'tibetan': 'bo',
'turkish': 'tr',
'uighur': 'ug',
'ukrainian': 'uk',
'urdu': 'ur',
'uzbek': 'uz',
'vietnamese': 'vi',
'welsh': 'cy',
'yiddish': 'yi'}
lookup = dict((val, key) for key, val in langs.iteritems())
def response(self, nick, args, kwargs):
"""Return a response to the bot to display"""
if args[0].startswith('tr'):
try:
message = self.parse(args[1])
except BabelError, error:
self.log.error(error)
message = error
except Exception, error:
self.log.warn('error in %s' % self.__module__)
self.log.exception(error)
message = error
else:
message = ', '.join(self.langs)
return u'%s: %s' % (nick, message)
def parse(self, cmd):
"""Parse command structure and transform text"""
if ':' not in cmd:
raise BabelError('missing text to translate')
cmd, text = [arg.strip() for arg in cmd.split(':', 1)]
cmd = cmd.lower().split()
translations = []
current_lang = None
while cmd:
arg = cmd.pop(0)
if arg == 'from':
continue
elif arg in self.langs:
if current_lang:
if arg == 'auto':
raise BabelError('can only auto-detect source')
if current_lang != arg:
translations.append((current_lang, arg))
current_lang = arg
elif arg == 'to':
if not current_lang:
current_lang = 'auto'
else:
raise BabelError('unknown language: ' + arg)
if not translations:
translations = [('auto', self.default_lang)]
for from_lang, to_lang in translations:
text = self.translate(text, from_lang, to_lang)
return text
def translate(self, text, src, dst):
"""Perform the translation"""
opts = {'langpair': '%s|%s' % (self.langs[src], self.langs[dst]), 'v': '1.0', 'q': text}
res = simplejson.loads(geturl(self.url, opts))['responseData']
text = strip_html(res['translatedText'])
try:
text = u'[detected %s] %s' % (self.lookup[res['detectedSourceLanguage']].capitalize(), text)
except KeyError:
pass
return text
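# ---------------------------------------------------------------------------
# Illustrative note (not part of the original module): for a simple command
# such as "translate: hola amigo", parse() receives ": hola amigo", finds no
# language names before the colon and falls back to
# translations = [('auto', 'english')], i.e. auto-detect the source and
# translate to the default language.  Each (from, to) pair is then handed to
# translate(), which builds the request options roughly as
#
#   opts = {'langpair': '|en', 'v': '1.0', 'q': 'hola amigo'}
#
# where the empty string before '|' is the code registered for 'auto'.
# ---------------------------------------------------------------------------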
|
ToxicFrog/lancow
|
madcow/modules/translate.py
|
Python
|
gpl-3.0
| 5,891
|
# -*- coding: utf-8 -*-
import re
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
class TwoSharedCom(SimpleHoster):
__name__ = "TwoSharedCom"
__type__ = "hoster"
__version__ = "0.13"
__pattern__ = r'http://(?:www\.)?2shared\.com/(account/)?(download|get|file|document|photo|video|audio)/.+'
__description__ = """2Shared.com hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
NAME_PATTERN = r'<h1>(?P<N>.*)</h1>'
SIZE_PATTERN = r'<span class="dtitle">File size:</span>\s*(?P<S>[\d.,]+) (?P<U>[\w^_]+)'
OFFLINE_PATTERN = r'The file link that you requested is not valid\.|This file was deleted\.'
LINK_FREE_PATTERN = r'window.location =\'(.+?)\';'
def setup(self):
self.resumeDownload = True
self.multiDL = True
getInfo = create_getInfo(TwoSharedCom)
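# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original plugin): SimpleHoster derives
# the file name, size and download link by matching the patterns above against
# the page HTML.  The fragment below is made up for illustration only.
if __name__ == '__main__':
    sample = ('<h1>example.zip</h1>'
              '<span class="dtitle">File size:</span> 1,234 KB '
              "window.location ='http://www.2shared.com/download/example.zip';")
    print re.search(TwoSharedCom.NAME_PATTERN, sample).group('N')      # example.zip
    size = re.search(TwoSharedCom.SIZE_PATTERN, sample)
    print size.group('S'), size.group('U')                             # 1,234 KB
    print re.search(TwoSharedCom.LINK_FREE_PATTERN, sample).group(1)   # the free download link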
|
immenz/pyload
|
module/plugins/hoster/TwoSharedCom.py
|
Python
|
gpl-3.0
| 922
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import logging
import random
from apollo import accessible_organisms
from apollo.util import GuessOrg, OrgOrGuess, retry
from arrow.apollo import get_apollo_instance
from webapollo import UserObj, handle_credentials
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Script to delete all features from an organism')
parser.add_argument('email', help='User Email')
parser.add_argument('--type', help='Feature type filter')
OrgOrGuess(parser)
args = parser.parse_args()
wa = get_apollo_instance()
# User must have an account
gx_user = UserObj(**wa.users._assert_or_create_user(args.email))
handle_credentials(gx_user)
# Get organism
org_cn = GuessOrg(args, wa)
if isinstance(org_cn, list):
org_cn = org_cn[0]
all_orgs = wa.organisms.get_organisms()
if 'error' in all_orgs:
all_orgs = []
all_orgs = [org['commonName'] for org in all_orgs]
if org_cn not in all_orgs:
raise Exception("Could not find organism %s" % org_cn)
orgs = accessible_organisms(gx_user, [org_cn], 'WRITE')
if not orgs:
raise Exception("You do not have write permission on this organism")
org = wa.organisms.show_organism(org_cn)
sequences = wa.organisms.get_sequences(org['id'])
for sequence in sequences['sequences']:
log.info("Processing %s %s", org['commonName'], sequence['name'])
# Call setSequence to tell apollo which organism we're working with
wa.annotations.set_sequence(org['id'], sequence['name'])
# Then get a list of features.
features = wa.annotations.get_features()
# For each feature in the features
for feature in sorted(features['features'], key=lambda x: random.random()):
if args.type:
if args.type == 'tRNA':
if feature['type']['name'] != 'tRNA':
continue
elif args.type == 'terminator':
if feature['type']['name'] != 'terminator':
continue
elif args.type == 'mRNA':
if feature['type']['name'] != 'mRNA':
continue
else:
raise Exception("Unknown type")
            # We see that deleteFeatures wants a uniqueName, and so we pass
            # in the uniquename field of the feature.
def fn():
wa.annotations.delete_feature(feature['uniquename'])
print('Deleted %s [type=%s]' % (feature['uniquename'], feature['type']['name']))
if not retry(fn, limit=3):
print('Error %s' % feature['uniquename'])
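# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): apollo.util.retry is
# used above as "call fn, retrying a few times, and report success".  A
# minimal stand-in with that behaviour might look like the helper below; it is
# an assumption for illustration, not the real implementation.
def _sketch_retry(fn, limit=3):
    """Call fn() up to `limit` times; return True on the first success."""
    for _ in range(limit):
        try:
            fn()
            return True
        except Exception as e:
            log.warning("retrying after error: %s", e)
    return False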
|
TAMU-CPT/galaxy-tools
|
tools/webapollo/shareOrg/delete_features.py
|
Python
|
gpl-3.0
| 2,847
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import res_users
import sale_crm
import crm_lead
import account_invoice
|
orchidinfosys/odoo
|
addons/sale_crm/__init__.py
|
Python
|
gpl-3.0
| 171
|
"""A light version of test points."""
from __future__ import division
from ..vectormath.euclid import Point3, Vector3
class AnalysisPointLite(object):
"""A radiance analysis point.
Attributes:
location: Location of analysis points as (x, y, z).
direction: Direction of analysis point as (x, y, z).
    This class is developed to enable honeybee to run daylight control
    studies with dynamic shadings without going back to several files.
    Each AnalysisPoint can load annual total and direct results for every state of
    each source assigned to it. As a result one can end up with a lot of data for
    a single point (8760 * sources * states for each source). The data are stored as
    integers and in separate lists for each source. There are several methods to
    set or get the data, but if you're interested in more details read the comments
    under __init__ to know how the data is stored.
    In this class:
    - Id stands for 'the id of a blind state'. Each state has a name and an ID will
      be assigned to it based on the order of loading.
    - coupledValue stands for a tuple of (total, direct) values. If one of the values
      is not available it will be set to None.
"""
__slots__ = ('_loc', '_dir')
def __init__(self, location, direction):
"""Create an analysis point."""
self.location = location
self.direction = direction
@classmethod
def from_json(cls, ap_json):
"""Create an analysis point from json object.
{"location": [x, y, z], "direction": [x, y, z]}
"""
return cls(ap_json['location'], ap_json['direction'])
@classmethod
def from_raw_values(cls, x, y, z, x1, y1, z1):
"""Create an analysis point from 6 values.
x, y, z are the location of the point and x1, y1 and z1 is the direction.
"""
return cls((x, y, z), (x1, y1, z1))
@property
def location(self):
"""Location of analysis points as Point3."""
return self._loc
@location.setter
def location(self, location):
try:
self._loc = Point3(*(float(l) for l in location))
except TypeError:
try:
# Dynamo Points!
self._loc = Point3(location.X, location.Y, location.Z)
except Exception as e:
raise TypeError(
'Failed to convert {} to location.\n'
'location should be a list or a tuple with 3 values.\n{}'
.format(location, e))
@property
def direction(self):
"""Direction of analysis points as Point3."""
return self._dir
@direction.setter
def direction(self, direction):
try:
self._dir = Vector3(*(float(d) for d in direction))
except TypeError:
try:
# Dynamo Points!
self._dir = Vector3(direction.X, direction.Y, direction.Z)
except Exception as e:
raise TypeError(
'Failed to convert {} to direction.\n'
'location should be a list or a tuple with 3 values.\n{}'
.format(direction, e))
def duplicate(self):
"""Duplicate the analysis point."""
return self
def ToString(self):
"""Overwrite .NET ToString."""
return self.__repr__()
def to_rad_string(self):
"""Return Radiance string for a test point."""
return "%s %s" % (self.location, self.direction)
def to_json(self):
"""Create an analysis point from json object.
{"location": [x, y, z], "direction": [x, y, z]}
"""
return {"location": list(self.location),
"direction": list(self.direction)}
def __repr__(self):
"""Print an analysis point."""
return 'AnalysisPoint::(%s)::(%s)' % (self.location, self.direction)
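# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module); the coordinates
# below are made-up values:
#
#   pt = AnalysisPointLite.from_raw_values(0, 0, 0.76, 0, 0, 1)
#   pt.to_rad_string()                         # location and direction on one line for Radiance
#   pt.to_json()                               # {"location": [...], "direction": [...]}
#   AnalysisPointLite.from_json(pt.to_json())  # round-trips back to an equivalent point
# ---------------------------------------------------------------------------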
|
ladybug-tools/honeybee
|
honeybee_plus/radiance/analysispointlite.py
|
Python
|
gpl-3.0
| 3,943
|
#!/usr/bin/env python
###############################################################################
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
###############################################################################
"""
Assess performance of marker sets under different conditions on draft genomes
composed of several scaffolds that are sampled randomly without replacement.
"""
__author__ = 'Donovan Parks'
__copyright__ = 'Copyright 2014'
__credits__ = ['Donovan Parks']
__license__ = 'GPL3'
__version__ = '1.0.0'
__maintainer__ = 'Donovan Parks'
__email__ = 'donovan.parks@gmail.com'
__status__ = 'Development'
import os
import sys
import argparse
import multiprocessing as mp
from collections import defaultdict
import random
import gzip
import time
import dendropy
from dendropy.dataobject.taxon import Taxon
from numpy import mean, std, abs
from checkm.util.img import IMG
from checkm.util.seqUtils import readFasta
from lib.markerSetBuilder import MarkerSetBuilder
class SimulationScaffolds(object):
def __init__(self):
self.markerSetBuilder = MarkerSetBuilder()
self.img = IMG('/srv/whitlam/bio/db/checkm/img/img_metadata.tsv', '/srv/whitlam/bio/db/checkm/pfam/tigrfam2pfam.tsv')
self.contigLens = [5000, 20000, 50000]
self.percentComps = [0.5, 0.7, 0.8, 0.9, 0.95, 1.0]
self.percentConts = [0.0, 0.05, 0.1, 0.15, 0.2]
def __seqLens(self, seqs):
"""Calculate lengths of seqs."""
genomeSize = 0
seqLens = {}
for seqId, seq in seqs.iteritems():
seqLens[seqId] = len(seq)
genomeSize += len(seq)
return seqLens, genomeSize
def __workerThread(self, tree, metadata, genomeIdsToTest, ubiquityThreshold, singleCopyThreshold, numReplicates, queueIn, queueOut):
"""Process each data item in parallel."""
while True:
testGenomeId = queueIn.get(block=True, timeout=None)
if testGenomeId == None:
break
# build marker sets for evaluating test genome
testNode = tree.find_node_with_taxon_label('IMG_' + testGenomeId)
binMarkerSets, refinedBinMarkerSet = self.markerSetBuilder.buildBinMarkerSet(tree, testNode.parent_node, ubiquityThreshold, singleCopyThreshold, bMarkerSet = True, genomeIdsToRemove = [testGenomeId])
# determine distribution of all marker genes within the test genome
geneDistTable = self.img.geneDistTable([testGenomeId], binMarkerSets.getMarkerGenes(), spacingBetweenContigs=0)
# estimate completeness of unmodified genome
unmodifiedComp = {}
unmodifiedCont = {}
for ms in binMarkerSets.markerSetIter():
hits = {}
for mg in ms.getMarkerGenes():
if mg in geneDistTable[testGenomeId]:
hits[mg] = geneDistTable[testGenomeId][mg]
completeness, contamination = ms.genomeCheck(hits, bIndividualMarkers=True)
unmodifiedComp[ms.lineageStr] = completeness
unmodifiedCont[ms.lineageStr] = contamination
# estimate completion and contamination of genome after subsampling using both the domain and lineage-specific marker sets
testSeqs = readFasta(os.path.join(self.img.genomeDir, testGenomeId, testGenomeId + '.fna'))
testSeqLens, genomeSize = self.__seqLens(testSeqs)
for contigLen in self.contigLens:
for percentComp in self.percentComps:
for percentCont in self.percentConts:
deltaComp = defaultdict(list)
deltaCont = defaultdict(list)
deltaCompSet = defaultdict(list)
deltaContSet = defaultdict(list)
deltaCompRefined = defaultdict(list)
deltaContRefined = defaultdict(list)
deltaCompSetRefined = defaultdict(list)
deltaContSetRefined = defaultdict(list)
trueComps = []
trueConts = []
numDescendants = {}
for i in xrange(0, numReplicates):
# generate test genome with a specific level of completeness, by randomly sampling scaffolds to remove
# (this will sample >= the desired level of completeness)
retainedTestSeqs, trueComp = self.markerSetBuilder.sampleGenomeScaffoldsWithoutReplacement(percentComp, testSeqLens, genomeSize)
trueComps.append(trueComp)
# select a random genome to use as a source of contamination
contGenomeId = random.sample(genomeIdsToTest - set([testGenomeId]), 1)[0]
contSeqs = readFasta(os.path.join(self.img.genomeDir, contGenomeId, contGenomeId + '.fna'))
contSeqLens, contGenomeSize = self.__seqLens(contSeqs)
seqsToRetain, trueRetainedPer = self.markerSetBuilder.sampleGenomeScaffoldsWithoutReplacement(1 - percentCont, contSeqLens, contGenomeSize)
contSampledSeqIds = set(contSeqs.keys()).difference(seqsToRetain)
trueCont = 100.0 - trueRetainedPer
trueConts.append(trueCont)
for ms in binMarkerSets.markerSetIter():
numDescendants[ms.lineageStr] = ms.numGenomes
containedMarkerGenes= defaultdict(list)
self.markerSetBuilder.markerGenesOnScaffolds(ms.getMarkerGenes(), testGenomeId, retainedTestSeqs, containedMarkerGenes)
self.markerSetBuilder.markerGenesOnScaffolds(ms.getMarkerGenes(), contGenomeId, contSampledSeqIds, containedMarkerGenes)
completeness, contamination = ms.genomeCheck(containedMarkerGenes, bIndividualMarkers=True)
deltaComp[ms.lineageStr].append(completeness - trueComp)
deltaCont[ms.lineageStr].append(contamination - trueCont)
completeness, contamination = ms.genomeCheck(containedMarkerGenes, bIndividualMarkers=False)
deltaCompSet[ms.lineageStr].append(completeness - trueComp)
deltaContSet[ms.lineageStr].append(contamination - trueCont)
for ms in refinedBinMarkerSet.markerSetIter():
containedMarkerGenes= defaultdict(list)
self.markerSetBuilder.markerGenesOnScaffolds(ms.getMarkerGenes(), testGenomeId, retainedTestSeqs, containedMarkerGenes)
self.markerSetBuilder.markerGenesOnScaffolds(ms.getMarkerGenes(), contGenomeId, contSampledSeqIds, containedMarkerGenes)
completeness, contamination = ms.genomeCheck(containedMarkerGenes, bIndividualMarkers=True)
deltaCompRefined[ms.lineageStr].append(completeness - trueComp)
deltaContRefined[ms.lineageStr].append(contamination - trueCont)
completeness, contamination = ms.genomeCheck(containedMarkerGenes, bIndividualMarkers=False)
deltaCompSetRefined[ms.lineageStr].append(completeness - trueComp)
deltaContSetRefined[ms.lineageStr].append(contamination - trueCont)
taxonomy = ';'.join(metadata[testGenomeId]['taxonomy'])
queueOut.put((testGenomeId, contigLen, percentComp, percentCont, taxonomy, numDescendants, unmodifiedComp, unmodifiedCont, deltaComp, deltaCont, deltaCompSet, deltaContSet, deltaCompRefined, deltaContRefined, deltaCompSetRefined, deltaContSetRefined, trueComps, trueConts))
def __writerThread(self, numTestGenomes, writerQueue):
"""Store or write results of worker threads in a single thread."""
summaryOut = open('/tmp/simulation.random_scaffolds.w_refinement_50.draft.summary.tsv', 'w')
summaryOut.write('Genome Id\tContig len\t% comp\t% cont')
summaryOut.write('\tTaxonomy\tMarker set\t# descendants')
summaryOut.write('\tUnmodified comp\tUnmodified cont')
summaryOut.write('\tIM comp\tIM comp std\tIM cont\tIM cont std')
summaryOut.write('\tMS comp\tMS comp std\tMS cont\tMS cont std')
summaryOut.write('\tRIM comp\tRIM comp std\tRIM cont\tRIM cont std')
summaryOut.write('\tRMS comp\tRMS comp std\tRMS cont\tRMS cont std\n')
fout = gzip.open('/tmp/simulation.random_scaffolds.w_refinement_50.draft.tsv.gz', 'wb')
fout.write('Genome Id\tContig len\t% comp\t% cont')
fout.write('\tTaxonomy\tMarker set\t# descendants')
fout.write('\tUnmodified comp\tUnmodified cont')
fout.write('\tIM comp\tIM cont')
fout.write('\tMS comp\tMS cont')
fout.write('\tRIM comp\tRIM cont')
fout.write('\tRMS comp\tRMS cont\tTrue Comp\tTrue Cont\n')
testsPerGenome = len(self.contigLens) * len(self.percentComps) * len(self.percentConts)
itemsProcessed = 0
while True:
testGenomeId, contigLen, percentComp, percentCont, taxonomy, numDescendants, unmodifiedComp, unmodifiedCont, deltaComp, deltaCont, deltaCompSet, deltaContSet, deltaCompRefined, deltaContRefined, deltaCompSetRefined, deltaContSetRefined, trueComps, trueConts = writerQueue.get(block=True, timeout=None)
if testGenomeId == None:
break
itemsProcessed += 1
statusStr = ' Finished processing %d of %d (%.2f%%) test cases.' % (itemsProcessed, numTestGenomes*testsPerGenome, float(itemsProcessed)*100/(numTestGenomes*testsPerGenome))
sys.stdout.write('%s\r' % statusStr)
sys.stdout.flush()
for markerSetId in unmodifiedComp:
summaryOut.write(testGenomeId + '\t%d\t%.2f\t%.2f' % (contigLen, percentComp, percentCont))
summaryOut.write('\t' + taxonomy + '\t' + markerSetId + '\t' + str(numDescendants[markerSetId]))
summaryOut.write('\t%.3f\t%.3f' % (unmodifiedComp[markerSetId], unmodifiedCont[markerSetId]))
summaryOut.write('\t%.3f\t%.3f' % (mean(abs(deltaComp[markerSetId])), std(abs(deltaComp[markerSetId]))))
summaryOut.write('\t%.3f\t%.3f' % (mean(abs(deltaCont[markerSetId])), std(abs(deltaCont[markerSetId]))))
summaryOut.write('\t%.3f\t%.3f' % (mean(abs(deltaCompSet[markerSetId])), std(abs(deltaCompSet[markerSetId]))))
summaryOut.write('\t%.3f\t%.3f' % (mean(abs(deltaContSet[markerSetId])), std(abs(deltaContSet[markerSetId]))))
summaryOut.write('\t%.3f\t%.3f' % (mean(abs(deltaCompRefined[markerSetId])), std(abs(deltaCompRefined[markerSetId]))))
summaryOut.write('\t%.3f\t%.3f' % (mean(abs(deltaContRefined[markerSetId])), std(abs(deltaContRefined[markerSetId]))))
summaryOut.write('\t%.3f\t%.3f' % (mean(abs(deltaCompSetRefined[markerSetId])), std(abs(deltaCompSetRefined[markerSetId]))))
summaryOut.write('\t%.3f\t%.3f' % (mean(abs(deltaContSetRefined[markerSetId])), std(abs(deltaContSetRefined[markerSetId]))))
summaryOut.write('\n')
fout.write(testGenomeId + '\t%d\t%.2f\t%.2f' % (contigLen, percentComp, percentCont))
fout.write('\t' + taxonomy + '\t' + markerSetId + '\t' + str(numDescendants[markerSetId]))
fout.write('\t%.3f\t%.3f' % (unmodifiedComp[markerSetId], unmodifiedCont[markerSetId]))
fout.write('\t%s' % ','.join(map(str, deltaComp[markerSetId])))
fout.write('\t%s' % ','.join(map(str, deltaCont[markerSetId])))
fout.write('\t%s' % ','.join(map(str, deltaCompSet[markerSetId])))
fout.write('\t%s' % ','.join(map(str, deltaContSet[markerSetId])))
fout.write('\t%s' % ','.join(map(str, deltaCompRefined[markerSetId])))
fout.write('\t%s' % ','.join(map(str, deltaContRefined[markerSetId])))
fout.write('\t%s' % ','.join(map(str, deltaCompSetRefined[markerSetId])))
fout.write('\t%s' % ','.join(map(str, deltaContSetRefined[markerSetId])))
fout.write('\t%s' % ','.join(map(str, trueComps)))
fout.write('\t%s' % ','.join(map(str, trueConts)))
fout.write('\n')
summaryOut.close()
fout.close()
sys.stdout.write('\n')
def run(self, ubiquityThreshold, singleCopyThreshold, numReplicates, minScaffolds, numThreads):
random.seed(0)
print '\n Reading reference genome tree.'
treeFile = os.path.join('/srv', 'db', 'checkm', 'genome_tree', 'genome_tree_prok.refpkg', 'genome_tree.final.tre')
tree = dendropy.Tree.get_from_path(treeFile, schema='newick', as_rooted=True, preserve_underscores=True)
print ' Number of taxa in tree: %d' % (len(tree.leaf_nodes()))
genomesInTree = set()
for leaf in tree.leaf_iter():
genomesInTree.add(leaf.taxon.label.replace('IMG_', ''))
        # get all draft genomes consisting of at least a user-specified minimum number of scaffolds
print ''
metadata = self.img.genomeMetadata()
print ' Total genomes: %d' % len(metadata)
draftGenomeIds = genomesInTree - self.img.filterGenomeIds(genomesInTree, metadata, 'status', 'Finished')
print ' Number of draft genomes: %d' % len(draftGenomeIds)
genomeIdsToTest = set()
for genomeId in draftGenomeIds:
if metadata[genomeId]['scaffold count'] >= minScaffolds:
genomeIdsToTest.add(genomeId)
print ' Number of draft genomes with >= %d scaffolds: %d' % (minScaffolds, len(genomeIdsToTest))
print ''
start = time.time()
self.markerSetBuilder.readLineageSpecificGenesToRemove()
end = time.time()
print ' readLineageSpecificGenesToRemove: %.2f' % (end - start)
print ' Pre-computing genome information for calculating marker sets:'
start = time.time()
self.markerSetBuilder.precomputeGenomeFamilyScaffolds(metadata.keys())
end = time.time()
print ' precomputeGenomeFamilyScaffolds: %.2f' % (end - start)
start = time.time()
self.markerSetBuilder.cachedGeneCountTable = self.img.geneCountTable(metadata.keys())
end = time.time()
print ' globalGeneCountTable: %.2f' % (end - start)
start = time.time()
self.markerSetBuilder.precomputeGenomeSeqLens(metadata.keys())
end = time.time()
print ' precomputeGenomeSeqLens: %.2f' % (end - start)
start = time.time()
self.markerSetBuilder.precomputeGenomeFamilyPositions(metadata.keys(), 0)
end = time.time()
print ' precomputeGenomeFamilyPositions: %.2f' % (end - start)
print ''
print ' Evaluating %d test genomes.' % len(genomeIdsToTest)
workerQueue = mp.Queue()
writerQueue = mp.Queue()
for testGenomeId in list(genomeIdsToTest):
workerQueue.put(testGenomeId)
for _ in range(numThreads):
workerQueue.put(None)
workerProc = [mp.Process(target = self.__workerThread, args = (tree, metadata, genomeIdsToTest, ubiquityThreshold, singleCopyThreshold, numReplicates, workerQueue, writerQueue)) for _ in range(numThreads)]
writeProc = mp.Process(target = self.__writerThread, args = (len(genomeIdsToTest), writerQueue))
writeProc.start()
for p in workerProc:
p.start()
for p in workerProc:
p.join()
writerQueue.put((None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None))
writeProc.join()
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-u', '--ubiquity', help='Ubiquity threshold for defining marker set', type=float, default = 0.97)
parser.add_argument('-s', '--single_copy', help='Single-copy threshold for defining marker set', type=float, default = 0.97)
parser.add_argument('-x', '--replicates', help='Replicates per genome.', type=int, default = 20)
parser.add_argument('-m', '--min_scaffolds', help='Minimum number of scaffolds required for simulation.', type=int, default = 20)
parser.add_argument('-t', '--threads', help='Threads to use', type=int, default = 40)
args = parser.parse_args()
simulationScaffolds = SimulationScaffolds()
simulationScaffolds.run(args.ubiquity, args.single_copy, args.replicates, args.min_scaffolds, args.threads)
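# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): the worker thread
# above "generates a test genome with a specific level of completeness, by
# randomly sampling scaffolds to remove".  A minimal version of that sampling
# idea, with made-up names, could look like the helper below; the real logic
# lives in MarkerSetBuilder.sampleGenomeScaffoldsWithoutReplacement.
def _sketch_sample_scaffolds(percent_comp, seq_lens, genome_size):
    """Randomly retain scaffolds until at least percent_comp of the genome is kept."""
    seq_ids = list(seq_lens.keys())
    random.shuffle(seq_ids)
    retained, retained_len = set(), 0
    for seq_id in seq_ids:
        if retained_len >= percent_comp * genome_size:
            break
        retained.add(seq_id)
        retained_len += seq_lens[seq_id]
    return retained, 100.0 * retained_len / genome_size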
|
Ecogenomics/CheckM
|
scripts/simulationScaffoldsRandom.py
|
Python
|
gpl-3.0
| 18,716
|
import os, re
class Test1(object):
pass
class Test2(object):
pass
class Test3(object):
pass
__all__ = ('Test1', 'Test2', 'Test3')
|
euclio/eclim
|
org.eclim.pydev/test/eclim_unit_test_python/test/common/objects.py
|
Python
|
gpl-3.0
| 140
|
#
# Copyright 2010 Free Software Foundation, Inc.
#
# This file was generated by gr_modtool, a tool from the GNU Radio framework
# This file is a part of gr-howto
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
"""
A base class is created.
Classes based upon this are used to make more user-friendly interfaces
to the doxygen xml docs than the generated classes provide.
"""
from __future__ import print_function
from __future__ import unicode_literals
import os
import pdb
from xml.parsers.expat import ExpatError
from .generated import compound
class Base(object):
class Duplicate(Exception):
pass
class NoSuchMember(Exception):
pass
class ParsingError(Exception):
pass
def __init__(self, parse_data, top=None):
self._parsed = False
self._error = False
self._parse_data = parse_data
self._members = []
self._dict_members = {}
self._in_category = {}
self._data = {}
if top is not None:
self._xml_path = top._xml_path
# Set up holder of references
else:
top = self
self._refs = {}
self._xml_path = parse_data
self.top = top
@classmethod
def from_refid(cls, refid, top=None):
""" Instantiate class from a refid rather than parsing object. """
        # First check to see if it's already been instantiated.
if top is not None and refid in top._refs:
return top._refs[refid]
# Otherwise create a new instance and set refid.
inst = cls(None, top=top)
inst.refid = refid
inst.add_ref(inst)
return inst
@classmethod
def from_parse_data(cls, parse_data, top=None):
refid = getattr(parse_data, 'refid', None)
if refid is not None and top is not None and refid in top._refs:
return top._refs[refid]
inst = cls(parse_data, top=top)
if refid is not None:
inst.refid = refid
inst.add_ref(inst)
return inst
def add_ref(self, obj):
if hasattr(obj, 'refid'):
self.top._refs[obj.refid] = obj
mem_classes = []
def get_cls(self, mem):
for cls in self.mem_classes:
if cls.can_parse(mem):
return cls
raise Exception(("Did not find a class for object '%s'." \
% (mem.get_name())))
def convert_mem(self, mem):
try:
cls = self.get_cls(mem)
converted = cls.from_parse_data(mem, self.top)
if converted is None:
raise Exception('No class matched this object.')
self.add_ref(converted)
return converted
except Exception as e:
print(e)
@classmethod
def includes(cls, inst):
return isinstance(inst, cls)
@classmethod
def can_parse(cls, obj):
return False
def _parse(self):
self._parsed = True
def _get_dict_members(self, cat=None):
"""
        For a given category, return a dictionary mapping member names to
        members of that category. Names that are duplicated are mapped to
        the Duplicate exception class.
"""
self.confirm_no_error()
if cat not in self._dict_members:
new_dict = {}
for mem in self.in_category(cat):
if mem.name() not in new_dict:
new_dict[mem.name()] = mem
else:
new_dict[mem.name()] = self.Duplicate
self._dict_members[cat] = new_dict
return self._dict_members[cat]
def in_category(self, cat):
self.confirm_no_error()
if cat is None:
return self._members
if cat not in self._in_category:
self._in_category[cat] = [mem for mem in self._members
if cat.includes(mem)]
return self._in_category[cat]
def get_member(self, name, cat=None):
self.confirm_no_error()
# Check if it's in a namespace or class.
bits = name.split('::')
first = bits[0]
rest = '::'.join(bits[1:])
member = self._get_dict_members(cat).get(first, self.NoSuchMember)
# Raise any errors that are returned.
if member in set([self.NoSuchMember, self.Duplicate]):
raise member()
if rest:
return member.get_member(rest, cat=cat)
return member
def has_member(self, name, cat=None):
try:
mem = self.get_member(name, cat=cat)
return True
except self.NoSuchMember:
return False
def data(self):
self.confirm_no_error()
return self._data
def members(self):
self.confirm_no_error()
return self._members
def process_memberdefs(self):
mdtss = []
for sec in self._retrieved_data.compounddef.sectiondef:
mdtss += sec.memberdef
# At the moment we lose all information associated with sections.
# Sometimes a memberdef is in several sectiondef.
# We make sure we don't get duplicates here.
uniques = set([])
for mem in mdtss:
converted = self.convert_mem(mem)
pair = (mem.name, mem.__class__)
if pair not in uniques:
uniques.add(pair)
self._members.append(converted)
def retrieve_data(self):
filename = os.path.join(self._xml_path, self.refid + '.xml')
try:
self._retrieved_data = compound.parse(filename)
except ExpatError:
print('Error in xml in file %s' % filename)
self._error = True
self._retrieved_data = None
def check_parsed(self):
if not self._parsed:
self._parse()
def confirm_no_error(self):
self.check_parsed()
if self._error:
raise self.ParsingError()
def error(self):
self.check_parsed()
return self._error
def name(self):
# first see if we can do it without processing.
if self._parse_data is not None:
return self._parse_data.name
self.check_parsed()
return self._retrieved_data.compounddef.name
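# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): subclasses typically
# register themselves in mem_classes and override can_parse() so that
# get_cls()/convert_mem() can dispatch the generated doxygen objects to the
# right wrapper.  The class below is an assumption for illustration only; the
# real wrappers live elsewhere in this package.
class _SketchFunction(Base):
    @classmethod
    def can_parse(cls, obj):
        # dispatch on the kind reported by the generated parser
        return getattr(obj, 'kind', None) == 'function'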
|
skoslowski/gnuradio
|
gr-utils/modtool/templates/gr-newmod/docs/doxygen/doxyxml/base.py
|
Python
|
gpl-3.0
| 6,283
|
# -*- Mode: Python; test-case-name: whipper.test.test_image_table -*-
# vi:si:et:sw=4:sts=4:ts=4
from whipper.image import table
from whipper.test import common as tcommon
def h(i):
return "0x%08x" % i
class TrackTestCase(tcommon.TestCase):
def testRepr(self):
track = table.Track(1)
self.assertEquals(repr(track), "<Track 01>")
track.index(1, 100)
self.failUnless(repr(track.indexes[1]).startswith('<Index 01 '))
class LadyhawkeTestCase(tcommon.TestCase):
# Ladyhawke - Ladyhawke - 0602517818866
# contains 12 audio tracks and one data track
# CDDB has been verified against freedb:
# http://www.freedb.org/freedb/misc/c60af50d
# http://www.freedb.org/freedb/jazz/c60af50d
# AccurateRip URL has been verified against EAC's, using wireshark
def setUp(self):
self.table = table.Table()
for i in range(12):
self.table.tracks.append(table.Track(i + 1, audio=True))
self.table.tracks.append(table.Track(13, audio=False))
offsets = [0, 15537, 31691, 50866, 66466, 81202, 99409,
115920, 133093, 149847, 161560, 177682, 207106]
t = self.table.tracks
for i, offset in enumerate(offsets):
t[i].index(1, absolute=offset)
self.failIf(self.table.hasTOC())
self.table.leadout = 210385
self.failUnless(self.table.hasTOC())
self.assertEquals(self.table.tracks[0].getPregap(), 0)
def testCDDB(self):
self.assertEquals(self.table.getCDDBDiscId(), "c60af50d")
def testMusicBrainz(self):
# output from mb-submit-disc:
# https://musicbrainz.org/cdtoc/attach?toc=1+12+195856+150+
# 15687+31841+51016+66616+81352+99559+116070+133243+149997+161710+
# 177832&tracks=12&id=KnpGsLhvH.lPrNc1PBL21lb9Bg4-
# however, not (yet) in MusicBrainz database
self.assertEquals(self.table.getMusicBrainzDiscId(),
"KnpGsLhvH.lPrNc1PBL21lb9Bg4-")
def testAccurateRip(self):
self.assertEquals(self.table.accuraterip_ids(), (
"0013bd5a", "00b8d489"))
self.assertEquals(self.table.accuraterip_path(),
"a/5/d/dBAR-012-0013bd5a-00b8d489-c60af50d.bin")
def testDuration(self):
self.assertEquals(self.table.duration(), 2761413)
class MusicBrainzTestCase(tcommon.TestCase):
# example taken from https://musicbrainz.org/doc/Disc_ID_Calculation
# disc is Ettella Diamant
def setUp(self):
self.table = table.Table()
for i in range(6):
self.table.tracks.append(table.Track(i + 1, audio=True))
offsets = [0, 15213, 32164, 46442, 63264, 80339]
t = self.table.tracks
for i, offset in enumerate(offsets):
t[i].index(1, absolute=offset)
self.failIf(self.table.hasTOC())
self.table.leadout = 95312
self.failUnless(self.table.hasTOC())
def testMusicBrainz(self):
self.assertEquals(self.table.getMusicBrainzDiscId(),
'49HHV7Eb8UKF3aQiNmu1GR8vKTY-')
class PregapTestCase(tcommon.TestCase):
def setUp(self):
self.table = table.Table()
for i in range(2):
self.table.tracks.append(table.Track(i + 1, audio=True))
offsets = [0, 15537]
t = self.table.tracks
for i, offset in enumerate(offsets):
t[i].index(1, absolute=offset)
t[1].index(0, offsets[1] - 200)
def testPreGap(self):
self.assertEquals(self.table.tracks[0].getPregap(), 0)
self.assertEquals(self.table.tracks[1].getPregap(), 200)
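# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original tests): the "c60af50d" value
# asserted in LadyhawkeTestCase follows the classic freedb/CDDB disc id
# formula.  A stand-alone computation from the track offsets (in CD frames,
# 75 per second, plus the 150 frame lead-in) looks like this:
def _sketch_cddb_discid(offsets, leadout):
    def digit_sum(n):
        return sum(int(c) for c in str(n))
    checksum = sum(digit_sum((offset + 150) // 75) for offset in offsets) % 255
    length = (leadout + 150) // 75 - (offsets[0] + 150) // 75
    return "%08x" % ((checksum << 24) | (length << 8) | len(offsets))
# With the Ladyhawke offsets and leadout used above this returns 'c60af50d',
# matching the value asserted against table.Table.getCDDBDiscId().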
|
Freso/morituri
|
whipper/test/test_image_table.py
|
Python
|
gpl-3.0
| 3,651
|
import pymongo
def migrate(db, validate_production):
print 'Starting data migration..'
# ** Migrate one collection (production) to two (production & exchanges)
col_production = db['production']
col_exchange = db['exchange']
col_old = db['realtime']
for row in col_old.find():
# Extract exchange
if 'exchange' in row:
exchange = row['exchange']
# Insert into exchange db
for k, v in exchange.iteritems():
if k == 'datetime': continue
sortedCountryCodes = '->'.join(sorted([k, row['countryCode']]))
col_exchange.insert({
'datetime': row.get('datetimeExchange', row['datetime']),
'sortedCountryCodes': sortedCountryCodes,
                    # compare against the second country code of the sorted pair,
                    # not a single character of the joined string
                    'netFlow': v if sortedCountryCodes.split('->')[1] == k else v * -1
})
# Delete exchange
del row['exchange']
if 'datetimeExchange' in row: del row['datetimeExchange']
if 'datetimeProduction' in row: del row['datetimeProduction']
# Copy in other collection
try: col_production.insert(row)
except pymongo.errors.DuplicateKeyError: pass
# Delete in old collection
col_old.remove({'_id': row['_id']})
# ** Validate production data
# for row in col_production.find():
# try:
# validate_production(row, row.get('countryCode', None))
# except:
# print 'Warning: row %s did not pass validation' % row['_id']
# print row
# ** 2017-01-28 Add storage
for row in col_production.find({'countryCode': 'FR', 'consumption': {'$exists': True}}):
print 'Migrating %s' % row['datetime']
row['storage'] = row['consumption']
del row['consumption']
col_production.update_one({'_id': row['_id']}, {'$set': row}, upsert=False)
print 'Migration done.'
|
Pantkowsky/electricitymap
|
feeder/migrate_db.py
|
Python
|
gpl-3.0
| 1,926
|
# Utilities for parsing DOLFIN files and generaton of SWIG files
#
# Copyright (C) 2012 Johan Hake
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# First added: 2012-07-01
# Last changed: 2012-07-03
# System imports
import os
import re
try:
from collections import OrderedDict
except ImportError:
from dolfin_utils.ordereddict import OrderedDict
# Local imports
from .CppHeaderParser import CppHeader
# reg exp pattern
_template_pattern = re.compile("\w+<([\w ]+)>")
def strip_templates(types):
"""
Strip template types
"""
ret_types = set()
temp_types = re.findall(_template_pattern, types)
while temp_types:
ret_types.update(temp_types)
for temp_type in temp_types:
types = types.replace("<%s>" % temp_type, "")
temp_types = re.findall(_template_pattern, types)
if types:
ret_types.add(types)
return ret_types
def build_swig_import_info(dependencies, submodule_info, module_prepend="",
package_name="", parsed_modules=None):
"""
Build import and include info from a list of submodules and headers
"""
import_lines = []
headers_includes = ["", "// Include types from dependent modules"]
file_dependencies = []
# Iterate over the file dependencies and generate import statements
previous_submodule = ""
for submodule, headers in list(dependencies.items()):
module = submodule_info[submodule]["module"]
headers_includes.append("")
headers_includes.append("// #include types from %s submodule of "\
"module %s" % (submodule, module))
import_lines.append("")
import_lines.append("// %%import types from submodule"\
" %s of SWIG module %s" % \
(submodule, module))
# Check for pre includes
if submodule_info[submodule]["has_pre"]:
file_dependencies.append("dolfin/swig/%s/pre.i" % submodule)
import_lines.append(\
"%%include \"dolfin/swig/%s/pre.i\"" % submodule)
# Add headers
file_dependencies.extend(headers)
for header in headers:
# Add file dependency
if parsed_modules is None or module in parsed_modules:
# If module is not parsed yet we introduce circular dependencies
                # in import statements which we want to avoid
import_lines.append("%%import(package=\"%s\", module=\"%s\") \"%s\"" % \
(package_name, module_prepend+module, header))
headers_includes.append("#include \"%s\"" % header)
return import_lines, headers_includes, file_dependencies
def parse_and_extract_type_info(code):
"""
Parse header code and return declared types, used types, and any bases
"""
used_types=set()
declared_types=OrderedDict()
# Use CppHeader to extract
cppheader = CppHeader(code, argType="string")
# Collect used types from the code
used_types = set()
# Iterate over typedefs and collect types
for typedef in cppheader.typedefs:
if "dolfin::" in typedef:
typedef = typedef.replace("dolfin::", "")
# Store type information
declared_types[typedef] = []
# Iterate over free functions and collect dependant types
for function in cppheader.functions:
# Check return type
if function["rtnType"] != "void":
used_types.update(strip_templates(function["rtnType"]))
# Check argument types
for argument in function["parameters"]:
used_types.update(strip_templates(argument["type"]))
# Iterate over class and collect info
for class_name, class_repr in list(cppheader.classes.items()):
# Check if class is private
if class_repr["parent"] is not None:
continue
# Get bases
bases = set()
for base in class_repr["inherits"]:
bases.update(strip_templates(base["class"]))
# Remove itself from any bases
bases.difference_update([class_name])
# Register the class
declared_types[class_name] = list(bases)
# Iterate over public properties
for prop in class_repr["properties"]["public"]:
used_types.update(strip_templates(prop["type"]))
# Iterate over methods and collect dependant types
for method in class_repr["methods"]["public"]+\
class_repr["methods"]["protected"]:
# Check return type
if method["rtnType"] != "void":
used_types.update(strip_templates(method["rtnType"]))
# Check argument types
for argument in method["parameters"]:
used_types.update(strip_templates(argument["type"]))
# Remove any self dependencies
used_types = set(used_type for used_type in
used_types if class_name not in used_type)
return used_types, declared_types
def sort_submodule_dependencies(dependencies, submodule_info):
"""
    Given a dict of submodules and headers, use the original
    submodule info to sort the content.
"""
submodule_dependences = OrderedDict()
for submodule in submodule_info:
if submodule in dependencies:
# Register sorted dependency
submodule_dependences[submodule] = [header for header in \
submodule_info[submodule]["headers"] \
if header in dependencies[submodule]]
return submodule_dependences
|
MiroK/dolfin
|
site-packages/dolfin_utils/cppparser/utils.py
|
Python
|
gpl-3.0
| 6,332
|
# IMPORTANT: the same tests are run from "test_xml_etree_c" in order
# to ensure consistency between the C implementation and the Python
# implementation.
#
# For this purpose, the module-level "ET" symbol is temporarily
# monkey-patched when running the "test_xml_etree_c" test suite.
import html
import io
import operator
import pickle
import sys
import unittest
import weakref
from itertools import product
from test import support
from test.support import TESTFN, findfile, import_fresh_module, gc_collect
# pyET is the pure-Python implementation.
#
# ET is pyET in test_xml_etree and is the C accelerated version in
# test_xml_etree_c.
pyET = None
ET = None
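# A sketch of how the two implementations are typically bound (the exact calls
# live in the test modules' setup code and may differ): the pure-Python module
# is imported with the C accelerator blocked, e.g.
#
#   pyET = import_fresh_module('xml.etree.ElementTree', blocked=['_elementtree'])
#
# while test_xml_etree_c re-runs these tests with ET pointing at the
# accelerated implementation.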
SIMPLE_XMLFILE = findfile("simple.xml", subdir="xmltestdata")
try:
SIMPLE_XMLFILE.encode("utf-8")
except UnicodeEncodeError:
raise unittest.SkipTest("filename is not encodable to utf8")
SIMPLE_NS_XMLFILE = findfile("simple-ns.xml", subdir="xmltestdata")
SAMPLE_XML = """\
<body>
<tag class='a'>text</tag>
<tag class='b' />
<section>
<tag class='b' id='inner'>subtext</tag>
</section>
</body>
"""
SAMPLE_SECTION = """\
<section>
<tag class='b' id='inner'>subtext</tag>
<nexttag />
<nextsection>
<tag />
</nextsection>
</section>
"""
SAMPLE_XML_NS = """
<body xmlns="http://effbot.org/ns">
<tag>text</tag>
<tag />
<section>
<tag>subtext</tag>
</section>
</body>
"""
SAMPLE_XML_NS_ELEMS = """
<root>
<h:table xmlns:h="hello">
<h:tr>
<h:td>Apples</h:td>
<h:td>Bananas</h:td>
</h:tr>
</h:table>
<f:table xmlns:f="foo">
<f:name>African Coffee Table</f:name>
<f:width>80</f:width>
<f:length>120</f:length>
</f:table>
</root>
"""
ENTITY_XML = """\
<!DOCTYPE points [
<!ENTITY % user-entities SYSTEM 'user-entities.xml'>
%user-entities;
]>
<document>&entity;</document>
"""
class ModuleTest(unittest.TestCase):
# TODO: this should be removed once we get rid of the global module vars
def test_sanity(self):
# Import sanity.
from xml.etree import ElementTree
from xml.etree import ElementInclude
from xml.etree import ElementPath
def serialize(elem, to_string=True, encoding='unicode', **options):
if encoding != 'unicode':
file = io.BytesIO()
else:
file = io.StringIO()
tree = ET.ElementTree(elem)
tree.write(file, encoding=encoding, **options)
if to_string:
return file.getvalue()
else:
file.seek(0)
return file
def summarize_list(seq):
return [elem.tag for elem in seq]
class ElementTestCase:
@classmethod
def setUpClass(cls):
cls.modules = {pyET, ET}
def pickleRoundTrip(self, obj, name, dumper, loader):
save_m = sys.modules[name]
try:
sys.modules[name] = dumper
temp = pickle.dumps(obj)
sys.modules[name] = loader
result = pickle.loads(temp)
except pickle.PicklingError as pe:
# pyET must be second, because pyET may be (equal to) ET.
human = dict([(ET, "cET"), (pyET, "pyET")])
raise support.TestFailed("Failed to round-trip %r from %r to %r"
% (obj,
human.get(dumper, dumper),
human.get(loader, loader))) from pe
finally:
sys.modules[name] = save_m
return result
def assertEqualElements(self, alice, bob):
self.assertIsInstance(alice, (ET.Element, pyET.Element))
self.assertIsInstance(bob, (ET.Element, pyET.Element))
self.assertEqual(len(list(alice)), len(list(bob)))
for x, y in zip(alice, bob):
self.assertEqualElements(x, y)
properties = operator.attrgetter('tag', 'tail', 'text', 'attrib')
self.assertEqual(properties(alice), properties(bob))
# --------------------------------------------------------------------
# element tree tests
class ElementTreeTest(unittest.TestCase):
def serialize_check(self, elem, expected):
self.assertEqual(serialize(elem), expected)
def test_interface(self):
# Test element tree interface.
def check_string(string):
len(string)
for char in string:
self.assertEqual(len(char), 1,
msg="expected one-character string, got %r" % char)
new_string = string + ""
new_string = string + " "
string[:0]
def check_mapping(mapping):
len(mapping)
keys = mapping.keys()
items = mapping.items()
for key in keys:
item = mapping[key]
mapping["key"] = "value"
self.assertEqual(mapping["key"], "value",
msg="expected value string, got %r" % mapping["key"])
def check_element(element):
self.assertTrue(ET.iselement(element), msg="not an element")
self.assertTrue(hasattr(element, "tag"), msg="no tag member")
self.assertTrue(hasattr(element, "attrib"), msg="no attrib member")
self.assertTrue(hasattr(element, "text"), msg="no text member")
self.assertTrue(hasattr(element, "tail"), msg="no tail member")
check_string(element.tag)
check_mapping(element.attrib)
if element.text is not None:
check_string(element.text)
if element.tail is not None:
check_string(element.tail)
for elem in element:
check_element(elem)
element = ET.Element("tag")
check_element(element)
tree = ET.ElementTree(element)
check_element(tree.getroot())
element = ET.Element("t\xe4g", key="value")
tree = ET.ElementTree(element)
self.assertRegex(repr(element), r"^<Element 't\xe4g' at 0x.*>$")
element = ET.Element("tag", key="value")
# Make sure all standard element methods exist.
def check_method(method):
self.assertTrue(hasattr(method, '__call__'),
msg="%s not callable" % method)
check_method(element.append)
check_method(element.extend)
check_method(element.insert)
check_method(element.remove)
check_method(element.getchildren)
check_method(element.find)
check_method(element.iterfind)
check_method(element.findall)
check_method(element.findtext)
check_method(element.clear)
check_method(element.get)
check_method(element.set)
check_method(element.keys)
check_method(element.items)
check_method(element.iter)
check_method(element.itertext)
check_method(element.getiterator)
# These methods return an iterable. See bug 6472.
def check_iter(it):
check_method(it.__next__)
check_iter(element.iterfind("tag"))
check_iter(element.iterfind("*"))
check_iter(tree.iterfind("tag"))
check_iter(tree.iterfind("*"))
# These aliases are provided:
self.assertEqual(ET.XML, ET.fromstring)
self.assertEqual(ET.PI, ET.ProcessingInstruction)
self.assertEqual(ET.XMLParser, ET.XMLTreeBuilder)
def test_simpleops(self):
# Basic method sanity checks.
elem = ET.XML("<body><tag/></body>")
self.serialize_check(elem, '<body><tag /></body>')
e = ET.Element("tag2")
elem.append(e)
self.serialize_check(elem, '<body><tag /><tag2 /></body>')
elem.remove(e)
self.serialize_check(elem, '<body><tag /></body>')
elem.insert(0, e)
self.serialize_check(elem, '<body><tag2 /><tag /></body>')
elem.remove(e)
elem.extend([e])
self.serialize_check(elem, '<body><tag /><tag2 /></body>')
elem.remove(e)
element = ET.Element("tag", key="value")
self.serialize_check(element, '<tag key="value" />') # 1
subelement = ET.Element("subtag")
element.append(subelement)
self.serialize_check(element, '<tag key="value"><subtag /></tag>') # 2
element.insert(0, subelement)
self.serialize_check(element,
'<tag key="value"><subtag /><subtag /></tag>') # 3
element.remove(subelement)
self.serialize_check(element, '<tag key="value"><subtag /></tag>') # 4
element.remove(subelement)
self.serialize_check(element, '<tag key="value" />') # 5
with self.assertRaises(ValueError) as cm:
element.remove(subelement)
self.assertEqual(str(cm.exception), 'list.remove(x): x not in list')
self.serialize_check(element, '<tag key="value" />') # 6
element[0:0] = [subelement, subelement, subelement]
self.serialize_check(element[1], '<subtag />')
self.assertEqual(element[1:9], [element[1], element[2]])
self.assertEqual(element[:9:2], [element[0], element[2]])
del element[1:2]
self.serialize_check(element,
'<tag key="value"><subtag /><subtag /></tag>')
def test_cdata(self):
# Test CDATA handling (etc).
self.serialize_check(ET.XML("<tag>hello</tag>"),
'<tag>hello</tag>')
self.serialize_check(ET.XML("<tag>hello</tag>"),
'<tag>hello</tag>')
self.serialize_check(ET.XML("<tag><![CDATA[hello]]></tag>"),
'<tag>hello</tag>')
def test_file_init(self):
stringfile = io.BytesIO(SAMPLE_XML.encode("utf-8"))
tree = ET.ElementTree(file=stringfile)
self.assertEqual(tree.find("tag").tag, 'tag')
self.assertEqual(tree.find("section/tag").tag, 'tag')
tree = ET.ElementTree(file=SIMPLE_XMLFILE)
self.assertEqual(tree.find("element").tag, 'element')
self.assertEqual(tree.find("element/../empty-element").tag,
'empty-element')
def test_path_cache(self):
# Check that the path cache behaves sanely.
from xml.etree import ElementPath
elem = ET.XML(SAMPLE_XML)
for i in range(10): ET.ElementTree(elem).find('./'+str(i))
cache_len_10 = len(ElementPath._cache)
for i in range(10): ET.ElementTree(elem).find('./'+str(i))
self.assertEqual(len(ElementPath._cache), cache_len_10)
for i in range(20): ET.ElementTree(elem).find('./'+str(i))
self.assertGreater(len(ElementPath._cache), cache_len_10)
for i in range(600): ET.ElementTree(elem).find('./'+str(i))
self.assertLess(len(ElementPath._cache), 500)
def test_copy(self):
# Test copy handling (etc).
import copy
e1 = ET.XML("<tag>hello<foo/></tag>")
e2 = copy.copy(e1)
e3 = copy.deepcopy(e1)
e1.find("foo").tag = "bar"
self.serialize_check(e1, '<tag>hello<bar /></tag>')
self.serialize_check(e2, '<tag>hello<bar /></tag>')
self.serialize_check(e3, '<tag>hello<foo /></tag>')
def test_attrib(self):
# Test attribute handling.
elem = ET.Element("tag")
elem.get("key") # 1.1
self.assertEqual(elem.get("key", "default"), 'default') # 1.2
elem.set("key", "value")
self.assertEqual(elem.get("key"), 'value') # 1.3
elem = ET.Element("tag", key="value")
self.assertEqual(elem.get("key"), 'value') # 2.1
self.assertEqual(elem.attrib, {'key': 'value'}) # 2.2
attrib = {"key": "value"}
elem = ET.Element("tag", attrib)
attrib.clear() # check for aliasing issues
self.assertEqual(elem.get("key"), 'value') # 3.1
self.assertEqual(elem.attrib, {'key': 'value'}) # 3.2
attrib = {"key": "value"}
elem = ET.Element("tag", **attrib)
attrib.clear() # check for aliasing issues
self.assertEqual(elem.get("key"), 'value') # 4.1
self.assertEqual(elem.attrib, {'key': 'value'}) # 4.2
elem = ET.Element("tag", {"key": "other"}, key="value")
self.assertEqual(elem.get("key"), 'value') # 5.1
self.assertEqual(elem.attrib, {'key': 'value'}) # 5.2
elem = ET.Element('test')
elem.text = "aa"
elem.set('testa', 'testval')
elem.set('testb', 'test2')
self.assertEqual(ET.tostring(elem),
b'<test testa="testval" testb="test2">aa</test>')
self.assertEqual(sorted(elem.keys()), ['testa', 'testb'])
self.assertEqual(sorted(elem.items()),
[('testa', 'testval'), ('testb', 'test2')])
self.assertEqual(elem.attrib['testb'], 'test2')
elem.attrib['testb'] = 'test1'
elem.attrib['testc'] = 'test2'
self.assertEqual(ET.tostring(elem),
b'<test testa="testval" testb="test1" testc="test2">aa</test>')
def test_makeelement(self):
# Test makeelement handling.
elem = ET.Element("tag")
attrib = {"key": "value"}
subelem = elem.makeelement("subtag", attrib)
self.assertIsNot(subelem.attrib, attrib, msg="attrib aliasing")
elem.append(subelem)
self.serialize_check(elem, '<tag><subtag key="value" /></tag>')
elem.clear()
self.serialize_check(elem, '<tag />')
elem.append(subelem)
self.serialize_check(elem, '<tag><subtag key="value" /></tag>')
elem.extend([subelem, subelem])
self.serialize_check(elem,
'<tag><subtag key="value" /><subtag key="value" /><subtag key="value" /></tag>')
elem[:] = [subelem]
self.serialize_check(elem, '<tag><subtag key="value" /></tag>')
elem[:] = tuple([subelem])
self.serialize_check(elem, '<tag><subtag key="value" /></tag>')
def test_parsefile(self):
# Test parsing from file.
tree = ET.parse(SIMPLE_XMLFILE)
stream = io.StringIO()
tree.write(stream, encoding='unicode')
self.assertEqual(stream.getvalue(),
'<root>\n'
' <element key="value">text</element>\n'
' <element>text</element>tail\n'
' <empty-element />\n'
'</root>')
tree = ET.parse(SIMPLE_NS_XMLFILE)
stream = io.StringIO()
tree.write(stream, encoding='unicode')
self.assertEqual(stream.getvalue(),
'<ns0:root xmlns:ns0="namespace">\n'
' <ns0:element key="value">text</ns0:element>\n'
' <ns0:element>text</ns0:element>tail\n'
' <ns0:empty-element />\n'
'</ns0:root>')
with open(SIMPLE_XMLFILE) as f:
data = f.read()
parser = ET.XMLParser()
self.assertRegex(parser.version, r'^Expat ')
parser.feed(data)
self.serialize_check(parser.close(),
'<root>\n'
' <element key="value">text</element>\n'
' <element>text</element>tail\n'
' <empty-element />\n'
'</root>')
parser = ET.XMLTreeBuilder() # 1.2 compatibility
parser.feed(data)
self.serialize_check(parser.close(),
'<root>\n'
' <element key="value">text</element>\n'
' <element>text</element>tail\n'
' <empty-element />\n'
'</root>')
target = ET.TreeBuilder()
parser = ET.XMLParser(target=target)
parser.feed(data)
self.serialize_check(parser.close(),
'<root>\n'
' <element key="value">text</element>\n'
' <element>text</element>tail\n'
' <empty-element />\n'
'</root>')
def test_parseliteral(self):
element = ET.XML("<html><body>text</body></html>")
self.assertEqual(ET.tostring(element, encoding='unicode'),
'<html><body>text</body></html>')
element = ET.fromstring("<html><body>text</body></html>")
self.assertEqual(ET.tostring(element, encoding='unicode'),
'<html><body>text</body></html>')
sequence = ["<html><body>", "text</bo", "dy></html>"]
element = ET.fromstringlist(sequence)
self.assertEqual(ET.tostring(element),
b'<html><body>text</body></html>')
self.assertEqual(b"".join(ET.tostringlist(element)),
b'<html><body>text</body></html>')
self.assertEqual(ET.tostring(element, "ascii"),
b"<?xml version='1.0' encoding='ascii'?>\n"
b"<html><body>text</body></html>")
_, ids = ET.XMLID("<html><body>text</body></html>")
self.assertEqual(len(ids), 0)
_, ids = ET.XMLID("<html><body id='body'>text</body></html>")
self.assertEqual(len(ids), 1)
self.assertEqual(ids["body"].tag, 'body')
def test_iterparse(self):
# Test iterparse interface.
iterparse = ET.iterparse
context = iterparse(SIMPLE_XMLFILE)
action, elem = next(context)
self.assertEqual((action, elem.tag), ('end', 'element'))
self.assertEqual([(action, elem.tag) for action, elem in context], [
('end', 'element'),
('end', 'empty-element'),
('end', 'root'),
])
self.assertEqual(context.root.tag, 'root')
context = iterparse(SIMPLE_NS_XMLFILE)
self.assertEqual([(action, elem.tag) for action, elem in context], [
('end', '{namespace}element'),
('end', '{namespace}element'),
('end', '{namespace}empty-element'),
('end', '{namespace}root'),
])
events = ()
context = iterparse(SIMPLE_XMLFILE, events)
self.assertEqual([(action, elem.tag) for action, elem in context], [])
events = ()
context = iterparse(SIMPLE_XMLFILE, events=events)
self.assertEqual([(action, elem.tag) for action, elem in context], [])
events = ("start", "end")
context = iterparse(SIMPLE_XMLFILE, events)
self.assertEqual([(action, elem.tag) for action, elem in context], [
('start', 'root'),
('start', 'element'),
('end', 'element'),
('start', 'element'),
('end', 'element'),
('start', 'empty-element'),
('end', 'empty-element'),
('end', 'root'),
])
events = ("start", "end", "start-ns", "end-ns")
context = iterparse(SIMPLE_NS_XMLFILE, events)
self.assertEqual([(action, elem.tag) if action in ("start", "end")
else (action, elem)
for action, elem in context], [
('start-ns', ('', 'namespace')),
('start', '{namespace}root'),
('start', '{namespace}element'),
('end', '{namespace}element'),
('start', '{namespace}element'),
('end', '{namespace}element'),
('start', '{namespace}empty-element'),
('end', '{namespace}empty-element'),
('end', '{namespace}root'),
('end-ns', None),
])
events = ('start-ns', 'end-ns')
context = iterparse(io.StringIO(r"<root xmlns=''/>"), events)
res = [action for action, elem in context]
self.assertEqual(res, ['start-ns', 'end-ns'])
events = ("start", "end", "bogus")
with self.assertRaises(ValueError) as cm:
with open(SIMPLE_XMLFILE, "rb") as f:
iterparse(f, events)
self.assertEqual(str(cm.exception), "unknown event 'bogus'")
source = io.BytesIO(
b"<?xml version='1.0' encoding='iso-8859-1'?>\n"
b"<body xmlns='http://éffbot.org/ns'\n"
b" xmlns:cl\xe9='http://effbot.org/ns'>text</body>\n")
events = ("start-ns",)
context = iterparse(source, events)
self.assertEqual([(action, elem) for action, elem in context], [
('start-ns', ('', 'http://\xe9ffbot.org/ns')),
('start-ns', ('cl\xe9', 'http://effbot.org/ns')),
])
source = io.StringIO("<document />junk")
it = iterparse(source)
action, elem = next(it)
self.assertEqual((action, elem.tag), ('end', 'document'))
with self.assertRaises(ET.ParseError) as cm:
next(it)
self.assertEqual(str(cm.exception),
'junk after document element: line 1, column 12')
def test_writefile(self):
elem = ET.Element("tag")
elem.text = "text"
self.serialize_check(elem, '<tag>text</tag>')
ET.SubElement(elem, "subtag").text = "subtext"
self.serialize_check(elem, '<tag>text<subtag>subtext</subtag></tag>')
# Test tag suppression
elem.tag = None
self.serialize_check(elem, 'text<subtag>subtext</subtag>')
elem.insert(0, ET.Comment("comment"))
self.serialize_check(elem,
'text<!--comment--><subtag>subtext</subtag>') # assumes 1.3
elem[0] = ET.PI("key", "value")
self.serialize_check(elem, 'text<?key value?><subtag>subtext</subtag>')
def test_custom_builder(self):
# Test parser w. custom builder.
with open(SIMPLE_XMLFILE) as f:
data = f.read()
class Builder(list):
def start(self, tag, attrib):
self.append(("start", tag))
def end(self, tag):
self.append(("end", tag))
def data(self, text):
pass
builder = Builder()
parser = ET.XMLParser(target=builder)
parser.feed(data)
self.assertEqual(builder, [
('start', 'root'),
('start', 'element'),
('end', 'element'),
('start', 'element'),
('end', 'element'),
('start', 'empty-element'),
('end', 'empty-element'),
('end', 'root'),
])
with open(SIMPLE_NS_XMLFILE) as f:
data = f.read()
class Builder(list):
def start(self, tag, attrib):
self.append(("start", tag))
def end(self, tag):
self.append(("end", tag))
def data(self, text):
pass
def pi(self, target, data):
self.append(("pi", target, data))
def comment(self, data):
self.append(("comment", data))
builder = Builder()
parser = ET.XMLParser(target=builder)
parser.feed(data)
self.assertEqual(builder, [
('pi', 'pi', 'data'),
('comment', ' comment '),
('start', '{namespace}root'),
('start', '{namespace}element'),
('end', '{namespace}element'),
('start', '{namespace}element'),
('end', '{namespace}element'),
('start', '{namespace}empty-element'),
('end', '{namespace}empty-element'),
('end', '{namespace}root'),
])
def test_getchildren(self):
# Test Element.getchildren()
with open(SIMPLE_XMLFILE, "rb") as f:
tree = ET.parse(f)
self.assertEqual([summarize_list(elem.getchildren())
for elem in tree.getroot().iter()], [
['element', 'element', 'empty-element'],
[],
[],
[],
])
self.assertEqual([summarize_list(elem.getchildren())
for elem in tree.getiterator()], [
['element', 'element', 'empty-element'],
[],
[],
[],
])
elem = ET.XML(SAMPLE_XML)
self.assertEqual(len(elem.getchildren()), 3)
self.assertEqual(len(elem[2].getchildren()), 1)
self.assertEqual(elem[:], elem.getchildren())
child1 = elem[0]
child2 = elem[2]
del elem[1:2]
self.assertEqual(len(elem.getchildren()), 2)
self.assertEqual(child1, elem[0])
self.assertEqual(child2, elem[1])
elem[0:2] = [child2, child1]
self.assertEqual(child2, elem[0])
self.assertEqual(child1, elem[1])
self.assertNotEqual(child1, elem[0])
elem.clear()
self.assertEqual(elem.getchildren(), [])
def test_writestring(self):
elem = ET.XML("<html><body>text</body></html>")
self.assertEqual(ET.tostring(elem), b'<html><body>text</body></html>')
elem = ET.fromstring("<html><body>text</body></html>")
self.assertEqual(ET.tostring(elem), b'<html><body>text</body></html>')
def test_encoding(self):
def check(encoding, body=''):
xml = ("<?xml version='1.0' encoding='%s'?><xml>%s</xml>" %
(encoding, body))
self.assertEqual(ET.XML(xml.encode(encoding)).text, body)
self.assertEqual(ET.XML(xml).text, body)
check("ascii", 'a')
check("us-ascii", 'a')
check("iso-8859-1", '\xbd')
check("iso-8859-15", '\u20ac')
check("cp437", '\u221a')
check("mac-roman", '\u02da')
def xml(encoding):
return "<?xml version='1.0' encoding='%s'?><xml />" % encoding
def bxml(encoding):
return xml(encoding).encode(encoding)
supported_encodings = [
'ascii', 'utf-8', 'utf-8-sig', 'utf-16', 'utf-16be', 'utf-16le',
'iso8859-1', 'iso8859-2', 'iso8859-3', 'iso8859-4', 'iso8859-5',
'iso8859-6', 'iso8859-7', 'iso8859-8', 'iso8859-9', 'iso8859-10',
'iso8859-13', 'iso8859-14', 'iso8859-15', 'iso8859-16',
'cp437', 'cp720', 'cp737', 'cp775', 'cp850', 'cp852',
'cp855', 'cp856', 'cp857', 'cp858', 'cp860', 'cp861', 'cp862',
'cp863', 'cp865', 'cp866', 'cp869', 'cp874', 'cp1006', 'cp1250',
'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255', 'cp1256',
'cp1257', 'cp1258',
'mac-cyrillic', 'mac-greek', 'mac-iceland', 'mac-latin2',
'mac-roman', 'mac-turkish',
'iso2022-jp', 'iso2022-jp-1', 'iso2022-jp-2', 'iso2022-jp-2004',
'iso2022-jp-3', 'iso2022-jp-ext',
'koi8-r', 'koi8-u',
'hz', 'ptcp154',
]
for encoding in supported_encodings:
self.assertEqual(ET.tostring(ET.XML(bxml(encoding))), b'<xml />')
unsupported_ascii_compatible_encodings = [
'big5', 'big5hkscs',
'cp932', 'cp949', 'cp950',
'euc-jp', 'euc-jis-2004', 'euc-jisx0213', 'euc-kr',
'gb2312', 'gbk', 'gb18030',
'iso2022-kr', 'johab',
'shift-jis', 'shift-jis-2004', 'shift-jisx0213',
'utf-7',
]
for encoding in unsupported_ascii_compatible_encodings:
self.assertRaises(ValueError, ET.XML, bxml(encoding))
unsupported_ascii_incompatible_encodings = [
'cp037', 'cp424', 'cp500', 'cp864', 'cp875', 'cp1026', 'cp1140',
'utf_32', 'utf_32_be', 'utf_32_le',
]
for encoding in unsupported_ascii_incompatible_encodings:
self.assertRaises(ET.ParseError, ET.XML, bxml(encoding))
self.assertRaises(ValueError, ET.XML, xml('undefined').encode('ascii'))
self.assertRaises(LookupError, ET.XML, xml('xxx').encode('ascii'))
def test_methods(self):
# Test serialization methods.
e = ET.XML("<html><link/><script>1 < 2</script></html>")
e.tail = "\n"
self.assertEqual(serialize(e),
'<html><link /><script>1 < 2</script></html>\n')
self.assertEqual(serialize(e, method=None),
'<html><link /><script>1 < 2</script></html>\n')
self.assertEqual(serialize(e, method="xml"),
'<html><link /><script>1 < 2</script></html>\n')
self.assertEqual(serialize(e, method="html"),
'<html><link><script>1 < 2</script></html>\n')
self.assertEqual(serialize(e, method="text"), '1 < 2\n')
def test_issue18347(self):
e = ET.XML('<html><CamelCase>text</CamelCase></html>')
self.assertEqual(serialize(e),
'<html><CamelCase>text</CamelCase></html>')
self.assertEqual(serialize(e, method="html"),
'<html><CamelCase>text</CamelCase></html>')
def test_entity(self):
# Test entity handling.
# 1) good entities
e = ET.XML("<document title='舰'>test</document>")
self.assertEqual(serialize(e, encoding="us-ascii"),
b'<document title="舰">test</document>')
self.serialize_check(e, '<document title="\u8230">test</document>')
# 2) bad entities
with self.assertRaises(ET.ParseError) as cm:
ET.XML("<document>&entity;</document>")
self.assertEqual(str(cm.exception),
'undefined entity: line 1, column 10')
with self.assertRaises(ET.ParseError) as cm:
ET.XML(ENTITY_XML)
self.assertEqual(str(cm.exception),
'undefined entity &entity;: line 5, column 10')
# 3) custom entity
parser = ET.XMLParser()
parser.entity["entity"] = "text"
parser.feed(ENTITY_XML)
root = parser.close()
self.serialize_check(root, '<document>text</document>')
def test_namespace(self):
# Test namespace issues.
# 1) xml namespace
elem = ET.XML("<tag xml:lang='en' />")
self.serialize_check(elem, '<tag xml:lang="en" />') # 1.1
# 2) other "well-known" namespaces
elem = ET.XML("<rdf:RDF xmlns:rdf='http://www.w3.org/1999/02/22-rdf-syntax-ns#' />")
self.serialize_check(elem,
'<rdf:RDF xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" />') # 2.1
elem = ET.XML("<html:html xmlns:html='http://www.w3.org/1999/xhtml' />")
self.serialize_check(elem,
'<html:html xmlns:html="http://www.w3.org/1999/xhtml" />') # 2.2
elem = ET.XML("<soap:Envelope xmlns:soap='http://schemas.xmlsoap.org/soap/envelope' />")
self.serialize_check(elem,
'<ns0:Envelope xmlns:ns0="http://schemas.xmlsoap.org/soap/envelope" />') # 2.3
# 3) unknown namespaces
elem = ET.XML(SAMPLE_XML_NS)
self.serialize_check(elem,
'<ns0:body xmlns:ns0="http://effbot.org/ns">\n'
' <ns0:tag>text</ns0:tag>\n'
' <ns0:tag />\n'
' <ns0:section>\n'
' <ns0:tag>subtext</ns0:tag>\n'
' </ns0:section>\n'
'</ns0:body>')
def test_qname(self):
# Test QName handling.
# 1) decorated tags
elem = ET.Element("{uri}tag")
self.serialize_check(elem, '<ns0:tag xmlns:ns0="uri" />') # 1.1
elem = ET.Element(ET.QName("{uri}tag"))
self.serialize_check(elem, '<ns0:tag xmlns:ns0="uri" />') # 1.2
elem = ET.Element(ET.QName("uri", "tag"))
self.serialize_check(elem, '<ns0:tag xmlns:ns0="uri" />') # 1.3
elem = ET.Element(ET.QName("uri", "tag"))
subelem = ET.SubElement(elem, ET.QName("uri", "tag1"))
subelem = ET.SubElement(elem, ET.QName("uri", "tag2"))
self.serialize_check(elem,
'<ns0:tag xmlns:ns0="uri"><ns0:tag1 /><ns0:tag2 /></ns0:tag>') # 1.4
# 2) decorated attributes
elem.clear()
elem.attrib["{uri}key"] = "value"
self.serialize_check(elem,
'<ns0:tag xmlns:ns0="uri" ns0:key="value" />') # 2.1
elem.clear()
elem.attrib[ET.QName("{uri}key")] = "value"
self.serialize_check(elem,
'<ns0:tag xmlns:ns0="uri" ns0:key="value" />') # 2.2
# 3) decorated values are not converted by default, but the
# QName wrapper can be used for values
elem.clear()
elem.attrib["{uri}key"] = "{uri}value"
self.serialize_check(elem,
'<ns0:tag xmlns:ns0="uri" ns0:key="{uri}value" />') # 3.1
elem.clear()
elem.attrib["{uri}key"] = ET.QName("{uri}value")
self.serialize_check(elem,
'<ns0:tag xmlns:ns0="uri" ns0:key="ns0:value" />') # 3.2
elem.clear()
subelem = ET.Element("tag")
subelem.attrib["{uri1}key"] = ET.QName("{uri2}value")
elem.append(subelem)
elem.append(subelem)
self.serialize_check(elem,
'<ns0:tag xmlns:ns0="uri" xmlns:ns1="uri1" xmlns:ns2="uri2">'
'<tag ns1:key="ns2:value" />'
'<tag ns1:key="ns2:value" />'
'</ns0:tag>') # 3.3
# 4) Direct QName tests
self.assertEqual(str(ET.QName('ns', 'tag')), '{ns}tag')
self.assertEqual(str(ET.QName('{ns}tag')), '{ns}tag')
q1 = ET.QName('ns', 'tag')
q2 = ET.QName('ns', 'tag')
self.assertEqual(q1, q2)
q2 = ET.QName('ns', 'other-tag')
self.assertNotEqual(q1, q2)
self.assertNotEqual(q1, 'ns:tag')
self.assertEqual(q1, '{ns}tag')
def test_doctype_public(self):
# Test PUBLIC doctype.
elem = ET.XML('<!DOCTYPE html PUBLIC'
' "-//W3C//DTD XHTML 1.0 Transitional//EN"'
' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
'<html>text</html>')
def test_xpath_tokenizer(self):
# Test the XPath tokenizer.
from xml.etree import ElementPath
def check(p, expected):
self.assertEqual([op or tag
for op, tag in ElementPath.xpath_tokenizer(p)],
expected)
# tests from the xml specification
check("*", ['*'])
check("text()", ['text', '()'])
check("@name", ['@', 'name'])
check("@*", ['@', '*'])
check("para[1]", ['para', '[', '1', ']'])
check("para[last()]", ['para', '[', 'last', '()', ']'])
check("*/para", ['*', '/', 'para'])
check("/doc/chapter[5]/section[2]",
['/', 'doc', '/', 'chapter', '[', '5', ']',
'/', 'section', '[', '2', ']'])
check("chapter//para", ['chapter', '//', 'para'])
check("//para", ['//', 'para'])
check("//olist/item", ['//', 'olist', '/', 'item'])
check(".", ['.'])
check(".//para", ['.', '//', 'para'])
check("..", ['..'])
check("../@lang", ['..', '/', '@', 'lang'])
check("chapter[title]", ['chapter', '[', 'title', ']'])
check("employee[@secretary and @assistant]", ['employee',
'[', '@', 'secretary', '', 'and', '', '@', 'assistant', ']'])
# additional tests
check("{http://spam}egg", ['{http://spam}egg'])
check("./spam.egg", ['.', '/', 'spam.egg'])
check(".//{http://spam}egg", ['.', '//', '{http://spam}egg'])
def test_processinginstruction(self):
# Test ProcessingInstruction directly
self.assertEqual(ET.tostring(ET.ProcessingInstruction('test', 'instruction')),
b'<?test instruction?>')
self.assertEqual(ET.tostring(ET.PI('test', 'instruction')),
b'<?test instruction?>')
# Issue #2746
self.assertEqual(ET.tostring(ET.PI('test', '<testing&>')),
b'<?test <testing&>?>')
self.assertEqual(ET.tostring(ET.PI('test', '<testing&>\xe3'), 'latin-1'),
b"<?xml version='1.0' encoding='latin-1'?>\n"
b"<?test <testing&>\xe3?>")
def test_html_empty_elems_serialization(self):
# issue 15970
# from http://www.w3.org/TR/html401/index/elements.html
for element in ['AREA', 'BASE', 'BASEFONT', 'BR', 'COL', 'FRAME', 'HR',
'IMG', 'INPUT', 'ISINDEX', 'LINK', 'META', 'PARAM']:
for elem in [element, element.lower()]:
expected = '<%s>' % elem
serialized = serialize(ET.XML('<%s />' % elem), method='html')
self.assertEqual(serialized, expected)
serialized = serialize(ET.XML('<%s></%s>' % (elem,elem)),
method='html')
self.assertEqual(serialized, expected)
#
# xinclude tests (samples from appendix C of the xinclude specification)
XINCLUDE = {}
XINCLUDE["C1.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>120 Mz is adequate for an average home user.</p>
<xi:include href="disclaimer.xml"/>
</document>
"""
XINCLUDE["disclaimer.xml"] = """\
<?xml version='1.0'?>
<disclaimer>
<p>The opinions represented herein represent those of the individual
and should not be interpreted as official policy endorsed by this
organization.</p>
</disclaimer>
"""
XINCLUDE["C2.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>This document has been accessed
<xi:include href="count.txt" parse="text"/> times.</p>
</document>
"""
XINCLUDE["count.txt"] = "324387"
XINCLUDE["C2b.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>This document has been <em>accessed</em>
<xi:include href="count.txt" parse="text"/> times.</p>
</document>
"""
XINCLUDE["C3.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>The following is the source of the "data.xml" resource:</p>
<example><xi:include href="data.xml" parse="text"/></example>
</document>
"""
XINCLUDE["data.xml"] = """\
<?xml version='1.0'?>
<data>
<item><![CDATA[Brooks & Shields]]></item>
</data>
"""
XINCLUDE["C5.xml"] = """\
<?xml version='1.0'?>
<div xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="example.txt" parse="text">
<xi:fallback>
<xi:include href="fallback-example.txt" parse="text">
<xi:fallback><a href="mailto:bob@example.org">Report error</a></xi:fallback>
</xi:include>
</xi:fallback>
</xi:include>
</div>
"""
XINCLUDE["default.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>Example.</p>
<xi:include href="{}"/>
</document>
""".format(html.escape(SIMPLE_XMLFILE, True))
#
# badly formatted xi:include tags
XINCLUDE_BAD = {}
XINCLUDE_BAD["B1.xml"] = """\
<?xml version='1.0'?>
<document xmlns:xi="http://www.w3.org/2001/XInclude">
<p>120 Mz is adequate for an average home user.</p>
<xi:include href="disclaimer.xml" parse="BAD_TYPE"/>
</document>
"""
XINCLUDE_BAD["B2.xml"] = """\
<?xml version='1.0'?>
<div xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:fallback></xi:fallback>
</div>
"""
class XIncludeTest(unittest.TestCase):
def xinclude_loader(self, href, parse="xml", encoding=None):
try:
data = XINCLUDE[href]
except KeyError:
raise OSError("resource not found")
if parse == "xml":
data = ET.XML(data)
return data
def none_loader(self, href, parser, encoding=None):
return None
def _my_loader(self, href, parse):
# Used to avoid a test-dependency problem where the default loader
# of ElementInclude uses the pyET parser for cET tests.
if parse == 'xml':
with open(href, 'rb') as f:
return ET.parse(f).getroot()
else:
return None
def test_xinclude_default(self):
from xml.etree import ElementInclude
doc = self.xinclude_loader('default.xml')
ElementInclude.include(doc, self._my_loader)
self.assertEqual(serialize(doc),
'<document>\n'
' <p>Example.</p>\n'
' <root>\n'
' <element key="value">text</element>\n'
' <element>text</element>tail\n'
' <empty-element />\n'
'</root>\n'
'</document>')
def test_xinclude(self):
from xml.etree import ElementInclude
# Basic inclusion example (XInclude C.1)
document = self.xinclude_loader("C1.xml")
ElementInclude.include(document, self.xinclude_loader)
self.assertEqual(serialize(document),
'<document>\n'
' <p>120 Mz is adequate for an average home user.</p>\n'
' <disclaimer>\n'
' <p>The opinions represented herein represent those of the individual\n'
' and should not be interpreted as official policy endorsed by this\n'
' organization.</p>\n'
'</disclaimer>\n'
'</document>') # C1
# Textual inclusion example (XInclude C.2)
document = self.xinclude_loader("C2.xml")
ElementInclude.include(document, self.xinclude_loader)
self.assertEqual(serialize(document),
'<document>\n'
' <p>This document has been accessed\n'
' 324387 times.</p>\n'
'</document>') # C2
# Textual inclusion after sibling element (based on modified XInclude C.2)
document = self.xinclude_loader("C2b.xml")
ElementInclude.include(document, self.xinclude_loader)
self.assertEqual(serialize(document),
'<document>\n'
' <p>This document has been <em>accessed</em>\n'
' 324387 times.</p>\n'
'</document>') # C2b
# Textual inclusion of XML example (XInclude C.3)
document = self.xinclude_loader("C3.xml")
ElementInclude.include(document, self.xinclude_loader)
self.assertEqual(serialize(document),
'<document>\n'
' <p>The following is the source of the "data.xml" resource:</p>\n'
" <example><?xml version='1.0'?>\n"
'<data>\n'
' <item><![CDATA[Brooks & Shields]]></item>\n'
'</data>\n'
'</example>\n'
'</document>') # C3
# Fallback example (XInclude C.5)
# Note! Fallback support is not yet implemented
document = self.xinclude_loader("C5.xml")
with self.assertRaises(OSError) as cm:
ElementInclude.include(document, self.xinclude_loader)
self.assertEqual(str(cm.exception), 'resource not found')
self.assertEqual(serialize(document),
'<div xmlns:ns0="http://www.w3.org/2001/XInclude">\n'
' <ns0:include href="example.txt" parse="text">\n'
' <ns0:fallback>\n'
' <ns0:include href="fallback-example.txt" parse="text">\n'
' <ns0:fallback><a href="mailto:bob@example.org">Report error</a></ns0:fallback>\n'
' </ns0:include>\n'
' </ns0:fallback>\n'
' </ns0:include>\n'
'</div>') # C5
def test_xinclude_failures(self):
from xml.etree import ElementInclude
# Test failure to locate included XML file.
document = ET.XML(XINCLUDE["C1.xml"])
with self.assertRaises(ElementInclude.FatalIncludeError) as cm:
ElementInclude.include(document, loader=self.none_loader)
self.assertEqual(str(cm.exception),
"cannot load 'disclaimer.xml' as 'xml'")
# Test failure to locate included text file.
document = ET.XML(XINCLUDE["C2.xml"])
with self.assertRaises(ElementInclude.FatalIncludeError) as cm:
ElementInclude.include(document, loader=self.none_loader)
self.assertEqual(str(cm.exception),
"cannot load 'count.txt' as 'text'")
# Test bad parse type.
document = ET.XML(XINCLUDE_BAD["B1.xml"])
with self.assertRaises(ElementInclude.FatalIncludeError) as cm:
ElementInclude.include(document, loader=self.none_loader)
self.assertEqual(str(cm.exception),
"unknown parse type in xi:include tag ('BAD_TYPE')")
# Test xi:fallback outside xi:include.
document = ET.XML(XINCLUDE_BAD["B2.xml"])
with self.assertRaises(ElementInclude.FatalIncludeError) as cm:
ElementInclude.include(document, loader=self.none_loader)
self.assertEqual(str(cm.exception),
"xi:fallback tag must be child of xi:include "
"('{http://www.w3.org/2001/XInclude}fallback')")
# --------------------------------------------------------------------
# reported bugs
class BugsTest(unittest.TestCase):
def test_bug_xmltoolkit21(self):
# marshaller gives obscure errors for non-string values
def check(elem):
with self.assertRaises(TypeError) as cm:
serialize(elem)
self.assertEqual(str(cm.exception),
'cannot serialize 123 (type int)')
elem = ET.Element(123)
check(elem) # tag
elem = ET.Element("elem")
elem.text = 123
check(elem) # text
elem = ET.Element("elem")
elem.tail = 123
check(elem) # tail
elem = ET.Element("elem")
elem.set(123, "123")
check(elem) # attribute key
elem = ET.Element("elem")
elem.set("123", 123)
check(elem) # attribute value
def test_bug_xmltoolkit25(self):
# typo in ElementTree.findtext
elem = ET.XML(SAMPLE_XML)
tree = ET.ElementTree(elem)
self.assertEqual(tree.findtext("tag"), 'text')
self.assertEqual(tree.findtext("section/tag"), 'subtext')
def test_bug_xmltoolkit28(self):
# .//tag causes exceptions
tree = ET.XML("<doc><table><tbody/></table></doc>")
self.assertEqual(summarize_list(tree.findall(".//thead")), [])
self.assertEqual(summarize_list(tree.findall(".//tbody")), ['tbody'])
def test_bug_xmltoolkitX1(self):
# dump() doesn't flush the output buffer
tree = ET.XML("<doc><table><tbody/></table></doc>")
with support.captured_stdout() as stdout:
ET.dump(tree)
self.assertEqual(stdout.getvalue(), '<doc><table><tbody /></table></doc>\n')
def test_bug_xmltoolkit39(self):
# non-ascii element and attribute names don't work
tree = ET.XML(b"<?xml version='1.0' encoding='iso-8859-1'?><t\xe4g />")
self.assertEqual(ET.tostring(tree, "utf-8"), b'<t\xc3\xa4g />')
tree = ET.XML(b"<?xml version='1.0' encoding='iso-8859-1'?>"
b"<tag \xe4ttr='välue' />")
self.assertEqual(tree.attrib, {'\xe4ttr': 'v\xe4lue'})
self.assertEqual(ET.tostring(tree, "utf-8"),
b'<tag \xc3\xa4ttr="v\xc3\xa4lue" />')
tree = ET.XML(b"<?xml version='1.0' encoding='iso-8859-1'?>"
b'<t\xe4g>text</t\xe4g>')
self.assertEqual(ET.tostring(tree, "utf-8"),
b'<t\xc3\xa4g>text</t\xc3\xa4g>')
tree = ET.Element("t\u00e4g")
self.assertEqual(ET.tostring(tree, "utf-8"), b'<t\xc3\xa4g />')
tree = ET.Element("tag")
tree.set("\u00e4ttr", "v\u00e4lue")
self.assertEqual(ET.tostring(tree, "utf-8"),
b'<tag \xc3\xa4ttr="v\xc3\xa4lue" />')
def test_bug_xmltoolkit54(self):
# problems handling internally defined entities
e = ET.XML("<!DOCTYPE doc [<!ENTITY ldots '舰'>]>"
'<doc>&ldots;</doc>')
self.assertEqual(serialize(e, encoding="us-ascii"),
b'<doc>舰</doc>')
self.assertEqual(serialize(e), '<doc>\u8230</doc>')
def test_bug_xmltoolkit55(self):
# make sure we're reporting the first error, not the last
with self.assertRaises(ET.ParseError) as cm:
ET.XML(b"<!DOCTYPE doc SYSTEM 'doc.dtd'>"
b'<doc>&ldots;&ndots;&rdots;</doc>')
self.assertEqual(str(cm.exception),
'undefined entity &ldots;: line 1, column 36')
def test_bug_xmltoolkit60(self):
# Handle crash in stream source.
class ExceptionFile:
def read(self, x):
raise OSError
self.assertRaises(OSError, ET.parse, ExceptionFile())
def test_bug_xmltoolkit62(self):
# Don't crash when using custom entities.
ENTITIES = {'rsquo': '\u2019', 'lsquo': '\u2018'}
parser = ET.XMLTreeBuilder()
parser.entity.update(ENTITIES)
parser.feed("""<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE patent-application-publication SYSTEM "pap-v15-2001-01-31.dtd" []>
<patent-application-publication>
<subdoc-abstract>
<paragraph id="A-0001" lvl="0">A new cultivar of Begonia plant named ‘BCT9801BEG’.</paragraph>
</subdoc-abstract>
</patent-application-publication>""")
t = parser.close()
self.assertEqual(t.find('.//paragraph').text,
'A new cultivar of Begonia plant named \u2018BCT9801BEG\u2019.')
def test_bug_xmltoolkit63(self):
# Check reference leak.
def xmltoolkit63():
tree = ET.TreeBuilder()
tree.start("tag", {})
tree.data("text")
tree.end("tag")
xmltoolkit63()
count = sys.getrefcount(None)
for i in range(1000):
xmltoolkit63()
self.assertEqual(sys.getrefcount(None), count)
def test_bug_200708_newline(self):
# Preserve newlines in attributes.
e = ET.Element('SomeTag', text="def _f():\n return 3\n")
self.assertEqual(ET.tostring(e),
b'<SomeTag text="def _f(): return 3 " />')
self.assertEqual(ET.XML(ET.tostring(e)).get("text"),
'def _f():\n return 3\n')
self.assertEqual(ET.tostring(ET.XML(ET.tostring(e))),
b'<SomeTag text="def _f(): return 3 " />')
def test_bug_200708_close(self):
# Test default builder.
parser = ET.XMLParser() # default
parser.feed("<element>some text</element>")
self.assertEqual(parser.close().tag, 'element')
# Test custom builder.
class EchoTarget:
def close(self):
return ET.Element("element") # simulate root
parser = ET.XMLParser(EchoTarget())
parser.feed("<element>some text</element>")
self.assertEqual(parser.close().tag, 'element')
def test_bug_200709_default_namespace(self):
e = ET.Element("{default}elem")
s = ET.SubElement(e, "{default}elem")
self.assertEqual(serialize(e, default_namespace="default"), # 1
'<elem xmlns="default"><elem /></elem>')
e = ET.Element("{default}elem")
s = ET.SubElement(e, "{default}elem")
s = ET.SubElement(e, "{not-default}elem")
self.assertEqual(serialize(e, default_namespace="default"), # 2
'<elem xmlns="default" xmlns:ns1="not-default">'
'<elem />'
'<ns1:elem />'
'</elem>')
e = ET.Element("{default}elem")
s = ET.SubElement(e, "{default}elem")
s = ET.SubElement(e, "elem") # unprefixed name
with self.assertRaises(ValueError) as cm:
serialize(e, default_namespace="default") # 3
self.assertEqual(str(cm.exception),
'cannot use non-qualified names with default_namespace option')
def test_bug_200709_register_namespace(self):
e = ET.Element("{http://namespace.invalid/does/not/exist/}title")
self.assertEqual(ET.tostring(e),
b'<ns0:title xmlns:ns0="http://namespace.invalid/does/not/exist/" />')
ET.register_namespace("foo", "http://namespace.invalid/does/not/exist/")
e = ET.Element("{http://namespace.invalid/does/not/exist/}title")
self.assertEqual(ET.tostring(e),
b'<foo:title xmlns:foo="http://namespace.invalid/does/not/exist/" />')
# And the Dublin Core namespace is in the default list:
e = ET.Element("{http://purl.org/dc/elements/1.1/}title")
self.assertEqual(ET.tostring(e),
b'<dc:title xmlns:dc="http://purl.org/dc/elements/1.1/" />')
def test_bug_200709_element_comment(self):
# Not sure if this can be fixed, really (since the serializer needs
# ET.Comment, not cET.comment).
a = ET.Element('a')
a.append(ET.Comment('foo'))
self.assertEqual(a[0].tag, ET.Comment)
a = ET.Element('a')
a.append(ET.PI('foo'))
self.assertEqual(a[0].tag, ET.PI)
def test_bug_200709_element_insert(self):
a = ET.Element('a')
b = ET.SubElement(a, 'b')
c = ET.SubElement(a, 'c')
d = ET.Element('d')
a.insert(0, d)
self.assertEqual(summarize_list(a), ['d', 'b', 'c'])
a.insert(-1, d)
self.assertEqual(summarize_list(a), ['d', 'b', 'd', 'c'])
def test_bug_200709_iter_comment(self):
a = ET.Element('a')
b = ET.SubElement(a, 'b')
comment_b = ET.Comment("TEST-b")
b.append(comment_b)
self.assertEqual(summarize_list(a.iter(ET.Comment)), [ET.Comment])
# --------------------------------------------------------------------
# reported on bugs.python.org
def test_bug_1534630(self):
bob = ET.TreeBuilder()
e = bob.data("data")
e = bob.start("tag", {})
e = bob.end("tag")
e = bob.close()
self.assertEqual(serialize(e), '<tag />')
def test_issue6233(self):
e = ET.XML(b"<?xml version='1.0' encoding='utf-8'?>"
b'<body>t\xc3\xa3g</body>')
self.assertEqual(ET.tostring(e, 'ascii'),
b"<?xml version='1.0' encoding='ascii'?>\n"
b'<body>t&#227;g</body>')
e = ET.XML(b"<?xml version='1.0' encoding='iso-8859-1'?>"
b'<body>t\xe3g</body>')
self.assertEqual(ET.tostring(e, 'ascii'),
b"<?xml version='1.0' encoding='ascii'?>\n"
b'<body>t&#227;g</body>')
def test_issue3151(self):
e = ET.XML('<prefix:localname xmlns:prefix="${stuff}"/>')
self.assertEqual(e.tag, '{${stuff}}localname')
t = ET.ElementTree(e)
self.assertEqual(ET.tostring(e), b'<ns0:localname xmlns:ns0="${stuff}" />')
def test_issue6565(self):
elem = ET.XML("<body><tag/></body>")
self.assertEqual(summarize_list(elem), ['tag'])
newelem = ET.XML(SAMPLE_XML)
elem[:] = newelem[:]
self.assertEqual(summarize_list(elem), ['tag', 'tag', 'section'])
def test_issue10777(self):
# Registering a namespace twice caused a "dictionary changed size during
# iteration" bug.
ET.register_namespace('test10777', 'http://myuri/')
ET.register_namespace('test10777', 'http://myuri/')
# --------------------------------------------------------------------
class BasicElementTest(ElementTestCase, unittest.TestCase):
def test_augmentation_type_errors(self):
e = ET.Element('joe')
self.assertRaises(TypeError, e.append, 'b')
self.assertRaises(TypeError, e.extend, [ET.Element('bar'), 'foo'])
self.assertRaises(TypeError, e.insert, 0, 'foo')
def test_cyclic_gc(self):
class Dummy:
pass
# Test the shortest cycle: d->element->d
d = Dummy()
d.dummyref = ET.Element('joe', attr=d)
wref = weakref.ref(d)
del d
gc_collect()
self.assertIsNone(wref())
# A longer cycle: d->e->e2->d
e = ET.Element('joe')
d = Dummy()
d.dummyref = e
wref = weakref.ref(d)
e2 = ET.SubElement(e, 'foo', attr=d)
del d, e, e2
gc_collect()
self.assertIsNone(wref())
# A cycle between Element objects as children of one another
# e1->e2->e3->e1
e1 = ET.Element('e1')
e2 = ET.Element('e2')
e3 = ET.Element('e3')
e1.append(e2)
e2.append(e3)
e3.append(e1)
wref = weakref.ref(e1)
del e1, e2, e3
gc_collect()
self.assertIsNone(wref())
def test_weakref(self):
flag = False
def wref_cb(w):
nonlocal flag
flag = True
e = ET.Element('e')
wref = weakref.ref(e, wref_cb)
self.assertEqual(wref().tag, 'e')
del e
self.assertEqual(flag, True)
self.assertEqual(wref(), None)
def test_get_keyword_args(self):
e1 = ET.Element('foo' , x=1, y=2, z=3)
self.assertEqual(e1.get('x', default=7), 1)
self.assertEqual(e1.get('w', default=7), 7)
def test_pickle(self):
# issue #16076: the C implementation wasn't pickleable.
for dumper, loader in product(self.modules, repeat=2):
e = dumper.Element('foo', bar=42)
e.text = "text goes here"
e.tail = "opposite of head"
dumper.SubElement(e, 'child').append(dumper.Element('grandchild'))
e.append(dumper.Element('child'))
e.findall('.//grandchild')[0].set('attr', 'other value')
e2 = self.pickleRoundTrip(e, 'xml.etree.ElementTree',
dumper, loader)
self.assertEqual(e2.tag, 'foo')
self.assertEqual(e2.attrib['bar'], 42)
self.assertEqual(len(e2), 2)
self.assertEqualElements(e, e2)
def test_pickle_issue18997(self):
for dumper, loader in product(self.modules, repeat=2):
XMLTEXT = """<?xml version="1.0"?>
<group><dogs>4</dogs>
</group>"""
e1 = dumper.fromstring(XMLTEXT)
if hasattr(e1, '__getstate__'):
self.assertEqual(e1.__getstate__()['tag'], 'group')
e2 = self.pickleRoundTrip(e1, 'xml.etree.ElementTree', dumper, loader)
self.assertEqual(e2.tag, 'group')
self.assertEqual(e2[0].tag, 'dogs')
class ElementTreeTypeTest(unittest.TestCase):
def test_istype(self):
self.assertIsInstance(ET.ParseError, type)
self.assertIsInstance(ET.QName, type)
self.assertIsInstance(ET.ElementTree, type)
self.assertIsInstance(ET.Element, type)
self.assertIsInstance(ET.TreeBuilder, type)
self.assertIsInstance(ET.XMLParser, type)
def test_Element_subclass_trivial(self):
class MyElement(ET.Element):
pass
mye = MyElement('foo')
self.assertIsInstance(mye, ET.Element)
self.assertIsInstance(mye, MyElement)
self.assertEqual(mye.tag, 'foo')
# test that attribute assignment works (issue 14849)
mye.text = "joe"
self.assertEqual(mye.text, "joe")
def test_Element_subclass_constructor(self):
class MyElement(ET.Element):
def __init__(self, tag, attrib={}, **extra):
super(MyElement, self).__init__(tag + '__', attrib, **extra)
mye = MyElement('foo', {'a': 1, 'b': 2}, c=3, d=4)
self.assertEqual(mye.tag, 'foo__')
self.assertEqual(sorted(mye.items()),
[('a', 1), ('b', 2), ('c', 3), ('d', 4)])
def test_Element_subclass_new_method(self):
class MyElement(ET.Element):
def newmethod(self):
return self.tag
mye = MyElement('joe')
self.assertEqual(mye.newmethod(), 'joe')
class ElementFindTest(unittest.TestCase):
def test_find_simple(self):
e = ET.XML(SAMPLE_XML)
self.assertEqual(e.find('tag').tag, 'tag')
self.assertEqual(e.find('section/tag').tag, 'tag')
self.assertEqual(e.find('./tag').tag, 'tag')
e[2] = ET.XML(SAMPLE_SECTION)
self.assertEqual(e.find('section/nexttag').tag, 'nexttag')
self.assertEqual(e.findtext('./tag'), 'text')
self.assertEqual(e.findtext('section/tag'), 'subtext')
# section/nexttag is found but has no text
self.assertEqual(e.findtext('section/nexttag'), '')
self.assertEqual(e.findtext('section/nexttag', 'default'), '')
# tog doesn't exist and 'default' kicks in
self.assertIsNone(e.findtext('tog'))
self.assertEqual(e.findtext('tog', 'default'), 'default')
# Issue #16922
self.assertEqual(ET.XML('<tag><empty /></tag>').findtext('empty'), '')
def test_find_xpath(self):
LINEAR_XML = '''
<body>
<tag class='a'/>
<tag class='b'/>
<tag class='c'/>
<tag class='d'/>
</body>'''
e = ET.XML(LINEAR_XML)
# Test for numeric indexing and last()
self.assertEqual(e.find('./tag[1]').attrib['class'], 'a')
self.assertEqual(e.find('./tag[2]').attrib['class'], 'b')
self.assertEqual(e.find('./tag[last()]').attrib['class'], 'd')
self.assertEqual(e.find('./tag[last()-1]').attrib['class'], 'c')
self.assertEqual(e.find('./tag[last()-2]').attrib['class'], 'b')
def test_findall(self):
e = ET.XML(SAMPLE_XML)
e[2] = ET.XML(SAMPLE_SECTION)
self.assertEqual(summarize_list(e.findall('.')), ['body'])
self.assertEqual(summarize_list(e.findall('tag')), ['tag', 'tag'])
self.assertEqual(summarize_list(e.findall('tog')), [])
self.assertEqual(summarize_list(e.findall('tog/foo')), [])
self.assertEqual(summarize_list(e.findall('*')),
['tag', 'tag', 'section'])
self.assertEqual(summarize_list(e.findall('.//tag')),
['tag'] * 4)
self.assertEqual(summarize_list(e.findall('section/tag')), ['tag'])
self.assertEqual(summarize_list(e.findall('section//tag')), ['tag'] * 2)
self.assertEqual(summarize_list(e.findall('section/*')),
['tag', 'nexttag', 'nextsection'])
self.assertEqual(summarize_list(e.findall('section//*')),
['tag', 'nexttag', 'nextsection', 'tag'])
self.assertEqual(summarize_list(e.findall('section/.//*')),
['tag', 'nexttag', 'nextsection', 'tag'])
self.assertEqual(summarize_list(e.findall('*/*')),
['tag', 'nexttag', 'nextsection'])
self.assertEqual(summarize_list(e.findall('*//*')),
['tag', 'nexttag', 'nextsection', 'tag'])
self.assertEqual(summarize_list(e.findall('*/tag')), ['tag'])
self.assertEqual(summarize_list(e.findall('*/./tag')), ['tag'])
self.assertEqual(summarize_list(e.findall('./tag')), ['tag'] * 2)
self.assertEqual(summarize_list(e.findall('././tag')), ['tag'] * 2)
self.assertEqual(summarize_list(e.findall('.//tag[@class]')),
['tag'] * 3)
self.assertEqual(summarize_list(e.findall('.//tag[@class="a"]')),
['tag'])
self.assertEqual(summarize_list(e.findall('.//tag[@class="b"]')),
['tag'] * 2)
self.assertEqual(summarize_list(e.findall('.//tag[@id]')),
['tag'])
self.assertEqual(summarize_list(e.findall('.//section[tag]')),
['section'])
self.assertEqual(summarize_list(e.findall('.//section[element]')), [])
self.assertEqual(summarize_list(e.findall('../tag')), [])
self.assertEqual(summarize_list(e.findall('section/../tag')),
['tag'] * 2)
self.assertEqual(e.findall('section//'), e.findall('section//*'))
def test_test_find_with_ns(self):
e = ET.XML(SAMPLE_XML_NS)
self.assertEqual(summarize_list(e.findall('tag')), [])
self.assertEqual(
summarize_list(e.findall("{http://effbot.org/ns}tag")),
['{http://effbot.org/ns}tag'] * 2)
self.assertEqual(
summarize_list(e.findall(".//{http://effbot.org/ns}tag")),
['{http://effbot.org/ns}tag'] * 3)
def test_findall_different_nsmaps(self):
root = ET.XML('''
<a xmlns:x="X" xmlns:y="Y">
<x:b><c/></x:b>
<b/>
<c><x:b/><b/></c><y:b/>
</a>''')
nsmap = {'xx': 'X'}
self.assertEqual(len(root.findall(".//xx:b", namespaces=nsmap)), 2)
self.assertEqual(len(root.findall(".//b", namespaces=nsmap)), 2)
nsmap = {'xx': 'Y'}
self.assertEqual(len(root.findall(".//xx:b", namespaces=nsmap)), 1)
self.assertEqual(len(root.findall(".//b", namespaces=nsmap)), 2)
def test_bad_find(self):
e = ET.XML(SAMPLE_XML)
with self.assertRaisesRegex(SyntaxError, 'cannot use absolute path'):
e.findall('/tag')
def test_find_through_ElementTree(self):
e = ET.XML(SAMPLE_XML)
self.assertEqual(ET.ElementTree(e).find('tag').tag, 'tag')
self.assertEqual(ET.ElementTree(e).findtext('tag'), 'text')
self.assertEqual(summarize_list(ET.ElementTree(e).findall('tag')),
['tag'] * 2)
# this produces a warning
self.assertEqual(summarize_list(ET.ElementTree(e).findall('//tag')),
['tag'] * 3)
class ElementIterTest(unittest.TestCase):
def _ilist(self, elem, tag=None):
return summarize_list(elem.iter(tag))
def test_basic(self):
doc = ET.XML("<html><body>this is a <i>paragraph</i>.</body>..</html>")
self.assertEqual(self._ilist(doc), ['html', 'body', 'i'])
self.assertEqual(self._ilist(doc.find('body')), ['body', 'i'])
self.assertEqual(next(doc.iter()).tag, 'html')
self.assertEqual(''.join(doc.itertext()), 'this is a paragraph...')
self.assertEqual(''.join(doc.find('body').itertext()),
'this is a paragraph.')
self.assertEqual(next(doc.itertext()), 'this is a ')
# iterparse should return an iterator
sourcefile = serialize(doc, to_string=False)
self.assertEqual(next(ET.iterparse(sourcefile))[0], 'end')
# With an explicit parser too (issue #9708)
sourcefile = serialize(doc, to_string=False)
parser = ET.XMLParser(target=ET.TreeBuilder())
self.assertEqual(next(ET.iterparse(sourcefile, parser=parser))[0],
'end')
tree = ET.ElementTree(None)
self.assertRaises(AttributeError, tree.iter)
# Issue #16913
doc = ET.XML("<root>a&<sub>b&</sub>c&</root>")
self.assertEqual(''.join(doc.itertext()), 'a&b&c&')
def test_corners(self):
# single root, no subelements
a = ET.Element('a')
self.assertEqual(self._ilist(a), ['a'])
# one child
b = ET.SubElement(a, 'b')
self.assertEqual(self._ilist(a), ['a', 'b'])
# one child and one grandchild
c = ET.SubElement(b, 'c')
self.assertEqual(self._ilist(a), ['a', 'b', 'c'])
# two children, only first with grandchild
d = ET.SubElement(a, 'd')
self.assertEqual(self._ilist(a), ['a', 'b', 'c', 'd'])
# replace first child by second
a[0] = a[1]
del a[1]
self.assertEqual(self._ilist(a), ['a', 'd'])
def test_iter_by_tag(self):
doc = ET.XML('''
<document>
<house>
<room>bedroom1</room>
<room>bedroom2</room>
</house>
<shed>nothing here
</shed>
<house>
<room>bedroom8</room>
</house>
</document>''')
self.assertEqual(self._ilist(doc, 'room'), ['room'] * 3)
self.assertEqual(self._ilist(doc, 'house'), ['house'] * 2)
# test that iter also accepts 'tag' as a keyword arg
self.assertEqual(
summarize_list(doc.iter(tag='room')),
['room'] * 3)
# make sure both tag=None and tag='*' return all tags
all_tags = ['document', 'house', 'room', 'room',
'shed', 'house', 'room']
self.assertEqual(self._ilist(doc), all_tags)
self.assertEqual(self._ilist(doc, '*'), all_tags)
class TreeBuilderTest(unittest.TestCase):
sample1 = ('<!DOCTYPE html PUBLIC'
' "-//W3C//DTD XHTML 1.0 Transitional//EN"'
' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
'<html>text<div>subtext</div>tail</html>')
sample2 = '''<toplevel>sometext</toplevel>'''
def _check_sample1_element(self, e):
self.assertEqual(e.tag, 'html')
self.assertEqual(e.text, 'text')
self.assertEqual(e.tail, None)
self.assertEqual(e.attrib, {})
children = list(e)
self.assertEqual(len(children), 1)
child = children[0]
self.assertEqual(child.tag, 'div')
self.assertEqual(child.text, 'subtext')
self.assertEqual(child.tail, 'tail')
self.assertEqual(child.attrib, {})
def test_dummy_builder(self):
class BaseDummyBuilder:
def close(self):
return 42
class DummyBuilder(BaseDummyBuilder):
data = start = end = lambda *a: None
parser = ET.XMLParser(target=DummyBuilder())
parser.feed(self.sample1)
self.assertEqual(parser.close(), 42)
parser = ET.XMLParser(target=BaseDummyBuilder())
parser.feed(self.sample1)
self.assertEqual(parser.close(), 42)
parser = ET.XMLParser(target=object())
parser.feed(self.sample1)
self.assertIsNone(parser.close())
def test_treebuilder_elementfactory_none(self):
parser = ET.XMLParser(target=ET.TreeBuilder(element_factory=None))
parser.feed(self.sample1)
e = parser.close()
self._check_sample1_element(e)
def test_subclass(self):
class MyTreeBuilder(ET.TreeBuilder):
def foobar(self, x):
return x * 2
tb = MyTreeBuilder()
self.assertEqual(tb.foobar(10), 20)
parser = ET.XMLParser(target=tb)
parser.feed(self.sample1)
e = parser.close()
self._check_sample1_element(e)
def test_element_factory(self):
lst = []
def myfactory(tag, attrib):
nonlocal lst
lst.append(tag)
return ET.Element(tag, attrib)
tb = ET.TreeBuilder(element_factory=myfactory)
parser = ET.XMLParser(target=tb)
parser.feed(self.sample2)
parser.close()
self.assertEqual(lst, ['toplevel'])
def _check_element_factory_class(self, cls):
tb = ET.TreeBuilder(element_factory=cls)
parser = ET.XMLParser(target=tb)
parser.feed(self.sample1)
e = parser.close()
self.assertIsInstance(e, cls)
self._check_sample1_element(e)
def test_element_factory_subclass(self):
class MyElement(ET.Element):
pass
self._check_element_factory_class(MyElement)
def test_element_factory_pure_python_subclass(self):
# Mimic SimpleTAL's behaviour (issue #16089): both versions of
# TreeBuilder should be able to cope with a subclass of the
# pure Python Element class.
base = ET._Element
# Not from a C extension
self.assertEqual(base.__module__, 'xml.etree.ElementTree')
# Force some multiple inheritance with a C class to make things
# more interesting.
class MyElement(base, ValueError):
pass
self._check_element_factory_class(MyElement)
def test_doctype(self):
class DoctypeParser:
_doctype = None
def doctype(self, name, pubid, system):
self._doctype = (name, pubid, system)
def close(self):
return self._doctype
parser = ET.XMLParser(target=DoctypeParser())
parser.feed(self.sample1)
self.assertEqual(parser.close(),
('html', '-//W3C//DTD XHTML 1.0 Transitional//EN',
'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'))
class XMLParserTest(unittest.TestCase):
sample1 = b'<file><line>22</line></file>'
sample2 = (b'<!DOCTYPE html PUBLIC'
b' "-//W3C//DTD XHTML 1.0 Transitional//EN"'
b' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">'
b'<html>text</html>')
sample3 = ('<?xml version="1.0" encoding="iso-8859-1"?>\n'
'<money value="$\xa3\u20ac\U0001017b">$\xa3\u20ac\U0001017b</money>')
def _check_sample_element(self, e):
self.assertEqual(e.tag, 'file')
self.assertEqual(e[0].tag, 'line')
self.assertEqual(e[0].text, '22')
def test_constructor_args(self):
# Positional args. The first (html) is not supported, but should be
# nevertheless correctly accepted.
parser = ET.XMLParser(None, ET.TreeBuilder(), 'utf-8')
parser.feed(self.sample1)
self._check_sample_element(parser.close())
# Now as keyword args.
parser2 = ET.XMLParser(encoding='utf-8',
html=[{}],
target=ET.TreeBuilder())
parser2.feed(self.sample1)
self._check_sample_element(parser2.close())
def test_subclass(self):
class MyParser(ET.XMLParser):
pass
parser = MyParser()
parser.feed(self.sample1)
self._check_sample_element(parser.close())
def test_subclass_doctype(self):
_doctype = None
class MyParserWithDoctype(ET.XMLParser):
def doctype(self, name, pubid, system):
nonlocal _doctype
_doctype = (name, pubid, system)
parser = MyParserWithDoctype()
with self.assertWarns(DeprecationWarning):
parser.feed(self.sample2)
parser.close()
self.assertEqual(_doctype,
('html', '-//W3C//DTD XHTML 1.0 Transitional//EN',
'http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd'))
def test_parse_string(self):
parser = ET.XMLParser(target=ET.TreeBuilder())
parser.feed(self.sample3)
e = parser.close()
self.assertEqual(e.tag, 'money')
self.assertEqual(e.attrib['value'], '$\xa3\u20ac\U0001017b')
self.assertEqual(e.text, '$\xa3\u20ac\U0001017b')
class NamespaceParseTest(unittest.TestCase):
def test_find_with_namespace(self):
nsmap = {'h': 'hello', 'f': 'foo'}
doc = ET.fromstring(SAMPLE_XML_NS_ELEMS)
self.assertEqual(len(doc.findall('{hello}table', nsmap)), 1)
self.assertEqual(len(doc.findall('.//{hello}td', nsmap)), 2)
self.assertEqual(len(doc.findall('.//{foo}name', nsmap)), 1)
class ElementSlicingTest(unittest.TestCase):
def _elem_tags(self, elemlist):
return [e.tag for e in elemlist]
def _subelem_tags(self, elem):
return self._elem_tags(list(elem))
def _make_elem_with_children(self, numchildren):
"""Create an Element with a tag 'a', with the given amount of children
named 'a0', 'a1' ... and so on.
"""
e = ET.Element('a')
for i in range(numchildren):
ET.SubElement(e, 'a%s' % i)
return e
def test_getslice_single_index(self):
e = self._make_elem_with_children(10)
self.assertEqual(e[1].tag, 'a1')
self.assertEqual(e[-2].tag, 'a8')
self.assertRaises(IndexError, lambda: e[12])
def test_getslice_range(self):
e = self._make_elem_with_children(6)
self.assertEqual(self._elem_tags(e[3:]), ['a3', 'a4', 'a5'])
self.assertEqual(self._elem_tags(e[3:6]), ['a3', 'a4', 'a5'])
self.assertEqual(self._elem_tags(e[3:16]), ['a3', 'a4', 'a5'])
self.assertEqual(self._elem_tags(e[3:5]), ['a3', 'a4'])
self.assertEqual(self._elem_tags(e[3:-1]), ['a3', 'a4'])
self.assertEqual(self._elem_tags(e[:2]), ['a0', 'a1'])
def test_getslice_steps(self):
e = self._make_elem_with_children(10)
self.assertEqual(self._elem_tags(e[8:10:1]), ['a8', 'a9'])
self.assertEqual(self._elem_tags(e[::3]), ['a0', 'a3', 'a6', 'a9'])
self.assertEqual(self._elem_tags(e[::8]), ['a0', 'a8'])
self.assertEqual(self._elem_tags(e[1::8]), ['a1', 'a9'])
def test_getslice_negative_steps(self):
e = self._make_elem_with_children(4)
self.assertEqual(self._elem_tags(e[::-1]), ['a3', 'a2', 'a1', 'a0'])
self.assertEqual(self._elem_tags(e[::-2]), ['a3', 'a1'])
def test_delslice(self):
e = self._make_elem_with_children(4)
del e[0:2]
self.assertEqual(self._subelem_tags(e), ['a2', 'a3'])
e = self._make_elem_with_children(4)
del e[0:]
self.assertEqual(self._subelem_tags(e), [])
e = self._make_elem_with_children(4)
del e[::-1]
self.assertEqual(self._subelem_tags(e), [])
e = self._make_elem_with_children(4)
del e[::-2]
self.assertEqual(self._subelem_tags(e), ['a0', 'a2'])
e = self._make_elem_with_children(4)
del e[1::2]
self.assertEqual(self._subelem_tags(e), ['a0', 'a2'])
e = self._make_elem_with_children(2)
del e[::2]
self.assertEqual(self._subelem_tags(e), ['a1'])
class IOTest(unittest.TestCase):
def tearDown(self):
support.unlink(TESTFN)
def test_encoding(self):
# Test encoding issues.
elem = ET.Element("tag")
elem.text = "abc"
self.assertEqual(serialize(elem), '<tag>abc</tag>')
self.assertEqual(serialize(elem, encoding="utf-8"),
b'<tag>abc</tag>')
self.assertEqual(serialize(elem, encoding="us-ascii"),
b'<tag>abc</tag>')
for enc in ("iso-8859-1", "utf-16", "utf-32"):
self.assertEqual(serialize(elem, encoding=enc),
("<?xml version='1.0' encoding='%s'?>\n"
"<tag>abc</tag>" % enc).encode(enc))
elem = ET.Element("tag")
elem.text = "<&\"\'>"
self.assertEqual(serialize(elem), '<tag>&lt;&amp;"\'&gt;</tag>')
self.assertEqual(serialize(elem, encoding="utf-8"),
b'<tag>&lt;&amp;"\'&gt;</tag>')
self.assertEqual(serialize(elem, encoding="us-ascii"),
b'<tag>&lt;&amp;"\'&gt;</tag>')
for enc in ("iso-8859-1", "utf-16", "utf-32"):
self.assertEqual(serialize(elem, encoding=enc),
("<?xml version='1.0' encoding='%s'?>\n"
"<tag>&lt;&amp;\"'&gt;</tag>" % enc).encode(enc))
elem = ET.Element("tag")
elem.attrib["key"] = "<&\"\'>"
self.assertEqual(serialize(elem), '<tag key="<&"\'>" />')
self.assertEqual(serialize(elem, encoding="utf-8"),
b'<tag key="<&"\'>" />')
self.assertEqual(serialize(elem, encoding="us-ascii"),
b'<tag key="<&"\'>" />')
for enc in ("iso-8859-1", "utf-16", "utf-32"):
self.assertEqual(serialize(elem, encoding=enc),
("<?xml version='1.0' encoding='%s'?>\n"
"<tag key=\"<&"'>\" />" % enc).encode(enc))
elem = ET.Element("tag")
elem.text = '\xe5\xf6\xf6<>'
self.assertEqual(serialize(elem), '<tag>\xe5\xf6\xf6&lt;&gt;</tag>')
self.assertEqual(serialize(elem, encoding="utf-8"),
b'<tag>\xc3\xa5\xc3\xb6\xc3\xb6&lt;&gt;</tag>')
self.assertEqual(serialize(elem, encoding="us-ascii"),
b'<tag>&#229;&#246;&#246;&lt;&gt;</tag>')
for enc in ("iso-8859-1", "utf-16", "utf-32"):
self.assertEqual(serialize(elem, encoding=enc),
("<?xml version='1.0' encoding='%s'?>\n"
"<tag>åöö&lt;&gt;</tag>" % enc).encode(enc))
elem = ET.Element("tag")
elem.attrib["key"] = '\xe5\xf6\xf6<>'
self.assertEqual(serialize(elem), '<tag key="\xe5\xf6\xf6<>" />')
self.assertEqual(serialize(elem, encoding="utf-8"),
b'<tag key="\xc3\xa5\xc3\xb6\xc3\xb6<>" />')
self.assertEqual(serialize(elem, encoding="us-ascii"),
b'<tag key="åöö<>" />')
for enc in ("iso-8859-1", "utf-16", "utf-16le", "utf-16be", "utf-32"):
self.assertEqual(serialize(elem, encoding=enc),
("<?xml version='1.0' encoding='%s'?>\n"
"<tag key=\"åöö<>\" />" % enc).encode(enc))
def test_write_to_filename(self):
tree = ET.ElementTree(ET.XML('''<site />'''))
tree.write(TESTFN)
with open(TESTFN, 'rb') as f:
self.assertEqual(f.read(), b'''<site />''')
def test_write_to_text_file(self):
tree = ET.ElementTree(ET.XML('''<site />'''))
with open(TESTFN, 'w', encoding='utf-8') as f:
tree.write(f, encoding='unicode')
self.assertFalse(f.closed)
with open(TESTFN, 'rb') as f:
self.assertEqual(f.read(), b'''<site />''')
def test_write_to_binary_file(self):
tree = ET.ElementTree(ET.XML('''<site />'''))
with open(TESTFN, 'wb') as f:
tree.write(f)
self.assertFalse(f.closed)
with open(TESTFN, 'rb') as f:
self.assertEqual(f.read(), b'''<site />''')
def test_write_to_binary_file_with_bom(self):
tree = ET.ElementTree(ET.XML('''<site />'''))
# test BOM writing to buffered file
with open(TESTFN, 'wb') as f:
tree.write(f, encoding='utf-16')
self.assertFalse(f.closed)
with open(TESTFN, 'rb') as f:
self.assertEqual(f.read(),
'''<?xml version='1.0' encoding='utf-16'?>\n'''
'''<site />'''.encode("utf-16"))
# test BOM writing to non-buffered file
with open(TESTFN, 'wb', buffering=0) as f:
tree.write(f, encoding='utf-16')
self.assertFalse(f.closed)
with open(TESTFN, 'rb') as f:
self.assertEqual(f.read(),
'''<?xml version='1.0' encoding='utf-16'?>\n'''
'''<site />'''.encode("utf-16"))
def test_read_from_stringio(self):
tree = ET.ElementTree()
stream = io.StringIO('''<?xml version="1.0"?><site></site>''')
tree.parse(stream)
self.assertEqual(tree.getroot().tag, 'site')
def test_write_to_stringio(self):
tree = ET.ElementTree(ET.XML('''<site />'''))
stream = io.StringIO()
tree.write(stream, encoding='unicode')
self.assertEqual(stream.getvalue(), '''<site />''')
def test_read_from_bytesio(self):
tree = ET.ElementTree()
raw = io.BytesIO(b'''<?xml version="1.0"?><site></site>''')
tree.parse(raw)
self.assertEqual(tree.getroot().tag, 'site')
def test_write_to_bytesio(self):
tree = ET.ElementTree(ET.XML('''<site />'''))
raw = io.BytesIO()
tree.write(raw)
self.assertEqual(raw.getvalue(), b'''<site />''')
class dummy:
pass
def test_read_from_user_text_reader(self):
stream = io.StringIO('''<?xml version="1.0"?><site></site>''')
reader = self.dummy()
reader.read = stream.read
tree = ET.ElementTree()
tree.parse(reader)
self.assertEqual(tree.getroot().tag, 'site')
def test_write_to_user_text_writer(self):
tree = ET.ElementTree(ET.XML('''<site />'''))
stream = io.StringIO()
writer = self.dummy()
writer.write = stream.write
tree.write(writer, encoding='unicode')
self.assertEqual(stream.getvalue(), '''<site />''')
def test_read_from_user_binary_reader(self):
raw = io.BytesIO(b'''<?xml version="1.0"?><site></site>''')
reader = self.dummy()
reader.read = raw.read
tree = ET.ElementTree()
tree.parse(reader)
self.assertEqual(tree.getroot().tag, 'site')
tree = ET.ElementTree()
def test_write_to_user_binary_writer(self):
tree = ET.ElementTree(ET.XML('''<site />'''))
raw = io.BytesIO()
writer = self.dummy()
writer.write = raw.write
tree.write(writer)
self.assertEqual(raw.getvalue(), b'''<site />''')
def test_write_to_user_binary_writer_with_bom(self):
tree = ET.ElementTree(ET.XML('''<site />'''))
raw = io.BytesIO()
writer = self.dummy()
writer.write = raw.write
writer.seekable = lambda: True
writer.tell = raw.tell
tree.write(writer, encoding="utf-16")
self.assertEqual(raw.getvalue(),
'''<?xml version='1.0' encoding='utf-16'?>\n'''
'''<site />'''.encode("utf-16"))
def test_tostringlist_invariant(self):
root = ET.fromstring('<tag>foo</tag>')
self.assertEqual(
ET.tostring(root, 'unicode'),
''.join(ET.tostringlist(root, 'unicode')))
self.assertEqual(
ET.tostring(root, 'utf-16'),
b''.join(ET.tostringlist(root, 'utf-16')))
class ParseErrorTest(unittest.TestCase):
def test_subclass(self):
self.assertIsInstance(ET.ParseError(), SyntaxError)
def _get_error(self, s):
try:
ET.fromstring(s)
except ET.ParseError as e:
return e
def test_error_position(self):
self.assertEqual(self._get_error('foo').position, (1, 0))
self.assertEqual(self._get_error('<tag>&foo;</tag>').position, (1, 5))
self.assertEqual(self._get_error('foobar<').position, (1, 6))
def test_error_code(self):
import xml.parsers.expat.errors as ERRORS
self.assertEqual(self._get_error('foo').code,
ERRORS.codes[ERRORS.XML_ERROR_SYNTAX])
class KeywordArgsTest(unittest.TestCase):
# Test various issues with keyword arguments passed to ET.Element
# constructor and methods
def test_issue14818(self):
x = ET.XML("<a>foo</a>")
self.assertEqual(x.find('a', None),
x.find(path='a', namespaces=None))
self.assertEqual(x.findtext('a', None, None),
x.findtext(path='a', default=None, namespaces=None))
self.assertEqual(x.findall('a', None),
x.findall(path='a', namespaces=None))
self.assertEqual(list(x.iterfind('a', None)),
list(x.iterfind(path='a', namespaces=None)))
self.assertEqual(ET.Element('a').attrib, {})
elements = [
ET.Element('a', dict(href="#", id="foo")),
ET.Element('a', attrib=dict(href="#", id="foo")),
ET.Element('a', dict(href="#"), id="foo"),
ET.Element('a', href="#", id="foo"),
ET.Element('a', dict(href="#", id="foo"), href="#", id="foo"),
]
for e in elements:
self.assertEqual(e.tag, 'a')
self.assertEqual(e.attrib, dict(href="#", id="foo"))
e2 = ET.SubElement(elements[0], 'foobar', attrib={'key1': 'value1'})
self.assertEqual(e2.attrib['key1'], 'value1')
with self.assertRaisesRegex(TypeError, 'must be dict, not str'):
ET.Element('a', "I'm not a dict")
with self.assertRaisesRegex(TypeError, 'must be dict, not str'):
ET.Element('a', attrib="I'm not a dict")
# --------------------------------------------------------------------
class NoAcceleratorTest(unittest.TestCase):
def setUp(self):
if not pyET:
raise unittest.SkipTest('only for the Python version')
# Test that the C accelerator was not imported for pyET
def test_correct_import_pyET(self):
self.assertEqual(pyET.Element.__module__, 'xml.etree.ElementTree')
self.assertEqual(pyET.SubElement.__module__, 'xml.etree.ElementTree')
# --------------------------------------------------------------------
class CleanContext(object):
"""Provide default namespace mapping and path cache."""
checkwarnings = None
def __init__(self, quiet=False):
if sys.flags.optimize >= 2:
# under -OO, doctests cannot be run and therefore not all warnings
# will be emitted
quiet = True
deprecations = (
# Search behaviour is broken if search path starts with "/".
("This search is broken in 1.3 and earlier, and will be fixed "
"in a future version. If you rely on the current behaviour, "
"change it to '.+'", FutureWarning),
# Element.getchildren() and Element.getiterator() are deprecated.
("This method will be removed in future versions. "
"Use .+ instead.", DeprecationWarning),
("This method will be removed in future versions. "
"Use .+ instead.", PendingDeprecationWarning))
self.checkwarnings = support.check_warnings(*deprecations, quiet=quiet)
def __enter__(self):
from xml.etree import ElementPath
self._nsmap = ET.register_namespace._namespace_map
# Copy the default namespace mapping
self._nsmap_copy = self._nsmap.copy()
# Copy the path cache (should be empty)
self._path_cache = ElementPath._cache
ElementPath._cache = self._path_cache.copy()
self.checkwarnings.__enter__()
def __exit__(self, *args):
from xml.etree import ElementPath
# Restore mapping and path cache
self._nsmap.clear()
self._nsmap.update(self._nsmap_copy)
ElementPath._cache = self._path_cache
self.checkwarnings.__exit__(*args)
def test_main(module=None):
# When invoked without a module, runs the Python ET tests by loading pyET.
# Otherwise, uses the given module as the ET.
global pyET
pyET = import_fresh_module('xml.etree.ElementTree',
blocked=['_elementtree'])
if module is None:
module = pyET
global ET
ET = module
test_classes = [
ModuleTest,
ElementSlicingTest,
BasicElementTest,
ElementTreeTest,
IOTest,
ParseErrorTest,
XIncludeTest,
ElementTreeTypeTest,
ElementFindTest,
ElementIterTest,
TreeBuilderTest,
XMLParserTest,
BugsTest,
]
# These tests will only run for the pure-Python version that doesn't import
# _elementtree. We can't use skipUnless here, because pyET is filled in only
# after the module is loaded.
if pyET is not ET:
test_classes.extend([
NoAcceleratorTest,
])
try:
# XXX the C module should give the same warnings as the Python module
with CleanContext(quiet=(pyET is not ET)):
support.run_unittest(*test_classes)
finally:
# don't interfere with subsequent tests
ET = pyET = None
if __name__ == '__main__':
test_main()
|
mancoast/CPythonPyc_test
|
fail/335_test_xml_etree.py
|
Python
|
gpl-3.0
| 90,629
|
# pylint: disable=invalid-name, too-many-public-methods
from __future__ import (absolute_import, division, print_function)
import unittest
from mantid.kernel import DateAndTime
from mantid.api import EventType
from mantid.dataobjects import EventList
class EventListTest(unittest.TestCase):
def test_event_list_constructor(self):
el = EventList()
        self.assertEqual(el.getNumberEvents(), 0)
        self.assertEqual(el.getEventType(), EventType.TOF)
def test_event_list_addEventQuickly(self):
el = EventList()
el.addEventQuickly(float(0.123), DateAndTime(42))
        self.assertEqual(el.getNumberEvents(), 1)
        self.assertEqual(el.getEventType(), EventType.TOF)
        self.assertEqual(el.getTofs()[0], float(0.123))
        self.assertEqual(el.getPulseTimes()[0], DateAndTime(42))
if __name__ == '__main__':
unittest.main()
|
dymkowsk/mantid
|
Framework/PythonInterface/test/python/mantid/dataobjects/EventListTest.py
|
Python
|
gpl-3.0
| 888
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, cint, nowdate
from frappe import throw, _
import frappe.defaults
from frappe.utils import getdate
from erpnext.controllers.buying_controller import BuyingController
from erpnext.accounts.utils import get_account_currency
from frappe.desk.notifications import clear_doctype_notifications
from erpnext.buying.utils import check_for_closed_status
form_grid_templates = {
"items": "templates/form_grid/item_grid.html"
}
class PurchaseReceipt(BuyingController):
def __init__(self, *args, **kwargs):
super(PurchaseReceipt, self).__init__(*args, **kwargs)
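		# Status updater mappings: each entry describes how a Purchase Receipt Item
		# row rolls up into its Purchase Order Item (received_qty or returned_qty)
		# and how the parent Purchase Order's per_received percentage is refreshed.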
self.status_updater = [{
'source_dt': 'Purchase Receipt Item',
'target_dt': 'Purchase Order Item',
'join_field': 'purchase_order_item',
'target_field': 'received_qty',
'target_parent_dt': 'Purchase Order',
'target_parent_field': 'per_received',
'target_ref_field': 'qty',
'source_field': 'qty',
'percent_join_field': 'purchase_order',
'overflow_type': 'receipt'
},
{
'source_dt': 'Purchase Receipt Item',
'target_dt': 'Purchase Order Item',
'join_field': 'purchase_order_item',
'target_field': 'returned_qty',
'target_parent_dt': 'Purchase Order',
# 'target_parent_field': 'per_received',
# 'target_ref_field': 'qty',
'source_field': '-1 * qty',
# 'overflow_type': 'receipt',
'extra_cond': """ and exists (select name from `tabPurchase Receipt` where name=`tabPurchase Receipt Item`.parent and is_return=1)"""
}]
def validate(self):
self.validate_posting_time()
super(PurchaseReceipt, self).validate()
if self._action=="submit":
self.make_batches('warehouse')
else:
self.set_status()
self.po_required()
self.validate_with_previous_doc()
self.validate_uom_is_integer("uom", ["qty", "received_qty"])
self.validate_uom_is_integer("stock_uom", "stock_qty")
self.check_for_closed_status()
if getdate(self.posting_date) > getdate(nowdate()):
throw(_("Posting Date cannot be future date"))
def validate_with_previous_doc(self):
super(PurchaseReceipt, self).validate_with_previous_doc({
"Purchase Order": {
"ref_dn_field": "purchase_order",
"compare_fields": [["supplier", "="], ["company", "="], ["currency", "="]],
},
"Purchase Order Item": {
"ref_dn_field": "purchase_order_item",
"compare_fields": [["project", "="], ["uom", "="], ["item_code", "="]],
"is_child_table": True,
"allow_duplicate_prev_row_id": True
}
})
if cint(frappe.db.get_single_value('Buying Settings', 'maintain_same_rate')) and not self.is_return:
self.validate_rate_with_reference_doc([["Purchase Order", "purchase_order", "purchase_order_item"]])
def po_required(self):
if frappe.db.get_value("Buying Settings", None, "po_required") == 'Yes':
for d in self.get('items'):
if not d.purchase_order:
frappe.throw(_("Purchase Order number required for Item {0}").format(d.item_code))
def get_already_received_qty(self, po, po_detail):
qty = frappe.db.sql("""select sum(qty) from `tabPurchase Receipt Item`
where purchase_order_item = %s and docstatus = 1
and purchase_order=%s
and parent != %s""", (po_detail, po, self.name))
return qty and flt(qty[0][0]) or 0.0
def get_po_qty_and_warehouse(self, po_detail):
po_qty, po_warehouse = frappe.db.get_value("Purchase Order Item", po_detail,
["qty", "warehouse"])
return po_qty, po_warehouse
# Check for Closed status
def check_for_closed_status(self):
check_list =[]
for d in self.get('items'):
if (d.meta.get_field('purchase_order') and d.purchase_order
and d.purchase_order not in check_list):
check_list.append(d.purchase_order)
check_for_closed_status('Purchase Order', d.purchase_order)
# on submit
def on_submit(self):
super(PurchaseReceipt, self).on_submit()
# Check for Approving Authority
frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype,
self.company, self.base_grand_total)
self.update_prevdoc_status()
if self.per_billed < 100:
self.update_billing_status()
# Updating stock ledger should always be called after updating prevdoc status,
# because updating ordered qty, reserved_qty_for_subcontract in bin
# depends upon updated ordered qty in PO
self.update_stock_ledger()
from erpnext.stock.doctype.serial_no.serial_no import update_serial_nos_after_submit
update_serial_nos_after_submit(self, "items")
self.make_gl_entries()
def check_next_docstatus(self):
submit_rv = frappe.db.sql("""select t1.name
from `tabPurchase Invoice` t1,`tabPurchase Invoice Item` t2
where t1.name = t2.parent and t2.purchase_receipt = %s and t1.docstatus = 1""",
(self.name))
if submit_rv:
frappe.throw(_("Purchase Invoice {0} is already submitted").format(self.submit_rv[0][0]))
def on_cancel(self):
super(PurchaseReceipt, self).on_cancel()
self.check_for_closed_status()
# Check if Purchase Invoice has been submitted against current Purchase Order
submitted = frappe.db.sql("""select t1.name
from `tabPurchase Invoice` t1,`tabPurchase Invoice Item` t2
where t1.name = t2.parent and t2.purchase_receipt = %s and t1.docstatus = 1""",
self.name)
if submitted:
frappe.throw(_("Purchase Invoice {0} is already submitted").format(submitted[0][0]))
self.update_prevdoc_status()
self.update_billing_status()
# Updating stock ledger should always be called after updating prevdoc status,
# because updating ordered qty in bin depends upon updated ordered qty in PO
self.update_stock_ledger()
self.make_gl_entries_on_cancel()
def get_current_stock(self):
for d in self.get('supplied_items'):
if self.supplier_warehouse:
bin = frappe.db.sql("select actual_qty from `tabBin` where item_code = %s and warehouse = %s", (d.rm_item_code, self.supplier_warehouse), as_dict = 1)
d.current_stock = bin and flt(bin[0]['actual_qty']) or 0
def get_gl_entries(self, warehouse_account=None):
from erpnext.accounts.general_ledger import process_gl_map
stock_rbnb = self.get_company_default("stock_received_but_not_billed")
expenses_included_in_valuation = self.get_company_default("expenses_included_in_valuation")
gl_entries = []
warehouse_with_no_account = []
negative_expense_to_be_booked = 0.0
stock_items = self.get_stock_items()
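		# For each stock item row: debit the warehouse stock account with the stock
		# value difference, credit Stock Received But Not Billed, then book landed
		# cost, subcontracting transfers and any divisional loss adjustment.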
for d in self.get("items"):
if d.item_code in stock_items and flt(d.valuation_rate) and flt(d.qty):
if warehouse_account.get(d.warehouse):
stock_value_diff = frappe.db.get_value("Stock Ledger Entry",
{"voucher_type": "Purchase Receipt", "voucher_no": self.name,
"voucher_detail_no": d.name}, "stock_value_difference")
if not stock_value_diff:
continue
gl_entries.append(self.get_gl_dict({
"account": warehouse_account[d.warehouse]["account"],
"against": stock_rbnb,
"cost_center": d.cost_center,
"remarks": self.get("remarks") or _("Accounting Entry for Stock"),
"debit": stock_value_diff
}, warehouse_account[d.warehouse]["account_currency"]))
# stock received but not billed
stock_rbnb_currency = get_account_currency(stock_rbnb)
gl_entries.append(self.get_gl_dict({
"account": stock_rbnb,
"against": warehouse_account[d.warehouse]["account"],
"cost_center": d.cost_center,
"remarks": self.get("remarks") or _("Accounting Entry for Stock"),
"credit": flt(d.base_net_amount, d.precision("base_net_amount")),
"credit_in_account_currency": flt(d.base_net_amount, d.precision("base_net_amount")) \
if stock_rbnb_currency==self.company_currency else flt(d.net_amount, d.precision("net_amount"))
}, stock_rbnb_currency))
negative_expense_to_be_booked += flt(d.item_tax_amount)
# Amount added through landed-cost-voucher
if flt(d.landed_cost_voucher_amount):
gl_entries.append(self.get_gl_dict({
"account": expenses_included_in_valuation,
"against": warehouse_account[d.warehouse]["account"],
"cost_center": d.cost_center,
"remarks": self.get("remarks") or _("Accounting Entry for Stock"),
"credit": flt(d.landed_cost_voucher_amount),
"project": d.project
}))
# sub-contracting warehouse
if flt(d.rm_supp_cost) and warehouse_account.get(self.supplier_warehouse):
gl_entries.append(self.get_gl_dict({
"account": warehouse_account[self.supplier_warehouse]["account"],
"against": warehouse_account[d.warehouse]["account"],
"cost_center": d.cost_center,
"remarks": self.get("remarks") or _("Accounting Entry for Stock"),
"credit": flt(d.rm_supp_cost)
}, warehouse_account[self.supplier_warehouse]["account_currency"]))
# divisional loss adjustment
valuation_amount_as_per_doc = flt(d.base_net_amount, d.precision("base_net_amount")) + \
flt(d.landed_cost_voucher_amount) + flt(d.rm_supp_cost) + flt(d.item_tax_amount)
divisional_loss = flt(valuation_amount_as_per_doc - stock_value_diff,
d.precision("base_net_amount"))
if divisional_loss:
if self.is_return or flt(d.item_tax_amount):
loss_account = expenses_included_in_valuation
else:
loss_account = stock_rbnb
gl_entries.append(self.get_gl_dict({
"account": loss_account,
"against": warehouse_account[d.warehouse]["account"],
"cost_center": d.cost_center,
"remarks": self.get("remarks") or _("Accounting Entry for Stock"),
"debit": divisional_loss,
"project": d.project
}, stock_rbnb_currency))
elif d.warehouse not in warehouse_with_no_account or \
d.rejected_warehouse not in warehouse_with_no_account:
warehouse_with_no_account.append(d.warehouse)
# Cost center-wise amount breakup for other charges included for valuation
valuation_tax = {}
for tax in self.get("taxes"):
if tax.category in ("Valuation", "Valuation and Total") and flt(tax.base_tax_amount_after_discount_amount):
if not tax.cost_center:
frappe.throw(_("Cost Center is required in row {0} in Taxes table for type {1}").format(tax.idx, _(tax.category)))
valuation_tax.setdefault(tax.cost_center, 0)
valuation_tax[tax.cost_center] += \
(tax.add_deduct_tax == "Add" and 1 or -1) * flt(tax.base_tax_amount_after_discount_amount)
if negative_expense_to_be_booked and valuation_tax:
# Backward compatibility:
# If expenses_included_in_valuation account has been credited in against PI
# and charges added via Landed Cost Voucher,
# post valuation related charges on "Stock Received But Not Billed"
negative_expense_booked_in_pi = frappe.db.sql("""select name from `tabPurchase Invoice Item` pi
where docstatus = 1 and purchase_receipt=%s
and exists(select name from `tabGL Entry` where voucher_type='Purchase Invoice'
and voucher_no=pi.parent and account=%s)""", (self.name, expenses_included_in_valuation))
if negative_expense_booked_in_pi:
expenses_included_in_valuation = stock_rbnb
against_account = ", ".join([d.account for d in gl_entries if flt(d.debit) > 0])
total_valuation_amount = sum(valuation_tax.values())
amount_including_divisional_loss = negative_expense_to_be_booked
i = 1
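			# Spread the valuation charges across cost centers pro-rata to their share
			# of the valuation tax; the last cost center absorbs the rounding remainder
			# so the total credited equals negative_expense_to_be_booked.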
for cost_center, amount in valuation_tax.items():
if i == len(valuation_tax):
applicable_amount = amount_including_divisional_loss
else:
applicable_amount = negative_expense_to_be_booked * (amount / total_valuation_amount)
amount_including_divisional_loss -= applicable_amount
gl_entries.append(
self.get_gl_dict({
"account": expenses_included_in_valuation,
"cost_center": cost_center,
"credit": applicable_amount,
"remarks": self.remarks or _("Accounting Entry for Stock"),
"against": against_account
})
)
i += 1
if warehouse_with_no_account:
frappe.msgprint(_("No accounting entries for the following warehouses") + ": \n" +
"\n".join(warehouse_with_no_account))
return process_gl_map(gl_entries)
def update_status(self, status):
self.set_status(update=True, status = status)
self.notify_update()
clear_doctype_notifications(self)
def update_billing_status(self, update_modified=True):
updated_pr = [self.name]
for d in self.get("items"):
if d.purchase_order_item:
updated_pr += update_billed_amount_based_on_po(d.purchase_order_item, update_modified)
for pr in set(updated_pr):
pr_doc = self if (pr == self.name) else frappe.get_doc("Purchase Receipt", pr)
pr_doc.update_billing_percentage(update_modified=update_modified)
self.load_from_db()
def update_billed_amount_based_on_po(po_detail, update_modified=True):
	# Billed against Purchase Order directly
billed_against_po = frappe.db.sql("""select sum(amount) from `tabPurchase Invoice Item`
where po_detail=%s and (pr_detail is null or pr_detail = '') and docstatus=1""", po_detail)
billed_against_po = billed_against_po and billed_against_po[0][0] or 0
	# Get all Purchase Receipt Item rows against the Purchase Order Item row
pr_details = frappe.db.sql("""select pr_item.name, pr_item.amount, pr_item.parent
from `tabPurchase Receipt Item` pr_item, `tabPurchase Receipt` pr
where pr.name=pr_item.parent and pr_item.purchase_order_item=%s
and pr.docstatus=1 and pr.is_return = 0
order by pr.posting_date asc, pr.posting_time asc, pr.name asc""", po_detail, as_dict=1)
updated_pr = []
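	# Illustrative example: if 100 was billed directly against the PO and this PR
	# item still has 60 unbilled, 60 is allocated here and the remaining 40 is
	# carried forward to the next PR in posting-date order (FIFO).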
for pr_item in pr_details:
		# Get billed amount directly against Purchase Receipt
		billed_amt_against_pr = frappe.db.sql("""select sum(amount) from `tabPurchase Invoice Item`
			where pr_detail=%s and docstatus=1""", pr_item.name)
		billed_amt_against_pr = billed_amt_against_pr and billed_amt_against_pr[0][0] or 0
		# Distribute billed amount directly against PO between PRs based on FIFO
		if billed_against_po and billed_amt_against_pr < pr_item.amount:
			pending_to_bill = flt(pr_item.amount) - billed_amt_against_pr
			if pending_to_bill <= billed_against_po:
				billed_amt_against_pr += pending_to_bill
				billed_against_po -= pending_to_bill
			else:
				billed_amt_against_pr += billed_against_po
				billed_against_po = 0
		frappe.db.set_value("Purchase Receipt Item", pr_item.name, "billed_amt", billed_amt_against_pr, update_modified=update_modified)
updated_pr.append(pr_item.parent)
return updated_pr
@frappe.whitelist()
def make_purchase_invoice(source_name, target_doc=None):
from frappe.model.mapper import get_mapped_doc
invoiced_qty_map = get_invoiced_qty_map(source_name)
def set_missing_values(source, target):
if len(target.get("items")) == 0:
frappe.throw(_("All items have already been invoiced"))
doc = frappe.get_doc(target)
doc.ignore_pricing_rule = 1
doc.run_method("set_missing_values")
doc.run_method("calculate_taxes_and_totals")
def update_item(source_doc, target_doc, source_parent):
target_doc.qty = source_doc.qty - invoiced_qty_map.get(source_doc.name, 0)
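	# Mapping below copies each Purchase Receipt Item onto a new Purchase Invoice
	# Item (name -> pr_detail, parent -> purchase_receipt) and filters out rows
	# whose remaining uninvoiced qty is already zero or less.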
doclist = get_mapped_doc("Purchase Receipt", source_name, {
"Purchase Receipt": {
"doctype": "Purchase Invoice",
"validation": {
"docstatus": ["=", 1],
},
},
"Purchase Receipt Item": {
"doctype": "Purchase Invoice Item",
"field_map": {
"name": "pr_detail",
"parent": "purchase_receipt",
"purchase_order_item": "po_detail",
"purchase_order": "purchase_order",
},
"postprocess": update_item,
"filter": lambda d: abs(d.qty) - abs(invoiced_qty_map.get(d.name, 0))<=0
},
"Purchase Taxes and Charges": {
"doctype": "Purchase Taxes and Charges",
"add_if_empty": True
}
}, target_doc, set_missing_values)
return doclist
def get_invoiced_qty_map(purchase_receipt):
"""returns a map: {pr_detail: invoiced_qty}"""
invoiced_qty_map = {}
for pr_detail, qty in frappe.db.sql("""select pr_detail, qty from `tabPurchase Invoice Item`
where purchase_receipt=%s and docstatus=1""", purchase_receipt):
if not invoiced_qty_map.get(pr_detail):
invoiced_qty_map[pr_detail] = 0
invoiced_qty_map[pr_detail] += qty
return invoiced_qty_map
@frappe.whitelist()
def make_purchase_return(source_name, target_doc=None):
from erpnext.controllers.sales_and_purchase_return import make_return_doc
return make_return_doc("Purchase Receipt", source_name, target_doc)
@frappe.whitelist()
def update_purchase_receipt_status(docname, status):
pr = frappe.get_doc("Purchase Receipt", docname)
pr.update_status(status)
|
adityaduggal/erpnext
|
erpnext/stock/doctype/purchase_receipt/purchase_receipt.py
|
Python
|
gpl-3.0
| 16,520
|
#
# Copyright (C) 2016 FreeIPA Contributors see COPYING for license
#
from __future__ import absolute_import
import pytest
from ipalib import errors, api
from ipaplatform.services import knownservices
from ipatests.test_xmlrpc.tracker.location_plugin import LocationTracker
from ipatests.test_xmlrpc.tracker.server_plugin import ServerTracker
from ipatests.test_xmlrpc.xmlrpc_test import (
XMLRPC_test,
raises_exact
)
from ipapython.dnsutil import DNSName
@pytest.fixture(scope='class', params=[u'location1', u'sk\xfa\u0161ka.idna'])
def location(request, xmlrpc_setup):
tracker = LocationTracker(request.param)
return tracker.make_fixture(request)
@pytest.fixture(scope='class')
def location_invalid(request, xmlrpc_setup):
tracker = LocationTracker(u'invalid..location')
return tracker
@pytest.fixture(scope='class')
def location_absolute(request, xmlrpc_setup):
tracker = LocationTracker(u'invalid.absolute.')
return tracker
@pytest.fixture(scope='class')
def server(request, xmlrpc_setup):
tracker = ServerTracker(api.env.host)
return tracker.make_fixture_clean_location(request)
@pytest.mark.tier1
class TestNonexistentIPALocation(XMLRPC_test):
def test_retrieve_nonexistent(self, location):
location.ensure_missing()
command = location.make_retrieve_command()
with raises_exact(errors.NotFound(
reason=u'%s: location not found' % location.idnsname)):
command()
def test_update_nonexistent(self, location):
location.ensure_missing()
command = location.make_update_command(updates=dict(
description=u'Nope'))
with raises_exact(errors.NotFound(
reason=u'%s: location not found' % location.idnsname)):
command()
def test_delete_nonexistent(self, location):
location.ensure_missing()
command = location.make_delete_command()
with raises_exact(errors.NotFound(
reason=u'%s: location not found' % location.idnsname)):
command()
@pytest.mark.tier1
class TestInvalidIPALocations(XMLRPC_test):
def test_invalid_name(self, location_invalid):
command = location_invalid.make_create_command()
with raises_exact(errors.ConversionError(
name=u'name',
error=u"empty DNS label")):
command()
def test_invalid_absolute(self, location_absolute):
command = location_absolute.make_create_command()
with raises_exact(errors.ValidationError(
name=u'name', error=u'must be relative')):
command()
@pytest.mark.tier1
class TestCRUD(XMLRPC_test):
def test_create_duplicate(self, location):
location.ensure_exists()
command = location.make_create_command()
with raises_exact(errors.DuplicateEntry(
message=u'location with name "%s" already exists' %
location.idnsname)):
command()
def test_retrieve_simple(self, location):
location.retrieve()
def test_retrieve_all(self, location):
location.retrieve(all=True)
def test_search_simple(self, location):
location.find()
def test_search_all(self, location):
location.find(all=True)
def test_update_simple(self, location):
location.update(dict(
description=u'Updated description',
),
expected_updates=dict(
description=[u'Updated description'],
))
location.retrieve()
def test_try_rename(self, location):
location.ensure_exists()
command = location.make_update_command(
updates=dict(setattr=u'idnsname=changed'))
with raises_exact(errors.NotAllowedOnRDN()):
command()
def test_delete_location(self, location):
location.delete()
@pytest.mark.tier1
@pytest.mark.skipif(
not api.Command.dns_is_enabled()['result'], reason='DNS not configured')
class TestLocationsServer(XMLRPC_test):
messages = [{
u'data': {u'service': knownservices.named.systemd_name,
u'server': api.env.host},
u'message': (u'Service %s requires restart '
u'on IPA server %s to apply configuration '
u'changes.' % (knownservices.named.systemd_name,
api.env.host)),
u'code': 13025,
u'type': u'warning',
u'name': u'ServiceRestartRequired'}]
def test_add_nonexistent_location_to_server(self, server):
nonexistent_loc = DNSName(u'nonexistent-location')
command = server.make_update_command(
updates=dict(
ipalocation_location=nonexistent_loc,
)
)
with raises_exact(errors.NotFound(
reason=u"{location}: location not found".format(
location=nonexistent_loc
))):
command()
def test_add_location_to_server(self, location, server):
location.ensure_exists()
server.update(
updates={u'ipalocation_location': location.idnsname_obj},
expected_updates={u'ipalocation_location': [location.idnsname_obj],
u'enabled_role_servrole': lambda other: True},
messages=self.messages)
location.add_server_to_location(server.server_name)
location.retrieve()
location.remove_server_from_location(server.server_name)
def test_retrieve(self, server):
server.retrieve()
def test_retrieve_all(self, server):
server.retrieve(all=True)
def test_search_server_with_location(self, location, server):
command = server.make_find_command(
server.server_name, in_location=location.idnsname_obj)
result = command()
server.check_find(result)
def test_search_server_with_location_with_all(self, location, server):
command = server.make_find_command(
server.server_name, in_location=location.idnsname_obj, all=True)
result = command()
server.check_find(result, all=True)
def test_search_server_without_location(self, location, server):
command = server.make_find_command(
server.server_name, not_in_location=location.idnsname_obj)
result = command()
server.check_find_nomatch(result)
def test_add_location_to_server_custom_weight(self, location, server):
location.ensure_exists()
server.update(
updates={u'ipalocation_location': location.idnsname_obj,
u'ipaserviceweight': 200},
expected_updates={u'ipalocation_location': [location.idnsname_obj],
u'enabled_role_servrole': lambda other: True,
u'ipaserviceweight': [u'200']},
messages=self.messages)
# remove invalid data from the previous test
location.remove_server_from_location(server.server_name)
location.add_server_to_location(server.server_name, weight=200)
location.retrieve()
def test_remove_location_from_server(self, location, server):
server.update(
updates={u'ipalocation_location': None},
expected_updates={u'enabled_role_servrole': lambda other: True},
messages=self.messages)
location.remove_server_from_location(server.server_name)
location.retrieve()
def test_remove_service_weight_from_server(self, location, server):
server.update(
updates={u'ipaserviceweight': None},
expected_updates={u'enabled_role_servrole': lambda other: True},
messages=self.messages)
location.retrieve()
|
encukou/freeipa
|
ipatests/test_xmlrpc/test_location_plugin.py
|
Python
|
gpl-3.0
| 7,773
|
import unittest
from releasetasks.test.desktop import make_task_graph, do_common_assertions, \
get_task_by_name, create_firefox_test_args
from releasetasks.test import generate_scope_validator, PVT_KEY_FILE, verify
from voluptuous import Schema, truth
class TestVersionBump(unittest.TestCase):
maxDiff = 30000
graph = None
task = None
human_task = None
payload = None
def setUp(self):
self.graph_schema = Schema({
'scopes': generate_scope_validator(scopes={
"queue:task-priority:high",
})
}, extra=True, required=True)
self.task_schema = Schema({
'task': {
'provisionerId': 'buildbot-bridge',
'workerType': 'buildbot-bridge',
'payload': {
'properties': {
'next_version': '42.0b3',
'repo_path': 'releases/foo',
'script_repo_revision': 'abcd',
}
}
}
}, extra=True, required=True)
self.human_task_schema = Schema({
'task': {
'provisionerId': 'null-provisioner',
'workerType': 'human-decision',
}
}, extra=True, required=True)
test_kwargs = create_firefox_test_args({
'bouncer_enabled': True,
'postrelease_version_bump_enabled': True,
'release_channels': ['foo'],
'final_verify_channels': ['foo'],
'signing_pvt_key': PVT_KEY_FILE,
'en_US_config': {
"platforms": {
"macosx64": {},
"win32": {},
"win64": {},
"linux": {},
"linux64": {},
}
},
})
self.graph = make_task_graph(**test_kwargs)
self.task = get_task_by_name(self.graph, "release-foo-firefox_version_bump")
self.human_task = get_task_by_name(self.graph, "publish_release_human_decision")
def generate_task_dependency_validator(self):
human_task_id = self.human_task['taskId']
@truth
def validate_task_dependencies(task):
return human_task_id in task['requires']
return validate_task_dependencies
def test_common_assertions(self):
do_common_assertions(self.graph)
def test_version_bump_task(self):
verify(self.task, self.task_schema, self.generate_task_dependency_validator())
def test_version_bump_human_task(self):
verify(self.human_task, self.human_task_schema)
|
mozilla/releasetasks
|
releasetasks/test/desktop/test_version_bump.py
|
Python
|
mpl-2.0
| 2,641
|
# -*- coding: utf-8 -*-
## File autogenerated by SQLAutoCode
## see http://code.google.com/p/sqlautocode/
from sqlalchemy import *
from sqlalchemy.dialects.postgresql import *
from geoalchemy2 import Geography
from models import metadata
object_type = Table('object_type', metadata,*[
Column('id', INTEGER(), primary_key=True, nullable=False),
Column('name', TEXT(), primary_key=False, nullable=False),],
schema='navitia')
object_properties = Table('object_properties', metadata,*[
Column('object_id', BIGINT(), primary_key=False, nullable=False),
Column('object_type', TEXT(), primary_key=False, nullable=False),
Column('property_name', TEXT(), primary_key=False, nullable=False),
Column('property_value', TEXT(), primary_key=False, nullable=False),],
schema='navitia')
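# object_properties is a generic key/value store: each row attaches one named
# property value to an arbitrary object identified by (object_type, object_id).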
connection_kind = Table('connection_kind', metadata,*[
Column('id', BIGINT(), primary_key=True, nullable=False),
Column('name', TEXT(), primary_key=False, nullable=False),],
schema='navitia')
odt_type = Table('odt_type', metadata,*[
Column('id', BIGINT(), primary_key=True, nullable=False),
Column('name', TEXT(), primary_key=False, nullable=False),],
schema='navitia')
timezone = Table('timezone', metadata,*[
Column('id', BIGINT(), primary_key=True, nullable=False),
Column('name', TEXT(), primary_key=False, nullable=False),],
schema='navitia')
tz_dst = Table('tz_dst', metadata,*[
Column('id', BIGINT(), primary_key=True, nullable=False),
Column('tz_id', BIGINT(), primary_key=False, nullable=False),
Column('beginning_date', DATE(), primary_key=False, nullable=False),
Column('end_date', DATE(), primary_key=False, nullable=False),
Column('utc_offset', INTEGER(), primary_key=False, nullable=False),
ForeignKeyConstraint(['tz_id'], [u'navitia.timezone.id'], name=u'associated_tz_dst_fkey')
],
schema='navitia')
meta_vj = Table('meta_vj', metadata,*[
Column('id', BIGINT(), primary_key=True, nullable=False),
Column('name', TEXT(), primary_key=False, nullable=False),
Column('timezone', BIGINT(), primary_key=False, nullable=False),
ForeignKeyConstraint(['timezone'], [u'navitia.timezone.id'], name=u'associated_timezone_metavj_fkey')
],
schema='navitia')
contributor = Table('contributor', metadata,*[
Column('id', BIGINT(), primary_key=True, nullable=False),
Column('uri', TEXT(), primary_key=False, nullable=False),
Column('name', TEXT(), primary_key=False, nullable=False),
Column('website', TEXT(), primary_key=False, nullable=True),
Column('license', TEXT(), primary_key=False, nullable=True),],
schema='navitia')
dataset = Table('dataset', metadata,*[
Column('id', BIGINT(), primary_key=True, nullable=False),
Column('uri', TEXT(), primary_key=False, nullable=False),
Column('description', TEXT(), primary_key=False, nullable=True),
Column('system', TEXT(), primary_key=False, nullable=True),
Column('start_date', DATE(), primary_key=False, nullable=False),
Column('end_date', DATE(), primary_key=False, nullable=False),
Column('contributor_id', BIGINT(), primary_key=False, nullable=False),
ForeignKeyConstraint(['contributor_id'], [u'navitia.contributor.id'], name=u'contributor_dataset_fkey')
],
schema='navitia')
commercial_mode = Table('commercial_mode', metadata,*[
Column('id', BIGINT(), primary_key=True, nullable=False),
Column('uri', TEXT(), primary_key=False, nullable=False),
Column('name', TEXT(), primary_key=False, nullable=False),],
schema='navitia')
parameters = Table('parameters', metadata,*[
Column('beginning_date', DATE(), primary_key=False),
Column('end_date', DATE(), primary_key=False),
Column('shape', Geography(geometry_type='MULTIPOLYGON', srid=4326, spatial_index=False), primary_key=False),
Column('shape_computed', BOOLEAN(), primary_key=False, default=text(u'true')),
Column('parse_pois_from_osm', BOOLEAN(), primary_key=False, default=text(u'true')),
],
schema='navitia')
company = Table('company', metadata,*[
Column('id', BIGINT(), primary_key=True, nullable=False),
Column('name', TEXT(), primary_key=False, nullable=False),
Column('uri', TEXT(), primary_key=False, nullable=False),
Column('address_name', TEXT(), primary_key=False),
Column('address_number', TEXT(), primary_key=False),
Column('address_type_name', TEXT(), primary_key=False),
Column('phone_number', TEXT(), primary_key=False),
Column('mail', TEXT(), primary_key=False),
Column('website', TEXT(), primary_key=False),
Column('fax', TEXT(), primary_key=False),],
schema='navitia')
physical_mode = Table('physical_mode', metadata,*[
Column('id', BIGINT(), primary_key=True, nullable=False),
Column('uri', TEXT(), primary_key=False, nullable=False),
Column('name', TEXT(), primary_key=False, nullable=False),
Column('co2_emission', FLOAT(), primary_key=False, nullable=True)],
schema='navitia')
validity_pattern = Table('validity_pattern', metadata,*[
Column('id', BIGINT(), primary_key=True, nullable=False),
Column('days', BIT(length=400, varying=True), primary_key=False, nullable=False),],
schema='navitia')
associated_calendar = Table('associated_calendar', metadata,*[
Column('id', BIGINT(), primary_key=True, nullable=False, default=text(u'nextval(\'"navitia".associated_calendar_id_seq\'::regclass)')),
Column('calendar_id', BIGINT(), primary_key=False, nullable=False),
Column('name', TEXT(), primary_key=False, nullable=False),
ForeignKeyConstraint(['calendar_id'], [u'navitia.calendar.id'], name=u'associated_calendar_calendar_fkey'),
],
schema='navitia')
associated_exception_date = Table('associated_exception_date', metadata,*[
Column('id', BIGINT(), primary_key=True, nullable=False, default=text(u'nextval(\'"navitia".associated_exception_date_id_seq\'::regclass)')),
Column('datetime', DATE(), primary_key=False, nullable=False),
Column('type_ex', ENUM(u'Add', u'Sub', name='associated_exception_type'), primary_key=False, nullable=False),
Column('associated_calendar_id', BIGINT(), primary_key=False, nullable=False),
ForeignKeyConstraint(['associated_calendar_id'], [u'navitia.associated_calendar.id'], name=u'associated_exception_date_associated_calendar_fkey'),
],
schema='navitia')
rel_metavj_associated_calendar = Table('rel_metavj_associated_calendar', metadata,*[
Column('meta_vj_id', BIGINT(), primary_key=False),
Column('associated_calendar_id', BIGINT(), primary_key=False),
ForeignKeyConstraint(['meta_vj_id'], [u'navitia.meta_vj.id'], name=u'rel_metavj_associated_calendar_meta_vj_fkey'),
ForeignKeyConstraint(['associated_calendar_id'], [u'navitia.associated_calendar.id'], name=u'rel_metavj_associated_calendar_associated_calendar_fkey')],
schema='navitia')
rel_line_company = Table('rel_line_company', metadata,*[
Column('line_id', BIGINT(), primary_key=True, nullable=False),
Column('company_id', BIGINT(), primary_key=True, nullable=False),
ForeignKeyConstraint(['line_id'], [u'navitia.line.id'], name=u'rel_line_company_line_id_fkey'),
ForeignKeyConstraint(['company_id'], [u'navitia.company.id'], name=u'rel_line_company_company_id_fkey'),],
schema='navitia')
route = Table('route', metadata,*[
Column('id', BIGINT(), primary_key=True, nullable=False),
Column('line_id', BIGINT(), primary_key=False, nullable=False),
Column('name', TEXT(), primary_key=False, nullable=False),
Column('uri', TEXT(), primary_key=False, nullable=False),
Column('shape', Geography(geometry_type='MULTILINESTRING', srid=4326, spatial_index=False), primary_key=False),
Column('destination_stop_area_id', BIGINT(), primary_key=False, nullable=True),
Column('direction_type', TEXT(), primary_key=False, nullable=True),
ForeignKeyConstraint(['line_id'], [u'navitia.line.id'], name=u'route_line_id_fkey'),],
schema='navitia')
vehicle_properties = Table('vehicle_properties', metadata,*[
Column('id', BIGINT(), primary_key=True, nullable=False, default=text(u'nextval(\'"navitia".vehicle_properties_id_seq\'::regclass)')),
Column('wheelchair_accessible', BOOLEAN(), primary_key=False, nullable=False),
Column('bike_accepted', BOOLEAN(), primary_key=False, nullable=False),
Column('air_conditioned', BOOLEAN(), primary_key=False, nullable=False),
Column('visual_announcement', BOOLEAN(), primary_key=False, nullable=False),
Column('audible_announcement', BOOLEAN(), primary_key=False, nullable=False),
Column('appropriate_escort', BOOLEAN(), primary_key=False, nullable=False),
Column('appropriate_signage', BOOLEAN(), primary_key=False, nullable=False),
Column('school_vehicle', BOOLEAN(), primary_key=False, nullable=False),
],
schema='navitia')
network = Table('network', metadata,*[
Column('id', BIGINT(), primary_key=True, nullable=False),
Column('name', TEXT(), primary_key=False, nullable=False),
Column('uri', TEXT(), primary_key=False, nullable=False),
Column('sort', INTEGER(), primary_key=False, nullable=False, default=text(u'2147483647')),
Column('website', TEXT(), primary_key=False),
],
schema='navitia')
origin_destination = Table('origin_destination', metadata,*[
Column('id', BIGINT(), primary_key=True, nullable=False),
Column('origin_id', TEXT(), primary_key=False, nullable=False),
Column('origin_mode', ENUM(u'Zone', u'StopArea', u'Mode', name='fare_od_mode'), primary_key=False, nullable=False),
Column('destination_id', TEXT(), primary_key=False, nullable=False),
Column('destination_mode', ENUM(u'Zone', u'StopArea', u'Mode', name='fare_od_mode'), primary_key=False, nullable=False),
],
schema='navitia')
ticket = Table('ticket', metadata,*[
Column('ticket_key', TEXT(), primary_key=True, nullable=False),
Column('ticket_title', TEXT(), primary_key=False),
Column('ticket_comment', TEXT(), primary_key=False),],
schema='navitia')
od_ticket = Table('od_ticket', metadata,*[
Column('id', BIGINT(), primary_key=True, nullable=False),
Column('od_id', BIGINT(), primary_key=False, nullable=False),
Column('ticket_id', TEXT(), primary_key=False, nullable=False),
ForeignKeyConstraint(['ticket_id'], [u'navitia.ticket.ticket_key'], name=u'od_ticket_ticket_id_fkey'),
ForeignKeyConstraint(['od_id'], [u'navitia.origin_destination.id'], name=u'od_ticket_od_id_fkey'),],
schema='navitia')
connection_type = Table('connection_type', metadata,*[
Column('id', BIGINT(), primary_key=True, nullable=False),
Column('name', TEXT(), primary_key=False, nullable=False),],
schema='navitia')
properties = Table('properties', metadata,*[
Column('id', BIGINT(), primary_key=True, nullable=False),
Column('wheelchair_boarding', BOOLEAN(), primary_key=False, nullable=False),
Column('sheltered', BOOLEAN(), primary_key=False, nullable=False),
Column('elevator', BOOLEAN(), primary_key=False, nullable=False),
Column('escalator', BOOLEAN(), primary_key=False, nullable=False),
Column('bike_accepted', BOOLEAN(), primary_key=False, nullable=False),
Column('bike_depot', BOOLEAN(), primary_key=False, nullable=False),
Column('visual_announcement', BOOLEAN(), primary_key=False, nullable=False),
Column('audible_announcement', BOOLEAN(), primary_key=False, nullable=False),
Column('appropriate_escort', BOOLEAN(), primary_key=False, nullable=False),
Column('appropriate_signage', BOOLEAN(), primary_key=False, nullable=False),
],
schema='navitia')
connection = Table('connection', metadata,*[
Column('departure_stop_point_id', BIGINT(), primary_key=True, nullable=False),
Column('destination_stop_point_id', BIGINT(), primary_key=True, nullable=False),
Column('connection_type_id', BIGINT(), primary_key=False, nullable=False),
Column('properties_id', BIGINT(), primary_key=False),
Column('duration', INTEGER(), primary_key=False, nullable=False),
Column('max_duration', INTEGER(), primary_key=False, nullable=False),
Column('display_duration', INTEGER(), primary_key=False, nullable=False),
ForeignKeyConstraint(['properties_id'], [u'navitia.properties.id'], name=u'connection_properties_id_fkey'),
ForeignKeyConstraint(['destination_stop_point_id'], [u'navitia.stop_point.id'], name=u'connection_destination_stop_point_id_fkey'),
ForeignKeyConstraint(['departure_stop_point_id'], [u'navitia.stop_point.id'], name=u'connection_departure_stop_point_id_fkey'),
ForeignKeyConstraint(['connection_type_id'], [u'navitia.connection_type.id'], name=u'connection_connection_type_id_fkey'),],
schema='navitia')
vehicle_journey = Table('vehicle_journey', metadata,*[
Column('id', BIGINT(), primary_key=True, nullable=False),
Column('adapted_validity_pattern_id', BIGINT(), primary_key=False, nullable=False),
Column('validity_pattern_id', BIGINT(), primary_key=False),
Column('company_id', BIGINT(), primary_key=False, nullable=False),
Column('route_id', BIGINT(), primary_key=False, nullable=True),
Column('physical_mode_id', BIGINT(), primary_key=False, nullable=True),
Column('uri', TEXT(), primary_key=False, nullable=False),
Column('odt_message', TEXT(), primary_key=False),
Column('name', TEXT(), primary_key=False, nullable=False),
Column('odt_type_id', BIGINT(), primary_key=False),
Column('vehicle_properties_id', BIGINT(), primary_key=False),
Column('theoric_vehicle_journey_id', BIGINT(), primary_key=False),
Column('previous_vehicle_journey_id', BIGINT(), primary_key=False),
Column('next_vehicle_journey_id', BIGINT(), primary_key=False),
Column('start_time', INTEGER(), primary_key=False),
Column('end_time', INTEGER(), primary_key=False),
Column('headway_sec', INTEGER(), primary_key=False),
Column('utc_to_local_offset', INTEGER(), primary_key=False),
Column('is_frequency', BOOLEAN(), primary_key=False),
Column('vj_class', ENUM(u'Theoric', u'Adapted', u'RealTime', name='vj_classification'), server_default=u'Theoric', default=u'Theoric', primary_key=False, nullable=False),
Column('meta_vj_name', TEXT(), primary_key=False),
Column('dataset_id', BIGINT(), primary_key=False, nullable=True),
ForeignKeyConstraint(['vehicle_properties_id'], [u'navitia.vehicle_properties.id'], name=u'vehicle_journey_vehicle_properties_id_fkey'),
ForeignKeyConstraint(['validity_pattern_id'], [u'navitia.validity_pattern.id'], name=u'vehicle_journey_validity_pattern_id_fkey'),
ForeignKeyConstraint(['previous_vehicle_journey_id'], [u'navitia.vehicle_journey.id'], name=u'vehicle_journey_previous_vehicle_journey_id_fkey'),
ForeignKeyConstraint(['next_vehicle_journey_id'], [u'navitia.vehicle_journey.id'], name=u'vehicle_journey_next_vehicle_journey_id_fkey'),
ForeignKeyConstraint(['route_id'], [u'navitia.route.id'], name=u'vehicle_journey_route_id_fkey'),
ForeignKeyConstraint(['physical_mode_id'], [u'navitia.physical_mode.id'], name=u'vehicle_journey_physical_mode_id_fkey'),
ForeignKeyConstraint(['adapted_validity_pattern_id'], [u'navitia.validity_pattern.id'], name=u'vehicle_journey_adapted_validity_pattern_id_fkey'),
ForeignKeyConstraint(['company_id'], [u'navitia.company.id'], name=u'vehicle_journey_company_id_fkey'),
ForeignKeyConstraint(['theoric_vehicle_journey_id'], [u'navitia.vehicle_journey.id'], name=u'vehicle_journey_theoric_vehicle_journey_id_fkey'),
ForeignKeyConstraint(['dataset_id'], [u'navitia.dataset.id'], name=u'vehicle_journey_dataset_id_fkey'),],
schema='navitia')
stop_point = Table('stop_point', metadata,*[
Column('id', BIGINT(), primary_key=True, nullable=False),
Column('properties_id', BIGINT(), primary_key=False),
Column('uri', TEXT(), primary_key=False, nullable=False),
Column('coord', Geography(geometry_type='POINT', srid=4326, spatial_index=False), primary_key=False),
Column('fare_zone', INTEGER(), primary_key=False),
Column('name', TEXT(), primary_key=False, nullable=False),
Column('stop_area_id', BIGINT(), primary_key=False, nullable=False),
Column('platform_code', TEXT(), primary_key=False),
Column('is_zonal', BOOLEAN(), primary_key=False, nullable=False, default=text(u'false')),
Column('area', Geography(geometry_type='MULTIPOLYGON', srid=4326, spatial_index=False), primary_key=False, nullable=True),
ForeignKeyConstraint(['properties_id'], [u'navitia.properties.id'], name=u'stop_point_properties_id_fkey'),
ForeignKeyConstraint(['stop_area_id'], [u'navitia.stop_area.id'], name=u'stop_point_stop_area_id_fkey'),],
schema='navitia')
stop_time = Table('stop_time', metadata,*[
Column('id', BIGINT(), primary_key=True, nullable=False, default=text(u'nextval(\'"navitia".stop_time_id_seq\'::regclass)')),
Column('vehicle_journey_id', BIGINT(), primary_key=False, nullable=False),
Column('order', INTEGER(), primary_key=False, nullable=True),
Column('stop_point_id', BIGINT(), primary_key=False, nullable=True),
Column('shape_from_prev', Geography(geometry_type='LINESTRING', srid=4326, spatial_index=False), primary_key=False),
Column('arrival_time', INTEGER(), primary_key=False),
Column('departure_time', INTEGER(), primary_key=False),
Column('local_traffic_zone', INTEGER(), primary_key=False),
Column('odt', BOOLEAN(), primary_key=False, nullable=False),
Column('pick_up_allowed', BOOLEAN(), primary_key=False, nullable=False),
Column('drop_off_allowed', BOOLEAN(), primary_key=False, nullable=False),
Column('is_frequency', BOOLEAN(), primary_key=False, nullable=False),
Column('date_time_estimated', BOOLEAN(), primary_key=False, nullable=False, default=text(u'false')),
Column('properties_id', BIGINT(), primary_key=False),
Column('headsign', TEXT(), primary_key=False, nullable=True),
ForeignKeyConstraint(['vehicle_journey_id'], [u'navitia.vehicle_journey.id'], name=u'stop_time_vehicle_journey_id_fkey'),
ForeignKeyConstraint(['properties_id'], [u'navitia.properties.id'], name=u'stop_time_properties_id_fkey'),
ForeignKeyConstraint(['stop_point_id'], [u'navitia.stop_point.id'], name=u'stop_time_stop_point_id_fkey'),],
schema='navitia')
period = Table('period', metadata,*[
Column('id', BIGINT(), primary_key=True, nullable=False, default=text(u'nextval(\'"navitia".period_id_seq\'::regclass)')),
Column('calendar_id', BIGINT(), primary_key=False, nullable=False),
Column('begin_date', DATE(), primary_key=False, nullable=False),
Column('end_date', DATE(), primary_key=False, nullable=False),
ForeignKeyConstraint(['calendar_id'], [u'navitia.calendar.id'], name=u'period_calendar_id_fkey'),],
schema='navitia')
week_pattern = Table('week_pattern', metadata,*[
Column('id', BIGINT(), primary_key=True, nullable=False, default=text(u'nextval(\'"navitia".week_pattern_id_seq\'::regclass)')),
Column('monday', BOOLEAN(), primary_key=False, nullable=False),
Column('tuesday', BOOLEAN(), primary_key=False, nullable=False),
Column('wednesday', BOOLEAN(), primary_key=False, nullable=False),
Column('thursday', BOOLEAN(), primary_key=False, nullable=False),
Column('friday', BOOLEAN(), primary_key=False, nullable=False),
Column('saturday', BOOLEAN(), primary_key=False, nullable=False),
Column('sunday', BOOLEAN(), primary_key=False, nullable=False),
],
schema='navitia')
exception_date = Table('exception_date', metadata,*[
Column('id', BIGINT(), primary_key=True, nullable=False, default=text(u'nextval(\'"navitia".exception_date_id_seq\'::regclass)')),
Column('datetime', DATE(), primary_key=False, nullable=False),
Column('type_ex', ENUM(u'Add', u'Sub', name='exception_type'), primary_key=False, nullable=False),
Column('calendar_id', BIGINT(), primary_key=False, nullable=False),
ForeignKeyConstraint(['calendar_id'], [u'navitia.calendar.id'], name=u'exception_date_calendar_id_fkey'),
],
schema='navitia')
rel_calendar_line = Table('rel_calendar_line', metadata,*[
Column('calendar_id', BIGINT(), primary_key=True, nullable=False),
Column('line_id', BIGINT(), primary_key=True, nullable=False),
ForeignKeyConstraint(['line_id'], [u'navitia.line.id'], name=u'rel_calendar_line_line_id_fkey'),
ForeignKeyConstraint(['calendar_id'], [u'navitia.calendar.id'], name=u'rel_calendar_line_calendar_id_fkey'),],
schema='navitia')
line = Table('line', metadata,*[
Column('id', BIGINT(), primary_key=True, nullable=False),
Column('network_id', BIGINT(), primary_key=False, nullable=False),
Column('commercial_mode_id', BIGINT(), primary_key=False, nullable=False),
Column('uri', TEXT(), primary_key=False, nullable=False),
Column('name', TEXT(), primary_key=False, nullable=False),
Column('code', TEXT(), primary_key=False),
Column('color', TEXT(), primary_key=False),
Column('sort', INTEGER(), primary_key=False, nullable=False, default=text(u'2147483647')),
Column('shape', Geography(geometry_type='MULTILINESTRING', srid=4326, spatial_index=False), primary_key=False),
Column('opening_time', TIME(), primary_key=False),
Column('closing_time', TIME(), primary_key=False),
Column('text_color', TEXT(), primary_key=False),
ForeignKeyConstraint(['commercial_mode_id'], [u'navitia.commercial_mode.id'], name=u'line_commercial_mode_id_fkey'),
ForeignKeyConstraint(['network_id'], [u'navitia.network.id'], name=u'line_network_id_fkey'),],
schema='navitia')
line_group = Table('line_group', metadata,*[
Column('id', BIGINT(), primary_key=True, nullable=False),
Column('uri', TEXT(), primary_key=False, nullable=False),
Column('name', TEXT(), primary_key=False, nullable=False),
Column('main_line_id', BIGINT(), primary_key=False, nullable=False),
ForeignKeyConstraint(['main_line_id'], [u'navitia.line.id'], name=u'line_group_line_id_fkey'),],
schema='navitia')
line_group_link = Table('line_group_link', metadata,*[
Column('group_id', BIGINT(), primary_key=True, nullable=False),
Column('line_id', BIGINT(), primary_key=True, nullable=False),
ForeignKeyConstraint(['group_id'], [u'navitia.line_group.id'], name=u'line_group_link_group_id_fkey'),
ForeignKeyConstraint(['line_id'], [u'navitia.line.id'], name=u'line_group_link_line_id_fkey'),],
schema='navitia')
calendar = Table('calendar', metadata,*[
Column('id', BIGINT(), primary_key=True, nullable=False, default=text(u'nextval(\'"navitia".calendar_id_seq\'::regclass)')),
Column('uri', TEXT(), primary_key=False, nullable=False),
Column('name', TEXT(), primary_key=False, nullable=False),
Column('week_pattern_id', BIGINT(), primary_key=False, nullable=False),
ForeignKeyConstraint(['week_pattern_id'], [u'navitia.week_pattern.id'], name=u'calendar_week_pattern_id_fkey'),],
schema='navitia')
transition = Table('transition', metadata,*[
Column('id', BIGINT(), primary_key=True, nullable=False),
Column('before_change', TEXT(), primary_key=False, nullable=False),
Column('after_change', TEXT(), primary_key=False, nullable=False),
Column('start_trip', TEXT(), primary_key=False, nullable=False),
Column('end_trip', TEXT(), primary_key=False, nullable=False),
Column('global_condition', ENUM(u'nothing', u'exclusive', u'with_changes', name='fare_transition_condition'), primary_key=False, nullable=False),
Column('ticket_id', TEXT(), primary_key=False),
ForeignKeyConstraint(['ticket_id'], [u'navitia.ticket.ticket_key'], name=u'transition_ticket_id_fkey'),
],
schema='navitia')
dated_ticket = Table('dated_ticket', metadata,*[
Column('id', BIGINT(), primary_key=True, nullable=False),
Column('ticket_id', TEXT(), primary_key=False),
Column('valid_from', DATE(), primary_key=False, nullable=False),
Column('valid_to', DATE(), primary_key=False, nullable=False),
Column('ticket_price', INTEGER(), primary_key=False, nullable=False),
Column('comments', TEXT(), primary_key=False),
Column('currency', TEXT(), primary_key=False),
ForeignKeyConstraint(['ticket_id'], [u'navitia.ticket.ticket_key'], name=u'dated_ticket_ticket_id_fkey'),],
schema='navitia')
admin_stop_area = Table('admin_stop_area', metadata,*[
Column('admin_id', TEXT(), primary_key=False, nullable=False),
Column('stop_area_id', BIGINT(), primary_key=False, nullable=False),
ForeignKeyConstraint(['stop_area_id'], [u'navitia.stop_area.id'], name=u'admin_stop_area_stop_area_id_fkey'),],
schema='navitia')
stop_area = Table('stop_area', metadata,*[
Column('id', BIGINT(), primary_key=True, nullable=False),
Column('properties_id', BIGINT(), primary_key=False),
Column('uri', TEXT(), primary_key=False, nullable=False),
Column('name', TEXT(), primary_key=False, nullable=False),
Column('coord', Geography(geometry_type='POINT', srid=4326, spatial_index=False), primary_key=False),
Column('visible', BOOLEAN(), primary_key=False, nullable=False, default=text(u'true')),
Column('timezone', TEXT(), primary_key=False),
ForeignKeyConstraint(['properties_id'], [u'navitia.properties.id'], name=u'stop_area_properties_id_fkey'),],
schema='navitia')
object_code = Table('object_code', metadata,*[
Column('object_type_id', BIGINT(), nullable=False),
Column('object_id', BIGINT(), nullable=False),
Column('key', TEXT(), nullable=False),
Column('value', TEXT(), nullable=False),
ForeignKeyConstraint(['object_type_id'], [u'navitia.object_type.id'], name=u'object_type_id_fkey'),],
schema='navitia')
comments = Table('comments', metadata,*[
Column('id', BIGINT(), primary_key=True, nullable=False),
Column('comment', TEXT(), primary_key=False, nullable=False),
],
schema='navitia')
object_comments = Table('ptobject_comments', metadata,*[
Column('id', BIGINT(), primary_key=True, nullable=False),
Column('object_type', TEXT(), primary_key=False, nullable=False),
Column('object_id', BIGINT(), primary_key=False, nullable=False),
Column('comment_id', BIGINT(), primary_key=False, nullable=False),
ForeignKeyConstraint(['comment_id'], [u'navitia.comments.id'], name=u'ptobject_comments_comment_id_fkey'),
],
schema='navitia')
object_feed_infos = Table('feed_info', metadata,*[
Column('key', TEXT(), primary_key=True, nullable=False),
Column('value', TEXT(), primary_key=False, nullable=True)
],
schema='navitia')
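# Usage sketch (illustrative, not part of the generated schema module): bind an
# engine and create these tables, assuming the 'navitia' schema already exists:
#   from sqlalchemy import create_engine
#   engine = create_engine('postgresql://user:password@localhost/navitia')  # hypothetical DSN
#   metadata.create_all(engine)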
|
TeXitoi/navitia
|
source/sql/models/navitia.py
|
Python
|
agpl-3.0
| 26,527
|
# Copyright (C) 2015 Renato Lima - Akretion
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from . import res_config_settings
from . import l10n_br_zip
from . import format_address_mixin
|
OCA/l10n-brazil
|
l10n_br_zip/models/__init__.py
|
Python
|
agpl-3.0
| 206
|
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
import coloredlogs
def enable_color(enabled):
# type: (bool) -> None
"""
Get and check all the parameters.
:params enabled: true to enable the color in logs, otherwise false
"""
if enabled:
coloredlogs.install(
level='DEBUG',
fmt='%(asctime)-15s %(name)s %(message)s',
level_styles={
'critical': {'color': 'red', 'bold': True},
'debug': {'color': 'black', 'bold': True},
'error': {'color': 'red'},
'info': {'color': 'green'},
'notice': {'color': 'magenta'},
'spam': {'color': 'black', 'faint': True},
'success': {'color': 'green', 'bold': True},
'verbose': {'color': 'cyan'},
'warning': {'color': 'yellow'},
},
field_styles={'asctime': {'color': 'white'}, 'name': {'color': 'blue'}},
)
else:
coloredlogs.install(
level='DEBUG', fmt='%(asctime)-15s %(name)s %(message)s', level_styles={}, field_styles={}
)
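# Illustrative usage (not part of this module): call enable_color(True) early at
# start-up so coloredlogs installs its handler on the root logger before other
# loggers are configured; enable_color(False) keeps plain, uncolored output.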
|
xlqian/navitia
|
source/navitiacommon/navitiacommon/log.py
|
Python
|
agpl-3.0
| 2,328
|
"""
Functionality for generating grade reports.
"""
import logging
import re
from collections import OrderedDict
from datetime import datetime
from itertools import chain, izip, izip_longest
from time import time
from lazy import lazy
from pytz import UTC
from six import text_type
from courseware.courses import get_course_by_id
from instructor_analytics.basic import list_problem_responses
from instructor_analytics.csvs import format_dictlist
from lms.djangoapps.certificates.models import CertificateWhitelist, GeneratedCertificate, certificate_info_for_user
from lms.djangoapps.grades.context import grading_context, grading_context_for_course
from lms.djangoapps.grades.models import PersistentCourseGrade
from lms.djangoapps.grades.course_grade_factory import CourseGradeFactory
from lms.djangoapps.teams.models import CourseTeamMembership
from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification
from openedx.core.djangoapps.content.block_structure.api import get_course_in_cache
from openedx.core.djangoapps.course_groups.cohorts import bulk_cache_cohorts, get_cohort, is_course_cohorted
from openedx.core.djangoapps.user_api.course_tag.api import BulkCourseTags
from student.models import CourseEnrollment
from student.roles import BulkRoleCache
from xmodule.modulestore.django import modulestore
from xmodule.partitions.partitions_service import PartitionService
from xmodule.split_test_module import get_split_user_partitions
from .runner import TaskProgress
from .utils import upload_csv_to_report_store
TASK_LOG = logging.getLogger('edx.celery.task')
ENROLLED_IN_COURSE = 'enrolled'
NOT_ENROLLED_IN_COURSE = 'unenrolled'
def _user_enrollment_status(user, course_id):
"""
Returns the enrollment activation status in the given course
for the given user.
"""
enrollment_is_active = CourseEnrollment.enrollment_mode_for_user(user, course_id)[1]
if enrollment_is_active:
return ENROLLED_IN_COURSE
return NOT_ENROLLED_IN_COURSE
def _flatten(iterable):
return list(chain.from_iterable(iterable))
class _CourseGradeReportContext(object):
"""
Internal class that provides a common context to use for a single grade
report. When a report is parallelized across multiple processes,
elements of this context are serialized and parsed across process
boundaries.
"""
def __init__(self, _xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
self.task_info_string = (
u'Task: {task_id}, '
u'InstructorTask ID: {entry_id}, '
u'Course: {course_id}, '
u'Input: {task_input}'
).format(
task_id=_xmodule_instance_args.get('task_id') if _xmodule_instance_args is not None else None,
entry_id=_entry_id,
course_id=course_id,
task_input=_task_input,
)
self.action_name = action_name
self.course_id = course_id
self.task_progress = TaskProgress(self.action_name, total=None, start_time=time())
@lazy
def course(self):
return get_course_by_id(self.course_id)
@lazy
def course_structure(self):
return get_course_in_cache(self.course_id)
@lazy
def course_experiments(self):
return get_split_user_partitions(self.course.user_partitions)
@lazy
def teams_enabled(self):
return self.course.teams_enabled
@lazy
def cohorts_enabled(self):
return is_course_cohorted(self.course_id)
@lazy
def graded_assignments(self):
"""
Returns an OrderedDict that maps an assignment type to a dict of
subsection-headers and average-header.
"""
grading_cxt = grading_context(self.course, self.course_structure)
graded_assignments_map = OrderedDict()
for assignment_type_name, subsection_infos in grading_cxt['all_graded_subsections_by_type'].iteritems():
graded_subsections_map = OrderedDict()
for subsection_index, subsection_info in enumerate(subsection_infos, start=1):
subsection = subsection_info['subsection_block']
header_name = u"{assignment_type} {subsection_index}: {subsection_name}".format(
assignment_type=assignment_type_name,
subsection_index=subsection_index,
subsection_name=subsection.display_name,
)
graded_subsections_map[subsection.location] = header_name
average_header = u"{assignment_type}".format(assignment_type=assignment_type_name)
# Use separate subsection and average columns only if
# there's more than one subsection.
separate_subsection_avg_headers = len(subsection_infos) > 1
if separate_subsection_avg_headers:
average_header += u" (Avg)"
graded_assignments_map[assignment_type_name] = {
'subsection_headers': graded_subsections_map,
'average_header': average_header,
'separate_subsection_avg_headers': separate_subsection_avg_headers,
'grader': grading_cxt['subsection_type_graders'].get(assignment_type_name),
}
return graded_assignments_map
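    # Illustrative (hypothetical) shape of the mapping built above, for a
    # course with two graded "Homework" subsections:
    #   OrderedDict([('Homework', {
    #       'subsection_headers': OrderedDict([(loc1, u'Homework 1: Sets'),
    #                                          (loc2, u'Homework 2: Maps')]),
    #       'average_header': u'Homework (Avg)',
    #       'separate_subsection_avg_headers': True,
    #       'grader': <grader for the Homework assignment type>,
    #   })])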
def update_status(self, message):
"""
Updates the status on the celery task to the given message.
Also logs the update.
"""
TASK_LOG.info(u'%s, Task type: %s, %s', self.task_info_string, self.action_name, message)
return self.task_progress.update_task_state(extra_meta={'step': message})
class _CertificateBulkContext(object):
def __init__(self, context, users):
certificate_whitelist = CertificateWhitelist.objects.filter(course_id=context.course_id, whitelist=True)
self.whitelisted_user_ids = [entry.user_id for entry in certificate_whitelist]
self.certificates_by_user = {
certificate.user.id: certificate
for certificate in
GeneratedCertificate.objects.filter(course_id=context.course_id, user__in=users)
}
class _TeamBulkContext(object):
def __init__(self, context, users):
self.enabled = context.teams_enabled
if self.enabled:
self.teams_by_user = {
membership.user.id: membership.team.name
for membership in
CourseTeamMembership.objects.filter(team__course_id=context.course_id, user__in=users)
}
else:
self.teams_by_user = {}
class _EnrollmentBulkContext(object):
def __init__(self, context, users):
CourseEnrollment.bulk_fetch_enrollment_states(users, context.course_id)
self.verified_users = [
verified.user.id for verified in
SoftwareSecurePhotoVerification.verified_query().filter(user__in=users).select_related('user')
]
class _CourseGradeBulkContext(object):
def __init__(self, context, users):
self.certs = _CertificateBulkContext(context, users)
self.teams = _TeamBulkContext(context, users)
self.enrollments = _EnrollmentBulkContext(context, users)
bulk_cache_cohorts(context.course_id, users)
BulkRoleCache.prefetch(users)
PersistentCourseGrade.prefetch(context.course_id, users)
BulkCourseTags.prefetch(context.course_id, users)
class CourseGradeReport(object):
"""
Class to encapsulate functionality related to generating Grade Reports.
"""
# Batch size for chunking the list of enrollees in the course.
USER_BATCH_SIZE = 100
@classmethod
def generate(cls, _xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
"""
Public method to generate a grade report.
"""
with modulestore().bulk_operations(course_id):
context = _CourseGradeReportContext(_xmodule_instance_args, _entry_id, course_id, _task_input, action_name)
return CourseGradeReport()._generate(context)
def _generate(self, context):
"""
Internal method for generating a grade report for the given context.
"""
context.update_status(u'Starting grades')
success_headers = self._success_headers(context)
error_headers = self._error_headers()
batched_rows = self._batched_rows(context)
context.update_status(u'Compiling grades')
success_rows, error_rows = self._compile(context, batched_rows)
context.update_status(u'Uploading grades')
self._upload(context, success_headers, success_rows, error_headers, error_rows)
return context.update_status(u'Completed grades')
def _success_headers(self, context):
"""
Returns a list of all applicable column headers for this grade report.
"""
return (
["Student ID", "Email", "Username"] +
self._grades_header(context) +
(['Cohort Name'] if context.cohorts_enabled else []) +
[u'Experiment Group ({})'.format(partition.name) for partition in context.course_experiments] +
(['Team Name'] if context.teams_enabled else []) +
['Enrollment Track', 'Verification Status'] +
['Certificate Eligible', 'Certificate Delivered', 'Certificate Type'] +
['Enrollment Status']
)
def _error_headers(self):
"""
Returns a list of error headers for this grade report.
"""
return ["Student ID", "Username", "Error"]
def _batched_rows(self, context):
"""
A generator of batches of (success_rows, error_rows) for this report.
"""
for users in self._batch_users(context):
users = filter(lambda u: u is not None, users)
yield self._rows_for_users(context, users)
def _compile(self, context, batched_rows):
"""
Compiles and returns the complete list of (success_rows, error_rows) for
the given batched_rows and context.
"""
# partition and chain successes and errors
success_rows, error_rows = izip(*batched_rows)
success_rows = list(chain(*success_rows))
error_rows = list(chain(*error_rows))
# update metrics on task status
context.task_progress.succeeded = len(success_rows)
context.task_progress.failed = len(error_rows)
context.task_progress.attempted = context.task_progress.succeeded + context.task_progress.failed
context.task_progress.total = context.task_progress.attempted
return success_rows, error_rows
def _upload(self, context, success_headers, success_rows, error_headers, error_rows):
"""
Creates and uploads a CSV for the given headers and rows.
"""
date = datetime.now(UTC)
upload_csv_to_report_store([success_headers] + success_rows, 'grade_report', context.course_id, date)
if len(error_rows) > 0:
error_rows = [error_headers] + error_rows
upload_csv_to_report_store(error_rows, 'grade_report_err', context.course_id, date)
def _grades_header(self, context):
"""
Returns the applicable grades-related headers for this report.
"""
graded_assignments = context.graded_assignments
grades_header = ["Grade"]
for assignment_info in graded_assignments.itervalues():
if assignment_info['separate_subsection_avg_headers']:
grades_header.extend(assignment_info['subsection_headers'].itervalues())
grades_header.append(assignment_info['average_header'])
return grades_header
def _batch_users(self, context):
"""
Returns a generator of batches of users.
"""
def grouper(iterable, chunk_size=self.USER_BATCH_SIZE, fillvalue=None):
args = [iter(iterable)] * chunk_size
return izip_longest(*args, fillvalue=fillvalue)
users = CourseEnrollment.objects.users_enrolled_in(context.course_id, include_inactive=True)
users = users.select_related('profile')
return grouper(users)
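    # Sketch of the batching behaviour with hypothetical values: if
    # USER_BATCH_SIZE were 3, grouper([u1, u2, u3, u4]) would yield
    # (u1, u2, u3) and (u4, None, None); _batched_rows() filters out the
    # None fill values before grading each batch.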
def _user_grades(self, course_grade, context):
"""
Returns a list of grade results for the given course_grade corresponding
to the headers for this report.
"""
grade_results = []
for assignment_type, assignment_info in context.graded_assignments.iteritems():
subsection_grades, subsection_grades_results = self._user_subsection_grades(
course_grade,
assignment_info['subsection_headers'],
)
grade_results.extend(subsection_grades_results)
assignment_average = self._user_assignment_average(course_grade, subsection_grades, assignment_info)
if assignment_average is not None:
grade_results.append([assignment_average])
return [course_grade.percent] + _flatten(grade_results)
def _user_subsection_grades(self, course_grade, subsection_headers):
"""
Returns a list of grade results for the given course_grade corresponding
to the headers for this report.
"""
subsection_grades = []
grade_results = []
for subsection_location in subsection_headers:
subsection_grade = course_grade.subsection_grade(subsection_location)
if subsection_grade.attempted_graded:
grade_result = subsection_grade.percent_graded
else:
grade_result = u'Not Attempted'
grade_results.append([grade_result])
subsection_grades.append(subsection_grade)
return subsection_grades, grade_results
    def _user_assignment_average(self, course_grade, subsection_grades, assignment_info):
        # Returns None when no separate average column is needed (or no grader
        # is available), 0.0 when the learner has not attempted anything, and
        # otherwise the grader's average with drops applied.
        assignment_average = None
        if assignment_info['separate_subsection_avg_headers']:
            if assignment_info['grader']:
                if course_grade.attempted:
                    subsection_breakdown = [
                        {'percent': subsection_grade.percent_graded}
                        for subsection_grade in subsection_grades
                    ]
                    assignment_average, _ = assignment_info['grader'].total_with_drops(subsection_breakdown)
                else:
                    assignment_average = 0.0
        return assignment_average
def _user_cohort_group_names(self, user, context):
"""
Returns a list of names of cohort groups in which the given user
belongs.
"""
cohort_group_names = []
if context.cohorts_enabled:
group = get_cohort(user, context.course_id, assign=False, use_cached=True)
cohort_group_names.append(group.name if group else '')
return cohort_group_names
def _user_experiment_group_names(self, user, context):
"""
Returns a list of names of course experiments in which the given user
belongs.
"""
experiment_group_names = []
for partition in context.course_experiments:
group = PartitionService(context.course_id).get_group(user, partition, assign=False)
experiment_group_names.append(group.name if group else '')
return experiment_group_names
def _user_team_names(self, user, bulk_teams):
"""
Returns a list of names of teams in which the given user belongs.
"""
team_names = []
if bulk_teams.enabled:
team_names = [bulk_teams.teams_by_user.get(user.id, '')]
return team_names
def _user_verification_mode(self, user, context, bulk_enrollments):
"""
Returns a list of enrollment-mode and verification-status for the
given user.
"""
enrollment_mode = CourseEnrollment.enrollment_mode_for_user(user, context.course_id)[0]
verification_status = SoftwareSecurePhotoVerification.verification_status_for_user(
user,
context.course_id,
enrollment_mode,
user_is_verified=user.id in bulk_enrollments.verified_users,
)
return [enrollment_mode, verification_status]
def _user_certificate_info(self, user, context, course_grade, bulk_certs):
"""
Returns the course certification information for the given user.
"""
is_whitelisted = user.id in bulk_certs.whitelisted_user_ids
certificate_info = certificate_info_for_user(
user,
course_grade.letter_grade,
is_whitelisted,
bulk_certs.certificates_by_user.get(user.id),
)
TASK_LOG.info(
u'Student certificate eligibility: %s '
u'(user=%s, course_id=%s, grade_percent=%s letter_grade=%s gradecutoffs=%s, allow_certificate=%s, '
u'is_whitelisted=%s)',
certificate_info[0],
user,
context.course_id,
course_grade.percent,
course_grade.letter_grade,
context.course.grade_cutoffs,
user.profile.allow_certificate,
is_whitelisted,
)
return certificate_info
def _rows_for_users(self, context, users):
"""
Returns a list of rows for the given users for this report.
"""
with modulestore().bulk_operations(context.course_id):
bulk_context = _CourseGradeBulkContext(context, users)
success_rows, error_rows = [], []
for user, course_grade, error in CourseGradeFactory().iter(
users,
course=context.course,
collected_block_structure=context.course_structure,
course_key=context.course_id,
):
if not course_grade:
# An empty gradeset means we failed to grade a student.
error_rows.append([user.id, user.username, text_type(error)])
else:
success_rows.append(
[user.id, user.email, user.username] +
self._user_grades(course_grade, context) +
self._user_cohort_group_names(user, context) +
self._user_experiment_group_names(user, context) +
self._user_team_names(user, bulk_context.teams) +
self._user_verification_mode(user, context, bulk_context.enrollments) +
self._user_certificate_info(user, context, course_grade, bulk_context.certs) +
[_user_enrollment_status(user, context.course_id)]
)
return success_rows, error_rows
class ProblemGradeReport(object):
@classmethod
def generate(cls, _xmodule_instance_args, _entry_id, course_id, _task_input, action_name):
"""
Generate a CSV containing all students' problem grades within a given
`course_id`.
"""
start_time = time()
start_date = datetime.now(UTC)
status_interval = 100
enrolled_students = CourseEnrollment.objects.users_enrolled_in(course_id, include_inactive=True)
task_progress = TaskProgress(action_name, enrolled_students.count(), start_time)
        # This OrderedDict maps Django User field names (keys) to the display
        # names of the static items in the header row (values), so the two
        # stay in sync when building each row.
header_row = OrderedDict([('id', 'Student ID'), ('email', 'Email'), ('username', 'Username')])
course = get_course_by_id(course_id)
graded_scorable_blocks = cls._graded_scorable_blocks_to_header(course)
# Just generate the static fields for now.
rows = [list(header_row.values()) + ['Enrollment Status', 'Grade'] + _flatten(graded_scorable_blocks.values())]
error_rows = [list(header_row.values()) + ['error_msg']]
current_step = {'step': 'Calculating Grades'}
# Bulk fetch and cache enrollment states so we can efficiently determine
# whether each user is currently enrolled in the course.
CourseEnrollment.bulk_fetch_enrollment_states(enrolled_students, course_id)
for student, course_grade, error in CourseGradeFactory().iter(enrolled_students, course):
student_fields = [getattr(student, field_name) for field_name in header_row]
task_progress.attempted += 1
if not course_grade:
err_msg = text_type(error)
# There was an error grading this student.
if not err_msg:
err_msg = u'Unknown error'
error_rows.append(student_fields + [err_msg])
task_progress.failed += 1
continue
enrollment_status = _user_enrollment_status(student, course_id)
earned_possible_values = []
for block_location in graded_scorable_blocks:
try:
problem_score = course_grade.problem_scores[block_location]
except KeyError:
earned_possible_values.append([u'Not Available', u'Not Available'])
else:
if problem_score.first_attempted:
earned_possible_values.append([problem_score.earned, problem_score.possible])
else:
earned_possible_values.append([u'Not Attempted', problem_score.possible])
rows.append(student_fields + [enrollment_status, course_grade.percent] + _flatten(earned_possible_values))
task_progress.succeeded += 1
if task_progress.attempted % status_interval == 0:
task_progress.update_task_state(extra_meta=current_step)
# Perform the upload if any students have been successfully graded
if len(rows) > 1:
upload_csv_to_report_store(rows, 'problem_grade_report', course_id, start_date)
# If there are any error rows, write them out as well
if len(error_rows) > 1:
upload_csv_to_report_store(error_rows, 'problem_grade_report_err', course_id, start_date)
return task_progress.update_task_state(extra_meta={'step': 'Uploading CSV'})
@classmethod
def _graded_scorable_blocks_to_header(cls, course):
"""
Returns an OrderedDict that maps a scorable block's id to its
headers in the final report.
"""
scorable_blocks_map = OrderedDict()
grading_context = grading_context_for_course(course)
for assignment_type_name, subsection_infos in grading_context['all_graded_subsections_by_type'].iteritems():
for subsection_index, subsection_info in enumerate(subsection_infos, start=1):
for scorable_block in subsection_info['scored_descendants']:
header_name = (
u"{assignment_type} {subsection_index}: "
u"{subsection_name} - {scorable_block_name}"
).format(
scorable_block_name=scorable_block.display_name,
assignment_type=assignment_type_name,
subsection_index=subsection_index,
subsection_name=subsection_info['subsection_block'].display_name,
)
scorable_blocks_map[scorable_block.location] = [header_name + " (Earned)",
header_name + " (Possible)"]
return scorable_blocks_map
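        # Illustrative header pair produced above (hypothetical block names):
        #   u"Homework 2: Vectors - Problem 3 (Earned)",
        #   u"Homework 2: Vectors - Problem 3 (Possible)"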
class ProblemResponses(object):
@classmethod
def generate(cls, _xmodule_instance_args, _entry_id, course_id, task_input, action_name):
"""
For a given `course_id`, generate a CSV file containing
all student answers to a given problem, and store using a `ReportStore`.
"""
start_time = time()
start_date = datetime.now(UTC)
num_reports = 1
task_progress = TaskProgress(action_name, num_reports, start_time)
current_step = {'step': 'Calculating students answers to problem'}
task_progress.update_task_state(extra_meta=current_step)
# Compute result table and format it
problem_location = task_input.get('problem_location')
student_data = list_problem_responses(course_id, problem_location)
features = ['username', 'state']
header, rows = format_dictlist(student_data, features)
task_progress.attempted = task_progress.succeeded = len(rows)
task_progress.skipped = task_progress.total - task_progress.attempted
rows.insert(0, header)
current_step = {'step': 'Uploading CSV'}
task_progress.update_task_state(extra_meta=current_step)
# Perform the upload
problem_location = re.sub(r'[:/]', '_', problem_location)
csv_name = 'student_state_from_{}'.format(problem_location)
upload_csv_to_report_store(rows, csv_name, course_id, start_date)
return task_progress.update_task_state(extra_meta=current_step)
|
procangroup/edx-platform
|
lms/djangoapps/instructor_task/tasks_helper/grades.py
|
Python
|
agpl-3.0
| 25,153
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
class PyAzureKeyvault(PythonPackage):
"""Microsoft Azure Key Vault Client Libraries for Python."""
homepage = "https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/keyvault/azure-keyvault"
url = "https://pypi.io/packages/source/a/azure-keyvault/azure-keyvault-4.1.0.zip"
version('4.1.0', sha256='69002a546921a8290eb54d9a3805cfc515c321bc1d4c0bfcfb463620245eca40')
version('1.1.0', sha256='37a8e5f376eb5a304fcd066d414b5d93b987e68f9212b0c41efa37d429aadd49')
depends_on('py-setuptools', type='build')
depends_on('py-azure-keyvault-certificates@4.1:4.999', when='@4:', type=('build', 'run'))
depends_on('py-azure-keyvault-secrets@4.1:4.999', when='@4:', type=('build', 'run'))
depends_on('py-azure-keyvault-keys@4.1:4.999', when='@4:', type=('build', 'run'))
depends_on('py-msrest@0.5.0:', when='@:1', type=('build', 'run'))
depends_on('py-msrestazure@0.4.32:1.999', when='@:1', type=('build', 'run'))
depends_on('py-azure-common@1.1:1.999', when='@:1', type=('build', 'run'))
depends_on('py-cryptography@2.1.4:', when='@:1', type=('build', 'run'))
depends_on('py-requests@2.18.4:', when='@:1', type=('build', 'run'))
|
rspavel/spack
|
var/spack/repos/builtin/packages/py-azure-keyvault/package.py
|
Python
|
lgpl-2.1
| 1,385
|
#!/usr/bin/env python
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('spyne.protocol.xml')
logger.setLevel(logging.DEBUG)
from spyne.application import Application
from spyne.test.interop.server._service import services
from spyne.protocol.http import HttpRpc
from spyne.protocol.soap import Soap12
from spyne.server.wsgi import WsgiApplication
httprpc_soap_application = Application(services,
'spyne.test.interop.server.httprpc.soap', in_protocol=HttpRpc(), out_protocol=Soap12())
host = '127.0.0.1'
port = 9753
if __name__ == '__main__':
try:
from wsgiref.simple_server import make_server
from wsgiref.validate import validator
wsgi_application = WsgiApplication(httprpc_soap_application)
server = make_server(host, port, validator(wsgi_application))
        logger.info('Starting interop server at %s:%s.' % (host, port))
logger.info('WSDL is at: /?wsdl')
server.serve_forever()
except ImportError:
print("Error: example server code requires Python >= 2.5")
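# A quick way to exercise the server once it is running (host, port and the
# WSDL path come from the values and log message above):
#
#     curl 'http://127.0.0.1:9753/?wsdl'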
|
plq/spyne
|
spyne/test/interop/server/soap12/httprpc_soap_basic.py
|
Python
|
lgpl-2.1
| 1,851
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""This package contains modules with hooks for various stages in the
Spack install process. You can add modules here and they'll be
executed by package at various times during the package lifecycle.
Each hook is just a function that takes a package as a parameter.
Hooks are not executed in any particular order.
Currently the following hooks are supported:
* pre_install(spec)
* post_install(spec)
* pre_uninstall(spec)
* post_uninstall(spec)
* on_install_start(spec)
* on_install_success(spec)
* on_install_failure(spec)
* on_phase_success(pkg, phase_name, log_file)
* on_phase_error(pkg, phase_name, log_file)
* on_install_cancel(spec)
* on_analyzer_save(pkg, result)
* post_env_write(env)
This can be used to implement support for things like module
systems (e.g. modules, lmod, etc.) or to add other custom
features.
"""
import llnl.util.lang
import spack.paths
class _HookRunner(object):
#: Stores all hooks on first call, shared among
#: all HookRunner objects
_hooks = None
def __init__(self, hook_name):
self.hook_name = hook_name
@classmethod
def _populate_hooks(cls):
# Lazily populate the list of hooks
cls._hooks = []
relative_names = list(llnl.util.lang.list_modules(
spack.paths.hooks_path
))
# We want this hook to be the last registered
relative_names.sort(key=lambda x: x == 'write_install_manifest')
assert relative_names[-1] == 'write_install_manifest'
for name in relative_names:
module_name = __name__ + '.' + name
# When importing a module from a package, __import__('A.B', ...)
# returns package A when 'fromlist' is empty. If fromlist is not
# empty it returns the submodule B instead
# See: https://stackoverflow.com/a/2725668/771663
module_obj = __import__(module_name, fromlist=[None])
cls._hooks.append((module_name, module_obj))
@property
def hooks(self):
if not self._hooks:
self._populate_hooks()
return self._hooks
def __call__(self, *args, **kwargs):
for _, module in self.hooks:
if hasattr(module, self.hook_name):
hook = getattr(module, self.hook_name)
if hasattr(hook, '__call__'):
hook(*args, **kwargs)
# pre/post install hooks, run by the install subprocess
pre_install = _HookRunner('pre_install')
post_install = _HookRunner('post_install')
# These hooks are run within an install subprocess
pre_uninstall = _HookRunner('pre_uninstall')
post_uninstall = _HookRunner('post_uninstall')
on_phase_success = _HookRunner('on_phase_success')
on_phase_error = _HookRunner('on_phase_error')
# These are hooks in installer.py, before starting install subprocess
on_install_start = _HookRunner('on_install_start')
on_install_success = _HookRunner('on_install_success')
on_install_failure = _HookRunner('on_install_failure')
on_install_cancel = _HookRunner('on_install_cancel')
# Analyzer hooks
on_analyzer_save = _HookRunner('on_analyzer_save')
# Environment hooks
post_env_write = _HookRunner('post_env_write')
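# A minimal sketch (not part of Spack) of what a hook module dropped into
# this package might look like; _HookRunner discovers modules by name under
# spack.paths.hooks_path and calls any function matching the hook name.
#
#     # lib/spack/spack/hooks/log_installs.py  (hypothetical file)
#     import llnl.util.tty as tty
#
#     def post_install(spec):
#         tty.msg("finished installing {0}".format(spec.name))
#
#     def on_install_failure(spec):
#         tty.warn("install of {0} failed".format(spec.name))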
|
LLNL/spack
|
lib/spack/spack/hooks/__init__.py
|
Python
|
lgpl-2.1
| 3,426
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class NvptxTools(AutotoolsPackage):
"""nvptx-tools: A collection of tools for use with nvptx-none GCC
toolchains. These tools are necessary when building a version
of GCC that enables offloading of OpenMP/OpenACC code to NVIDIA
GPUs."""
homepage = "https://github.com/MentorEmbedded/nvptx-tools"
git = "https://github.com/MentorEmbedded/nvptx-tools"
version('2018-03-01', commit='5f6f343a302d620b0868edab376c00b15741e39e')
depends_on('binutils')
depends_on('cuda')
def configure_args(self):
cuda_dir = self.spec['cuda'].prefix
config_args = [
"--with-cuda-driver-include={0}".format(cuda_dir.include),
"--with-cuda-driver-lib={0}".format(cuda_dir.lib64)
]
return config_args
|
rspavel/spack
|
var/spack/repos/builtin/packages/nvptx-tools/package.py
|
Python
|
lgpl-2.1
| 1,002
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import os
class Gnuplot(AutotoolsPackage):
"""Gnuplot is a portable command-line driven graphing utility for Linux,
OS/2, MS Windows, OSX, VMS, and many other platforms. The source
code is copyrighted but freely distributed (i.e., you don't have
to pay for it). It was originally created to allow scientists and
students to visualize mathematical functions and data
interactively, but has grown to support many non-interactive uses
such as web scripting. It is also used as a plotting engine by
third-party applications like Octave. Gnuplot has been supported
and under active development since 1986
"""
homepage = "http://www.gnuplot.info"
url = "http://downloads.sourceforge.net/project/gnuplot/gnuplot/5.0.6/gnuplot-5.0.6.tar.gz"
# There is a conflict in term.h between gnuplot and ncurses, which is a
# dependency of readline. Fix it with a small patch
patch('term_include.patch')
version('5.2.8', sha256='60a6764ccf404a1668c140f11cc1f699290ab70daa1151bb58fed6139a28ac37')
version('5.2.7', sha256='97fe503ff3b2e356fe2ae32203fc7fd2cf9cef1f46b60fe46dc501a228b9f4ed')
version('5.2.5', sha256='039db2cce62ddcfd31a6696fe576f4224b3bc3f919e66191dfe2cdb058475caa')
version('5.2.2', sha256='a416d22f02bdf3873ef82c5eb7f8e94146795811ef808e12b035ada88ef7b1a1')
version('5.2.0', sha256='7dfe6425a1a6b9349b1fb42dae46b2e52833b13e807a78a613024d6a99541e43')
version('5.0.7', sha256='0ad760ff013b4a9cf29853fa9b50c50030a33cd8fb86220a23abb466655136fc')
version('5.0.6', sha256='5bbe4713e555c2e103b7d4ffd45fca69551fff09cf5c3f9cb17428aaacc9b460')
version('5.0.5', sha256='25f3e0bf192e01115c580f278c3725d7a569eb848786e12b455a3fda70312053')
version('5.0.1', sha256='7cbc557e71df581ea520123fb439dea5f073adcc9010a2885dc80d4ed28b3c47')
variant('wx', default=False,
description='Activates wxWidgets terminal')
variant('gd', default=True,
description='Activates gd based terminal')
variant('cairo', default=True,
description='Activates cairo based terminal')
variant('X', default=False,
description='Build with X11')
variant('libcerf', default=True,
description='Build with libcerf support')
variant('pbm', default=False,
description='Enable PBM (Portable Bit Map) and other older bitmap terminals') # NOQA: ignore=E501
variant('qt', default=False,
description='Build with QT')
# required dependencies
depends_on('readline')
depends_on('pkgconfig', type='build')
depends_on('libxpm')
depends_on('iconv')
# optional dependencies:
depends_on('libcerf', when='+libcerf')
depends_on('libgd', when='+gd')
depends_on('cairo@1.2:', when='+cairo')
depends_on('wxwidgets', when='+wx')
depends_on('pango@1.10:', when='+wx')
depends_on('libsm', when='+wx')
depends_on('pango@1.10:', when='+cairo')
depends_on('libx11', when='+X')
depends_on('qt@5.7:+opengl', when='+qt')
depends_on('qt+framework', when='+qt platform=darwin')
def configure_args(self):
# see https://github.com/Homebrew/homebrew-core/blob/master/Formula/gnuplot.rb
# and https://github.com/macports/macports-ports/blob/master/math/gnuplot/Portfile
spec = self.spec
options = [
'--disable-dependency-tracking',
'--disable-silent-rules',
# Per upstream: "--with-tutorial is horribly out of date."
'--without-tutorial',
'--with-readline=%s' % spec['readline'].prefix
]
if '+pbm' in spec:
options.append('--with-bitmap-terminals')
else:
options.append('--without-bitmap-terminals')
if '+X' in spec:
# It seems there's an open bug for wxWidgets support
# See : http://sourceforge.net/p/gnuplot/bugs/1694/
os.environ['TERMLIBS'] = '-lX11'
options.append('--with-x')
else:
options.append('--without-x')
if '+qt' in spec:
options.append('--with-qt=qt5')
# QT needs C++11 compiler:
os.environ['CXXFLAGS'] = '{0}'.format(self.compiler.cxx11_flag)
if spec.satisfies('platform=darwin'):
qt_path = spec['qt'].prefix
# see
# http://gnuplot.10905.n7.nabble.com/Building-with-Qt-depends-on-pkg-config-Qt-5-term-doesn-t-work-on-OS-X-td18063.html
os.environ['QT_LIBS'] = (
'-F{0}/lib ' +
'-framework QtCore ' +
'-framework QtGui ' +
'-framework QtWidgets ' +
'-framework QtNetwork ' +
'-framework QtSvg ' +
'-framework QtPrintSupport').format(qt_path)
os.environ['QT_CFLAGS'] = (
'-F{0}/lib ' +
'-I{0}/lib/QtCore.framework/Headers ' +
'-I{0}/lib/QtGui.framework/Headers ' +
'-I{0}/lib/QtWidgets.framework/Headers ' +
'-I{0}/lib/QtNetwork.framework/Headers ' +
'-I{0}/lib/QtSvg.framework/Headers').format(qt_path)
else:
options.append('--with-qt=no')
if '+wx' in spec:
options.append('--with-wx=%s' % spec['wxwidgets'].prefix)
else:
options.append('--disable-wxwidgets')
if '+gd' in spec:
options.append('--with-gd=%s' % spec['libgd'].prefix)
else:
options.append('--without-gd')
if '+cairo' in spec:
options.append('--with-cairo')
else:
options.append('--without-cairo')
if '+libcerf' in spec:
options.append('--with-libcerf')
else:
options.append('--without-libcerf')
# TODO: Enable pdflib-based pdf terminal
# '--with-pdf=%s' % spec['pdflib-lite'].prefix (or pdflib)
options.append('--without-pdf')
# TODO: Enable lua-based terminals
options.append('--without-lua')
# TODO: --with-latex
options.append('--without-latex')
# TODO: --with-aquaterm depends_on('aquaterm')
options.append('--without-aquaterm')
return options
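        # Illustrative mapping (hypothetical spec): installing `gnuplot+qt+X~wx`
        # would make the logic above emit '--with-qt=qt5', '--with-x' and
        # '--disable-wxwidgets' among the configure arguments.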
|
iulian787/spack
|
var/spack/repos/builtin/packages/gnuplot/package.py
|
Python
|
lgpl-2.1
| 6,582
|
#!/usr/bin/python3
import argparse as ap
import shared
def create_parser():
parser = ap.ArgumentParser()
parser.add_argument("old-version", type=shared.version_type)
parser.add_argument("new-version", type=shared.version_type)
shared.update_parser_with_common_stuff(parser)
return parser
def create_new_milestone(repo, name):
assert shared.get_milestone(repo, name) is None, \
f"There already is a milestone {name}"
return repo.create_milestone(name, "open")
def transfer_open_issues_and_prs_to_new_milestone(repo, old_milestone, new_milestone):
assert old_milestone.title != new_milestone.title, \
f"The new and old milestones have the same title '{new_milestone.title}'"
old_milestone_issues = repo.get_issues(milestone=old_milestone, state="open")
for issue in old_milestone_issues:
issue.edit(milestone=new_milestone)
def close_milestone(milestone):
milestone.edit(milestone.title, state="closed")
if __name__ == "__main__":
parser = create_parser()
args = parser.parse_args()
github = shared.get_github(args)
repo = shared.get_repo(github, args.owner)
old_milestone = shared.get_milestone(repo, vars(args)["old-version"])
new_milestone = create_new_milestone(repo, vars(args)["new-version"])
transfer_open_issues_and_prs_to_new_milestone(repo, old_milestone, new_milestone)
close_milestone(old_milestone)
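# Example invocation (version strings are illustrative; any extra options are
# whatever shared.update_parser_with_common_stuff() adds):
#
#     ./move-milestones.py 1.3.4 1.3.5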
|
mpreisler/openscap
|
release_tools/move-milestones.py
|
Python
|
lgpl-2.1
| 1,424
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Coin3d(AutotoolsPackage):
"""Coin is an OpenGL-based, 3D graphics library that has its roots in the
Open Inventor 2.1 API, which Coin still is compatible with."""
homepage = "https://github.com/coin3d/coin"
url = "https://github.com/coin3d/coin/archive/Coin-4.0.0.tar.gz"
version('3.1.0', sha256='70dd5ef39406e1d9e05eeadd54a5b51884a143e127530876a97744ca54173dc3')
version('3.0.0', sha256='d5c2eb0ecaa5c83d93daf0e9e275e58a6a8dfadc74c873d51b0c939011f81bfa')
version('2.0.0', sha256='6d26435aa962d085b7accd306a0b478069a7de1bc5ca24e22344971852dd097c')
depends_on('boost@1.45.0:', type='build')
depends_on('doxygen', when='+html', type='build')
depends_on('perl', when='+html', type='build')
depends_on('openglu', type='link')
depends_on('opengl', type='link')
depends_on('libsm', type='link')
depends_on('libxext', type='link')
depends_on('libice', type='link')
depends_on('libuuid', type='link')
depends_on('libxcb', type='link')
depends_on('libxau', type='link')
variant('html', default=False, description='Build and install Coin HTML documentation')
variant('man', default=False, description='Build and install Coin man pages')
variant('framework', default=False, description="Do 'UNIX-style' installation on Mac OS X")
variant('shared', default=True, description='Build shared library (off: build static library)')
variant('debug', default=False, description='Make debug build')
variant('symbols', default=False, description='Enable debug symbols')
def configure_args(self):
args = []
args += self.enable_or_disable('framework')
args += self.enable_or_disable('shared')
args += self.enable_or_disable('html')
args += self.enable_or_disable('man')
args += self.enable_or_disable('symbols')
args += self.enable_or_disable('debug')
args.append("--with-boost=" + self.spec['boost'].prefix)
args.append("--with-boost-libdir=" + self.spec['boost'].prefix.lib)
return args
|
rspavel/spack
|
var/spack/repos/builtin/packages/coin3d/package.py
|
Python
|
lgpl-2.1
| 2,287
|
# -*- coding: utf-8 -*-
"""
sphinxcontrib.plantuml
~~~~~~~~~~~~~~~~~~~~~~
Embed PlantUML diagrams on your documentation.
:copyright: Copyright 2010 by Yuya Nishihara <yuya@tcha.org>.
:license: BSD, see LICENSE for details.
"""
import codecs
import errno
import hashlib
import os
import re
import shlex
import subprocess
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.errors import SphinxError
from sphinx.util.compat import Directive
from sphinx.util.osutil import (
ensuredir,
ENOENT,
)
try:
from PIL import Image
except ImportError:
Image = None
try:
from sphinx.util.i18n import search_image_for_language
except ImportError: # Sphinx < 1.4
def search_image_for_language(filename, env):
return filename
class PlantUmlError(SphinxError):
pass
class plantuml(nodes.General, nodes.Element):
pass
def align(argument):
align_values = ('left', 'center', 'right')
return directives.choice(argument, align_values)
class UmlDirective(Directive):
"""Directive to insert PlantUML markup
Example::
.. uml::
:alt: Alice and Bob
Alice -> Bob: Hello
Alice <- Bob: Hi
"""
has_content = True
required_arguments = 0
optional_arguments = 1
option_spec = {'alt': directives.unchanged,
'caption': directives.unchanged,
'height': directives.length_or_unitless,
'width': directives.length_or_percentage_or_unitless,
'scale': directives.percentage,
'align': align,
}
def run(self):
warning = self.state.document.reporter.warning
env = self.state.document.settings.env
if self.arguments and self.content:
return [warning('uml directive cannot have both content and '
'a filename argument', line=self.lineno)]
if self.arguments:
fn = search_image_for_language(self.arguments[0], env)
relfn, absfn = env.relfn2path(fn)
env.note_dependency(relfn)
try:
umlcode = _read_utf8(absfn)
except (IOError, UnicodeDecodeError) as err:
return [warning('PlantUML file "%s" cannot be read: %s'
% (fn, err), line=self.lineno)]
else:
relfn = env.doc2path(env.docname, base=None)
umlcode = '\n'.join(self.content)
node = plantuml(self.block_text, **self.options)
node['uml'] = umlcode
node['incdir'] = os.path.dirname(relfn)
        # XXX maybe this should be moved to the _visit_plantuml functions;
        # it seems wrong to insert a "figure" node from the "plantuml" directive.
if 'caption' in self.options or 'align' in self.options:
node = nodes.figure('', node)
if 'align' in self.options:
node['align'] = self.options['align']
if 'caption' in self.options:
import docutils.statemachine
cnode = nodes.Element() # anonymous container for parsing
sl = docutils.statemachine.StringList([self.options['caption']],
source='')
self.state.nested_parse(sl, self.content_offset, cnode)
caption = nodes.caption(self.options['caption'], '', *cnode)
node += caption
return [node]
def _read_utf8(filename):
fp = codecs.open(filename, 'rb', 'utf-8')
try:
return fp.read()
finally:
fp.close()
def generate_name(self, node, fileformat):
h = hashlib.sha1()
    # the same UML source may include files relative to the doc, so the
    # include dir is part of the cache key
h.update(node['incdir'].encode('utf-8'))
h.update(b'\0')
h.update(node['uml'].encode('utf-8'))
key = h.hexdigest()
fname = 'plantuml-%s.%s' % (key, fileformat)
imgpath = getattr(self.builder, 'imgpath', None)
if imgpath:
return ('/'.join((self.builder.imgpath, fname)),
os.path.join(self.builder.outdir, '_images', fname))
else:
return fname, os.path.join(self.builder.outdir, fname)
_ARGS_BY_FILEFORMAT = {
'eps': '-teps'.split(),
'png': (),
'svg': '-tsvg'.split(),
}
def generate_plantuml_args(self, fileformat):
if isinstance(self.builder.config.plantuml, (tuple, list)):
args = list(self.builder.config.plantuml)
else:
args = shlex.split(self.builder.config.plantuml)
args.extend('-pipe -charset utf-8'.split())
args.extend(_ARGS_BY_FILEFORMAT[fileformat])
return args
def render_plantuml(self, node, fileformat):
refname, outfname = generate_name(self, node, fileformat)
if os.path.exists(outfname):
return refname, outfname # don't regenerate
absincdir = os.path.join(self.builder.srcdir, node['incdir'])
ensuredir(os.path.dirname(outfname))
f = open(outfname, 'wb')
try:
try:
p = subprocess.Popen(generate_plantuml_args(self, fileformat),
stdout=f, stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=absincdir)
except OSError as err:
if err.errno != ENOENT:
raise
raise PlantUmlError('plantuml command %r cannot be run'
% self.builder.config.plantuml)
serr = p.communicate(node['uml'].encode('utf-8'))[1]
if p.returncode != 0:
raise PlantUmlError('error while running plantuml\n\n%s' % serr)
return refname, outfname
finally:
f.close()
def _get_png_tag(self, fnames, node):
refname, _outfname = fnames['png']
alt = node.get('alt', node['uml'])
    # Mimic StandaloneHTMLBuilder.post_process_images(); maybe we should
    # process images prior to html_visit.
scale_keys = ('scale', 'width', 'height')
if all(key not in node for key in scale_keys) or Image is None:
return ('<img src="%s" alt="%s" />\n'
% (self.encode(refname), self.encode(alt)))
# Get sizes from the rendered image (defaults)
im = Image.open(_outfname)
im.load()
(fw, fh) = im.size
# Regex to get value and units
vu = re.compile(r"(?P<value>\d+)\s*(?P<units>[a-zA-Z%]+)?")
# Width
if 'width' in node:
m = vu.match(node['width'])
if not m:
raise PlantUmlError('Invalid width')
else:
m = m.groupdict()
w = int(m['value'])
wu = m['units'] if m['units'] else 'px'
else:
w = fw
wu = 'px'
# Height
if 'height' in node:
m = vu.match(node['height'])
if not m:
raise PlantUmlError('Invalid height')
else:
m = m.groupdict()
h = int(m['value'])
hu = m['units'] if m['units'] else 'px'
else:
h = fh
hu = 'px'
# Scale
if 'scale' not in node:
node['scale'] = 100
return ('<a href="%s"><img src="%s" alt="%s" width="%s%s" height="%s%s"/>'
'</a>\n'
% (self.encode(refname),
self.encode(refname),
self.encode(alt),
self.encode(w * node['scale'] / 100),
self.encode(wu),
self.encode(h * node['scale'] / 100),
self.encode(hu)))
def _get_svg_style(fname):
f = open(fname)
try:
for l in f:
m = re.search(r'<svg\b([^<>]+)', l)
if m:
attrs = m.group(1)
break
else:
return
finally:
f.close()
m = re.search(r'\bstyle=[\'"]([^\'"]+)', attrs)
if not m:
return
return m.group(1)
def _get_svg_tag(self, fnames, node):
refname, outfname = fnames['svg']
return '\n'.join([
# copy width/height style from <svg> tag, so that <object> area
# has enough space.
'<object data="%s" type="image/svg+xml" style="%s">' % (
self.encode(refname), _get_svg_style(outfname) or ''),
_get_png_tag(self, fnames, node),
'</object>'])
_KNOWN_HTML_FORMATS = {
'png': (('png',), _get_png_tag),
'svg': (('png', 'svg'), _get_svg_tag),
}
def html_visit_plantuml(self, node):
try:
format = self.builder.config.plantuml_output_format
try:
fileformats, gettag = _KNOWN_HTML_FORMATS[format]
except KeyError:
raise PlantUmlError(
'plantuml_output_format must be one of %s, but is %r'
% (', '.join(map(repr, _KNOWN_HTML_FORMATS)), format))
# fnames: {fileformat: (refname, outfname), ...}
fnames = dict((e, render_plantuml(self, node, e))
for e in fileformats)
except PlantUmlError as err:
self.builder.warn(str(err))
raise nodes.SkipNode
self.body.append(self.starttag(node, 'p', CLASS='plantuml'))
self.body.append(gettag(self, fnames, node))
self.body.append('</p>\n')
raise nodes.SkipNode
def _convert_eps_to_pdf(self, refname, fname):
if isinstance(self.builder.config.plantuml_epstopdf, (tuple, list)):
args = list(self.builder.config.plantuml_epstopdf)
else:
args = shlex.split(self.builder.config.plantuml_epstopdf)
args.append(fname)
try:
try:
p = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError as err:
# workaround for missing shebang of epstopdf script
if err.errno != getattr(errno, 'ENOEXEC', 0):
raise
p = subprocess.Popen(['bash'] + args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError as err:
if err.errno != ENOENT:
raise
raise PlantUmlError('epstopdf command %r cannot be run'
% self.builder.config.plantuml_epstopdf)
serr = p.communicate()[1]
if p.returncode != 0:
raise PlantUmlError('error while running epstopdf\n\n' + serr)
return refname[:-4] + '.pdf', fname[:-4] + '.pdf'
_KNOWN_LATEX_FORMATS = {
'eps': ('eps', lambda self, refname, fname: (refname, fname)),
'pdf': ('eps', _convert_eps_to_pdf),
'png': ('png', lambda self, refname, fname: (refname, fname)),
}
def latex_visit_plantuml(self, node):
try:
format = self.builder.config.plantuml_latex_output_format
try:
fileformat, postproc = _KNOWN_LATEX_FORMATS[format]
except KeyError:
raise PlantUmlError(
'plantuml_latex_output_format must be one of %s, but is %r'
% (', '.join(map(repr, _KNOWN_LATEX_FORMATS)), format))
refname, outfname = render_plantuml(self, node, fileformat)
refname, outfname = postproc(self, refname, outfname)
except PlantUmlError as err:
self.builder.warn(str(err))
raise nodes.SkipNode
# put node representing rendered image
img_node = nodes.image(uri=refname, **node.attributes)
img_node.delattr('uml')
if not img_node.hasattr('alt'):
img_node['alt'] = node['uml']
node.append(img_node)
def latex_depart_plantuml(self, node):
pass
def pdf_visit_plantuml(self, node):
try:
refname, outfname = render_plantuml(self, node, 'eps')
refname, outfname = _convert_eps_to_pdf(self, refname, outfname)
except PlantUmlError as err:
self.builder.warn(str(err))
raise nodes.SkipNode
rep = nodes.image(uri=outfname, alt=node.get('alt', node['uml']))
node.parent.replace(node, rep)
def setup(app):
app.add_node(plantuml,
html=(html_visit_plantuml, None),
latex=(latex_visit_plantuml, latex_depart_plantuml))
app.add_directive('uml', UmlDirective)
app.add_config_value('plantuml', 'plantuml', 'html')
app.add_config_value('plantuml_output_format', 'png', 'html')
app.add_config_value('plantuml_epstopdf', 'epstopdf', '')
app.add_config_value('plantuml_latex_output_format', 'png', '')
# imitate what app.add_node() does
if 'rst2pdf.pdfbuilder' in app.config.extensions:
from rst2pdf.pdfbuilder import PDFTranslator as translator
setattr(translator, 'visit_' + plantuml.__name__, pdf_visit_plantuml)
return {'parallel_read_safe': True}
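# A minimal sketch of enabling this extension from a Sphinx conf.py; the jar
# path and output format are illustrative values for the config entries
# registered in setup() above:
#
#     extensions = ['sphinxcontrib.plantuml']
#     plantuml = 'java -jar /path/to/plantuml.jar'
#     plantuml_output_format = 'svg'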
|
a1ezzz/wasp-launcher
|
thirdparty/sphinxcontrib-plantuml-0.8.1/sphinxcontrib/plantuml.py
|
Python
|
lgpl-3.0
| 12,436
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import subprocess
from airflow.hooks.base_hook import BaseHook
from airflow.exceptions import AirflowException
from airflow.utils.log.logging_mixin import LoggingMixin
class SparkSqlHook(BaseHook):
"""
This hook is a wrapper around the spark-sql binary. It requires that the
"spark-sql" binary is in the PATH.
:param sql: The SQL query to execute
:type sql: str
:param conf: arbitrary Spark configuration property
:type conf: str (format: PROP=VALUE)
:param conn_id: connection_id string
:type conn_id: str
:param total_executor_cores: (Standalone & Mesos only) Total cores for all executors (Default: all the available cores on the worker)
:type total_executor_cores: int
:param executor_cores: (Standalone & YARN only) Number of cores per executor (Default: 2)
:type executor_cores: int
:param executor_memory: Memory per executor (e.g. 1000M, 2G) (Default: 1G)
:type executor_memory: str
:param keytab: Full path to the file that contains the keytab
:type keytab: str
:param master: spark://host:port, mesos://host:port, yarn, or local
:type master: str
:param name: Name of the job.
:type name: str
:param num_executors: Number of executors to launch
:type num_executors: int
:param verbose: Whether to pass the verbose flag to spark-sql
:type verbose: bool
:param yarn_queue: The YARN queue to submit to (Default: "default")
:type yarn_queue: str
"""
def __init__(self,
sql,
conf=None,
conn_id='spark_sql_default',
total_executor_cores=None,
executor_cores=None,
executor_memory=None,
keytab=None,
principal=None,
master='yarn',
name='default-name',
num_executors=None,
verbose=True,
yarn_queue='default'
):
self._sql = sql
self._conf = conf
self._conn = self.get_connection(conn_id)
self._total_executor_cores = total_executor_cores
self._executor_cores = executor_cores
self._executor_memory = executor_memory
self._keytab = keytab
self._principal = principal
self._master = master
self._name = name
self._num_executors = num_executors
self._verbose = verbose
self._yarn_queue = yarn_queue
self._sp = None
def get_conn(self):
pass
def _prepare_command(self, cmd):
"""
        Construct the spark-sql command to execute. Verbose output is enabled
        by default.
:param cmd: command to append to the spark-sql command
:type cmd: str
:return: full command to be executed
"""
connection_cmd = ["spark-sql"]
if self._conf:
for conf_el in self._conf.split(","):
connection_cmd += ["--conf", conf_el]
if self._total_executor_cores:
connection_cmd += ["--total-executor-cores", str(self._total_executor_cores)]
if self._executor_cores:
connection_cmd += ["--executor-cores", str(self._executor_cores)]
if self._executor_memory:
connection_cmd += ["--executor-memory", self._executor_memory]
if self._keytab:
connection_cmd += ["--keytab", self._keytab]
if self._principal:
connection_cmd += ["--principal", self._principal]
if self._num_executors:
connection_cmd += ["--num-executors", str(self._num_executors)]
if self._sql:
sql = self._sql.strip()
if sql.endswith(".sql") or sql.endswith(".hql"):
connection_cmd += ["-f", sql]
else:
connection_cmd += ["-e", sql]
if self._master:
connection_cmd += ["--master", self._master]
if self._name:
connection_cmd += ["--name", self._name]
if self._verbose:
connection_cmd += ["--verbose"]
if self._yarn_queue:
connection_cmd += ["--queue", self._yarn_queue]
connection_cmd += cmd
self.log.debug("Spark-Sql cmd: %s", connection_cmd)
return connection_cmd
def run_query(self, cmd="", **kwargs):
"""
        Run spark-sql in a subprocess (actually execute the Spark SQL query)
:param cmd: command to remotely execute
:param kwargs: extra arguments to Popen (see subprocess.Popen)
"""
spark_sql_cmd = self._prepare_command(cmd)
self._sp = subprocess.Popen(spark_sql_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
**kwargs)
for line in iter(self._sp.stdout.readline, ''):
self.log.info(line)
returncode = self._sp.wait()
if returncode:
raise AirflowException(
"Cannot execute {} on {}. Process exit code: {}.".format(
cmd, self._conn.host, returncode
)
)
def kill(self):
if self._sp and self._sp.poll() is None:
self.log.info("Killing the Spark-Sql job")
self._sp.kill()
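# A minimal usage sketch (illustrative values; it assumes an Airflow
# connection named 'spark_sql_default' is configured):
#
#     hook = SparkSqlHook(sql="SELECT 1", master="local", name="smoke-test")
#     hook.run_query()
#     # runs roughly: spark-sql -e "SELECT 1" --master local --name smoke-test --verbose --queue default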
|
Tagar/incubator-airflow
|
airflow/contrib/hooks/spark_sql_hook.py
|
Python
|
apache-2.0
| 6,107
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Contains the core classes and functionality that makes Horizon what it is.
This module is considered internal, and should not be relied on directly.
Public APIs are made available through the :mod:`horizon` module and
the classes contained therein.
"""
import collections
import copy
import inspect
import logging
import os
import django
from django.conf import settings
from django.conf.urls import include
from django.conf.urls import url
from django.core.exceptions import ImproperlyConfigured # noqa
from django.core.urlresolvers import reverse
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import empty
from django.utils.functional import SimpleLazyObject # noqa
from django.utils.module_loading import module_has_submodule # noqa
from django.utils.translation import ugettext_lazy as _
from importlib import import_module
import six
from horizon import conf
from horizon.decorators import _current_component # noqa
from horizon.decorators import require_auth # noqa
from horizon.decorators import require_perms # noqa
from horizon import loaders
# Name of the panel group for panels to be displayed without a group.
DEFAULT_PANEL_GROUP = 'default'
LOG = logging.getLogger(__name__)
def _decorate_urlconf(urlpatterns, decorator, *args, **kwargs):
for pattern in urlpatterns:
if getattr(pattern, 'callback', None):
decorated = decorator(pattern.callback, *args, **kwargs)
if django.VERSION >= (1, 10):
pattern.callback = decorated
else:
# prior to 1.10 callback was a property and we had
# to modify the private attribute behind the property
pattern._callback = decorated
if getattr(pattern, 'url_patterns', []):
_decorate_urlconf(pattern.url_patterns, decorator, *args, **kwargs)
# FIXME(lhcheng): We need to find a better way to cache the result.
# Rather than storing it in the session, we could leverage the Django
# cache framework. Currently, this has been causing issues with the cookie
# backend, adding 1600+ bytes to the cookie size.
def access_cached(func):
def inner(self, context):
session = context['request'].session
try:
if session['allowed']['valid_for'] != session.get('token'):
raise KeyError()
except KeyError:
session['allowed'] = {"valid_for": session.get('token')}
key = "%s.%s" % (self.__class__.__module__, self.__class__.__name__)
if key not in session['allowed']:
session['allowed'][key] = func(self, context)
session.modified = True
return session['allowed'][key]
return inner
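# Illustrative (hypothetical) shape of the per-session cache written above:
#
#     request.session['allowed'] == {
#         'valid_for': '<current token>',
#         'horizon.base.Panel': True,
#         'openstack_dashboard.dashboards.admin.Admin': False,
#     }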
class NotRegistered(Exception):
pass
@python_2_unicode_compatible
class HorizonComponent(object):
policy_rules = None
def __init__(self):
super(HorizonComponent, self).__init__()
if not self.slug:
raise ImproperlyConfigured('Every %s must have a slug.'
% self.__class__)
def __str__(self):
name = getattr(self, 'name', u"Unnamed %s" % self.__class__.__name__)
return name
def _get_default_urlpatterns(self):
package_string = '.'.join(self.__module__.split('.')[:-1])
if getattr(self, 'urls', None):
try:
mod = import_module('.%s' % self.urls, package_string)
except ImportError:
mod = import_module(self.urls)
urlpatterns = mod.urlpatterns
else:
# Try importing a urls.py from the dashboard package
if module_has_submodule(import_module(package_string), 'urls'):
urls_mod = import_module('.urls', package_string)
urlpatterns = urls_mod.urlpatterns
else:
urlpatterns = []
return urlpatterns
    # FIXME(lhcheng): Removed the access_cached decorator for now until
    # a better implementation has been figured out. This has been causing
    # issues with the cookie backend, adding 1600+ bytes to the cookie size.
# @access_cached
def can_access(self, context):
"""Return whether the user has role based access to this component.
This method is not intended to be overridden.
The result of the method is stored in per-session cache.
"""
return self.allowed(context)
def allowed(self, context):
"""Checks if the user is allowed to access this component.
This method should be overridden to return the result of
any policy checks required for the user to access this component
when more complex checks are required.
"""
return self._can_access(context['request'])
def _can_access(self, request):
policy_check = getattr(settings, "POLICY_CHECK_FUNCTION", None)
# this check is an OR check rather than an AND check that is the
# default in the policy engine, so calling each rule individually
if policy_check and self.policy_rules:
for rule in self.policy_rules:
if policy_check((rule,), request):
return True
return False
# default to allowed
return True
class Registry(object):
def __init__(self):
self._registry = {}
if not getattr(self, '_registerable_class', None):
raise ImproperlyConfigured('Subclasses of Registry must set a '
'"_registerable_class" property.')
def _register(self, cls):
"""Registers the given class.
If the specified class is already registered then it is ignored.
"""
if not inspect.isclass(cls):
raise ValueError('Only classes may be registered.')
elif not issubclass(cls, self._registerable_class):
raise ValueError('Only %s classes or subclasses may be registered.'
% self._registerable_class.__name__)
if cls not in self._registry:
cls._registered_with = self
self._registry[cls] = cls()
return self._registry[cls]
def _unregister(self, cls):
"""Unregisters the given class.
If the specified class isn't registered, ``NotRegistered`` will
be raised.
"""
if not issubclass(cls, self._registerable_class):
raise ValueError('Only %s classes or subclasses may be '
                             'unregistered.' % self._registerable_class.__name__)
if cls not in self._registry.keys():
raise NotRegistered('%s is not registered' % cls)
del self._registry[cls]
return True
def _registered(self, cls):
if inspect.isclass(cls) and issubclass(cls, self._registerable_class):
found = self._registry.get(cls, None)
if found:
return found
else:
# Allow for fetching by slugs as well.
for registered in self._registry.values():
if registered.slug == cls:
return registered
class_name = self._registerable_class.__name__
if hasattr(self, "_registered_with"):
parent = self._registered_with._registerable_class.__name__
raise NotRegistered('%(type)s with slug "%(slug)s" is not '
'registered with %(parent)s "%(name)s".'
% {"type": class_name,
"slug": cls,
"parent": parent,
"name": self.slug})
else:
slug = getattr(cls, "slug", cls)
raise NotRegistered('%(type)s with slug "%(slug)s" is not '
'registered.' % {"type": class_name,
"slug": slug})
class Panel(HorizonComponent):
"""A base class for defining Horizon dashboard panels.
All Horizon dashboard panels should extend from this class. It provides
the appropriate hooks for automatically constructing URLconfs, and
providing permission-based access control.
.. attribute:: name
The name of the panel. This will be displayed in the
auto-generated navigation and various other places.
Default: ``''``.
.. attribute:: slug
A unique "short name" for the panel. The slug is used as
a component of the URL path for the panel. Default: ``''``.
.. attribute:: permissions
A list of permission names, all of which a user must possess in order
to access any view associated with this panel. This attribute
is combined cumulatively with any permissions required on the
``Dashboard`` class with which it is registered.
.. attribute:: urls
Path to a URLconf of views for this panel using dotted Python
notation. If no value is specified, a file called ``urls.py``
living in the same package as the ``panel.py`` file is used.
Default: ``None``.
.. attribute:: nav
.. method:: nav(context)
The ``nav`` attribute can be either boolean value or a callable
which accepts a ``RequestContext`` object as a single argument
to control whether or not this panel should appear in
automatically-generated navigation. Default: ``True``.
.. attribute:: index_url_name
The ``name`` argument for the URL pattern which corresponds to
the index view for this ``Panel``. This is the view that
:meth:`.Panel.get_absolute_url` will attempt to reverse.
.. staticmethod:: can_register
This optional static method can be used to specify conditions that
need to be satisfied to load this panel. Unlike ``permissions`` and
``allowed`` this method is intended to handle settings based
conditions rather than user based permission and policy checks.
The return value is boolean. If the method returns ``True``, then the
        panel will be registered and available to the user (if ``permissions`` and
``allowed`` runtime checks are also satisfied). If the method returns
``False``, then the panel will not be registered and will not be
available via normal navigation or direct URL access.
"""
name = ''
slug = ''
urls = None
nav = True
index_url_name = "index"
def __repr__(self):
return "<Panel: %s>" % self.slug
def get_absolute_url(self):
"""Returns the default URL for this panel.
The default URL is defined as the URL pattern with ``name="index"`` in
the URLconf for this panel.
"""
try:
return reverse('horizon:%s:%s:%s' % (self._registered_with.slug,
self.slug,
self.index_url_name))
except Exception as exc:
# Logging here since this will often be called in a template
# where the exception would be hidden.
LOG.info("Error reversing absolute URL for %s: %s" % (self, exc))
raise
@property
def _decorated_urls(self):
urlpatterns = self._get_default_urlpatterns()
# Apply access controls to all views in the patterns
permissions = getattr(self, 'permissions', [])
_decorate_urlconf(urlpatterns, require_perms, permissions)
_decorate_urlconf(urlpatterns, _current_component, panel=self)
# Return the three arguments to django.conf.urls.include
return urlpatterns, self.slug, self.slug
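# Minimal sketch (hypothetical names): a typical ``panel.py`` subclasses Panel
# and registers it with its dashboard; leaving ``urls`` as None makes
# ``_get_default_urlpatterns`` pick up a sibling ``urls.py`` automatically.
def _example_panel_module():
    class _Instances(Panel):
        name = "Instances"
        slug = 'instances'
        permissions = ('openstack.services.compute',)
    # In a real panel module this would be followed by:
    #     dashboard.ProjectDashboard.register(_Instances)
    return _Instances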
@six.python_2_unicode_compatible
class PanelGroup(object):
"""A container for a set of :class:`~horizon.Panel` classes.
When iterated, it will yield each of the ``Panel`` instances it
contains.
.. attribute:: slug
A unique string to identify this panel group. Required.
.. attribute:: name
A user-friendly name which will be used as the group heading in
places such as the navigation. Default: ``None``.
.. attribute:: panels
A list of panel module names which should be contained within this
grouping.
"""
def __init__(self, dashboard, slug=None, name=None, panels=None):
self.dashboard = dashboard
self.slug = slug or getattr(self, "slug", DEFAULT_PANEL_GROUP)
self.name = name or getattr(self, "name", None)
        # Our panels must be mutable so they can be extended by others.
self.panels = list(panels or getattr(self, "panels", []))
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.slug)
def __str__(self):
return self.name
def __iter__(self):
panel_instances = []
for name in self.panels:
try:
panel_instances.append(self.dashboard.get_panel(name))
except NotRegistered as e:
LOG.debug(e)
return iter(panel_instances)
class Dashboard(Registry, HorizonComponent):
"""A base class for defining Horizon dashboards.
All Horizon dashboards should extend from this base class. It provides the
appropriate hooks for automatic discovery of :class:`~horizon.Panel`
modules, automatically constructing URLconfs, and providing
permission-based access control.
.. attribute:: name
The name of the dashboard. This will be displayed in the
auto-generated navigation and various other places.
Default: ``''``.
.. attribute:: slug
A unique "short name" for the dashboard. The slug is used as
a component of the URL path for the dashboard. Default: ``''``.
.. attribute:: panels
The ``panels`` attribute can be either a flat list containing the name
of each panel **module** which should be loaded as part of this
dashboard, or a list of :class:`~horizon.PanelGroup` classes which
define groups of panels as in the following example::
class SystemPanels(horizon.PanelGroup):
slug = "syspanel"
name = _("System")
panels = ('overview', 'instances', ...)
class Syspanel(horizon.Dashboard):
panels = (SystemPanels,)
Automatically generated navigation will use the order of the
modules in this attribute.
Default: ``[]``.
.. warning::
The values for this attribute should not correspond to the
:attr:`~.Panel.name` attributes of the ``Panel`` classes.
They should be the names of the Python modules in which the
``panel.py`` files live. This is used for the automatic
loading and registration of ``Panel`` classes much like
Django's ``ModelAdmin`` machinery.
Panel modules must be listed in ``panels`` in order to be
discovered by the automatic registration mechanism.
.. attribute:: default_panel
The name of the panel which should be treated as the default
panel for the dashboard, i.e. when you visit the root URL
for this dashboard, that's the panel that is displayed.
Default: ``None``.
.. attribute:: permissions
A list of permission names, all of which a user must possess in order
to access any panel registered with this dashboard. This attribute
is combined cumulatively with any permissions required on individual
:class:`~horizon.Panel` classes.
.. attribute:: urls
Optional path to a URLconf of additional views for this dashboard
which are not connected to specific panels. Default: ``None``.
.. attribute:: nav
.. method:: nav(context)
The ``nav`` attribute can be either boolean value or a callable
which accepts a ``RequestContext`` object as a single argument
to control whether or not this dashboard should appear in
automatically-generated navigation. Default: ``True``.
.. attribute:: public
Boolean value to determine whether this dashboard can be viewed
without being logged in. Defaults to ``False``.
"""
_registerable_class = Panel
name = ''
slug = ''
urls = None
panels = []
default_panel = None
nav = True
public = False
def __repr__(self):
return "<Dashboard: %s>" % self.slug
def __init__(self, *args, **kwargs):
super(Dashboard, self).__init__(*args, **kwargs)
self._panel_groups = None
def get_panel(self, panel):
"""Returns the specified :class:`~horizon.Panel` instance registered
with this dashboard.
"""
return self._registered(panel)
def get_panels(self):
"""Returns the :class:`~horizon.Panel` instances registered with this
dashboard in order, without any panel groupings.
"""
all_panels = []
panel_groups = self.get_panel_groups()
for panel_group in panel_groups.values():
all_panels.extend(panel_group)
return all_panels
def get_panel_group(self, slug):
"""Returns the specified :class:~horizon.PanelGroup
or None if not registered
"""
return self._panel_groups.get(slug)
def get_panel_groups(self):
registered = copy.copy(self._registry)
panel_groups = []
# Gather our known panels
if self._panel_groups is not None:
for panel_group in self._panel_groups.values():
for panel in panel_group:
registered.pop(panel.__class__)
panel_groups.append((panel_group.slug, panel_group))
# Deal with leftovers (such as add-on registrations)
if len(registered):
slugs = [panel.slug for panel in registered.values()]
new_group = PanelGroup(self,
slug="other",
name=_("Other"),
panels=slugs)
panel_groups.append((new_group.slug, new_group))
return collections.OrderedDict(panel_groups)
def get_absolute_url(self):
"""Returns the default URL for this dashboard.
The default URL is defined as the URL pattern with ``name="index"``
in the URLconf for the :class:`~horizon.Panel` specified by
:attr:`~horizon.Dashboard.default_panel`.
"""
try:
return self._registered(self.default_panel).get_absolute_url()
except Exception:
# Logging here since this will often be called in a template
# where the exception would be hidden.
LOG.exception("Error reversing absolute URL for %s." % self)
raise
@property
def _decorated_urls(self):
urlpatterns = self._get_default_urlpatterns()
default_panel = None
# Add in each panel's views except for the default view.
for panel in self._registry.values():
if panel.slug == self.default_panel:
default_panel = panel
continue
url_slug = panel.slug.replace('.', '/')
urlpatterns.append(url(r'^%s/' % url_slug,
include(panel._decorated_urls)))
# Now the default view, which should come last
if not default_panel:
raise NotRegistered('The default panel "%s" is not registered.'
% self.default_panel)
urlpatterns.append(url(r'', include(default_panel._decorated_urls)))
# Require login if not public.
if not self.public:
_decorate_urlconf(urlpatterns, require_auth)
# Apply access controls to all views in the patterns
permissions = getattr(self, 'permissions', [])
_decorate_urlconf(urlpatterns, require_perms, permissions)
_decorate_urlconf(urlpatterns, _current_component, dashboard=self)
# Return the three arguments to django.conf.urls.include
return urlpatterns, self.slug, self.slug
def _autodiscover(self):
"""Discovers panels to register from the current dashboard module."""
if getattr(self, "_autodiscover_complete", False):
return
panels_to_discover = []
panel_groups = []
# If we have a flat iterable of panel names, wrap it again so
# we have a consistent structure for the next step.
if all([isinstance(i, six.string_types) for i in self.panels]):
self.panels = [self.panels]
# Now iterate our panel sets.
default_created = False
for panel_set in self.panels:
# Instantiate PanelGroup classes.
if not isinstance(panel_set, collections.Iterable) and \
issubclass(panel_set, PanelGroup):
panel_group = panel_set(self)
# Check for nested tuples, and convert them to PanelGroups
elif not isinstance(panel_set, PanelGroup):
panel_group = PanelGroup(self, panels=panel_set)
# Put our results into their appropriate places
panels_to_discover.extend(panel_group.panels)
panel_groups.append((panel_group.slug, panel_group))
if panel_group.slug == DEFAULT_PANEL_GROUP:
default_created = True
# Plugin panels can be added to a default panel group. Make sure such a
# default group exists.
if not default_created:
default_group = PanelGroup(self)
panel_groups.insert(0, (default_group.slug, default_group))
self._panel_groups = collections.OrderedDict(panel_groups)
# Do the actual discovery
package = '.'.join(self.__module__.split('.')[:-1])
mod = import_module(package)
for panel in panels_to_discover:
try:
before_import_registry = copy.copy(self._registry)
import_module('.%s.panel' % panel, package)
except Exception:
self._registry = before_import_registry
if module_has_submodule(mod, panel):
raise
self._autodiscover_complete = True
@classmethod
def register(cls, panel):
"""Registers a :class:`~horizon.Panel` with this dashboard."""
panel_class = Horizon.register_panel(cls, panel)
# Support template loading from panel template directories.
panel_mod = import_module(panel.__module__)
panel_dir = os.path.dirname(panel_mod.__file__)
template_dir = os.path.join(panel_dir, "templates")
if os.path.exists(template_dir):
key = os.path.join(cls.slug, panel.slug)
loaders.panel_template_dirs[key] = template_dir
return panel_class
@classmethod
def unregister(cls, panel):
"""Unregisters a :class:`~horizon.Panel` from this dashboard."""
success = Horizon.unregister_panel(cls, panel)
if success:
# Remove the panel's template directory.
key = os.path.join(cls.slug, panel.slug)
if key in loaders.panel_template_dirs:
del loaders.panel_template_dirs[key]
return success
def allowed(self, context):
"""Checks for role based access for this dashboard.
Checks for access to any panels in the dashboard and of the
dashboard itself.
This method should be overridden to return the result of
any policy checks required for the user to access this dashboard
when more complex checks are required.
"""
# if the dashboard has policy rules, honor those above individual
# panels
if not self._can_access(context['request']):
return False
# check if access is allowed to a single panel,
# the default for each panel is True
for panel in self.get_panels():
if panel.can_access(context):
return True
return False
class Workflow(object):
pass
class LazyURLPattern(SimpleLazyObject):
def __iter__(self):
if self._wrapped is empty:
self._setup()
return iter(self._wrapped)
def __reversed__(self):
if self._wrapped is empty:
self._setup()
return reversed(self._wrapped)
def __len__(self):
if self._wrapped is empty:
self._setup()
return len(self._wrapped)
def __getitem__(self, idx):
if self._wrapped is empty:
self._setup()
return self._wrapped[idx]
class Site(Registry, HorizonComponent):
"""The overarching class which encompasses all dashboards and panels."""
# Required for registry
_registerable_class = Dashboard
name = "Horizon"
namespace = 'horizon'
slug = 'horizon'
urls = 'horizon.site_urls'
def __repr__(self):
return u"<Site: %s>" % self.slug
@property
def _conf(self):
return conf.HORIZON_CONFIG
@property
def dashboards(self):
return self._conf['dashboards']
@property
def default_dashboard(self):
return self._conf['default_dashboard']
def register(self, dashboard):
"""Registers a :class:`~horizon.Dashboard` with Horizon."""
return self._register(dashboard)
def unregister(self, dashboard):
"""Unregisters a :class:`~horizon.Dashboard` from Horizon."""
return self._unregister(dashboard)
def registered(self, dashboard):
return self._registered(dashboard)
def register_panel(self, dashboard, panel):
dash_instance = self.registered(dashboard)
return dash_instance._register(panel)
def unregister_panel(self, dashboard, panel):
dash_instance = self.registered(dashboard)
if not dash_instance:
raise NotRegistered("The dashboard %s is not registered."
% dashboard)
return dash_instance._unregister(panel)
def get_dashboard(self, dashboard):
"""Returns the specified :class:`~horizon.Dashboard` instance."""
return self._registered(dashboard)
def get_dashboards(self):
"""Returns an ordered tuple of :class:`~horizon.Dashboard` modules.
Orders dashboards according to the ``"dashboards"`` key in
``HORIZON_CONFIG`` or else returns all registered dashboards
in alphabetical order.
Any remaining :class:`~horizon.Dashboard` classes registered with
Horizon but not listed in ``HORIZON_CONFIG['dashboards']``
will be appended to the end of the list alphabetically.
"""
if self.dashboards:
registered = copy.copy(self._registry)
dashboards = []
for item in self.dashboards:
dashboard = self._registered(item)
dashboards.append(dashboard)
registered.pop(dashboard.__class__)
if len(registered):
extra = sorted(registered.values())
dashboards.extend(extra)
return dashboards
else:
return sorted(self._registry.values())
def get_default_dashboard(self):
"""Returns the default :class:`~horizon.Dashboard` instance.
If ``"default_dashboard"`` is specified in ``HORIZON_CONFIG``
then that dashboard will be returned. If not, the first dashboard
returned by :func:`~horizon.get_dashboards` will be returned.
"""
if self.default_dashboard:
return self._registered(self.default_dashboard)
elif len(self._registry):
return self.get_dashboards()[0]
else:
raise NotRegistered("No dashboard modules have been registered.")
def get_user_home(self, user):
"""Returns the default URL for a particular user.
This method can be used to customize where a user is sent when
they log in, etc. By default it returns the value of
:meth:`get_absolute_url`.
An alternative function can be supplied to customize this behavior
by specifying a either a URL or a function which returns a URL via
the ``"user_home"`` key in ``HORIZON_CONFIG``. Each of these
would be valid::
{"user_home": "/home",} # A URL
{"user_home": "my_module.get_user_home",} # Path to a function
{"user_home": lambda user: "/" + user.name,} # A function
{"user_home": None,} # Will always return the default dashboard
This can be useful if the default dashboard may not be accessible
to all users. When user_home is missing from HORIZON_CONFIG,
it will default to the settings.LOGIN_REDIRECT_URL value.
"""
user_home = self._conf['user_home']
if user_home:
if callable(user_home):
return user_home(user)
elif isinstance(user_home, six.string_types):
# Assume we've got a URL if there's a slash in it
if '/' in user_home:
return user_home
else:
mod, func = user_home.rsplit(".", 1)
return getattr(import_module(mod), func)(user)
# If it's not callable and not a string, it's wrong.
raise ValueError('The user_home setting must be either a string '
'or a callable object (e.g. a function).')
else:
return self.get_absolute_url()
def get_absolute_url(self):
"""Returns the default URL for Horizon's URLconf.
The default URL is determined by calling
:meth:`~horizon.Dashboard.get_absolute_url`
on the :class:`~horizon.Dashboard` instance returned by
:meth:`~horizon.get_default_dashboard`.
"""
return self.get_default_dashboard().get_absolute_url()
@property
def _lazy_urls(self):
"""Lazy loading for URL patterns.
This method avoids problems associated with attempting to evaluate
the URLconf before the settings module has been loaded.
"""
def url_patterns():
return self._urls()[0]
return LazyURLPattern(url_patterns), self.namespace, self.slug
def _urls(self):
"""Constructs the URLconf for Horizon from registered Dashboards."""
urlpatterns = self._get_default_urlpatterns()
self._autodiscover()
# Discover each dashboard's panels.
for dash in self._registry.values():
dash._autodiscover()
# Load the plugin-based panel configuration
self._load_panel_customization()
# Allow for override modules
if self._conf.get("customization_module", None):
customization_module = self._conf["customization_module"]
bits = customization_module.split('.')
mod_name = bits.pop()
package = '.'.join(bits)
mod = import_module(package)
try:
before_import_registry = copy.copy(self._registry)
import_module('%s.%s' % (package, mod_name))
except Exception:
self._registry = before_import_registry
if module_has_submodule(mod, mod_name):
raise
# Compile the dynamic urlconf.
for dash in self._registry.values():
urlpatterns.append(url(r'^%s/' % dash.slug,
include(dash._decorated_urls)))
# Return the three arguments to django.conf.urls.include
return urlpatterns, self.namespace, self.slug
def _autodiscover(self):
"""Discovers modules to register from ``settings.INSTALLED_APPS``.
This makes sure that the appropriate modules get imported to register
themselves with Horizon.
"""
if not getattr(self, '_registerable_class', None):
raise ImproperlyConfigured('You must set a '
'"_registerable_class" property '
'in order to use autodiscovery.')
# Discover both dashboards and panels, in that order
for mod_name in ('dashboard', 'panel'):
for app in settings.INSTALLED_APPS:
mod = import_module(app)
try:
before_import_registry = copy.copy(self._registry)
import_module('%s.%s' % (app, mod_name))
except Exception:
self._registry = before_import_registry
if module_has_submodule(mod, mod_name):
raise
def _load_panel_customization(self):
"""Applies the plugin-based panel configurations.
This method parses the panel customization from the ``HORIZON_CONFIG``
and make changes to the dashboard accordingly.
It supports adding, removing and setting default panels on the
dashboard. It also support registering a panel group.
"""
panel_customization = self._conf.get("panel_customization", [])
# Process all the panel groups first so that they exist before panels
# are added to them and Dashboard._autodiscover() doesn't wipe out any
# panels previously added when its panel groups are instantiated.
panel_configs = []
for config in panel_customization:
if config.get('PANEL'):
panel_configs.append(config)
elif config.get('PANEL_GROUP'):
self._process_panel_group_configuration(config)
else:
LOG.warning("Skipping %s because it doesn't have PANEL or "
"PANEL_GROUP defined.", config.__name__)
# Now process the panels.
for config in panel_configs:
self._process_panel_configuration(config)
def _process_panel_configuration(self, config):
"""Add, remove and set default panels on the dashboard."""
try:
dashboard = config.get('PANEL_DASHBOARD')
if not dashboard:
LOG.warning("Skipping %s because it doesn't have "
"PANEL_DASHBOARD defined.", config.__name__)
return
panel_slug = config.get('PANEL')
dashboard_cls = self.get_dashboard(dashboard)
panel_group = config.get('PANEL_GROUP')
default_panel = config.get('DEFAULT_PANEL')
# Set the default panel
if default_panel:
dashboard_cls.default_panel = default_panel
# Remove the panel
if config.get('REMOVE_PANEL', False):
for panel in dashboard_cls.get_panels():
if panel_slug == panel.slug:
dashboard_cls.unregister(panel.__class__)
elif config.get('ADD_PANEL', None):
# Add the panel to the dashboard
panel_path = config['ADD_PANEL']
mod_path, panel_cls = panel_path.rsplit(".", 1)
try:
mod = import_module(mod_path)
except ImportError:
LOG.warning("Could not load panel: %s", mod_path)
return
panel = getattr(mod, panel_cls)
                # test if the can_register method is present and call it if so
                # to determine whether the panel should be loaded
if hasattr(panel, 'can_register') and \
callable(getattr(panel, 'can_register')):
if not panel.can_register():
LOG.debug("Load condition failed for panel: %(panel)s",
{'panel': panel_slug})
return
dashboard_cls.register(panel)
if panel_group:
dashboard_cls.get_panel_group(panel_group).\
panels.append(panel.slug)
else:
panels = list(dashboard_cls.panels)
panels.append(panel)
dashboard_cls.panels = tuple(panels)
except Exception as e:
LOG.warning('Could not process panel %(panel)s: %(exc)s',
{'panel': panel_slug, 'exc': e})
def _process_panel_group_configuration(self, config):
"""Adds a panel group to the dashboard."""
panel_group_slug = config.get('PANEL_GROUP')
try:
dashboard = config.get('PANEL_GROUP_DASHBOARD')
if not dashboard:
LOG.warning("Skipping %s because it doesn't have "
"PANEL_GROUP_DASHBOARD defined.", config.__name__)
return
dashboard_cls = self.get_dashboard(dashboard)
panel_group_name = config.get('PANEL_GROUP_NAME')
if not panel_group_name:
LOG.warning("Skipping %s because it doesn't have "
"PANEL_GROUP_NAME defined.", config.__name__)
return
# Create the panel group class
panel_group = type(panel_group_slug,
(PanelGroup, ),
{'slug': panel_group_slug,
'name': panel_group_name,
'panels': []},)
# Add the panel group to dashboard
panels = list(dashboard_cls.panels)
panels.append(panel_group)
dashboard_cls.panels = tuple(panels)
# Trigger the autodiscovery to completely load the new panel group
dashboard_cls._autodiscover_complete = False
dashboard_cls._autodiscover()
except Exception as e:
LOG.warning('Could not process panel group %(panel_group)s: '
'%(exc)s',
{'panel_group': panel_group_slug, 'exc': e})
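# Minimal sketch (hypothetical values): plugin ``enabled`` files feed
# ``HORIZON_CONFIG['panel_customization']`` with dicts like these; the panel
# group entry is processed first, then the panel is added to that group.
def _example_panel_customization():
    return [
        {'PANEL_GROUP': 'example_group',
         'PANEL_GROUP_NAME': 'Example Group',
         'PANEL_GROUP_DASHBOARD': 'project'},
        {'PANEL': 'example_panel',
         'PANEL_DASHBOARD': 'project',
         'PANEL_GROUP': 'example_group',
         'ADD_PANEL': 'myplugin.content.example.panel.ExamplePanel'},
    ]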
class HorizonSite(Site):
"""A singleton implementation of Site such that all dealings with horizon
get the same instance no matter what. There can be only one.
"""
_instance = None
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(Site, cls).__new__(cls, *args, **kwargs)
return cls._instance
# The one true Horizon
Horizon = HorizonSite()
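# Minimal usage sketch (hypothetical project layout): a site registers its
# dashboards against the singleton above and mounts the lazily-built URLconf
# from its root ``urls.py``, e.g.:
#
#     import horizon
#     horizon.register(MyDashboard)   # MyDashboard is a hypothetical subclass
#     urlpatterns = [url(r'', include(horizon.urls))]
#
# The ``_lazy_urls`` property above ensures dashboards and panels are only
# discovered after the settings module has fully loaded.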
|
bac/horizon
|
horizon/base.py
|
Python
|
apache-2.0
| 39,077
|
#!/usr/bin/env python
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
'''
GcloudCLI class that wraps the oc commands in a subprocess
'''
import atexit
import json
import os
import random
# Not all generated modules use this.
# pylint: disable=unused-import
import re
import shutil
import string
import subprocess
import tempfile
import yaml
# Not all generated modules use this.
# pylint: disable=unused-import
import copy
# pylint: disable=import-error
from apiclient.discovery import build
# pylint: disable=import-error
from oauth2client.client import GoogleCredentials
from ansible.module_utils.basic import AnsibleModule
class GcloudCLIError(Exception):
'''Exception class for openshiftcli'''
pass
# pylint: disable=too-few-public-methods
class GcloudCLI(object):
''' Class to wrap the command line tools '''
def __init__(self, credentials=None, project=None, verbose=False):
''' Constructor for GcloudCLI '''
self.scope = None
self._project = project
if not credentials:
self.credentials = GoogleCredentials.get_application_default()
else:
tmp = tempfile.NamedTemporaryFile()
tmp.write(json.dumps(credentials))
tmp.seek(0)
self.credentials = GoogleCredentials.from_stream(tmp.name)
tmp.close()
self.scope = build('compute', 'beta', credentials=self.credentials)
self.verbose = verbose
@property
def project(self):
'''property for project'''
return self._project
def _create_image(self, image_name, image_info):
        '''create an image'''
cmd = ['compute', 'images', 'create', image_name]
for key, val in image_info.items():
if val:
cmd.extend(['--%s' % key, val])
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _delete_image(self, image_name):
'''delete image by name '''
        cmd = ['compute', 'images', 'delete', image_name, '-q']
        return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _list_images(self, image_name=None):
'''list images.
if name is supplied perform a describe and return
'''
cmd = ['compute', 'images']
if image_name:
cmd.extend(['describe', image_name])
else:
cmd.append('list')
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _list_deployments(self, simple=True):
        '''list deployments'''
cmd = ['deployment-manager', 'deployments', 'list']
if simple:
cmd.append('--simple-list')
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _delete_deployment(self, dname):
        '''delete a deployment by name '''
cmd = ['deployment-manager', 'deployments', 'delete', dname, '-q']
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _create_deployment(self, dname, config=None, opts=None):
''' create a deployment'''
cmd = ['deployment-manager', 'deployments', 'create', dname]
if config:
if isinstance(config, dict):
config = Utils.create_file(dname, config)
if isinstance(config, str) and os.path.exists(config):
cmd.extend(['--config=%s' % config])
if opts:
for key, val in opts.items():
cmd.append('--%s=%s' % (key, val))
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _update_deployment(self, dname, config=None, opts=None):
        ''' update a deployment'''
cmd = ['deployment-manager', 'deployments', 'update', dname]
if config:
if isinstance(config, dict):
config = Utils.create_file(dname, config)
if isinstance(config, str) and os.path.exists(config):
cmd.extend(['--config=%s' % config])
if opts:
for key, val in opts.items():
cmd.append('--%s=%s' % (key, val))
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _list_manifests(self, deployment, mname=None):
''' list manifests
if a name is specified then perform a describe
'''
cmd = ['deployment-manager', 'manifests', '--deployment', deployment]
if mname:
cmd.extend(['describe', mname])
else:
cmd.append('list')
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _delete_address(self, aname):
        ''' delete an address by name '''
cmd = ['compute', 'addresses', 'delete', aname, '-q']
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _list_addresses(self, aname=None):
''' list addresses
if a name is specified then perform a describe
'''
cmd = ['compute', 'addresses']
if aname:
cmd.extend(['describe', aname])
else:
cmd.append('list')
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _create_address(self, address_name, address_info, address=None, isglobal=False):
        ''' create an address'''
cmd = ['compute', 'addresses', 'create', address_name]
if address:
cmd.append(address)
if isglobal:
cmd.append('--global')
for key, val in address_info.items():
if val:
cmd.extend(['--%s' % key, val])
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _list_metadata(self, resource_type, name=None, zone=None):
''' list metadata'''
cmd = ['compute', resource_type, 'describe']
if name:
cmd.extend([name])
if zone:
cmd.extend(['--zone', zone])
return self.gcloud_cmd(cmd, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _delete_metadata(self, resource_type, keys, remove_all=False, name=None, zone=None):
        '''delete metadata'''
cmd = ['compute', resource_type, 'remove-metadata']
if name:
cmd.extend([name])
if zone:
cmd.extend(['--zone', zone])
if remove_all:
cmd.append('--all')
else:
cmd.append('--keys')
cmd.append(','.join(keys))
cmd.append('-q')
return self.gcloud_cmd(cmd, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _create_metadata(self, resource_type, metadata=None, metadata_from_file=None, name=None, zone=None):
'''create metadata'''
cmd = ['compute', resource_type, 'add-metadata']
if name:
cmd.extend([name])
if zone:
cmd.extend(['--zone', zone])
data = None
if metadata_from_file:
cmd.append('--metadata-from-file')
data = metadata_from_file
else:
cmd.append('--metadata')
data = metadata
cmd.append(','.join(['%s=%s' % (key, val) for key, val in data.items()]))
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _list_service_accounts(self, sa_name=None):
'''return service accounts '''
cmd = ['iam', 'service-accounts']
if sa_name:
cmd.extend(['describe', sa_name])
else:
cmd.append('list')
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _delete_service_account(self, sa_name):
'''delete service account '''
cmd = ['iam', 'service-accounts', 'delete', sa_name, '-q']
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _create_service_account(self, sa_name, display_name=None):
'''create service account '''
cmd = ['iam', 'service-accounts', 'create', sa_name]
if display_name:
cmd.extend(['--display-name', display_name])
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _update_service_account(self, sa_name, display_name=None):
'''update service account '''
cmd = ['iam', 'service-accounts', 'update', sa_name]
if display_name:
cmd.extend(['--display-name', display_name])
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _delete_service_account_key(self, sa_name, key_id):
'''delete service account key'''
cmd = ['iam', 'service-accounts', 'keys', 'delete', key_id, '--iam-account', sa_name, '-q']
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _list_service_account_keys(self, sa_name):
'''return service account keys '''
cmd = ['iam', 'service-accounts', 'keys', 'list', '--iam-account', sa_name]
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _create_service_account_key(self, sa_name, outputfile, key_format='p12'):
'''create service account key '''
# Ensure we remove the key file
atexit.register(Utils.cleanup, [outputfile])
cmd = ['iam', 'service-accounts', 'keys', 'create', outputfile,
'--iam-account', sa_name, '--key-file-type', key_format]
return self.gcloud_cmd(cmd, output=True, output_type='raw')
def _list_project_policy(self, project):
        '''return the IAM policy for a project'''
cmd = ['projects', 'get-iam-policy', project]
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _add_project_policy(self, project, member, role):
        '''add an IAM policy binding to a project'''
cmd = ['projects', 'add-iam-policy-binding', project, '--member', member, '--role', role]
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _remove_project_policy(self, project, member, role):
        '''remove an IAM policy binding from a project'''
cmd = ['projects', 'remove-iam-policy-binding', project, '--member', member, '--role', role]
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _set_project_policy(self, project, policy_path):
        '''set the IAM policy for a project from a policy file'''
cmd = ['projects', 'set-iam-policy', project, policy_path]
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _list_zones(self):
''' list zones '''
cmd = ['compute', 'zones', 'list']
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _config_set(self, config_param, config_value, config_section):
''' set config params with gcloud config set '''
param = config_section + '/' + config_param
cmd = ['config', 'set', param, config_value]
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def _list_config(self):
'''return config '''
cmd = ['config', 'list']
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
def list_disks(self, zone=None, disk_name=None):
'''return a list of disk objects in this project and zone'''
cmd = ['beta', 'compute', 'disks']
if disk_name and zone:
cmd.extend(['describe', disk_name, '--zone', zone])
else:
cmd.append('list')
cmd.extend(['--format', 'json'])
return self.gcloud_cmd(cmd, output=True, output_type='json')
# disabling too-many-arguments as these are all required for the disk labels
# pylint: disable=too-many-arguments
def _set_disk_labels(self, project, zone, dname, labels, finger_print):
        '''set labels on a disk through the compute beta API'''
        if labels is None:
labels = {}
self.scope = build('compute', 'beta', credentials=self.credentials)
body = {'labels': labels, 'labelFingerprint': finger_print}
result = self.scope.disks().setLabels(project=project,
zone=zone,
resource=dname,
body=body,
).execute()
return result
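    # Illustrative note (assumption, not part of this module): the setLabels
    # call is rejected when ``labelFingerprint`` is stale, so callers usually
    # read the current fingerprint first, e.g.:
    #
    #     disk = self.scope.disks().get(project=project, zone=zone,
    #                                   disk=dname).execute()
    #     self._set_disk_labels(project, zone, dname,
    #                           {'env': 'prod'}, disk['labelFingerprint'])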
def gcloud_cmd(self, cmd, output=False, output_type='json'):
'''Base command for gcloud '''
cmds = ['/usr/bin/gcloud']
if self.project:
cmds.extend(['--project', self.project])
cmds.extend(cmd)
rval = {}
results = ''
err = None
if self.verbose:
print ' '.join(cmds)
proc = subprocess.Popen(cmds,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env={})
stdout, stderr = proc.communicate()
rval = {"returncode": proc.returncode,
"results": results,
"cmd": ' '.join(cmds),
}
if proc.returncode == 0:
if output:
if output_type == 'json':
try:
rval['results'] = json.loads(stdout)
except ValueError as err:
if "No JSON object could be decoded" in err.message:
err = err.message
elif output_type == 'raw':
rval['results'] = stdout
if self.verbose:
print stdout
print stderr
if err:
rval.update({"err": err,
"stderr": stderr,
"stdout": stdout,
"cmd": cmds
})
else:
rval.update({"stderr": stderr,
"stdout": stdout,
"results": {},
})
return rval
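# Minimal sketch (hypothetical values): every helper above funnels through
# ``gcloud_cmd`` and, on success, returns a dict shaped roughly like:
#
#     {'returncode': 0,
#      'cmd': '/usr/bin/gcloud --project my-project compute zones list --format json',
#      'results': [{'name': 'us-central1-a'}, {'name': 'us-central1-b'}]}
#
# A non-zero return code leaves ``results`` empty and adds ``stdout``/``stderr``.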
################################################################################
# utilities and helpers for generation
################################################################################
class Utils(object):
''' utilities for openshiftcli modules '''
COMPUTE_URL_BASE = 'https://www.googleapis.com/compute/v1/'
@staticmethod
def create_file(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
path = os.path.join('/tmp', rname)
with open(path, 'w') as fds:
if ftype == 'yaml':
fds.write(yaml.safe_dump(data, default_flow_style=False))
elif ftype == 'json':
fds.write(json.dumps(data))
else:
fds.write(data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [path])
return path
@staticmethod
def global_compute_url(project, collection, rname):
'''build the global compute url for a resource'''
return ''.join([Utils.COMPUTE_URL_BASE, 'projects/', project, '/global/', collection, '/', rname])
@staticmethod
def zonal_compute_url(project, zone, collection, rname):
'''build the zone compute url for a resource'''
return ''.join([Utils.COMPUTE_URL_BASE, 'projects/', project, '/zones/', zone, '/', collection, '/', rname])
@staticmethod
def generate_random_name(size):
'''generate a random string of lowercase and digits the length of size'''
return ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(size))
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
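# Minimal sketch (hypothetical resource names): how the helpers above can be
# combined when building a deployment-manager config on disk. The resource
# schema shown is illustrative only.
def _example_utils_usage(project, zone):
    disk_link = Utils.zonal_compute_url(project, zone, 'disks', 'example-disk')
    config = {'resources': [{'name': 'example-disk',
                             'type': 'compute.v1.disk',
                             'properties': {'sourceDisk': disk_link}}]}
    # Written to /tmp/example-config and registered for cleanup at exit.
    return Utils.create_file('example-config', config)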
# pylint: disable=too-many-instance-attributes
class GcloudProjectPolicy(GcloudCLI):
''' Class to wrap the gcloud compute iam service-accounts command'''
# pylint allows 5
# pylint: disable=too-many-arguments
def __init__(self,
project,
role=None,
member=None,
member_type='serviceAccount',
verbose=False):
''' Constructor for gcloud resource '''
super(GcloudProjectPolicy, self).__init__(project=project)
self._role = role
self._member = '%s:%s' % (member_type, member)
self._exist_policy = None
self._policy_data = None
self._policy_path = None
self.verbose = verbose
@property
def existing_policy(self):
'''existing project policy'''
        if self._exist_policy is None:
results = self.list_project_policy()
self._exist_policy = results['results']
return self._exist_policy
@property
def member(self):
'''property for member'''
return self._member
@property
def role(self):
'''property for role '''
return self._role
@property
def policy_path(self):
'''property for policy path'''
return self._policy_path
@policy_path.setter
def policy_path(self, value):
'''property for policy path'''
self._policy_path = value
@property
def policy_data(self):
'''property for policy data'''
return self._policy_data
@policy_data.setter
def policy_data(self, value):
'''property for policy data'''
self._policy_data = value
def list_project_policy(self):
'''return project policy'''
return self._list_project_policy(self.project)
def remove_project_policy(self):
''' remove a member from a role in a project'''
return self._remove_project_policy(self.project, self.member, self.role)
def add_project_policy(self):
        '''add a member to a role in the project policy'''
return self._add_project_policy(self.project, self.member, self.role)
def set_project_policy(self, policy_data=None, policy_path=None):
'''set a project policy '''
# set the policy data and policy path
self.convert_to_file(policy_data, policy_path)
return self._set_project_policy(self.project, self.policy_path)
def exists(self):
'''check whether a member is in a project policy'''
for policy in self.existing_policy['bindings']:
if policy['role'] == self.role:
return self.member in policy['members']
return False
def needs_update(self, policy_data=None, policy_path=None):
'''compare results with incoming policy'''
# set the policy data and policy path
self.convert_to_file(policy_data, policy_path)
for policy in self.policy_data['bindings']:
for exist_policy in self.existing_policy['bindings']:
if policy['role'] == exist_policy['role']:
if policy['members'] != exist_policy['members']:
return True
break
else:
# Did not find the role
return True
return False
def convert_to_file(self, policy_data=None, policy_path=None):
'''convert the policy data into a dict and ensure we have a file'''
if policy_data:
self.policy_data = policy_data
self.policy_path = Utils.create_file('policy', policy_data, 'json')
elif policy_path:
self.policy_data = json.load(open(policy_path))
self.policy_path = policy_path
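# Minimal sketch (hypothetical values): ``needs_update`` and
# ``set_project_policy`` expect IAM policy data shaped like the output of
# ``gcloud projects get-iam-policy``:
def _example_policy_data():
    return {'bindings': [
        {'role': 'roles/compute.admin',
         'members': ['serviceAccount:builder@my-project.iam.gserviceaccount.com']},
    ]}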
# vim: expandtab:tabstop=4:shiftwidth=4
#pylint: disable=too-many-branches
def main():
''' ansible module for gcloud project policy'''
module = AnsibleModule(
argument_spec=dict(
# credentials
state=dict(default='present', type='str', choices=['present', 'absent', 'list']),
project=dict(required=True, type='str'),
member=dict(default=None, type='str'),
            member_type=dict(default='serviceAccount', type='str',
                             choices=['serviceAccount', 'user']),
role=dict(default=None, type='str'),
policy_data=dict(default=None, type='dict'),
policy_path=dict(default=None, type='str'),
),
supports_check_mode=True,
mutually_exclusive=[['policy_path', 'policy_data']],
)
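    # Example playbook task (hypothetical values) for driving this module:
    #
    #     - gcloud_project_policy:
    #         project: my-project
    #         member: builder@my-project.iam.gserviceaccount.com
    #         member_type: serviceAccount
    #         role: roles/compute.admin
    #         state: present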
gcloud = GcloudProjectPolicy(module.params['project'],
module.params['role'],
module.params['member'],
module.params['member_type'])
state = module.params['state']
api_rval = gcloud.list_project_policy()
#####
# Get
#####
if state == 'list':
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval, state="list")
module.exit_json(changed=False, results=api_rval['results'], state="list")
########
# Delete
########
if state == 'absent':
if gcloud.exists():
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed a delete.')
api_rval = gcloud.remove_project_policy()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=True, results=api_rval, state="absent")
module.exit_json(changed=False, results=api_rval, state="absent")
if state == 'present':
########
# Create
########
if module.params['policy_data'] or module.params['policy_path']:
if gcloud.needs_update(module.params['policy_data'], module.params['policy_path']):
# perform set
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed a set policy.')
api_rval = gcloud.set_project_policy(module.params['policy_data'], module.params['policy_path'])
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=True, results=api_rval, state="present")
module.exit_json(changed=False, results=api_rval, state="present")
if not gcloud.exists():
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed a create.')
api_rval = gcloud.add_project_policy()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=True, results=api_rval, state="present")
module.exit_json(changed=False, results=api_rval, state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. These are required
from ansible.module_utils.basic import *
main()
|
rhdedgar/openshift-tools
|
ansible/roles/lib_gcloud/library/gcloud_project_policy.py
|
Python
|
apache-2.0
| 23,977
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example displays all of the available subaccount permissions.
To get a subaccount ID, run get_subaccounts.py.
"""
import argparse
import sys
from apiclient import sample_tools
from oauth2client import client
# Declare command-line flags.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
'profile_id', type=int,
help='The ID of the profile to list permissions for')
argparser.add_argument(
'subaccount_id', type=int,
help='The ID of the subaccount to list permissions for')
def main(argv):
# Authenticate and construct service.
service, flags = sample_tools.init(
argv, 'dfareporting', 'v2.1', __doc__, __file__, parents=[argparser],
scope=['https://www.googleapis.com/auth/dfareporting',
'https://www.googleapis.com/auth/dfatrafficking'])
profile_id = flags.profile_id
subaccount_id = flags.subaccount_id
try:
# Construct and execute the subaccount request.
request = service.subaccounts().get(
profileId=profile_id, id=subaccount_id)
subaccount = request.execute()
# Construct the user role permissions request.
request = service.userRolePermissions().list(
profileId=profile_id, ids=subaccount['availablePermissionIds'])
# Execute request and print response.
result = request.execute()
for permission in result['userRolePermissions']:
print ('Found user role permission with ID %s and name "%s".'
% (permission['id'], permission['name']))
except client.AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run the '
'application to re-authorize')
if __name__ == '__main__':
main(sys.argv)
|
falbassini/googleads-dfa-reporting-samples
|
python/v2.1/get_user_role_permissions.py
|
Python
|
apache-2.0
| 2,332
|
#!/router/bin/python
import trex_root_path
from client.trex_stateless_client import *
from common.trex_exceptions import *
import cmd
from termstyle import termstyle
# import termstyle
import os
from argparse import ArgumentParser
import socket
import errno
import ast
import json
class InteractiveStatelessTRex(cmd.Cmd):
intro = termstyle.green("\nInteractive shell to play with Cisco's TRex stateless API.\
\nType help to view available pre-defined scenarios\n(c) All rights reserved.\n")
prompt = '> '
def __init__(self, trex_host, trex_port, virtual, verbose):
cmd.Cmd.__init__(self)
self.verbose = verbose
self.virtual = virtual
self.trex = STLClient(trex_host, trex_port, self.virtual)
self.DEFAULT_RUN_PARAMS = dict(m=1.5,
nc=True,
p=True,
d=100,
f='avl/sfr_delay_10_1g.yaml',
l=1000)
self.run_params = dict(self.DEFAULT_RUN_PARAMS)
def do_transmit(self, line):
"""Transmits a request over using a given link to server.\
\nuse: transmit [method_name] [method_params]"""
if line == "":
print "\nUsage: [method name] [param dict as string]\n"
print "Example: rpc test_add {'x': 12, 'y': 17}\n"
return
args = line.split(' ', 1) # args will have max length of 2
method_name = args[0]
params = None
bad_parse = False
try:
params = ast.literal_eval(args[1])
if not isinstance(params, dict):
bad_parse = True
except ValueError as e1:
bad_parse = True
except SyntaxError as e2:
bad_parse = True
if bad_parse:
print "\nValue should be a valid dict: '{0}'".format(args[1])
print "\nUsage: [method name] [param dict as string]\n"
print "Example: rpc test_add {'x': 12, 'y': 17}\n"
return
response = self.trex.transmit(method_name, params)
if not self.virtual:
# expect response
rc, msg = response
if rc:
print "\nServer Response:\n\n" + json.dumps(msg) + "\n"
else:
print "\n*** " + msg + "\n"
def do_push_files(self, filepaths):
"""Pushes a custom file to be stored locally on TRex server.\
\nPush multiple files by specifying their path separated by ' ' (space)."""
try:
filepaths = filepaths.split(' ')
print termstyle.green("*** Starting pushing files ({trex_files}) to TRex. ***".format(
trex_files=', '.join(filepaths))
)
ret_val = self.trex.push_files(filepaths)
if ret_val:
print termstyle.green("*** End of TRex push_files method (success) ***")
else:
print termstyle.magenta("*** End of TRex push_files method (failed) ***")
except IOError as inst:
print termstyle.magenta(inst)
if __name__ == "__main__":
parser = ArgumentParser(description=termstyle.cyan('Run TRex client stateless API demos and scenarios.'),
usage="client_interactive_example [options]")
parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0 \t (C) Cisco Systems Inc.\n')
parser.add_argument("-t", "--trex-host", required = True, dest="trex_host",
action="store", help="Specify the hostname or ip to connect with TRex server.",
metavar="HOST" )
parser.add_argument("-p", "--trex-port", type=int, default = 5050, metavar="PORT", dest="trex_port",
help="Select port on which the TRex server listens. Default port is 5050.", action="store")
# parser.add_argument("-m", "--maxhist", type=int, default = 100, metavar="SIZE", dest="hist_size",
# help="Specify maximum history size saved at client side. Default size is 100.", action="store")
parser.add_argument("--virtual", dest="virtual",
action="store_true",
help="Switch ON virtual option at TRex client. Default is: OFF.",
default=False)
parser.add_argument("--verbose", dest="verbose",
action="store_true",
help="Switch ON verbose option at TRex client. Default is: OFF.",
default=False)
args = parser.parse_args()
try:
InteractiveStatelessTRex(**vars(args)).cmdloop()
except KeyboardInterrupt:
print termstyle.cyan('Bye Bye!')
exit(-1)
except socket.error, e:
if e.errno == errno.ECONNREFUSED:
raise socket.error(errno.ECONNREFUSED,
"Connection from TRex server was terminated. \
Please make sure the server is up.")
|
dimagol/trex-core
|
scripts/automation/trex_control_plane/examples/interactive_stateless.py
|
Python
|
apache-2.0
| 5,035
|
import asyncio
import functools
import logging
import traceback
import typing
from starlette.websockets import WebSocket, WebSocketState
from starlette.status import WS_1001_GOING_AWAY
from . import serialize
from opentrons.protocols.execution.errors import ExceptionInProtocolError
from concurrent.futures import ThreadPoolExecutor
log = logging.getLogger(__name__)
# Number of executor threads
MAX_WORKERS = 2
# Keep these in sync with ES code
CALL_RESULT_MESSAGE = 0
CALL_ACK_MESSAGE = 1
NOTIFICATION_MESSAGE = 2
CONTROL_MESSAGE = 3
CALL_NACK_MESSAGE = 4
PONG_MESSAGE = 5
class ClientWriterTask(typing.NamedTuple):
socket: WebSocket
queue: asyncio.Queue
task: asyncio.Task
class RPCServer(object):
def __init__(self, loop, root=None):
self.monitor_events_task = None
self.loop = loop or asyncio.get_event_loop()
self.objects: typing.Dict[typing.Any, typing.Any] = {}
self.system = SystemCalls(self.objects)
self.root = root
# Allow for two concurrent calls max
self.executor = ThreadPoolExecutor(max_workers=MAX_WORKERS)
self.clients: typing.List[ClientWriterTask] = []
self.tasks = []
@property
def root(self):
return self._root
@root.setter
def root(self, value):
if self.monitor_events_task:
self.monitor_events_task.cancel()
self.monitor_events_task = \
self.loop.create_task(self.monitor_events(value))
self._root = value
def shutdown(self):
for writer in self.clients:
writer.task.cancel()
if self.monitor_events_task:
self.monitor_events_task.cancel()
async def on_shutdown(self):
"""
Graceful shutdown handler
See https://docs.aiohttp.org/en/stable/web.html#graceful-shutdown
"""
for client_write_tasks in self.clients.copy():
await client_write_tasks.socket.close(code=WS_1001_GOING_AWAY)
self.shutdown()
def send_worker(self, socket: WebSocket) -> ClientWriterTask:
"""
Create a send queue and task to read from said queue and send objects
over socket.
:param socket: Web socket
:return: The client object.
"""
_id = id(socket)
def task_done(future):
try:
future.result()
except Exception:
log.exception("send_task for socket {} threw:".format(_id))
async def send_task(socket_: WebSocket, queue_: asyncio.Queue):
while True:
payload = await queue_.get()
if socket_.client_state == WebSocketState.DISCONNECTED:
log.debug(f'Websocket {_id} closed')
break
await socket_.send_json(payload)
queue: asyncio.Queue = asyncio.Queue(loop=self.loop)
task = self.loop.create_task(send_task(socket, queue))
task.add_done_callback(task_done)
log.debug(f'Send task for {_id} started')
return ClientWriterTask(socket=socket, queue=queue, task=task)
async def monitor_events(self, instance):
async for event in instance.notifications:
try:
                # Limit object tree depth during serialization to avoid
                # flooding comms
data = self.call_and_serialize(
lambda: event)
self.send(
{
'$': {'type': NOTIFICATION_MESSAGE},
'data': data
})
except Exception:
log.exception('While processing event {0}:'.format(event))
async def handle_new_connection(self, socket: WebSocket):
"""Handle a new client connection"""
def task_done(future):
self.tasks.remove(future)
exception = future.exception()
if exception:
log.warning(
'While processing message: {0}\nDetails: {1}'.format(
exception,
traceback.format_exc())
)
socket_id = id(socket)
log.info('Opening Websocket {0}'.format(id(socket)))
try:
await socket.send_json({
'$': {'type': CONTROL_MESSAGE, 'monitor': True},
'root': self.call_and_serialize(lambda: self.root),
'type': self.call_and_serialize(lambda: type(self.root))
})
except Exception:
log.exception('While sending root info to {0}'.format(socket_id))
try:
# Add new client to list of clients
self.clients.append(self.send_worker(socket))
# Async receive client data until websocket is closed
while socket.client_state != WebSocketState.DISCONNECTED:
msg = await socket.receive_json()
task = self.loop.create_task(self.process(msg))
task.add_done_callback(task_done)
self.tasks += [task]
except Exception:
log.exception('While reading from socket:')
finally:
log.info('Closing WebSocket {0}'.format(id(socket)))
await socket.close()
# Remove the client from the list
self.clients = [c for c in self.clients if c.socket != socket]
return socket
def build_call(self, _id, name, args):
if _id not in self.objects:
raise ValueError(
'object with id {0} not found'.format(_id))
obj = self.objects[_id]
function = getattr(type(obj), name)
args = self.resolve_args(args)
kwargs: typing.Dict[typing.Any, typing.Any] = {}
# NOTE: since ECMAScript doesn't have a notion of named arguments
# we are using a convention that the last dictionary parameter will
# be expanded into kwargs. This introduces a risk of mistreating a
# legitimate dictionary as kwargs, but we consider it very low.
if (len(args) > 0) and (isinstance(args[-1], dict)):
kwargs = args.pop()
if not function:
raise ValueError(
'Function {0} not found in {1}'.format(name, type(obj)))
if not callable(function):
raise ValueError(
'Attribute {0} of {1} is not a function'
.format(name, type(obj)))
return functools.partial(function, obj, *args, **kwargs)
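    # Illustrative note (hypothetical payload): a call message is JSON shaped
    # roughly like the example below -- ``id`` selects the target object from
    # ``self.objects``, ``name`` the method, and each ``args`` entry is either
    # an object reference ``{'i': <id>}`` or a plain value ``{'v': <value>}``:
    #
    #     {'$': {'token': '3f2a'}, 'id': 140396731,
    #      'name': 'move_to', 'args': [{'i': 140396800}, {'v': 'A1'}]}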
def resolve_args(self, args):
"""
Resolve function call arguments that have object ids
into instances of these objects
"""
def resolve(a):
if isinstance(a, dict):
_id = a.get('i', None)
# If it's a compound type (including dict)
# Check if it has id (i) to determine that it has
# a reference in object storage. If it's None, then it's
# a dict originated at the remote
return self.objects[_id] if _id else a['v']
# if array, resolve it's elements
if isinstance(a, (list, tuple)):
return [resolve(i) for i in a]
return a
return [resolve(a) for a in args]
async def process(self, data):
"""
Process the payload from a call
:param data: dict
:return: None
"""
try:
meta = data.get('$', {})
token = meta.get('token')
_id = data.get('id')
if meta.get('ping'):
return self.send_pong()
# if id is missing from payload or explicitly set to null,
# use the system object
if _id is None:
_id = id(self.system)
try:
self.send_ack(token)
func = self.build_call(
_id=_id,
name=data.get('name'),
args=data.get('args', []))
except Exception as e:
log.exception("Exception during rpc.Server.process:")
error = '{0}: {1}'.format(e.__class__.__name__, e)
self.send_error(error, token)
else:
response = await self.make_call(func, token)
self.send(response)
except Exception:
log.exception('Error while processing request')
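    # Illustrative sketch only -- the field names come from process() above,
    # the values are invented. A call payload looks roughly like:
    #
    #     {'$': {'token': 'abc123'}, 'id': 4402959824,
    #      'name': 'home', 'args': []}
    #
    # A ping is just {'$': {'token': 'abc123', 'ping': True}}, and a missing
    # or null 'id' routes the call to the SystemCalls helper object.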
def call_and_serialize(self, func, max_depth=0):
# XXXX: This should really only be called in a new thread (as in
# the normal case where it is called in a threadpool)
call_result = func()
serialized, refs = serialize.get_object_tree(
call_result, max_depth=max_depth)
self.objects.update(refs)
return serialized
async def make_call(self, func, token):
response = {'$': {'type': CALL_RESULT_MESSAGE, 'token': token}}
try:
call_result = await self.loop.run_in_executor(
self.executor, self.call_and_serialize, func)
response['$']['status'] = 'success'
except ExceptionInProtocolError as eipe:
log.exception("Smart exception in protocol")
response['$']['status'] = 'error'
call_result = {
'message': str(eipe),
'traceback': ''.join(traceback.format_exception(
type(eipe.original_exc),
eipe.original_exc,
eipe.original_tb))
}
except Exception as e:
log.exception("Exception during RPC call:")
trace = traceback.format_exc()
try:
line_msg = ' [line ' + [
line.split(',')[0].strip()
for line in trace.split('line')
if '<module>' in line][0] + ']'
except Exception:
line_msg = ''
finally:
response['$']['status'] = 'error'
call_result = {
'message': '{0}{1}: {2}'.format(
e.__class__.__name__, line_msg, str(e)),
'traceback': trace
}
finally:
response['data'] = call_result
return response
def send_error(self, text, token):
self.send({
'$': {
'token': token,
'type': CALL_NACK_MESSAGE
},
'reason': text
})
def send_ack(self, token):
self.send({
'$': {
'token': token,
'type': CALL_ACK_MESSAGE
}
})
def send_pong(self):
self.send({
'$': {
'type': PONG_MESSAGE
}
})
def send(self, payload):
for writer in self.clients:
asyncio.run_coroutine_threadsafe(
writer.queue.put(payload),
self.loop
)
class SystemCalls(object):
def __init__(self, objects):
self.objects = objects
objects[id(self)] = self
def get_object_by_id(self, id):
return self.objects[id]
|
OpenTrons/opentrons-api
|
robot-server/robot_server/service/legacy/rpc/rpc.py
|
Python
|
apache-2.0
| 11,213
|
# Copyright 2015 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
from neutron.conf.agent import common as agent_conf
agent_conf.register_pddriver_opts()
@six.add_metaclass(abc.ABCMeta)
class PDDriverBase(object):
def __init__(self, router_id, subnet_id, ri_ifname):
self.router_id = router_id
self.subnet_id = subnet_id
self.ri_ifname = ri_ifname
@abc.abstractmethod
def enable(self, pmon, router_ns, ex_gw_ifname, lla):
"""Enable IPv6 Prefix Delegation for this PDDriver on the given
external interface, with the given link local address
"""
@abc.abstractmethod
def disable(self, pmon, router_ns):
"""Disable IPv6 Prefix Delegation for this PDDriver
"""
@abc.abstractmethod
def get_prefix(self):
"""Get the current assigned prefix for this PDDriver from the PD agent.
If no prefix is currently assigned, return
neutron_lib.constants.PROVISIONAL_IPV6_PD_PREFIX
"""
@staticmethod
@abc.abstractmethod
def get_sync_data():
"""Get the latest router_id, subnet_id, and ri_ifname from the PD agent
so that the PDDriver can be kept up to date
"""
|
noironetworks/neutron
|
neutron/agent/linux/pd_driver.py
|
Python
|
apache-2.0
| 1,796
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Random Forest Classifier Example.
"""
from __future__ import print_function
import sys
from pyspark import SparkContext, SQLContext
# $example on$
from pyspark.ml import Pipeline
from pyspark.ml.classification import RandomForestClassifier
from pyspark.ml.feature import StringIndexer, VectorIndexer
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
# $example off$
if __name__ == "__main__":
sc = SparkContext(appName="random_forest_classifier_example")
sqlContext = SQLContext(sc)
# $example on$
# Load and parse the data file, converting it to a DataFrame.
data = sqlContext.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
# Index labels, adding metadata to the label column.
# Fit on whole dataset to include all labels in index.
labelIndexer = StringIndexer(inputCol="label", outputCol="indexedLabel").fit(data)
# Automatically identify categorical features, and index them.
# Set maxCategories so features with > 4 distinct values are treated as continuous.
featureIndexer =\
VectorIndexer(inputCol="features", outputCol="indexedFeatures", maxCategories=4).fit(data)
# Split the data into training and test sets (30% held out for testing)
(trainingData, testData) = data.randomSplit([0.7, 0.3])
# Train a RandomForest model.
rf = RandomForestClassifier(labelCol="indexedLabel", featuresCol="indexedFeatures")
# Chain indexers and forest in a Pipeline
pipeline = Pipeline(stages=[labelIndexer, featureIndexer, rf])
# Train model. This also runs the indexers.
model = pipeline.fit(trainingData)
# Make predictions.
predictions = model.transform(testData)
# Select example rows to display.
predictions.select("prediction", "indexedLabel", "features").show(5)
# Select (prediction, true label) and compute test error
evaluator = MulticlassClassificationEvaluator(
labelCol="indexedLabel", predictionCol="prediction", metricName="precision")
accuracy = evaluator.evaluate(predictions)
print("Test Error = %g" % (1.0 - accuracy))
rfModel = model.stages[2]
print(rfModel) # summary only
# $example off$
sc.stop()
|
chenc10/Spark-PAF
|
examples/src/main/python/ml/random_forest_classifier_example.py
|
Python
|
apache-2.0
| 2,991
|
"""Support for Legacy MQTT vacuum."""
import json
import logging
import voluptuous as vol
from homeassistant.components import mqtt
from homeassistant.components.mqtt import (
CONF_UNIQUE_ID,
MqttAttributes,
MqttAvailability,
MqttDiscoveryUpdate,
MqttEntityDeviceInfo,
subscription,
)
from homeassistant.components.vacuum import (
SUPPORT_BATTERY,
SUPPORT_CLEAN_SPOT,
SUPPORT_FAN_SPEED,
SUPPORT_LOCATE,
SUPPORT_PAUSE,
SUPPORT_RETURN_HOME,
SUPPORT_SEND_COMMAND,
SUPPORT_STATUS,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
VacuumDevice,
)
from homeassistant.const import ATTR_SUPPORTED_FEATURES, CONF_DEVICE, CONF_NAME
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.icon import icon_for_battery_level
from .schema import MQTT_VACUUM_SCHEMA, services_to_strings, strings_to_services
_LOGGER = logging.getLogger(__name__)
SERVICE_TO_STRING = {
SUPPORT_TURN_ON: "turn_on",
SUPPORT_TURN_OFF: "turn_off",
SUPPORT_PAUSE: "pause",
SUPPORT_STOP: "stop",
SUPPORT_RETURN_HOME: "return_home",
SUPPORT_FAN_SPEED: "fan_speed",
SUPPORT_BATTERY: "battery",
SUPPORT_STATUS: "status",
SUPPORT_SEND_COMMAND: "send_command",
SUPPORT_LOCATE: "locate",
SUPPORT_CLEAN_SPOT: "clean_spot",
}
STRING_TO_SERVICE = {v: k for k, v in SERVICE_TO_STRING.items()}
DEFAULT_SERVICES = (
SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_STOP
| SUPPORT_RETURN_HOME
| SUPPORT_STATUS
| SUPPORT_BATTERY
| SUPPORT_CLEAN_SPOT
)
ALL_SERVICES = (
DEFAULT_SERVICES
| SUPPORT_PAUSE
| SUPPORT_LOCATE
| SUPPORT_FAN_SPEED
| SUPPORT_SEND_COMMAND
)
CONF_SUPPORTED_FEATURES = ATTR_SUPPORTED_FEATURES
CONF_BATTERY_LEVEL_TEMPLATE = "battery_level_template"
CONF_BATTERY_LEVEL_TOPIC = "battery_level_topic"
CONF_CHARGING_TEMPLATE = "charging_template"
CONF_CHARGING_TOPIC = "charging_topic"
CONF_CLEANING_TEMPLATE = "cleaning_template"
CONF_CLEANING_TOPIC = "cleaning_topic"
CONF_DOCKED_TEMPLATE = "docked_template"
CONF_DOCKED_TOPIC = "docked_topic"
CONF_ERROR_TEMPLATE = "error_template"
CONF_ERROR_TOPIC = "error_topic"
CONF_FAN_SPEED_LIST = "fan_speed_list"
CONF_FAN_SPEED_TEMPLATE = "fan_speed_template"
CONF_FAN_SPEED_TOPIC = "fan_speed_topic"
CONF_PAYLOAD_CLEAN_SPOT = "payload_clean_spot"
CONF_PAYLOAD_LOCATE = "payload_locate"
CONF_PAYLOAD_RETURN_TO_BASE = "payload_return_to_base"
CONF_PAYLOAD_START_PAUSE = "payload_start_pause"
CONF_PAYLOAD_STOP = "payload_stop"
CONF_PAYLOAD_TURN_OFF = "payload_turn_off"
CONF_PAYLOAD_TURN_ON = "payload_turn_on"
CONF_SEND_COMMAND_TOPIC = "send_command_topic"
CONF_SET_FAN_SPEED_TOPIC = "set_fan_speed_topic"
DEFAULT_NAME = "MQTT Vacuum"
DEFAULT_PAYLOAD_CLEAN_SPOT = "clean_spot"
DEFAULT_PAYLOAD_LOCATE = "locate"
DEFAULT_PAYLOAD_RETURN_TO_BASE = "return_to_base"
DEFAULT_PAYLOAD_START_PAUSE = "start_pause"
DEFAULT_PAYLOAD_STOP = "stop"
DEFAULT_PAYLOAD_TURN_OFF = "turn_off"
DEFAULT_PAYLOAD_TURN_ON = "turn_on"
DEFAULT_RETAIN = False
DEFAULT_SERVICE_STRINGS = services_to_strings(DEFAULT_SERVICES, SERVICE_TO_STRING)
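# Illustrative note (the values are examples, not an exhaustive list):
# DEFAULT_SERVICE_STRINGS is the string form of the DEFAULT_SERVICES bitmask,
# e.g. ["turn_on", "turn_off", "stop", "return_home", "status", "battery",
# "clean_spot"], and _setup_from_config() below converts a user-supplied list
# such as ["turn_on", "turn_off", "fan_speed"] back into a feature bitmask via
# strings_to_services(supported_feature_strings, STRING_TO_SERVICE).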
PLATFORM_SCHEMA_LEGACY = (
mqtt.MQTT_BASE_PLATFORM_SCHEMA.extend(
{
vol.Inclusive(CONF_BATTERY_LEVEL_TEMPLATE, "battery"): cv.template,
vol.Inclusive(
CONF_BATTERY_LEVEL_TOPIC, "battery"
): mqtt.valid_publish_topic,
vol.Inclusive(CONF_CHARGING_TEMPLATE, "charging"): cv.template,
vol.Inclusive(CONF_CHARGING_TOPIC, "charging"): mqtt.valid_publish_topic,
vol.Inclusive(CONF_CLEANING_TEMPLATE, "cleaning"): cv.template,
vol.Inclusive(CONF_CLEANING_TOPIC, "cleaning"): mqtt.valid_publish_topic,
vol.Optional(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA,
vol.Inclusive(CONF_DOCKED_TEMPLATE, "docked"): cv.template,
vol.Inclusive(CONF_DOCKED_TOPIC, "docked"): mqtt.valid_publish_topic,
vol.Inclusive(CONF_ERROR_TEMPLATE, "error"): cv.template,
vol.Inclusive(CONF_ERROR_TOPIC, "error"): mqtt.valid_publish_topic,
vol.Optional(CONF_FAN_SPEED_LIST, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
vol.Inclusive(CONF_FAN_SPEED_TEMPLATE, "fan_speed"): cv.template,
vol.Inclusive(CONF_FAN_SPEED_TOPIC, "fan_speed"): mqtt.valid_publish_topic,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(
CONF_PAYLOAD_CLEAN_SPOT, default=DEFAULT_PAYLOAD_CLEAN_SPOT
): cv.string,
vol.Optional(
CONF_PAYLOAD_LOCATE, default=DEFAULT_PAYLOAD_LOCATE
): cv.string,
vol.Optional(
CONF_PAYLOAD_RETURN_TO_BASE, default=DEFAULT_PAYLOAD_RETURN_TO_BASE
): cv.string,
vol.Optional(
CONF_PAYLOAD_START_PAUSE, default=DEFAULT_PAYLOAD_START_PAUSE
): cv.string,
vol.Optional(CONF_PAYLOAD_STOP, default=DEFAULT_PAYLOAD_STOP): cv.string,
vol.Optional(
CONF_PAYLOAD_TURN_OFF, default=DEFAULT_PAYLOAD_TURN_OFF
): cv.string,
vol.Optional(
CONF_PAYLOAD_TURN_ON, default=DEFAULT_PAYLOAD_TURN_ON
): cv.string,
vol.Optional(CONF_SEND_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_SET_FAN_SPEED_TOPIC): mqtt.valid_publish_topic,
vol.Optional(
CONF_SUPPORTED_FEATURES, default=DEFAULT_SERVICE_STRINGS
): vol.All(cv.ensure_list, [vol.In(STRING_TO_SERVICE.keys())]),
vol.Optional(CONF_UNIQUE_ID): cv.string,
vol.Optional(mqtt.CONF_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(mqtt.CONF_RETAIN, default=DEFAULT_RETAIN): cv.boolean,
}
)
.extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema)
.extend(mqtt.MQTT_JSON_ATTRS_SCHEMA.schema)
.extend(MQTT_VACUUM_SCHEMA.schema)
)
async def async_setup_entity_legacy(
config, async_add_entities, config_entry, discovery_hash
):
"""Set up a MQTT Vacuum Legacy."""
async_add_entities([MqttVacuum(config, config_entry, discovery_hash)])
# pylint: disable=too-many-ancestors
class MqttVacuum(
MqttAttributes,
MqttAvailability,
MqttDiscoveryUpdate,
MqttEntityDeviceInfo,
VacuumDevice,
):
"""Representation of a MQTT-controlled legacy vacuum."""
def __init__(self, config, config_entry, discovery_info):
"""Initialize the vacuum."""
self._cleaning = False
self._charging = False
self._docked = False
self._error = None
self._status = "Unknown"
self._battery_level = 0
self._fan_speed = "unknown"
self._fan_speed_list = []
self._sub_state = None
self._unique_id = config.get(CONF_UNIQUE_ID)
# Load config
self._setup_from_config(config)
device_config = config.get(CONF_DEVICE)
MqttAttributes.__init__(self, config)
MqttAvailability.__init__(self, config)
MqttDiscoveryUpdate.__init__(self, discovery_info, self.discovery_update)
MqttEntityDeviceInfo.__init__(self, device_config, config_entry)
def _setup_from_config(self, config):
self._name = config[CONF_NAME]
supported_feature_strings = config[CONF_SUPPORTED_FEATURES]
self._supported_features = strings_to_services(
supported_feature_strings, STRING_TO_SERVICE
)
self._fan_speed_list = config[CONF_FAN_SPEED_LIST]
self._qos = config[mqtt.CONF_QOS]
self._retain = config[mqtt.CONF_RETAIN]
self._command_topic = config.get(mqtt.CONF_COMMAND_TOPIC)
self._set_fan_speed_topic = config.get(CONF_SET_FAN_SPEED_TOPIC)
self._send_command_topic = config.get(CONF_SEND_COMMAND_TOPIC)
self._payloads = {
key: config.get(key)
for key in (
CONF_PAYLOAD_TURN_ON,
CONF_PAYLOAD_TURN_OFF,
CONF_PAYLOAD_RETURN_TO_BASE,
CONF_PAYLOAD_STOP,
CONF_PAYLOAD_CLEAN_SPOT,
CONF_PAYLOAD_LOCATE,
CONF_PAYLOAD_START_PAUSE,
)
}
self._state_topics = {
key: config.get(key)
for key in (
CONF_BATTERY_LEVEL_TOPIC,
CONF_CHARGING_TOPIC,
CONF_CLEANING_TOPIC,
CONF_DOCKED_TOPIC,
CONF_ERROR_TOPIC,
CONF_FAN_SPEED_TOPIC,
)
}
self._templates = {
key: config.get(key)
for key in (
CONF_BATTERY_LEVEL_TEMPLATE,
CONF_CHARGING_TEMPLATE,
CONF_CLEANING_TEMPLATE,
CONF_DOCKED_TEMPLATE,
CONF_ERROR_TEMPLATE,
CONF_FAN_SPEED_TEMPLATE,
)
}
async def discovery_update(self, discovery_payload):
"""Handle updated discovery message."""
config = PLATFORM_SCHEMA_LEGACY(discovery_payload)
self._setup_from_config(config)
await self.attributes_discovery_update(config)
await self.availability_discovery_update(config)
await self.device_info_discovery_update(config)
await self._subscribe_topics()
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Subscribe MQTT events."""
await super().async_added_to_hass()
await self._subscribe_topics()
async def async_will_remove_from_hass(self):
"""Unsubscribe when removed."""
await subscription.async_unsubscribe_topics(self.hass, self._sub_state)
await MqttAttributes.async_will_remove_from_hass(self)
await MqttAvailability.async_will_remove_from_hass(self)
async def _subscribe_topics(self):
"""(Re)Subscribe to topics."""
for tpl in self._templates.values():
if tpl is not None:
tpl.hass = self.hass
@callback
def message_received(msg):
"""Handle new MQTT message."""
if (
msg.topic == self._state_topics[CONF_BATTERY_LEVEL_TOPIC]
and self._templates[CONF_BATTERY_LEVEL_TEMPLATE]
):
battery_level = self._templates[
CONF_BATTERY_LEVEL_TEMPLATE
].async_render_with_possible_json_value(msg.payload, error_value=None)
if battery_level:
self._battery_level = int(battery_level)
if (
msg.topic == self._state_topics[CONF_CHARGING_TOPIC]
and self._templates[CONF_CHARGING_TEMPLATE]
):
charging = self._templates[
CONF_CHARGING_TEMPLATE
].async_render_with_possible_json_value(msg.payload, error_value=None)
if charging:
self._charging = cv.boolean(charging)
if (
msg.topic == self._state_topics[CONF_CLEANING_TOPIC]
and self._templates[CONF_CLEANING_TEMPLATE]
):
cleaning = self._templates[
CONF_CLEANING_TEMPLATE
].async_render_with_possible_json_value(msg.payload, error_value=None)
if cleaning:
self._cleaning = cv.boolean(cleaning)
if (
msg.topic == self._state_topics[CONF_DOCKED_TOPIC]
and self._templates[CONF_DOCKED_TEMPLATE]
):
docked = self._templates[
CONF_DOCKED_TEMPLATE
].async_render_with_possible_json_value(msg.payload, error_value=None)
if docked:
self._docked = cv.boolean(docked)
if (
msg.topic == self._state_topics[CONF_ERROR_TOPIC]
and self._templates[CONF_ERROR_TEMPLATE]
):
error = self._templates[
CONF_ERROR_TEMPLATE
].async_render_with_possible_json_value(msg.payload, error_value=None)
if error is not None:
self._error = cv.string(error)
if self._docked:
if self._charging:
self._status = "Docked & Charging"
else:
self._status = "Docked"
elif self._cleaning:
self._status = "Cleaning"
elif self._error:
self._status = f"Error: {self._error}"
else:
self._status = "Stopped"
if (
msg.topic == self._state_topics[CONF_FAN_SPEED_TOPIC]
and self._templates[CONF_FAN_SPEED_TEMPLATE]
):
fan_speed = self._templates[
CONF_FAN_SPEED_TEMPLATE
].async_render_with_possible_json_value(msg.payload, error_value=None)
if fan_speed:
self._fan_speed = fan_speed
self.async_write_ha_state()
topics_list = {topic for topic in self._state_topics.values() if topic}
self._sub_state = await subscription.async_subscribe_topics(
self.hass,
self._sub_state,
{
f"topic{i}": {
"topic": topic,
"msg_callback": message_received,
"qos": self._qos,
}
for i, topic in enumerate(topics_list)
},
)
@property
def name(self):
"""Return the name of the vacuum."""
return self._name
@property
def should_poll(self):
"""No polling needed for an MQTT vacuum."""
return False
@property
def is_on(self):
"""Return true if vacuum is on."""
return self._cleaning
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
@property
def status(self):
"""Return a status string for the vacuum."""
if self.supported_features & SUPPORT_STATUS == 0:
return None
return self._status
@property
def fan_speed(self):
"""Return the status of the vacuum."""
if self.supported_features & SUPPORT_FAN_SPEED == 0:
return None
return self._fan_speed
@property
def fan_speed_list(self):
"""Return the status of the vacuum."""
if self.supported_features & SUPPORT_FAN_SPEED == 0:
return []
return self._fan_speed_list
@property
def battery_level(self):
"""Return the status of the vacuum."""
if self.supported_features & SUPPORT_BATTERY == 0:
return
return max(0, min(100, self._battery_level))
@property
def battery_icon(self):
"""Return the battery icon for the vacuum cleaner."""
if self.supported_features & SUPPORT_BATTERY == 0:
return
return icon_for_battery_level(
battery_level=self.battery_level, charging=self._charging
)
@property
def supported_features(self):
"""Flag supported features."""
return self._supported_features
async def async_turn_on(self, **kwargs):
"""Turn the vacuum on."""
if self.supported_features & SUPPORT_TURN_ON == 0:
return
mqtt.async_publish(
self.hass,
self._command_topic,
self._payloads[CONF_PAYLOAD_TURN_ON],
self._qos,
self._retain,
)
self._status = "Cleaning"
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn the vacuum off."""
if self.supported_features & SUPPORT_TURN_OFF == 0:
return None
mqtt.async_publish(
self.hass,
self._command_topic,
self._payloads[CONF_PAYLOAD_TURN_OFF],
self._qos,
self._retain,
)
self._status = "Turning Off"
self.async_write_ha_state()
async def async_stop(self, **kwargs):
"""Stop the vacuum."""
if self.supported_features & SUPPORT_STOP == 0:
return None
mqtt.async_publish(
self.hass,
self._command_topic,
self._payloads[CONF_PAYLOAD_STOP],
self._qos,
self._retain,
)
self._status = "Stopping the current task"
self.async_write_ha_state()
async def async_clean_spot(self, **kwargs):
"""Perform a spot clean-up."""
if self.supported_features & SUPPORT_CLEAN_SPOT == 0:
return None
mqtt.async_publish(
self.hass,
self._command_topic,
self._payloads[CONF_PAYLOAD_CLEAN_SPOT],
self._qos,
self._retain,
)
self._status = "Cleaning spot"
self.async_write_ha_state()
async def async_locate(self, **kwargs):
"""Locate the vacuum (usually by playing a song)."""
if self.supported_features & SUPPORT_LOCATE == 0:
return None
mqtt.async_publish(
self.hass,
self._command_topic,
self._payloads[CONF_PAYLOAD_LOCATE],
self._qos,
self._retain,
)
self._status = "Hi, I'm over here!"
self.async_write_ha_state()
async def async_start_pause(self, **kwargs):
"""Start, pause or resume the cleaning task."""
if self.supported_features & SUPPORT_PAUSE == 0:
return None
mqtt.async_publish(
self.hass,
self._command_topic,
self._payloads[CONF_PAYLOAD_START_PAUSE],
self._qos,
self._retain,
)
self._status = "Pausing/Resuming cleaning..."
self.async_write_ha_state()
async def async_return_to_base(self, **kwargs):
"""Tell the vacuum to return to its dock."""
if self.supported_features & SUPPORT_RETURN_HOME == 0:
return None
mqtt.async_publish(
self.hass,
self._command_topic,
self._payloads[CONF_PAYLOAD_RETURN_TO_BASE],
self._qos,
self._retain,
)
self._status = "Returning home..."
self.async_write_ha_state()
async def async_set_fan_speed(self, fan_speed, **kwargs):
"""Set fan speed."""
if (
self.supported_features & SUPPORT_FAN_SPEED == 0
) or fan_speed not in self._fan_speed_list:
return None
mqtt.async_publish(
self.hass, self._set_fan_speed_topic, fan_speed, self._qos, self._retain
)
self._status = f"Setting fan to {fan_speed}..."
self.async_write_ha_state()
async def async_send_command(self, command, params=None, **kwargs):
"""Send a command to a vacuum cleaner."""
if self.supported_features & SUPPORT_SEND_COMMAND == 0:
return
if params:
message = {"command": command}
message.update(params)
message = json.dumps(message)
else:
message = command
mqtt.async_publish(
self.hass, self._send_command_topic, message, self._qos, self._retain
)
self._status = f"Sending command {message}..."
self.async_write_ha_state()
|
leppa/home-assistant
|
homeassistant/components/mqtt/vacuum/schema_legacy.py
|
Python
|
apache-2.0
| 19,559
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .client import CustomerAssetServiceClient
__all__ = ("CustomerAssetServiceClient",)
|
googleads/google-ads-python
|
google/ads/googleads/v9/services/services/customer_asset_service/__init__.py
|
Python
|
apache-2.0
| 690
|
# Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class ErrorStrings:
BAD_EXP_RECV = "Bad {0} {1}: expected {2}, received {3}"
BAD_ENCODING = "Bad {0} {1}: encoding mismatch"
class BaseError(Exception):
"""Base class for exceptions defined in this module."""
def __init__(self, args):
        [setattr(self, k, v) for k, v in args.items() if k != 'self']
class KMIPServerError(BaseError):
"""Base Exception for KMIP server errors."""
def __init__(self, args):
super(KMIPServerError, self).__init__(args)
class KMIPServerZombieError(KMIPServerError):
"""KMIP server error for hung and persistent live KMIP servers."""
def __init__(self, pid):
message = 'KMIP server alive after termination: PID {0}'.format(pid)
super(KMIPServerZombieError, self).__init__({'message': message})
def __str__(self):
return self.message
class KMIPServerSuicideError(KMIPServerError):
"""KMIP server error for prematurely dead KMIP servers."""
def __init__(self, pid):
message = 'KMIP server dead prematurely: PID {0}'.format(pid)
super(KMIPServerSuicideError, self).__init__({'message': message})
def __str__(self):
return self.message
class InitError(BaseError):
"""Exception thrown for bad initializations."""
def __init__(self, cls, exp, recv):
super(InitError, self).__init__(locals())
def __str__(self):
msg = "Tried to initialize {0} instance with bad type: "
msg += "expected {1}, received {2}"
return msg.format(self.cls, self.exp, self.recv)
class WriteValueError(BaseError):
def __init__(self, cls, attr, value):
super(WriteValueError, self).__init__(locals())
def __str__(self):
msg = "Tried to write {0}.{1} with invalid value: {2}"
return msg.format(self.cls, self.attr, self.value)
class WriteTypeError(BaseError):
def __init__(self, cls, attr, value):
super(WriteTypeError, self).__init__(locals())
def __str__(self):
msg = "Tried to write {0}.{1} with invalid type: {2}"
return msg.format(self.cls, self.attr, self.value)
class WriteOverflowError(BaseError):
def __init__(self, cls, attr, exp, recv):
super(WriteOverflowError, self).__init__(locals())
def __str__(self):
msg = "Tried to write {0}.{1} with too many bytes: "
msg += "expected {2}, received {3}"
return msg.format(self.cls, self.attr, self.exp, self.recv)
class ReadValueError(BaseError):
def __init__(self, cls, attr, exp, recv):
super(ReadValueError, self).__init__(locals())
def __str__(self):
msg = "Tried to read {0}.{1}: expected {2}, received {3}"
return msg.format(self.cls, self.attr, self.exp, self.recv)
class InvalidLengthError(ValueError):
def __init__(self, cls, exp, recv):
msg = "Invalid length read for {0}: expected {1}, received {2}"
super(InvalidLengthError, self).__init__(msg.format(cls, exp, recv))
class StreamNotEmptyError(BaseError):
def __init__(self, cls, extra):
super(StreamNotEmptyError, self).__init__(locals())
def __str__(self):
msg = "Invalid length used to read {0}, bytes remaining: {1}"
return msg.format(self.cls, self.extra)
class StateTypeError(TypeError):
def __init__(self, cls, exp, recv):
msg = "Tried to initialize {0} instance with bad type: "
msg += "expected {1}, received {2}"
super(StateTypeError, self).__init__(msg.format(cls, exp, recv))
class StateOverflowError(ValueError):
def __init__(self, cls, attr, exp, recv):
msg = "Tried to write {0}.{1} with too many bytes: "
msg += "expected {2}, received {3}"
super(StateOverflowError, self).__init__(msg.format(cls, attr, exp,
recv))
|
viktorTarasov/PyKMIP
|
kmip/core/errors.py
|
Python
|
apache-2.0
| 4,483
|
# (c) Copyright 2014 Brocade Communications Systems Inc.
# All Rights Reserved.
#
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
ZoneManager is responsible for managing access control using FC zoning
when zoning mode is set as 'fabric'.
ZoneManager provides interfaces to add connection and remove connection
for given initiator and target list associated with a FC volume attach and
detach operation.
**Related Flags**
:zone_driver: Used by:class:`ZoneManager`.
Defaults to
`cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver.BrcdFCZoneDriver`
:zoning_policy: Used by: class: 'ZoneManager'. Defaults to 'none'
"""
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from cinder import exception
from cinder.i18n import _, _LI
from cinder.volume import configuration as config
from cinder.zonemanager import fc_common
LOG = logging.getLogger(__name__)
zone_manager_opts = [
cfg.StrOpt('zone_driver',
default='cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver'
'.BrcdFCZoneDriver',
help='FC Zone Driver responsible for zone management'),
cfg.StrOpt('zoning_policy',
default='initiator-target',
help='Zoning policy configured by user; valid values include '
'"initiator-target" or "initiator"'),
cfg.StrOpt('fc_fabric_names',
default=None,
help='Comma separated list of Fibre Channel fabric names.'
' This list of names is used to retrieve other SAN credentials'
' for connecting to each SAN fabric'),
cfg.StrOpt('fc_san_lookup_service',
default='cinder.zonemanager.drivers.brocade'
'.brcd_fc_san_lookup_service.BrcdFCSanLookupService',
help='FC SAN Lookup Service'),
]
CONF = cfg.CONF
CONF.register_opts(zone_manager_opts, 'fc-zone-manager')
class ZoneManager(fc_common.FCCommon):
"""Manages Connection control during attach/detach.
Version History:
1.0 - Initial version
1.0.1 - Added __new__ for singleton
"""
VERSION = "1.0.1"
driver = None
fabric_names = []
def __new__(class_, *args, **kwargs):
if not hasattr(class_, "_instance"):
            class_._instance = object.__new__(class_)
return class_._instance
def __init__(self, **kwargs):
"""Load the driver from the one specified in args, or from flags."""
super(ZoneManager, self).__init__(**kwargs)
self.configuration = config.Configuration(zone_manager_opts,
'fc-zone-manager')
self._build_driver()
def _build_driver(self):
zone_driver = self.configuration.zone_driver
LOG.debug("Zone Driver from config: {%s}", zone_driver)
# Initialize vendor specific implementation of FCZoneDriver
self.driver = importutils.import_object(
zone_driver,
configuration=self.configuration)
def get_zoning_state_ref_count(self, initiator_wwn, target_wwn):
"""Zone management state check.
Performs state check for given I-T pair to return the current count of
active attach for the pair.
"""
# TODO(sk): ref count state management
count = 0
# check the state for I-T pair
return count
def add_connection(self, initiator_target_map):
"""Add connection control.
Adds connection control for the given initiator target map.
initiator_target_map - each initiator WWN mapped to a list of one
or more target WWN:
eg:
{
'10008c7cff523b01': ['20240002ac000a50', '20240002ac000a40']
}
"""
connected_fabric = None
try:
for initiator in initiator_target_map.keys():
target_list = initiator_target_map[initiator]
LOG.debug("Target List: %s", target_list)
# get SAN context for the target list
fabric_map = self.get_san_context(target_list)
LOG.debug("Fabric Map after context lookup: %s", fabric_map)
# iterate over each SAN and apply connection control
for fabric in fabric_map.keys():
connected_fabric = fabric
t_list = fabric_map[fabric]
# get valid I-T map to add connection control
i_t_map = {initiator: t_list}
valid_i_t_map = self.get_valid_initiator_target_map(
i_t_map, True)
LOG.info(_LI("Final filtered map for fabric: %s"),
valid_i_t_map)
# Call driver to add connection control
self.driver.add_connection(fabric, valid_i_t_map)
LOG.info(_LI("Add Connection: Finished iterating "
"over all target list"))
except Exception as e:
msg = _("Failed adding connection for fabric=%(fabric)s: "
"Error: %(err)s") % {'fabric': connected_fabric,
'err': e}
LOG.error(msg)
raise exception.ZoneManagerException(reason=msg)
def delete_connection(self, initiator_target_map):
"""Delete connection.
Updates/deletes connection control for the given initiator target map.
initiator_target_map - each initiator WWN mapped to a list of one
or more target WWN:
eg:
{
'10008c7cff523b01': ['20240002ac000a50', '20240002ac000a40']
}
"""
connected_fabric = None
try:
for initiator in initiator_target_map.keys():
target_list = initiator_target_map[initiator]
LOG.info(_LI("Delete connection Target List: %s"),
target_list)
# get SAN context for the target list
fabric_map = self.get_san_context(target_list)
LOG.debug("Delete connection Fabric Map from SAN "
"context: %s", fabric_map)
# iterate over each SAN and apply connection control
for fabric in fabric_map.keys():
connected_fabric = fabric
t_list = fabric_map[fabric]
# get valid I-T map to add connection control
i_t_map = {initiator: t_list}
valid_i_t_map = self.get_valid_initiator_target_map(
i_t_map, False)
LOG.info(_LI("Final filtered map for delete "
"connection: %s"), valid_i_t_map)
# Call driver to delete connection control
if len(valid_i_t_map) > 0:
self.driver.delete_connection(fabric, valid_i_t_map)
LOG.debug("Delete Connection - Finished iterating over all"
" target list")
except Exception as e:
msg = _("Failed removing connection for fabric=%(fabric)s: "
"Error: %(err)s") % {'fabric': connected_fabric,
'err': e}
LOG.error(msg)
raise exception.ZoneManagerException(reason=msg)
def get_san_context(self, target_wwn_list):
"""SAN lookup for end devices.
Look up each SAN configured and return a map of SAN (fabric IP)
to list of target WWNs visible to the fabric.
"""
fabric_map = self.driver.get_san_context(target_wwn_list)
LOG.debug("Got SAN context: %s", fabric_map)
return fabric_map
def get_valid_initiator_target_map(self, initiator_target_map,
add_control):
"""Reference count check for end devices.
Looks up the reference count for each initiator-target pair from the
map and returns a filtered list based on the operation type
add_control - operation type can be true for add connection control
and false for remove connection control
"""
filtered_i_t_map = {}
for initiator in initiator_target_map.keys():
t_list = initiator_target_map[initiator]
for target in t_list:
count = self.get_zoning_state_ref_count(initiator, target)
if add_control:
if count > 0:
t_list.remove(target)
# update count = count + 1
else:
if count > 1:
t_list.remove(target)
# update count = count - 1
if t_list:
filtered_i_t_map[initiator] = t_list
else:
LOG.info(_LI("No targets to add or remove connection for "
"I: %s"), initiator)
return filtered_i_t_map
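# Illustrative walk-through only (zm is an assumed ZoneManager instance, the
# WWNs are reused from the docstrings above, and the reference counts are
# invented). If get_zoning_state_ref_count() reported 1 active attach for
# ('10008c7cff523b01', '20240002ac000a50') and 0 for '20240002ac000a40':
#
#     zm.get_valid_initiator_target_map(
#         {'10008c7cff523b01': ['20240002ac000a50', '20240002ac000a40']},
#         add_control=True)
#     # -> {'10008c7cff523b01': ['20240002ac000a40']}
#
# The already-zoned target is filtered out so it is not zoned twice, while a
# delete request (add_control=False) would keep it because its count is not
# greater than 1.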
|
tmenjo/cinder-2015.1.0
|
cinder/zonemanager/fc_zone_manager.py
|
Python
|
apache-2.0
| 9,613
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# All Exchanges and Queues related to liveaction.
from __future__ import absolute_import
from kombu import Exchange, Queue
from st2common.transport import publishers
__all__ = [
"ActionExecutionPublisher",
"ActionExecutionOutputPublisher",
"get_queue",
"get_output_queue",
]
EXECUTION_XCHG = Exchange("st2.execution", type="topic")
EXECUTION_OUTPUT_XCHG = Exchange("st2.execution.output", type="topic")
class ActionExecutionPublisher(publishers.CUDPublisher):
def __init__(self):
super(ActionExecutionPublisher, self).__init__(exchange=EXECUTION_XCHG)
class ActionExecutionOutputPublisher(publishers.CUDPublisher):
def __init__(self):
super(ActionExecutionOutputPublisher, self).__init__(
exchange=EXECUTION_OUTPUT_XCHG
)
def get_queue(name=None, routing_key=None, exclusive=False, auto_delete=False):
return Queue(
name,
EXECUTION_XCHG,
routing_key=routing_key,
exclusive=exclusive,
auto_delete=auto_delete,
)
def get_output_queue(name=None, routing_key=None, exclusive=False, auto_delete=False):
return Queue(
name,
EXECUTION_OUTPUT_XCHG,
routing_key=routing_key,
exclusive=exclusive,
auto_delete=auto_delete,
)
|
StackStorm/st2
|
st2common/st2common/transport/execution.py
|
Python
|
apache-2.0
| 1,909
|
import django_filters
from django.contrib.auth.models import User, Group
from rest_framework import viewsets, mixins
from rest_framework.response import Response
from rest_framework.authentication import TokenAuthentication
from rest_framework import filters
from api.pagination import LargeResultsSetPagination
from api.permissions import IsUser
from api.serializers import NotificationSerializer
from api.models import Notification
class NotificationFilter(django_filters.FilterSet):
class Meta:
model = Notification
fields = ['id', 'type', 'created', 'title', 'description', 'user', 'xplevel', 'badge',]
class NotificationViewSet(viewsets.ModelViewSet):
queryset = Notification.objects.all()
serializer_class = NotificationSerializer
pagination_class = LargeResultsSetPagination
authentication_classes = (TokenAuthentication,)
permission_classes = (IsUser,)
filter_class = NotificationFilter
|
Oinweb/py-fly
|
api/views/notification.py
|
Python
|
bsd-2-clause
| 942
|
# django-openid-auth - OpenID integration for django.contrib.auth
#
# Copyright (C) 2009-2013 Canonical Ltd.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Tests for the django_openid_auth Admin login form replacement.
"""
import unittest
from django.conf import settings
from django.contrib.auth.models import User, AnonymousUser
settings.OPENID_USE_AS_ADMIN_LOGIN = True
from django.test import TestCase
def create_user(is_staff=False, authenticated=True):
"""
Create and return a user, either the AnonymousUser or a normal Django user,
setting the is_staff attribute if appropriate.
"""
if not authenticated:
return AnonymousUser()
else:
user = User(
username=u'testing', email='testing@example.com',
is_staff=is_staff)
user.set_password(u'test')
        user.save()
        return user
class SiteAdminTests(TestCase):
"""
TestCase for accessing /admin/ when the django_openid_auth form replacement
is in use.
"""
def test_admin_site_with_openid_login_authenticated_non_staff(self):
"""
If the request has an authenticated user, who is not flagged as a
staff member, then they get a failure response.
"""
create_user()
self.client.login(username='testing', password='test')
response = self.client.get('/admin/')
self.assertTrue('User testing does not have admin access.' in
response.content, 'Missing error message in response')
def test_admin_site_with_openid_login_non_authenticated_user(self):
"""
Unauthenticated users accessing the admin page should be directed to
the OpenID login url.
"""
response = self.client.get('/admin/')
self.assertEqual(302, response.status_code)
self.assertEqual('http://testserver' + getattr(settings, 'LOGIN_URL',
'/openid/login') + '?next=/admin/',
response['Location'])
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
|
somcomltd/django-openid-auth
|
django_openid_auth/tests/test_admin.py
|
Python
|
bsd-2-clause
| 3,301
|
from __future__ import unicode_literals
import json
from django.test import TestCase, RequestFactory
from django.core.urlresolvers import reverse
from ..models import get_application_model
from ..settings import oauth2_settings
from ..views import ProtectedResourceView
from ..compat import get_user_model
from .test_utils import TestCaseUtils
Application = get_application_model()
UserModel = get_user_model()
# mocking a protected resource view
class ResourceView(ProtectedResourceView):
def get(self, request, *args, **kwargs):
return "This is a protected resource"
class BaseTest(TestCaseUtils, TestCase):
def setUp(self):
self.factory = RequestFactory()
self.test_user = UserModel.objects.create_user("test_user", "test@user.com", "123456")
self.dev_user = UserModel.objects.create_user("dev_user", "dev@user.com", "123456")
self.application = Application(
name="Test Password Application",
user=self.dev_user,
client_type=Application.CLIENT_PUBLIC,
authorization_grant_type=Application.GRANT_PASSWORD,
)
self.application.save()
oauth2_settings._SCOPES = ['read', 'write']
def tearDown(self):
self.application.delete()
self.test_user.delete()
self.dev_user.delete()
class TestPasswordTokenView(BaseTest):
def test_get_token(self):
"""
Request an access token using Resource Owner Password Flow
"""
token_request_data = {
'grant_type': 'password',
'username': 'test_user',
'password': '123456',
}
auth_headers = self.get_basic_auth_header(self.application.client_id, self.application.client_secret)
response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 200)
content = json.loads(response.content.decode("utf-8"))
self.assertEqual(content['token_type'], "Bearer")
self.assertEqual(content['scope'], "read write")
self.assertEqual(content['expires_in'], oauth2_settings.ACCESS_TOKEN_EXPIRE_SECONDS)
def test_bad_credentials(self):
"""
Request an access token using Resource Owner Password Flow
"""
token_request_data = {
'grant_type': 'password',
'username': 'test_user',
'password': 'NOT_MY_PASS',
}
auth_headers = self.get_basic_auth_header(self.application.client_id, self.application.client_secret)
response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
self.assertEqual(response.status_code, 400)
class TestPasswordProtectedResource(BaseTest):
def test_password_resource_access_allowed(self):
token_request_data = {
'grant_type': 'password',
'username': 'test_user',
'password': '123456',
}
auth_headers = self.get_basic_auth_header(self.application.client_id, self.application.client_secret)
response = self.client.post(reverse('oauth2_provider:token'), data=token_request_data, **auth_headers)
content = json.loads(response.content.decode("utf-8"))
access_token = content['access_token']
# use token to access the resource
auth_headers = {
'HTTP_AUTHORIZATION': 'Bearer ' + access_token,
}
request = self.factory.get("/fake-resource", **auth_headers)
request.user = self.test_user
view = ResourceView.as_view()
response = view(request)
self.assertEqual(response, "This is a protected resource")
|
Natgeoed/django-oauth-toolkit
|
oauth2_provider/tests/test_password.py
|
Python
|
bsd-2-clause
| 3,715
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pymisp import PyMISP
from keys import url, key
import argparse
import tools
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Create a given number of events containing a given number of attributes each.')
parser.add_argument("-l", "--limit", type=int, help="Number of events to create (default 1)")
parser.add_argument("-a", "--attribute", type=int, help="Number of attributes per event (default 3000)")
args = parser.parse_args()
misp = PyMISP(url, key, True, 'json')
if args.limit is None:
args.limit = 1
if args.attribute is None:
args.attribute = 3000
for i in range(args.limit):
tools.create_massive_dummy_events(misp, args.attribute)
|
iglocska/PyMISP
|
examples/events/create_massive_dummy_events.py
|
Python
|
bsd-2-clause
| 777
|
# Fix for older setuptools
import re
import os
from setuptools import setup, find_packages
def fpath(name):
return os.path.join(os.path.dirname(__file__), name)
def read(fname):
return open(fpath(fname)).read()
def desc():
info = read('README.rst')
try:
return info + '\n\n' + read('doc/changelog.rst')
except IOError:
return info
# grep flask_admin/__init__.py since python 3.x cannot import it before using 2to3
file_text = read(fpath('flask_admin/__init__.py'))
def grep(attrname):
pattern = r"{0}\W*=\W*'([^']+)'".format(attrname)
strval, = re.findall(pattern, file_text)
return strval
setup(
name='Flask-Admin',
version=grep('__version__'),
url='https://github.com/mrjoes/flask-admin/',
license='BSD',
author=grep('__author__'),
author_email=grep('__email__'),
description='Simple and extensible admin interface framework for Flask',
long_description=desc(),
packages=find_packages(),
include_package_data=True,
zip_safe=False,
platforms='any',
install_requires=[
'Flask>=0.7',
'wtforms'
],
tests_require=[
'nose>=1.0',
'pillow',
'mongoengine',
'pymongo',
'wtf-peewee',
'sqlalchemy',
'flask-mongoengine',
'flask-sqlalchemy'
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
],
test_suite='nose.collector'
)
|
pawl/flask-admin
|
setup.py
|
Python
|
bsd-3-clause
| 1,731
|
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
TODO
"""
from __future__ import absolute_import
__docformat__ = 'restructuredtext'
from . import fmristat
from nipy.testing import Tester
test = Tester().test
bench = Tester().bench
|
alexis-roche/nipy
|
nipy/modalities/fmri/__init__.py
|
Python
|
bsd-3-clause
| 303
|
import logging
from optparse import make_option
from django.core.management.base import BaseCommand
from amo.utils import chunked
from mkt.developers.tasks import refresh_iarc_ratings
log = logging.getLogger('z.task')
class Command(BaseCommand):
"""
Refresh old or corrupt IARC ratings by re-fetching the certificate.
"""
option_list = BaseCommand.option_list + (
make_option('--apps',
help='Webapp ids to process. Use commas to separate '
'multiple ids.'),
)
help = __doc__
def handle(self, *args, **kw):
from mkt.webapps.models import Webapp
# Get apps.
apps = Webapp.objects.filter(iarc_info__isnull=False)
ids = kw.get('apps')
if ids:
apps = apps.filter(
id__in=(int(id.strip()) for id in ids.split(',')))
for chunk in chunked(apps.values_list('id', flat=True), 100):
refresh_iarc_ratings.delay(chunk)
|
jinankjain/zamboni
|
mkt/developers/management/commands/refresh_iarc_ratings.py
|
Python
|
bsd-3-clause
| 985
|
import copy
import weakref
import numpy
import six
from chainer import cuda
from chainer.utils import type_check
from chainer import variable
class Function(object):
"""Function on variables with backpropagation ability.
All function implementations defined in :mod:`chainer.functions` inherit
this class.
The main feature of this class is keeping track of function applications as
a backward graph. When a function is applied to :class:`Variable` objects,
the function is copied, and its :meth:`forward` method is called on
:data:`~Variable.data` fields of input variables, and at the same time it
chains references from output variables to the function and from the
function to its inputs.
.. note::
Strictly speaking, when a function is applied to some variable, a
special :class:`Function` object called *splitter* is inserted between
the variable and the function. The splitter is used to manipulate
multiple function applications on the same variable, where gradients
from different backward paths are accumulated at the variable.
.. note::
:meth:`__call__` copies the function instance before the forward
computation and chaining. This enables us to reuse one function object
for multiple function applications, where the different calls must use
different references to the function object. Note that the copy is
shallow, so implementations of :class:`Function` must take care of any
        member attributes shared across forward and backward computations.
.. admonition:: Example
Let ``x`` an instance of :class:`Variable` and ``f`` an instance of
:class:`Function` taking only one argument. Then a line
>>> y = f(x)
computes a new variable ``y`` and creates backward references. Actually,
backward references are set as per the following diagram::
x <--- (splitter) <--- x' <--- f' <--- y
where prime "'" indicates a copy of the original object. If another
    application of the function occurs as
>>> z = f(x)
then the splitter acts like a branch as the following new diagram::
|--- x' <--- f' <--- y
x <--- (splitter) <-+
|--- x'' <--- f'' <--- z
    Note that the splitter is implicitly inserted and the user does not need to
take any special care of it; just remember that such branching is
correctly managed by chainer.
Every function implementation should provide :meth:`forward_cpu`,
:meth:`forward_gpu`, :meth:`backward_cpu` and :meth:`backward_gpu`.
Alternatively, one can provide :meth:`forward` and :meth:`backward` instead
of separate methods. Backward methods have default implementations that
just return ``None``, which indicates that the function is non-
differentiable.
Function implementations are classified into two types: parameterized ones
and non-parameterized ones. A parameterized function holds parameter arrays
    and corresponding gradient arrays. Implementations can choose any way to keep
    these arrays, but it is recommended to keep them as attributes to easily
    migrate between CPU and GPU. A parameterized function must provide accessors
to these arrays called :meth:`parameters` and :meth:`gradients`.
Attributes:
inputs: A tuple or list of input variables.
outputs: A tuple or list of output variables.
parameter_names: A tuple or list of names of parameter attributes.
It is set to an empty tuple by default. This attribute is used by
the default implementation of :meth:`parameters` property to gather
the collection of parameter arrays. Implementation of parameterized
function should override this field as an attribute or a property,
or otherwise it should override :meth:`parameters` property.
gradient_names: A tuple or list of names of gradient attributes. The
detail is same as :data:`parameter_names`.
"""
parameter_names = ()
gradient_names = ()
def __init__(self):
self.inputs = None
self.outputs = None
self.rank = None
def __call__(self, *inputs):
"""Applies forward propagation with chaining backward references.
Basic behavior is also expressed in documentation of :class:`Function`
class. This function first copies itself to avoid conflict over
multiple invocations.
.. note::
If the :data:`~Variable.data` attribute of input variables reside on
GPU device, then, before it calls :meth:`forward` method, the
appropriate device is selected, so in most cases implementers do
not need to take care of device selection.
Args:
inputs: Tuple of input :class:`Variable` objects. All input
                variables must have the same volatile flag.
Returns:
One
:class:`Variable` object or a tuple of multiple
:class:`Variable` objects.
"""
# First copy itself to avoid duplication within the graph.
self = copy.copy(self)
if any(x.volatile for x in inputs): # not build graph
# do not mix multiple volatility
assert all(x.volatile for x in inputs)
in_data = tuple(x.data for x in inputs)
self._check_data_type_forward(in_data)
with cuda.get_device(*in_data):
out_data = self.forward(in_data)
assert type(out_data) == tuple
outputs = list(variable.Variable(y, volatile=True)
for y in out_data)
if len(outputs) == 1:
return outputs[0]
return outputs
# Build graph
# Be careful that forward references must be weak
self.inputs = []
for x in inputs:
splitter = x.splitter()
if splitter is None:
splitter = Split(x)
x.splitter = weakref.ref(splitter)
self.inputs.append(splitter.add_branch())
if self.inputs:
self.rank = max(x.rank for x in self.inputs)
else:
self.rank = 0
in_data = tuple(x.data for x in self.inputs)
self._check_data_type_forward(in_data)
with cuda.get_device(*in_data):
outputs = self.forward(in_data)
assert type(outputs) == tuple
ret = tuple(variable.Variable(y) for y in outputs)
for y in ret:
y.set_creator(self)
# Make forward references weak
self.outputs = tuple(weakref.ref(y) for y in ret)
if len(ret) == 1:
return ret[0]
return ret
@property
def label(self):
"""Short text that represents the function.
The default implementation returns its type name.
Each function should override it to give more information.
"""
return self.__class__.__name__
def _check_data_type_forward(self, in_data):
in_type = type_check.get_types(in_data, 'in_types', False)
self.check_type_forward(in_type)
def check_type_forward(self, in_types):
"""Checks types of input data before forward propagation.
Before :meth:`forward` is called, this function is called.
You need to validate types of input data in this function
using :ref:`the type checking utilities <type-check-utils>`.
Args:
in_types (~chainer.utils.type_check.TypeInfoTuple): The type
information of input data for :meth:`forward`.
"""
pass
def forward(self, inputs):
"""Applies forward propagation to input arrays.
It delegates the procedure to :meth:`forward_cpu` or
:meth:`forward_gpu` by default. Which it selects is determined by the
type of input arrays.
Implementations of :class:`Function` must implement either cpu/gpu
methods or this method.
Args:
inputs: Tuple of input array(s).
Returns:
Tuple of output array(s).
.. warning::
Implementations of :class:`Function` must take care that the
return value must be a tuple even if it returns only one array.
"""
if any(isinstance(x, cuda.ndarray) for x in inputs):
return self.forward_gpu(inputs)
else:
return self.forward_cpu(inputs)
def forward_cpu(self, inputs):
"""Applies forward propagation to input arrays on CPU.
Args:
inputs: Tuple of :class:`numpy.ndarray` object(s).
Returns:
tuple: Tuple of :class:`numpy.ndarray` object(s).
.. warning::
Implementations of :class:`Function` must take care that the
return value must be a tuple even if it returns only one array.
"""
raise NotImplementedError()
def forward_gpu(self, inputs):
"""Applies forward propagation to input arrays on GPU.
Args:
inputs: Tuple of :class:`cupy.ndarray` object(s).
Returns:
tuple: Tuple of :class:`cupy.ndarray` object(s).
.. warning::
Implementations of :class:`Function` must take care that the
return value must be a tuple even if it returns only one array.
"""
raise NotImplementedError()
def backward(self, inputs, grad_outputs):
"""Applies backprop to output gradient arrays.
It delegates the procedure to :meth:`backward_cpu` or
:meth:`backward_gpu` by default. Which it selects is determined by the
type of input arrays and output gradient arrays. Implementations of
:class:`Function` must implement either cpu/gpu methods or this method,
if the function is intended to be backprop-ed.
Args:
inputs: Tuple of input arrays.
grad_outputs: Tuple of output gradient arrays.
Returns:
tuple: Tuple of input gradient arrays. Some or all of them can be
``None``, if the function is not differentiable on
inputs.
.. warning::
Implementations of :class:`Function` must take care that the
return value must be a tuple even if it returns only one array.
"""
if any(isinstance(x, cuda.ndarray) for x in inputs + grad_outputs):
return self.backward_gpu(inputs, grad_outputs)
else:
return self.backward_cpu(inputs, grad_outputs)
def backward_cpu(self, inputs, grad_outputs):
"""Applies backprop to output gradient arrays on CPU.
Args:
inputs: Tuple of input :class:`numpy.ndarray` object(s).
grad_outputs: Tuple of output gradient :class:`numpy.ndarray`
object(s).
Returns:
tuple: Tuple of input gradient :class:`numpy.ndarray` object(s).
Some or all of them can be ``None``, if the function is not
differentiable on corresponding inputs.
.. warning::
Implementations of :class:`Function` must take care that the
return value must be a tuple even if it returns only one array.
"""
return tuple(None for _ in inputs)
def backward_gpu(self, inputs, grad_outputs):
"""Applies backprop to output gradient arrays on GPU.
Args:
inputs: Tuple of input :class:`cupy.ndarray`
object(s).
grad_outputs: Tuple of output gradient
:class:`cupy.ndarray` object(s).
Returns:
tuple: Tuple of input gradient :class:`cupy.ndarray`
object(s). Some or all of them can be ``None``, if the function is
not differentiable on corresponding inputs.
.. warning::
Implementations of :class:`Function` must take care that the
return value must be a tuple even if it returns only one array.
"""
return tuple(None for _ in inputs)
def unchain(self):
"""Purges in/out variables and this function itself from the graph.
This method is called from :meth:`Variable.unchain_backward` method.
"""
for y in self.outputs:
y_ref = y()
if y_ref is not None:
y_ref.creator = None
for x in self.inputs:
x.splitter = weakref.ref(lambda: 0) # dead ref
self.inputs = None
def to_gpu(self, device=None):
"""Migrates the function to GPU and returns self.
The default implementation moves all fields of type
:class:`numpy.ndarray` onto GPU.
Args:
device (int or :class:`cupy.cuda.Device` or ``None``): Device
                ID of the GPU that the function will be migrated to. If this is
``None``, the current device is used.
Returns:
self.
"""
with cuda.get_device(device):
for k, v in six.iteritems(self.__dict__):
if isinstance(v, numpy.ndarray):
setattr(self, k, cuda.cupy.array(v))
return self
def to_cpu(self):
"""Migrates the function to CPU and returns self.
The default implementation moves all fields of type
:class:`cupy.ndarray` onto CPU.
Returns:
self.
"""
for k, v in six.iteritems(self.__dict__):
if isinstance(v, cuda.ndarray):
setattr(self, k, v.get())
return self
@property
def parameters(self):
"""A tuple of parameter arrays.
        Default implementation collects parameter arrays based on the
        :data:`parameter_names` attribute.
"""
return tuple(getattr(self, name) for name in self.parameter_names)
@parameters.setter
def parameters(self, values):
assert len(self.parameter_names) == len(values)
for name, value in zip(self.parameter_names, values):
setattr(self, name, value)
@property
def gradients(self):
"""A tuple of gradient arrays.
        Default implementation collects gradient arrays based on the
        :data:`gradient_names` attribute.
"""
return tuple(getattr(self, name) for name in self.gradient_names)
@gradients.setter
def gradients(self, values):
assert len(self.gradient_names) == len(values)
for name, value in zip(self.gradient_names, values):
setattr(self, name, value)
class Split(Function):
"""Special function to branch the graph at variable node.
Split does not implement forward: it is intended to implicitly used by
Function.
"""
def __init__(self, var):
self.inputs = [var]
self.outputs = []
self.rank = var.rank
def add_branch(self):
x = self.inputs[0]
output = variable.Variable(x.data)
output.set_creator(self)
self.outputs.append(weakref.ref(output))
return output
def backward(self, inputs, grad_outputs):
# Accumulate gradients
if len(grad_outputs) == 1:
return grad_outputs # no copy
gx = None
grad_outputs = [gy for gy in grad_outputs if gy is not None]
with cuda.get_device(*grad_outputs):
for gy in grad_outputs:
if gx is None:
gx = gy.copy()
else:
gx += gy
return gx,
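

# --- Illustrative sketch (not part of the original module) -------------------
# A minimal Function subclass, assuming the interface documented above: each
# forward_*/backward_* method receives a tuple of arrays and must return a
# tuple, even when there is only one output. The class name and the constant
# are invented for illustration only.
class AddConstant(Function):

    def __init__(self, value):
        self.value = value

    def forward_cpu(self, inputs):
        x, = inputs
        return x + self.value,  # trailing comma: the result must be a tuple

    def forward_gpu(self, inputs):
        x, = inputs
        return x + self.value,

    def backward_cpu(self, inputs, grad_outputs):
        gy, = grad_outputs
        return gy,  # d(x + c)/dx is 1, so the input gradient equals gy

    backward_gpu = backward_cpu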
|
masia02/chainer
|
chainer/function.py
|
Python
|
mit
| 15,610
|
"""Unicode Properties (autogen)."""
from __future__ import unicode_literals
unicode_alias = {
"_": {
"age": "age",
"bc": "bidiclass",
"blk": "block",
"ccc": "canonicalcombiningclass",
"dt": "decompositiontype",
"ea": "eastasianwidth",
"gc": "generalcategory",
"gcb": "graphemeclusterbreak",
"hst": "hangulsyllabletype",
"jg": "joininggroup",
"jt": "joiningtype",
"lb": "linebreak",
"nfcqc": "nfcquickcheck",
"nfdqc": "nfdquickcheck",
"nfkcqc": "nfkcquickcheck",
"nfkdqc": "nfkdquickcheck",
"nt": "numerictype",
"nv": "numericvalue",
"sb": "sentencebreak",
"sc": "script",
"wb": "wordbreak"
},
"age": {
"unassigned": "na",
"v11": "1.1",
"v20": "2.0",
"v21": "2.1",
"v30": "3.0",
"v31": "3.1",
"v32": "3.2",
"v40": "4.0",
"v41": "4.1",
"v50": "5.0",
"v51": "5.1",
"v52": "5.2",
"v60": "6.0",
"v61": "6.1"
},
"bidiclass": {
"arabicletter": "al",
"arabicnumber": "an",
"boundaryneutral": "bn",
"commonseparator": "cs",
"europeannumber": "en",
"europeanseparator": "es",
"europeanterminator": "et",
"lefttoright": "l",
"lefttorightembedding": "lre",
"lefttorightoverride": "lro",
"nonspacingmark": "nsm",
"otherneutral": "on",
"paragraphseparator": "b",
"popdirectionalformat": "pdf",
"righttoleft": "r",
"righttoleftembedding": "rle",
"righttoleftoverride": "rlo",
"segmentseparator": "s",
"whitespace": "ws"
},
"binary": {
"ahex": "asciihexdigit",
"alnum": "posixalnum",
"alpha": "alphabetic",
"bidic": "bidicontrol",
"bidim": "bidimirrored",
"blank": "posixblank",
"cased": "cased",
"ce": "compositionexclusion",
"ci": "caseignorable",
"compex": "fullcompositionexclusion",
"cwcf": "changeswhencasefolded",
"cwcm": "changeswhencasemapped",
"cwkcf": "changeswhennfkccasefolded",
"cwl": "changeswhenlowercased",
"cwt": "changeswhentitlecased",
"cwu": "changeswhenuppercased",
"dash": "dash",
"dep": "deprecated",
"di": "defaultignorablecodepoint",
"dia": "diacritic",
"ext": "extender",
"graph": "posixgraph",
"grbase": "graphemebase",
"grext": "graphemeextend",
"grlink": "graphemelink",
"hex": "hexdigit",
"hyphen": "hyphen",
"idc": "idcontinue",
"ideo": "ideographic",
"ids": "idstart",
"idsb": "idsbinaryoperator",
"idst": "idstrinaryoperator",
"joinc": "joincontrol",
"loe": "logicalorderexception",
"lower": "lowercase",
"math": "math",
"nchar": "noncharactercodepoint",
"oalpha": "otheralphabetic",
"odi": "otherdefaultignorablecodepoint",
"ogrext": "othergraphemeextend",
"oidc": "otheridcontinue",
"oids": "otheridstart",
"olower": "otherlowercase",
"omath": "othermath",
"oupper": "otheruppercase",
"patsyn": "patternsyntax",
"patws": "patternwhitespace",
"print": "posixprint",
"qmark": "quotationmark",
"radical": "radical",
"sd": "softdotted",
"space": "whitespace",
"sterm": "sterm",
"term": "terminalpunctuation",
"uideo": "unifiedideograph",
"upper": "uppercase",
"vs": "variationselector",
"wspace": "whitespace",
"xdigit": "posixxdigit",
"xidc": "xidcontinue",
"xids": "xidstart"
},
"block": {
"alchemical": "alchemicalsymbols",
"alphabeticpf": "alphabeticpresentationforms",
"ancientgreekmusic": "ancientgreekmusicalnotation",
"arabicexta": "arabicextendeda",
"arabicmath": "arabicmathematicalalphabeticsymbols",
"arabicpfa": "arabicpresentationformsa",
"arabicpfb": "arabicpresentationformsb",
"arabicsup": "arabicsupplement",
"ascii": "basiclatin",
"bamumsup": "bamumsupplement",
"bopomofoext": "bopomofoextended",
"braille": "braillepatterns",
"byzantinemusic": "byzantinemusicalsymbols",
"canadiansyllabics": "unifiedcanadianaboriginalsyllabics",
"cjk": "cjkunifiedideographs",
"cjkcompat": "cjkcompatibility",
"cjkcompatforms": "cjkcompatibilityforms",
"cjkcompatideographs": "cjkcompatibilityideographs",
"cjkcompatideographssup": "cjkcompatibilityideographssupplement",
"cjkexta": "cjkunifiedideographsextensiona",
"cjkextb": "cjkunifiedideographsextensionb",
"cjkextc": "cjkunifiedideographsextensionc",
"cjkextd": "cjkunifiedideographsextensiond",
"cjkradicalssup": "cjkradicalssupplement",
"cjksymbols": "cjksymbolsandpunctuation",
"combiningmarksforsymbols": "combiningdiacriticalmarksforsymbols",
"compatjamo": "hangulcompatibilityjamo",
"countingrod": "countingrodnumerals",
"cuneiformnumbers": "cuneiformnumbersandpunctuation",
"cyrillicexta": "cyrillicextendeda",
"cyrillicextb": "cyrillicextendedb",
"cyrillicsup": "cyrillicsupplement",
"cyrillicsupplementary": "cyrillicsupplement",
"devanagariext": "devanagariextended",
"diacriticals": "combiningdiacriticalmarks",
"diacriticalsforsymbols": "combiningdiacriticalmarksforsymbols",
"diacriticalssup": "combiningdiacriticalmarkssupplement",
"domino": "dominotiles",
"enclosedalphanum": "enclosedalphanumerics",
"enclosedalphanumsup": "enclosedalphanumericsupplement",
"enclosedcjk": "enclosedcjklettersandmonths",
"enclosedideographicsup": "enclosedideographicsupplement",
"ethiopicext": "ethiopicextended",
"ethiopicexta": "ethiopicextendeda",
"ethiopicsup": "ethiopicsupplement",
"georgiansup": "georgiansupplement",
"greek": "greekandcoptic",
"greekext": "greekextended",
"halfandfullforms": "halfwidthandfullwidthforms",
"halfmarks": "combininghalfmarks",
"hangul": "hangulsyllables",
"highpusurrogates": "highprivateusesurrogates",
"idc": "ideographicdescriptioncharacters",
"indicnumberforms": "commonindicnumberforms",
"ipaext": "ipaextensions",
"jamo": "hanguljamo",
"jamoexta": "hanguljamoextendeda",
"jamoextb": "hanguljamoextendedb",
"kanasup": "kanasupplement",
"kangxi": "kangxiradicals",
"katakanaext": "katakanaphoneticextensions",
"latin1": "latin1supplement",
"latin1sup": "latin1supplement",
"latinexta": "latinextendeda",
"latinextadditional": "latinextendedadditional",
"latinextb": "latinextendedb",
"latinextc": "latinextendedc",
"latinextd": "latinextendedd",
"mahjong": "mahjongtiles",
"mathalphanum": "mathematicalalphanumericsymbols",
"mathoperators": "mathematicaloperators",
"meeteimayekext": "meeteimayekextensions",
"miscarrows": "miscellaneoussymbolsandarrows",
"miscmathsymbolsa": "miscellaneousmathematicalsymbolsa",
"miscmathsymbolsb": "miscellaneousmathematicalsymbolsb",
"miscpictographs": "miscellaneoussymbolsandpictographs",
"miscsymbols": "miscellaneoussymbols",
"misctechnical": "miscellaneoustechnical",
"modifierletters": "spacingmodifierletters",
"music": "musicalsymbols",
"myanmarexta": "myanmarextendeda",
"nb": "noblock",
"ocr": "opticalcharacterrecognition",
"phaistos": "phaistosdisc",
"phoneticext": "phoneticextensions",
"phoneticextsup": "phoneticextensionssupplement",
"privateuse": "privateusearea",
"pua": "privateusearea",
"punctuation": "generalpunctuation",
"rumi": "ruminumeralsymbols",
"smallforms": "smallformvariants",
"sundanesesup": "sundanesesupplement",
"suparrowsa": "supplementalarrowsa",
"suparrowsb": "supplementalarrowsb",
"superandsub": "superscriptsandsubscripts",
"supmathoperators": "supplementalmathematicaloperators",
"suppuaa": "supplementaryprivateuseareaa",
"suppuab": "supplementaryprivateuseareab",
"suppunctuation": "supplementalpunctuation",
"taixuanjing": "taixuanjingsymbols",
"transportandmap": "transportandmapsymbols",
"ucas": "unifiedcanadianaboriginalsyllabics",
"ucasext": "unifiedcanadianaboriginalsyllabicsextended",
"vedicext": "vedicextensions",
"vs": "variationselectors",
"vssup": "variationselectorssupplement",
"yijing": "yijinghexagramsymbols"
},
"canonicalcombiningclass": {
"a": "230",
"above": "230",
"aboveleft": "228",
"aboveright": "232",
"al": "228",
"ar": "232",
"ata": "214",
"atar": "216",
"atb": "202",
"atbl": "200",
"attachedabove": "214",
"attachedaboveright": "216",
"attachedbelow": "202",
"attachedbelowleft": "200",
"b": "220",
"below": "220",
"belowleft": "218",
"belowright": "222",
"bl": "218",
"br": "222",
"ccc10": "10",
"ccc103": "103",
"ccc107": "107",
"ccc11": "11",
"ccc118": "118",
"ccc12": "12",
"ccc122": "122",
"ccc129": "129",
"ccc13": "13",
"ccc130": "130",
"ccc133": "132",
"ccc14": "14",
"ccc15": "15",
"ccc16": "16",
"ccc17": "17",
"ccc18": "18",
"ccc19": "19",
"ccc20": "20",
"ccc21": "21",
"ccc22": "22",
"ccc23": "23",
"ccc24": "24",
"ccc25": "25",
"ccc26": "26",
"ccc27": "27",
"ccc28": "28",
"ccc29": "29",
"ccc30": "30",
"ccc31": "31",
"ccc32": "32",
"ccc33": "33",
"ccc34": "34",
"ccc35": "35",
"ccc36": "36",
"ccc84": "84",
"ccc91": "91",
"da": "234",
"db": "233",
"doubleabove": "234",
"doublebelow": "233",
"iotasubscript": "240",
"is": "240",
"kanavoicing": "8",
"kv": "8",
"l": "224",
"left": "224",
"nk": "7",
"notreordered": "0",
"nr": "0",
"nukta": "7",
"ov": "1",
"overlay": "1",
"r": "226",
"right": "226",
"virama": "9",
"vr": "9"
},
"decompositiontype": {
"can": "canonical",
"com": "compat",
"enc": "circle",
"fin": "final",
"fra": "fraction",
"init": "initial",
"iso": "isolated",
"med": "medial",
"nar": "narrow",
"nb": "nobreak",
"sml": "small",
"sqr": "square",
"sup": "super",
"vert": "vertical"
},
"eastasianwidth": {
"ambiguous": "a",
"fullwidth": "f",
"halfwidth": "h",
"narrow": "na",
"neutral": "n",
"wide": "w"
},
"generalcategory": {
"casedletter": "lc",
"closepunctuation": "pe",
"cntrl": "cc",
"combiningmark": "m",
"connectorpunctuation": "pc",
"control": "cc",
"currencysymbol": "sc",
"dashpunctuation": "pd",
"decimalnumber": "nd",
"digit": "nd",
"enclosingmark": "me",
"finalpunctuation": "pf",
"format": "cf",
"initialpunctuation": "pi",
"letter": "l",
"letternumber": "nl",
"lineseparator": "zl",
"lowercaseletter": "ll",
"mark": "m",
"mathsymbol": "sm",
"modifierletter": "lm",
"modifiersymbol": "sk",
"nonspacingmark": "mn",
"number": "n",
"openpunctuation": "ps",
"other": "c",
"otherletter": "lo",
"othernumber": "no",
"otherpunctuation": "po",
"othersymbol": "so",
"paragraphseparator": "zp",
"privateuse": "co",
"punct": "p",
"punctuation": "p",
"separator": "z",
"spaceseparator": "zs",
"spacingmark": "mc",
"surrogate": "cs",
"symbol": "s",
"titlecaseletter": "lt",
"unassigned": "cn",
"uppercaseletter": "lu"
},
"graphemeclusterbreak": {
"cn": "control",
"ex": "extend",
"pp": "prepend",
"sm": "spacingmark",
"xx": "other"
},
"hangulsyllabletype": {
"leadingjamo": "l",
"lvsyllable": "lv",
"lvtsyllable": "lvt",
"notapplicable": "na",
"trailingjamo": "t",
"voweljamo": "v"
},
"joininggroup": {
"tehmarbutagoal": "hamzaonhehgoal"
},
"joiningtype": {
"dualjoining": "d",
"joincausing": "c",
"leftjoining": "l",
"nonjoining": "u",
"rightjoining": "r",
"transparent": "t"
},
"linebreak": {
"ai": "ambiguous",
"al": "alphabetic",
"b2": "breakboth",
"ba": "breakafter",
"bb": "breakbefore",
"bk": "mandatorybreak",
"cb": "contingentbreak",
"cj": "conditionaljapanesestarter",
"cl": "closepunctuation",
"cm": "combiningmark",
"cp": "closeparenthesis",
"cr": "carriagereturn",
"ex": "exclamation",
"gl": "glue",
"hl": "hebrewletter",
"hy": "hyphen",
"id": "ideographic",
"in": "inseparable",
"inseperable": "inseparable",
"is": "infixnumeric",
"lf": "linefeed",
"nl": "nextline",
"ns": "nonstarter",
"nu": "numeric",
"op": "openpunctuation",
"po": "postfixnumeric",
"pr": "prefixnumeric",
"qu": "quotation",
"sa": "complexcontext",
"sg": "surrogate",
"sp": "space",
"sy": "breaksymbols",
"wj": "wordjoiner",
"xx": "unknown",
"zw": "zwspace"
},
"nfcquickcheck": {
"maybe": "m",
"no": "n",
"yes": "y"
},
"nfdquickcheck": {
"no": "n",
"yes": "y"
},
"nfkcquickcheck": {
"maybe": "m",
"no": "n",
"yes": "y"
},
"nfkdquickcheck": {
"no": "n",
"yes": "y"
},
"numerictype": {
"de": "decimal",
"di": "digit",
"nu": "numeric"
},
"numericvalue": {
},
"script": {
"arab": "arabic",
"armi": "imperialaramaic",
"armn": "armenian",
"avst": "avestan",
"bali": "balinese",
"bamu": "bamum",
"batk": "batak",
"beng": "bengali",
"bopo": "bopomofo",
"brah": "brahmi",
"brai": "braille",
"bugi": "buginese",
"buhd": "buhid",
"cakm": "chakma",
"cans": "canadianaboriginal",
"cari": "carian",
"cher": "cherokee",
"copt": "coptic",
"cprt": "cypriot",
"cyrl": "cyrillic",
"deva": "devanagari",
"dsrt": "deseret",
"egyp": "egyptianhieroglyphs",
"ethi": "ethiopic",
"geor": "georgian",
"glag": "glagolitic",
"goth": "gothic",
"grek": "greek",
"gujr": "gujarati",
"guru": "gurmukhi",
"hang": "hangul",
"hani": "han",
"hano": "hanunoo",
"hebr": "hebrew",
"hira": "hiragana",
"hrkt": "katakanaorhiragana",
"ital": "olditalic",
"java": "javanese",
"kali": "kayahli",
"kana": "katakana",
"khar": "kharoshthi",
"khmr": "khmer",
"knda": "kannada",
"kthi": "kaithi",
"lana": "taitham",
"laoo": "lao",
"latn": "latin",
"lepc": "lepcha",
"limb": "limbu",
"linb": "linearb",
"lyci": "lycian",
"lydi": "lydian",
"mand": "mandaic",
"merc": "meroiticcursive",
"mero": "meroitichieroglyphs",
"mlym": "malayalam",
"mong": "mongolian",
"mtei": "meeteimayek",
"mymr": "myanmar",
"nkoo": "nko",
"ogam": "ogham",
"olck": "olchiki",
"orkh": "oldturkic",
"orya": "oriya",
"osma": "osmanya",
"phag": "phagspa",
"phli": "inscriptionalpahlavi",
"phnx": "phoenician",
"plrd": "miao",
"prti": "inscriptionalparthian",
"qaac": "coptic",
"qaai": "inherited",
"rjng": "rejang",
"runr": "runic",
"samr": "samaritan",
"sarb": "oldsoutharabian",
"saur": "saurashtra",
"shaw": "shavian",
"shrd": "sharada",
"sinh": "sinhala",
"sora": "sorasompeng",
"sund": "sundanese",
"sylo": "sylotinagri",
"syrc": "syriac",
"tagb": "tagbanwa",
"takr": "takri",
"tale": "taile",
"talu": "newtailue",
"taml": "tamil",
"tavt": "taiviet",
"telu": "telugu",
"tfng": "tifinagh",
"tglg": "tagalog",
"thaa": "thaana",
"tibt": "tibetan",
"ugar": "ugaritic",
"vaii": "vai",
"xpeo": "oldpersian",
"xsux": "cuneiform",
"yiii": "yi",
"zinh": "inherited",
"zyyy": "common",
"zzzz": "unknown"
},
"sentencebreak": {
"at": "aterm",
"cl": "close",
"ex": "extend",
"fo": "format",
"le": "oletter",
"lo": "lower",
"nu": "numeric",
"sc": "scontinue",
"se": "sep",
"st": "sterm",
"up": "upper",
"xx": "other"
},
"wordbreak": {
"ex": "extendnumlet",
"fo": "format",
"ka": "katakana",
"le": "aletter",
"mb": "midnumlet",
"ml": "midletter",
"mn": "midnum",
"nl": "newline",
"nu": "numeric",
"xx": "other"
}
}
enum_names = {
"age",
"bc",
"bidiclass",
"blk",
"block",
"canonicalcombiningclass",
"ccc",
"decompositiontype",
"dt",
"ea",
"eastasianwidth",
"gc",
"gcb",
"generalcategory",
"graphemeclusterbreak",
"hangulsyllabletype",
"hst",
"jg",
"joininggroup",
"joiningtype",
"jt",
"lb",
"linebreak",
"nfcqc",
"nfcquickcheck",
"nfdqc",
"nfdquickcheck",
"nfkcqc",
"nfkcquickcheck",
"nfkdqc",
"nfkdquickcheck",
"nt",
"numerictype",
"numericvalue",
"nv",
"sb",
"sc",
"script",
"sentencebreak",
"wb",
"wordbreak"
}
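

# --- Illustrative sketch (not part of the generated data) --------------------
# One way a consumer might use these tables: canonicalize a short property
# name via the "_" bucket, then map a long value name to its short alias.
# The helper name below is hypothetical, not an API of this module.
def _example_normalize(prop, value):
    prop = unicode_alias['_'].get(prop, prop)               # e.g. 'gc' -> 'generalcategory'
    value = unicode_alias.get(prop, {}).get(value, value)   # e.g. 'letter' -> 'l'
    return prop, value

# _example_normalize('gc', 'letter') -> ('generalcategory', 'l')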
|
okoala/sublime-bak
|
Packages/backrefs/st3/backrefs/uniprops/unidata/alias.py
|
Python
|
mit
| 19,024
|
# -*- coding: utf-8 -*-
from gettext import gettext as _
from logging import getLogger
import os
from pulp.plugins.distributor import Distributor
from pulp.server.db import model
from pulp.server.managers import factory
from pulp.server.config import config as pulp_conf
from pulp.server.compat import json
from pulp_node import constants
from pulp_node import pathlib
from pulp_node.conduit import NodesConduit
from pulp_node.distributors.http.publisher import HttpPublisher
_LOG = getLogger(__name__)
# --- constants -------------------------------------------------------------------------
PROPERTY_MISSING = _('Missing required configuration property: %(p)s')
PROPERTY_INVALID = _('Property %(p)s must be: %(v)s')
CONFIGURATION_PATH = '/etc/pulp/server/plugins.conf.d/nodes/distributor/http.conf'
# --- plugin loading --------------------------------------------------------------------
def entry_point():
"""
    Entry point that the Pulp platform uses to load the distributor.
:return: distributor class and its configuration.
:rtype: Distributor, dict
"""
with open(CONFIGURATION_PATH) as fp:
return NodesHttpDistributor, json.load(fp)
# --- plugin ----------------------------------------------------------------------------
class NodesHttpDistributor(Distributor):
"""
The (nodes) distributor
"""
@classmethod
def metadata(cls):
return {
'id' : constants.HTTP_DISTRIBUTOR,
'display_name' : 'Pulp Nodes HTTP Distributor',
'types' : ['node',]
}
def validate_config(self, repo, config, config_conduit):
"""
Layout:
{
protocol : (http|https|file),
http : {
alias : [url, directory]
},
https : {
alias : [url, directory],
ssl (optional) : {
ca_cert : <path>,
                  client_cert : <path>,
verify : <bool>
}
}
}
"""
key = constants.PROTOCOL_KEYWORD
protocol = config.get(key)
valid_protocols = ('http', 'https', 'file')
if not protocol:
return (False, PROPERTY_MISSING % {'p':key})
if protocol not in valid_protocols:
return (False, PROPERTY_INVALID % {'p':key, 'v':valid_protocols})
for key in ('http', 'https'):
section = config.get(key)
if not section:
return (False, PROPERTY_MISSING % {'p':key})
key = (key, 'alias')
alias = section.get(key[1])
if not alias:
return (False, PROPERTY_MISSING % {'p':'.'.join(key)})
return (True, None)
def publish_repo(self, repo, conduit, config):
"""
Publishes the given repository.
While this call may be implemented using multiple threads, its execution
from the Pulp server's standpoint should be synchronous. This call should
not return until the publish is complete.
It is not expected that this call be atomic. Should an error occur, it
is not the responsibility of the distributor to rollback any changes
that have been made.
:param repo: metadata describing the repository
:type repo: pulp.plugins.model.Repository
:param conduit: provides access to relevant Pulp functionality
:type conduit: pulp.plugins.conduits.repo_publish.RepoPublishConduit
:param config: plugin configuration
:type config: pulp.plugins.config.PluginConfiguration
:return: report describing the publish run
:rtype: pulp.plugins.model.PublishReport
"""
nodes_conduit = NodesConduit()
units = nodes_conduit.get_units(repo.id)
with self.publisher(repo, config) as publisher:
publisher.publish(units)
publisher.commit()
details = dict(unit_count=len(units))
return conduit.build_success_report('succeeded', details)
def publisher(self, repo, config):
"""
Get a configured publisher.
:param repo: A repository.
:type repo: pulp.plugins.model.Repository
:param config: plugin configuration
:type config: pulp.plugins.config.PluginConfiguration
:return: The configured publisher.
"""
protocol = config.get(constants.PROTOCOL_KEYWORD)
host = pulp_conf.get('server', 'server_name')
section = config.get(protocol)
alias = section.get('alias')
base_url = '://'.join((protocol, host))
repo_publish_dir = self._get_publish_dir(repo.id, config)
return HttpPublisher(base_url, alias, repo.id, repo_publish_dir)
def cancel_publish_repo(self):
pass
def create_consumer_payload(self, repo, config, binding_config):
"""
Called when a consumer binds to a repository using this distributor.
This call should return a dictionary describing all data the consumer
will need to access the repository. The contents will vary wildly
        depending on how the repository is published, but examples
        of returned data include authentication information, location of the
repository (e.g. URL), and data required to verify the contents
of the published repository.
:param repo: metadata describing the repository
:type repo: pulp.plugins.model.Repository
:param config: plugin configuration
:type config: pulp.plugins.config.PluginCallConfiguration
:param binding_config: The configuration stored on the binding.
:type binding_config: dict
:return: dictionary of relevant data
:rtype: dict
"""
payload = {}
self._add_repository(repo.id, payload)
self._add_importers(repo, config, binding_config or {}, payload)
self._add_distributors(repo.id, payload)
return payload
def distributor_removed(self, repo, config):
"""
Called when a distributor of this type is removed from a repository.
This will delete any published node data from the filesystem.
:param repo: metadata describing the repository
:type repo: pulp.plugins.model.Repository
:param config: plugin config
:type config: pulp.plugins.config.PluginCallConfiguration
"""
_LOG.debug(_('removing published node data for repo %s' % repo.id))
repo_publish_path = self._get_publish_dir(repo.id, config)
os.system('rm -rf %s' % repo_publish_path)
def _get_publish_dir(self, repo_id, config):
"""
generate the full path where the given repo should be published
:param repo_id: unique ID for the repository
:type repo_id: str
:param config: plugin config
:type config: pulp.plugins.config.PluginCallConfiguration
:return: full path to the directory where this repo's data
should be published
:rtype: str
"""
protocol = config.get(constants.PROTOCOL_KEYWORD)
section = config.get(protocol)
url, publish_path = section.get('alias')
return os.path.join(publish_path, repo_id)
def _add_repository(self, repo_id, payload):
"""
Add repository information to the payload.
:param repo_id: The repository ID.
:type repo_id: str
:param payload: The repository payload
:type payload: dict
"""
repo_obj = model.Repository.objects.get_repo_or_missing_resource(repo_id)
# Pseudo serialize the repository object so that it can be used by a node.
payload['repository'] = {'id': repo_obj.repo_id, 'display_name': repo_obj.display_name,
'description': repo_obj.description, 'notes': repo_obj.notes,
'scratchpad': repo_obj.scratchpad}
def _add_importers(self, repo, config, binding_config, payload):
"""
Add the nodes importer.
:param repo: A repo object.
:type repo: pulp.plugins.model.Repository
:param config: plugin configuration
:type config: pulp.plugins.config.PluginCallConfiguration
:param binding_config: The configuration stored on the binding.
:type binding_config: dict
:param payload: The bind payload.
:type payload: dict
"""
conf = self._importer_conf(repo, config, binding_config)
importer = {
'id': constants.HTTP_IMPORTER,
'importer_type_id': constants.HTTP_IMPORTER,
'config': conf,
}
payload['importers'] = [importer]
def _importer_conf(self, repo, config, binding_config):
"""
Build the nodes importer configuration.
:param repo: A repo object.
:type repo: pulp.plugins.model.Repository
:param config: plugin configuration
:type config: pulp.plugins.config.PluginCallConfiguration
:param binding_config: The configuration stored on the binding.
:type binding_config: dict
:return: The importer configuration.
:rtype: dict
"""
publisher = self.publisher(repo, config)
manifest_url = pathlib.url_join(publisher.base_url, publisher.manifest_path())
strategy = binding_config.get(constants.STRATEGY_KEYWORD, constants.DEFAULT_STRATEGY)
configuration = {
constants.STRATEGY_KEYWORD: strategy,
constants.MANIFEST_URL_KEYWORD: manifest_url,
}
return configuration
def _add_distributors(self, repo_id, payload):
"""
Add repository distributors information to the payload.
:param repo_id: The repository ID.
:type repo_id: str
:param payload: The distributor(s) payload
:type payload: dict
"""
distributors = []
manager = factory.repo_distributor_manager()
for dist in manager.get_distributors(repo_id):
if dist['distributor_type_id'] in constants.ALL_DISTRIBUTORS:
continue
distributors.append(dist)
payload['distributors'] = distributors
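

# --- Illustrative sketch (not part of the original plugin) -------------------
# A configuration dict in the shape that validate_config() documents and
# checks, assuming constants.PROTOCOL_KEYWORD is the string 'protocol'.
# The URLs and filesystem paths are placeholders, not values shipped with Pulp.
_EXAMPLE_CONFIG = {
    'protocol': 'https',
    'http': {
        'alias': ['http://example.com/pulp/nodes/http', '/var/www/pulp/nodes/http'],
    },
    'https': {
        'alias': ['https://example.com/pulp/nodes/https', '/var/www/pulp/nodes/https'],
    },
}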
|
rbramwell/pulp
|
nodes/parent/pulp_node/distributors/http/distributor.py
|
Python
|
gpl-2.0
| 10,260
|
##
# Copyright 2012-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Declaration of toolchains.compiler namespace.
:author: Stijn De Weirdt (Ghent University)
:author: Kenneth Hoste (Ghent University)
"""
__path__ = __import__('pkgutil').extend_path(__path__, __name__)
|
hpcugent/easybuild-framework
|
easybuild/toolchains/compiler/__init__.py
|
Python
|
gpl-2.0
| 1,255
|
#!/usr/bin/env python
import distutils.ccompiler
from distutils.core import setup, Extension
from distutils.command.build import build
from distutils.command.build_ext import build_ext
from distutils.command.install_lib import install_lib
import os, sys
from glob import glob
if not hasattr(sys, 'version_info') or sys.version_info < (2,6,0,'final'):
raise SystemExit("Ohcount requires Python 2.6 or later.")
class build_ohcount(build):
"""Ohcount already have a script named 'build', from the original package,
so it conflicts with Python default build path. To solve this, setup.py
will use the directory 'build-python' instead. The original distutils
execute 'build_py' before 'build_ext', but we need the wrapper ohcount.py
created by SWIG to be installed too, so we need to invert this order.
"""
sub_commands = [('build_ext', build.has_ext_modules), # changed
('build_py', build.has_pure_modules), # changed
('build_clib', build.has_c_libraries),
('build_scripts', build.has_scripts),
]
def initialize_options(self):
build.initialize_options(self)
self.build_base = 'build-python'
def newer_than(srclist, dstlist):
for left, right in zip(srclist, dstlist):
if not os.path.exists(right):
return True
left_stat = os.lstat(left)
right_stat = os.lstat(right)
if left_stat.st_mtime > right_stat.st_mtime:
return True
return False
class build_ohcount_ext(build_ext):
"""This class implements extra steps needed by Ohcount build process."""
def run(self):
parsers = glob('src/parsers/*.rl')
parsers_h = [f.replace('.rl', '.h') for f in parsers]
if newer_than(parsers, parsers_h):
os.system('cd src/parsers/ && bash ./compile')
hash_files = glob('src/hash/*.gperf')
hash_srcs = []
for f in hash_files:
if not f.endswith('languages.gperf'):
hash_srcs.append(f.replace('s.gperf', '_hash.h'))
else:
hash_srcs.append(f.replace('s.gperf', '_hash.c'))
if newer_than(hash_files, hash_srcs):
os.system('cd src/hash/ && bash ./generate_headers')
return build_ext.run(self)
# Overwrite default Mingw32 compiler
(module_name, class_name, long_description) = \
distutils.ccompiler.compiler_class['mingw32']
module_name = "distutils." + module_name
__import__(module_name)
module = sys.modules[module_name]
Mingw32CCompiler = vars(module)[class_name]
class Mingw32CCompiler_ohcount(Mingw32CCompiler):
"""Ohcount CCompiler version for Mingw32. There is a problem linking
against msvcrXX for Python 2.6.4: as both DLLs msvcr and msvcr90 are
loaded, it seems to happen some unexpected segmentation faults in
several function calls."""
def __init__(self, *args, **kwargs):
Mingw32CCompiler.__init__(self, *args, **kwargs)
self.dll_libraries=[] # empty link libraries list
_new_compiler = distutils.ccompiler.new_compiler
def ohcount_new_compiler(plat=None,compiler=None,verbose=0,dry_run=0,force=0):
if compiler == 'mingw32':
inst = Mingw32CCompiler_ohcount(None, dry_run, force)
else:
inst = _new_compiler(plat,compiler,verbose,dry_run,force)
return inst
distutils.ccompiler.new_compiler = ohcount_new_compiler
# Ohcount python extension
ext_modules=[
Extension(
name='ohcount._ohcount',
sources= [
'ruby/ohcount.i',
'src/sourcefile.c',
'src/detector.c',
'src/licenses.c',
'src/parser.c',
'src/loc.c',
'src/log.c',
'src/diff.c',
'src/parsed_language.c',
'src/hash/language_hash.c',
],
libraries=['pcre'],
swig_opts=['-outdir', './python/'],
)
]
setup(
name='ohcount',
version = '3.0.0',
description = 'Ohcount is the source code line counter that powers Ohloh.',
long_description =
'Ohcount supports over 70 popular programming languages, and has been '
'used to count over 6 billion lines of code by 300,000 developers! '
    'Ohcount does more than just count lines of code. It can also '
'detect popular open source licenses such as GPL within a large '
'directory of source code. It can also detect code that targets a '
'particular programming API, such as Win32 or KDE.',
author = 'Mitchell Foral',
author_email = 'mitchell@caladbolg.net',
license = 'GNU GPL',
platforms = ['Linux','Mac OSX'],
keywords = ['ohcount','ohloh','loc','source','code','line','counter'],
url = 'http://www.ohloh.net/p/ohcount',
download_url = 'http://sourceforge.net/projects/ohcount/files/',
packages = ['ohcount'],
package_dir = {'ohcount': 'python'},
classifiers = [
'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: GNU General Public License (GPL)',
'Intended Audience :: Developers',
'Natural Language :: English',
'Programming Language :: C',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
],
ext_modules=ext_modules,
cmdclass={
'build': build_ohcount,
'build_ext': build_ohcount_ext,
},
)
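
# Illustrative usage note (not part of the original script): with the cmdclass
# overrides above, a plain `python setup.py build` writes into 'build-python'
# (see build_ohcount.initialize_options) and runs build_ext before build_py,
# so the SWIG-generated ohcount.py wrapper already exists when the pure-Python
# package is collected.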
|
codeimpossible/ohcount
|
python/setup.py
|
Python
|
gpl-2.0
| 5,447
|
#!/usr/bin/env python
# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Script for constraining traffic on the local machine."""
import logging
import optparse
import socket
import sys
import config
import network_emulator
_DEFAULT_LOG_LEVEL = logging.INFO
# Default port range to apply network constraints on.
_DEFAULT_PORT_RANGE = (32768, 65535)
# The numbers below are gathered from Google stats from the presets of the Apple
# developer tool called Network Link Conditioner.
_PRESETS = [
config.ConnectionConfig(1, 'Generic, Bad', 95, 95, 250, 2, 100),
config.ConnectionConfig(2, 'Generic, Average', 375, 375, 145, 0.1, 100),
config.ConnectionConfig(3, 'Generic, Good', 1000, 1000, 35, 0, 100),
config.ConnectionConfig(4, '3G, Average Case', 780, 330, 100, 0, 100),
config.ConnectionConfig(5, '3G, Good', 850, 420, 90, 0, 100),
config.ConnectionConfig(6, '3G, Lossy Network', 780, 330, 100, 1, 100),
config.ConnectionConfig(7, 'Cable Modem', 6000, 1000, 2, 0, 10),
config.ConnectionConfig(8, 'DSL', 2000, 256, 5, 0, 10),
config.ConnectionConfig(9, 'Edge, Average Case', 240, 200, 400, 0, 100),
config.ConnectionConfig(10, 'Edge, Good', 250, 200, 350, 0, 100),
config.ConnectionConfig(11, 'Edge, Lossy Network', 240, 200, 400, 1, 100),
config.ConnectionConfig(12, 'Wifi, Average Case', 40000, 33000, 1, 0, 100),
config.ConnectionConfig(13, 'Wifi, Good', 45000, 40000, 1, 0, 100),
config.ConnectionConfig(14, 'Wifi, Lossy', 40000, 33000, 1, 0, 100),
]
_PRESETS_DICT = dict((p.num, p) for p in _PRESETS)
_DEFAULT_PRESET_ID = 2
_DEFAULT_PRESET = _PRESETS_DICT[_DEFAULT_PRESET_ID]
class NonStrippingEpilogOptionParser(optparse.OptionParser):
"""Custom parser to let us show the epilog without weird line breaking."""
def format_epilog(self, formatter):
return self.epilog
def _get_external_ip():
"""Finds out the machine's external IP by connecting to google.com."""
external_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
external_socket.connect(('google.com', 80))
return external_socket.getsockname()[0]
def _parse_args():
"""Define and parse the command-line arguments."""
presets_string = '\n'.join(str(p) for p in _PRESETS)
parser = NonStrippingEpilogOptionParser(epilog=(
'\nAvailable presets:\n'
' Bandwidth (kbps) Packet\n'
'ID Name Receive Send Queue Delay loss \n'
'-- ---- --------- -------- ----- ------- ------\n'
'%s\n' % presets_string))
parser.add_option('-p', '--preset', type='int', default=_DEFAULT_PRESET_ID,
help=('ConnectionConfig configuration, specified by ID. '
'Default: %default'))
parser.add_option('-r', '--receive-bw', type='int',
default=_DEFAULT_PRESET.receive_bw_kbps,
help=('Receive bandwidth in kilobit/s. Default: %default'))
parser.add_option('-s', '--send-bw', type='int',
default=_DEFAULT_PRESET.send_bw_kbps,
help=('Send bandwidth in kilobit/s. Default: %default'))
parser.add_option('-d', '--delay', type='int',
default=_DEFAULT_PRESET.delay_ms,
help=('Delay in ms. Default: %default'))
parser.add_option('-l', '--packet-loss', type='float',
default=_DEFAULT_PRESET.packet_loss_percent,
help=('Packet loss in %. Default: %default'))
parser.add_option('-q', '--queue', type='int',
default=_DEFAULT_PRESET.queue_slots,
help=('Queue size as number of slots. Default: %default'))
parser.add_option('--port-range', default='%s,%s' % _DEFAULT_PORT_RANGE,
help=('Range of ports for constrained network. Specify as '
'two comma separated integers. Default: %default'))
parser.add_option('--target-ip', default=None,
help=('The interface IP address to apply the rules for. '
'Default: the external facing interface IP address.'))
parser.add_option('-v', '--verbose', action='store_true', default=False,
help=('Turn on verbose output. Will print all \'ipfw\' '
'commands that are executed.'))
options = parser.parse_args()[0]
# Find preset by ID, if specified.
if options.preset and not _PRESETS_DICT.has_key(options.preset):
parser.error('Invalid preset: %s' % options.preset)
# Simple validation of the IP address, if supplied.
if options.target_ip:
try:
socket.inet_aton(options.target_ip)
except socket.error:
parser.error('Invalid IP address specified: %s' % options.target_ip)
# Convert port range into the desired tuple format.
try:
if isinstance(options.port_range, str):
options.port_range = tuple(int(port) for port in
options.port_range.split(','))
if len(options.port_range) != 2:
parser.error('Invalid port range specified, please specify two '
'integers separated by a comma.')
except ValueError:
parser.error('Invalid port range specified.')
_set_logger(options.verbose)
return options
def _set_logger(verbose):
"""Setup logging."""
log_level = _DEFAULT_LOG_LEVEL
if verbose:
log_level = logging.DEBUG
logging.basicConfig(level=log_level, format='%(message)s')
def _main():
options = _parse_args()
# Build a configuration object. Override any preset configuration settings if
# a value of a setting was also given as a flag.
connection_config = _PRESETS_DICT[options.preset]
if options.receive_bw is not _DEFAULT_PRESET.receive_bw_kbps:
connection_config.receive_bw_kbps = options.receive_bw
if options.send_bw is not _DEFAULT_PRESET.send_bw_kbps:
connection_config.send_bw_kbps = options.send_bw
if options.delay is not _DEFAULT_PRESET.delay_ms:
connection_config.delay_ms = options.delay
if options.packet_loss is not _DEFAULT_PRESET.packet_loss_percent:
connection_config.packet_loss_percent = options.packet_loss
if options.queue is not _DEFAULT_PRESET.queue_slots:
connection_config.queue_slots = options.queue
emulator = network_emulator.NetworkEmulator(connection_config,
options.port_range)
try:
emulator.check_permissions()
except network_emulator.NetworkEmulatorError as e:
logging.error('Error: %s\n\nCause: %s', e.fail_msg, e.error)
return -1
if not options.target_ip:
external_ip = _get_external_ip()
else:
external_ip = options.target_ip
logging.info('Constraining traffic to/from IP: %s', external_ip)
try:
emulator.emulate(external_ip)
logging.info('Started network emulation with the following configuration:\n'
' Receive bandwidth: %s kbps (%s kB/s)\n'
' Send bandwidth : %s kbps (%s kB/s)\n'
' Delay : %s ms\n'
' Packet loss : %s %%\n'
' Queue slots : %s',
connection_config.receive_bw_kbps,
connection_config.receive_bw_kbps/8,
connection_config.send_bw_kbps,
connection_config.send_bw_kbps/8,
connection_config.delay_ms,
connection_config.packet_loss_percent,
connection_config.queue_slots)
logging.info('Affected traffic: IP traffic on ports %s-%s',
options.port_range[0], options.port_range[1])
raw_input('Press Enter to abort Network Emulation...')
logging.info('Flushing all Dummynet rules...')
network_emulator.cleanup()
logging.info('Completed Network Emulation.')
return 0
except network_emulator.NetworkEmulatorError as e:
logging.error('Error: %s\n\nCause: %s', e.fail_msg, e.error)
return -2
if __name__ == '__main__':
sys.exit(_main())
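
# Illustrative usage note (not part of the original script): a typical
# invocation using the flags defined in _parse_args(); preset 7 is the
# 'Cable Modem' row of _PRESETS and the other flags override its fields.
# Elevated privileges are assumed, since the emulator manipulates
# ipfw/Dummynet rules (see check_permissions/cleanup above).
#
#   sudo ./emulate.py --preset 7 --delay 20 --port-range 32768,65535 -v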
|
golden1232004/webrtc_new
|
tools/network_emulator/emulate.py
|
Python
|
gpl-3.0
| 8,370
|
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
# (c) 2013, Michael Scherer <misc@zarb.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import distutils.spawn
import os
import subprocess
from ansible import errors
from ansible.callbacks import vvv
class Connection(object):
''' Local lxc based connections '''
def _search_executable(self, executable):
cmd = distutils.spawn.find_executable(executable)
if not cmd:
            raise errors.AnsibleError("%s command not found in PATH" % executable)
return cmd
def _check_domain(self, domain):
p = subprocess.Popen([self.cmd, '-q', '-c', 'lxc:///', 'dominfo', domain],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
if p.returncode:
raise errors.AnsibleError("%s is not a lxc defined in libvirt" % domain)
def __init__(self, runner, host, port, *args, **kwargs):
self.lxc = host
self.cmd = self._search_executable('virsh')
self._check_domain(host)
self.runner = runner
self.host = host
# port is unused, since this is local
self.port = port
def connect(self, port=None):
''' connect to the lxc; nothing to do here '''
vvv("THIS IS A LOCAL LXC DIR", host=self.lxc)
return self
def _generate_cmd(self, executable, cmd):
if executable:
local_cmd = [self.cmd, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc, '--', executable , '-c', cmd]
else:
local_cmd = '%s -q -c lxc:/// lxc-enter-namespace %s -- %s' % (self.cmd, self.lxc, cmd)
return local_cmd
def exec_command(self, cmd, tmp_path, sudo_user, sudoable=False, executable='/bin/sh', in_data=None, su=None, su_user=None):
''' run a command on the chroot '''
if su or su_user:
raise errors.AnsibleError("Internal Error: this module does not support running commands via su")
if in_data:
raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
# We enter lxc as root so sudo stuff can be ignored
local_cmd = self._generate_cmd(executable, cmd)
vvv("EXEC %s" % (local_cmd), host=self.lxc)
p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring),
cwd=self.runner.basedir,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
return (p.returncode, '', stdout, stderr)
def _normalize_path(self, path, prefix):
if not path.startswith(os.path.sep):
path = os.path.join(os.path.sep, path)
normpath = os.path.normpath(path)
return os.path.join(prefix, normpath[1:])
def put_file(self, in_path, out_path):
''' transfer a file from local to lxc '''
out_path = self._normalize_path(out_path, '/')
vvv("PUT %s TO %s" % (in_path, out_path), host=self.lxc)
local_cmd = [self.cmd, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc, '--', '/bin/tee', out_path]
vvv("EXEC %s" % (local_cmd), host=self.lxc)
p = subprocess.Popen(local_cmd, cwd=self.runner.basedir,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate(open(in_path,'rb').read())
def fetch_file(self, in_path, out_path):
''' fetch a file from lxc to local '''
in_path = self._normalize_path(in_path, '/')
vvv("FETCH %s TO %s" % (in_path, out_path), host=self.lxc)
local_cmd = [self.cmd, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc, '--', '/bin/cat', in_path]
vvv("EXEC %s" % (local_cmd), host=self.lxc)
p = subprocess.Popen(local_cmd, cwd=self.runner.basedir,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
open(out_path,'wb').write(stdout)
def close(self):
''' terminate the connection; nothing to do here '''
pass
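
# Illustrative sketch (not part of the original plugin): for a hypothetical
# container named 'web01', _generate_cmd('/bin/sh', 'uptime') produces the
# argv list that exec_command() hands to subprocess.Popen, roughly:
#
#   ['virsh', '-q', '-c', 'lxc:///', 'lxc-enter-namespace', 'web01',
#    '--', '/bin/sh', '-c', 'uptime']
#
# where 'virsh' is whatever path _search_executable('virsh') resolved on PATH.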
|
ralphbean/ansible
|
v2/ansible/plugins/connections/libvirt_lxc.py
|
Python
|
gpl-3.0
| 5,075
|
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os.path
import time
import urllib
import re
import threading
import datetime
import random
import locale
from Cheetah.Template import Template
import cherrypy.lib
import sickbeard
from sickbeard import config, sab
from sickbeard import clients
from sickbeard import history, notifiers, processTV
from sickbeard import ui
from sickbeard import logger, helpers, exceptions, classes, db
from sickbeard import encodingKludge as ek
from sickbeard import search_queue
from sickbeard import image_cache
from sickbeard import scene_exceptions
from sickbeard import naming
from sickbeard import subtitles
from sickbeard.providers import newznab
from sickbeard.common import Quality, Overview, statusStrings
from sickbeard.common import SNATCHED, SKIPPED, UNAIRED, IGNORED, ARCHIVED, WANTED
from sickbeard.exceptions import ex
from sickbeard.webapi import Api
from lib.tvdb_api import tvdb_api
from lib.dateutil import tz
import network_timezones
import subliminal
try:
import json
except ImportError:
from lib import simplejson as json
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
from sickbeard import browser
class PageTemplate (Template):
def __init__(self, *args, **KWs):
KWs['file'] = os.path.join(sickbeard.PROG_DIR, "data/interfaces/default/", KWs['file'])
super(PageTemplate, self).__init__(*args, **KWs)
self.sbRoot = sickbeard.WEB_ROOT
self.sbHttpPort = sickbeard.WEB_PORT
self.sbHttpsPort = sickbeard.WEB_PORT
self.sbHttpsEnabled = sickbeard.ENABLE_HTTPS
if cherrypy.request.headers['Host'][0] == '[':
self.sbHost = re.match("^\[.*\]", cherrypy.request.headers['Host'], re.X|re.M|re.S).group(0)
else:
self.sbHost = re.match("^[^:]+", cherrypy.request.headers['Host'], re.X|re.M|re.S).group(0)
self.projectHomePage = "http://code.google.com/p/sickbeard/"
if sickbeard.NZBS and sickbeard.NZBS_UID and sickbeard.NZBS_HASH:
logger.log(u"NZBs.org has been replaced, please check the config to configure the new provider!", logger.ERROR)
ui.notifications.error("NZBs.org Config Update", "NZBs.org has a new site. Please <a href=\""+sickbeard.WEB_ROOT+"/config/providers\">update your config</a> with the api key from <a href=\"http://nzbs.org/login\">http://nzbs.org</a> and then disable the old NZBs.org provider.")
if "X-Forwarded-Host" in cherrypy.request.headers:
self.sbHost = cherrypy.request.headers['X-Forwarded-Host']
if "X-Forwarded-Port" in cherrypy.request.headers:
self.sbHttpPort = cherrypy.request.headers['X-Forwarded-Port']
self.sbHttpsPort = self.sbHttpPort
if "X-Forwarded-Proto" in cherrypy.request.headers:
self.sbHttpsEnabled = True if cherrypy.request.headers['X-Forwarded-Proto'] == 'https' else False
logPageTitle = 'Logs & Errors'
if len(classes.ErrorViewer.errors):
logPageTitle += ' ('+str(len(classes.ErrorViewer.errors))+')'
self.logPageTitle = logPageTitle
self.sbPID = str(sickbeard.PID)
self.menu = [
{ 'title': 'Home', 'key': 'home' },
{ 'title': 'Coming Episodes', 'key': 'comingEpisodes' },
{ 'title': 'History', 'key': 'history' },
{ 'title': 'Manage', 'key': 'manage' },
{ 'title': 'Config', 'key': 'config' },
{ 'title': logPageTitle, 'key': 'errorlogs' },
]
def redirect(abspath, *args, **KWs):
assert abspath[0] == '/'
raise cherrypy.HTTPRedirect(sickbeard.WEB_ROOT + abspath, *args, **KWs)
class TVDBWebUI:
def __init__(self, config, log=None):
self.config = config
self.log = log
def selectSeries(self, allSeries):
searchList = ",".join([x['id'] for x in allSeries])
showDirList = ""
for curShowDir in self.config['_showDir']:
showDirList += "showDir="+curShowDir+"&"
redirect("/home/addShows/addShow?" + showDirList + "seriesList=" + searchList)
def _munge(string):
return unicode(string).encode('utf-8', 'xmlcharrefreplace')
def _genericMessage(subject, message):
t = PageTemplate(file="genericMessage.tmpl")
t.submenu = HomeMenu()
t.subject = subject
t.message = message
return _munge(t)
def _getEpisode(show, season, episode):
if show == None or season == None or episode == None:
return "Invalid parameters"
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return "Show not in show list"
epObj = showObj.getEpisode(int(season), int(episode))
if epObj == None:
return "Episode couldn't be retrieved"
return epObj
ManageMenu = [
{ 'title': 'Backlog Overview', 'path': 'manage/backlogOverview' },
{ 'title': 'Manage Searches', 'path': 'manage/manageSearches' },
{ 'title': 'Episode Status Management', 'path': 'manage/episodeStatuses' },
{ 'title': 'Manage Missed Subtitles', 'path': 'manage/subtitleMissed' },
]
if sickbeard.USE_SUBTITLES:
ManageMenu.append({ 'title': 'Missed Subtitle Management', 'path': 'manage/subtitleMissed' })
class ManageSearches:
@cherrypy.expose
def index(self):
t = PageTemplate(file="manage_manageSearches.tmpl")
#t.backlogPI = sickbeard.backlogSearchScheduler.action.getProgressIndicator()
t.backlogPaused = sickbeard.searchQueueScheduler.action.is_backlog_paused() #@UndefinedVariable
t.backlogRunning = sickbeard.searchQueueScheduler.action.is_backlog_in_progress() #@UndefinedVariable
t.searchStatus = sickbeard.currentSearchScheduler.action.amActive #@UndefinedVariable
t.submenu = ManageMenu
return _munge(t)
@cherrypy.expose
def forceSearch(self):
# force it to run the next time it looks
result = sickbeard.currentSearchScheduler.forceRun()
if result:
logger.log(u"Search forced")
ui.notifications.message('Episode search started',
'Note: RSS feeds may not be updated if retrieved recently')
redirect("/manage/manageSearches")
@cherrypy.expose
def pauseBacklog(self, paused=None):
if paused == "1":
sickbeard.searchQueueScheduler.action.pause_backlog() #@UndefinedVariable
else:
sickbeard.searchQueueScheduler.action.unpause_backlog() #@UndefinedVariable
redirect("/manage/manageSearches")
@cherrypy.expose
def forceVersionCheck(self):
# force a check to see if there is a new version
result = sickbeard.versionCheckScheduler.action.check_for_new_version(force=True) #@UndefinedVariable
if result:
logger.log(u"Forcing version check")
redirect("/manage/manageSearches")
class Manage:
manageSearches = ManageSearches()
@cherrypy.expose
def index(self):
t = PageTemplate(file="manage.tmpl")
t.submenu = ManageMenu
return _munge(t)
@cherrypy.expose
def showEpisodeStatuses(self, tvdb_id, whichStatus):
myDB = db.DBConnection()
status_list = [int(whichStatus)]
if status_list[0] == SNATCHED:
status_list = Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.SNATCHED_FRENCH
cur_show_results = myDB.select("SELECT season, episode, name FROM tv_episodes WHERE showid = ? AND season != 0 AND status IN ("+','.join(['?']*len(status_list))+")", [int(tvdb_id)] + status_list)
result = {}
for cur_result in cur_show_results:
cur_season = int(cur_result["season"])
cur_episode = int(cur_result["episode"])
if cur_season not in result:
result[cur_season] = {}
result[cur_season][cur_episode] = cur_result["name"]
return json.dumps(result)
@cherrypy.expose
def episodeStatuses(self, whichStatus=None):
if whichStatus:
whichStatus = int(whichStatus)
status_list = [whichStatus]
if status_list[0] == SNATCHED:
status_list = Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.SNATCHED_FRENCH
else:
status_list = []
t = PageTemplate(file="manage_episodeStatuses.tmpl")
t.submenu = ManageMenu
t.whichStatus = whichStatus
# if we have no status then this is as far as we need to go
if not status_list:
return _munge(t)
myDB = db.DBConnection()
status_results = myDB.select("SELECT show_name, tv_shows.tvdb_id as tvdb_id FROM tv_episodes, tv_shows WHERE tv_episodes.status IN ("+','.join(['?']*len(status_list))+") AND season != 0 AND tv_episodes.showid = tv_shows.tvdb_id ORDER BY show_name", status_list)
ep_counts = {}
show_names = {}
sorted_show_ids = []
for cur_status_result in status_results:
cur_tvdb_id = int(cur_status_result["tvdb_id"])
if cur_tvdb_id not in ep_counts:
ep_counts[cur_tvdb_id] = 1
else:
ep_counts[cur_tvdb_id] += 1
show_names[cur_tvdb_id] = cur_status_result["show_name"]
if cur_tvdb_id not in sorted_show_ids:
sorted_show_ids.append(cur_tvdb_id)
t.show_names = show_names
t.ep_counts = ep_counts
t.sorted_show_ids = sorted_show_ids
return _munge(t)
@cherrypy.expose
def changeEpisodeStatuses(self, oldStatus, newStatus, *args, **kwargs):
status_list = [int(oldStatus)]
if status_list[0] == SNATCHED:
status_list = Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.SNATCHED_FRENCH
to_change = {}
# make a list of all shows and their associated args
for arg in kwargs:
tvdb_id, what = arg.split('-')
# we don't care about unchecked checkboxes
if kwargs[arg] != 'on':
continue
if tvdb_id not in to_change:
to_change[tvdb_id] = []
to_change[tvdb_id].append(what)
myDB = db.DBConnection()
for cur_tvdb_id in to_change:
# get a list of all the eps we want to change if they just said "all"
if 'all' in to_change[cur_tvdb_id]:
all_eps_results = myDB.select("SELECT season, episode FROM tv_episodes WHERE status IN ("+','.join(['?']*len(status_list))+") AND season != 0 AND showid = ?", status_list + [cur_tvdb_id])
all_eps = [str(x["season"])+'x'+str(x["episode"]) for x in all_eps_results]
to_change[cur_tvdb_id] = all_eps
Home().setStatus(cur_tvdb_id, '|'.join(to_change[cur_tvdb_id]), newStatus, direct=True)
redirect('/manage/episodeStatuses')
@cherrypy.expose
def showSubtitleMissed(self, tvdb_id, whichSubs):
myDB = db.DBConnection()
cur_show_results = myDB.select("SELECT season, episode, name, subtitles FROM tv_episodes WHERE showid = ? AND season != 0 AND status LIKE '%4'", [int(tvdb_id)])
result = {}
for cur_result in cur_show_results:
if whichSubs == 'all':
if len(set(cur_result["subtitles"].split(',')).intersection(set(subtitles.wantedLanguages()))) >= len(subtitles.wantedLanguages()):
continue
elif whichSubs in cur_result["subtitles"].split(','):
continue
cur_season = int(cur_result["season"])
cur_episode = int(cur_result["episode"])
if cur_season not in result:
result[cur_season] = {}
if cur_episode not in result[cur_season]:
result[cur_season][cur_episode] = {}
result[cur_season][cur_episode]["name"] = cur_result["name"]
result[cur_season][cur_episode]["subtitles"] = ",".join(subliminal.language.Language(subtitle).alpha2 for subtitle in cur_result["subtitles"].split(',')) if not cur_result["subtitles"] == '' else ''
return json.dumps(result)
@cherrypy.expose
def subtitleMissed(self, whichSubs=None):
t = PageTemplate(file="manage_subtitleMissed.tmpl")
t.submenu = ManageMenu
t.whichSubs = whichSubs
if not whichSubs:
return _munge(t)
myDB = db.DBConnection()
status_results = myDB.select("SELECT show_name, tv_shows.tvdb_id as tvdb_id, tv_episodes.subtitles subtitles FROM tv_episodes, tv_shows WHERE tv_shows.subtitles = 1 AND tv_episodes.status LIKE '%4' AND tv_episodes.season != 0 AND tv_episodes.showid = tv_shows.tvdb_id ORDER BY show_name")
ep_counts = {}
show_names = {}
sorted_show_ids = []
for cur_status_result in status_results:
if whichSubs == 'all':
if len(set(cur_status_result["subtitles"].split(',')).intersection(set(subtitles.wantedLanguages()))) >= len(subtitles.wantedLanguages()):
continue
elif whichSubs in cur_status_result["subtitles"].split(','):
continue
cur_tvdb_id = int(cur_status_result["tvdb_id"])
if cur_tvdb_id not in ep_counts:
ep_counts[cur_tvdb_id] = 1
else:
ep_counts[cur_tvdb_id] += 1
show_names[cur_tvdb_id] = cur_status_result["show_name"]
if cur_tvdb_id not in sorted_show_ids:
sorted_show_ids.append(cur_tvdb_id)
t.show_names = show_names
t.ep_counts = ep_counts
t.sorted_show_ids = sorted_show_ids
return _munge(t)
@cherrypy.expose
def downloadSubtitleMissed(self, *args, **kwargs):
to_download = {}
# make a list of all shows and their associated args
for arg in kwargs:
tvdb_id, what = arg.split('-')
# we don't care about unchecked checkboxes
if kwargs[arg] != 'on':
continue
if tvdb_id not in to_download:
to_download[tvdb_id] = []
to_download[tvdb_id].append(what)
for cur_tvdb_id in to_download:
# get a list of all the eps we want to download subtitles if they just said "all"
if 'all' in to_download[cur_tvdb_id]:
myDB = db.DBConnection()
all_eps_results = myDB.select("SELECT season, episode FROM tv_episodes WHERE status LIKE '%4' AND season != 0 AND showid = ?", [cur_tvdb_id])
to_download[cur_tvdb_id] = [str(x["season"])+'x'+str(x["episode"]) for x in all_eps_results]
for epResult in to_download[cur_tvdb_id]:
                season, episode = epResult.split('x')
show = sickbeard.helpers.findCertainShow(sickbeard.showList, int(cur_tvdb_id))
subtitles = show.getEpisode(int(season), int(episode)).downloadSubtitles()
redirect('/manage/subtitleMissed')
@cherrypy.expose
def backlogShow(self, tvdb_id):
show_obj = helpers.findCertainShow(sickbeard.showList, int(tvdb_id))
if show_obj:
sickbeard.backlogSearchScheduler.action.searchBacklog([show_obj]) #@UndefinedVariable
redirect("/manage/backlogOverview")
@cherrypy.expose
def backlogOverview(self):
t = PageTemplate(file="manage_backlogOverview.tmpl")
t.submenu = ManageMenu
myDB = db.DBConnection()
showCounts = {}
showCats = {}
showSQLResults = {}
for curShow in sickbeard.showList:
epCounts = {}
epCats = {}
epCounts[Overview.SKIPPED] = 0
epCounts[Overview.WANTED] = 0
epCounts[Overview.QUAL] = 0
epCounts[Overview.GOOD] = 0
epCounts[Overview.UNAIRED] = 0
epCounts[Overview.SNATCHED] = 0
sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? ORDER BY season DESC, episode DESC", [curShow.tvdbid])
for curResult in sqlResults:
curEpCat = curShow.getOverview(int(curResult["status"]))
epCats[str(curResult["season"]) + "x" + str(curResult["episode"])] = curEpCat
epCounts[curEpCat] += 1
showCounts[curShow.tvdbid] = epCounts
showCats[curShow.tvdbid] = epCats
showSQLResults[curShow.tvdbid] = sqlResults
t.showCounts = showCounts
t.showCats = showCats
t.showSQLResults = showSQLResults
return _munge(t)
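# Show the mass-edit form. For each editable field we check whether all the
# selected shows share the same value; shared values pre-populate the form,
# mixed values are passed to the template as None.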
@cherrypy.expose
def massEdit(self, toEdit=None):
t = PageTemplate(file="manage_massEdit.tmpl")
t.submenu = ManageMenu
if not toEdit:
redirect("/manage")
showIDs = toEdit.split("|")
showList = []
for curID in showIDs:
curID = int(curID)
showObj = helpers.findCertainShow(sickbeard.showList, curID)
if showObj:
showList.append(showObj)
flatten_folders_all_same = True
last_flatten_folders = None
paused_all_same = True
last_paused = None
frenched_all_same = True
last_frenched = None
quality_all_same = True
last_quality = None
subtitles_all_same = True
last_subtitles = None
lang_all_same = True
last_lang_metadata= None
lang_audio_all_same = True
last_lang_audio = None
root_dir_list = []
for curShow in showList:
cur_root_dir = ek.ek(os.path.dirname, curShow._location)
if cur_root_dir not in root_dir_list:
root_dir_list.append(cur_root_dir)
# if we know they're not all the same then no point even bothering
if paused_all_same:
# if we had a value already and this value is different then they're not all the same
if last_paused not in (curShow.paused, None):
paused_all_same = False
else:
last_paused = curShow.paused
if frenched_all_same:
# if we had a value already and this value is different then they're not all the same
if last_frenched not in (curShow.frenchsearch, None):
frenched_all_same = False
else:
last_frenched = curShow.frenchsearch
if flatten_folders_all_same:
if last_flatten_folders not in (None, curShow.flatten_folders):
flatten_folders_all_same = False
else:
last_flatten_folders = curShow.flatten_folders
if quality_all_same:
if last_quality not in (None, curShow.quality):
quality_all_same = False
else:
last_quality = curShow.quality
if subtitles_all_same:
if last_subtitles not in (None, curShow.subtitles):
subtitles_all_same = False
else:
last_subtitles = curShow.subtitles
if lang_all_same:
if last_lang_metadata not in (None, curShow.lang):
lang_all_same = False
else:
last_lang_metadata = curShow.lang
if lang_audio_all_same:
if last_lang_audio not in (None, curShow.audio_lang):
lang_audio_all_same = False
else:
last_lang_audio = curShow.audio_lang
t.showList = toEdit
t.paused_value = last_paused if paused_all_same else None
t.frenched_value = last_frenched if frenched_all_same else None
t.flatten_folders_value = last_flatten_folders if flatten_folders_all_same else None
t.quality_value = last_quality if quality_all_same else None
t.subtitles_value = last_subtitles if subtitles_all_same else None
t.root_dir_list = root_dir_list
t.lang_value = last_lang_metadata if lang_all_same else None
t.audio_value = last_lang_audio if lang_audio_all_same else None
return _munge(t)
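# Apply the mass-edit form to every selected show. 'keep' means "reuse the
# show's current value"; the actual update is delegated to Home().editShow()
# with directCall=True so errors are collected instead of redirecting.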
@cherrypy.expose
def massEditSubmit(self, paused=None, frenched=None, flatten_folders=None, quality_preset=False, subtitles=None,
anyQualities=[], bestQualities=[], tvdbLang=None, audioLang = None, toEdit=None, *args, **kwargs):
dir_map = {}
for cur_arg in kwargs:
if not cur_arg.startswith('orig_root_dir_'):
continue
which_index = cur_arg.replace('orig_root_dir_', '')
end_dir = kwargs['new_root_dir_'+which_index]
dir_map[kwargs[cur_arg]] = end_dir
showIDs = toEdit.split("|")
errors = []
for curShow in showIDs:
curErrors = []
showObj = helpers.findCertainShow(sickbeard.showList, int(curShow))
if not showObj:
continue
cur_root_dir = ek.ek(os.path.dirname, showObj._location)
cur_show_dir = ek.ek(os.path.basename, showObj._location)
if cur_root_dir in dir_map and cur_root_dir != dir_map[cur_root_dir]:
new_show_dir = ek.ek(os.path.join, dir_map[cur_root_dir], cur_show_dir)
logger.log(u"For show "+showObj.name+" changing dir from "+showObj._location+" to "+new_show_dir)
else:
new_show_dir = showObj._location
if paused == 'keep':
new_paused = showObj.paused
else:
new_paused = True if paused == 'enable' else False
new_paused = 'on' if new_paused else 'off'
if frenched == 'keep':
new_frenched = showObj.frenchsearch
else:
new_frenched = True if frenched == 'enable' else False
new_frenched = 'on' if new_frenched else 'off'
if flatten_folders == 'keep':
new_flatten_folders = showObj.flatten_folders
else:
new_flatten_folders = True if flatten_folders == 'enable' else False
new_flatten_folders = 'on' if new_flatten_folders else 'off'
if subtitles == 'keep':
new_subtitles = showObj.subtitles
else:
new_subtitles = True if subtitles == 'enable' else False
new_subtitles = 'on' if new_subtitles else 'off'
if quality_preset == 'keep':
anyQualities, bestQualities = Quality.splitQuality(showObj.quality)
if tvdbLang == 'None':
new_lang = 'en'
else:
new_lang = tvdbLang
if audioLang == 'keep':
new_audio_lang = showObj.audio_lang
else:
new_audio_lang = audioLang
exceptions_list = []
curErrors += Home().editShow(curShow, new_show_dir, anyQualities, bestQualities, exceptions_list, new_flatten_folders, new_paused, new_frenched, subtitles=new_subtitles, tvdbLang=new_lang, audio_lang=new_audio_lang, directCall=True)
if curErrors:
logger.log(u"Errors: "+str(curErrors), logger.ERROR)
errors.append('<b>%s:</b>\n<ul>' % showObj.name + ' '.join(['<li>%s</li>' % error for error in curErrors]) + "</ul>")
if len(errors) > 0:
ui.notifications.error('%d error%s while saving changes:' % (len(errors), "" if len(errors) == 1 else "s"),
" ".join(errors))
redirect("/manage")
@cherrypy.expose
def massUpdate(self, toUpdate=None, toRefresh=None, toRename=None, toDelete=None, toMetadata=None, toSubtitle=None):
if toUpdate != None:
toUpdate = toUpdate.split('|')
else:
toUpdate = []
if toRefresh != None:
toRefresh = toRefresh.split('|')
else:
toRefresh = []
if toRename != None:
toRename = toRename.split('|')
else:
toRename = []
if toSubtitle != None:
toSubtitle = toSubtitle.split('|')
else:
toSubtitle = []
if toDelete != None:
toDelete = toDelete.split('|')
else:
toDelete = []
if toMetadata != None:
toMetadata = toMetadata.split('|')
else:
toMetadata = []
errors = []
refreshes = []
updates = []
renames = []
subtitles = []
for curShowID in set(toUpdate+toRefresh+toRename+toSubtitle+toDelete+toMetadata):
if curShowID == '':
continue
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(curShowID))
if showObj == None:
continue
if curShowID in toDelete:
showObj.deleteShow()
# don't do anything else if it's being deleted
continue
if curShowID in toUpdate:
try:
sickbeard.showQueueScheduler.action.updateShow(showObj, True) #@UndefinedVariable
updates.append(showObj.name)
except exceptions.CantUpdateException, e:
errors.append("Unable to update show "+showObj.name+": "+ex(e))
# don't bother refreshing shows that were updated anyway
if curShowID in toRefresh and curShowID not in toUpdate:
try:
sickbeard.showQueueScheduler.action.refreshShow(showObj) #@UndefinedVariable
refreshes.append(showObj.name)
except exceptions.CantRefreshException, e:
errors.append("Unable to refresh show "+showObj.name+": "+ex(e))
if curShowID in toRename:
sickbeard.showQueueScheduler.action.renameShowEpisodes(showObj) #@UndefinedVariable
renames.append(showObj.name)
if curShowID in toSubtitle:
sickbeard.showQueueScheduler.action.downloadSubtitles(showObj) #@UndefinedVariable
subtitles.append(showObj.name)
if len(errors) > 0:
ui.notifications.error("Errors encountered",
'<br />\n'.join(errors))
messageDetail = ""
if len(updates) > 0:
messageDetail += "<br /><b>Updates</b><br /><ul><li>"
messageDetail += "</li><li>".join(updates)
messageDetail += "</li></ul>"
if len(refreshes) > 0:
messageDetail += "<br /><b>Refreshes</b><br /><ul><li>"
messageDetail += "</li><li>".join(refreshes)
messageDetail += "</li></ul>"
if len(renames) > 0:
messageDetail += "<br /><b>Renames</b><br /><ul><li>"
messageDetail += "</li><li>".join(renames)
messageDetail += "</li></ul>"
if len(subtitles) > 0:
messageDetail += "<br /><b>Subtitles</b><br /><ul><li>"
messageDetail += "</li><li>".join(subtitles)
messageDetail += "</li></ul>"
if len(updates+refreshes+renames+subtitles) > 0:
ui.notifications.message("The following actions were queued:",
messageDetail)
redirect("/manage")
class History:
@cherrypy.expose
def index(self, limit=100):
myDB = db.DBConnection()
# sqlResults = myDB.select("SELECT h.*, show_name, name FROM history h, tv_shows s, tv_episodes e WHERE h.showid=s.tvdb_id AND h.showid=e.showid AND h.season=e.season AND h.episode=e.episode ORDER BY date DESC LIMIT "+str(numPerPage*(p-1))+", "+str(numPerPage))
if limit == "0":
sqlResults = myDB.select("SELECT h.*, show_name FROM history h, tv_shows s WHERE h.showid=s.tvdb_id ORDER BY date DESC")
else:
sqlResults = myDB.select("SELECT h.*, show_name FROM history h, tv_shows s WHERE h.showid=s.tvdb_id ORDER BY date DESC LIMIT ?", [limit])
t = PageTemplate(file="history.tmpl")
t.historyResults = sqlResults
t.limit = limit
t.submenu = [
{ 'title': 'Clear History', 'path': 'history/clearHistory' },
{ 'title': 'Trim History', 'path': 'history/trimHistory' },
{ 'title': 'Trunc Episode Links', 'path': 'history/truncEplinks' },
{ 'title': 'Trunc Processed Files List', 'path': 'history/truncEpListProc' },
]
return _munge(t)
@cherrypy.expose
def clearHistory(self):
myDB = db.DBConnection()
myDB.action("DELETE FROM history WHERE 1=1")
ui.notifications.message('History cleared')
redirect("/history")
@cherrypy.expose
def trimHistory(self):
myDB = db.DBConnection()
myDB.action("DELETE FROM history WHERE date < "+str((datetime.datetime.today()-datetime.timedelta(days=30)).strftime(history.dateFormat)))
ui.notifications.message('Removed history entries older than 30 days')
redirect("/history")
@cherrypy.expose
def truncEplinks(self):
myDB = db.DBConnection()
nbep=myDB.select("SELECT count(*) from episode_links")
myDB.action("DELETE FROM episode_links WHERE 1=1")
messnum = str(nbep[0][0]) + ' history links deleted'
ui.notifications.message('All Episode Links Removed', messnum)
redirect("/history")
@cherrypy.expose
def truncEpListProc(self):
myDB = db.DBConnection()
nbep=myDB.select("SELECT count(*) from processed_files")
myDB.action("DELETE FROM processed_files WHERE 1=1")
messnum = str(nbep[0][0]) + ' processed file records deleted'
ui.notifications.message('Cleared list of processed files', messnum)
redirect("/history")
ConfigMenu = [
{ 'title': 'General', 'path': 'config/general/' },
{ 'title': 'Search Settings', 'path': 'config/search/' },
{ 'title': 'Search Providers', 'path': 'config/providers/' },
{ 'title': 'Subtitles Settings','path': 'config/subtitles/' },
{ 'title': 'Post Processing', 'path': 'config/postProcessing/' },
{ 'title': 'Notifications', 'path': 'config/notifications/' },
]
class ConfigGeneral:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config_general.tmpl")
t.submenu = ConfigMenu
return _munge(t)
@cherrypy.expose
def saveRootDirs(self, rootDirString=None):
sickbeard.ROOT_DIRS = rootDirString
sickbeard.save_config()
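# Persist the defaults applied when adding a new show: status, combined
# quality, audio language, flatten-folders and subtitles.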
@cherrypy.expose
def saveAddShowDefaults(self, defaultFlattenFolders, defaultStatus, anyQualities, bestQualities, audio_lang, subtitles=None):
if anyQualities:
anyQualities = anyQualities.split(',')
else:
anyQualities = []
if bestQualities:
bestQualities = bestQualities.split(',')
else:
bestQualities = []
newQuality = Quality.combineQualities(map(int, anyQualities), map(int, bestQualities))
sickbeard.STATUS_DEFAULT = int(defaultStatus)
sickbeard.QUALITY_DEFAULT = int(newQuality)
sickbeard.AUDIO_SHOW_DEFAULT = str(audio_lang)
if defaultFlattenFolders == "true":
defaultFlattenFolders = 1
else:
defaultFlattenFolders = 0
sickbeard.FLATTEN_FOLDERS_DEFAULT = int(defaultFlattenFolders)
if subtitles == "true":
subtitles = 1
else:
subtitles = 0
sickbeard.SUBTITLES_DEFAULT = int(subtitles)
sickbeard.save_config()
@cherrypy.expose
def generateKey(self):
""" Return a new randomized API_KEY
"""
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Create some values to seed md5
t = str(time.time())
r = str(random.random())
# Create the md5 instance and give it the current time
m = md5(t)
# Update the md5 instance with the random variable
m.update(r)
# Return a hex digest of the md5, eg 49f68a5c8493ec2c0bf489821c21fc3b
logger.log(u"New API generated")
return m.hexdigest()
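# Save the "General" config tab: checkbox values come in as "on"/None and are
# normalized to 1/0 before being written to the global settings and config file.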
@cherrypy.expose
def saveGeneral(self, log_dir=None, web_port=None, web_log=None, web_ipv6=None,
update_shows_on_start=None,launch_browser=None, web_username=None, use_api=None, api_key=None,
web_password=None, version_notify=None, enable_https=None, https_cert=None, https_key=None, sort_article=None, french_column=None):
results = []
if web_ipv6 == "on":
web_ipv6 = 1
else:
web_ipv6 = 0
if web_log == "on":
web_log = 1
else:
web_log = 0
if launch_browser == "on":
launch_browser = 1
else:
launch_browser = 0
if update_shows_on_start == "on":
update_shows_on_start = 1
else:
update_shows_on_start = 0
if sort_article == "on":
sort_article = 1
else:
sort_article = 0
if french_column == "on":
french_column = 1
else:
french_column= 0
if version_notify == "on":
version_notify = 1
else:
version_notify = 0
if not config.change_LOG_DIR(log_dir):
results += ["Unable to create directory " + os.path.normpath(log_dir) + ", log dir not changed."]
sickbeard.UPDATE_SHOWS_ON_START = update_shows_on_start
sickbeard.LAUNCH_BROWSER = launch_browser
sickbeard.SORT_ARTICLE = sort_article
sickbeard.FRENCH_COLUMN = french_column
sickbeard.WEB_PORT = int(web_port)
sickbeard.WEB_IPV6 = web_ipv6
sickbeard.WEB_LOG = web_log
sickbeard.WEB_USERNAME = web_username
sickbeard.WEB_PASSWORD = web_password
if use_api == "on":
use_api = 1
else:
use_api = 0
sickbeard.USE_API = use_api
sickbeard.API_KEY = api_key
if enable_https == "on":
enable_https = 1
else:
enable_https = 0
sickbeard.ENABLE_HTTPS = enable_https
if not config.change_HTTPS_CERT(https_cert):
results += ["Unable to create directory " + os.path.normpath(https_cert) + ", https cert dir not changed."]
if not config.change_HTTPS_KEY(https_key):
results += ["Unable to create directory " + os.path.normpath(https_key) + ", https key dir not changed."]
config.change_VERSION_NOTIFY(version_notify)
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE) )
redirect("/config/general/")
class ConfigSearch:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config_search.tmpl")
t.submenu = ConfigMenu
return _munge(t)
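# Save the "Search Settings" tab: NZB/torrent client credentials, retention,
# search frequency and FTP options. Hosts get an http:// prefix and a trailing
# slash added when missing.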
@cherrypy.expose
def saveSearch(self, use_nzbs=None, use_torrents=None, nzb_dir=None, sab_username=None, sab_password=None,
sab_apikey=None, sab_category=None, sab_host=None, nzbget_password=None, nzbget_category=None, nzbget_host=None,
torrent_dir=None,torrent_method=None, nzb_method=None, usenet_retention=None, search_frequency=None, french_delay=None,
download_propers=None, download_french=None, torrent_username=None, torrent_password=None, torrent_host=None,
torrent_label=None, torrent_path=None, torrent_custom_url=None, torrent_ratio=None, torrent_paused=None, ignore_words=None,
prefered_method=None, torrent_use_ftp = None, ftp_host=None, ftp_port=None, ftp_timeout=None, ftp_passive = None, ftp_login=None,
ftp_password=None, ftp_remotedir=None):
results = []
if not config.change_NZB_DIR(nzb_dir):
results += ["Unable to create directory " + os.path.normpath(nzb_dir) + ", dir not changed."]
if not config.change_TORRENT_DIR(torrent_dir):
results += ["Unable to create directory " + os.path.normpath(torrent_dir) + ", dir not changed."]
config.change_SEARCH_FREQUENCY(search_frequency)
if download_propers == "on":
download_propers = 1
else:
download_propers = 0
if download_french == "on":
download_french = 1
else:
download_french = 0
if use_nzbs == "on":
use_nzbs = 1
else:
use_nzbs = 0
if use_torrents == "on":
use_torrents = 1
else:
use_torrents = 0
if usenet_retention == None:
usenet_retention = 200
if french_delay == None:
french_delay = 120
if ignore_words == None:
ignore_words = ""
if ftp_port == None:
ftp_port = 21
if ftp_timeout == None:
ftp_timeout = 120
sickbeard.USE_NZBS = use_nzbs
sickbeard.USE_TORRENTS = use_torrents
sickbeard.NZB_METHOD = nzb_method
sickbeard.PREFERED_METHOD = prefered_method
sickbeard.TORRENT_METHOD = torrent_method
sickbeard.USENET_RETENTION = int(usenet_retention)
sickbeard.FRENCH_DELAY = int(french_delay)
sickbeard.IGNORE_WORDS = ignore_words
sickbeard.DOWNLOAD_PROPERS = download_propers
sickbeard.DOWNLOAD_FRENCH = download_french
sickbeard.SAB_USERNAME = sab_username
sickbeard.SAB_PASSWORD = sab_password
sickbeard.SAB_APIKEY = sab_apikey.strip()
sickbeard.SAB_CATEGORY = sab_category
if sab_host and not re.match('https?://.*', sab_host):
sab_host = 'http://' + sab_host
if not sab_host.endswith('/'):
sab_host = sab_host + '/'
sickbeard.SAB_HOST = sab_host
sickbeard.NZBGET_PASSWORD = nzbget_password
sickbeard.NZBGET_CATEGORY = nzbget_category
sickbeard.NZBGET_HOST = nzbget_host
sickbeard.TORRENT_USERNAME = torrent_username
sickbeard.TORRENT_PASSWORD = torrent_password
sickbeard.TORRENT_LABEL = torrent_label
sickbeard.TORRENT_PATH = torrent_path
if torrent_custom_url == "on":
torrent_custom_url = 1
else:
torrent_custom_url = 0
sickbeard.TORRENT_CUSTOM_URL = torrent_custom_url
sickbeard.TORRENT_RATIO = torrent_ratio
if torrent_paused == "on":
torrent_paused = 1
else:
torrent_paused = 0
sickbeard.TORRENT_PAUSED = torrent_paused
if torrent_host and not re.match('https?://.*', torrent_host):
torrent_host = 'http://' + torrent_host
if not torrent_host.endswith('/'):
torrent_host = torrent_host + '/'
sickbeard.TORRENT_HOST = torrent_host
if torrent_use_ftp == "on":
torrent_use_ftp = 1
else:
torrent_use_ftp = 0
sickbeard.USE_TORRENT_FTP = torrent_use_ftp
sickbeard.FTP_HOST = ftp_host
sickbeard.FTP_PORT = ftp_port
sickbeard.FTP_TIMEOUT = ftp_timeout
if ftp_passive == "on":
ftp_passive = 1
else:
ftp_passive = 0
sickbeard.FTP_PASSIVE = ftp_passive
sickbeard.FTP_LOGIN = ftp_login
sickbeard.FTP_PASSWORD = ftp_password
sickbeard.FTP_DIR = ftp_remotedir
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE) )
redirect("/config/search/")
class ConfigPostProcessing:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config_postProcessing.tmpl")
t.submenu = ConfigMenu
return _munge(t)
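# Save the "Post Processing" tab: download dirs, renaming/naming patterns and
# per-provider metadata settings. Naming patterns are validated with
# isNamingValid() before they are stored.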
@cherrypy.expose
def savePostProcessing(self, naming_pattern=None, naming_multi_ep=None,
xbmc_data=None, xbmc__frodo__data=None, mediabrowser_data=None, synology_data=None, sony_ps3_data=None, wdtv_data=None, tivo_data=None,
use_banner=None, keep_processed_dir=None, process_method=None, process_automatically=None, process_automatically_torrent=None, rename_episodes=None,
move_associated_files=None, tv_download_dir=None, torrent_download_dir=None, naming_custom_abd=None, naming_abd_pattern=None):
results = []
if not config.change_TV_DOWNLOAD_DIR(tv_download_dir):
results += ["Unable to create directory " + os.path.normpath(tv_download_dir) + ", dir not changed."]
if not config.change_TORRENT_DOWNLOAD_DIR(torrent_download_dir):
results += ["Unable to create directory " + os.path.normpath(torrent_download_dir) + ", dir not changed."]
if use_banner == "on":
use_banner = 1
else:
use_banner = 0
if process_automatically == "on":
process_automatically = 1
else:
process_automatically = 0
if process_automatically_torrent == "on":
process_automatically_torrent = 1
else:
process_automatically_torrent = 0
if rename_episodes == "on":
rename_episodes = 1
else:
rename_episodes = 0
if keep_processed_dir == "on":
keep_processed_dir = 1
else:
keep_processed_dir = 0
if move_associated_files == "on":
move_associated_files = 1
else:
move_associated_files = 0
if naming_custom_abd == "on":
naming_custom_abd = 1
else:
naming_custom_abd = 0
sickbeard.PROCESS_AUTOMATICALLY = process_automatically
sickbeard.PROCESS_AUTOMATICALLY_TORRENT = process_automatically_torrent
sickbeard.KEEP_PROCESSED_DIR = keep_processed_dir
sickbeard.PROCESS_METHOD = process_method
sickbeard.RENAME_EPISODES = rename_episodes
sickbeard.MOVE_ASSOCIATED_FILES = move_associated_files
sickbeard.NAMING_CUSTOM_ABD = naming_custom_abd
sickbeard.metadata_provider_dict['XBMC'].set_config(xbmc_data)
sickbeard.metadata_provider_dict['XBMC (Frodo)'].set_config(xbmc__frodo__data)
sickbeard.metadata_provider_dict['MediaBrowser'].set_config(mediabrowser_data)
sickbeard.metadata_provider_dict['Synology'].set_config(synology_data)
sickbeard.metadata_provider_dict['Sony PS3'].set_config(sony_ps3_data)
sickbeard.metadata_provider_dict['WDTV'].set_config(wdtv_data)
sickbeard.metadata_provider_dict['TIVO'].set_config(tivo_data)
if self.isNamingValid(naming_pattern, naming_multi_ep) != "invalid":
sickbeard.NAMING_PATTERN = naming_pattern
sickbeard.NAMING_MULTI_EP = int(naming_multi_ep)
sickbeard.NAMING_FORCE_FOLDERS = naming.check_force_season_folders()
else:
results.append("You tried saving an invalid naming config, not saving your naming settings")
if self.isNamingValid(naming_abd_pattern, None, True) != "invalid":
sickbeard.NAMING_ABD_PATTERN = naming_abd_pattern
elif naming_custom_abd:
results.append("You tried saving an invalid air-by-date naming config, not saving your air-by-date settings")
sickbeard.USE_BANNER = use_banner
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE) )
redirect("/config/postProcessing/")
@cherrypy.expose
def testNaming(self, pattern=None, multi=None, abd=False):
if multi != None:
multi = int(multi)
result = naming.test_name(pattern, multi, abd)
result = ek.ek(os.path.join, result['dir'], result['name'])
return result
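# Validate a naming pattern. Returns "valid", "seasonfolders" (valid, but only
# when season folders are forced) or "invalid" so the config page can react.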
@cherrypy.expose
def isNamingValid(self, pattern=None, multi=None, abd=False):
if pattern == None:
return "invalid"
# air by date shows just need one check, we don't need to worry about season folders
if abd:
is_valid = naming.check_valid_abd_naming(pattern)
require_season_folders = False
else:
# check validity of single and multi ep cases for the whole path
is_valid = naming.check_valid_naming(pattern, multi)
# check validity of single and multi ep cases for only the file name
require_season_folders = naming.check_force_season_folders(pattern, multi)
if is_valid and not require_season_folders:
return "valid"
elif is_valid and require_season_folders:
return "seasonfolders"
else:
return "invalid"
class ConfigProviders:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config_providers.tmpl")
t.submenu = ConfigMenu
return _munge(t)
@cherrypy.expose
def canAddNewznabProvider(self, name):
if not name:
return json.dumps({'error': 'Invalid name specified'})
providerDict = dict(zip([x.getID() for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList))
tempProvider = newznab.NewznabProvider(name, '')
if tempProvider.getID() in providerDict:
return json.dumps({'error': 'Exists as '+providerDict[tempProvider.getID()].name})
else:
return json.dumps({'success': tempProvider.getID()})
@cherrypy.expose
def saveNewznabProvider(self, name, url, key=''):
if not name or not url:
return '0'
if not url.endswith('/'):
url = url + '/'
providerDict = dict(zip([x.name for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList))
if name in providerDict:
if not providerDict[name].default:
providerDict[name].name = name
providerDict[name].url = url
providerDict[name].key = key
return providerDict[name].getID() + '|' + providerDict[name].configStr()
else:
newProvider = newznab.NewznabProvider(name, url, key)
sickbeard.newznabProviderList.append(newProvider)
return newProvider.getID() + '|' + newProvider.configStr()
@cherrypy.expose
def deleteNewznabProvider(self, id):
providerDict = dict(zip([x.getID() for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList))
if id not in providerDict or providerDict[id].default:
return '0'
# delete it from the list
sickbeard.newznabProviderList.remove(providerDict[id])
if id in sickbeard.PROVIDER_ORDER:
sickbeard.PROVIDER_ORDER.remove(id)
return '1'
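# Save the "Search Providers" tab: rebuild the newznab provider list from the
# serialized newznab_string, enable/disable each provider from the ordered
# "name:enabled" pairs and store the per-provider credentials.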
@cherrypy.expose
def saveProviders(self, nzbmatrix_username=None, nzbmatrix_apikey=None,
nzbs_r_us_uid=None, nzbs_r_us_hash=None, newznab_string='',
omgwtfnzbs_uid=None, omgwtfnzbs_key=None,
tvtorrents_digest=None, tvtorrents_hash=None,
torrentleech_key=None,
btn_api_key=None,
newzbin_username=None, newzbin_password=None,t411_username=None,t411_password=None,ftdb_username=None,ftdb_password=None,tpi_username=None,tpi_password=None,addict_username=None,addict_password=None,fnt_username=None,fnt_password=None,xthor_username=None,xthor_password=None,thinkgeek_username=None,thinkgeek_password=None,
gks_key=None,
ethor_key=None,
provider_order=None):
results = []
provider_str_list = provider_order.split()
provider_list = []
newznabProviderDict = dict(zip([x.getID() for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList))
finishedNames = []
# add all the newznab info we got into our list
for curNewznabProviderStr in newznab_string.split('!!!'):
if not curNewznabProviderStr:
continue
curName, curURL, curKey = curNewznabProviderStr.split('|')
newProvider = newznab.NewznabProvider(curName, curURL, curKey)
curID = newProvider.getID()
# if it already exists then update it
if curID in newznabProviderDict:
newznabProviderDict[curID].name = curName
newznabProviderDict[curID].url = curURL
newznabProviderDict[curID].key = curKey
else:
sickbeard.newznabProviderList.append(newProvider)
finishedNames.append(curID)
# delete anything that is missing
for curProvider in sickbeard.newznabProviderList:
if curProvider.getID() not in finishedNames:
sickbeard.newznabProviderList.remove(curProvider)
# do the enable/disable
for curProviderStr in provider_str_list:
curProvider, curEnabled = curProviderStr.split(':')
curEnabled = int(curEnabled)
provider_list.append(curProvider)
if curProvider == 'nzbs_r_us':
sickbeard.NZBSRUS = curEnabled
elif curProvider == 'nzbs_org_old':
sickbeard.NZBS = curEnabled
elif curProvider == 'nzbmatrix':
sickbeard.NZBMATRIX = curEnabled
elif curProvider == 'newzbin':
sickbeard.NEWZBIN = curEnabled
elif curProvider == 'bin_req':
sickbeard.BINREQ = curEnabled
elif curProvider == 'womble_s_index':
sickbeard.WOMBLE = curEnabled
elif curProvider == 'nzbx':
sickbeard.NZBX = curEnabled
elif curProvider == 'omgwtfnzbs':
sickbeard.OMGWTFNZBS = curEnabled
elif curProvider == 'ezrss':
sickbeard.EZRSS = curEnabled
elif curProvider == 'tvtorrents':
sickbeard.TVTORRENTS = curEnabled
elif curProvider == 'torrentleech':
sickbeard.TORRENTLEECH = curEnabled
elif curProvider == 'btn':
sickbeard.BTN = curEnabled
elif curProvider == 'binnewz':
sickbeard.BINNEWZ = curEnabled
elif curProvider == 't411':
sickbeard.T411 = curEnabled
elif curProvider == 'ftdb':
sickbeard.FTDB = curEnabled
elif curProvider == 'tpi':
sickbeard.TPI = curEnabled
elif curProvider == 'addict':
sickbeard.ADDICT = curEnabled
elif curProvider == 'fnt':
sickbeard.FNT = curEnabled
elif curProvider == 'xthor':
sickbeard.XTHOR = curEnabled
elif curProvider == 'thinkgeek':
sickbeard.THINKGEEK = curEnabled
elif curProvider == 'cpasbien':
sickbeard.Cpasbien = curEnabled
elif curProvider == 'kat':
sickbeard.kat = curEnabled
elif curProvider == 'piratebay':
sickbeard.THEPIRATEBAY = curEnabled
elif curProvider == 'gks':
sickbeard.GKS = curEnabled
elif curProvider == 'ethor':
sickbeard.ETHOR = curEnabled
elif curProvider in newznabProviderDict:
newznabProviderDict[curProvider].enabled = bool(curEnabled)
else:
logger.log(u"don't know what " + curProvider + " is, skipping")
sickbeard.TVTORRENTS_DIGEST = tvtorrents_digest.strip()
sickbeard.TVTORRENTS_HASH = tvtorrents_hash.strip()
sickbeard.TORRENTLEECH_KEY = torrentleech_key.strip()
sickbeard.ETHOR_KEY = ethor_key.strip()
sickbeard.BTN_API_KEY = btn_api_key.strip()
sickbeard.T411_USERNAME = t411_username
sickbeard.T411_PASSWORD = t411_password
sickbeard.FTDB_USERNAME = ftdb_username
sickbeard.FTDB_PASSWORD = ftdb_password
sickbeard.TPI_USERNAME = tpi_username
sickbeard.TPI_PASSWORD = tpi_password
sickbeard.ADDICT_USERNAME = addict_username
sickbeard.ADDICT_PASSWORD = addict_password
sickbeard.FNT_USERNAME = fnt_username
sickbeard.FNT_PASSWORD = fnt_password
sickbeard.XTHOR_USERNAME = xthor_username
sickbeard.XTHOR_PASSWORD = xthor_password
sickbeard.THINKGEEK_USERNAME = thinkgeek_username
sickbeard.THINKGEEK_PASSWORD = thinkgeek_password
sickbeard.NZBSRUS_UID = nzbs_r_us_uid.strip()
sickbeard.NZBSRUS_HASH = nzbs_r_us_hash.strip()
sickbeard.OMGWTFNZBS_UID = omgwtfnzbs_uid.strip()
sickbeard.OMGWTFNZBS_KEY = omgwtfnzbs_key.strip()
sickbeard.GKS_KEY = gks_key.strip()
sickbeard.PROVIDER_ORDER = provider_list
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE) )
redirect("/config/providers/")
class ConfigNotifications:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config_notifications.tmpl")
t.submenu = ConfigMenu
return _munge(t)
@cherrypy.expose
def saveNotifications(self, use_xbmc=None, xbmc_notify_onsnatch=None, xbmc_notify_ondownload=None, xbmc_update_onlyfirst=None, xbmc_notify_onsubtitledownload=None,
xbmc_update_library=None, xbmc_update_full=None, xbmc_host=None, xbmc_username=None, xbmc_password=None,
use_plex=None, plex_notify_onsnatch=None, plex_notify_ondownload=None, plex_notify_onsubtitledownload=None, plex_update_library=None,
plex_server_host=None, plex_host=None, plex_username=None, plex_password=None,
use_growl=None, growl_notify_onsnatch=None, growl_notify_ondownload=None, growl_notify_onsubtitledownload=None, growl_host=None, growl_password=None,
use_prowl=None, prowl_notify_onsnatch=None, prowl_notify_ondownload=None, prowl_notify_onsubtitledownload=None, prowl_api=None, prowl_priority=0,
use_twitter=None, twitter_notify_onsnatch=None, twitter_notify_ondownload=None, twitter_notify_onsubtitledownload=None,
use_boxcar=None, boxcar_notify_onsnatch=None, boxcar_notify_ondownload=None, boxcar_notify_onsubtitledownload=None, boxcar_username=None,
use_pushover=None, pushover_notify_onsnatch=None, pushover_notify_ondownload=None, pushover_notify_onsubtitledownload=None, pushover_userkey=None,
use_libnotify=None, libnotify_notify_onsnatch=None, libnotify_notify_ondownload=None, libnotify_notify_onsubtitledownload=None,
use_nmj=None, nmj_host=None, nmj_database=None, nmj_mount=None, use_synoindex=None,
use_nmjv2=None, nmjv2_host=None, nmjv2_dbloc=None, nmjv2_database=None,
use_trakt=None, trakt_username=None, trakt_password=None, trakt_api=None,trakt_remove_watchlist=None,trakt_use_watchlist=None,trakt_start_paused=None,trakt_method_add=None,
use_synologynotifier=None, synologynotifier_notify_onsnatch=None, synologynotifier_notify_ondownload=None, synologynotifier_notify_onsubtitledownload=None,
use_pytivo=None, pytivo_notify_onsnatch=None, pytivo_notify_ondownload=None, pytivo_notify_onsubtitledownload=None, pytivo_update_library=None,
pytivo_host=None, pytivo_share_name=None, pytivo_tivo_name=None,
use_nma=None, nma_notify_onsnatch=None, nma_notify_ondownload=None, nma_notify_onsubtitledownload=None, nma_api=None, nma_priority=0,
use_pushalot=None, pushalot_notify_onsnatch=None, pushalot_notify_ondownload=None, pushalot_notify_onsubtitledownload=None, pushalot_authorizationtoken=None,
use_pushbullet=None, pushbullet_notify_onsnatch=None, pushbullet_notify_ondownload=None, pushbullet_notify_onsubtitledownload=None, pushbullet_api=None, pushbullet_device=None, pushbullet_device_list=None,
use_mail=None, mail_username=None, mail_password=None, mail_server=None, mail_ssl=None, mail_from=None, mail_to=None, mail_notify_onsnatch=None ):
results = []
if xbmc_notify_onsnatch == "on":
xbmc_notify_onsnatch = 1
else:
xbmc_notify_onsnatch = 0
if xbmc_notify_ondownload == "on":
xbmc_notify_ondownload = 1
else:
xbmc_notify_ondownload = 0
if xbmc_notify_onsubtitledownload == "on":
xbmc_notify_onsubtitledownload = 1
else:
xbmc_notify_onsubtitledownload = 0
if xbmc_update_library == "on":
xbmc_update_library = 1
else:
xbmc_update_library = 0
if xbmc_update_full == "on":
xbmc_update_full = 1
else:
xbmc_update_full = 0
if xbmc_update_onlyfirst == "on":
xbmc_update_onlyfirst = 1
else:
xbmc_update_onlyfirst = 0
if use_xbmc == "on":
use_xbmc = 1
else:
use_xbmc = 0
if plex_update_library == "on":
plex_update_library = 1
else:
plex_update_library = 0
if plex_notify_onsnatch == "on":
plex_notify_onsnatch = 1
else:
plex_notify_onsnatch = 0
if plex_notify_ondownload == "on":
plex_notify_ondownload = 1
else:
plex_notify_ondownload = 0
if plex_notify_onsubtitledownload == "on":
plex_notify_onsubtitledownload = 1
else:
plex_notify_onsubtitledownload = 0
if use_plex == "on":
use_plex = 1
else:
use_plex = 0
if growl_notify_onsnatch == "on":
growl_notify_onsnatch = 1
else:
growl_notify_onsnatch = 0
if growl_notify_ondownload == "on":
growl_notify_ondownload = 1
else:
growl_notify_ondownload = 0
if growl_notify_onsubtitledownload == "on":
growl_notify_onsubtitledownload = 1
else:
growl_notify_onsubtitledownload = 0
if use_growl == "on":
use_growl = 1
else:
use_growl = 0
if prowl_notify_onsnatch == "on":
prowl_notify_onsnatch = 1
else:
prowl_notify_onsnatch = 0
if prowl_notify_ondownload == "on":
prowl_notify_ondownload = 1
else:
prowl_notify_ondownload = 0
if prowl_notify_onsubtitledownload == "on":
prowl_notify_onsubtitledownload = 1
else:
prowl_notify_onsubtitledownload = 0
if use_prowl == "on":
use_prowl = 1
else:
use_prowl = 0
if twitter_notify_onsnatch == "on":
twitter_notify_onsnatch = 1
else:
twitter_notify_onsnatch = 0
if twitter_notify_ondownload == "on":
twitter_notify_ondownload = 1
else:
twitter_notify_ondownload = 0
if twitter_notify_onsubtitledownload == "on":
twitter_notify_onsubtitledownload = 1
else:
twitter_notify_onsubtitledownload = 0
if use_twitter == "on":
use_twitter = 1
else:
use_twitter = 0
if boxcar_notify_onsnatch == "on":
boxcar_notify_onsnatch = 1
else:
boxcar_notify_onsnatch = 0
if boxcar_notify_ondownload == "on":
boxcar_notify_ondownload = 1
else:
boxcar_notify_ondownload = 0
if boxcar_notify_onsubtitledownload == "on":
boxcar_notify_onsubtitledownload = 1
else:
boxcar_notify_onsubtitledownload = 0
if use_boxcar == "on":
use_boxcar = 1
else:
use_boxcar = 0
if pushover_notify_onsnatch == "on":
pushover_notify_onsnatch = 1
else:
pushover_notify_onsnatch = 0
if pushover_notify_ondownload == "on":
pushover_notify_ondownload = 1
else:
pushover_notify_ondownload = 0
if pushover_notify_onsubtitledownload == "on":
pushover_notify_onsubtitledownload = 1
else:
pushover_notify_onsubtitledownload = 0
if use_pushover == "on":
use_pushover = 1
else:
use_pushover = 0
if use_nmj == "on":
use_nmj = 1
else:
use_nmj = 0
if use_synoindex == "on":
use_synoindex = 1
else:
use_synoindex = 0
if use_synologynotifier == "on":
use_synologynotifier = 1
else:
use_synologynotifier = 0
if synologynotifier_notify_onsnatch == "on":
synologynotifier_notify_onsnatch = 1
else:
synologynotifier_notify_onsnatch = 0
if synologynotifier_notify_ondownload == "on":
synologynotifier_notify_ondownload = 1
else:
synologynotifier_notify_ondownload = 0
if synologynotifier_notify_onsubtitledownload == "on":
synologynotifier_notify_onsubtitledownload = 1
else:
synologynotifier_notify_onsubtitledownload = 0
if use_nmjv2 == "on":
use_nmjv2 = 1
else:
use_nmjv2 = 0
if use_trakt == "on":
use_trakt = 1
else:
use_trakt = 0
if trakt_remove_watchlist == "on":
trakt_remove_watchlist = 1
else:
trakt_remove_watchlist = 0
if trakt_use_watchlist == "on":
trakt_use_watchlist = 1
else:
trakt_use_watchlist = 0
if trakt_start_paused == "on":
trakt_start_paused = 1
else:
trakt_start_paused = 0
if use_pytivo == "on":
use_pytivo = 1
else:
use_pytivo = 0
if pytivo_notify_onsnatch == "on":
pytivo_notify_onsnatch = 1
else:
pytivo_notify_onsnatch = 0
if pytivo_notify_ondownload == "on":
pytivo_notify_ondownload = 1
else:
pytivo_notify_ondownload = 0
if pytivo_notify_onsubtitledownload == "on":
pytivo_notify_onsubtitledownload = 1
else:
pytivo_notify_onsubtitledownload = 0
if pytivo_update_library == "on":
pytivo_update_library = 1
else:
pytivo_update_library = 0
if use_nma == "on":
use_nma = 1
else:
use_nma = 0
if nma_notify_onsnatch == "on":
nma_notify_onsnatch = 1
else:
nma_notify_onsnatch = 0
if nma_notify_ondownload == "on":
nma_notify_ondownload = 1
else:
nma_notify_ondownload = 0
if nma_notify_onsubtitledownload == "on":
nma_notify_onsubtitledownload = 1
else:
nma_notify_onsubtitledownload = 0
if use_mail == "on":
use_mail = 1
else:
use_mail = 0
if mail_ssl == "on":
mail_ssl = 1
else:
mail_ssl = 0
if mail_notify_onsnatch == "on":
mail_notify_onsnatch = 1
else:
mail_notify_onsnatch = 0
if use_pushalot == "on":
use_pushalot = 1
else:
use_pushalot = 0
if pushalot_notify_onsnatch == "on":
pushalot_notify_onsnatch = 1
else:
pushalot_notify_onsnatch = 0
if pushalot_notify_ondownload == "on":
pushalot_notify_ondownload = 1
else:
pushalot_notify_ondownload = 0
if pushalot_notify_onsubtitledownload == "on":
pushalot_notify_onsubtitledownload = 1
else:
pushalot_notify_onsubtitledownload = 0
if use_pushbullet == "on":
use_pushbullet = 1
else:
use_pushbullet = 0
if pushbullet_notify_onsnatch == "on":
pushbullet_notify_onsnatch = 1
else:
pushbullet_notify_onsnatch = 0
if pushbullet_notify_ondownload == "on":
pushbullet_notify_ondownload = 1
else:
pushbullet_notify_ondownload = 0
if pushbullet_notify_onsubtitledownload == "on":
pushbullet_notify_onsubtitledownload = 1
else:
pushbullet_notify_onsubtitledownload = 0
sickbeard.USE_XBMC = use_xbmc
sickbeard.XBMC_NOTIFY_ONSNATCH = xbmc_notify_onsnatch
sickbeard.XBMC_NOTIFY_ONDOWNLOAD = xbmc_notify_ondownload
sickbeard.XBMC_NOTIFY_ONSUBTITLEDOWNLOAD = xbmc_notify_onsubtitledownload
sickbeard.XBMC_UPDATE_LIBRARY = xbmc_update_library
sickbeard.XBMC_UPDATE_FULL = xbmc_update_full
sickbeard.XBMC_UPDATE_ONLYFIRST = xbmc_update_onlyfirst
sickbeard.XBMC_HOST = xbmc_host
sickbeard.XBMC_USERNAME = xbmc_username
sickbeard.XBMC_PASSWORD = xbmc_password
sickbeard.USE_PLEX = use_plex
sickbeard.PLEX_NOTIFY_ONSNATCH = plex_notify_onsnatch
sickbeard.PLEX_NOTIFY_ONDOWNLOAD = plex_notify_ondownload
sickbeard.PLEX_NOTIFY_ONSUBTITLEDOWNLOAD = plex_notify_onsubtitledownload
sickbeard.PLEX_UPDATE_LIBRARY = plex_update_library
sickbeard.PLEX_HOST = plex_host
sickbeard.PLEX_SERVER_HOST = plex_server_host
sickbeard.PLEX_USERNAME = plex_username
sickbeard.PLEX_PASSWORD = plex_password
sickbeard.USE_GROWL = use_growl
sickbeard.GROWL_NOTIFY_ONSNATCH = growl_notify_onsnatch
sickbeard.GROWL_NOTIFY_ONDOWNLOAD = growl_notify_ondownload
sickbeard.GROWL_NOTIFY_ONSUBTITLEDOWNLOAD = growl_notify_onsubtitledownload
sickbeard.GROWL_HOST = growl_host
sickbeard.GROWL_PASSWORD = growl_password
sickbeard.USE_PROWL = use_prowl
sickbeard.PROWL_NOTIFY_ONSNATCH = prowl_notify_onsnatch
sickbeard.PROWL_NOTIFY_ONDOWNLOAD = prowl_notify_ondownload
sickbeard.PROWL_NOTIFY_ONSUBTITLEDOWNLOAD = prowl_notify_onsubtitledownload
sickbeard.PROWL_API = prowl_api
sickbeard.PROWL_PRIORITY = prowl_priority
sickbeard.USE_TWITTER = use_twitter
sickbeard.TWITTER_NOTIFY_ONSNATCH = twitter_notify_onsnatch
sickbeard.TWITTER_NOTIFY_ONDOWNLOAD = twitter_notify_ondownload
sickbeard.TWITTER_NOTIFY_ONSUBTITLEDOWNLOAD = twitter_notify_onsubtitledownload
sickbeard.USE_BOXCAR = use_boxcar
sickbeard.BOXCAR_NOTIFY_ONSNATCH = boxcar_notify_onsnatch
sickbeard.BOXCAR_NOTIFY_ONDOWNLOAD = boxcar_notify_ondownload
sickbeard.BOXCAR_NOTIFY_ONSUBTITLEDOWNLOAD = boxcar_notify_onsubtitledownload
sickbeard.BOXCAR_USERNAME = boxcar_username
sickbeard.USE_PUSHOVER = use_pushover
sickbeard.PUSHOVER_NOTIFY_ONSNATCH = pushover_notify_onsnatch
sickbeard.PUSHOVER_NOTIFY_ONDOWNLOAD = pushover_notify_ondownload
sickbeard.PUSHOVER_NOTIFY_ONSUBTITLEDOWNLOAD = pushover_notify_onsubtitledownload
sickbeard.PUSHOVER_USERKEY = pushover_userkey
sickbeard.USE_LIBNOTIFY = use_libnotify == "on"
sickbeard.LIBNOTIFY_NOTIFY_ONSNATCH = libnotify_notify_onsnatch == "on"
sickbeard.LIBNOTIFY_NOTIFY_ONDOWNLOAD = libnotify_notify_ondownload == "on"
sickbeard.LIBNOTIFY_NOTIFY_ONSUBTITLEDOWNLOAD = libnotify_notify_onsubtitledownload == "on"
sickbeard.USE_NMJ = use_nmj
sickbeard.NMJ_HOST = nmj_host
sickbeard.NMJ_DATABASE = nmj_database
sickbeard.NMJ_MOUNT = nmj_mount
sickbeard.USE_SYNOINDEX = use_synoindex
sickbeard.USE_SYNOLOGYNOTIFIER = use_synologynotifier
sickbeard.SYNOLOGYNOTIFIER_NOTIFY_ONSNATCH = synologynotifier_notify_onsnatch
sickbeard.SYNOLOGYNOTIFIER_NOTIFY_ONDOWNLOAD = synologynotifier_notify_ondownload
sickbeard.SYNOLOGYNOTIFIER_NOTIFY_ONSUBTITLEDOWNLOAD = synologynotifier_notify_onsubtitledownload
sickbeard.USE_NMJv2 = use_nmjv2
sickbeard.NMJv2_HOST = nmjv2_host
sickbeard.NMJv2_DATABASE = nmjv2_database
sickbeard.NMJv2_DBLOC = nmjv2_dbloc
sickbeard.USE_TRAKT = use_trakt
sickbeard.TRAKT_USERNAME = trakt_username
sickbeard.TRAKT_PASSWORD = trakt_password
sickbeard.TRAKT_API = trakt_api
sickbeard.TRAKT_REMOVE_WATCHLIST = trakt_remove_watchlist
sickbeard.TRAKT_USE_WATCHLIST = trakt_use_watchlist
sickbeard.TRAKT_METHOD_ADD = trakt_method_add
sickbeard.TRAKT_START_PAUSED = trakt_start_paused
sickbeard.USE_PYTIVO = use_pytivo
# the pytivo flags were already converted to 1/0 above
sickbeard.PYTIVO_NOTIFY_ONSNATCH = pytivo_notify_onsnatch
sickbeard.PYTIVO_NOTIFY_ONDOWNLOAD = pytivo_notify_ondownload
sickbeard.PYTIVO_NOTIFY_ONSUBTITLEDOWNLOAD = pytivo_notify_onsubtitledownload
sickbeard.PYTIVO_UPDATE_LIBRARY = pytivo_update_library
sickbeard.PYTIVO_HOST = pytivo_host
sickbeard.PYTIVO_SHARE_NAME = pytivo_share_name
sickbeard.PYTIVO_TIVO_NAME = pytivo_tivo_name
sickbeard.USE_NMA = use_nma
sickbeard.NMA_NOTIFY_ONSNATCH = nma_notify_onsnatch
sickbeard.NMA_NOTIFY_ONDOWNLOAD = nma_notify_ondownload
sickbeard.NMA_NOTIFY_ONSUBTITLEDOWNLOAD = nma_notify_onsubtitledownload
sickbeard.NMA_API = nma_api
sickbeard.NMA_PRIORITY = nma_priority
sickbeard.USE_MAIL = use_mail
sickbeard.MAIL_USERNAME = mail_username
sickbeard.MAIL_PASSWORD = mail_password
sickbeard.MAIL_SERVER = mail_server
sickbeard.MAIL_SSL = mail_ssl
sickbeard.MAIL_FROM = mail_from
sickbeard.MAIL_TO = mail_to
sickbeard.MAIL_NOTIFY_ONSNATCH = mail_notify_onsnatch
sickbeard.USE_PUSHALOT = use_pushalot
sickbeard.PUSHALOT_NOTIFY_ONSNATCH = pushalot_notify_onsnatch
sickbeard.PUSHALOT_NOTIFY_ONDOWNLOAD = pushalot_notify_ondownload
sickbeard.PUSHALOT_NOTIFY_ONSUBTITLEDOWNLOAD = pushalot_notify_onsubtitledownload
sickbeard.PUSHALOT_AUTHORIZATIONTOKEN = pushalot_authorizationtoken
sickbeard.USE_PUSHBULLET = use_pushbullet
sickbeard.PUSHBULLET_NOTIFY_ONSNATCH = pushbullet_notify_onsnatch
sickbeard.PUSHBULLET_NOTIFY_ONDOWNLOAD = pushbullet_notify_ondownload
sickbeard.PUSHBULLET_NOTIFY_ONSUBTITLEDOWNLOAD = pushbullet_notify_onsubtitledownload
sickbeard.PUSHBULLET_API = pushbullet_api
sickbeard.PUSHBULLET_DEVICE = pushbullet_device_list
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE) )
redirect("/config/notifications/")
class ConfigSubtitles:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config_subtitles.tmpl")
t.submenu = ConfigMenu
return _munge(t)
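# Save the "Subtitles" tab: start or stop the subtitle finder thread, store the
# wanted languages/services and the post-download cleansing options.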
@cherrypy.expose
def saveSubtitles(self, use_subtitles=None, subsnewasold=None, subtitles_plugins=None, subtitles_languages=None, subtitles_dir=None, subtitles_dir_sub=None, subsnolang = None, service_order=None, subtitles_history=None, subtitles_clean_hi=None, subtitles_clean_team=None, subtitles_clean_music=None, subtitles_clean_punc=None):
results = []
if use_subtitles == "on":
use_subtitles = 1
if sickbeard.subtitlesFinderScheduler.thread == None or not sickbeard.subtitlesFinderScheduler.thread.isAlive():
sickbeard.subtitlesFinderScheduler.initThread()
else:
use_subtitles = 0
sickbeard.subtitlesFinderScheduler.abort = True
logger.log(u"Waiting for the SUBTITLESFINDER thread to exit")
try:
sickbeard.subtitlesFinderScheduler.thread.join(5)
except:
pass
if subtitles_history == "on":
subtitles_history = 1
else:
subtitles_history = 0
if subtitles_dir_sub == "on":
subtitles_dir_sub = 1
else:
subtitles_dir_sub = 0
if subsnewasold == "on":
subsnewasold = 1
else:
subsnewasold = 0
if subsnolang == "on":
subsnolang = 1
else:
subsnolang = 0
sickbeard.USE_SUBTITLES = use_subtitles
sickbeard.SUBSNEWASOLD = subsnewasold
sickbeard.SUBTITLES_LANGUAGES = [lang.alpha2 for lang in subtitles.isValidLanguage(subtitles_languages.replace(' ', '').split(','))] if subtitles_languages != '' else ''
sickbeard.SUBTITLES_DIR = subtitles_dir
sickbeard.SUBTITLES_DIR_SUB = subtitles_dir_sub
sickbeard.SUBSNOLANG = subsnolang
sickbeard.SUBTITLES_HISTORY = subtitles_history
# Subtitles services
services_str_list = service_order.split()
subtitles_services_list = []
subtitles_services_enabled = []
for curServiceStr in services_str_list:
curService, curEnabled = curServiceStr.split(':')
subtitles_services_list.append(curService)
subtitles_services_enabled.append(int(curEnabled))
sickbeard.SUBTITLES_SERVICES_LIST = subtitles_services_list
sickbeard.SUBTITLES_SERVICES_ENABLED = subtitles_services_enabled
#Subtitles Cleansing
if subtitles_clean_hi == "on":
subtitles_clean_hi = 1
else:
subtitles_clean_hi = 0
if subtitles_clean_team == "on":
subtitles_clean_team = 1
else:
subtitles_clean_team = 0
if subtitles_clean_music == "on":
subtitles_clean_music = 1
else:
subtitles_clean_music = 0
if subtitles_clean_punc == "on":
subtitles_clean_punc = 1
else:
subtitles_clean_punc = 0
sickbeard.SUBTITLES_CLEAN_HI = subtitles_clean_hi
sickbeard.SUBTITLES_CLEAN_TEAM = subtitles_clean_team
sickbeard.SUBTITLES_CLEAN_MUSIC = subtitles_clean_music
sickbeard.SUBTITLES_CLEAN_PUNC = subtitles_clean_punc
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE) )
redirect("/config/subtitles/")
class Config:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config.tmpl")
t.submenu = ConfigMenu
return _munge(t)
general = ConfigGeneral()
search = ConfigSearch()
postProcessing = ConfigPostProcessing()
providers = ConfigProviders()
notifications = ConfigNotifications()
subtitles = ConfigSubtitles()
def haveXBMC():
return sickbeard.USE_XBMC and sickbeard.XBMC_UPDATE_LIBRARY
def havePLEX():
return sickbeard.USE_PLEX and sickbeard.PLEX_UPDATE_LIBRARY
def HomeMenu():
return [
{ 'title': 'Add Shows', 'path': 'home/addShows/', },
{ 'title': 'Manual Post-Processing', 'path': 'home/postprocess/' },
{ 'title': 'Update XBMC', 'path': 'home/updateXBMC/', 'requires': haveXBMC },
{ 'title': 'Update Plex', 'path': 'home/updatePLEX/', 'requires': havePLEX },
{ 'title': 'Update', 'path': 'manage/manageSearches/forceVersionCheck', 'confirm': True},
{ 'title': 'Restart', 'path': 'home/restart/?pid='+str(sickbeard.PID), 'confirm': True },
{ 'title': 'Shutdown', 'path': 'home/shutdown/?pid='+str(sickbeard.PID), 'confirm': True },
]
class HomePostProcess:
@cherrypy.expose
def index(self):
t = PageTemplate(file="home_postprocess.tmpl")
t.submenu = HomeMenu()
return _munge(t)
@cherrypy.expose
def processEpisode(self, dir=None, nzbName=None, jobName=None, quiet=None):
if not dir:
redirect("/home/postprocess")
else:
result = processTV.processDir(dir, nzbName)
if quiet != None and int(quiet) == 1:
return result
result = result.replace("\n","<br />\n")
return _genericMessage("Postprocessing results", result)
class NewHomeAddShows:
@cherrypy.expose
def index(self):
t = PageTemplate(file="home_addShows.tmpl")
t.submenu = HomeMenu()
return _munge(t)
@cherrypy.expose
def getTVDBLanguages(self):
result = tvdb_api.Tvdb().config['valid_languages']
# Make sure list is sorted alphabetically but 'fr' is in front
if 'fr' in result:
del result[result.index('fr')]
result.sort()
result.insert(0, 'fr')
return json.dumps({'results': result})
@cherrypy.expose
def sanitizeFileName(self, name):
return helpers.sanitizeFileName(name)
@cherrypy.expose
def searchTVDBForShowName(self, name, lang="fr"):
if not lang or lang == 'null':
lang = "fr"
baseURL = "http://thetvdb.com/api/GetSeries.php?"
nameUTF8 = name.encode('utf-8')
logger.log(u"Trying to find Show on thetvdb.com with: " + nameUTF8.decode('utf-8'), logger.DEBUG)
# Use each word in the show's name as a possible search term
keywords = nameUTF8.split(' ')
# Insert the whole show's name as the first search term so best results are first
# ex: keywords = ['Some Show Name', 'Some', 'Show', 'Name']
if len(keywords) > 1:
keywords.insert(0, nameUTF8)
# Query the TVDB for each search term and build the list of results
results = []
for searchTerm in keywords:
params = {'seriesname': searchTerm,
'language': lang}
finalURL = baseURL + urllib.urlencode(params)
logger.log(u"Searching for Show with searchterm: \'" + searchTerm.decode('utf-8') + u"\' on URL " + finalURL, logger.DEBUG)
urlData = helpers.getURL(finalURL)
if urlData is None:
# When urlData is None, trouble connecting to TVDB, don't try the rest of the keywords
logger.log(u"Unable to get URL: " + finalURL, logger.ERROR)
break
else:
try:
seriesXML = etree.ElementTree(etree.XML(urlData))
series = seriesXML.getiterator('Series')
except Exception, e:
# use finalURL in log, because urlData can be too much information
logger.log(u"Unable to parse XML for some reason: " + ex(e) + " from XML: " + finalURL, logger.ERROR)
series = ''
# add each result to our list
for curSeries in series:
tvdb_id = int(curSeries.findtext('seriesid'))
# don't add duplicates
if tvdb_id in [x[0] for x in results]:
continue
results.append((tvdb_id, curSeries.findtext('SeriesName'), curSeries.findtext('FirstAired')))
lang_id = tvdb_api.Tvdb().config['langabbv_to_id'][lang]
return json.dumps({'results': results, 'langid': lang_id})
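# Scan the selected root dirs and build the table used by the "add existing
# shows" page: each sub-folder is flagged if it is already in the database and,
# where possible, a TVDB id/show name is pulled from existing metadata files.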
@cherrypy.expose
def massAddTable(self, rootDir=None):
t = PageTemplate(file="home_massAddTable.tmpl")
t.submenu = HomeMenu()
myDB = db.DBConnection()
if not rootDir:
return "No folders selected."
elif type(rootDir) != list:
root_dirs = [rootDir]
else:
root_dirs = rootDir
root_dirs = [urllib.unquote_plus(x) for x in root_dirs]
default_index = int(sickbeard.ROOT_DIRS.split('|')[0])
if len(root_dirs) > default_index:
tmp = root_dirs[default_index]
if tmp in root_dirs:
root_dirs.remove(tmp)
root_dirs = [tmp]+root_dirs
dir_list = []
for root_dir in root_dirs:
try:
file_list = ek.ek(os.listdir, root_dir)
except:
continue
for cur_file in file_list:
cur_path = ek.ek(os.path.normpath, ek.ek(os.path.join, root_dir, cur_file))
if not ek.ek(os.path.isdir, cur_path):
continue
cur_dir = {
'dir': cur_path,
'display_dir': '<b>'+ek.ek(os.path.dirname, cur_path)+os.sep+'</b>'+ek.ek(os.path.basename, cur_path),
}
# see if the folder is already added as a show in the database
dirResults = myDB.select("SELECT * FROM tv_shows WHERE location = ?", [cur_path])
if dirResults:
cur_dir['added_already'] = True
else:
cur_dir['added_already'] = False
dir_list.append(cur_dir)
tvdb_id = ''
show_name = ''
for cur_provider in sickbeard.metadata_provider_dict.values():
(tvdb_id, show_name) = cur_provider.retrieveShowMetadata(cur_path)
if tvdb_id and show_name:
break
cur_dir['existing_info'] = (tvdb_id, show_name)
if tvdb_id and helpers.findCertainShow(sickbeard.showList, tvdb_id):
cur_dir['added_already'] = True
t.dirList = dir_list
return _munge(t)
@cherrypy.expose
def newShow(self, show_to_add=None, other_shows=None):
"""
Display the new show page which collects a tvdb id, folder, and extra options and
posts them to addNewShow
"""
t = PageTemplate(file="home_newShow.tmpl")
t.submenu = HomeMenu()
show_dir, tvdb_id, show_name = self.split_extra_show(show_to_add)
if tvdb_id and show_name:
use_provided_info = True
else:
use_provided_info = False
# tell the template whether we're giving it show name & TVDB ID
t.use_provided_info = use_provided_info
# use the given show_dir for the tvdb search if available
if not show_dir:
t.default_show_name = ''
elif not show_name:
t.default_show_name = ek.ek(os.path.basename, ek.ek(os.path.normpath, show_dir)).replace('.',' ')
else:
t.default_show_name = show_name
# carry a list of other dirs if given
if not other_shows:
other_shows = []
elif type(other_shows) != list:
other_shows = [other_shows]
if use_provided_info:
t.provided_tvdb_id = tvdb_id
t.provided_tvdb_name = show_name
t.provided_show_dir = show_dir
t.other_shows = other_shows
return _munge(t)
@cherrypy.expose
def addNewShow(self, whichSeries=None, tvdbLang="fr", rootDir=None, defaultStatus=None,
anyQualities=None, bestQualities=None, flatten_folders=None, subtitles=None, fullShowPath=None,
other_shows=None, skipShow=None, audio_lang=None):
"""
Receive tvdb id, dir, and other options and create a show from them. If extra show dirs are
provided then it forwards back to newShow, if not it goes to /home.
"""
# grab our list of other dirs if given
if not other_shows:
other_shows = []
elif type(other_shows) != list:
other_shows = [other_shows]
def finishAddShow():
# if there are no extra shows then go home
if not other_shows:
redirect('/home')
# peel off the next one
next_show_dir = other_shows[0]
rest_of_show_dirs = other_shows[1:]
# go to add the next show
return self.newShow(next_show_dir, rest_of_show_dirs)
# if we're skipping then behave accordingly
if skipShow:
return finishAddShow()
# sanity check on our inputs
if (not rootDir and not fullShowPath) or not whichSeries:
return "Missing params, no tvdb id or folder:"+repr(whichSeries)+" and "+repr(rootDir)+"/"+repr(fullShowPath)
# figure out what show we're adding and where
series_pieces = whichSeries.partition('|')
if len(series_pieces) < 3:
return "Error with show selection."
tvdb_id = int(series_pieces[0])
show_name = series_pieces[2]
# use the whole path if it's given, or else append the show name to the root dir to get the full show path
if fullShowPath:
show_dir = ek.ek(os.path.normpath, fullShowPath)
else:
show_dir = ek.ek(os.path.join, rootDir, helpers.sanitizeFileName(show_name))
# blanket policy - if the dir exists you should have used "add existing show" numbnuts
if ek.ek(os.path.isdir, show_dir) and not fullShowPath:
ui.notifications.error("Unable to add show", "Folder "+show_dir+" exists already")
redirect('/home/addShows/existingShows')
# don't create show dir if config says not to
if sickbeard.ADD_SHOWS_WO_DIR:
logger.log(u"Skipping initial creation of "+show_dir+" due to config.ini setting")
else:
dir_exists = helpers.makeDir(show_dir)
if not dir_exists:
logger.log(u"Unable to create the folder "+show_dir+", can't add the show", logger.ERROR)
ui.notifications.error("Unable to add show", "Unable to create the folder "+show_dir+", can't add the show")
redirect("/home")
else:
helpers.chmodAsParent(show_dir)
# prepare the inputs for passing along
if flatten_folders == "on":
flatten_folders = 1
else:
flatten_folders = 0
if subtitles == "on":
subtitles = 1
else:
subtitles = 0
if not anyQualities:
anyQualities = []
if not bestQualities:
bestQualities = []
if type(anyQualities) != list:
anyQualities = [anyQualities]
if type(bestQualities) != list:
bestQualities = [bestQualities]
newQuality = Quality.combineQualities(map(int, anyQualities), map(int, bestQualities))
# add the show
sickbeard.showQueueScheduler.action.addShow(tvdb_id, show_dir, int(defaultStatus), newQuality, flatten_folders, tvdbLang, subtitles, audio_lang) #@UndefinedVariable
ui.notifications.message('Show added', 'Adding the specified show into '+show_dir)
return finishAddShow()
@cherrypy.expose
def existingShows(self):
"""
Prints out the page to add existing shows from a root dir
"""
t = PageTemplate(file="home_addExistingShow.tmpl")
t.submenu = HomeMenu()
return _munge(t)
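# Decode the "dir|tvdb_id|show name" triple that the add-show pages pass
# around; missing pieces come back as None.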
def split_extra_show(self, extra_show):
if not extra_show:
return (None, None, None)
split_vals = extra_show.split('|')
if len(split_vals) < 3:
return (extra_show, None, None)
show_dir = split_vals[0]
tvdb_id = split_vals[1]
show_name = '|'.join(split_vals[2:])
return (show_dir, tvdb_id, show_name)
@cherrypy.expose
def addExistingShows(self, shows_to_add=None, promptForSettings=None):
"""
Receives a dir list and adds them. Adds the ones with given TVDB IDs first, then forwards
along to the newShow page.
"""
# grab a list of other shows to add, if provided
if not shows_to_add:
shows_to_add = []
elif type(shows_to_add) != list:
shows_to_add = [shows_to_add]
shows_to_add = [urllib.unquote_plus(x) for x in shows_to_add]
if promptForSettings == "on":
promptForSettings = 1
else:
promptForSettings = 0
tvdb_id_given = []
dirs_only = []
# separate all the ones with TVDB IDs
for cur_dir in shows_to_add:
if '|' not in cur_dir:
dirs_only.append(cur_dir)
else:
show_dir, tvdb_id, show_name = self.split_extra_show(cur_dir)
if not show_dir or not tvdb_id or not show_name:
continue
tvdb_id_given.append((show_dir, int(tvdb_id), show_name))
# if they want me to prompt for settings then I will just carry on to the newShow page
if promptForSettings and shows_to_add:
return self.newShow(shows_to_add[0], shows_to_add[1:])
# if they don't want me to prompt for settings then I can just add all the nfo shows now
num_added = 0
for cur_show in tvdb_id_given:
show_dir, tvdb_id, show_name = cur_show
# add the show
sickbeard.showQueueScheduler.action.addShow(tvdb_id, show_dir, int(sickbeard.STATUS_DEFAULT), sickbeard.QUALITY_DEFAULT, sickbeard.FLATTEN_FOLDERS_DEFAULT,"fr", sickbeard.SUBTITLES_DEFAULT, sickbeard.AUDIO_SHOW_DEFAULT) #@UndefinedVariable
num_added += 1
if num_added:
ui.notifications.message("Shows Added", "Automatically added "+str(num_added)+" from their existing metadata files")
# if we're done then go home
if not dirs_only:
redirect('/home')
# for the remaining shows we need to prompt for each one, so forward this on to the newShow page
return self.newShow(dirs_only[0], dirs_only[1:])
ErrorLogsMenu = [
{ 'title': 'Clear Errors', 'path': 'errorlogs/clearerrors' },
#{ 'title': 'View Log', 'path': 'errorlogs/viewlog' },
]
class ErrorLogs:
@cherrypy.expose
def index(self):
t = PageTemplate(file="errorlogs.tmpl")
t.submenu = ErrorLogsMenu
return _munge(t)
@cherrypy.expose
def clearerrors(self):
classes.ErrorViewer.clear()
redirect("/errorlogs")
@cherrypy.expose
def viewlog(self, minLevel=logger.MESSAGE, maxLines=500):
t = PageTemplate(file="viewlogs.tmpl")
t.submenu = ErrorLogsMenu
minLevel = int(minLevel)
data = []
if os.path.isfile(logger.sb_log_instance.log_file):
f = open(logger.sb_log_instance.log_file)
data = f.readlines()
f.close()
regex = "^(\w+).?\-(\d\d)\s+(\d\d)\:(\d\d):(\d\d)\s+([A-Z]+)\s+(.*)$"
finalData = []
numLines = 0
lastLine = False
numToShow = min(maxLines, len(data))
for x in reversed(data):
x = x.decode('utf-8')
match = re.match(regex, x)
if match:
level = match.group(6)
if level not in logger.reverseNames:
lastLine = False
continue
if logger.reverseNames[level] >= minLevel:
lastLine = True
finalData.append(x)
else:
lastLine = False
continue
elif lastLine:
finalData.append("AA"+x)
numLines += 1
if numLines >= numToShow:
break
result = "".join(finalData)
t.logLines = result
t.minLevel = minLevel
return _munge(t)
class Home:
@cherrypy.expose
def is_alive(self, *args, **kwargs):
if 'callback' in kwargs and '_' in kwargs:
callback, _ = kwargs['callback'], kwargs['_']
else:
return "Error: Unsupported Request. Send jsonp request with 'callback' variable in the query stiring."
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
cherrypy.response.headers['Content-Type'] = 'text/javascript'
cherrypy.response.headers['Access-Control-Allow-Origin'] = '*'
cherrypy.response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
if sickbeard.started:
return callback+'('+json.dumps({"msg": str(sickbeard.PID)})+');'
else:
return callback+'('+json.dumps({"msg": "nope"})+');'
@cherrypy.expose
def index(self):
t = PageTemplate(file="home.tmpl")
t.submenu = HomeMenu()
return _munge(t)
addShows = NewHomeAddShows()
postprocess = HomePostProcess()
@cherrypy.expose
def testSABnzbd(self, host=None, username=None, password=None, apikey=None):
if not host.endswith("/"):
host = host + "/"
connection, accesMsg = sab.getSabAccesMethod(host, username, password, apikey)
if connection:
authed, authMsg = sab.testAuthentication(host, username, password, apikey) #@UnusedVariable
if authed:
return "Success. Connected and authenticated"
else:
return "Authentication failed. SABnzbd expects '"+accesMsg+"' as authentication method"
else:
return "Unable to connect to host"
@cherrypy.expose
def testTorrent(self, torrent_method=None, host=None, username=None, password=None):
if not host.endswith("/"):
host = host + "/"
client = clients.getClientIstance(torrent_method)
connection, accesMsg = client(host, username, password).testAuthentication()
return accesMsg
@cherrypy.expose
def testGrowl(self, host=None, password=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.growl_notifier.test_notify(host, password)
if password==None or password=='':
pw_append = ''
else:
pw_append = " with password: " + password
if result:
return "Registered and Tested growl successfully "+urllib.unquote_plus(host)+pw_append
else:
return "Registration and Testing of growl failed "+urllib.unquote_plus(host)+pw_append
@cherrypy.expose
def testProwl(self, prowl_api=None, prowl_priority=0):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.prowl_notifier.test_notify(prowl_api, prowl_priority)
if result:
return "Test prowl notice sent successfully"
else:
return "Test prowl notice failed"
@cherrypy.expose
def testBoxcar(self, username=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.boxcar_notifier.test_notify(username)
if result:
return "Boxcar notification succeeded. Check your Boxcar clients to make sure it worked"
else:
return "Error sending Boxcar notification"
@cherrypy.expose
def testPushover(self, userKey=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.pushover_notifier.test_notify(userKey)
if result:
return "Pushover notification succeeded. Check your Pushover clients to make sure it worked"
else:
return "Error sending Pushover notification"
@cherrypy.expose
def twitterStep1(self):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
return notifiers.twitter_notifier._get_authorization()
@cherrypy.expose
def twitterStep2(self, key):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.twitter_notifier._get_credentials(key)
logger.log(u"result: "+str(result))
if result:
return "Key verification successful"
else:
return "Unable to verify key"
@cherrypy.expose
def testTwitter(self):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.twitter_notifier.test_notify()
if result:
return "Tweet successful, check your twitter to make sure it worked"
else:
return "Error sending tweet"
@cherrypy.expose
def testXBMC(self, host=None, username=None, password=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
finalResult = ''
for curHost in [x.strip() for x in host.split(",")]:
curResult = notifiers.xbmc_notifier.test_notify(urllib.unquote_plus(curHost), username, password)
if len(curResult.split(":")) > 2 and 'OK' in curResult.split(":")[2]:
finalResult += "Test XBMC notice sent successfully to " + urllib.unquote_plus(curHost)
else:
finalResult += "Test XBMC notice failed to " + urllib.unquote_plus(curHost)
finalResult += "<br />\n"
return finalResult
@cherrypy.expose
def testPLEX(self, host=None, username=None, password=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
finalResult = ''
for curHost in [x.strip() for x in host.split(",")]:
curResult = notifiers.plex_notifier.test_notify(urllib.unquote_plus(curHost), username, password)
if len(curResult.split(":")) > 2 and 'OK' in curResult.split(":")[2]:
finalResult += "Test Plex notice sent successfully to " + urllib.unquote_plus(curHost)
else:
finalResult += "Test Plex notice failed to " + urllib.unquote_plus(curHost)
finalResult += "<br />\n"
return finalResult
@cherrypy.expose
def testLibnotify(self):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
if notifiers.libnotify_notifier.test_notify():
return "Tried sending desktop notification via libnotify"
else:
return notifiers.libnotify.diagnose()
@cherrypy.expose
def testNMJ(self, host=None, database=None, mount=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.nmj_notifier.test_notify(urllib.unquote_plus(host), database, mount)
if result:
return "Successfull started the scan update"
else:
return "Test failed to start the scan update"
@cherrypy.expose
def settingsNMJ(self, host=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.nmj_notifier.notify_settings(urllib.unquote_plus(host))
if result:
return '{"message": "Got settings from %(host)s", "database": "%(database)s", "mount": "%(mount)s"}' % {"host": host, "database": sickbeard.NMJ_DATABASE, "mount": sickbeard.NMJ_MOUNT}
else:
return '{"message": "Failed! Make sure your Popcorn is on and NMJ is running. (see Log & Errors -> Debug for detailed info)", "database": "", "mount": ""}'
@cherrypy.expose
def testNMJv2(self, host=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.nmjv2_notifier.test_notify(urllib.unquote_plus(host))
if result:
return "Test notice sent successfully to " + urllib.unquote_plus(host)
else:
return "Test notice failed to " + urllib.unquote_plus(host)
@cherrypy.expose
def settingsNMJv2(self, host=None, dbloc=None, instance=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.nmjv2_notifier.notify_settings(urllib.unquote_plus(host), dbloc, instance)
if result:
return '{"message": "NMJ Database found at: %(host)s", "database": "%(database)s"}' % {"host": host, "database": sickbeard.NMJv2_DATABASE}
else:
return '{"message": "Unable to find NMJ Database at location: %(dbloc)s. Is the right location selected and PCH running?", "database": ""}' % {"dbloc": dbloc}
@cherrypy.expose
def testTrakt(self, api=None, username=None, password=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.trakt_notifier.test_notify(api, username, password)
if result:
return "Test notice sent successfully to Trakt"
else:
return "Test notice failed to Trakt"
@cherrypy.expose
def testMail(self, mail_from=None, mail_to=None, mail_server=None, mail_ssl=None, mail_user=None, mail_password=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.mail_notifier.test_notify(mail_from, mail_to, mail_server, mail_ssl, mail_user, mail_password)
if result:
return "Mail sent"
else:
return "Can't sent mail."
@cherrypy.expose
def testNMA(self, nma_api=None, nma_priority=0):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.nma_notifier.test_notify(nma_api, nma_priority)
if result:
return "Test NMA notice sent successfully"
else:
return "Test NMA notice failed"
@cherrypy.expose
def testPushalot(self, authorizationToken=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.pushalot_notifier.test_notify(authorizationToken)
if result:
return "Pushalot notification succeeded. Check your Pushalot clients to make sure it worked"
else:
return "Error sending Pushalot notification"
@cherrypy.expose
def testPushbullet(self, api=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.pushbullet_notifier.test_notify(api)
if result:
return "Pushbullet notification succeeded. Check your device to make sure it worked"
else:
return "Error sending Pushbullet notification"
@cherrypy.expose
def getPushbulletDevices(self, api=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.pushbullet_notifier.get_devices(api)
if result:
return result
else:
return "Error sending Pushbullet notification"
@cherrypy.expose
def shutdown(self, pid=None):
if str(pid) != str(sickbeard.PID):
redirect("/home")
threading.Timer(2, sickbeard.invoke_shutdown).start()
title = "Shutting down"
message = "Sick Beard is shutting down..."
return _genericMessage(title, message)
@cherrypy.expose
def restart(self, pid=None):
if str(pid) != str(sickbeard.PID):
redirect("/home")
t = PageTemplate(file="restart.tmpl")
t.submenu = HomeMenu()
# do a soft restart
threading.Timer(2, sickbeard.invoke_restart, [False]).start()
return _munge(t)
@cherrypy.expose
def update(self, pid=None):
if str(pid) != str(sickbeard.PID):
redirect("/home")
updated = sickbeard.versionCheckScheduler.action.update() #@UndefinedVariable
if updated:
# do a hard restart
threading.Timer(2, sickbeard.invoke_restart, [False]).start()
t = PageTemplate(file="restart_bare.tmpl")
return _munge(t)
else:
return _genericMessage("Update Failed","Update wasn't successful, not restarting. Check your log for more information.")
@cherrypy.expose
def displayShow(self, show=None):
if show == None:
return _genericMessage("Error", "Invalid show ID")
else:
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Show not in show list")
showObj.exceptions = scene_exceptions.get_scene_exceptions(showObj.tvdbid)
myDB = db.DBConnection()
seasonResults = myDB.select(
"SELECT DISTINCT season FROM tv_episodes WHERE showid = ? ORDER BY season desc",
[showObj.tvdbid]
)
sqlResults = myDB.select(
"SELECT * FROM tv_episodes WHERE showid = ? ORDER BY season DESC, episode DESC",
[showObj.tvdbid]
)
t = PageTemplate(file="displayShow.tmpl")
t.submenu = [ { 'title': 'Edit', 'path': 'home/editShow?show=%d'%showObj.tvdbid } ]
try:
t.showLoc = (showObj.location, True)
except sickbeard.exceptions.ShowDirNotFoundException:
t.showLoc = (showObj._location, False)
show_message = ''
if sickbeard.showQueueScheduler.action.isBeingAdded(showObj): #@UndefinedVariable
show_message = 'This show is in the process of being downloaded from theTVDB.com - the info below is incomplete.'
elif sickbeard.showQueueScheduler.action.isBeingUpdated(showObj): #@UndefinedVariable
show_message = 'The information below is in the process of being updated.'
elif sickbeard.showQueueScheduler.action.isBeingRefreshed(showObj): #@UndefinedVariable
show_message = 'The episodes below are currently being refreshed from disk'
elif sickbeard.showQueueScheduler.action.isBeingSubtitled(showObj): #@UndefinedVariable
show_message = 'Currently downloading subtitles for this show'
elif sickbeard.showQueueScheduler.action.isBeingCleanedSubtitle(showObj): #@UndefinedVariable
show_message = 'Currently cleaning subtitles for this show'
elif sickbeard.showQueueScheduler.action.isInRefreshQueue(showObj): #@UndefinedVariable
show_message = 'This show is queued to be refreshed.'
elif sickbeard.showQueueScheduler.action.isInUpdateQueue(showObj): #@UndefinedVariable
show_message = 'This show is queued and awaiting an update.'
elif sickbeard.showQueueScheduler.action.isInSubtitleQueue(showObj): #@UndefinedVariable
show_message = 'This show is queued and awaiting subtitles download.'
if not sickbeard.showQueueScheduler.action.isBeingAdded(showObj): #@UndefinedVariable
if not sickbeard.showQueueScheduler.action.isBeingUpdated(showObj): #@UndefinedVariable
t.submenu.append({ 'title': 'Delete', 'path': 'home/deleteShow?show=%d'%showObj.tvdbid, 'confirm': True })
t.submenu.append({ 'title': 'Re-scan files', 'path': 'home/refreshShow?show=%d'%showObj.tvdbid })
t.submenu.append({ 'title': 'Force Full Update', 'path': 'home/updateShow?show=%d&force=1'%showObj.tvdbid })
t.submenu.append({ 'title': 'Update show in XBMC', 'path': 'home/updateXBMC?showName=%s'%urllib.quote_plus(showObj.name.encode('utf-8')), 'requires': haveXBMC })
t.submenu.append({ 'title': 'Preview Rename', 'path': 'home/testRename?show=%d'%showObj.tvdbid })
t.submenu.append({ 'title': 'French Search', 'path': 'home/frenchSearch?show=%d'%showObj.tvdbid })
if sickbeard.USE_SUBTITLES and not sickbeard.showQueueScheduler.action.isBeingSubtitled(showObj) and not sickbeard.showQueueScheduler.action.isBeingCleanedSubtitle(showObj) and showObj.subtitles:
t.submenu.append({ 'title': 'Download Subtitles', 'path': 'home/subtitleShow?show=%d'%showObj.tvdbid })
t.submenu.append({ 'title': 'Clean Subtitles', 'path': 'home/subtitleShowClean?show=%d'%showObj.tvdbid })
t.show = showObj
t.sqlResults = sqlResults
t.seasonResults = seasonResults
t.show_message = show_message
epCounts = {}
epCats = {}
epCounts[Overview.SKIPPED] = 0
epCounts[Overview.WANTED] = 0
epCounts[Overview.QUAL] = 0
epCounts[Overview.GOOD] = 0
epCounts[Overview.UNAIRED] = 0
epCounts[Overview.SNATCHED] = 0
showSceneNumberColum = False
for curResult in sqlResults:
if not showSceneNumberColum and (isinstance(curResult["scene_season"], int) and isinstance(curResult["scene_episode"], int)):
showSceneNumberColum = True
curEpCat = showObj.getOverview(int(curResult["status"]))
epCats[str(curResult["season"])+"x"+str(curResult["episode"])] = curEpCat
epCounts[curEpCat] += 1
t.showSceneNumberColum = showSceneNumberColum
def titler(x):
if not x:
return x
if x.lower().startswith('a '):
x = x[2:]
elif x.lower().startswith('the '):
x = x[4:]
return x
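# Illustrative: titler("The Office") -> "Office", titler("A Team") -> "Team",
# so shows sort by their name without a leading article.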
t.sortedShowList = sorted(sickbeard.showList, lambda x, y: cmp(titler(x.name), titler(y.name)))
t.epCounts = epCounts
t.epCats = epCats
return _munge(t)
@cherrypy.expose
def plotDetails(self, show, season, episode):
result = db.DBConnection().action("SELECT description FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?", (show, season, episode)).fetchone()
return result['description'] if result else 'Episode not found.'
@cherrypy.expose
def editShow(self, show=None, location=None, anyQualities=[], bestQualities=[], exceptions_list=[], flatten_folders=None, paused=None, frenchsearch=None, directCall=False, air_by_date=None, tvdbLang=None, audio_lang=None, subtitles=None):
if show == None:
errString = "Invalid show ID: "+str(show)
if directCall:
return [errString]
else:
return _genericMessage("Error", errString)
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
errString = "Unable to find the specified show: "+str(show)
if directCall:
return [errString]
else:
return _genericMessage("Error", errString)
showObj.exceptions = scene_exceptions.get_scene_exceptions(showObj.tvdbid)
if not location and not anyQualities and not bestQualities and not flatten_folders:
t = PageTemplate(file="editShow.tmpl")
t.submenu = HomeMenu()
with showObj.lock:
t.show = showObj
return _munge(t)
if flatten_folders == "on":
flatten_folders = 1
else:
flatten_folders = 0
logger.log(u"flatten folders: "+str(flatten_folders))
if paused == "on":
paused = 1
else:
paused = 0
if frenchsearch == "on":
frenchsearch = 1
else:
frenchsearch = 0
if air_by_date == "on":
air_by_date = 1
else:
air_by_date = 0
if subtitles == "on":
subtitles = 1
else:
subtitles = 0
if tvdbLang and tvdbLang in tvdb_api.Tvdb().config['valid_languages']:
tvdb_lang = tvdbLang
else:
tvdb_lang = showObj.lang
# if we changed the language then kick off an update
if tvdb_lang == showObj.lang:
do_update = False
else:
do_update = True
if type(anyQualities) != list:
anyQualities = [anyQualities]
if type(bestQualities) != list:
bestQualities = [bestQualities]
if type(exceptions_list) != list:
exceptions_list = [exceptions_list]
#If directCall from mass_edit_update no scene exceptions handling
if directCall:
do_update_exceptions = False
else:
if set(exceptions_list) == set(showObj.exceptions):
do_update_exceptions = False
else:
do_update_exceptions = True
errors = []
with showObj.lock:
newQuality = Quality.combineQualities(map(int, anyQualities), map(int, bestQualities))
showObj.quality = newQuality
# reversed for now
if bool(showObj.flatten_folders) != bool(flatten_folders):
showObj.flatten_folders = flatten_folders
try:
sickbeard.showQueueScheduler.action.refreshShow(showObj) #@UndefinedVariable
except exceptions.CantRefreshException, e:
errors.append("Unable to refresh this show: "+ex(e))
showObj.paused = paused
showObj.air_by_date = air_by_date
showObj.subtitles = subtitles
showObj.frenchsearch = frenchsearch
showObj.lang = tvdb_lang
showObj.audio_lang = audio_lang
# if we change location clear the db of episodes, change it, write to db, and rescan
if os.path.normpath(showObj._location) != os.path.normpath(location):
logger.log(os.path.normpath(showObj._location)+" != "+os.path.normpath(location), logger.DEBUG)
if not ek.ek(os.path.isdir, location):
errors.append("New location <tt>%s</tt> does not exist" % location)
# don't bother if we're going to update anyway
elif not do_update:
# change it
try:
showObj.location = location
try:
sickbeard.showQueueScheduler.action.refreshShow(showObj) #@UndefinedVariable
except exceptions.CantRefreshException, e:
errors.append("Unable to refresh this show:"+ex(e))
# grab updated info from TVDB
#showObj.loadEpisodesFromTVDB()
# rescan the episodes in the new folder
except exceptions.NoNFOException:
errors.append("The folder at <tt>%s</tt> doesn't contain a tvshow.nfo - copy your files to that folder before you change the directory in Sick Beard." % location)
# save it to the DB
showObj.saveToDB()
# force the update
if do_update:
try:
sickbeard.showQueueScheduler.action.updateShow(showObj, True) #@UndefinedVariable
time.sleep(1)
except exceptions.CantUpdateException, e:
errors.append("Unable to force an update on the show.")
if do_update_exceptions:
try:
scene_exceptions.update_scene_exceptions(showObj.tvdbid, exceptions_list) #@UndefinedVariable
time.sleep(1)
except exceptions.CantUpdateException, e:
errors.append("Unable to force an update on scene exceptions of the show.")
if directCall:
return errors
if len(errors) > 0:
ui.notifications.error('%d error%s while saving changes:' % (len(errors), "" if len(errors) == 1 else "s"),
'<ul>' + '\n'.join(['<li>%s</li>' % error for error in errors]) + "</ul>")
redirect("/home/displayShow?show=" + show)
@cherrypy.expose
def deleteShow(self, show=None):
if show == None:
return _genericMessage("Error", "Invalid show ID")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Unable to find the specified show")
if sickbeard.showQueueScheduler.action.isBeingAdded(showObj) or sickbeard.showQueueScheduler.action.isBeingUpdated(showObj): #@UndefinedVariable
return _genericMessage("Error", "Shows can't be deleted while they're being added or updated.")
showObj.deleteShow()
ui.notifications.message('<b>%s</b> has been deleted' % showObj.name)
redirect("/home")
@cherrypy.expose
def refreshShow(self, show=None):
if show == None:
return _genericMessage("Error", "Invalid show ID")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Unable to find the specified show")
# force the update from the DB
try:
sickbeard.showQueueScheduler.action.refreshShow(showObj) #@UndefinedVariable
except exceptions.CantRefreshException, e:
ui.notifications.error("Unable to refresh this show.",
ex(e))
time.sleep(3)
redirect("/home/displayShow?show="+str(showObj.tvdbid))
@cherrypy.expose
def updateShow(self, show=None, force=0):
if show == None:
return _genericMessage("Error", "Invalid show ID")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Unable to find the specified show")
# force the update
try:
sickbeard.showQueueScheduler.action.updateShow(showObj, bool(force)) #@UndefinedVariable
except exceptions.CantUpdateException, e:
ui.notifications.error("Unable to update this show.",
ex(e))
# just give it some time
time.sleep(3)
redirect("/home/displayShow?show=" + str(showObj.tvdbid))
@cherrypy.expose
def subtitleShow(self, show=None, force=0):
if show == None:
return _genericMessage("Error", "Invalid show ID")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Unable to find the specified show")
# search and download subtitles
sickbeard.showQueueScheduler.action.downloadSubtitles(showObj, bool(force)) #@UndefinedVariable
time.sleep(3)
redirect("/home/displayShow?show="+str(showObj.tvdbid))
@cherrypy.expose
def subtitleShowClean(self, show=None, force=0):
if show == None:
return _genericMessage("Error", "Invalid show ID")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Unable to find the specified show")
# search and download subtitles
sickbeard.showQueueScheduler.action.cleanSubtitles(showObj, bool(force)) #@UndefinedVariable
time.sleep(3)
redirect("/home/displayShow?show="+str(showObj.tvdbid))
@cherrypy.expose
def frenchSearch(self, show=None, force=0):
if show == None:
return _genericMessage("Error", "Invalid show ID")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Unable to find the specified show")
# search and download subtitles
sickbeard.showQueueScheduler.action.searchFrench(showObj, bool(force)) #@UndefinedVariable
time.sleep(3)
redirect("/home/displayShow?show="+str(showObj.tvdbid))
@cherrypy.expose
def updateXBMC(self, showName=None):
if sickbeard.XBMC_UPDATE_ONLYFIRST:
# only send update to first host in the list -- workaround for xbmc sql backend users
host = sickbeard.XBMC_HOST.split(",")[0].strip()
else:
host = sickbeard.XBMC_HOST
if notifiers.xbmc_notifier.update_library(showName=showName):
ui.notifications.message("Library update command sent to XBMC host(s): " + host)
else:
ui.notifications.error("Unable to contact one or more XBMC host(s): " + host)
redirect('/home')
@cherrypy.expose
def updatePLEX(self):
if notifiers.plex_notifier.update_library():
ui.notifications.message("Library update command sent to Plex Media Server host: " + sickbeard.PLEX_SERVER_HOST)
else:
ui.notifications.error("Unable to contact Plex Media Server host: " + sickbeard.PLEX_SERVER_HOST)
redirect('/home')
@cherrypy.expose
def setStatus(self, show=None, eps=None, status=None, direct=False):
if show == None or eps == None or status == None:
errMsg = "You must specify a show and at least one episode"
if direct:
ui.notifications.error('Error', errMsg)
return json.dumps({'result': 'error'})
else:
return _genericMessage("Error", errMsg)
if not statusStrings.has_key(int(status)):
errMsg = "Invalid status"
if direct:
ui.notifications.error('Error', errMsg)
return json.dumps({'result': 'error'})
else:
return _genericMessage("Error", errMsg)
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
errMsg = "Error", "Show not in show list"
if direct:
ui.notifications.error('Error', errMsg)
return json.dumps({'result': 'error'})
else:
return _genericMessage("Error", errMsg)
segment_list = []
if eps != None:
for curEp in eps.split('|'):
logger.log(u"Attempting to set status on episode "+curEp+" to "+status, logger.DEBUG)
epInfo = curEp.split('x')
epObj = showObj.getEpisode(int(epInfo[0]), int(epInfo[1]))
if int(status) == WANTED:
# figure out what segment the episode is in and remember it so we can backlog it
if epObj.show.air_by_date:
ep_segment = str(epObj.airdate)[:7]
else:
ep_segment = epObj.season
if ep_segment not in segment_list:
segment_list.append(ep_segment)
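# Illustrative: an air-by-date episode aired 2013-05-14 falls into segment "2013-05",
# while a normal episode's segment is simply its season number (hypothetical values).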
if epObj == None:
return _genericMessage("Error", "Episode couldn't be retrieved")
with epObj.lock:
# don't let them mess up UNAIRED episodes
if epObj.status == UNAIRED:
logger.log(u"Refusing to change status of "+curEp+" because it is UNAIRED", logger.ERROR)
continue
if int(status) in Quality.DOWNLOADED and epObj.status not in Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.SNATCHED_FRENCH + Quality.DOWNLOADED + [IGNORED] and not ek.ek(os.path.isfile, epObj.location):
logger.log(u"Refusing to change status of "+curEp+" to DOWNLOADED because it's not SNATCHED/DOWNLOADED", logger.ERROR)
continue
epObj.status = int(status)
epObj.saveToDB()
msg = "Backlog was automatically started for the following seasons of <b>"+showObj.name+"</b>:<br />"
for cur_segment in segment_list:
msg += "<li>Season "+str(cur_segment)+"</li>"
logger.log(u"Sending backlog for "+showObj.name+" season "+str(cur_segment)+" because some eps were set to wanted")
cur_backlog_queue_item = search_queue.BacklogQueueItem(showObj, cur_segment)
sickbeard.searchQueueScheduler.action.add_item(cur_backlog_queue_item) #@UndefinedVariable
msg += "</ul>"
if segment_list:
ui.notifications.message("Backlog started", msg)
if direct:
return json.dumps({'result': 'success'})
else:
redirect("/home/displayShow?show=" + show)
@cherrypy.expose
def setAudio(self, show=None, eps=None, audio_langs=None, direct=False):
if show == None or eps == None or audio_langs == None:
errMsg = "You must specify a show and at least one episode"
if direct:
ui.notifications.error('Error', errMsg)
return json.dumps({'result': 'error'})
else:
return _genericMessage("Error", errMsg)
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Show not in show list")
try:
show_loc = showObj.location #@UnusedVariable
except exceptions.ShowDirNotFoundException:
return _genericMessage("Error", "Can't rename episodes when the show dir is missing.")
ep_obj_rename_list = []
for curEp in eps.split('|'):
logger.log(u"Attempting to set audio on episode "+curEp+" to "+audio_langs, logger.DEBUG)
epInfo = curEp.split('x')
epObj = showObj.getEpisode(int(epInfo[0]), int(epInfo[1]))
epObj.audio_langs = str(audio_langs)
epObj.saveToDB()
if direct:
return json.dumps({'result': 'success'})
else:
redirect("/home/displayShow?show=" + show)
@cherrypy.expose
def testRename(self, show=None):
if show == None:
return _genericMessage("Error", "You must specify a show")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Show not in show list")
try:
show_loc = showObj.location #@UnusedVariable
except exceptions.ShowDirNotFoundException:
return _genericMessage("Error", "Can't rename episodes when the show dir is missing.")
ep_obj_rename_list = []
ep_obj_list = showObj.getAllEpisodes(has_location=True)
for cur_ep_obj in ep_obj_list:
# Only want to rename if we have a location
if cur_ep_obj.location:
if cur_ep_obj.relatedEps:
# do we have one of multi-episodes in the rename list already
have_already = False
for cur_related_ep in cur_ep_obj.relatedEps + [cur_ep_obj]:
if cur_related_ep in ep_obj_rename_list:
have_already = True
break
if not have_already:
ep_obj_rename_list.append(cur_ep_obj)
else:
ep_obj_rename_list.append(cur_ep_obj)
if ep_obj_rename_list:
# present season DESC episode DESC on screen
ep_obj_rename_list.reverse()
t = PageTemplate(file="testRename.tmpl")
t.submenu = [{'title': 'Edit', 'path': 'home/editShow?show=%d' % showObj.tvdbid}]
t.ep_obj_list = ep_obj_rename_list
t.show = showObj
return _munge(t)
@cherrypy.expose
def doRename(self, show=None, eps=None):
if show == None or eps == None:
errMsg = "You must specify a show and at least one episode"
return _genericMessage("Error", errMsg)
show_obj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if show_obj == None:
errMsg = "Error", "Show not in show list"
return _genericMessage("Error", errMsg)
try:
show_loc = show_obj.location #@UnusedVariable
except exceptions.ShowDirNotFoundException:
return _genericMessage("Error", "Can't rename episodes when the show dir is missing.")
myDB = db.DBConnection()
if eps == None:
redirect("/home/displayShow?show=" + show)
for curEp in eps.split('|'):
epInfo = curEp.split('x')
# this is probably the worst possible way to deal with double eps but I've kinda painted myself into a corner here with this stupid database
ep_result = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ? AND 5=5", [show, epInfo[0], epInfo[1]])
if not ep_result:
logger.log(u"Unable to find an episode for "+curEp+", skipping", logger.WARNING)
continue
related_eps_result = myDB.select("SELECT * FROM tv_episodes WHERE location = ? AND episode != ?", [ep_result[0]["location"], epInfo[1]])
root_ep_obj = show_obj.getEpisode(int(epInfo[0]), int(epInfo[1]))
for cur_related_ep in related_eps_result:
related_ep_obj = show_obj.getEpisode(int(cur_related_ep["season"]), int(cur_related_ep["episode"]))
if related_ep_obj not in root_ep_obj.relatedEps:
root_ep_obj.relatedEps.append(related_ep_obj)
root_ep_obj.rename()
redirect("/home/displayShow?show=" + show)
@cherrypy.expose
def trunchistory(self, epid):
myDB = db.DBConnection()
nbep = myDB.select("Select count(*) from episode_links where episode_id=?",[epid])
myDB.action("DELETE from episode_links where episode_id=?",[epid])
messnum = str(nbep[0][0]) + ' history links deleted'
ui.notifications.message('Episode History Truncated' , messnum)
return json.dumps({'result': 'ok'})
@cherrypy.expose
def searchEpisode(self, show=None, season=None, episode=None):
# retrieve the episode object and fail if we can't get one
ep_obj = _getEpisode(show, season, episode)
if isinstance(ep_obj, str):
return json.dumps({'result': 'failure'})
# make a queue item for it and put it on the queue
ep_queue_item = search_queue.ManualSearchQueueItem(ep_obj)
sickbeard.searchQueueScheduler.action.add_item(ep_queue_item) #@UndefinedVariable
# wait until the queue item tells us whether it worked or not
while ep_queue_item.success == None: #@UndefinedVariable
time.sleep(1)
# return the correct json value
if ep_queue_item.success:
return json.dumps({'result': statusStrings[ep_obj.status]})
return json.dumps({'result': 'failure'})
@cherrypy.expose
def searchEpisodeSubtitles(self, show=None, season=None, episode=None):
# retrieve the episode object and fail if we can't get one
ep_obj = _getEpisode(show, season, episode)
if isinstance(ep_obj, str):
return json.dumps({'result': 'failure'})
# try to download subtitles for that episode
previous_subtitles = ep_obj.subtitles
try:
subtitles = ep_obj.downloadSubtitles()
if sickbeard.SUBTITLES_DIR:
for video in subtitles:
subs_new_path = ek.ek(os.path.join, os.path.dirname(video.path), sickbeard.SUBTITLES_DIR)
dir_exists = helpers.makeDir(subs_new_path)
if not dir_exists:
logger.log(u"Unable to create subtitles folder "+subs_new_path, logger.ERROR)
else:
helpers.chmodAsParent(subs_new_path)
for subtitle in subtitles.get(video):
new_file_path = ek.ek(os.path.join, subs_new_path, os.path.basename(subtitle.path))
helpers.moveFile(subtitle.path, new_file_path)
if sickbeard.SUBSNOLANG:
helpers.copyFile(new_file_path,new_file_path[:-6]+"srt")
helpers.chmodAsParent(new_file_path[:-6]+"srt")
helpers.chmodAsParent(new_file_path)
else:
if sickbeard.SUBTITLES_DIR_SUB:
for video in subtitles:
subs_new_path = os.path.join(os.path.dirname(video.path),"Subs")
dir_exists = helpers.makeDir(subs_new_path)
if not dir_exists:
logger.log(u"Unable to create subtitles folder "+subs_new_path, logger.ERROR)
else:
helpers.chmodAsParent(subs_new_path)
for subtitle in subtitles.get(video):
new_file_path = ek.ek(os.path.join, subs_new_path, os.path.basename(subtitle.path))
helpers.moveFile(subtitle.path, new_file_path)
if sickbeard.SUBSNOLANG:
helpers.copyFile(new_file_path,new_file_path[:-6]+"srt")
helpers.chmodAsParent(new_file_path[:-6]+"srt")
helpers.chmodAsParent(new_file_path)
else:
for video in subtitles:
for subtitle in subtitles.get(video):
if sickbeard.SUBSNOLANG:
helpers.copyFile(subtitle.path,subtitle.path[:-6]+"srt")
helpers.chmodAsParent(subtitle.path[:-6]+"srt")
helpers.chmodAsParent(subtitle.path)
except:
return json.dumps({'result': 'failure'})
# return the correct json value
if previous_subtitles != ep_obj.subtitles:
status = 'New subtitles downloaded: %s' % ' '.join(["<img src='"+sickbeard.WEB_ROOT+"/images/flags/"+subliminal.language.Language(x).alpha2+".png' alt='"+subliminal.language.Language(x).name+"'/>" for x in sorted(list(set(ep_obj.subtitles).difference(previous_subtitles)))])
else:
status = 'No subtitles downloaded'
ui.notifications.message('Subtitles Search', status)
return json.dumps({'result': status, 'subtitles': ','.join([x for x in ep_obj.subtitles])})
@cherrypy.expose
def mergeEpisodeSubtitles(self, show=None, season=None, episode=None):
# retrieve the episode object and fail if we can't get one
ep_obj = _getEpisode(show, season, episode)
if isinstance(ep_obj, str):
return json.dumps({'result': 'failure'})
# try to merge subtitles for that episode
try:
ep_obj.mergeSubtitles()
except Exception as e:
return json.dumps({'result': 'failure', 'exception': str(e)})
# return the correct json value
status = 'Subtitles merged successfully '
ui.notifications.message('Merge Subtitles', status)
return json.dumps({'result': 'ok'})
class UI:
@cherrypy.expose
def add_message(self):
ui.notifications.message('Test 1', 'This is test number 1')
ui.notifications.error('Test 2', 'This is test number 2')
return "ok"
@cherrypy.expose
def get_messages(self):
messages = {}
cur_notification_num = 1
for cur_notification in ui.notifications.get_notifications():
messages['notification-'+str(cur_notification_num)] = {'title': cur_notification.title,
'message': cur_notification.message,
'type': cur_notification.type}
cur_notification_num += 1
return json.dumps(messages)
class WebInterface:
@cherrypy.expose
def index(self):
redirect("/home")
@cherrypy.expose
def showPoster(self, show=None, which=None):
#Redirect initial poster/banner thumb to default images
if which[0:6] == 'poster':
default_image_name = 'poster.png'
else:
default_image_name = 'banner.png'
default_image_path = ek.ek(os.path.join, sickbeard.PROG_DIR, 'data', 'images', default_image_name)
if show is None:
return cherrypy.lib.static.serve_file(default_image_path, content_type="image/png")
else:
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj is None:
return cherrypy.lib.static.serve_file(default_image_path, content_type="image/png")
cache_obj = image_cache.ImageCache()
if which == 'poster':
image_file_name = cache_obj.poster_path(showObj.tvdbid)
if which == 'poster_thumb':
image_file_name = cache_obj.poster_thumb_path(showObj.tvdbid)
if which == 'banner':
image_file_name = cache_obj.banner_path(showObj.tvdbid)
if which == 'banner_thumb':
image_file_name = cache_obj.banner_thumb_path(showObj.tvdbid)
if ek.ek(os.path.isfile, image_file_name):
return cherrypy.lib.static.serve_file(image_file_name, content_type="image/jpeg")
else:
return cherrypy.lib.static.serve_file(default_image_path, content_type="image/png")
@cherrypy.expose
def setHomeLayout(self, layout):
if layout not in ('poster', 'banner', 'simple'):
layout = 'poster'
sickbeard.HOME_LAYOUT = layout
redirect("/home")
@cherrypy.expose
def setHomeSearch(self, search):
if search not in ('True', 'False'):
search = 'False'
sickbeard.TOGGLE_SEARCH= search
redirect("/home")
@cherrypy.expose
def toggleDisplayShowSpecials(self, show):
sickbeard.DISPLAY_SHOW_SPECIALS = not sickbeard.DISPLAY_SHOW_SPECIALS
redirect("/home/displayShow?show=" + show)
@cherrypy.expose
def setComingEpsLayout(self, layout):
if layout not in ('poster', 'banner', 'list'):
layout = 'banner'
sickbeard.COMING_EPS_LAYOUT = layout
redirect("/comingEpisodes")
@cherrypy.expose
def toggleComingEpsDisplayPaused(self):
sickbeard.COMING_EPS_DISPLAY_PAUSED = not sickbeard.COMING_EPS_DISPLAY_PAUSED
redirect("/comingEpisodes")
@cherrypy.expose
def setComingEpsSort(self, sort):
if sort not in ('date', 'network', 'show'):
sort = 'date'
sickbeard.COMING_EPS_SORT = sort
redirect("/comingEpisodes")
@cherrypy.expose
def comingEpisodes(self, layout="None"):
# get local timezone and load network timezones
sb_timezone = tz.tzlocal()
network_dict = network_timezones.load_network_dict()
myDB = db.DBConnection()
today1 = datetime.date.today()
today = today1.toordinal()
next_week1 = (datetime.date.today() + datetime.timedelta(days=7))
next_week = next_week1.toordinal()
recently = (datetime.date.today() - datetime.timedelta(days=sickbeard.COMING_EPS_MISSED_RANGE)).toordinal()
done_show_list = []
qualList = Quality.DOWNLOADED + Quality.SNATCHED + [ARCHIVED, IGNORED]
sql_results1 = myDB.select("SELECT *, 0 as localtime, tv_shows.status as show_status FROM tv_episodes, tv_shows WHERE season != 0 AND airdate >= ? AND airdate < ? AND tv_shows.tvdb_id = tv_episodes.showid AND tv_episodes.status NOT IN ("+','.join(['?']*len(qualList))+")", [today, next_week] + qualList)
for cur_result in sql_results1:
done_show_list.append(helpers.tryInt(cur_result["showid"]))
more_sql_results = myDB.select("SELECT *, tv_shows.status as show_status FROM tv_episodes outer_eps, tv_shows WHERE season != 0 AND showid NOT IN ("+','.join(['?']*len(done_show_list))+") AND tv_shows.tvdb_id = outer_eps.showid AND airdate IN (SELECT airdate FROM tv_episodes inner_eps WHERE inner_eps.showid = outer_eps.showid AND inner_eps.airdate >= ? AND inner_eps.status NOT IN ("+','.join(['?']*len(Quality.DOWNLOADED+Quality.SNATCHED))+") ORDER BY inner_eps.airdate ASC LIMIT 1)", done_show_list + [next_week] + Quality.DOWNLOADED + Quality.SNATCHED)
sql_results1 += more_sql_results
more_sql_results = myDB.select("SELECT *, 0 as localtime, tv_shows.status as show_status FROM tv_episodes, tv_shows WHERE season != 0 AND tv_shows.tvdb_id = tv_episodes.showid AND airdate < ? AND airdate >= ? AND tv_episodes.status = ? AND tv_episodes.status NOT IN ("+','.join(['?']*len(qualList))+")", [today, recently, WANTED] + qualList)
sql_results1 += more_sql_results
# sort by localtime
sorts = {
'date': (lambda x, y: cmp(x["localtime"], y["localtime"])),
'show': (lambda a, b: cmp((a["show_name"], a["localtime"]), (b["show_name"], b["localtime"]))),
'network': (lambda a, b: cmp((a["network"], a["localtime"]), (b["network"], b["localtime"]))),
}
# make a dict out of the sql results
sql_results = [dict(row) for row in sql_results1]
# regex to parse time (12/24 hour format)
time_regex = re.compile(r"(\d{1,2}):(\d{2,2})( [PA]M)?\b", flags=re.IGNORECASE)
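# Illustrative: an 'airs' value such as "Thursday 8:00 PM" (hypothetical) yields
# hr=8, m=0, ap=" PM", which the block below converts to a 20:00 local airing time.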
# add localtime to the dict
for index, item in enumerate(sql_results1):
mo = time_regex.search(item['airs'])
if mo != None and len(mo.groups()) >= 2:
try:
hr = helpers.tryInt(mo.group(1))
m = helpers.tryInt(mo.group(2))
ap = mo.group(3)
# convert am/pm to 24 hour clock
if ap != None:
if ap.lower() == u" pm" and hr != 12:
hr += 12
elif ap.lower() == u" am" and hr == 12:
hr -= 12
except:
hr = 0
m = 0
else:
hr = 0
m = 0
if hr < 0 or hr > 23 or m < 0 or m > 59:
hr = 0
m = 0
te = datetime.datetime.fromordinal(helpers.tryInt(item['airdate']))
foreign_timezone = network_timezones.get_network_timezone(item['network'], network_dict, sb_timezone)
foreign_naive = datetime.datetime(te.year, te.month, te.day, hr, m,tzinfo=foreign_timezone)
sql_results[index]['localtime'] = foreign_naive.astimezone(sb_timezone)
#Normalize/Format the Airing Time
try:
locale.setlocale(locale.LC_TIME, 'en_US')
sql_results[index]['localtime_string'] = sql_results[index]['localtime'].strftime("%A %H:%M %p")
locale.setlocale(locale.LC_ALL, '') # Resetting to default locale
except:
sql_results[index]['localtime_string'] = sql_results[index]['localtime'].strftime("%A %H:%M %p")
sql_results.sort(sorts[sickbeard.COMING_EPS_SORT])
t = PageTemplate(file="comingEpisodes.tmpl")
# paused_item = { 'title': '', 'path': 'toggleComingEpsDisplayPaused' }
# paused_item['title'] = 'Hide Paused' if sickbeard.COMING_EPS_DISPLAY_PAUSED else 'Show Paused'
paused_item = { 'title': 'View Paused:', 'path': {'': ''} }
paused_item['path'] = {'Hide': 'toggleComingEpsDisplayPaused'} if sickbeard.COMING_EPS_DISPLAY_PAUSED else {'Show': 'toggleComingEpsDisplayPaused'}
t.submenu = [
{ 'title': 'Sort by:', 'path': {'Date': 'setComingEpsSort/?sort=date',
'Show': 'setComingEpsSort/?sort=show',
'Network': 'setComingEpsSort/?sort=network',
}},
{ 'title': 'Layout:', 'path': {'Banner': 'setComingEpsLayout/?layout=banner',
'Poster': 'setComingEpsLayout/?layout=poster',
'List': 'setComingEpsLayout/?layout=list',
}},
paused_item,
]
t.next_week = datetime.datetime.combine(next_week1, datetime.time(tzinfo=sb_timezone))
t.today = datetime.datetime.now().replace(tzinfo=sb_timezone)
t.sql_results = sql_results
# Allow local overriding of layout parameter
if layout and layout in ('poster', 'banner', 'list'):
t.layout = layout
else:
t.layout = sickbeard.COMING_EPS_LAYOUT
return _munge(t)
# Raw iCalendar implementation by Pedro Jose Pereira Vieito (@pvieito).
#
# iCalendar (iCal) - Standard RFC 5545 <http://tools.ietf.org/html/rfc5545>
# Works with iCloud, Google Calendar and Outlook.
@cherrypy.expose
def calendar(self):
""" Provides a subscribeable URL for iCal subscriptions
"""
logger.log(u"Receiving iCal request from %s" % cherrypy.request.remote.ip)
poster_url = cherrypy.url().replace('ical', '')
time_re = re.compile('([0-9]{1,2})\:([0-9]{2})(\ |)([AM|am|PM|pm]{2})')
# Create a iCal string
ical = 'BEGIN:VCALENDAR\n'
ical += 'VERSION:2.0\n'
ical += 'PRODID://Sick-Beard Upcoming Episodes//\n'
# Get shows info
myDB = db.DBConnection()
# Limit dates
past_date = (datetime.date.today() + datetime.timedelta(weeks=-52)).toordinal()
future_date = (datetime.date.today() + datetime.timedelta(weeks=52)).toordinal()
# Get all the shows that are not paused and are currently on air (from kjoconnor Fork)
calendar_shows = myDB.select("SELECT show_name, tvdb_id, network, airs, runtime FROM tv_shows WHERE status = 'Continuing' AND paused != '1'")
for show in calendar_shows:
# Get all episodes of this show airing between today and next month
episode_list = myDB.select("SELECT tvdbid, name, season, episode, description, airdate FROM tv_episodes WHERE airdate >= ? AND airdate < ? AND showid = ?", (past_date, future_date, int(show["tvdb_id"])))
for episode in episode_list:
# Get local timezone and load network timezones
local_zone = tz.tzlocal()
try:
network_zone = network_timezones.get_network_timezone(show['network'], network_timezones.load_network_dict(), local_zone)
except:
# Dummy network_zone for exceptions
network_zone = None
# Get the air date and time
air_date = datetime.datetime.fromordinal(int(episode['airdate']))
air_time = time_re.search(show["airs"])
# Parse out the air time
try:
if (air_time.group(4).lower() == 'pm' and int(air_time.group(1)) == 12):
t = datetime.time(12, int(air_time.group(2)), 0, tzinfo=network_zone)
elif (air_time.group(4).lower() == 'pm'):
t = datetime.time((int(air_time.group(1)) + 12), int(air_time.group(2)), 0, tzinfo=network_zone)
elif (air_time.group(4).lower() == 'am' and int(air_time.group(1)) == 12):
t = datetime.time(0, int(air_time.group(2)), 0, tzinfo=network_zone)
else:
t = datetime.time(int(air_time.group(1)), int(air_time.group(2)), 0, tzinfo=network_zone)
except:
# Dummy time for exceptions
t = datetime.time(22, 0, 0, tzinfo=network_zone)
# Combine air time and air date into one datetime object
air_date_time = datetime.datetime.combine(air_date, t).astimezone(local_zone)
# Create event for episode
ical = ical + 'BEGIN:VEVENT\n'
ical = ical + 'DTSTART:' + str(air_date_time.date()).replace("-", "") + '\n'
ical = ical + 'SUMMARY:' + show['show_name'] + ': ' + episode['name'] + '\n'
ical = ical + 'UID:' + str(datetime.date.today().isoformat()) + '-' + str(random.randint(10000,99999)) + '@Sick-Beard\n'
if (episode['description'] != ''):
ical = ical + 'DESCRIPTION:' + show['airs'] + ' on ' + show['network'] + '\\n\\n' + episode['description'] + '\n'
else:
ical = ical + 'DESCRIPTION:' + show['airs'] + ' on ' + show['network'] + '\n'
ical = ical + 'LOCATION:' + 'Episode ' + str(episode['episode']) + ' - Season ' + str(episode['season']) + '\n'
ical = ical + 'END:VEVENT\n'
# Ending the iCal
ical += 'END:VCALENDAR\n'
return ical
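# Illustrative output (hypothetical values): each episode contributes a block such as
#   BEGIN:VEVENT
#   DTSTART:20140828
#   SUMMARY:Chuck: Chuck Versus the Intersect
#   UID:2014-08-28-12345@Sick-Beard
#   DESCRIPTION:Thursday 8:00 PM on NBC
#   LOCATION:Episode 1 - Season 3
#   END:VEVENT
# wrapped in the VCALENDAR header and footer built above.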
manage = Manage()
history = History()
config = Config()
home = Home()
api = Api()
browser = browser.WebFileBrowser()
errorlogs = ErrorLogs()
ui = UI()
| foufou55/Sick-Beard | sickbeard/webserve.py | Python | gpl-3.0 | 152,830 |
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import exceptions
OpenStackConfigException = exceptions.ConfigException
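# Illustrative (import path assumed from this file's location): the alias keeps older
# call sites working, e.g. "from openstack.config.exceptions import OpenStackConfigException"
# still yields the same class as openstack.exceptions.ConfigException.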
| ctrlaltdel/neutrinator | vendor/openstack/config/exceptions.py | Python | gpl-3.0 | 699 |
###############################################################################
# ilastik: interactive learning and segmentation toolkit
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# In addition, as a special exception, the copyright holders of
# ilastik give you permission to combine ilastik with applets,
# workflows and plugins which are not covered under the GNU
# General Public License.
#
# See the LICENSE file for details. License information is also available
# on the ilastik web site at:
# http://ilastik.org/license.html
###############################################################################
import ilastik.ilastik_logging
ilastik.ilastik_logging.default_config.init()
from ilastik.applets.objectClassification.opObjectClassification import OpObjectClassification
import numpy
class TestTransferLabelsFunction(object):
def test(self):
coords_old = dict()
coords_old["Coord<Minimum>"]=numpy.array([[0, 0, 0], [0, 0, 0], [0, 0, 0], [15, 15, 15], [22, 22, 22], [31, 31, 31]])
coords_old["Coord<Maximum>"]=numpy.array([[50, 50, 50], [10, 10, 10], [3, 3, 3], [20, 20, 20], [30, 30, 30], [35, 35, 35]])
coords_new = dict()
coords_new["Coord<Minimum>"]=numpy.array([[0, 0, 0], [2, 2, 2], [17, 17, 17], [22, 22, 22], [26, 26, 26]])
coords_new["Coord<Maximum>"]=numpy.array([[50, 50, 50], [5, 5, 5], [20, 20, 20], [25, 25, 25], [33, 33, 33]])
labels = numpy.zeros((6,))
labels[0]=0
labels[1]=1
labels[2]=0
labels[3]=2
labels[4]=3
labels[5]=4
newlabels, oldlost, newlost = OpObjectClassification.transferLabels(labels, coords_old, coords_new, None)
assert numpy.all(newlabels == [0, 1, 2, 0, 0])
assert len(oldlost["full"])==0
assert len(oldlost["partial"])==1
min4 = coords_old["Coord<Minimum>"][4]
max4 = coords_old["Coord<Maximum>"][4]
assert numpy.all(oldlost["partial"]==(min4+(max4-min4)/2))
newmin4 = coords_new["Coord<Minimum>"][4]
newmax4 = coords_new["Coord<Maximum>"][4]
assert numpy.all(newlost["conflict"]==(newmin4+(newmax4-newmin4)/2.))
if __name__ == "__main__":
import sys
import nose
sys.argv.append("--nocapture") # Don't steal stdout. Show it on the console as usual.
sys.argv.append("--nologcapture") # Don't set the logging level to DEBUG. Leave it alone.
nose.run(defaultTest=__file__)
| nielsbuwen/ilastik | tests/test_applets/objectClassification/testTransferLabels.py | Python | gpl-3.0 | 2,796 |
import multiprocessing
import os
import platform
import queue
import re
import subprocess
import sys
import unittest
from pyprint.ConsolePrinter import ConsolePrinter
from coalib.output.printers.LogPrinter import LogPrinter
from coalib.processes.CONTROL_ELEMENT import CONTROL_ELEMENT
from coalib.processes.Processing import (
ACTIONS, autoapply_actions, check_result_ignore, create_process_group,
execute_section, filter_raising_callables, get_default_actions,
get_file_dict, print_result, process_queues, simplify_section_result,
yield_ignore_ranges)
from coalib.results.HiddenResult import HiddenResult
from coalib.results.Result import RESULT_SEVERITY, Result
from coalib.results.result_actions.ApplyPatchAction import ApplyPatchAction
from coalib.results.result_actions.PrintDebugMessageAction import (
PrintDebugMessageAction)
from coalib.results.result_actions.ResultAction import ResultAction
from coalib.results.SourceRange import SourceRange
from coalib.settings.ConfigurationGathering import gather_configuration
from coalib.settings.Section import Section
from coalib.settings.Setting import Setting
from coalib.misc.Caching import FileCache
process_group_test_code = """
import time, subprocess, os, platform, sys;
p=subprocess.Popen([sys.executable,
"-c",
"import time; time.sleep(0.1)"]);
pgid = p.pid if platform.system() == "Windows" else os.getpgid(p.pid);
print(p.pid, pgid)
p.terminate()
"""
class DummyProcess(multiprocessing.Process):
def __init__(self, control_queue, starts_dead=False):
multiprocessing.Process.__init__(self)
self.control_queue = control_queue
self.starts_dead = starts_dead
def is_alive(self):
return not self.control_queue.empty() and not self.starts_dead
class ProcessingTestLogPrinter(LogPrinter):
def __init__(self, log_queue):
LogPrinter.__init__(self, self)
self.log_queue = log_queue
self.set_up = False
def log_message(self, log_message, timestamp=None, **kwargs):
self.log_queue.put(log_message)
class ProcessingTest(unittest.TestCase):
def setUp(self):
config_path = os.path.abspath(os.path.join(
os.path.dirname(__file__),
"section_executor_test_files",
".coafile"))
self.testcode_c_path = os.path.join(os.path.dirname(config_path),
"testcode.c")
self.result_queue = queue.Queue()
self.queue = queue.Queue()
self.log_queue = queue.Queue()
log_printer = LogPrinter(ConsolePrinter())
self.log_printer = ProcessingTestLogPrinter(self.log_queue)
(self.sections,
self.local_bears,
self.global_bears,
targets) = gather_configuration(lambda *args: True,
log_printer,
arg_list=["--config",
re.escape(config_path)])
self.assertEqual(len(self.local_bears["default"]), 1)
self.assertEqual(len(self.global_bears["default"]), 1)
self.assertEqual(targets, [])
def test_run(self):
self.sections['default'].append(Setting('jobs', "1"))
cache = FileCache(self.log_printer, "coala_test", flush_cache=True)
results = execute_section(self.sections["default"],
self.global_bears["default"],
self.local_bears["default"],
lambda *args: self.result_queue.put(args[2]),
cache,
self.log_printer)
self.assertTrue(results[0])
local_results = self.result_queue.get(timeout=0)
global_results = self.result_queue.get(timeout=0)
self.assertTrue(self.result_queue.empty())
self.assertEqual(len(local_results), 1)
self.assertEqual(len(global_results), 1)
# Result dict also returned
# One file
self.assertEqual(len(results[1]), 1)
# One global bear
self.assertEqual(len(results[2]), 1)
local_result = local_results[0]
global_result = global_results[0]
self.assertRegex(repr(local_result),
"<Result object\\(id={}, origin='LocalTestBear', aff"
"ected_code=\\(\\), severity=NORMAL, confidence=100"
", message='test msg'\\) at 0x[0-9a-fA-F]+>".format(
hex(local_result.id)))
self.assertRegex(repr(global_result),
"<Result object\\(id={}, origin='GlobalTestBear', "
"affected_code=\\(.*start=.*file=.*section_executor_"
"test_files.*line=None.*end=.*\\), severity=NORMAL, "
"confidence=100, message='test message'\\) at "
"0x[0-9a-fA-F]+>".format(hex(global_result.id)))
def test_empty_run(self):
self.sections['default'].append(Setting('jobs', "bogus!"))
results = execute_section(self.sections["default"],
[],
[],
lambda *args: self.result_queue.put(args[2]),
None,
self.log_printer)
# No results
self.assertFalse(results[0])
# One file
self.assertEqual(len(results[1]), 1)
# No global bear
self.assertEqual(len(results[2]), 0)
def test_process_queues(self):
ctrlq = queue.Queue()
# Append custom controlling sequences.
# Simulated process 1
ctrlq.put((CONTROL_ELEMENT.LOCAL, 1))
ctrlq.put((CONTROL_ELEMENT.LOCAL_FINISHED, None))
ctrlq.put((CONTROL_ELEMENT.GLOBAL, 1))
# Simulated process 2
ctrlq.put((CONTROL_ELEMENT.LOCAL, 2))
# Simulated process 1
ctrlq.put((CONTROL_ELEMENT.GLOBAL_FINISHED, None))
# Simulated process 2
ctrlq.put((CONTROL_ELEMENT.LOCAL_FINISHED, None))
ctrlq.put((CONTROL_ELEMENT.GLOBAL, 1))
ctrlq.put((CONTROL_ELEMENT.GLOBAL_FINISHED, None))
first_local = Result.from_values("o", "The first result.", file="f")
second_local = Result.from_values("ABear",
"The second result.",
file="f",
line=1)
third_local = Result.from_values("ABear",
"The second result.",
file="f",
line=4)
fourth_local = Result.from_values("ABear",
"Another result.",
file="f",
line=7)
first_global = Result("o", "The one and only global result.")
section = Section("")
section.append(Setting('min_severity', "normal"))
process_queues(
[DummyProcess(control_queue=ctrlq) for i in range(3)],
ctrlq,
{1: [first_local,
second_local,
third_local,
# The following are to be ignored
Result('o', 'm', severity=RESULT_SEVERITY.INFO),
Result.from_values("ABear", "u", "f", 2, 1),
Result.from_values("ABear", "u", "f", 3, 1)],
2: [fourth_local,
# The following are to be ignored
HiddenResult("t", "c"),
Result.from_values("ABear", "u", "f", 5, 1),
Result.from_values("ABear", "u", "f", 6, 1)]},
{1: [first_global]},
{"f": ["first line # stop ignoring, invalid ignore range\n",
"second line # ignore all\n",
"third line\n",
"fourth line # gnore shouldn't trigger without i!\n",
"# Start ignoring ABear, BBear and CBear\n",
"# Stop ignoring\n",
"seventh"]},
lambda *args: self.queue.put(args[2]),
section,
None,
self.log_printer)
self.assertEqual(self.queue.get(timeout=0), ([first_local,
second_local,
third_local]))
self.assertEqual(self.queue.get(timeout=0), ([fourth_local]))
self.assertEqual(self.queue.get(timeout=0), ([first_global]))
self.assertEqual(self.queue.get(timeout=0), ([first_global]))
def test_dead_processes(self):
ctrlq = queue.Queue()
# Not enough FINISH elements in the queue, processes start already dead
# Also queue elements are reversed
ctrlq.put((CONTROL_ELEMENT.GLOBAL_FINISHED, None))
ctrlq.put((CONTROL_ELEMENT.LOCAL_FINISHED, None))
process_queues(
[DummyProcess(ctrlq, starts_dead=True) for i in range(3)],
ctrlq, {}, {}, {},
lambda *args: self.queue.put(args[2]),
Section(""),
None,
self.log_printer)
with self.assertRaises(queue.Empty):
self.queue.get(timeout=0)
# Not enough FINISH elements in the queue, processes start already dead
ctrlq.put((CONTROL_ELEMENT.LOCAL_FINISHED, None))
ctrlq.put((CONTROL_ELEMENT.GLOBAL_FINISHED, None))
process_queues(
[DummyProcess(ctrlq, starts_dead=True) for i in range(3)],
ctrlq, {}, {}, {},
lambda *args: self.queue.put(args[2]),
Section(""),
None,
self.log_printer)
with self.assertRaises(queue.Empty):
self.queue.get(timeout=0)
def test_create_process_group(self):
p = create_process_group([sys.executable,
"-c",
process_group_test_code],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
retval = p.wait()
if retval != 0:
for line in p.stderr:
print(line, end='')
raise Exception("Subprocess did not exit correctly")
output = [i for i in p.stdout]
p.stderr.close()
p.stdout.close()
pid, pgid = [int(i.strip()) for i_out in output for i in i_out.split()]
if platform.system() != "Windows":
            # There is no way of testing this on Windows with the current
            # Python modules subprocess and os
self.assertEqual(p.pid, pgid)
def test_filter_raising_callables(self):
class A(Exception):
pass
class B(Exception):
pass
class C(Exception):
pass
def create_exception_raiser(exception):
def raiser(exc):
if exception in exc:
raise exception
return exception
return raiser
raiseA, raiseB, raiseC = (create_exception_raiser(exc)
for exc in [A, B, C])
test_list = [raiseA, raiseC, raiseB, raiseC]
self.assertEqual(list(filter_raising_callables(test_list, A, (A,))),
[C, B, C])
self.assertEqual(list(filter_raising_callables(test_list,
(B, C),
exc=(B, C))),
[A])
# Test whether non filtered exceptions bubble up.
with self.assertRaises(B):
list(filter_raising_callables(test_list, C, exc=(B, C)))
def test_get_file_dict(self):
file_dict = get_file_dict([self.testcode_c_path], self.log_printer)
self.assertEqual(len(file_dict), 1)
self.assertEqual(type(file_dict[self.testcode_c_path]),
tuple,
msg="files in file_dict should not be editable")
self.assertEqual("Files that will be checked:\n" + self.testcode_c_path,
self.log_printer.log_queue.get().message)
def test_get_file_dict_non_existent_file(self):
file_dict = get_file_dict(["non_existent_file"], self.log_printer)
self.assertEqual(file_dict, {})
self.assertIn(("Failed to read file 'non_existent_file' because of "
"an unknown error."),
self.log_printer.log_queue.get().message)
def test_simplify_section_result(self):
results = (True,
{"file1": [Result("a", "b")], "file2": None},
{"file3": [Result("a", "c")]},
None)
yielded, yielded_unfixed, all_results = simplify_section_result(results)
self.assertEqual(yielded, True)
self.assertEqual(yielded_unfixed, True)
self.assertEqual(len(all_results), 2)
def test_ignore_results(self):
ranges = [([], SourceRange.from_values("f", 1, 1, 2, 2))]
result = Result.from_values("origin",
"message",
file="e",
line=1,
column=1,
end_line=2,
end_column=2)
self.assertFalse(check_result_ignore(result, ranges))
ranges.append(([], SourceRange.from_values("e", 2, 3, 3, 3)))
self.assertFalse(check_result_ignore(result, ranges))
ranges.append(([], SourceRange.from_values("e", 1, 1, 2, 2)))
self.assertTrue(check_result_ignore(result, ranges))
result1 = Result.from_values("origin", "message", file="e")
self.assertFalse(check_result_ignore(result1, ranges))
ranges = [(['something', 'else', 'not origin'],
SourceRange.from_values("e", 1, 1, 2, 2))]
self.assertFalse(check_result_ignore(result, ranges))
ranges = [(['something', 'else', 'origin'],
SourceRange.from_values("e", 1, 1, 2, 2))]
self.assertTrue(check_result_ignore(result, ranges))
def test_ignore_glob(self):
result = Result.from_values("LineLengthBear",
"message",
file="d",
line=1,
column=1,
end_line=2,
end_column=2)
ranges = [(["(line*|space*)", "py*"],
SourceRange.from_values("d", 1, 1, 2, 2))]
self.assertTrue(check_result_ignore(result, ranges))
result = Result.from_values("SpaceConsistencyBear",
"message",
file="d",
line=1,
column=1,
end_line=2,
end_column=2)
ranges = [(["(line*|space*)", "py*"],
SourceRange.from_values("d", 1, 1, 2, 2))]
self.assertTrue(check_result_ignore(result, ranges))
result = Result.from_values("XMLBear",
"message",
file="d",
line=1,
column=1,
end_line=2,
end_column=2)
ranges = [(["(line*|space*)", "py*"],
SourceRange.from_values("d", 1, 1, 2, 2))]
self.assertFalse(check_result_ignore(result, ranges))
def test_yield_ignore_ranges(self):
test_file_dict_a = {'f':
('# Ignore aBear\n',
'a_string = "This string should be ignored"\n')}
test_ignore_range_a = list(yield_ignore_ranges(test_file_dict_a))
for test_bears, test_source_range in test_ignore_range_a:
self.assertEqual(test_bears, ['abear'])
self.assertEqual(test_source_range.start.line, 1)
self.assertEqual(test_source_range.start.column, 1)
self.assertEqual(test_source_range.end.line, 2)
self.assertEqual(test_source_range.end.column, 43)
test_file_dict_b = {'f':
('# start Ignoring bBear\n',
'b_string = "This string should be ignored"\n',
'# stop ignoring\n')}
test_ignore_range_b = list(yield_ignore_ranges(test_file_dict_b))
for test_bears, test_source_range in test_ignore_range_b:
self.assertEqual(test_bears, ['bbear'])
self.assertEqual(test_source_range.start.line, 1)
self.assertEqual(test_source_range.start.column, 1)
self.assertEqual(test_source_range.end.line, 3)
self.assertEqual(test_source_range.end.column, 16)
test_file_dict_c = {'f':
('# Start ignoring cBear\n',
'# Stop ignoring cBear This & prev ignored\n')}
test_ignore_range_c = list(yield_ignore_ranges(test_file_dict_c))
for test_bears, test_source_range in test_ignore_range_c:
self.assertEqual(test_bears, ['cbear'])
self.assertEqual(test_source_range.start.line, 1)
self.assertEqual(test_source_range.start.column, 1)
self.assertEqual(test_source_range.end.line, 2)
self.assertEqual(test_source_range.end.column, 42)
test_file_dict_d = {'f':
('# Start ignoring cBear\n',
'All of this ignored\n')}
test_ignore_range_d = list(yield_ignore_ranges(test_file_dict_d))
for test_bears, test_source_range in test_ignore_range_d:
self.assertEqual(test_bears, ['cbear'])
self.assertEqual(test_source_range.start.line, 1)
self.assertEqual(test_source_range.start.column, 1)
self.assertEqual(test_source_range.end.line, 2)
self.assertEqual(test_source_range.end.column, 20)
# This case was a bug.
test_file_dict_single_line = {'f': ('# ignore XBEAR',)}
test_ignore_range_single_line = list(yield_ignore_ranges(
test_file_dict_single_line))
self.assertEqual(len(test_ignore_range_single_line), 1)
bears, source_range = test_ignore_range_single_line[0]
self.assertEqual(bears, ['xbear'])
self.assertEqual(source_range.start.line, 1)
self.assertEqual(source_range.start.column, 1)
self.assertEqual(source_range.end.line, 1)
self.assertEqual(source_range.end.column, 14)
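# ---------------------------------------------------------------------------
# Illustrative sketch (not coala's actual implementation): the tests above
# exercise the in-file ignore comments understood by yield_ignore_ranges,
# e.g. "# Ignore XBear" for a line plus its successor and
# "# Start ignoring ..." / "# Stop ignoring" for a block. The standalone
# helper below re-implements a simplified version of that convention purely
# for illustration; the names and the exact ranges it yields are assumptions.
def sketch_yield_ignore_ranges(file_dict):
    import re
    start_regex = re.compile(r'start ignoring\s+(\S+)', re.IGNORECASE)
    stop_regex = re.compile(r'stop ignoring', re.IGNORECASE)
    ignore_regex = re.compile(r'ignore\s+(\S+)', re.IGNORECASE)
    for filename, lines in file_dict.items():
        start_line, bears = None, []
        for line_number, line in enumerate(lines, start=1):
            if start_line is None and start_regex.search(line):
                start_line = line_number
                bears = [start_regex.search(line).group(1).lower()]
            elif start_line is not None and stop_regex.search(line):
                yield bears, (filename, start_line, line_number)
                start_line, bears = None, []
            elif start_line is None and ignore_regex.search(line):
                # a plain "ignore" comment covers this line and the next one
                end_line = min(line_number + 1, len(lines))
                yield ([ignore_regex.search(line).group(1).lower()],
                       (filename, line_number, end_line))
# Example (illustrative only):
#   list(sketch_yield_ignore_ranges({'f': ('# Ignore aBear\n', 'code\n')}))
#   -> [(['abear'], ('f', 1, 2))]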
class ProcessingTest_GetDefaultActions(unittest.TestCase):
def setUp(self):
self.section = Section("X")
def test_no_key(self):
self.assertEqual(get_default_actions(self.section), ({}, {}))
def test_no_value(self):
self.section.append(Setting("default_actions", ""))
self.assertEqual(get_default_actions(self.section), ({}, {}))
def test_only_valid_actions(self):
self.section.append(Setting(
"default_actions",
"MyBear: PrintDebugMessageAction, ValidBear: ApplyPatchAction"))
self.assertEqual(
get_default_actions(self.section),
({"MyBear": PrintDebugMessageAction,
"ValidBear": ApplyPatchAction},
{}))
def test_valid_and_invalid_actions(self):
self.section.append(Setting(
"default_actions",
"MyBear: INVALID_action, ValidBear: ApplyPatchAction, XBear: ABC"))
self.assertEqual(get_default_actions(self.section),
({"ValidBear": ApplyPatchAction},
{"MyBear": "INVALID_action", "XBear": "ABC"}))
class ProcessingTest_AutoapplyActions(unittest.TestCase):
def setUp(self):
self.log_queue = queue.Queue()
self.log_printer = ProcessingTestLogPrinter(self.log_queue)
self.resultY = Result("YBear", "msg1")
self.resultZ = Result("ZBear", "msg2")
self.results = [self.resultY, self.resultZ]
self.section = Section("A")
def test_no_default_actions(self):
ret = autoapply_actions(self.results,
{},
{},
self.section,
self.log_printer)
self.assertEqual(ret, self.results)
self.assertTrue(self.log_queue.empty())
def test_with_invalid_action(self):
self.section.append(Setting("default_actions",
"XBear: nonSENSE_action"))
ret = autoapply_actions(self.results,
{},
{},
self.section,
self.log_printer)
self.assertEqual(ret, self.results)
self.assertEqual(self.log_queue.get().message,
"Selected default action 'nonSENSE_action' for bear "
"'XBear' does not exist. Ignoring action.")
self.assertTrue(self.log_queue.empty())
def test_without_default_action_and_unapplicable(self):
        # Use a result for which no default action is supplied and another
        # one where the action is not applicable.
old_is_applicable = ApplyPatchAction.is_applicable
ApplyPatchAction.is_applicable = lambda *args: False
self.section.append(Setting(
"default_actions",
"NoBear: ApplyPatchAction, YBear: ApplyPatchAction"))
ret = autoapply_actions(self.results,
{},
{},
self.section,
self.log_printer)
self.assertEqual(ret, self.results)
self.assertEqual(self.log_queue.get().message,
"Selected default action 'ApplyPatchAction' for bear "
"'YBear' is not applicable. Action not applied.")
self.assertTrue(self.log_queue.empty())
ApplyPatchAction.is_applicable = old_is_applicable
def test_applicable_action(self):
# Use a result whose action can be successfully applied.
log_printer = self.log_printer
class TestAction(ResultAction):
def apply(self, *args, **kwargs):
log_printer.debug("ACTION APPLIED SUCCESSFULLY.")
ACTIONS.append(TestAction)
self.section.append(Setting("default_actions", "Z*: TestAction"))
ret = autoapply_actions(self.results,
{},
{},
self.section,
log_printer)
self.assertEqual(ret, [self.resultY])
self.assertEqual(self.log_queue.get().message,
"ACTION APPLIED SUCCESSFULLY.")
self.assertEqual(self.log_queue.get().message,
"Applied 'TestAction' "
"on the whole project from 'ZBear'.")
self.assertTrue(self.log_queue.empty())
ACTIONS.pop()
def test_failing_action(self):
class FailingTestAction(ResultAction):
def apply(self, *args, **kwargs):
raise RuntimeError("YEAH THAT'S A FAILING BEAR")
ACTIONS.append(FailingTestAction)
self.section.append(Setting("default_actions",
"YBear: FailingTestAction"))
ret = autoapply_actions(self.results,
{},
{},
self.section,
self.log_printer)
self.assertEqual(ret, self.results)
self.assertEqual(self.log_queue.get().message,
"Failed to execute action 'FailingTestAction'"
" with error: YEAH THAT'S A FAILING BEAR.")
self.assertIn("YEAH THAT'S A FAILING BEAR",
self.log_queue.get().message)
self.assertEqual(self.log_queue.get().message,
"-> for result " + repr(self.resultY) + ".")
self.assertTrue(self.log_queue.empty())
ACTIONS.pop()
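# Illustrative sketch (an assumption, not coala's autoapply_actions): the
# "Z*: TestAction" setting tested above matches result origins by glob,
# applies the configured action and drops the result when it succeeds. A
# simplified filter could look like this, where ``actions`` maps an origin
# glob to a no-argument callable.
def sketch_autoapply(results, actions):
    from fnmatch import fnmatch
    remaining = []
    for result in results:
        action = next((act for glob, act in actions.items()
                       if fnmatch(result.origin.lower(), glob.lower())), None)
        if action is None:
            remaining.append(result)
            continue
        try:
            action()                  # on success the result is consumed
        except Exception:
            remaining.append(result)  # a failing action keeps the result
    return remaining
# Example (illustrative only):
#   class FakeResult: origin = 'ZBear'
#   sketch_autoapply([FakeResult()], {'Z*': lambda: None})  -> []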
class ProcessingTest_PrintResult(unittest.TestCase):
def setUp(self):
self.section = Section('name')
self.log_printer = LogPrinter(ConsolePrinter(), log_level=0)
def test_autoapply_override(self):
"""
Tests that the default_actions aren't automatically applied when the
autoapply setting overrides that.
"""
self.section.append(Setting('default_actions',
'somebear: PrintDebugMessageAction'))
# Verify that it would apply the action, i.e. remove the result
results = [5, HiddenResult('origin', []),
Result('somebear', 'message', debug_msg='debug')]
retval, newres = print_result(results, {}, 0, lambda *args: None,
self.section, self.log_printer, {}, [])
self.assertEqual(newres, [])
# Override and verify that result is unprocessed, i.e. not gone
self.section.append(Setting('autoapply', 'false'))
retval, newres = print_result(results, {}, 0, lambda *args: None,
self.section, self.log_printer, {}, [])
self.assertNotEqual(newres, [])
|
abhiroyg/coala
|
tests/processes/ProcessingTest.py
|
Python
|
agpl-3.0
| 25,894
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from datetime import datetime
from dateutil.relativedelta import relativedelta
from operator import itemgetter
import time
import openerp
from openerp import SUPERUSER_ID, api
from openerp import tools
from openerp.osv import fields, osv, expression
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
import openerp.addons.decimal_precision as dp
_logger = logging.getLogger(__name__)
def check_cycle(self, cr, uid, ids, context=None):
""" climbs the ``self._table.parent_id`` chains for 100 levels or
until it can't find any more parent(s)
Returns true if it runs out of parents (no cycle), false if
it can recurse 100 times without ending all chains
"""
level = 100
while len(ids):
cr.execute('SELECT DISTINCT parent_id '\
'FROM '+self._table+' '\
'WHERE id IN %s '\
'AND parent_id IS NOT NULL',(tuple(ids),))
ids = map(itemgetter(0), cr.fetchall())
if not level:
return False
level -= 1
return True
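# Illustrative sketch (not part of the original module): the same bounded
# parent-chain climb as ``check_cycle`` above, expressed over a plain dict
# mapping record ids to parent ids, which makes the termination argument
# easy to see.
def sketch_check_cycle(parent_by_id, ids, max_levels=100):
    level = max_levels
    ids = set(ids)
    while ids:
        # one "query": collect the distinct, non-null parents of the batch
        ids = {parent_by_id.get(i) for i in ids} - {None}
        if not level:
            return False   # still climbing after max_levels hops -> assume a cycle
        level -= 1
    return True            # ran out of parents, no cycle detected
# Example (illustrative only):
#   sketch_check_cycle({1: 2, 2: 3, 3: None}, [1])  -> True
#   sketch_check_cycle({1: 2, 2: 1}, [1])           -> False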
class res_company(osv.osv):
_inherit = "res.company"
_columns = {
'income_currency_exchange_account_id': fields.many2one(
'account.account',
string="Gain Exchange Rate Account",
domain="[('type', '=', 'other')]",),
'expense_currency_exchange_account_id': fields.many2one(
'account.account',
string="Loss Exchange Rate Account",
domain="[('type', '=', 'other')]",),
}
class account_payment_term(osv.osv):
_name = "account.payment.term"
_description = "Payment Term"
_columns = {
'name': fields.char('Payment Term', translate=True, required=True),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the payment term without removing it."),
'note': fields.text('Description', translate=True),
'line_ids': fields.one2many('account.payment.term.line', 'payment_id', 'Terms', copy=True),
}
_defaults = {
'active': 1,
}
_order = "name"
def compute(self, cr, uid, id, value, date_ref=False, context=None):
if not date_ref:
date_ref = datetime.now().strftime('%Y-%m-%d')
pt = self.browse(cr, uid, id, context=context)
amount = value
result = []
obj_precision = self.pool.get('decimal.precision')
prec = obj_precision.precision_get(cr, uid, 'Account')
for line in pt.line_ids:
if line.value == 'fixed':
amt = round(line.value_amount, prec)
elif line.value == 'procent':
amt = round(value * line.value_amount, prec)
elif line.value == 'balance':
amt = round(amount, prec)
if amt:
next_date = (datetime.strptime(date_ref, '%Y-%m-%d') + relativedelta(days=line.days))
if line.days2 < 0:
next_first_date = next_date + relativedelta(day=1,months=1) #Getting 1st of next month
next_date = next_first_date + relativedelta(days=line.days2)
if line.days2 > 0:
next_date += relativedelta(day=line.days2, months=1)
result.append( (next_date.strftime('%Y-%m-%d'), amt) )
amount -= amt
amount = reduce(lambda x,y: x+y[1], result, 0.0)
dist = round(value-amount, prec)
if dist:
result.append( (time.strftime('%Y-%m-%d'), dist) )
return result
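# Illustrative sketch (an assumption, not the ORM method above): the per-line
# amount and due-date logic of ``compute`` in plain Python, for a single
# payment term line described by a dict with the keys 'value', 'value_amount',
# 'days' and 'days2'. Rounding and the final balancing entry are left out.
def sketch_payment_term_line(line, total, remaining, date_ref):
    from datetime import datetime
    from dateutil.relativedelta import relativedelta
    if line['value'] == 'fixed':
        amount = line['value_amount']
    elif line['value'] == 'procent':          # 'value_amount' is a 0-1 ratio
        amount = total * line['value_amount']
    else:                                     # 'balance'
        amount = remaining
    due = datetime.strptime(date_ref, '%Y-%m-%d') + relativedelta(days=line['days'])
    if line['days2'] < 0:
        # negative day-of-month: count backwards from the 1st of next month
        due = due + relativedelta(day=1, months=1) + relativedelta(days=line['days2'])
    elif line['days2'] > 0:
        # positive day-of-month: that day of the following month
        due = due + relativedelta(day=line['days2'], months=1)
    return due.strftime('%Y-%m-%d'), amount
# Example (illustrative only): 30 days net, 50% of a 100.0 invoice
#   sketch_payment_term_line({'value': 'procent', 'value_amount': 0.5,
#                             'days': 30, 'days2': 0}, 100.0, 100.0,
#                            '2014-01-15')
#   -> ('2014-02-14', 50.0)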
class account_payment_term_line(osv.osv):
_name = "account.payment.term.line"
_description = "Payment Term Line"
_columns = {
'value': fields.selection([('procent', 'Percent'),
('balance', 'Balance'),
('fixed', 'Fixed Amount')], 'Computation',
required=True, help="""Select here the kind of valuation related to this payment term line. Note that you should have your last line with the type 'Balance' to ensure that the whole amount will be treated."""),
'value_amount': fields.float('Amount To Pay', digits_compute=dp.get_precision('Payment Term'), help="For percent enter a ratio between 0-1."),
'days': fields.integer('Number of Days', required=True, help="Number of days to add before computation of the day of month." \
"If Date=15/01, Number of Days=22, Day of Month=-1, then the due date is 28/02."),
'days2': fields.integer('Day of the Month', required=True, help="Day of the month, set -1 for the last day of the current month. If it's positive, it gives the day of the next month. Set 0 for net days (otherwise it's based on the beginning of the month)."),
'payment_id': fields.many2one('account.payment.term', 'Payment Term', required=True, select=True, ondelete='cascade'),
}
_defaults = {
'value': 'balance',
'days': 30,
'days2': 0,
}
_order = "value desc,days"
def _check_percent(self, cr, uid, ids, context=None):
obj = self.browse(cr, uid, ids[0], context=context)
if obj.value == 'procent' and ( obj.value_amount < 0.0 or obj.value_amount > 1.0):
return False
return True
_constraints = [
(_check_percent, 'Percentages for Payment Term Line must be between 0 and 1, Example: 0.02 for 2%.', ['value_amount']),
]
class account_account_type(osv.osv):
_name = "account.account.type"
_description = "Account Type"
def _get_financial_report_ref(self, cr, uid, context=None):
obj_data = self.pool.get('ir.model.data')
obj_financial_report = self.pool.get('account.financial.report')
financial_report_ref = {}
for key, financial_report in [
('asset','account_financial_report_assets0'),
('liability','account_financial_report_liability0'),
('income','account_financial_report_income0'),
('expense','account_financial_report_expense0'),
]:
try:
financial_report_ref[key] = obj_financial_report.browse(cr, uid,
obj_data.get_object_reference(cr, uid, 'account', financial_report)[1],
context=context)
except ValueError:
pass
return financial_report_ref
def _get_current_report_type(self, cr, uid, ids, name, arg, context=None):
res = {}
financial_report_ref = self._get_financial_report_ref(cr, uid, context=context)
for record in self.browse(cr, uid, ids, context=context):
res[record.id] = 'none'
for key, financial_report in financial_report_ref.items():
list_ids = [x.id for x in financial_report.account_type_ids]
if record.id in list_ids:
res[record.id] = key
return res
def _save_report_type(self, cr, uid, account_type_id, field_name, field_value, arg, context=None):
field_value = field_value or 'none'
obj_financial_report = self.pool.get('account.financial.report')
#unlink if it exists somewhere in the financial reports related to BS or PL
financial_report_ref = self._get_financial_report_ref(cr, uid, context=context)
for key, financial_report in financial_report_ref.items():
list_ids = [x.id for x in financial_report.account_type_ids]
if account_type_id in list_ids:
obj_financial_report.write(cr, uid, [financial_report.id], {'account_type_ids': [(3, account_type_id)]})
        #write it in the right place
if field_value != 'none':
return obj_financial_report.write(cr, uid, [financial_report_ref[field_value].id], {'account_type_ids': [(4, account_type_id)]})
_columns = {
'name': fields.char('Account Type', required=True, translate=True),
'code': fields.char('Code', size=32, required=True, select=True),
'close_method': fields.selection([('none', 'None'), ('balance', 'Balance'), ('detail', 'Detail'), ('unreconciled', 'Unreconciled')], 'Deferral Method', required=True, help="""Set here the method that will be used to generate the end of year journal entries for all the accounts of this type.
'None' means that nothing will be done.
'Balance' will generally be used for cash accounts.
'Detail' will copy each existing journal item of the previous year, even the reconciled ones.
'Unreconciled' will copy only the journal items that were unreconciled on the first day of the new fiscal year."""),
'report_type': fields.function(_get_current_report_type, fnct_inv=_save_report_type, type='selection', string='P&L / BS Category', store=True,
selection= [('none','/'),
('income', _('Profit & Loss (Income account)')),
('expense', _('Profit & Loss (Expense account)')),
('asset', _('Balance Sheet (Asset account)')),
('liability', _('Balance Sheet (Liability account)'))], help="This field is used to generate legal reports: profit and loss, balance sheet.", required=True),
'note': fields.text('Description'),
}
_defaults = {
'close_method': 'none',
'report_type': 'none',
}
_order = "code"
def _code_get(self, cr, uid, context=None):
acc_type_obj = self.pool.get('account.account.type')
ids = acc_type_obj.search(cr, uid, [])
res = acc_type_obj.read(cr, uid, ids, ['code', 'name'], context=context)
return [(r['code'], r['name']) for r in res]
#----------------------------------------------------------
# Accounts
#----------------------------------------------------------
class account_account(osv.osv):
_order = "parent_left"
_parent_order = "code"
_name = "account.account"
_description = "Account"
_parent_store = True
def _where_calc(self, cr, uid, domain, active_test=True, context=None):
""" Convert domains to allow easier filtering:
code: force case insensitive and right side matching search
journal_id: restrict to the accounts sharing the same account.account.type
"""
pos = 0
while pos < len(domain):
if domain[pos][0] == 'code' and domain[pos][1] in ('like', 'ilike') and domain[pos][2]:
domain[pos] = ('code', '=like', tools.ustr(domain[pos][2].replace('%', '')) + '%')
if domain[pos][0] == 'journal_id':
if not domain[pos][2]:
del domain[pos]
continue
jour = self.pool.get('account.journal').browse(cr, uid, domain[pos][2], context=context)
if (not (jour.account_control_ids or jour.type_control_ids)) or not domain[pos][2]:
domain[pos] = ('type', 'not in', ('consolidation', 'view'))
continue
ids3 = map(lambda x: x.id, jour.type_control_ids)
ids1 = super(account_account, self).search(cr, uid, [('user_type', 'in', ids3)])
ids1 += map(lambda x: x.id, jour.account_control_ids)
domain[pos] = ('id', 'in', ids1)
pos += 1
return super(account_account, self)._where_calc(cr, uid, domain, active_test, context)
def search(self, cr, uid, args, offset=0, limit=None, order=None,
context=None, count=False):
""" Check presence of key 'consolidate_children' in context to include also the Consolidated Children
of found accounts into the result of the search
"""
if context and context.has_key('consolidate_children'): #add consolidated children of accounts
ids = super(account_account, self).search(cr, uid, args, offset, limit,
order, context=context, count=count)
for consolidate_child in self.browse(cr, uid, context['account_id'], context=context).child_consol_ids:
ids.append(consolidate_child.id)
return ids
return super(account_account, self).search(cr, uid, args, offset, limit,
order, context=context, count=count)
def _get_children_and_consol(self, cr, uid, ids, context=None):
        #this function searches for all the children and all consolidated children (recursively) of the given account ids
ids2 = self.search(cr, uid, [('parent_id', 'child_of', ids)], context=context)
ids3 = []
for rec in self.browse(cr, uid, ids2, context=context):
for child in rec.child_consol_ids:
ids3.append(child.id)
if ids3:
ids3 = self._get_children_and_consol(cr, uid, ids3, context)
return ids2 + ids3
def __compute(self, cr, uid, ids, field_names, arg=None, context=None,
query='', query_params=()):
""" compute the balance, debit and/or credit for the provided
account ids
Arguments:
`ids`: account ids
`field_names`: the fields to compute (a list of any of
'balance', 'debit' and 'credit')
`arg`: unused fields.function stuff
`query`: additional query filter (as a string)
`query_params`: parameters for the provided query string
(__compute will handle their escaping) as a
tuple
"""
mapping = {
'balance': "COALESCE(SUM(l.debit),0) - COALESCE(SUM(l.credit), 0) as balance",
'debit': "COALESCE(SUM(l.debit), 0) as debit",
'credit': "COALESCE(SUM(l.credit), 0) as credit",
# by convention, foreign_balance is 0 when the account has no secondary currency, because the amounts may be in different currencies
'foreign_balance': "(SELECT CASE WHEN currency_id IS NULL THEN 0 ELSE COALESCE(SUM(l.amount_currency), 0) END FROM account_account WHERE id IN (l.account_id)) as foreign_balance",
}
#get all the necessary accounts
children_and_consolidated = self._get_children_and_consol(cr, uid, ids, context=context)
#compute for each account the balance/debit/credit from the move lines
accounts = {}
res = {}
null_result = dict((fn, 0.0) for fn in field_names)
if children_and_consolidated:
aml_query = self.pool.get('account.move.line')._query_get(cr, uid, context=context)
wheres = [""]
if query.strip():
wheres.append(query.strip())
if aml_query.strip():
wheres.append(aml_query.strip())
filters = " AND ".join(wheres)
# IN might not work ideally in case there are too many
# children_and_consolidated, in that case join on a
# values() e.g.:
# SELECT l.account_id as id FROM account_move_line l
# INNER JOIN (VALUES (id1), (id2), (id3), ...) AS tmp (id)
# ON l.account_id = tmp.id
# or make _get_children_and_consol return a query and join on that
request = ("SELECT l.account_id as id, " +\
', '.join(mapping.values()) +
" FROM account_move_line l" \
" WHERE l.account_id IN %s " \
+ filters +
" GROUP BY l.account_id")
params = (tuple(children_and_consolidated),) + query_params
cr.execute(request, params)
for row in cr.dictfetchall():
accounts[row['id']] = row
# consolidate accounts with direct children
children_and_consolidated.reverse()
brs = list(self.browse(cr, uid, children_and_consolidated, context=context))
sums = {}
currency_obj = self.pool.get('res.currency')
while brs:
current = brs.pop(0)
# can_compute = True
# for child in current.child_id:
# if child.id not in sums:
# can_compute = False
# try:
# brs.insert(0, brs.pop(brs.index(child)))
# except ValueError:
# brs.insert(0, child)
# if can_compute:
for fn in field_names:
sums.setdefault(current.id, {})[fn] = accounts.get(current.id, {}).get(fn, 0.0)
for child in current.child_id:
if child.company_id.currency_id.id == current.company_id.currency_id.id:
sums[current.id][fn] += sums[child.id][fn]
else:
sums[current.id][fn] += currency_obj.compute(cr, uid, child.company_id.currency_id.id, current.company_id.currency_id.id, sums[child.id][fn], context=context)
                # as we have to rely on values computed before, this is calculated separately from the previous fields
if current.currency_id and current.exchange_rate and \
('adjusted_balance' in field_names or 'unrealized_gain_loss' in field_names):
# Computing Adjusted Balance and Unrealized Gains and losses
# Adjusted Balance = Foreign Balance / Exchange Rate
# Unrealized Gains and losses = Adjusted Balance - Balance
adj_bal = sums[current.id].get('foreign_balance', 0.0) / current.exchange_rate
sums[current.id].update({'adjusted_balance': adj_bal, 'unrealized_gain_loss': adj_bal - sums[current.id].get('balance', 0.0)})
for id in ids:
res[id] = sums.get(id, null_result)
else:
for id in ids:
res[id] = null_result
return res
def _get_company_currency(self, cr, uid, ids, field_name, arg, context=None):
result = {}
for rec in self.browse(cr, uid, ids, context=context):
result[rec.id] = (rec.company_id.currency_id.id,rec.company_id.currency_id.symbol)
return result
def _get_child_ids(self, cr, uid, ids, field_name, arg, context=None):
result = {}
for record in self.browse(cr, uid, ids, context=context):
if record.child_parent_ids:
result[record.id] = [x.id for x in record.child_parent_ids]
else:
result[record.id] = []
if record.child_consol_ids:
for acc in record.child_consol_ids:
if acc.id not in result[record.id]:
result[record.id].append(acc.id)
return result
def _get_level(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for account in self.browse(cr, uid, ids, context=context):
#we may not know the level of the parent at the time of computation, so we
# can't simply do res[account.id] = account.parent_id.level + 1
level = 0
parent = account.parent_id
while parent:
level += 1
parent = parent.parent_id
res[account.id] = level
return res
def _set_credit_debit(self, cr, uid, account_id, name, value, arg, context=None):
if context.get('config_invisible', True):
return True
account = self.browse(cr, uid, account_id, context=context)
diff = value - getattr(account,name)
if not diff:
return True
journal_obj = self.pool.get('account.journal')
jids = journal_obj.search(cr, uid, [('type','=','situation'),('centralisation','=',1),('company_id','=',account.company_id.id)], context=context)
if not jids:
raise osv.except_osv(_('Error!'),_("You need an Opening journal with centralisation checked to set the initial balance."))
period_obj = self.pool.get('account.period')
pids = period_obj.search(cr, uid, [('special','=',True),('company_id','=',account.company_id.id)], context=context)
if not pids:
raise osv.except_osv(_('Error!'),_("There is no opening/closing period defined, please create one to set the initial balance."))
move_obj = self.pool.get('account.move.line')
move_id = move_obj.search(cr, uid, [
('journal_id','=',jids[0]),
('period_id','=',pids[0]),
('account_id','=', account_id),
(name,'>', 0.0),
('name','=', _('Opening Balance'))
], context=context)
if move_id:
move = move_obj.browse(cr, uid, move_id[0], context=context)
move_obj.write(cr, uid, move_id[0], {
name: diff+getattr(move,name)
}, context=context)
else:
if diff<0.0:
raise osv.except_osv(_('Error!'),_("Unable to adapt the initial balance (negative value)."))
nameinv = (name=='credit' and 'debit') or 'credit'
move_id = move_obj.create(cr, uid, {
'name': _('Opening Balance'),
'account_id': account_id,
'journal_id': jids[0],
'period_id': pids[0],
name: diff,
nameinv: 0.0
}, context=context)
return True
_columns = {
'name': fields.char('Name', required=True, select=True),
'currency_id': fields.many2one('res.currency', 'Secondary Currency', help="Forces all moves for this account to have this secondary currency."),
'code': fields.char('Code', size=64, required=True, select=1),
'type': fields.selection([
('view', 'View'),
('other', 'Regular'),
('receivable', 'Receivable'),
('payable', 'Payable'),
('liquidity','Liquidity'),
('consolidation', 'Consolidation'),
('closed', 'Closed'),
], 'Internal Type', required=True, help="The 'Internal Type' is used for features available on "\
"different types of accounts: view can not have journal items, consolidation are accounts that "\
"can have children accounts for multi-company consolidations, payable/receivable are for "\
"partners accounts (for debit/credit computations), closed for depreciated accounts."),
'user_type': fields.many2one('account.account.type', 'Account Type', required=True,
help="Account Type is used for information purpose, to generate "
"country-specific legal reports, and set the rules to close a fiscal year and generate opening entries."),
'financial_report_ids': fields.many2many('account.financial.report', 'account_account_financial_report', 'account_id', 'report_line_id', 'Financial Reports'),
'parent_id': fields.many2one('account.account', 'Parent', ondelete='cascade', domain=[('type','=','view')]),
'child_parent_ids': fields.one2many('account.account','parent_id','Children'),
'child_consol_ids': fields.many2many('account.account', 'account_account_consol_rel', 'child_id', 'parent_id', 'Consolidated Children'),
'child_id': fields.function(_get_child_ids, type='many2many', relation="account.account", string="Child Accounts"),
'balance': fields.function(__compute, digits_compute=dp.get_precision('Account'), string='Balance', multi='balance'),
'credit': fields.function(__compute, fnct_inv=_set_credit_debit, digits_compute=dp.get_precision('Account'), string='Credit', multi='balance'),
'debit': fields.function(__compute, fnct_inv=_set_credit_debit, digits_compute=dp.get_precision('Account'), string='Debit', multi='balance'),
'foreign_balance': fields.function(__compute, digits_compute=dp.get_precision('Account'), string='Foreign Balance', multi='balance',
help="Total amount (in Secondary currency) for transactions held in secondary currency for this account."),
'adjusted_balance': fields.function(__compute, digits_compute=dp.get_precision('Account'), string='Adjusted Balance', multi='balance',
help="Total amount (in Company currency) for transactions held in secondary currency for this account."),
'unrealized_gain_loss': fields.function(__compute, digits_compute=dp.get_precision('Account'), string='Unrealized Gain or Loss', multi='balance',
help="Value of Loss or Gain due to changes in exchange rate when doing multi-currency transactions."),
'reconcile': fields.boolean('Allow Reconciliation', help="Check this box if this account allows reconciliation of journal items."),
'exchange_rate': fields.related('currency_id', 'rate', type='float', string='Exchange Rate', digits=(12,6)),
'shortcut': fields.char('Shortcut', size=12),
'tax_ids': fields.many2many('account.tax', 'account_account_tax_default_rel',
'account_id', 'tax_id', 'Default Taxes'),
'note': fields.text('Internal Notes'),
'company_currency_id': fields.function(_get_company_currency, type='many2one', relation='res.currency', string='Company Currency'),
'company_id': fields.many2one('res.company', 'Company', required=True),
'active': fields.boolean('Active', select=2, help="If the active field is set to False, it will allow you to hide the account without removing it."),
'parent_left': fields.integer('Parent Left', select=1),
'parent_right': fields.integer('Parent Right', select=1),
'currency_mode': fields.selection([('current', 'At Date'), ('average', 'Average Rate')], 'Outgoing Currencies Rate',
help=
'This will select how the current currency rate for outgoing transactions is computed. '\
'In most countries the legal method is "average" but only a few software systems are able to '\
'manage this. So if you import from another software system you may have to use the rate at date. ' \
'Incoming transactions always use the rate at date.', \
required=True),
'level': fields.function(_get_level, string='Level', method=True, type='integer',
store={
'account.account': (_get_children_and_consol, ['level', 'parent_id'], 10),
}),
}
_defaults = {
'type': 'other',
'reconcile': False,
'active': True,
'currency_mode': 'current',
'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'account.account', context=c),
}
def _check_recursion(self, cr, uid, ids, context=None):
obj_self = self.browse(cr, uid, ids[0], context=context)
p_id = obj_self.parent_id and obj_self.parent_id.id
        if (obj_self in obj_self.child_consol_ids) or (p_id and (p_id == obj_self.id)):
return False
while(ids):
cr.execute('SELECT DISTINCT child_id '\
'FROM account_account_consol_rel '\
'WHERE parent_id IN %s', (tuple(ids),))
child_ids = map(itemgetter(0), cr.fetchall())
c_ids = child_ids
if (p_id and (p_id in c_ids)) or (obj_self.id in c_ids):
return False
while len(c_ids):
s_ids = self.search(cr, uid, [('parent_id', 'in', c_ids)])
if p_id and (p_id in s_ids):
return False
c_ids = s_ids
ids = child_ids
return True
def _check_type(self, cr, uid, ids, context=None):
if context is None:
context = {}
accounts = self.browse(cr, uid, ids, context=context)
for account in accounts:
if account.child_id and account.type not in ('view', 'consolidation'):
return False
return True
def _check_account_type(self, cr, uid, ids, context=None):
for account in self.browse(cr, uid, ids, context=context):
if account.type in ('receivable', 'payable') and account.user_type.close_method != 'unreconciled':
return False
return True
def _check_company_account(self, cr, uid, ids, context=None):
for account in self.browse(cr, uid, ids, context=context):
if account.parent_id:
if account.company_id != account.parent_id.company_id:
return False
return True
_constraints = [
(_check_recursion, 'Error!\nYou cannot create recursive accounts.', ['parent_id']),
(_check_type, 'Configuration Error!\nYou cannot define children to an account with internal type different of "View".', ['type']),
(_check_account_type, 'Configuration Error!\nYou cannot select an account type with a deferral method different of "Unreconciled" for accounts with internal type "Payable/Receivable".', ['user_type','type']),
(_check_company_account, 'Error!\nYou cannot create an account which has parent account of different company.', ['parent_id']),
]
_sql_constraints = [
('code_company_uniq', 'unique (code,company_id)', 'The code of the account must be unique per company !')
]
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
args = args[:]
ids = []
try:
if name and str(name).startswith('partner:'):
part_id = int(name.split(':')[1])
part = self.pool.get('res.partner').browse(cr, user, part_id, context=context)
args += [('id', 'in', (part.property_account_payable.id, part.property_account_receivable.id))]
name = False
if name and str(name).startswith('type:'):
type = name.split(':')[1]
args += [('type', '=', type)]
name = False
except:
pass
if name:
if operator not in expression.NEGATIVE_TERM_OPERATORS:
plus_percent = lambda n: n+'%'
code_op, code_conv = {
'ilike': ('=ilike', plus_percent),
'like': ('=like', plus_percent),
}.get(operator, (operator, lambda n: n))
ids = self.search(cr, user, ['|', ('code', code_op, code_conv(name)), '|', ('shortcut', '=', name), ('name', operator, name)]+args, limit=limit)
if not ids and len(name.split()) >= 2:
#Separating code and name of account for searching
operand1,operand2 = name.split(' ',1) #name can contain spaces e.g. OpenERP S.A.
ids = self.search(cr, user, [('code', operator, operand1), ('name', operator, operand2)]+ args, limit=limit)
else:
ids = self.search(cr, user, ['&','!', ('code', '=like', name+"%"), ('name', operator, name)]+args, limit=limit)
                # as negation is meant to restrict, only do this if we already have results
if ids and len(name.split()) >= 2:
operand1,operand2 = name.split(' ',1) #name can contain spaces e.g. OpenERP S.A.
ids = self.search(cr, user, [('code', operator, operand1), ('name', operator, operand2), ('id', 'in', ids)]+ args, limit=limit)
else:
ids = self.search(cr, user, args, context=context, limit=limit)
return self.name_get(cr, user, ids, context=context)
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
if isinstance(ids, (int, long)):
ids = [ids]
reads = self.read(cr, uid, ids, ['name', 'code'], context=context)
res = []
for record in reads:
name = record['name']
if record['code']:
name = record['code'] + ' ' + name
res.append((record['id'], name))
return res
def copy(self, cr, uid, id, default=None, context=None, done_list=None, local=False):
default = {} if default is None else default.copy()
if done_list is None:
done_list = []
account = self.browse(cr, uid, id, context=context)
new_child_ids = []
default.update(code=_("%s (copy)") % (account['code'] or ''))
if not local:
done_list = []
if account.id in done_list:
return False
done_list.append(account.id)
if account:
for child in account.child_id:
child_ids = self.copy(cr, uid, child.id, default, context=context, done_list=done_list, local=True)
if child_ids:
new_child_ids.append(child_ids)
default['child_parent_ids'] = [(6, 0, new_child_ids)]
else:
default['child_parent_ids'] = False
return super(account_account, self).copy(cr, uid, id, default, context=context)
def _check_moves(self, cr, uid, ids, method, context=None):
line_obj = self.pool.get('account.move.line')
account_ids = self.search(cr, uid, [('id', 'child_of', ids)], context=context)
if line_obj.search(cr, uid, [('account_id', 'in', account_ids)], context=context):
if method == 'write':
raise osv.except_osv(_('Error!'), _('You cannot deactivate an account that contains journal items.'))
elif method == 'unlink':
raise osv.except_osv(_('Error!'), _('You cannot remove an account that contains journal items.'))
#Checking whether the account is set as a property to any Partner or not
values = ['account.account,%s' % (account_id,) for account_id in ids]
partner_prop_acc = self.pool.get('ir.property').search(cr, uid, [('value_reference','in', values)], context=context)
if partner_prop_acc:
raise osv.except_osv(_('Warning!'), _('You cannot remove/deactivate an account which is set on a customer or supplier.'))
return True
def _check_allow_type_change(self, cr, uid, ids, new_type, context=None):
restricted_groups = ['consolidation','view']
line_obj = self.pool.get('account.move.line')
for account in self.browse(cr, uid, ids, context=context):
old_type = account.type
account_ids = self.search(cr, uid, [('id', 'child_of', [account.id])])
if line_obj.search(cr, uid, [('account_id', 'in', account_ids)]):
#Check for 'Closed' type
if old_type == 'closed' and new_type !='closed':
raise osv.except_osv(_('Warning!'), _("You cannot change the type of account from 'Closed' to any other type as it contains journal items!"))
                # Forbid changing an account type to one of the restricted_groups as the account (or one of its children) contains journal items
if (new_type in restricted_groups):
raise osv.except_osv(_('Warning!'), _("You cannot change the type of account to '%s' type as it contains journal items!") % (new_type,))
return True
    # For legal reasons (it is forbidden to modify journal entries which belong to a closed fiscal year or period),
    # forbid modifying the code of an account if journal entries have already been posted on this account. This
    # cannot simply be made 'configurable' since it can lead to a lack of confidence in Odoo and this is what we
    # want to change.
def _check_allow_code_change(self, cr, uid, ids, context=None):
line_obj = self.pool.get('account.move.line')
for account in self.browse(cr, uid, ids, context=context):
account_ids = self.search(cr, uid, [('id', 'child_of', [account.id])], context=context)
if line_obj.search(cr, uid, [('account_id', 'in', account_ids)], context=context):
raise osv.except_osv(_('Warning !'), _("You cannot change the code of account which contains journal items!"))
return True
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
if not ids:
return True
if isinstance(ids, (int, long)):
ids = [ids]
        # Don't allow changing the company_id when account_move_line records already exist
if 'company_id' in vals:
move_lines = self.pool.get('account.move.line').search(cr, uid, [('account_id', 'in', ids)], context=context)
if move_lines:
# Allow the write if the value is the same
for i in [i['company_id'][0] for i in self.read(cr,uid,ids,['company_id'], context=context)]:
if vals['company_id']!=i:
raise osv.except_osv(_('Warning!'), _('You cannot change the owner company of an account that already contains journal items.'))
if 'active' in vals and not vals['active']:
self._check_moves(cr, uid, ids, "write", context=context)
if 'type' in vals.keys():
self._check_allow_type_change(cr, uid, ids, vals['type'], context=context)
if 'code' in vals.keys():
self._check_allow_code_change(cr, uid, ids, context=context)
return super(account_account, self).write(cr, uid, ids, vals, context=context)
def unlink(self, cr, uid, ids, context=None):
self._check_moves(cr, uid, ids, "unlink", context=context)
return super(account_account, self).unlink(cr, uid, ids, context=context)
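# Illustrative sketch (not part of the original module): the bottom-up
# aggregation performed by ``__compute`` above, reduced to plain Python.
# ``own`` maps account id -> its own balance from move lines, ``children``
# maps account id -> list of direct child ids; processing ids from the
# deepest to the shallowest lets every parent simply add its children's
# already-computed totals (currency conversion is left out here).
def sketch_consolidate_balances(ordered_ids, own, children):
    sums = {}
    for account_id in reversed(ordered_ids):   # deepest accounts first
        total = own.get(account_id, 0.0)
        for child_id in children.get(account_id, ()):
            total += sums[child_id]
        sums[account_id] = total
    return sums
# Example (illustrative only): account 1 is the parent of 2 and 3
#   sketch_consolidate_balances([1, 2, 3],
#                               {2: 10.0, 3: 5.0},
#                               {1: [2, 3]})
#   -> {3: 5.0, 2: 10.0, 1: 15.0}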
class account_journal(osv.osv):
_name = "account.journal"
_description = "Journal"
_columns = {
        'with_last_closing_balance': fields.boolean('Opening With Last Closing Balance', help="For cash or bank journal, this option should be unchecked when the starting balance should always be set to 0 for new documents."),
'name': fields.char('Journal Name', required=True),
'code': fields.char('Code', size=5, required=True, help="The code will be displayed on reports."),
'type': fields.selection([('sale', 'Sale'),('sale_refund','Sale Refund'), ('purchase', 'Purchase'), ('purchase_refund','Purchase Refund'), ('cash', 'Cash'), ('bank', 'Bank and Checks'), ('general', 'General'), ('situation', 'Opening/Closing Situation')], 'Type', size=32, required=True,
help="Select 'Sale' for customer invoices journals."\
" Select 'Purchase' for supplier invoices journals."\
" Select 'Cash' or 'Bank' for journals that are used in customer or supplier payments."\
" Select 'General' for miscellaneous operations journals."\
" Select 'Opening/Closing Situation' for entries generated for new fiscal years."),
'type_control_ids': fields.many2many('account.account.type', 'account_journal_type_rel', 'journal_id','type_id', 'Type Controls', domain=[('code','<>','view'), ('code', '<>', 'closed')]),
'account_control_ids': fields.many2many('account.account', 'account_account_type_rel', 'journal_id','account_id', 'Account', domain=[('type','<>','view'), ('type', '<>', 'closed')]),
'default_credit_account_id': fields.many2one('account.account', 'Default Credit Account', domain="[('type','!=','view')]", help="It acts as a default account for credit amount"),
'default_debit_account_id': fields.many2one('account.account', 'Default Debit Account', domain="[('type','!=','view')]", help="It acts as a default account for debit amount"),
'centralisation': fields.boolean('Centralized Counterpart', help="Check this box to determine that each entry of this journal won't create a new counterpart but will share the same counterpart. This is used in fiscal year closing."),
        'update_posted': fields.boolean('Allow Cancelling Entries', help="Check this box if you want to allow the cancellation of the entries related to this journal or of the invoice related to this journal"),
'group_invoice_lines': fields.boolean('Group Invoice Lines', help="If this box is checked, the system will try to group the accounting lines when generating them from invoices."),
'sequence_id': fields.many2one('ir.sequence', 'Entry Sequence', help="This field contains the information related to the numbering of the journal entries of this journal.", required=True, copy=False),
'user_id': fields.many2one('res.users', 'User', help="The user responsible for this journal"),
'groups_id': fields.many2many('res.groups', 'account_journal_group_rel', 'journal_id', 'group_id', 'Groups'),
'currency': fields.many2one('res.currency', 'Currency', help='The currency used to enter statement'),
'entry_posted': fields.boolean('Autopost Created Moves', help='Check this box to automatically post entries of this journal. Note that legally, some entries may be automatically posted when the source document is validated (Invoices), whatever the status of this field.'),
'company_id': fields.many2one('res.company', 'Company', required=True, select=1, help="Company related to this journal"),
        'allow_date':fields.boolean('Check Date in Period', help= 'If checked, the entry won\'t be created if the entry date is not included in the selected period'),
'profit_account_id' : fields.many2one('account.account', 'Profit Account'),
'loss_account_id' : fields.many2one('account.account', 'Loss Account'),
'internal_account_id' : fields.many2one('account.account', 'Internal Transfers Account', select=1),
        'cash_control' : fields.boolean('Cash Control', help='Check this option if the journal should be controlled at opening/closing'),
}
_defaults = {
'cash_control' : False,
'with_last_closing_balance' : True,
'user_id': lambda self, cr, uid, context: uid,
'company_id': lambda self, cr, uid, c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.id,
}
_sql_constraints = [
('code_company_uniq', 'unique (code, company_id)', 'The code of the journal must be unique per company !'),
('name_company_uniq', 'unique (name, company_id)', 'The name of the journal must be unique per company !'),
]
_order = 'code'
def _check_currency(self, cr, uid, ids, context=None):
for journal in self.browse(cr, uid, ids, context=context):
if journal.currency:
if journal.default_credit_account_id and not journal.default_credit_account_id.currency_id.id == journal.currency.id:
return False
if journal.default_debit_account_id and not journal.default_debit_account_id.currency_id.id == journal.currency.id:
return False
return True
_constraints = [
(_check_currency, 'Configuration error!\nThe currency chosen should be shared by the default accounts too.', ['currency','default_debit_account_id','default_credit_account_id']),
]
def copy(self, cr, uid, id, default=None, context=None):
default = dict(context or {})
journal = self.browse(cr, uid, id, context=context)
default.update(
code=_("%s (copy)") % (journal['code'] or ''),
name=_("%s (copy)") % (journal['name'] or ''))
return super(account_journal, self).copy(cr, uid, id, default, context=context)
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
for journal in self.browse(cr, uid, ids, context=context):
if 'company_id' in vals and journal.company_id.id != vals['company_id']:
move_lines = self.pool.get('account.move.line').search(cr, uid, [('journal_id', 'in', ids)])
if move_lines:
raise osv.except_osv(_('Warning!'), _('This journal already contains items, therefore you cannot modify its company field.'))
return super(account_journal, self).write(cr, uid, ids, vals, context=context)
def create_sequence(self, cr, uid, vals, context=None):
""" Create new no_gap entry sequence for every new Joural
"""
# in account.journal code is actually the prefix of the sequence
# whereas ir.sequence code is a key to lookup global sequences.
prefix = vals['code'].upper()
seq = {
'name': vals['name'],
'implementation':'no_gap',
'prefix': prefix + "/%(year)s/",
'padding': 4,
'number_increment': 1
}
if 'company_id' in vals:
seq['company_id'] = vals['company_id']
return self.pool.get('ir.sequence').create(cr, uid, seq)
def create(self, cr, uid, vals, context=None):
if not 'sequence_id' in vals or not vals['sequence_id']:
# if we have the right to create a journal, we should be able to
            # create its sequence.
vals.update({'sequence_id': self.create_sequence(cr, SUPERUSER_ID, vals, context)})
return super(account_journal, self).create(cr, uid, vals, context)
def name_get(self, cr, user, ids, context=None):
"""
        Returns a list of tuples containing id, name.
        result format: [(id, name), (id, name), ...]
@param cr: A database cursor
@param user: ID of the user currently logged in
@param ids: list of ids for which name should be read
@param context: context arguments, like lang, time zone
        @return: Returns a list of tuples containing id, name
"""
if not ids:
return []
if isinstance(ids, (int, long)):
ids = [ids]
result = self.browse(cr, user, ids, context=context)
res = []
for rs in result:
if rs.currency:
currency = rs.currency
else:
currency = rs.company_id.currency_id
name = "%s (%s)" % (rs.name, currency.name)
res += [(rs.id, name)]
return res
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
if operator in expression.NEGATIVE_TERM_OPERATORS:
domain = [('code', operator, name), ('name', operator, name)]
else:
domain = ['|', ('code', operator, name), ('name', operator, name)]
ids = self.search(cr, user, expression.AND([domain, args]), limit=limit, context=context)
return self.name_get(cr, user, ids, context=context)
class account_fiscalyear(osv.osv):
_name = "account.fiscalyear"
_description = "Fiscal Year"
_columns = {
'name': fields.char('Fiscal Year', required=True),
'code': fields.char('Code', size=6, required=True),
'company_id': fields.many2one('res.company', 'Company', required=True),
'date_start': fields.date('Start Date', required=True),
'date_stop': fields.date('End Date', required=True),
'period_ids': fields.one2many('account.period', 'fiscalyear_id', 'Periods'),
'state': fields.selection([('draft','Open'), ('done','Closed')], 'Status', readonly=True, copy=False),
'end_journal_period_id': fields.many2one(
'account.journal.period', 'End of Year Entries Journal',
readonly=True, copy=False),
}
_defaults = {
'state': 'draft',
'company_id': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.id,
}
_order = "date_start, id"
def _check_duration(self, cr, uid, ids, context=None):
obj_fy = self.browse(cr, uid, ids[0], context=context)
if obj_fy.date_stop < obj_fy.date_start:
return False
return True
_constraints = [
(_check_duration, 'Error!\nThe start date of a fiscal year must precede its end date.', ['date_start','date_stop'])
]
def create_period3(self, cr, uid, ids, context=None):
return self.create_period(cr, uid, ids, context, 3)
def create_period(self, cr, uid, ids, context=None, interval=1):
period_obj = self.pool.get('account.period')
for fy in self.browse(cr, uid, ids, context=context):
ds = datetime.strptime(fy.date_start, '%Y-%m-%d')
period_obj.create(cr, uid, {
'name': "%s %s" % (_('Opening Period'), ds.strftime('%Y')),
'code': ds.strftime('00/%Y'),
'date_start': ds,
'date_stop': ds,
'special': True,
'fiscalyear_id': fy.id,
})
while ds.strftime('%Y-%m-%d') < fy.date_stop:
de = ds + relativedelta(months=interval, days=-1)
if de.strftime('%Y-%m-%d') > fy.date_stop:
de = datetime.strptime(fy.date_stop, '%Y-%m-%d')
period_obj.create(cr, uid, {
'name': ds.strftime('%m/%Y'),
'code': ds.strftime('%m/%Y'),
'date_start': ds.strftime('%Y-%m-%d'),
'date_stop': de.strftime('%Y-%m-%d'),
'fiscalyear_id': fy.id,
})
ds = ds + relativedelta(months=interval)
return True
def find(self, cr, uid, dt=None, exception=True, context=None):
res = self.finds(cr, uid, dt, exception, context=context)
return res and res[0] or False
def finds(self, cr, uid, dt=None, exception=True, context=None):
if context is None: context = {}
if not dt:
dt = fields.date.context_today(self,cr,uid,context=context)
args = [('date_start', '<=' ,dt), ('date_stop', '>=', dt)]
if context.get('company_id', False):
company_id = context['company_id']
else:
company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
args.append(('company_id', '=', company_id))
ids = self.search(cr, uid, args, context=context)
if not ids:
if exception:
model, action_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'account', 'action_account_fiscalyear')
msg = _('There is no period defined for this date: %s.\nPlease go to Configuration/Periods and configure a fiscal year.') % dt
raise openerp.exceptions.RedirectWarning(msg, action_id, _('Go to the configuration panel'))
else:
return []
return ids
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=80):
if args is None:
args = []
if operator in expression.NEGATIVE_TERM_OPERATORS:
domain = [('code', operator, name), ('name', operator, name)]
else:
domain = ['|', ('code', operator, name), ('name', operator, name)]
ids = self.search(cr, user, expression.AND([domain, args]), limit=limit, context=context)
return self.name_get(cr, user, ids, context=context)
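# Illustrative sketch (not part of the original module): the period slicing
# loop of ``create_period`` above, reduced to a standalone helper that returns
# the (date_start, date_stop) pairs of the regular periods (the special
# opening period is left out).
def sketch_slice_fiscal_year(date_start, date_stop, interval=1):
    from datetime import datetime
    from dateutil.relativedelta import relativedelta
    ds = datetime.strptime(date_start, '%Y-%m-%d')
    stop = datetime.strptime(date_stop, '%Y-%m-%d')
    periods = []
    while ds < stop:
        de = ds + relativedelta(months=interval, days=-1)
        if de > stop:
            de = stop                 # clamp the last period to the year end
        periods.append((ds.strftime('%Y-%m-%d'), de.strftime('%Y-%m-%d')))
        ds = ds + relativedelta(months=interval)
    return periods
# Example (illustrative only): quarterly periods, like create_period3
#   sketch_slice_fiscal_year('2014-01-01', '2014-12-31', interval=3)
#   -> [('2014-01-01', '2014-03-31'), ('2014-04-01', '2014-06-30'),
#       ('2014-07-01', '2014-09-30'), ('2014-10-01', '2014-12-31')]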
class account_period(osv.osv):
_name = "account.period"
_description = "Account period"
_columns = {
'name': fields.char('Period Name', required=True),
'code': fields.char('Code', size=12),
'special': fields.boolean('Opening/Closing Period',help="These periods can overlap."),
'date_start': fields.date('Start of Period', required=True, states={'done':[('readonly',True)]}),
'date_stop': fields.date('End of Period', required=True, states={'done':[('readonly',True)]}),
'fiscalyear_id': fields.many2one('account.fiscalyear', 'Fiscal Year', required=True, states={'done':[('readonly',True)]}, select=True),
'state': fields.selection([('draft','Open'), ('done','Closed')], 'Status', readonly=True, copy=False,
                                   help='When monthly periods are created, the status is \'Draft\'. At the end of the monthly period it is in \'Done\' status.'),
'company_id': fields.related('fiscalyear_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True)
}
_defaults = {
'state': 'draft',
}
_order = "date_start, special desc"
_sql_constraints = [
('name_company_uniq', 'unique(name, company_id)', 'The name of the period must be unique per company!'),
]
def _check_duration(self,cr,uid,ids,context=None):
obj_period = self.browse(cr, uid, ids[0], context=context)
if obj_period.date_stop < obj_period.date_start:
return False
return True
def _check_year_limit(self,cr,uid,ids,context=None):
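        # A non-special period must lie entirely within its fiscal year and must
        # not overlap another non-special period of the same company.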
for obj_period in self.browse(cr, uid, ids, context=context):
if obj_period.special:
continue
if obj_period.fiscalyear_id.date_stop < obj_period.date_stop or \
obj_period.fiscalyear_id.date_stop < obj_period.date_start or \
obj_period.fiscalyear_id.date_start > obj_period.date_start or \
obj_period.fiscalyear_id.date_start > obj_period.date_stop:
return False
pids = self.search(cr, uid, [('date_stop','>=',obj_period.date_start),('date_start','<=',obj_period.date_stop),('special','=',False),('id','<>',obj_period.id)])
for period in self.browse(cr, uid, pids):
if period.fiscalyear_id.company_id.id==obj_period.fiscalyear_id.company_id.id:
return False
return True
_constraints = [
(_check_duration, 'Error!\nThe duration of the Period(s) is/are invalid.', ['date_stop']),
        (_check_year_limit, 'Error!\nThe period is invalid. Either some periods are overlapping or the period\'s dates do not match the scope of the fiscal year.', ['date_stop'])
]
@api.returns('self')
def next(self, cr, uid, period, step, context=None):
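        # Return the id of the period located `step` periods after `period`
        # (ordered by date_start), or False if there are not enough periods.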
ids = self.search(cr, uid, [('date_start','>',period.date_start)])
if len(ids)>=step:
return ids[step-1]
return False
@api.returns('self')
def find(self, cr, uid, dt=None, context=None):
if context is None: context = {}
if not dt:
dt = fields.date.context_today(self, cr, uid, context=context)
args = [('date_start', '<=' ,dt), ('date_stop', '>=', dt)]
if context.get('company_id', False):
args.append(('company_id', '=', context['company_id']))
else:
company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
args.append(('company_id', '=', company_id))
result = []
if context.get('account_period_prefer_normal', True):
# look for non-special periods first, and fallback to all if no result is found
result = self.search(cr, uid, args + [('special', '=', False)], context=context)
if not result:
result = self.search(cr, uid, args, context=context)
if not result:
model, action_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'account', 'action_account_period')
msg = _('There is no period defined for this date: %s.\nPlease go to Configuration/Periods.') % dt
raise openerp.exceptions.RedirectWarning(msg, action_id, _('Go to the configuration panel'))
return result
def action_draft(self, cr, uid, ids, context=None):
mode = 'draft'
for period in self.browse(cr, uid, ids):
if period.fiscalyear_id.state == 'done':
                raise osv.except_osv(_('Warning!'), _('You cannot re-open a period which belongs to a closed fiscal year.'))
cr.execute('update account_journal_period set state=%s where period_id in %s', (mode, tuple(ids),))
cr.execute('update account_period set state=%s where id in %s', (mode, tuple(ids),))
self.invalidate_cache(cr, uid, context=context)
return True
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
if args is None:
args = []
if operator in expression.NEGATIVE_TERM_OPERATORS:
domain = [('code', operator, name), ('name', operator, name)]
else:
domain = ['|', ('code', operator, name), ('name', operator, name)]
ids = self.search(cr, user, expression.AND([domain, args]), limit=limit, context=context)
return self.name_get(cr, user, ids, context=context)
def write(self, cr, uid, ids, vals, context=None):
if 'company_id' in vals:
move_lines = self.pool.get('account.move.line').search(cr, uid, [('period_id', 'in', ids)])
if move_lines:
                raise osv.except_osv(_('Warning!'), _('This period already contains journal items, therefore you cannot modify its company field.'))
return super(account_period, self).write(cr, uid, ids, vals, context=context)
def build_ctx_periods(self, cr, uid, period_from_id, period_to_id):
if period_from_id == period_to_id:
return [period_from_id]
period_from = self.browse(cr, uid, period_from_id)
period_date_start = period_from.date_start
company1_id = period_from.company_id.id
period_to = self.browse(cr, uid, period_to_id)
period_date_stop = period_to.date_stop
company2_id = period_to.company_id.id
if company1_id != company2_id:
raise osv.except_osv(_('Error!'), _('You should choose the periods that belong to the same company.'))
if period_date_start > period_date_stop:
            raise osv.except_osv(_('Error!'), _('The start period should precede the end period.'))
# /!\ We do not include a criterion on the company_id field below, to allow producing consolidated reports
# on multiple companies. It will only work when start/end periods are selected and no fiscal year is chosen.
        # For a period_from of January, we want to exclude the opening period (it has the same date_start,
        # so we check whether period_from is special to decide whether to add that clause to the search).
if period_from.special:
return self.search(cr, uid, [('date_start', '>=', period_date_start), ('date_stop', '<=', period_date_stop)])
return self.search(cr, uid, [('date_start', '>=', period_date_start), ('date_stop', '<=', period_date_stop), ('special', '=', False)])
class account_journal_period(osv.osv):
_name = "account.journal.period"
_description = "Journal Period"
def _icon_get(self, cr, uid, ids, field_name, arg=None, context=None):
result = {}.fromkeys(ids, 'STOCK_NEW')
for r in self.read(cr, uid, ids, ['state']):
result[r['id']] = {
'draft': 'STOCK_NEW',
'printed': 'STOCK_PRINT_PREVIEW',
'done': 'STOCK_DIALOG_AUTHENTICATION',
}.get(r['state'], 'STOCK_NEW')
return result
_columns = {
'name': fields.char('Journal-Period Name', required=True),
'journal_id': fields.many2one('account.journal', 'Journal', required=True, ondelete="cascade"),
'period_id': fields.many2one('account.period', 'Period', required=True, ondelete="cascade"),
'icon': fields.function(_icon_get, string='Icon', type='char'),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the journal period without removing it."),
'state': fields.selection([('draft','Draft'), ('printed','Printed'), ('done','Done')], 'Status', required=True, readonly=True,
                                  help='When the journal period is created, the status is \'Draft\'. When a report is printed it moves to the \'Printed\' status. When all transactions are done, it is set to \'Done\'.'),
'fiscalyear_id': fields.related('period_id', 'fiscalyear_id', string='Fiscal Year', type='many2one', relation='account.fiscalyear'),
'company_id': fields.related('journal_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True)
}
def _check(self, cr, uid, ids, context=None):
for obj in self.browse(cr, uid, ids, context=context):
cr.execute('select * from account_move_line where journal_id=%s and period_id=%s limit 1', (obj.journal_id.id, obj.period_id.id))
res = cr.fetchall()
if res:
raise osv.except_osv(_('Error!'), _('You cannot modify/delete a journal with entries for this period.'))
return True
def write(self, cr, uid, ids, vals, context=None):
self._check(cr, uid, ids, context=context)
return super(account_journal_period, self).write(cr, uid, ids, vals, context=context)
def create(self, cr, uid, vals, context=None):
period_id = vals.get('period_id',False)
if period_id:
period = self.pool.get('account.period').browse(cr, uid, period_id, context=context)
vals['state']=period.state
return super(account_journal_period, self).create(cr, uid, vals, context)
def unlink(self, cr, uid, ids, context=None):
self._check(cr, uid, ids, context=context)
return super(account_journal_period, self).unlink(cr, uid, ids, context=context)
_defaults = {
'state': 'draft',
'active': True,
}
_order = "period_id"
#----------------------------------------------------------
# Entries
#----------------------------------------------------------
class account_move(osv.osv):
_name = "account.move"
_description = "Account Entry"
_order = 'id desc'
def account_assert_balanced(self, cr, uid, context=None):
cr.execute("""\
SELECT move_id
FROM account_move_line
WHERE state = 'valid'
GROUP BY move_id
HAVING abs(sum(debit) - sum(credit)) > 0.00001
""")
        assert len(cr.fetchall()) == 0, \
            "All journal items in state 'valid' must have the sum of credits " \
            "equal to the sum of debits"
return True
def account_move_prepare(self, cr, uid, journal_id, date=False, ref='', company_id=False, context=None):
'''
Prepares and returns a dictionary of values, ready to be passed to create() based on the parameters received.
'''
if not date:
date = fields.date.today()
period_obj = self.pool.get('account.period')
if not company_id:
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
company_id = user.company_id.id
if context is None:
context = {}
#put the company in context to find the good period
ctx = context.copy()
ctx.update({'company_id': company_id})
return {
'journal_id': journal_id,
'date': date,
'period_id': period_obj.find(cr, uid, date, context=ctx)[0],
'ref': ref,
'company_id': company_id,
}
def name_get(self, cursor, user, ids, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
if not ids:
return []
res = []
data_move = self.pool.get('account.move').browse(cursor, user, ids, context=context)
for move in data_move:
if move.state=='draft':
name = '*' + str(move.id)
else:
name = move.name
res.append((move.id, name))
return res
def _get_period(self, cr, uid, context=None):
ctx = dict(context or {})
period_ids = self.pool.get('account.period').find(cr, uid, context=ctx)
return period_ids[0]
def _amount_compute(self, cr, uid, ids, name, args, context, where =''):
if not ids: return {}
cr.execute( 'SELECT move_id, SUM(debit) '\
'FROM account_move_line '\
'WHERE move_id IN %s '\
'GROUP BY move_id', (tuple(ids),))
result = dict(cr.fetchall())
for id in ids:
result.setdefault(id, 0.0)
return result
def _search_amount(self, cr, uid, obj, name, args, context):
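        # fnct_search for the computed 'amount' field: for each domain condition,
        # collect the move ids whose SUM(debit) satisfies it and return an
        # ('id', 'in', ids) domain (or a domain matching nothing).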
ids = set()
for cond in args:
amount = cond[2]
if isinstance(cond[2],(list,tuple)):
if cond[1] in ['in','not in']:
amount = tuple(cond[2])
else:
continue
else:
if cond[1] in ['=like', 'like', 'not like', 'ilike', 'not ilike', 'in', 'not in', 'child_of']:
continue
cr.execute("select move_id from account_move_line group by move_id having sum(debit) %s %%s" % (cond[1]),(amount,))
res_ids = set(id[0] for id in cr.fetchall())
ids = ids and (ids & res_ids) or res_ids
if ids:
return [('id', 'in', tuple(ids))]
return [('id', '=', '0')]
def _get_move_from_lines(self, cr, uid, ids, context=None):
line_obj = self.pool.get('account.move.line')
return [line.move_id.id for line in line_obj.browse(cr, uid, ids, context=context)]
_columns = {
'name': fields.char('Number', required=True, copy=False),
'ref': fields.char('Reference', copy=False),
'period_id': fields.many2one('account.period', 'Period', required=True, states={'posted':[('readonly',True)]}),
'journal_id': fields.many2one('account.journal', 'Journal', required=True, states={'posted':[('readonly',True)]}),
'state': fields.selection(
[('draft','Unposted'), ('posted','Posted')], 'Status',
required=True, readonly=True, copy=False,
help='All manually created new journal entries are usually in the status \'Unposted\', '
'but you can set the option to skip that status on the related journal. '
'In that case, they will behave as journal entries automatically created by the '
'system on document validation (invoices, bank statements...) and will be created '
'in \'Posted\' status.'),
'line_id': fields.one2many('account.move.line', 'move_id', 'Entries',
states={'posted':[('readonly',True)]},
copy=True),
'to_check': fields.boolean('To Review', help='Check this box if you are unsure of that journal entry and if you want to note it as \'to be reviewed\' by an accounting expert.'),
'partner_id': fields.related('line_id', 'partner_id', type="many2one", relation="res.partner", string="Partner", store={
_name: (lambda self, cr,uid,ids,c: ids, ['line_id'], 10),
'account.move.line': (_get_move_from_lines, ['partner_id'],10)
}),
'amount': fields.function(_amount_compute, string='Amount', digits_compute=dp.get_precision('Account'), type='float', fnct_search=_search_amount),
'date': fields.date('Date', required=True, states={'posted':[('readonly',True)]}, select=True),
'narration':fields.text('Internal Note'),
'company_id': fields.related('journal_id','company_id',type='many2one',relation='res.company',string='Company', store=True, readonly=True),
'balance': fields.float('balance', digits_compute=dp.get_precision('Account'), help="This is a field only used for internal purpose and shouldn't be displayed"),
}
_defaults = {
'name': '/',
'state': 'draft',
'period_id': _get_period,
'date': fields.date.context_today,
'company_id': lambda self, cr, uid, c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.id,
}
def _check_centralisation(self, cursor, user, ids, context=None):
for move in self.browse(cursor, user, ids, context=context):
if move.journal_id.centralisation:
move_ids = self.search(cursor, user, [
('period_id', '=', move.period_id.id),
('journal_id', '=', move.journal_id.id),
])
if len(move_ids) > 1:
return False
return True
_constraints = [
(_check_centralisation,
'You cannot create more than one move per period on a centralized journal.',
['journal_id']),
]
def post(self, cr, uid, ids, context=None):
if context is None:
context = {}
invoice = context.get('invoice', False)
valid_moves = self.validate(cr, uid, ids, context)
if not valid_moves:
raise osv.except_osv(_('Error!'), _('You cannot validate a non-balanced entry.\nMake sure you have configured payment terms properly.\nThe latest payment term line should be of the "Balance" type.'))
obj_sequence = self.pool.get('ir.sequence')
for move in self.browse(cr, uid, valid_moves, context=context):
if move.name =='/':
new_name = False
journal = move.journal_id
if invoice and invoice.internal_number:
new_name = invoice.internal_number
else:
if journal.sequence_id:
c = {'fiscalyear_id': move.period_id.fiscalyear_id.id}
new_name = obj_sequence.next_by_id(cr, uid, journal.sequence_id.id, c)
else:
raise osv.except_osv(_('Error!'), _('Please define a sequence on the journal.'))
if new_name:
self.write(cr, uid, [move.id], {'name':new_name})
cr.execute('UPDATE account_move '\
'SET state=%s '\
'WHERE id IN %s',
('posted', tuple(valid_moves),))
self.invalidate_cache(cr, uid, context=context)
return True
def button_validate(self, cursor, user, ids, context=None):
for move in self.browse(cursor, user, ids, context=context):
# check that all accounts have the same topmost ancestor
top_common = None
for line in move.line_id:
account = line.account_id
top_account = account
while top_account.parent_id:
top_account = top_account.parent_id
if not top_common:
top_common = top_account
elif top_account.id != top_common.id:
raise osv.except_osv(_('Error!'),
_('You cannot validate this journal entry because account "%s" does not belong to chart of accounts "%s".') % (account.name, top_common.name))
return self.post(cursor, user, ids, context=context)
def button_cancel(self, cr, uid, ids, context=None):
for line in self.browse(cr, uid, ids, context=context):
if not line.journal_id.update_posted:
raise osv.except_osv(_('Error!'), _('You cannot modify a posted entry of this journal.\nFirst you should set the journal to allow cancelling entries.'))
if ids:
cr.execute('UPDATE account_move '\
'SET state=%s '\
'WHERE id IN %s', ('draft', tuple(ids),))
self.invalidate_cache(cr, uid, context=context)
return True
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
c = context.copy()
c['novalidate'] = True
result = super(account_move, self).write(cr, uid, ids, vals, c)
self.validate(cr, uid, ids, context=context)
return result
def create(self, cr, uid, vals, context=None):
context = dict(context or {})
if vals.get('line_id'):
if vals.get('journal_id'):
for l in vals['line_id']:
if not l[0]:
l[2]['journal_id'] = vals['journal_id']
context['journal_id'] = vals['journal_id']
if 'period_id' in vals:
for l in vals['line_id']:
if not l[0]:
l[2]['period_id'] = vals['period_id']
context['period_id'] = vals['period_id']
else:
default_period = self._get_period(cr, uid, context)
for l in vals['line_id']:
if not l[0]:
l[2]['period_id'] = default_period
context['period_id'] = default_period
c = context.copy()
c['novalidate'] = True
c['period_id'] = vals['period_id'] if 'period_id' in vals else self._get_period(cr, uid, context)
c['journal_id'] = vals['journal_id']
if 'date' in vals: c['date'] = vals['date']
result = super(account_move, self).create(cr, uid, vals, c)
tmp = self.validate(cr, uid, [result], context)
journal = self.pool.get('account.journal').browse(cr, uid, vals['journal_id'], context)
if journal.entry_posted and tmp:
self.button_validate(cr,uid, [result], context)
else:
result = super(account_move, self).create(cr, uid, vals, context)
return result
def unlink(self, cr, uid, ids, context=None, check=True):
context = dict(context or {})
if isinstance(ids, (int, long)):
ids = [ids]
toremove = []
obj_move_line = self.pool.get('account.move.line')
for move in self.browse(cr, uid, ids, context=context):
if move['state'] != 'draft':
raise osv.except_osv(_('User Error!'),
_('You cannot delete a posted journal entry "%s".') % \
move['name'])
for line in move.line_id:
if line.invoice:
raise osv.except_osv(_('User Error!'),
_("Move cannot be deleted if linked to an invoice. (Invoice: %s - Move ID:%s)") % \
(line.invoice.number,move.name))
line_ids = map(lambda x: x.id, move.line_id)
context['journal_id'] = move.journal_id.id
context['period_id'] = move.period_id.id
obj_move_line._update_check(cr, uid, line_ids, context)
obj_move_line.unlink(cr, uid, line_ids, context=context)
toremove.append(move.id)
result = super(account_move, self).unlink(cr, uid, toremove, context)
return result
def _compute_balance(self, cr, uid, id, context=None):
move = self.browse(cr, uid, id, context=context)
amount = 0
for line in move.line_id:
amount+= (line.debit - line.credit)
return amount
def _centralise(self, cr, uid, move, mode, context=None):
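        # Find or create the move's centralisation line for `mode` ('debit' or
        # 'credit') on the journal's default counterpart account, then write the
        # sum of that mode over the other lines into the opposite column so the
        # move stays balanced; amounts in currency get a similar adjustment line.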
assert mode in ('debit', 'credit'), 'Invalid Mode' #to prevent sql injection
currency_obj = self.pool.get('res.currency')
account_move_line_obj = self.pool.get('account.move.line')
context = dict(context or {})
if mode=='credit':
account_id = move.journal_id.default_debit_account_id.id
mode2 = 'debit'
if not account_id:
raise osv.except_osv(_('User Error!'),
_('There is no default debit account defined \n' \
'on journal "%s".') % move.journal_id.name)
else:
account_id = move.journal_id.default_credit_account_id.id
mode2 = 'credit'
if not account_id:
raise osv.except_osv(_('User Error!'),
_('There is no default credit account defined \n' \
'on journal "%s".') % move.journal_id.name)
# find the first line of this move with the current mode
# or create it if it doesn't exist
cr.execute('select id from account_move_line where move_id=%s and centralisation=%s limit 1', (move.id, mode))
res = cr.fetchone()
if res:
line_id = res[0]
else:
context.update({'journal_id': move.journal_id.id, 'period_id': move.period_id.id})
line_id = account_move_line_obj.create(cr, uid, {
'name': _(mode.capitalize()+' Centralisation'),
'centralisation': mode,
'partner_id': False,
'account_id': account_id,
'move_id': move.id,
'journal_id': move.journal_id.id,
'period_id': move.period_id.id,
'date': move.period_id.date_stop,
'debit': 0.0,
'credit': 0.0,
}, context)
# find the first line of this move with the other mode
# so that we can exclude it from our calculation
cr.execute('select id from account_move_line where move_id=%s and centralisation=%s limit 1', (move.id, mode2))
res = cr.fetchone()
if res:
line_id2 = res[0]
else:
line_id2 = 0
cr.execute('SELECT SUM(%s) FROM account_move_line WHERE move_id=%%s AND id!=%%s' % (mode,), (move.id, line_id2))
result = cr.fetchone()[0] or 0.0
cr.execute('update account_move_line set '+mode2+'=%s where id=%s', (result, line_id))
account_move_line_obj.invalidate_cache(cr, uid, [mode2], [line_id], context=context)
#adjust also the amount in currency if needed
cr.execute("select currency_id, sum(amount_currency) as amount_currency from account_move_line where move_id = %s and currency_id is not null group by currency_id", (move.id,))
for row in cr.dictfetchall():
currency_id = currency_obj.browse(cr, uid, row['currency_id'], context=context)
if not currency_obj.is_zero(cr, uid, currency_id, row['amount_currency']):
amount_currency = row['amount_currency'] * -1
account_id = amount_currency > 0 and move.journal_id.default_debit_account_id.id or move.journal_id.default_credit_account_id.id
                cr.execute('select id from account_move_line where move_id=%s and centralisation=\'currency\' and currency_id = %s limit 1', (move.id, row['currency_id']))
res = cr.fetchone()
if res:
cr.execute('update account_move_line set amount_currency=%s , account_id=%s where id=%s', (amount_currency, account_id, res[0]))
account_move_line_obj.invalidate_cache(cr, uid, ['amount_currency', 'account_id'], [res[0]], context=context)
else:
context.update({'journal_id': move.journal_id.id, 'period_id': move.period_id.id})
line_id = account_move_line_obj.create(cr, uid, {
'name': _('Currency Adjustment'),
'centralisation': 'currency',
'partner_id': False,
'account_id': account_id,
'move_id': move.id,
'journal_id': move.journal_id.id,
'period_id': move.period_id.id,
'date': move.period_id.date_stop,
'debit': 0.0,
'credit': 0.0,
'currency_id': row['currency_id'],
'amount_currency': amount_currency,
}, context)
return True
#
# Validate a balanced move. If it is a centralised journal, create a move.
#
def validate(self, cr, uid, ids, context=None):
if context and ('__last_update' in context):
del context['__last_update']
valid_moves = [] #Maintains a list of moves which can be responsible to create analytic entries
obj_analytic_line = self.pool.get('account.analytic.line')
obj_move_line = self.pool.get('account.move.line')
for move in self.browse(cr, uid, ids, context):
journal = move.journal_id
amount = 0
line_ids = []
line_draft_ids = []
company_id = None
# makes sure we don't use outdated period
obj_move_line._update_journal_check(cr, uid, journal.id, move.period_id.id, context=context)
for line in move.line_id:
amount += line.debit - line.credit
line_ids.append(line.id)
if line.state=='draft':
line_draft_ids.append(line.id)
if not company_id:
company_id = line.account_id.company_id.id
if not company_id == line.account_id.company_id.id:
raise osv.except_osv(_('Error!'), _("Cannot create moves for different companies."))
if line.account_id.currency_id and line.currency_id:
if line.account_id.currency_id.id != line.currency_id.id and (line.account_id.currency_id.id != line.account_id.company_id.currency_id.id):
raise osv.except_osv(_('Error!'), _("""Cannot create move with currency different from ..""") % (line.account_id.code, line.account_id.name))
if abs(amount) < 10 ** -4:
# If the move is balanced
# Add to the list of valid moves
# (analytic lines will be created later for valid moves)
valid_moves.append(move)
# Check whether the move lines are confirmed
if not line_draft_ids:
continue
# Update the move lines (set them as valid)
obj_move_line.write(cr, uid, line_draft_ids, {
'state': 'valid'
}, context, check=False)
account = {}
account2 = {}
if journal.type in ('purchase','sale'):
for line in move.line_id:
code = amount = 0
key = (line.account_id.id, line.tax_code_id.id)
if key in account2:
code = account2[key][0]
amount = account2[key][1] * (line.debit + line.credit)
elif line.account_id.id in account:
code = account[line.account_id.id][0]
amount = account[line.account_id.id][1] * (line.debit + line.credit)
if (code or amount) and not (line.tax_code_id or line.tax_amount):
obj_move_line.write(cr, uid, [line.id], {
'tax_code_id': code,
'tax_amount': amount
}, context, check=False)
elif journal.centralisation:
# If the move is not balanced, it must be centralised...
# Add to the list of valid moves
# (analytic lines will be created later for valid moves)
valid_moves.append(move)
#
# Update the move lines (set them as valid)
#
self._centralise(cr, uid, move, 'debit', context=context)
self._centralise(cr, uid, move, 'credit', context=context)
obj_move_line.write(cr, uid, line_draft_ids, {
'state': 'valid'
}, context, check=False)
else:
# We can't validate it (it's unbalanced)
# Setting the lines as draft
not_draft_line_ids = list(set(line_ids) - set(line_draft_ids))
if not_draft_line_ids:
obj_move_line.write(cr, uid, not_draft_line_ids, {
'state': 'draft'
}, context, check=False)
# Create analytic lines for the valid moves
for record in valid_moves:
obj_move_line.create_analytic_lines(cr, uid, [line.id for line in record.line_id], context)
valid_moves = [move.id for move in valid_moves]
return len(valid_moves) > 0 and valid_moves or False
class account_move_reconcile(osv.osv):
_name = "account.move.reconcile"
_description = "Account Reconciliation"
_columns = {
'name': fields.char('Name', required=True),
'type': fields.char('Type', required=True),
'line_id': fields.one2many('account.move.line', 'reconcile_id', 'Entry Lines'),
'line_partial_ids': fields.one2many('account.move.line', 'reconcile_partial_id', 'Partial Entry lines'),
'create_date': fields.date('Creation date', readonly=True),
        'opening_reconciliation': fields.boolean('Opening Entries Reconciliation', help="Is this reconciliation produced by the opening of a new fiscal year?"),
}
_defaults = {
'name': lambda self,cr,uid,ctx=None: self.pool.get('ir.sequence').get(cr, uid, 'account.reconcile', context=ctx) or '/',
}
# You cannot unlink a reconciliation if it is a opening_reconciliation one,
# you should use the generate opening entries wizard for that
def unlink(self, cr, uid, ids, context=None):
for move_rec in self.browse(cr, uid, ids, context=context):
if move_rec.opening_reconciliation:
                raise osv.except_osv(_('Error!'), _('You cannot unreconcile journal items if they have been generated by the '
                                                    'opening/closing fiscal year process.'))
return super(account_move_reconcile, self).unlink(cr, uid, ids, context=context)
# Look in the line_id and line_partial_ids to ensure the partner is the same or empty
# on all lines. We allow that only for opening/closing period
def _check_same_partner(self, cr, uid, ids, context=None):
for reconcile in self.browse(cr, uid, ids, context=context):
move_lines = []
if not reconcile.opening_reconciliation:
if reconcile.line_id:
first_partner = reconcile.line_id[0].partner_id.id
move_lines = reconcile.line_id
elif reconcile.line_partial_ids:
first_partner = reconcile.line_partial_ids[0].partner_id.id
move_lines = reconcile.line_partial_ids
if any([(line.account_id.type in ('receivable', 'payable') and line.partner_id.id != first_partner) for line in move_lines]):
return False
return True
_constraints = [
(_check_same_partner, 'You can only reconcile journal items with the same partner.', ['line_id', 'line_partial_ids']),
]
def reconcile_partial_check(self, cr, uid, ids, type='auto', context=None):
total = 0.0
for rec in self.browse(cr, uid, ids, context=context):
for line in rec.line_partial_ids:
if line.account_id.currency_id:
total += line.amount_currency
else:
total += (line.debit or 0.0) - (line.credit or 0.0)
if not total:
self.pool.get('account.move.line').write(cr, uid,
map(lambda x: x.id, rec.line_partial_ids),
{'reconcile_id': rec.id },
context=context
)
return True
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
result = []
for r in self.browse(cr, uid, ids, context=context):
total = reduce(lambda y,t: (t.debit or 0.0) - (t.credit or 0.0) + y, r.line_partial_ids, 0.0)
if total:
name = '%s (%.2f)' % (r.name, total)
result.append((r.id,name))
else:
result.append((r.id,r.name))
return result
#----------------------------------------------------------
# Tax
#----------------------------------------------------------
"""
a documenter
child_depend: la taxe depend des taxes filles
"""
class account_tax_code(osv.osv):
"""
A code for the tax object.
This code is used for some tax declarations.
"""
def _sum(self, cr, uid, ids, name, args, context, where ='', where_params=()):
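        # Sum line.tax_amount per tax code over the given scope (`where`), then
        # add the totals of child codes recursively, weighted by their sign and
        # rounded at the 'Account' decimal precision.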
parent_ids = tuple(self.search(cr, uid, [('parent_id', 'child_of', ids)]))
if context.get('based_on', 'invoices') == 'payments':
cr.execute('SELECT line.tax_code_id, sum(line.tax_amount) \
FROM account_move_line AS line, \
account_move AS move \
LEFT JOIN account_invoice invoice ON \
(invoice.move_id = move.id) \
WHERE line.tax_code_id IN %s '+where+' \
AND move.id = line.move_id \
AND ((invoice.state = \'paid\') \
OR (invoice.id IS NULL)) \
GROUP BY line.tax_code_id',
(parent_ids,) + where_params)
else:
cr.execute('SELECT line.tax_code_id, sum(line.tax_amount) \
FROM account_move_line AS line, \
account_move AS move \
WHERE line.tax_code_id IN %s '+where+' \
AND move.id = line.move_id \
GROUP BY line.tax_code_id',
(parent_ids,) + where_params)
res=dict(cr.fetchall())
obj_precision = self.pool.get('decimal.precision')
res2 = {}
for record in self.browse(cr, uid, ids, context=context):
def _rec_get(record):
amount = res.get(record.id) or 0.0
for rec in record.child_ids:
amount += _rec_get(rec) * rec.sign
return amount
res2[record.id] = round(_rec_get(record), obj_precision.precision_get(cr, uid, 'Account'))
return res2
def _sum_year(self, cr, uid, ids, name, args, context=None):
if context is None:
context = {}
move_state = ('posted', )
if context.get('state', 'all') == 'all':
move_state = ('draft', 'posted', )
if context.get('fiscalyear_id', False):
fiscalyear_id = [context['fiscalyear_id']]
else:
fiscalyear_id = self.pool.get('account.fiscalyear').finds(cr, uid, exception=False)
where = ''
where_params = ()
if fiscalyear_id:
pids = []
for fy in fiscalyear_id:
pids += map(lambda x: str(x.id), self.pool.get('account.fiscalyear').browse(cr, uid, fy).period_ids)
if pids:
where = ' AND line.period_id IN %s AND move.state IN %s '
where_params = (tuple(pids), move_state)
return self._sum(cr, uid, ids, name, args, context,
where=where, where_params=where_params)
def _sum_period(self, cr, uid, ids, name, args, context):
if context is None:
context = {}
move_state = ('posted', )
if context.get('state', False) == 'all':
move_state = ('draft', 'posted', )
if context.get('period_id', False):
period_id = context['period_id']
else:
period_id = self.pool.get('account.period').find(cr, uid, context=context)
if not period_id:
return dict.fromkeys(ids, 0.0)
period_id = period_id[0]
return self._sum(cr, uid, ids, name, args, context,
where=' AND line.period_id=%s AND move.state IN %s', where_params=(period_id, move_state))
_name = 'account.tax.code'
_description = 'Tax Code'
_rec_name = 'code'
_order = 'sequence, code'
_columns = {
'name': fields.char('Tax Case Name', required=True, translate=True),
'code': fields.char('Case Code', size=64),
'info': fields.text('Description'),
'sum': fields.function(_sum_year, string="Year Sum"),
'sum_period': fields.function(_sum_period, string="Period Sum"),
'parent_id': fields.many2one('account.tax.code', 'Parent Code', select=True),
'child_ids': fields.one2many('account.tax.code', 'parent_id', 'Child Codes'),
'line_ids': fields.one2many('account.move.line', 'tax_code_id', 'Lines'),
'company_id': fields.many2one('res.company', 'Company', required=True),
        'sign': fields.float('Coefficient for parent', required=True, help='You can specify here the coefficient that will be used when consolidating the amount of this case into its parent. For example, set 1/-1 if you want to add/subtract it.'),
'notprintable':fields.boolean("Not Printable in Invoice", help="Check this box if you don't want any tax related to this tax code to appear on invoices"),
'sequence': fields.integer('Sequence', help="Determine the display order in the report 'Accounting \ Reporting \ Generic Reporting \ Taxes \ Taxes Report'"),
}
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=80):
if not args:
args = []
if operator in expression.NEGATIVE_TERM_OPERATORS:
domain = [('code', operator, name), ('name', operator, name)]
else:
domain = ['|', ('code', operator, name), ('name', operator, name)]
ids = self.search(cr, user, expression.AND([domain, args]), limit=limit, context=context)
return self.name_get(cr, user, ids, context=context)
def name_get(self, cr, uid, ids, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
if not ids:
return []
reads = self.read(cr, uid, ids, ['name','code'], context=context, load='_classic_write')
return [(x['id'], (x['code'] and (x['code'] + ' - ') or '') + x['name']) \
for x in reads]
def _default_company(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
if user.company_id:
return user.company_id.id
return self.pool.get('res.company').search(cr, uid, [('parent_id', '=', False)])[0]
_defaults = {
'company_id': _default_company,
'sign': 1.0,
'notprintable': False,
}
_check_recursion = check_cycle
_constraints = [
(_check_recursion, 'Error!\nYou cannot create recursive accounts.', ['parent_id'])
]
_order = 'code'
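# Helper returning a callable usable as digits_compute: tax amounts use the
# 'Account' decimal precision plus 3 extra digits (see change_digit_tax below).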
def get_precision_tax():
def change_digit_tax(cr):
res = openerp.registry(cr.dbname)['decimal.precision'].precision_get(cr, SUPERUSER_ID, 'Account')
return (16, res+3)
return change_digit_tax
class account_tax(osv.osv):
"""
A tax object.
Type: percent, fixed, none, code
PERCENT: tax = price * amount
FIXED: tax = price + amount
NONE: no tax line
CODE: execute python code. localcontext = {'price_unit':pu}
return result in the context
Ex: result=round(price_unit*0.21,4)
"""
def copy_data(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
this = self.browse(cr, uid, id, context=context)
tmp_default = dict(default, name=_("%s (Copy)") % this.name)
return super(account_tax, self).copy_data(cr, uid, id, default=tmp_default, context=context)
_name = 'account.tax'
_description = 'Tax'
_columns = {
'name': fields.char('Tax Name', required=True, translate=True, help="This name will be displayed on reports"),
'sequence': fields.integer('Sequence', required=True, help="The sequence field is used to order the tax lines from the lowest sequences to the higher ones. The order is important if you have a tax with several tax children. In this case, the evaluation order is important."),
'amount': fields.float('Amount', required=True, digits_compute=get_precision_tax(), help="For taxes of type percentage, enter % ratio between 0-1."),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the tax without removing it."),
'type': fields.selection( [('percent','Percentage'), ('fixed','Fixed Amount'), ('none','None'), ('code','Python Code'), ('balance','Balance')], 'Tax Type', required=True,
help="The computation method for the tax amount."),
'applicable_type': fields.selection( [('true','Always'), ('code','Given by Python Code')], 'Applicability', required=True,
help="If not applicable (computed through a Python code), the tax won't appear on the invoice."),
'domain':fields.char('Domain', help="This field is only used if you develop your own module allowing developers to create specific taxes in a custom domain."),
'account_collected_id':fields.many2one('account.account', 'Invoice Tax Account', help="Set the account that will be set by default on invoice tax lines for invoices. Leave empty to use the expense account."),
'account_paid_id':fields.many2one('account.account', 'Refund Tax Account', help="Set the account that will be set by default on invoice tax lines for refunds. Leave empty to use the expense account."),
'account_analytic_collected_id':fields.many2one('account.analytic.account', 'Invoice Tax Analytic Account', help="Set the analytic account that will be used by default on the invoice tax lines for invoices. Leave empty if you don't want to use an analytic account on the invoice tax lines by default."),
'account_analytic_paid_id':fields.many2one('account.analytic.account', 'Refund Tax Analytic Account', help="Set the analytic account that will be used by default on the invoice tax lines for refunds. Leave empty if you don't want to use an analytic account on the invoice tax lines by default."),
'parent_id':fields.many2one('account.tax', 'Parent Tax Account', select=True),
'child_ids':fields.one2many('account.tax', 'parent_id', 'Child Tax Accounts', copy=True),
'child_depend':fields.boolean('Tax on Children', help="Set if the tax computation is based on the computation of child taxes rather than on the total amount."),
'python_compute':fields.text('Python Code'),
'python_compute_inv':fields.text('Python Code (reverse)'),
'python_applicable':fields.text('Applicable Code'),
#
# Fields used for the Tax declaration
#
'base_code_id': fields.many2one('account.tax.code', 'Account Base Code', help="Use this code for the tax declaration."),
'tax_code_id': fields.many2one('account.tax.code', 'Account Tax Code', help="Use this code for the tax declaration."),
'base_sign': fields.float('Base Code Sign', help="Usually 1 or -1.", digits_compute=get_precision_tax()),
'tax_sign': fields.float('Tax Code Sign', help="Usually 1 or -1.", digits_compute=get_precision_tax()),
# Same fields for refund invoices
'ref_base_code_id': fields.many2one('account.tax.code', 'Refund Base Code', help="Use this code for the tax declaration."),
'ref_tax_code_id': fields.many2one('account.tax.code', 'Refund Tax Code', help="Use this code for the tax declaration."),
'ref_base_sign': fields.float('Refund Base Code Sign', help="Usually 1 or -1.", digits_compute=get_precision_tax()),
'ref_tax_sign': fields.float('Refund Tax Code Sign', help="Usually 1 or -1.", digits_compute=get_precision_tax()),
'include_base_amount': fields.boolean('Included in base amount', help="Indicates if the amount of tax must be included in the base amount for the computation of the next taxes"),
'company_id': fields.many2one('res.company', 'Company', required=True),
'description': fields.char('Tax Code'),
'price_include': fields.boolean('Tax Included in Price', help="Check this if the price you use on the product and invoices includes this tax."),
'type_tax_use': fields.selection([('sale','Sale'),('purchase','Purchase'),('all','All')], 'Tax Application', required=True)
}
_sql_constraints = [
('name_company_uniq', 'unique(name, company_id)', 'Tax Name must be unique per company!'),
]
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=80):
"""
        Returns a list of tuples containing id, name, as internally it is called {def name_get}
        result format: {[(id, name), (id, name), ...]}
@param cr: A database cursor
@param user: ID of the user currently logged in
@param name: name to search
@param args: other arguments
@param operator: default operator is 'ilike', it can be changed
@param context: context arguments, like lang, time zone
@param limit: Returns first 'n' ids of complete result, default is 80.
        @return: Returns a list of tuples containing id and name
"""
if not args:
args = []
if operator in expression.NEGATIVE_TERM_OPERATORS:
domain = [('description', operator, name), ('name', operator, name)]
else:
domain = ['|', ('description', operator, name), ('name', operator, name)]
ids = self.search(cr, user, expression.AND([domain, args]), limit=limit, context=context)
return self.name_get(cr, user, ids, context=context)
def write(self, cr, uid, ids, vals, context=None):
if vals.get('type', False) and vals['type'] in ('none', 'code'):
vals.update({'amount': 0.0})
return super(account_tax, self).write(cr, uid, ids, vals, context=context)
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
if context is None:
context = {}
journal_pool = self.pool.get('account.journal')
if context.get('type'):
if context.get('type') in ('out_invoice','out_refund'):
args += [('type_tax_use','in',['sale','all'])]
elif context.get('type') in ('in_invoice','in_refund'):
args += [('type_tax_use','in',['purchase','all'])]
if context.get('journal_id'):
journal = journal_pool.browse(cr, uid, context.get('journal_id'))
if journal.type in ('sale', 'purchase'):
args += [('type_tax_use','in',[journal.type,'all'])]
return super(account_tax, self).search(cr, uid, args, offset, limit, order, context, count)
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
res = []
for record in self.read(cr, uid, ids, ['description','name'], context=context):
name = record['description'] and record['description'] or record['name']
res.append((record['id'],name ))
return res
def _default_company(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
if user.company_id:
return user.company_id.id
return self.pool.get('res.company').search(cr, uid, [('parent_id', '=', False)])[0]
_defaults = {
'python_compute': '''# price_unit\n# or False\n# product: product.product object or None\n# partner: res.partner object or None\n\nresult = price_unit * 0.10''',
'python_compute_inv': '''# price_unit\n# product: product.product object or False\n\nresult = price_unit * 0.10''',
'applicable_type': 'true',
'type': 'percent',
'amount': 0,
'price_include': 0,
'active': 1,
'type_tax_use': 'all',
'sequence': 1,
'ref_tax_sign': 1,
'ref_base_sign': 1,
'tax_sign': 1,
'base_sign': 1,
'include_base_amount': False,
'company_id': _default_company,
}
_order = 'sequence'
def _applicable(self, cr, uid, taxes, price_unit, product=None, partner=None):
res = []
for tax in taxes:
if tax.applicable_type=='code':
localdict = {'price_unit':price_unit, 'product':product, 'partner':partner}
exec tax.python_applicable in localdict
if localdict.get('result', False):
res.append(tax)
else:
res.append(tax)
return res
def _unit_compute(self, cr, uid, taxes, price_unit, product=None, partner=None, quantity=0):
taxes = self._applicable(cr, uid, taxes, price_unit ,product, partner)
res = []
cur_price_unit=price_unit
for tax in taxes:
# we compute the amount for the current tax object and append it to the result
data = {'id':tax.id,
'name': tax.name,
'account_collected_id':tax.account_collected_id.id,
'account_paid_id':tax.account_paid_id.id,
'account_analytic_collected_id': tax.account_analytic_collected_id.id,
'account_analytic_paid_id': tax.account_analytic_paid_id.id,
'base_code_id': tax.base_code_id.id,
'ref_base_code_id': tax.ref_base_code_id.id,
'sequence': tax.sequence,
'base_sign': tax.base_sign,
'tax_sign': tax.tax_sign,
'ref_base_sign': tax.ref_base_sign,
'ref_tax_sign': tax.ref_tax_sign,
'price_unit': cur_price_unit,
'tax_code_id': tax.tax_code_id.id,
'ref_tax_code_id': tax.ref_tax_code_id.id,
}
res.append(data)
if tax.type=='percent':
amount = cur_price_unit * tax.amount
data['amount'] = amount
elif tax.type=='fixed':
data['amount'] = tax.amount
data['tax_amount']=quantity
# data['amount'] = quantity
elif tax.type=='code':
localdict = {'price_unit':cur_price_unit, 'product':product, 'partner':partner, 'quantity': quantity}
exec tax.python_compute in localdict
amount = localdict['result']
data['amount'] = amount
elif tax.type=='balance':
data['amount'] = cur_price_unit - reduce(lambda x,y: y.get('amount',0.0)+x, res, 0.0)
data['balance'] = cur_price_unit
amount2 = data.get('amount', 0.0)
if tax.child_ids:
if tax.child_depend:
latest = res.pop()
amount = amount2
child_tax = self._unit_compute(cr, uid, tax.child_ids, amount, product, partner, quantity)
res.extend(child_tax)
for child in child_tax:
amount2 += child.get('amount', 0.0)
if tax.child_depend:
for r in res:
for name in ('base','ref_base'):
if latest[name+'_code_id'] and latest[name+'_sign'] and not r[name+'_code_id']:
r[name+'_code_id'] = latest[name+'_code_id']
r[name+'_sign'] = latest[name+'_sign']
r['price_unit'] = latest['price_unit']
latest[name+'_code_id'] = False
for name in ('tax','ref_tax'):
if latest[name+'_code_id'] and latest[name+'_sign'] and not r[name+'_code_id']:
r[name+'_code_id'] = latest[name+'_code_id']
r[name+'_sign'] = latest[name+'_sign']
r['amount'] = data['amount']
latest[name+'_code_id'] = False
if tax.include_base_amount:
cur_price_unit+=amount2
return res
def compute_for_bank_reconciliation(self, cr, uid, tax_id, amount, context=None):
""" Called by RPC by the bank statement reconciliation widget """
tax = self.browse(cr, uid, tax_id, context=context)
        return self.compute_all(cr, uid, [tax], amount, 1) # TOCHECK may use force_excluded parameter
@api.v7
def compute_all(self, cr, uid, taxes, price_unit, quantity, product=None, partner=None, force_excluded=False):
"""
        :param force_excluded: boolean used to indicate that we do not want to take into account the value of the
            field price_include of the tax. It is used when encoding by line, where it does not matter whether the
            tax was encoded with that boolean set to True or False.
RETURN: {
'total': 0.0, # Total without taxes
            'total_included': 0.0, # Total with taxes
'taxes': [] # List of taxes, see compute for the format
}
"""
# By default, for each tax, tax amount will first be computed
# and rounded at the 'Account' decimal precision for each
# PO/SO/invoice line and then these rounded amounts will be
# summed, leading to the total amount for that tax. But, if the
# company has tax_calculation_rounding_method = round_globally,
# we still follow the same method, but we use a much larger
# precision when we round the tax amount for each line (we use
# the 'Account' decimal precision + 5), and that way it's like
# rounding after the sum of the tax amounts of each line
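        # Illustrative example (hypothetical figures): with 'Account' precision 2
        # and a 21% tax on ten lines of 0.33 each, per-line rounding gives
        # round(0.0693, 2) = 0.07 per line, i.e. 0.70 in total, whereas
        # round_globally keeps 0.0693 per line (precision 2 + 5) so the caller's
        # final sum rounds to 0.69.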
precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')
tax_compute_precision = precision
if taxes and taxes[0].company_id.tax_calculation_rounding_method == 'round_globally':
tax_compute_precision += 5
totalin = totalex = round(price_unit * quantity, precision)
tin = []
tex = []
for tax in taxes:
if not tax.price_include or force_excluded:
tex.append(tax)
else:
tin.append(tax)
tin = self.compute_inv(cr, uid, tin, price_unit, quantity, product=product, partner=partner, precision=tax_compute_precision)
for r in tin:
totalex -= r.get('amount', 0.0)
totlex_qty = 0.0
try:
totlex_qty = totalex/quantity
except:
pass
tex = self._compute(cr, uid, tex, totlex_qty, quantity, product=product, partner=partner, precision=tax_compute_precision)
for r in tex:
totalin += r.get('amount', 0.0)
return {
'total': totalex,
'total_included': totalin,
'taxes': tin + tex
}
@api.v8
def compute_all(self, price_unit, quantity, product=None, partner=None, force_excluded=False):
return self._model.compute_all(
self._cr, self._uid, self, price_unit, quantity,
product=product, partner=partner, force_excluded=force_excluded)
def compute(self, cr, uid, taxes, price_unit, quantity, product=None, partner=None):
_logger.warning("Deprecated, use compute_all(...)['taxes'] instead of compute(...) to manage prices with tax included.")
return self._compute(cr, uid, taxes, price_unit, quantity, product, partner)
def _compute(self, cr, uid, taxes, price_unit, quantity, product=None, partner=None, precision=None):
"""
Compute tax values for given PRICE_UNIT, QUANTITY and a buyer/seller ADDRESS_ID.
RETURN:
[ tax ]
tax = {'name':'', 'amount':0.0, 'account_collected_id':1, 'account_paid_id':2}
one tax for each tax id in IDS and their children
"""
if not precision:
precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')
res = self._unit_compute(cr, uid, taxes, price_unit, product, partner, quantity)
total = 0.0
for r in res:
if r.get('balance',False):
r['amount'] = round(r.get('balance', 0.0) * quantity, precision) - total
else:
r['amount'] = round(r.get('amount', 0.0) * quantity, precision)
total += r['amount']
return res
def _unit_compute_inv(self, cr, uid, taxes, price_unit, product=None, partner=None):
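        # Reverse computation from a tax-included price: non-included fixed taxes
        # are first removed from the price and non-included percentage rates are
        # summed, then each tax amount is derived from the remaining base
        # (child taxes replace their parent when child_depend is set).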
taxes = self._applicable(cr, uid, taxes, price_unit, product, partner)
res = []
taxes.reverse()
cur_price_unit = price_unit
tax_parent_tot = 0.0
for tax in taxes:
if (tax.type=='percent') and not tax.include_base_amount:
tax_parent_tot += tax.amount
for tax in taxes:
if (tax.type=='fixed') and not tax.include_base_amount:
cur_price_unit -= tax.amount
for tax in taxes:
if tax.type=='percent':
if tax.include_base_amount:
amount = cur_price_unit - (cur_price_unit / (1 + tax.amount))
else:
amount = (cur_price_unit / (1 + tax_parent_tot)) * tax.amount
elif tax.type=='fixed':
amount = tax.amount
elif tax.type=='code':
localdict = {'price_unit':cur_price_unit, 'product':product, 'partner':partner}
exec tax.python_compute_inv in localdict
amount = localdict['result']
elif tax.type=='balance':
amount = cur_price_unit - reduce(lambda x,y: y.get('amount',0.0)+x, res, 0.0)
if tax.include_base_amount:
cur_price_unit -= amount
todo = 0
else:
todo = 1
res.append({
'id': tax.id,
'todo': todo,
'name': tax.name,
'amount': amount,
'account_collected_id': tax.account_collected_id.id,
'account_paid_id': tax.account_paid_id.id,
'account_analytic_collected_id': tax.account_analytic_collected_id.id,
'account_analytic_paid_id': tax.account_analytic_paid_id.id,
'base_code_id': tax.base_code_id.id,
'ref_base_code_id': tax.ref_base_code_id.id,
'sequence': tax.sequence,
'base_sign': tax.base_sign,
'tax_sign': tax.tax_sign,
'ref_base_sign': tax.ref_base_sign,
'ref_tax_sign': tax.ref_tax_sign,
'price_unit': cur_price_unit,
'tax_code_id': tax.tax_code_id.id,
'ref_tax_code_id': tax.ref_tax_code_id.id,
})
if tax.child_ids:
if tax.child_depend:
del res[-1]
amount = price_unit
parent_tax = self._unit_compute_inv(cr, uid, tax.child_ids, amount, product, partner)
res.extend(parent_tax)
total = 0.0
for r in res:
if r['todo']:
total += r['amount']
for r in res:
r['price_unit'] -= total
r['todo'] = 0
return res
def compute_inv(self, cr, uid, taxes, price_unit, quantity, product=None, partner=None, precision=None):
"""
Compute tax values for given PRICE_UNIT, QUANTITY and a buyer/seller ADDRESS_ID.
Price Unit is a Tax included price
RETURN:
[ tax ]
tax = {'name':'', 'amount':0.0, 'account_collected_id':1, 'account_paid_id':2}
one tax for each tax id in IDS and their children
"""
if not precision:
precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')
res = self._unit_compute_inv(cr, uid, taxes, price_unit, product, partner=None)
total = 0.0
for r in res:
if r.get('balance',False):
r['amount'] = round(r['balance'] * quantity, precision) - total
else:
r['amount'] = round(r['amount'] * quantity, precision)
total += r['amount']
return res
# ---------------------------------------------------------
# Account Entries Models
# ---------------------------------------------------------
class account_model(osv.osv):
_name = "account.model"
_description = "Account Model"
_columns = {
'name': fields.char('Model Name', required=True, help="This is a model for recurring accounting entries"),
'journal_id': fields.many2one('account.journal', 'Journal', required=True),
'company_id': fields.related('journal_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
'lines_id': fields.one2many('account.model.line', 'model_id', 'Model Entries', copy=True),
'legend': fields.text('Legend', readonly=True, size=100),
}
_defaults = {
'legend': lambda self, cr, uid, context:_('You can specify year, month and date in the name of the model using the following labels:\n\n%(year)s: To Specify Year \n%(month)s: To Specify Month \n%(date)s: Current Date\n\ne.g. My model on %(date)s'),
}
def generate(self, cr, uid, ids, data=None, context=None):
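        # For each model, create one account.move in the period matching the
        # optional 'date' in data/context, plus one move line per model line;
        # maturity dates may follow the partner's payment term.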
if data is None:
data = {}
move_ids = []
entry = {}
account_move_obj = self.pool.get('account.move')
account_move_line_obj = self.pool.get('account.move.line')
pt_obj = self.pool.get('account.payment.term')
period_obj = self.pool.get('account.period')
if context is None:
context = {}
if data.get('date', False):
context = dict(context)
context.update({'date': data['date']})
move_date = context.get('date', time.strftime('%Y-%m-%d'))
move_date = datetime.strptime(move_date,"%Y-%m-%d")
for model in self.browse(cr, uid, ids, context=context):
ctx = context.copy()
ctx.update({'company_id': model.company_id.id})
period_ids = period_obj.find(cr, uid, dt=context.get('date', False), context=ctx)
period_id = period_ids and period_ids[0] or False
ctx.update({'journal_id': model.journal_id.id,'period_id': period_id})
try:
entry['name'] = model.name%{'year': move_date.strftime('%Y'), 'month': move_date.strftime('%m'), 'date': move_date.strftime('%Y-%m')}
except:
raise osv.except_osv(_('Wrong Model!'), _('You have a wrong expression "%(...)s" in your model!'))
move_id = account_move_obj.create(cr, uid, {
'ref': entry['name'],
'period_id': period_id,
'journal_id': model.journal_id.id,
'date': context.get('date', fields.date.context_today(self,cr,uid,context=context))
})
move_ids.append(move_id)
for line in model.lines_id:
analytic_account_id = False
if line.analytic_account_id:
if not model.journal_id.analytic_journal_id:
raise osv.except_osv(_('No Analytic Journal!'),_("You have to define an analytic journal on the '%s' journal!") % (model.journal_id.name,))
analytic_account_id = line.analytic_account_id.id
val = {
'move_id': move_id,
'journal_id': model.journal_id.id,
'period_id': period_id,
'analytic_account_id': analytic_account_id
}
date_maturity = context.get('date',time.strftime('%Y-%m-%d'))
if line.date_maturity == 'partner':
if not line.partner_id:
raise osv.except_osv(_('Error!'), _("Maturity date of entry line generated by model line '%s' of model '%s' is based on partner payment term!" \
"\nPlease define partner on it!")%(line.name, model.name))
payment_term_id = False
if model.journal_id.type in ('purchase', 'purchase_refund') and line.partner_id.property_supplier_payment_term:
payment_term_id = line.partner_id.property_supplier_payment_term.id
elif line.partner_id.property_payment_term:
payment_term_id = line.partner_id.property_payment_term.id
if payment_term_id:
pterm_list = pt_obj.compute(cr, uid, payment_term_id, value=1, date_ref=date_maturity)
if pterm_list:
pterm_list = [l[0] for l in pterm_list]
pterm_list.sort()
date_maturity = pterm_list[-1]
val.update({
'name': line.name,
'quantity': line.quantity,
'debit': line.debit,
'credit': line.credit,
'account_id': line.account_id.id,
'move_id': move_id,
'partner_id': line.partner_id.id,
'date': context.get('date', fields.date.context_today(self,cr,uid,context=context)),
'date_maturity': date_maturity
})
account_move_line_obj.create(cr, uid, val, context=ctx)
return move_ids
def onchange_journal_id(self, cr, uid, ids, journal_id, context=None):
company_id = False
if journal_id:
journal = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context)
if journal.company_id.id:
company_id = journal.company_id.id
return {'value': {'company_id': company_id}}
class account_model_line(osv.osv):
_name = "account.model.line"
_description = "Account Model Entries"
_columns = {
'name': fields.char('Name', required=True),
'sequence': fields.integer('Sequence', required=True, help="The sequence field is used to order the resources from lower sequences to higher ones."),
'quantity': fields.float('Quantity', digits_compute=dp.get_precision('Account'), help="The optional quantity on entries."),
'debit': fields.float('Debit', digits_compute=dp.get_precision('Account')),
'credit': fields.float('Credit', digits_compute=dp.get_precision('Account')),
'account_id': fields.many2one('account.account', 'Account', required=True, ondelete="cascade"),
'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account', ondelete="cascade"),
'model_id': fields.many2one('account.model', 'Model', required=True, ondelete="cascade", select=True),
'amount_currency': fields.float('Amount Currency', help="The amount expressed in an optional other currency."),
'currency_id': fields.many2one('res.currency', 'Currency'),
'partner_id': fields.many2one('res.partner', 'Partner'),
'date_maturity': fields.selection([('today','Date of the day'), ('partner','Partner Payment Term')], 'Maturity Date', help="The maturity date of the generated entries for this model. You can choose between the creation date or the creation date of the entries plus the partner payment terms."),
}
_order = 'sequence'
_sql_constraints = [
('credit_debit1', 'CHECK (credit*debit=0)', 'Wrong credit or debit value in model, they must be positive!'),
('credit_debit2', 'CHECK (credit+debit>=0)', 'Wrong credit or debit value in model, they must be positive!'),
]
# ---------------------------------------------------------
# Account Subscription
# ---------------------------------------------------------
class account_subscription(osv.osv):
_name = "account.subscription"
_description = "Account Subscription"
_columns = {
'name': fields.char('Name', required=True),
'ref': fields.char('Reference'),
'model_id': fields.many2one('account.model', 'Model', required=True),
'date_start': fields.date('Start Date', required=True),
'period_total': fields.integer('Number of Periods', required=True),
'period_nbr': fields.integer('Period', required=True),
'period_type': fields.selection([('day','days'),('month','month'),('year','year')], 'Period Type', required=True),
'state': fields.selection([('draft','Draft'),('running','Running'),('done','Done')], 'Status', required=True, readonly=True, copy=False),
'lines_id': fields.one2many('account.subscription.line', 'subscription_id', 'Subscription Lines', copy=True)
}
_defaults = {
'date_start': fields.date.context_today,
'period_type': 'month',
'period_total': 12,
'period_nbr': 1,
'state': 'draft',
}
def state_draft(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state':'draft'})
return False
def check(self, cr, uid, ids, context=None):
todone = []
for sub in self.browse(cr, uid, ids, context=context):
ok = True
for line in sub.lines_id:
if not line.move_id.id:
ok = False
break
if ok:
todone.append(sub.id)
if todone:
self.write(cr, uid, todone, {'state':'done'})
return False
def remove_line(self, cr, uid, ids, context=None):
toremove = []
for sub in self.browse(cr, uid, ids, context=context):
for line in sub.lines_id:
if not line.move_id.id:
toremove.append(line.id)
if toremove:
self.pool.get('account.subscription.line').unlink(cr, uid, toremove)
self.write(cr, uid, ids, {'state':'draft'})
return False
def compute(self, cr, uid, ids, context=None):
for sub in self.browse(cr, uid, ids, context=context):
ds = sub.date_start
for i in range(sub.period_total):
self.pool.get('account.subscription.line').create(cr, uid, {
'date': ds,
'subscription_id': sub.id,
})
if sub.period_type=='day':
ds = (datetime.strptime(ds, '%Y-%m-%d') + relativedelta(days=sub.period_nbr)).strftime('%Y-%m-%d')
if sub.period_type=='month':
ds = (datetime.strptime(ds, '%Y-%m-%d') + relativedelta(months=sub.period_nbr)).strftime('%Y-%m-%d')
if sub.period_type=='year':
ds = (datetime.strptime(ds, '%Y-%m-%d') + relativedelta(years=sub.period_nbr)).strftime('%Y-%m-%d')
self.write(cr, uid, ids, {'state':'running'})
return True
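# Illustrative sketch of compute() above: assuming a subscription with
# date_start='2014-01-01', period_type='month', period_nbr=1 and
# period_total=3, three account.subscription.line records would be created,
# dated 2014-01-01, 2014-02-01 and 2014-03-01, and the subscription would be
# switched to the 'running' state. A hypothetical server-side call:
#
#   sub_obj = self.pool.get('account.subscription')
#   sub_obj.compute(cr, uid, [subscription_id], context=context)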
class account_subscription_line(osv.osv):
_name = "account.subscription.line"
_description = "Account Subscription Line"
_columns = {
'subscription_id': fields.many2one('account.subscription', 'Subscription', required=True, select=True),
'date': fields.date('Date', required=True),
'move_id': fields.many2one('account.move', 'Entry'),
}
def move_create(self, cr, uid, ids, context=None):
tocheck = {}
all_moves = []
obj_model = self.pool.get('account.model')
for line in self.browse(cr, uid, ids, context=context):
data = {
'date': line.date,
}
move_ids = obj_model.generate(cr, uid, [line.subscription_id.model_id.id], data, context)
tocheck[line.subscription_id.id] = True
self.write(cr, uid, [line.id], {'move_id':move_ids[0]})
all_moves.extend(move_ids)
if tocheck:
self.pool.get('account.subscription').check(cr, uid, tocheck.keys(), context)
return all_moves
_rec_name = 'date'
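# Illustrative sketch of move_create() above: for each subscription line it
# delegates to account.model.generate() with the line date, stores the first
# generated move on the line, and finally calls account.subscription.check()
# so that subscriptions whose lines all have an entry switch to 'done'.
# A hypothetical call (line_ids assumed to exist):
#
#   line_obj = self.pool.get('account.subscription.line')
#   move_ids = line_obj.move_create(cr, uid, line_ids, context=context)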
# ---------------------------------------------------------------
# Account Templates: Account, Tax, Tax Code and chart. + Wizard
# ---------------------------------------------------------------
class account_tax_template(osv.osv):
_name = 'account.tax.template'
class account_account_template(osv.osv):
_order = "code"
_name = "account.account.template"
_description ='Templates for Accounts'
_columns = {
'name': fields.char('Name', required=True, select=True),
'currency_id': fields.many2one('res.currency', 'Secondary Currency', help="Forces all moves for this account to have this secondary currency."),
'code': fields.char('Code', size=64, required=True, select=1),
'type': fields.selection([
('receivable','Receivable'),
('payable','Payable'),
('view','View'),
('consolidation','Consolidation'),
('liquidity','Liquidity'),
('other','Regular'),
('closed','Closed'),
], 'Internal Type', required=True, help="This type is used to differentiate types with "\
"special effects in Odoo: view cannot have entries, consolidation accounts "\
"can have children accounts for multi-company consolidations, payable/receivable are for "\
"partner accounts (for debit/credit computations), closed for depreciated accounts."),
'user_type': fields.many2one('account.account.type', 'Account Type', required=True,
help="These types are defined according to your country. The type contains more information "\
"about the account and its specificities."),
'financial_report_ids': fields.many2many('account.financial.report', 'account_template_financial_report', 'account_template_id', 'report_line_id', 'Financial Reports'),
'reconcile': fields.boolean('Allow Reconciliation', help="Check this option if you want the user to reconcile entries in this account."),
'shortcut': fields.char('Shortcut', size=12),
'note': fields.text('Note'),
'parent_id': fields.many2one('account.account.template', 'Parent Account Template', ondelete='cascade', domain=[('type','=','view')]),
'child_parent_ids':fields.one2many('account.account.template', 'parent_id', 'Children'),
'tax_ids': fields.many2many('account.tax.template', 'account_account_template_tax_rel', 'account_id', 'tax_id', 'Default Taxes'),
'nocreate': fields.boolean('Optional create', help="If checked, the new chart of accounts will not contain this account by default."),
'chart_template_id': fields.many2one('account.chart.template', 'Chart Template', help="This optional field allows you to link an account template to a specific chart template that may differ from the one its root parent belongs to. This allows you to define chart templates that extend another and complete it with a few new accounts (you don't need to define the whole structure that is common to both several times)."),
}
_defaults = {
'reconcile': False,
'type': 'view',
'nocreate': False,
}
_check_recursion = check_cycle
_constraints = [
(_check_recursion, 'Error!\nYou cannot create recursive account templates.', ['parent_id']),
]
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
reads = self.read(cr, uid, ids, ['name','code'], context=context)
res = []
for record in reads:
name = record['name']
if record['code']:
name = record['code']+' '+name
res.append((record['id'],name ))
return res
def generate_account(self, cr, uid, chart_template_id, tax_template_ref, acc_template_ref, code_digits, company_id, context=None):
"""
This method for generating accounts from templates.
:param chart_template_id: id of the chart template chosen in the wizard
:param tax_template_ref: Taxes templates reference for write taxes_id in account_account.
:paramacc_template_ref: dictionary with the mappping between the account templates and the real accounts.
:param code_digits: number of digits got from wizard.multi.charts.accounts, this is use for account code.
:param company_id: company_id selected from wizard.multi.charts.accounts.
:returns: return acc_template_ref for reference purpose.
:rtype: dict
"""
if context is None:
context = {}
obj_acc = self.pool.get('account.account')
company_name = self.pool.get('res.company').browse(cr, uid, company_id, context=context).name
template = self.pool.get('account.chart.template').browse(cr, uid, chart_template_id, context=context)
#deactivate the parent_store functionality on account_account for performance reasons
ctx = context.copy()
ctx.update({'defer_parent_store_computation': True})
level_ref = {}
children_acc_criteria = [('chart_template_id','=', chart_template_id)]
if template.account_root_id.id:
children_acc_criteria = ['|'] + children_acc_criteria + ['&',('parent_id','child_of', [template.account_root_id.id]),('chart_template_id','=', False)]
children_acc_template = self.search(cr, uid, [('nocreate','!=',True)] + children_acc_criteria, order='id')
for account_template in self.browse(cr, uid, children_acc_template, context=context):
# skip the root of COA if it's not the main one
if (template.account_root_id.id == account_template.id) and template.parent_id:
continue
tax_ids = []
for tax in account_template.tax_ids:
tax_ids.append(tax_template_ref[tax.id])
code_main = account_template.code and len(account_template.code) or 0
code_acc = account_template.code or ''
if code_main > 0 and code_main <= code_digits and account_template.type != 'view':
code_acc = str(code_acc) + (str('0'*(code_digits-code_main)))
parent_id = account_template.parent_id and ((account_template.parent_id.id in acc_template_ref) and acc_template_ref[account_template.parent_id.id]) or False
#the level has to be given as well at creation time, because of the defer_parent_store_computation in the
#context. Indeed, because of it the parent_left and parent_right are not computed and thus the child_of
#operator does not return the expected values, with the result that the level field would not be computed at all.
if parent_id:
level = parent_id in level_ref and level_ref[parent_id] + 1 or obj_acc._get_level(cr, uid, [parent_id], 'level', None, context=context)[parent_id] + 1
else:
level = 0
vals={
'name': (template.account_root_id.id == account_template.id) and company_name or account_template.name,
'currency_id': account_template.currency_id and account_template.currency_id.id or False,
'code': code_acc,
'type': account_template.type,
'user_type': account_template.user_type and account_template.user_type.id or False,
'reconcile': account_template.reconcile,
'shortcut': account_template.shortcut,
'note': account_template.note,
'financial_report_ids': account_template.financial_report_ids and [(6,0,[x.id for x in account_template.financial_report_ids])] or False,
'parent_id': parent_id,
'tax_ids': [(6,0,tax_ids)],
'company_id': company_id,
'level': level,
}
new_account = obj_acc.create(cr, uid, vals, context=ctx)
acc_template_ref[account_template.id] = new_account
level_ref[new_account] = level
#reactivate the parent_store functionality on account_account
obj_acc._parent_store_compute(cr)
return acc_template_ref
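# Illustrative sketch of generate_account() above: non-view account codes are
# right-padded with zeros up to code_digits. Assuming code_digits=6, a
# template coded '4011' would produce the account code '401100', while a
# 'view' template keeps its code unchanged. The returned dictionary extends
# acc_template_ref, e.g. {account_template_id: new_account_id, ...}, and is
# reused later to resolve parents, taxes and properties.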
class account_add_tmpl_wizard(osv.osv_memory):
"""Add one more account from the template.
With the 'nocreate' option, some accounts may not be created. Use this to add them later."""
_name = 'account.addtmpl.wizard'
def _get_def_cparent(self, cr, uid, context=None):
acc_obj = self.pool.get('account.account')
tmpl_obj = self.pool.get('account.account.template')
tids = tmpl_obj.read(cr, uid, [context['tmpl_ids']], ['parent_id'])
if not tids or not tids[0]['parent_id']:
return False
ptids = tmpl_obj.read(cr, uid, [tids[0]['parent_id'][0]], ['code'])
res = None
if not ptids or not ptids[0]['code']:
raise osv.except_osv(_('Error!'), _('There is no parent code for the template account.'))
res = acc_obj.search(cr, uid, [('code','=',ptids[0]['code'])])
return res and res[0] or False
_columns = {
'cparent_id':fields.many2one('account.account', 'Parent target', help="Creates an account with the selected template under this existing parent.", required=True),
}
_defaults = {
'cparent_id': _get_def_cparent,
}
def action_create(self,cr,uid,ids,context=None):
if context is None:
context = {}
acc_obj = self.pool.get('account.account')
tmpl_obj = self.pool.get('account.account.template')
data = self.read(cr, uid, ids)[0]
company_id = acc_obj.read(cr, uid, [data['cparent_id'][0]], ['company_id'])[0]['company_id'][0]
account_template = tmpl_obj.browse(cr, uid, context['tmpl_ids'])
vals = {
'name': account_template.name,
'currency_id': account_template.currency_id and account_template.currency_id.id or False,
'code': account_template.code,
'type': account_template.type,
'user_type': account_template.user_type and account_template.user_type.id or False,
'reconcile': account_template.reconcile,
'shortcut': account_template.shortcut,
'note': account_template.note,
'parent_id': data['cparent_id'][0],
'company_id': company_id,
}
acc_obj.create(cr, uid, vals)
return {'type':'state', 'state': 'end' }
def action_cancel(self, cr, uid, ids, context=None):
return { 'type': 'state', 'state': 'end' }
class account_tax_code_template(osv.osv):
_name = 'account.tax.code.template'
_description = 'Tax Code Template'
_order = 'sequence, code'
_rec_name = 'code'
_columns = {
'name': fields.char('Tax Case Name', required=True),
'code': fields.char('Case Code', size=64),
'info': fields.text('Description'),
'parent_id': fields.many2one('account.tax.code.template', 'Parent Code', select=True),
'child_ids': fields.one2many('account.tax.code.template', 'parent_id', 'Child Codes'),
'sign': fields.float('Sign For Parent', required=True),
'notprintable':fields.boolean("Not Printable in Invoice", help="Check this box if you don't want any tax related to this tax Code to appear on invoices."),
'sequence': fields.integer(
'Sequence', help=(
"Determine the display order in the report 'Accounting "
"\ Reporting \ Generic Reporting \ Taxes \ Taxes Report'"),
),
}
_defaults = {
'sign': 1.0,
'notprintable': False,
}
def generate_tax_code(self, cr, uid, tax_code_root_id, company_id, context=None):
'''
This function generates the tax codes from the templates of tax code that are children of the given one passed
in argument. Then it returns a dictionary with the mapping between the templates and the real objects.
:param tax_code_root_id: id of the root of all the tax code templates to process
:param company_id: id of the company the wizard is running for
:returns: dictionary with the mapping between the templates and the real objects.
:rtype: dict
'''
obj_tax_code_template = self.pool.get('account.tax.code.template')
obj_tax_code = self.pool.get('account.tax.code')
tax_code_template_ref = {}
company = self.pool.get('res.company').browse(cr, uid, company_id, context=context)
#find all the children of the tax_code_root_id
children_tax_code_template = tax_code_root_id and obj_tax_code_template.search(cr, uid, [('parent_id','child_of',[tax_code_root_id])], order='id') or []
for tax_code_template in obj_tax_code_template.browse(cr, uid, children_tax_code_template, context=context):
vals = {
'name': (tax_code_root_id == tax_code_template.id) and company.name or tax_code_template.name,
'code': tax_code_template.code,
'info': tax_code_template.info,
'parent_id': tax_code_template.parent_id and ((tax_code_template.parent_id.id in tax_code_template_ref) and tax_code_template_ref[tax_code_template.parent_id.id]) or False,
'company_id': company_id,
'sign': tax_code_template.sign,
'sequence': tax_code_template.sequence,
}
#check if this tax code already exists
rec_list = obj_tax_code.search(cr, uid, [('name', '=', vals['name']),('code', '=', vals['code']),('company_id', '=', vals['company_id'])], context=context)
if not rec_list:
#if not yet, create it
new_tax_code = obj_tax_code.create(cr, uid, vals)
#recording the new tax code to do the mapping
tax_code_template_ref[tax_code_template.id] = new_tax_code
return tax_code_template_ref
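# Illustrative sketch of generate_tax_code() above: the tax code template tree
# rooted at tax_code_root_id is walked top-down and a mapping is returned for
# later use when generating taxes, e.g. (ids assumed hypothetical):
#
#   ref = obj_tax_code_template.generate_tax_code(cr, uid, root_template_id,
#                                                 company_id, context=context)
#   # ref == {tax_code_template_id: tax_code_id, ...}
#
# The root entry is created with the company name instead of the template
# name, and a tax code that already exists with the same name/code/company is
# neither recreated nor added to the mapping.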
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
if isinstance(ids, (int, long)):
ids = [ids]
reads = self.read(cr, uid, ids, ['name','code'], context=context, load='_classic_write')
return [(x['id'], (x['code'] and x['code'] + ' - ' or '') + x['name']) \
for x in reads]
_check_recursion = check_cycle
_constraints = [
(_check_recursion, 'Error!\nYou cannot create recursive Tax Codes.', ['parent_id'])
]
_order = 'code,name'
class account_chart_template(osv.osv):
_name="account.chart.template"
_description= "Templates for Account Chart"
_columns={
'name': fields.char('Name', required=True),
'parent_id': fields.many2one('account.chart.template', 'Parent Chart Template'),
'code_digits': fields.integer('# of Digits', required=True, help="No. of Digits to use for account code"),
'visible': fields.boolean('Can be Visible?', help="Set this to False if you don't want this template to be used actively in the wizard that generates the Chart of Accounts from templates. This is useful when you want to generate accounts of this template only when loading its child template."),
'currency_id': fields.many2one('res.currency', 'Currency'),
'complete_tax_set': fields.boolean('Complete Set of Taxes', help='This boolean helps you to choose if you want to propose to the user to encode the sale and purchase rates or choose from a list of taxes. This last choice assumes that the set of taxes defined on this template is complete'),
'account_root_id': fields.many2one('account.account.template', 'Root Account', domain=[('parent_id','=',False)]),
'tax_code_root_id': fields.many2one('account.tax.code.template', 'Root Tax Code', domain=[('parent_id','=',False)]),
'tax_template_ids': fields.one2many('account.tax.template', 'chart_template_id', 'Tax Template List', help='List of all the taxes that have to be installed by the wizard'),
'bank_account_view_id': fields.many2one('account.account.template', 'Bank Account'),
'property_account_receivable': fields.many2one('account.account.template', 'Receivable Account'),
'property_account_payable': fields.many2one('account.account.template', 'Payable Account'),
'property_account_expense_categ': fields.many2one('account.account.template', 'Expense Category Account'),
'property_account_income_categ': fields.many2one('account.account.template', 'Income Category Account'),
'property_account_expense': fields.many2one('account.account.template', 'Expense Account on Product Template'),
'property_account_income': fields.many2one('account.account.template', 'Income Account on Product Template'),
'property_account_income_opening': fields.many2one('account.account.template', 'Opening Entries Income Account'),
'property_account_expense_opening': fields.many2one('account.account.template', 'Opening Entries Expense Account'),
}
_defaults = {
'visible': True,
'code_digits': 6,
'complete_tax_set': True,
}
class account_tax_template(osv.osv):
_name = 'account.tax.template'
_description = 'Templates for Taxes'
_columns = {
'chart_template_id': fields.many2one('account.chart.template', 'Chart Template', required=True),
'name': fields.char('Tax Name', required=True),
'sequence': fields.integer('Sequence', required=True, help="The sequence field is used to order the taxes lines from lower sequences to higher ones. The order is important if you have a tax that has several tax children. In this case, the evaluation order is important."),
'amount': fields.float('Amount', required=True, digits_compute=get_precision_tax(), help="For Tax Type percent enter % ratio between 0-1."),
'type': fields.selection( [('percent','Percent'), ('fixed','Fixed'), ('none','None'), ('code','Python Code'), ('balance','Balance')], 'Tax Type', required=True),
'applicable_type': fields.selection( [('true','True'), ('code','Python Code')], 'Applicable Type', required=True, help="If not applicable (computed through a Python code), the tax won't appear on the invoice."),
'domain':fields.char('Domain', help="This field is only used if you develop your own module allowing developers to create specific taxes in a custom domain."),
'account_collected_id':fields.many2one('account.account.template', 'Invoice Tax Account'),
'account_paid_id':fields.many2one('account.account.template', 'Refund Tax Account'),
'parent_id':fields.many2one('account.tax.template', 'Parent Tax Account', select=True),
'child_depend':fields.boolean('Tax on Children', help="Set if the tax computation is based on the computation of child taxes rather than on the total amount."),
'python_compute':fields.text('Python Code'),
'python_compute_inv':fields.text('Python Code (reverse)'),
'python_applicable':fields.text('Applicable Code'),
#
# Fields used for the Tax declaration
#
'base_code_id': fields.many2one('account.tax.code.template', 'Base Code', help="Use this code for the tax declaration."),
'tax_code_id': fields.many2one('account.tax.code.template', 'Tax Code', help="Use this code for the tax declaration."),
'base_sign': fields.float('Base Code Sign', help="Usually 1 or -1."),
'tax_sign': fields.float('Tax Code Sign', help="Usually 1 or -1."),
# Same fields for refund invoices
'ref_base_code_id': fields.many2one('account.tax.code.template', 'Refund Base Code', help="Use this code for the tax declaration."),
'ref_tax_code_id': fields.many2one('account.tax.code.template', 'Refund Tax Code', help="Use this code for the tax declaration."),
'ref_base_sign': fields.float('Refund Base Code Sign', help="Usually 1 or -1."),
'ref_tax_sign': fields.float('Refund Tax Code Sign', help="Usually 1 or -1."),
'include_base_amount': fields.boolean('Include in Base Amount', help="Set if the amount of tax must be included in the base amount before computing the next taxes."),
'description': fields.char('Internal Name'),
'type_tax_use': fields.selection([('sale','Sale'),('purchase','Purchase'),('all','All')], 'Tax Use In', required=True,),
'price_include': fields.boolean('Tax Included in Price', help="Check this if the price you use on the product and invoices includes this tax."),
}
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
res = []
for record in self.read(cr, uid, ids, ['description','name'], context=context):
name = record['description'] and record['description'] or record['name']
res.append((record['id'],name ))
return res
def _default_company(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
if user.company_id:
return user.company_id.id
return self.pool.get('res.company').search(cr, uid, [('parent_id', '=', False)])[0]
_defaults = {
'python_compute': lambda *a: '''# price_unit\n# product: product.product object or None\n# partner: res.partner object or None\n\nresult = price_unit * 0.10''',
'python_compute_inv': lambda *a: '''# price_unit\n# product: product.product object or False\n\nresult = price_unit * 0.10''',
'applicable_type': 'true',
'type': 'percent',
'amount': 0,
'sequence': 1,
'ref_tax_sign': 1,
'ref_base_sign': 1,
'tax_sign': 1,
'base_sign': 1,
'include_base_amount': False,
'type_tax_use': 'all',
'price_include': 0,
}
_order = 'sequence'
def _generate_tax(self, cr, uid, tax_templates, tax_code_template_ref, company_id, context=None):
"""
This method generate taxes from templates.
:param tax_templates: list of browse record of the tax templates to process
:param tax_code_template_ref: Taxcode templates reference.
:param company_id: id of the company the wizard is running for
:returns:
{
'tax_template_to_tax': mapping between tax template and the newly generated taxes corresponding,
'account_dict': dictionary containing a to-do list with all the accounts to assign on new taxes
}
"""
if context is None:
context = {}
res = {}
todo_dict = {}
tax_template_to_tax = {}
for tax in tax_templates:
vals_tax = {
'name':tax.name,
'sequence': tax.sequence,
'amount': tax.amount,
'type': tax.type,
'applicable_type': tax.applicable_type,
'domain': tax.domain,
'parent_id': tax.parent_id and ((tax.parent_id.id in tax_template_to_tax) and tax_template_to_tax[tax.parent_id.id]) or False,
'child_depend': tax.child_depend,
'python_compute': tax.python_compute,
'python_compute_inv': tax.python_compute_inv,
'python_applicable': tax.python_applicable,
'base_code_id': tax.base_code_id and ((tax.base_code_id.id in tax_code_template_ref) and tax_code_template_ref[tax.base_code_id.id]) or False,
'tax_code_id': tax.tax_code_id and ((tax.tax_code_id.id in tax_code_template_ref) and tax_code_template_ref[tax.tax_code_id.id]) or False,
'base_sign': tax.base_sign,
'tax_sign': tax.tax_sign,
'ref_base_code_id': tax.ref_base_code_id and ((tax.ref_base_code_id.id in tax_code_template_ref) and tax_code_template_ref[tax.ref_base_code_id.id]) or False,
'ref_tax_code_id': tax.ref_tax_code_id and ((tax.ref_tax_code_id.id in tax_code_template_ref) and tax_code_template_ref[tax.ref_tax_code_id.id]) or False,
'ref_base_sign': tax.ref_base_sign,
'ref_tax_sign': tax.ref_tax_sign,
'include_base_amount': tax.include_base_amount,
'description': tax.description,
'company_id': company_id,
'type_tax_use': tax.type_tax_use,
'price_include': tax.price_include
}
new_tax = self.pool.get('account.tax').create(cr, uid, vals_tax)
tax_template_to_tax[tax.id] = new_tax
#as the accounts have not been created yet, we have to wait before filling these fields
todo_dict[new_tax] = {
'account_collected_id': tax.account_collected_id and tax.account_collected_id.id or False,
'account_paid_id': tax.account_paid_id and tax.account_paid_id.id or False,
}
res.update({'tax_template_to_tax': tax_template_to_tax, 'account_dict': todo_dict})
return res
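# Illustrative sketch of _generate_tax() above: taxes are created in two
# phases because the accounts they should reference do not exist yet at this
# point. The returned value therefore combines the template->tax mapping with
# a to-do dictionary keyed by the new tax ids (names assumed hypothetical):
#
#   res = obj_tax_temp._generate_tax(cr, uid, tax_templates, tax_code_ref,
#                                    company_id, context=context)
#   # res['tax_template_to_tax'] == {tax_template_id: tax_id, ...}
#   # res['account_dict'][tax_id] == {'account_collected_id': template_account_id or False,
#   #                                 'account_paid_id': template_account_id or False}
#
# _load_template() later resolves those template account ids through the
# account mapping and writes them back on the taxes.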
# Fiscal Position Templates
class account_fiscal_position_template(osv.osv):
_name = 'account.fiscal.position.template'
_description = 'Template for Fiscal Position'
_columns = {
'name': fields.char('Fiscal Position Template', required=True),
'chart_template_id': fields.many2one('account.chart.template', 'Chart Template', required=True),
'account_ids': fields.one2many('account.fiscal.position.account.template', 'position_id', 'Account Mapping'),
'tax_ids': fields.one2many('account.fiscal.position.tax.template', 'position_id', 'Tax Mapping'),
'note': fields.text('Notes'),
}
def generate_fiscal_position(self, cr, uid, chart_temp_id, tax_template_ref, acc_template_ref, company_id, context=None):
"""
This method generate Fiscal Position, Fiscal Position Accounts and Fiscal Position Taxes from templates.
:param chart_temp_id: Chart Template Id.
:param taxes_ids: Taxes templates reference for generating account.fiscal.position.tax.
:param acc_template_ref: Account templates reference for generating account.fiscal.position.account.
:param company_id: company_id selected from wizard.multi.charts.accounts.
:returns: True
"""
if context is None:
context = {}
obj_tax_fp = self.pool.get('account.fiscal.position.tax')
obj_ac_fp = self.pool.get('account.fiscal.position.account')
obj_fiscal_position = self.pool.get('account.fiscal.position')
fp_ids = self.search(cr, uid, [('chart_template_id', '=', chart_temp_id)])
for position in self.browse(cr, uid, fp_ids, context=context):
new_fp = obj_fiscal_position.create(cr, uid, {'company_id': company_id, 'name': position.name, 'note': position.note})
for tax in position.tax_ids:
obj_tax_fp.create(cr, uid, {
'tax_src_id': tax_template_ref[tax.tax_src_id.id],
'tax_dest_id': tax.tax_dest_id and tax_template_ref[tax.tax_dest_id.id] or False,
'position_id': new_fp
})
for acc in position.account_ids:
obj_ac_fp.create(cr, uid, {
'account_src_id': acc_template_ref[acc.account_src_id.id],
'account_dest_id': acc_template_ref[acc.account_dest_id.id],
'position_id': new_fp
})
return True
class account_fiscal_position_tax_template(osv.osv):
_name = 'account.fiscal.position.tax.template'
_description = 'Template Tax Fiscal Position'
_rec_name = 'position_id'
_columns = {
'position_id': fields.many2one('account.fiscal.position.template', 'Fiscal Position', required=True, ondelete='cascade'),
'tax_src_id': fields.many2one('account.tax.template', 'Tax Source', required=True),
'tax_dest_id': fields.many2one('account.tax.template', 'Replacement Tax')
}
class account_fiscal_position_account_template(osv.osv):
_name = 'account.fiscal.position.account.template'
_description = 'Template Account Fiscal Mapping'
_rec_name = 'position_id'
_columns = {
'position_id': fields.many2one('account.fiscal.position.template', 'Fiscal Mapping', required=True, ondelete='cascade'),
'account_src_id': fields.many2one('account.account.template', 'Account Source', domain=[('type','<>','view')], required=True),
'account_dest_id': fields.many2one('account.account.template', 'Account Destination', domain=[('type','<>','view')], required=True)
}
# ---------------------------------------------------------
# Account generation from template wizards
# ---------------------------------------------------------
class wizard_multi_charts_accounts(osv.osv_memory):
"""
Create a new account chart for a company.
Wizards ask for:
* a company
* an account chart template
* a number of digits for formatting code of non-view accounts
* a list of bank accounts owned by the company
Then, the wizard:
* generates all accounts from the template and assigns them to the right company
* generates all taxes and tax codes, changing account assignations
* generates all accounting properties and assigns them correctly
"""
_name='wizard.multi.charts.accounts'
_inherit = 'res.config'
_columns = {
'company_id':fields.many2one('res.company', 'Company', required=True),
'currency_id': fields.many2one('res.currency', 'Currency', help="Currency as per company's country."),
'only_one_chart_template': fields.boolean('Only One Chart Template Available'),
'chart_template_id': fields.many2one('account.chart.template', 'Chart Template', required=True),
'bank_accounts_id': fields.one2many('account.bank.accounts.wizard', 'bank_account_id', 'Cash and Banks', required=True),
'code_digits':fields.integer('# of Digits', required=True, help="No. of Digits to use for account code"),
"sale_tax": fields.many2one("account.tax.template", "Default Sale Tax"),
"purchase_tax": fields.many2one("account.tax.template", "Default Purchase Tax"),
'sale_tax_rate': fields.float('Sales Tax(%)'),
'purchase_tax_rate': fields.float('Purchase Tax(%)'),
'complete_tax_set': fields.boolean('Complete Set of Taxes', help='This boolean helps you to choose if you want to propose to the user to encode the sales and purchase rates or use the usual m2o fields. This last choice assumes that the set of taxes defined for the chosen template is complete'),
}
def _get_chart_parent_ids(self, cr, uid, chart_template, context=None):
""" Returns the IDs of all ancestor charts, including the chart itself.
(inverse of child_of operator)
:param browse_record chart_template: the account.chart.template record
:return: the IDS of all ancestor charts, including the chart itself.
"""
result = [chart_template.id]
while chart_template.parent_id:
chart_template = chart_template.parent_id
result.append(chart_template.id)
return result
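# Illustrative sketch of _get_chart_parent_ids() above: assuming a
# hypothetical hierarchy generic_coa -> localized_coa -> custom_coa, calling
# it on the custom chart returns [custom_id, localized_id, generic_id], i.e.
# the chart itself followed by all of its ancestors. The resulting list is
# used in "chart_template_id in chart_ids" domains so that taxes defined
# anywhere in the hierarchy are taken into account.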
def onchange_tax_rate(self, cr, uid, ids, rate=False, context=None):
return {'value': {'purchase_tax_rate': rate or False}}
def onchange_chart_template_id(self, cr, uid, ids, chart_template_id=False, context=None):
res = {}
tax_templ_obj = self.pool.get('account.tax.template')
res['value'] = {'complete_tax_set': False, 'sale_tax': False, 'purchase_tax': False}
if chart_template_id:
data = self.pool.get('account.chart.template').browse(cr, uid, chart_template_id, context=context)
currency_id = data.currency_id and data.currency_id.id or self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id
res['value'].update({'complete_tax_set': data.complete_tax_set, 'currency_id': currency_id})
if data.complete_tax_set:
# the default tax is given by the lowest sequence. For equal sequences we take the latest created one, as is the case for taxes created while installing the generic chart of accounts
chart_ids = self._get_chart_parent_ids(cr, uid, data, context=context)
base_tax_domain = [("chart_template_id", "in", chart_ids), ('parent_id', '=', False)]
sale_tax_domain = base_tax_domain + [('type_tax_use', 'in', ('sale','all'))]
purchase_tax_domain = base_tax_domain + [('type_tax_use', 'in', ('purchase','all'))]
sale_tax_ids = tax_templ_obj.search(cr, uid, sale_tax_domain, order="sequence, id desc")
purchase_tax_ids = tax_templ_obj.search(cr, uid, purchase_tax_domain, order="sequence, id desc")
res['value'].update({'sale_tax': sale_tax_ids and sale_tax_ids[0] or False,
'purchase_tax': purchase_tax_ids and purchase_tax_ids[0] or False})
res.setdefault('domain', {})
res['domain']['sale_tax'] = repr(sale_tax_domain)
res['domain']['purchase_tax'] = repr(purchase_tax_domain)
if data.code_digits:
res['value'].update({'code_digits': data.code_digits})
return res
def default_get(self, cr, uid, fields, context=None):
if context is None:
context = {}
res = super(wizard_multi_charts_accounts, self).default_get(cr, uid, fields, context=context)
tax_templ_obj = self.pool.get('account.tax.template')
account_chart_template = self.pool['account.chart.template']
if 'bank_accounts_id' in fields:
res.update({'bank_accounts_id': [{'acc_name': _('Cash'), 'account_type': 'cash'},{'acc_name': _('Bank'), 'account_type': 'bank'}]})
if 'company_id' in fields:
res.update({'company_id': self.pool.get('res.users').browse(cr, uid, [uid], context=context)[0].company_id.id})
if 'currency_id' in fields:
company_id = res.get('company_id') or False
if company_id:
company_obj = self.pool.get('res.company')
country_id = company_obj.browse(cr, uid, company_id, context=context).country_id.id
currency_id = company_obj.on_change_country(cr, uid, company_id, country_id, context=context)['value']['currency_id']
res.update({'currency_id': currency_id})
ids = account_chart_template.search(cr, uid, [('visible', '=', True)], context=context)
if ids:
#in order to default to the most recently created chart, take the max of the ids.
chart_id = max(ids)
if context.get("default_charts"):
model_data = self.pool.get('ir.model.data').search_read(cr, uid, [('model','=','account.chart.template'),('module','=',context.get("default_charts"))], ['res_id'], context=context)
if model_data:
chart_id = model_data[0]['res_id']
chart = account_chart_template.browse(cr, uid, chart_id, context=context)
chart_hierarchy_ids = self._get_chart_parent_ids(cr, uid, chart, context=context)
if 'chart_template_id' in fields:
res.update({'only_one_chart_template': len(ids) == 1,
'chart_template_id': chart_id})
if 'sale_tax' in fields:
sale_tax_ids = tax_templ_obj.search(cr, uid, [("chart_template_id", "in", chart_hierarchy_ids),
('type_tax_use', 'in', ('sale','all'))],
order="sequence")
res.update({'sale_tax': sale_tax_ids and sale_tax_ids[0] or False})
if 'purchase_tax' in fields:
purchase_tax_ids = tax_templ_obj.search(cr, uid, [("chart_template_id", "in", chart_hierarchy_ids),
('type_tax_use', 'in', ('purchase','all'))],
order="sequence")
res.update({'purchase_tax': purchase_tax_ids and purchase_tax_ids[0] or False})
res.update({
'purchase_tax_rate': 15.0,
'sale_tax_rate': 15.0,
})
return res
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
if context is None:
context = {}
res = super(wizard_multi_charts_accounts, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar,submenu=False)
cmp_select = []
acc_template_obj = self.pool.get('account.chart.template')
company_obj = self.pool.get('res.company')
company_ids = company_obj.search(cr, uid, [], context=context)
#display, in the company selection widget, only the companies that haven't been configured yet (ignoring the demo chart of accounts)
cr.execute("SELECT company_id FROM account_account WHERE active = 't' AND account_account.parent_id IS NULL AND name != %s", ("Chart For Automated Tests",))
configured_cmp = [r[0] for r in cr.fetchall()]
unconfigured_cmp = list(set(company_ids)-set(configured_cmp))
for field in res['fields']:
if field == 'company_id':
res['fields'][field]['domain'] = [('id','in',unconfigured_cmp)]
res['fields'][field]['selection'] = [('', '')]
if unconfigured_cmp:
cmp_select = [(line.id, line.name) for line in company_obj.browse(cr, uid, unconfigured_cmp)]
res['fields'][field]['selection'] = cmp_select
return res
def check_created_journals(self, cr, uid, vals_journal, company_id, context=None):
"""
This method used for checking journals already created or not. If not then create new journal.
"""
obj_journal = self.pool.get('account.journal')
rec_list = obj_journal.search(cr, uid, [('name','=', vals_journal['name']),('company_id', '=', company_id)], context=context)
if not rec_list:
obj_journal.create(cr, uid, vals_journal, context=context)
return True
def generate_journals(self, cr, uid, chart_template_id, acc_template_ref, company_id, context=None):
"""
This method is used for creating journals.
:param chart_temp_id: Chart Template Id.
:param acc_template_ref: Account templates reference.
:param company_id: company_id selected from wizard.multi.charts.accounts.
:returns: True
"""
journal_data = self._prepare_all_journals(cr, uid, chart_template_id, acc_template_ref, company_id, context=context)
for vals_journal in journal_data:
self.check_created_journals(cr, uid, vals_journal, company_id, context=context)
return True
def _prepare_all_journals(self, cr, uid, chart_template_id, acc_template_ref, company_id, context=None):
def _get_analytic_journal(journal_type):
# Get the analytic journal
data = False
try:
if journal_type in ('sale', 'sale_refund'):
data = obj_data.get_object_reference(cr, uid, 'account', 'analytic_journal_sale')
elif journal_type in ('purchase', 'purchase_refund'):
data = obj_data.get_object_reference(cr, uid, 'account', 'exp')
elif journal_type == 'general':
pass
except ValueError:
pass
return data and data[1] or False
def _get_default_account(journal_type, type='debit'):
# Get the default accounts
default_account = False
if journal_type in ('sale', 'sale_refund'):
default_account = acc_template_ref.get(template.property_account_income_categ.id)
elif journal_type in ('purchase', 'purchase_refund'):
default_account = acc_template_ref.get(template.property_account_expense_categ.id)
elif journal_type == 'situation':
if type == 'debit':
default_account = acc_template_ref.get(template.property_account_expense_opening.id)
else:
default_account = acc_template_ref.get(template.property_account_income_opening.id)
return default_account
journal_names = {
'sale': _('Sales Journal'),
'purchase': _('Purchase Journal'),
'sale_refund': _('Sales Refund Journal'),
'purchase_refund': _('Purchase Refund Journal'),
'general': _('Miscellaneous Journal'),
'situation': _('Opening Entries Journal'),
}
journal_codes = {
'sale': _('SAJ'),
'purchase': _('EXJ'),
'sale_refund': _('SCNJ'),
'purchase_refund': _('ECNJ'),
'general': _('MISC'),
'situation': _('OPEJ'),
}
obj_data = self.pool.get('ir.model.data')
analytic_journal_obj = self.pool.get('account.analytic.journal')
template = self.pool.get('account.chart.template').browse(cr, uid, chart_template_id, context=context)
journal_data = []
for journal_type in ['sale', 'purchase', 'sale_refund', 'purchase_refund', 'general', 'situation']:
vals = {
'type': journal_type,
'name': journal_names[journal_type],
'code': journal_codes[journal_type],
'company_id': company_id,
'centralisation': journal_type == 'situation',
'analytic_journal_id': _get_analytic_journal(journal_type),
'default_credit_account_id': _get_default_account(journal_type, 'credit'),
'default_debit_account_id': _get_default_account(journal_type, 'debit'),
}
journal_data.append(vals)
return journal_data
def generate_properties(self, cr, uid, chart_template_id, acc_template_ref, company_id, context=None):
"""
This method used for creating properties.
:param chart_template_id: id of the current chart template for which we need to create properties
:param acc_template_ref: Mapping between ids of account templates and real accounts created from them
:param company_id: company_id selected from wizard.multi.charts.accounts.
:returns: True
"""
property_obj = self.pool.get('ir.property')
field_obj = self.pool.get('ir.model.fields')
todo_list = [
('property_account_receivable','res.partner','account.account'),
('property_account_payable','res.partner','account.account'),
('property_account_expense_categ','product.category','account.account'),
('property_account_income_categ','product.category','account.account'),
('property_account_expense','product.template','account.account'),
('property_account_income','product.template','account.account'),
]
template = self.pool.get('account.chart.template').browse(cr, uid, chart_template_id, context=context)
for record in todo_list:
account = getattr(template, record[0])
value = account and 'account.account,' + str(acc_template_ref[account.id]) or False
if value:
field = field_obj.search(cr, uid, [('name', '=', record[0]),('model', '=', record[1]),('relation', '=', record[2])], context=context)
vals = {
'name': record[0],
'company_id': company_id,
'fields_id': field[0],
'value': value,
}
property_ids = property_obj.search(cr, uid, [('name','=', record[0]),('company_id', '=', company_id)], context=context)
if property_ids:
#the property exist: modify it
property_obj.write(cr, uid, property_ids, vals, context=context)
else:
#create the property
property_obj.create(cr, uid, vals, context=context)
return True
def _install_template(self, cr, uid, template_id, company_id, code_digits=None, obj_wizard=None, acc_ref=None, taxes_ref=None, tax_code_ref=None, context=None):
'''
This function recursively loads the template objects and creates the real objects from them.
:param template_id: id of the chart template to load
:param company_id: id of the company the wizard is running for
:param code_digits: integer that depicts the number of digits the accounts code should have in the COA
:param obj_wizard: the current wizard for generating the COA from the templates
:param acc_ref: Mapping between ids of account templates and real accounts created from them
:param taxes_ref: Mapping between ids of tax templates and real taxes created from them
:param tax_code_ref: Mapping between ids of tax code templates and real tax codes created from them
:returns: return a tuple with a dictionary containing
* the mapping between the account template ids and the ids of the real accounts that have been generated
from them, as first item,
* a similar dictionary for mapping the tax templates and taxes, as second item,
* a last identical dictionary for mapping the tax code templates and tax codes, as third item
:rtype: tuple(dict, dict, dict)
'''
if acc_ref is None:
acc_ref = {}
if taxes_ref is None:
taxes_ref = {}
if tax_code_ref is None:
tax_code_ref = {}
template = self.pool.get('account.chart.template').browse(cr, uid, template_id, context=context)
if template.parent_id:
tmp1, tmp2, tmp3 = self._install_template(cr, uid, template.parent_id.id, company_id, code_digits=code_digits, acc_ref=acc_ref, taxes_ref=taxes_ref, tax_code_ref=tax_code_ref, context=context)
acc_ref.update(tmp1)
taxes_ref.update(tmp2)
tax_code_ref.update(tmp3)
tmp1, tmp2, tmp3 = self._load_template(cr, uid, template_id, company_id, code_digits=code_digits, obj_wizard=obj_wizard, account_ref=acc_ref, taxes_ref=taxes_ref, tax_code_ref=tax_code_ref, context=context)
acc_ref.update(tmp1)
taxes_ref.update(tmp2)
tax_code_ref.update(tmp3)
return acc_ref, taxes_ref, tax_code_ref
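# Illustrative sketch of _install_template() above: the recursion on parent_id
# means parent charts are loaded before their children, so with a hypothetical
# hierarchy generic_coa -> localized_coa the generic objects are created first
# and the localized pass can extend the mappings produced by its parent:
#
#   acc_ref, taxes_ref, tax_code_ref = self._install_template(
#       cr, uid, localized_template_id, company_id,
#       code_digits=6, obj_wizard=wizard, context=context)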
def _load_template(self, cr, uid, template_id, company_id, code_digits=None, obj_wizard=None, account_ref=None, taxes_ref=None, tax_code_ref=None, context=None):
'''
This function generates all the objects from the templates
:param template_id: id of the chart template to load
:param company_id: id of the company the wizard is running for
:param code_digits: integer that depicts the number of digits the accounts code should have in the COA
:param obj_wizard: the current wizard for generating the COA from the templates
:param account_ref: Mapping between ids of account templates and real accounts created from them
:param taxes_ref: Mapping between ids of tax templates and real taxes created from them
:param tax_code_ref: Mapping between ids of tax code templates and real tax codes created from them
:returns: return a tuple with a dictionary containing
* the mapping between the account template ids and the ids of the real accounts that have been generated
from them, as first item,
* a similar dictionary for mapping the tax templates and taxes, as second item,
* a last identical dictionary for mapping the tax code templates and tax codes, as third item
:rtype: tuple(dict, dict, dict)
'''
if account_ref is None:
account_ref = {}
if taxes_ref is None:
taxes_ref = {}
if tax_code_ref is None:
tax_code_ref = {}
template = self.pool.get('account.chart.template').browse(cr, uid, template_id, context=context)
obj_tax_code_template = self.pool.get('account.tax.code.template')
obj_acc_tax = self.pool.get('account.tax')
obj_tax_temp = self.pool.get('account.tax.template')
obj_acc_template = self.pool.get('account.account.template')
obj_fiscal_position_template = self.pool.get('account.fiscal.position.template')
# create all the tax codes.
tax_code_ref.update(obj_tax_code_template.generate_tax_code(cr, uid, template.tax_code_root_id.id, company_id, context=context))
# Generate taxes from templates.
tax_templates = [x for x in template.tax_template_ids]
generated_tax_res = obj_tax_temp._generate_tax(cr, uid, tax_templates, tax_code_ref, company_id, context=context)
taxes_ref.update(generated_tax_res['tax_template_to_tax'])
# Generating Accounts from templates.
account_template_ref = obj_acc_template.generate_account(cr, uid, template_id, taxes_ref, account_ref, code_digits, company_id, context=context)
account_ref.update(account_template_ref)
# writing account values on tax after creation of accounts
for key,value in generated_tax_res['account_dict'].items():
if value['account_collected_id'] or value['account_paid_id']:
obj_acc_tax.write(cr, uid, [key], {
'account_collected_id': account_ref.get(value['account_collected_id'], False),
'account_paid_id': account_ref.get(value['account_paid_id'], False),
})
# Create Journals
self.generate_journals(cr, uid, template_id, account_ref, company_id, context=context)
# generate properties function
self.generate_properties(cr, uid, template_id, account_ref, company_id, context=context)
# Generate Fiscal Position , Fiscal Position Accounts and Fiscal Position Taxes from templates
obj_fiscal_position_template.generate_fiscal_position(cr, uid, template_id, taxes_ref, account_ref, company_id, context=context)
return account_ref, taxes_ref, tax_code_ref
def _create_tax_templates_from_rates(self, cr, uid, obj_wizard, company_id, context=None):
'''
This function checks if the chosen chart template is configured as containing a full set of taxes; if
it is not the case, it updates the reference sale and purchase tax templates (rate and name) according to
the provided sale/purchase rates, so they can be used as the default taxes of this chart template.
:param obj_wizard: browse record of the wizard to generate the COA from the templates
:param company_id: id of the company for which the wizard is running
:return: True
'''
obj_tax_code_template = self.pool.get('account.tax.code.template')
obj_tax_temp = self.pool.get('account.tax.template')
chart_template = obj_wizard.chart_template_id
vals = {}
all_parents = self._get_chart_parent_ids(cr, uid, chart_template, context=context)
# update the default sale/purchase tax templates using the purchase_tax_rate and sale_tax_rate fields
if not chart_template.complete_tax_set:
value = obj_wizard.sale_tax_rate
ref_tax_ids = obj_tax_temp.search(cr, uid, [('type_tax_use','in', ('sale','all')), ('chart_template_id', 'in', all_parents)], context=context, order="sequence, id desc", limit=1)
obj_tax_temp.write(cr, uid, ref_tax_ids, {'amount': value/100.0, 'name': _('Tax %.2f%%') % value})
value = obj_wizard.purchase_tax_rate
ref_tax_ids = obj_tax_temp.search(cr, uid, [('type_tax_use','in', ('purchase','all')), ('chart_template_id', 'in', all_parents)], context=context, order="sequence, id desc", limit=1)
obj_tax_temp.write(cr, uid, ref_tax_ids, {'amount': value/100.0, 'name': _('Purchase Tax %.2f%%') % value})
return True
def execute(self, cr, uid, ids, context=None):
'''
This function is called at the confirmation of the wizard to generate the COA from the templates. It reads
all the provided information to create the accounts, the banks, the journals, the taxes, the tax codes and the
accounting properties for the chosen company.
'''
if uid != SUPERUSER_ID and not self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager'):
raise openerp.exceptions.AccessError(_("Only administrators can change the settings"))
obj_data = self.pool.get('ir.model.data')
ir_values_obj = self.pool.get('ir.values')
obj_wizard = self.browse(cr, uid, ids[0])
company_id = obj_wizard.company_id.id
self.pool.get('res.company').write(cr, uid, [company_id], {'currency_id': obj_wizard.currency_id.id})
# When we install the CoA of first company, set the currency to price types and pricelists
if company_id==1:
for ref in (('product','list_price'),('product','standard_price'),('product','list0'),('purchase','list0')):
try:
tmp2 = obj_data.get_object_reference(cr, uid, *ref)
if tmp2:
self.pool[tmp2[0]].write(cr, uid, tmp2[1], {
'currency_id': obj_wizard.currency_id.id
})
except ValueError:
pass
# If the floats for sale/purchase rates have been filled, create templates from them
self._create_tax_templates_from_rates(cr, uid, obj_wizard, company_id, context=context)
# Install all the templates objects and generate the real objects
acc_template_ref, taxes_ref, tax_code_ref = self._install_template(cr, uid, obj_wizard.chart_template_id.id, company_id, code_digits=obj_wizard.code_digits, obj_wizard=obj_wizard, context=context)
# write values of default taxes for product as super user
if obj_wizard.sale_tax and taxes_ref:
ir_values_obj.set_default(cr, SUPERUSER_ID, 'product.template', "taxes_id", [taxes_ref[obj_wizard.sale_tax.id]], for_all_users=True, company_id=company_id)
if obj_wizard.purchase_tax and taxes_ref:
ir_values_obj.set_default(cr, SUPERUSER_ID, 'product.template', "supplier_taxes_id", [taxes_ref[obj_wizard.purchase_tax.id]], for_all_users=True, company_id=company_id)
# Create Bank journals
self._create_bank_journals_from_o2m(cr, uid, obj_wizard, company_id, acc_template_ref, context=context)
return {}
def _prepare_bank_journal(self, cr, uid, line, current_num, default_account_id, company_id, context=None):
'''
This function prepares the values to use for the creation of a bank journal created through the wizard of
generating the COA from templates.
:param line: dictionary containing the values encoded by the user related to his bank account
:param current_num: integer corresponding to a counter of the bank journals already created through this wizard.
:param default_account_id: id of the default debit/credit account created beforehand for this journal.
:param company_id: id of the company for which the wizard is running
:return: mapping of field names and values
:rtype: dict
'''
obj_data = self.pool.get('ir.model.data')
obj_journal = self.pool.get('account.journal')
# we need to loop again to find the next available number for the journal code
# because we can't rely on the value of current_num:
# it's possible that bank journals have already been created (e.g. by the creation of res.partner.bank)
# and the next number might therefore already be used for a journal code
for num in xrange(current_num, 100):
# journal_code has a maximal size of 5, hence we can enforce the boundary num < 100
journal_code = _('BNK')[:3] + str(num)
ids = obj_journal.search(cr, uid, [('code', '=', journal_code), ('company_id', '=', company_id)], context=context)
if not ids:
break
else:
raise osv.except_osv(_('Error!'), _('Cannot generate an unused journal code.'))
vals = {
'name': line['acc_name'],
'code': journal_code,
'type': line['account_type'] == 'cash' and 'cash' or 'bank',
'company_id': company_id,
'analytic_journal_id': False,
'currency': False,
'default_credit_account_id': default_account_id,
'default_debit_account_id': default_account_id,
}
if line['currency_id']:
vals['currency'] = line['currency_id']
return vals
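# Illustrative sketch of the journal code lookup above: the loop searches for
# the first free 'BNK' + number code for the company, starting at current_num
# but skipping codes already taken. Assuming BNK1 and BNK2 already exist (for
# instance created together with res.partner.bank records), the next wizard
# line would be assigned BNK3.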
def _prepare_bank_account(self, cr, uid, line, new_code, acc_template_ref, ref_acc_bank, company_id, context=None):
'''
This function prepares the values to use for the creation of the default debit and credit accounts of a
bank journal created through the wizard of generating COA from templates.
:param line: dictionary containing the values encoded by the user related to his bank account
:param new_code: integer corresponding to the next available number to use as account code
:param acc_template_ref: the dictionary containing the mapping between the ids of account templates and the ids
of the accounts that have been generated from them.
:param ref_acc_bank: browse record of the account template set as root of all bank accounts for the chosen
template
:param company_id: id of the company for which the wizard is running
:return: mapping of field names and values
:rtype: dict
'''
obj_data = self.pool.get('ir.model.data')
# Get the ids of the user types for cash and bank
tmp = obj_data.get_object_reference(cr, uid, 'account', 'data_account_type_cash')
cash_type = tmp and tmp[1] or False
tmp = obj_data.get_object_reference(cr, uid, 'account', 'data_account_type_bank')
bank_type = tmp and tmp[1] or False
return {
'name': line['acc_name'],
'currency_id': line['currency_id'],
'code': new_code,
'type': 'liquidity',
'user_type': line['account_type'] == 'cash' and cash_type or bank_type,
'parent_id': acc_template_ref[ref_acc_bank.id] or False,
'company_id': company_id,
}
def _create_bank_journals_from_o2m(self, cr, uid, obj_wizard, company_id, acc_template_ref, context=None):
'''
This function creates the bank journals and their accounts for each line encoded in the field bank_accounts_id of the
wizard.
:param obj_wizard: the current wizard that generates the COA from the templates.
:param company_id: the id of the company for which the wizard is running.
:param acc_template_ref: the dictionary containing the mapping between the ids of account templates and the ids
of the accounts that have been generated from them.
:return: True
'''
obj_acc = self.pool.get('account.account')
obj_journal = self.pool.get('account.journal')
code_digits = obj_wizard.code_digits
# Build a list with all the data to process
journal_data = []
if obj_wizard.bank_accounts_id:
for acc in obj_wizard.bank_accounts_id:
vals = {
'acc_name': acc.acc_name,
'account_type': acc.account_type,
'currency_id': acc.currency_id.id,
}
journal_data.append(vals)
ref_acc_bank = obj_wizard.chart_template_id.bank_account_view_id
if journal_data and not ref_acc_bank.code:
raise osv.except_osv(_('Configuration Error!'), _('You have to set a code for the bank account defined on the selected chart of accounts.'))
current_num = 1
for line in journal_data:
# Seek the next available number for the account code
while True:
new_code = str(ref_acc_bank.code.ljust(code_digits-len(str(current_num)), '0')) + str(current_num)
ids = obj_acc.search(cr, uid, [('code', '=', new_code), ('company_id', '=', company_id)])
if not ids:
break
else:
current_num += 1
# Create the default debit/credit accounts for this bank journal
vals = self._prepare_bank_account(cr, uid, line, new_code, acc_template_ref, ref_acc_bank, company_id, context=context)
default_account_id = obj_acc.create(cr, uid, vals, context=context)
#create the bank journal
vals_journal = self._prepare_bank_journal(cr, uid, line, current_num, default_account_id, company_id, context=context)
obj_journal.create(cr, uid, vals_journal)
current_num += 1
return True
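# Illustrative sketch of the bank account code computation above: the code of
# the bank root template is right-padded with zeros and suffixed with a
# counter. Assuming code_digits=6 and a bank root coded '512', the loop tries
# '512001', '512002', ... until it finds a code not yet used by the company.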
class account_bank_accounts_wizard(osv.osv_memory):
_name='account.bank.accounts.wizard'
_columns = {
'acc_name': fields.char('Account Name.', required=True),
'bank_account_id': fields.many2one('wizard.multi.charts.accounts', 'Bank Account', required=True, ondelete='cascade'),
'currency_id': fields.many2one('res.currency', 'Secondary Currency', help="Forces all moves for this account to have this secondary currency."),
'account_type': fields.selection([('cash','Cash'), ('check','Check'), ('bank','Bank')], 'Account Type'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
andreparames/odoo
|
addons/account/account.py
|
Python
|
agpl-3.0
| 191,211
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 Cubic ERP - Teradata SAC (<http://cubicerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Electronic Invoice",
"version": "0.1",
"description": """
Manage the electronic invoice
=============================
Electronic invoice management integrates invoices with digital signatures and certificates, usually in a PKI infrastructure, exchanging XML messages with web services to generate and validate the electronic invoices.
Key Features
------------
* Add support for managing the web services communication used to generate and validate an electronic invoice
* Provide an abstract model to manage electronic invoices from several countries
""",
"author": "Cubic ERP",
"website": "http://cubicERP.com",
"category": "Financial",
"depends": [
"base_pki",
"account",
],
"data":[
"security/account_einvoice_security.xml",
"security/ir.model.access.csv",
"account_einvoice_workflow.xml",
"account_einvoice_view.xml",
"account_view.xml",
],
"demo_xml": [],
"active": False,
"installable": True,
"certificate" : "",
}
|
jolevq/odoopub
|
extra-addons/account_einvoice/__openerp__.py
|
Python
|
agpl-3.0
| 2,041
|