| code (string, 2–1.05M chars) | repo_name (string, 5–104 chars) | path (string, 4–251 chars) | language (1 class: Python) | license (15 classes) | size (int32, 2–1.05M) |
|---|---|---|---|---|---|
try:
from django.utils.encoding import force_text # noqa
except ImportError:
from django.utils.encoding import force_unicode as force_text # noqa
try:
from urllib2 import urlopen # noqa
except ImportError:
from urllib.request import urlopen # noqa
|
blueyed/pytest_django
|
tests/compat.py
|
Python
|
bsd-3-clause
| 270
|
r"""
Modeling and inversion of temperature residuals measured in wells due to
temperature perturbations at the surface.
Perturbations can be of two kinds: **abrupt** or **linear**.
Forward modeling of these types of changes is done with functions:
* :func:`~fatiando.geothermal.climsig.abrupt`
* :func:`~fatiando.geothermal.climsig.linear`
Assuming that the temperature perturbation was abrupt, the residual
temperature at a depth :math:`z_i` in the well at a time :math:`t` after the
perturbation is given by
.. math::
T_i(z_i) = A \left[1 - \mathrm{erf}\left(
\frac{z_i}{\sqrt{4\lambda t}}\right)\right]
where :math:`A` is the amplitude of the perturbation, :math:`\lambda` is the
thermal diffusivity of the medium, and :math:`\mathrm{erf}` is the error
function.
For the case of a linear change, the temperature is
.. math::
T_i(z_i) = A \left[
\left(1 + 2\frac{z_i^2}{4\lambda t}\right)
\mathrm{erfc}\left(\frac{z_i}{\sqrt{4\lambda t}}\right) -
\frac{2}{\sqrt{\pi}}\left(\frac{z_i}{\sqrt{4\lambda t}}\right)
\mathrm{exp}\left(-\frac{z_i^2}{4\lambda t}\right)
\right]
Given the temperature measured at different depths, we can **invert** for the
amplitude and age of the change. The available inversion solvers are:
* :class:`~fatiando.geothermal.climsig.SingleChange`: inverts for the
parameters of a single temperature change. Can use both abrupt and linear
models.
----
"""
from __future__ import division
import numpy
import scipy.special
from ..inversion.base import Misfit
from ..constants import THERMAL_DIFFUSIVITY_YEAR
def linear(amp, age, zp, diffus=THERMAL_DIFFUSIVITY_YEAR):
"""
Calculate the residual temperature profile in depth due to a linear
temperature perturbation.
Parameters:
* amp : float
Amplitude of the perturbation (in C)
* age : float
        Time since the perturbation occurred (in years)
* zp : array
The depths of computation points along the well (in meters)
* diffus : float
Thermal diffusivity of the medium (in m^2/year)
See the default values for the thermal diffusivity in
:mod:`fatiando.constants`.
    Returns:
* temp : array
The residual temperatures measured along the well
"""
tmp = zp / numpy.sqrt(4. * diffus * age)
res = amp * ((1. + 2 * tmp ** 2) * scipy.special.erfc(tmp)
- 2. / numpy.sqrt(numpy.pi) * tmp * numpy.exp(-tmp ** 2))
return res
def abrupt(amp, age, zp, diffus=THERMAL_DIFFUSIVITY_YEAR):
"""
Calculate the residual temperature profile in depth due to an abrupt
temperature perturbation.
Parameters:
* amp : float
Amplitude of the perturbation (in C)
* age : float
        Time since the perturbation occurred (in years)
* zp : array
        Array with the depths of computation points along the well (in meters)
* diffus : float
Thermal diffusivity of the medium (in m^2/year)
See the default values for the thermal diffusivity in
:mod:`fatiando.constants`.
    Returns:
* temp : array
The residual temperatures measured along the well
"""
return amp * (1. - scipy.special.erf(zp / numpy.sqrt(4. * diffus * age)))
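# Sanity check that follows from erf(0) = 0: at the surface (z = 0) the full
# perturbation amplitude is recovered, e.g.
# abrupt(2.0, 100.0, numpy.array([0.0]))[0] == 2.0.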
class SingleChange(Misfit):
r"""
Invert the well temperature data for a single change in temperature.
The parameters of the change are its amplitude and age.
See the docstring of :mod:`fatiando.geothermal.climsig` for more
information and examples.
Parameters:
* temp : array
The temperature profile
* zp : array
Depths along the profile
* mode : string
The type of change: ``'abrupt'`` for an abrupt change, ``'linear'`` for
a linear change.
* diffus : float
Thermal diffusivity of the medium (in m^2/year)
.. note::
The recommended solver for this inverse problem is the
        Levenberg-Marquardt method. Since this is a non-linear problem, set the
desired method and initial solution using the
:meth:`~fatiando.inversion.base.FitMixin.config` method.
        See the example below.
Example with synthetic data:
>>> import numpy
>>> zp = numpy.arange(0, 100, 1)
>>> # For an ABRUPT change
>>> amp = 2
>>> age = 100 # Uses years to avoid overflows
>>> temp = abrupt(amp, age, zp)
>>> # Run the inversion for the amplitude and time
    >>> # This is a non-linear problem, so use the Levenberg-Marquardt
>>> # algorithm with an initial estimate
>>> solver = SingleChange(temp, zp, mode='abrupt').config(
... 'levmarq', initial=[1, 1])
>>> amp_, age_ = solver.fit().estimate_
    >>> print("amp: %.2f age: %.2f" % (amp_, age_))
amp: 2.00 age: 100.00
>>> # For a LINEAR change
>>> amp = 3.45
>>> age = 52.5
>>> temp = linear(amp, age, zp)
>>> solver = SingleChange(temp, zp, mode='linear').config(
... 'levmarq', initial=[1, 1])
>>> amp_, age_ = solver.fit().estimate_
    >>> print("amp: %.2f age: %.2f" % (amp_, age_))
amp: 3.45 age: 52.50
Notes:
For **abrupt** changes, derivatives with respect to the amplitude and age
are calculated using the formula
.. math::
\frac{\partial T_i}{\partial A} = 1 - \mathrm{erf}\left(
\frac{z_i}{\sqrt{4\lambda t}}\right)
and
.. math::
\frac{\partial T_i}{\partial t} = \frac{A}{t\sqrt{\pi}}
\left(\frac{z_i}{\sqrt{4\lambda t}}\right)
\exp\left[-\left(\frac{z_i}{\sqrt{4\lambda t}}\right)^2\right]
respectively.
For **linear** changes, derivatives with respect to the age are calculated
using a 2-point finite difference approximation. Derivatives with respect
    to amplitude are calculated using the formula
.. math::
\frac{\partial T_i}{\partial A} =
\left(1 + 2\frac{z_i^2}{4\lambda t}\right)
\mathrm{erfc}\left(\frac{z_i}{\sqrt{4\lambda t}}\right) -
\frac{2}{\sqrt{\pi}}\left(\frac{z_i}{\sqrt{4\lambda t}}\right)
\mathrm{exp}\left(-\frac{z_i^2}{4\lambda t}\right)
"""
def __init__(self, temp, zp, mode, diffus=THERMAL_DIFFUSIVITY_YEAR):
if len(temp) != len(zp):
raise ValueError("temp and zp must be of same length")
if mode not in ['abrupt', 'linear']:
raise ValueError("Invalid mode: %s. Must be 'abrupt' or 'linear'"
% (mode))
super(SingleChange, self).__init__(
data=temp,
positional=dict(zp=zp),
model=dict(diffus=float(diffus), mode=mode),
nparams=2, islinear=False)
def _get_predicted(self, p):
amp, age = p
zp = self.positional['zp']
diffus = self.model['diffus']
if self.model['mode'] == 'abrupt':
return abrupt(amp, age, zp, diffus)
if self.model['mode'] == 'linear':
return linear(amp, age, zp, diffus)
def _get_jacobian(self, p):
amp, age = p
zp = self.positional['zp']
diffus = self.model['diffus']
mode = self.model['mode']
if mode == 'abrupt':
tmp = zp / numpy.sqrt(4. * diffus * age)
jac = numpy.transpose([
abrupt(1., age, zp, diffus),
(amp * tmp * numpy.exp(-(tmp ** 2)) /
(numpy.sqrt(numpy.pi) * age))])
if mode == 'linear':
delta = 0.5
jac = numpy.transpose([
linear(1., age, zp, diffus),
(linear(amp, age + delta, zp, diffus) -
linear(amp, age - delta, zp, diffus)) / (2 * delta)])
return jac
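# Minimal end-to-end sketch (mirrors the doctest in SingleChange; assumes the
# fatiando inversion machinery imported above is installed):
if __name__ == '__main__':
    zp = numpy.arange(0, 100, 1)
    temp = abrupt(2.0, 100.0, zp)  # synthetic residuals for a 2 C abrupt change
    solver = SingleChange(temp, zp, mode='abrupt').config('levmarq', initial=[1, 1])
    amp_, age_ = solver.fit().estimate_
    print("amp: %.2f  age: %.2f" % (amp_, age_))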
|
eusoubrasileiro/fatiando_seismic
|
fatiando/geothermal/climsig.py
|
Python
|
bsd-3-clause
| 7,793
|
from paths import rpath,mpath,opath
from make_apex_cubes import all_apexfiles,get_source_tel_line,_is_sci, hdr_to_freq
from pyspeckit.spectrum.readers import read_class
from astropy.table import Table
from astropy import log
from astropy.utils.console import ProgressBar
import numpy as np
import os
import pylab as pl
def tsys_data(plot=False):
if plot:
fig1 = pl.figure(1)
fig2 = pl.figure(2)
fig1.clf()
fig2.clf()
ax1 = fig1.gca()
ax2 = fig2.gca()
datadict = {}
tbldict = {}
for apex_filename in all_apexfiles:
log.info(apex_filename)
cl = read_class.ClassObject(apex_filename)
sourcereg,line,telescopes = get_source_tel_line(apex_filename)
sci_sources = [source for source in cl.sources
if _is_sci(source, sourcereg)]
datadict[apex_filename] = {t:[] for t in telescopes}
for telescope in telescopes:
log.info('{0}: {1}'.format(apex_filename, telescope))
selection = [x
for source in sci_sources
for x in cl.select_spectra(telescope=telescope,
line=line,
source=source)]
spdheader = cl.read_observations(selection, progressbar=True)
            datadict[apex_filename][telescope] = list(zip(*[(sp.std(), h['TSYS'])
                                                            for sp, h in ProgressBar(spdheader)]))
tbl = Table([datadict[apex_filename][t][ii]
for t in telescopes
for ii in (0,1)],
names=[t+"_"+s
for t in telescopes
for s in ('STDDEV','TSYS',)],
dtype=['float'
for t in telescopes
for s in ('STDDEV','TSYS',)
])
log.info(os.path.basename(apex_filename)+"_tsys.fits")
tbl.write(os.path.basename(apex_filename)+"_tsys.fits", overwrite=True)
tbldict[apex_filename] = tbl
if plot:
ax1.plot(tbl['{0}_TSYS'.format(telescopes[0])],
tbl['{0}_STDDEV'.format(telescopes[0])],
',', alpha=0.8)
ax1.set_xlabel("TSYS")
ax1.set_ylabel("Std Dev")
fig1.savefig("StdDev_vs_TSYS_{0}.png".format(telescopes[0]))
ax2.plot(tbl['{0}_TSYS'.format(telescopes[1])],
tbl['{0}_STDDEV'.format(telescopes[1])],
',', alpha=0.8)
ax2.set_xlabel("TSYS")
ax2.set_ylabel("Std Dev")
pl.draw()
pl.show()
fig2.savefig("StdDev_vs_TSYS_{0}.png".format(telescopes[1]))
return datadict,tbldict
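# Typical invocation (a sketch; requires the APEX data files referenced via
# paths.py to be present on this machine):
if __name__ == '__main__':
    datadict, tbldict = tsys_data(plot=True)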
|
adamginsburg/APEX_CMZ_H2CO
|
observing/noise_stats.py
|
Python
|
bsd-3-clause
| 2,860
|
from .pandas_vb_common import *
class SetOperations(object):
goal_time = 0.2
def setup(self):
self.rng = date_range('1/1/2000', periods=10000, freq='T')
self.rng2 = self.rng[:(-1)]
# object index with datetime values
if (self.rng.dtype == object):
self.idx_rng = self.rng.view(Index)
else:
self.idx_rng = self.rng.asobject
self.idx_rng2 = self.idx_rng[:(-1)]
# other datetime
N = 100000
A = N - 20000
B = N + 20000
self.dtidx1 = DatetimeIndex(range(N))
self.dtidx2 = DatetimeIndex(range(A, B))
self.dtidx3 = DatetimeIndex(range(N, B))
# integer
self.N = 1000000
self.options = np.arange(self.N)
self.left = Index(
self.options.take(np.random.permutation(self.N)[:(self.N // 2)]))
self.right = Index(
self.options.take(np.random.permutation(self.N)[:(self.N // 2)]))
# strings
N = 10000
strs = tm.rands_array(10, N)
self.leftstr = Index(strs[:N * 2 // 3])
self.rightstr = Index(strs[N // 3:])
def time_datetime_intersection(self):
self.rng.intersection(self.rng2)
def time_datetime_union(self):
self.rng.union(self.rng2)
def time_datetime_difference(self):
self.dtidx1.difference(self.dtidx2)
def time_datetime_difference_disjoint(self):
self.dtidx1.difference(self.dtidx3)
def time_datetime_symmetric_difference(self):
self.dtidx1.symmetric_difference(self.dtidx2)
def time_index_datetime_intersection(self):
self.idx_rng.intersection(self.idx_rng2)
def time_index_datetime_union(self):
self.idx_rng.union(self.idx_rng2)
def time_int64_intersection(self):
self.left.intersection(self.right)
def time_int64_union(self):
self.left.union(self.right)
def time_int64_difference(self):
self.left.difference(self.right)
def time_int64_symmetric_difference(self):
self.left.symmetric_difference(self.right)
def time_str_difference(self):
self.leftstr.difference(self.rightstr)
def time_str_symmetric_difference(self):
self.leftstr.symmetric_difference(self.rightstr)
class Datetime(object):
goal_time = 0.2
def setup(self):
self.dr = pd.date_range('20000101', freq='D', periods=10000)
def time_is_dates_only(self):
self.dr._is_dates_only
class Float64(object):
goal_time = 0.2
def setup(self):
self.idx = tm.makeFloatIndex(1000000)
self.mask = ((np.arange(self.idx.size) % 3) == 0)
self.series_mask = Series(self.mask)
self.baseidx = np.arange(1000000.0)
def time_boolean_indexer(self):
self.idx[self.mask]
def time_boolean_series_indexer(self):
self.idx[self.series_mask]
def time_construct(self):
Index(self.baseidx)
def time_div(self):
(self.idx / 2)
def time_get(self):
self.idx[1]
def time_mul(self):
(self.idx * 2)
def time_slice_indexer_basic(self):
self.idx[:(-1)]
def time_slice_indexer_even(self):
self.idx[::2]
class StringIndex(object):
goal_time = 0.2
def setup(self):
self.idx = tm.makeStringIndex(1000000)
self.mask = ((np.arange(1000000) % 3) == 0)
self.series_mask = Series(self.mask)
def time_boolean_indexer(self):
self.idx[self.mask]
def time_boolean_series_indexer(self):
self.idx[self.series_mask]
def time_slice_indexer_basic(self):
self.idx[:(-1)]
def time_slice_indexer_even(self):
self.idx[::2]
class Multi1(object):
goal_time = 0.2
def setup(self):
(n, k) = (200, 5000)
self.levels = [np.arange(n), tm.makeStringIndex(n).values, (1000 + np.arange(n))]
self.labels = [np.random.choice(n, (k * n)) for lev in self.levels]
self.mi = MultiIndex(levels=self.levels, labels=self.labels)
self.iterables = [tm.makeStringIndex(10000), range(20)]
def time_duplicated(self):
self.mi.duplicated()
def time_from_product(self):
MultiIndex.from_product(self.iterables)
class Multi2(object):
goal_time = 0.2
def setup(self):
self.n = ((((3 * 5) * 7) * 11) * (1 << 10))
(low, high) = (((-1) << 12), (1 << 12))
self.f = (lambda k: np.repeat(np.random.randint(low, high, (self.n // k)), k))
self.i = np.random.permutation(self.n)
self.mi = MultiIndex.from_arrays([self.f(11), self.f(7), self.f(5), self.f(3), self.f(1)])[self.i]
self.a = np.repeat(np.arange(100), 1000)
self.b = np.tile(np.arange(1000), 100)
self.midx2 = MultiIndex.from_arrays([self.a, self.b])
self.midx2 = self.midx2.take(np.random.permutation(np.arange(100000)))
def time_sortlevel_int64(self):
self.mi.sortlevel()
def time_sortlevel_zero(self):
self.midx2.sortlevel(0)
def time_sortlevel_one(self):
self.midx2.sortlevel(1)
class Multi3(object):
goal_time = 0.2
def setup(self):
self.level1 = range(1000)
self.level2 = date_range(start='1/1/2012', periods=100)
self.mi = MultiIndex.from_product([self.level1, self.level2])
def time_datetime_level_values_full(self):
self.mi.copy().values
def time_datetime_level_values_sliced(self):
self.mi[:10].values
class Range(object):
goal_time = 0.2
def setup(self):
self.idx_inc = RangeIndex(start=0, stop=10**7, step=3)
self.idx_dec = RangeIndex(start=10**7, stop=-1, step=-3)
def time_max(self):
self.idx_inc.max()
def time_max_trivial(self):
self.idx_dec.max()
def time_min(self):
self.idx_dec.min()
def time_min_trivial(self):
self.idx_inc.min()
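# Minimal smoke run (a sketch; asv normally drives these classes itself by
# calling setup() once and then timing each time_* method repeatedly):
if __name__ == '__main__':
    bench = Range()
    bench.setup()
    bench.time_max()
    bench.time_min()
    print('Range benchmarks executed once without error')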
|
jmmease/pandas
|
asv_bench/benchmarks/index_object.py
|
Python
|
bsd-3-clause
| 5,886
|
import numpy as np
import pandas as pd
import pytest
from dask.dataframe.hashing import hash_pandas_object
from dask.dataframe.utils import assert_eq
@pytest.mark.parametrize('obj', [
pd.Series([1, 2, 3]),
pd.Series([1.0, 1.5, 3.2]),
pd.Series([1.0, 1.5, 3.2], index=[1.5, 1.1, 3.3]),
pd.Series(['a', 'b', 'c']),
pd.Series([True, False, True]),
pd.Index([1, 2, 3]),
pd.Index([True, False, True]),
pd.DataFrame({'x': ['a', 'b', 'c'], 'y': [1, 2, 3]}),
pd.util.testing.makeMissingDataframe(),
pd.util.testing.makeMixedDataFrame(),
pd.util.testing.makeTimeDataFrame(),
pd.util.testing.makeTimeSeries(),
pd.util.testing.makeTimedeltaIndex()])
def test_hash_pandas_object(obj):
a = hash_pandas_object(obj)
b = hash_pandas_object(obj)
if isinstance(a, np.ndarray):
np.testing.assert_equal(a, b)
else:
assert_eq(a, b)
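# Quick interactive check (a sketch; mirrors the round-trip asserted above):
if __name__ == '__main__':
    s = pd.Series(['a', 'b', 'c'])
    print(hash_pandas_object(s))  # one deterministic uint64 hash per row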
|
chrisbarber/dask
|
dask/dataframe/tests/test_hashing.py
|
Python
|
bsd-3-clause
| 899
|
#! /usr/bin/python
# Copyright 2019 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# gen_overlay_widgets.py:
# Code generation for overlay widgets. Should be run when the widgets declaration file,
# overlay_widgets.json, is changed.
# NOTE: don't run this script directly. Run scripts/run_code_generation.py.
from datetime import date
import json
import sys

# Python 2/3 compatibility: JSON strings load as `unicode` under Python 2 and
# as `str` under Python 3; the isinstance checks below rely on this name.
try:
    unicode
except NameError:
    unicode = str
out_file = 'Overlay_autogen.cpp'
in_file = 'overlay_widgets.json'
template_out_file = u"""// GENERATED FILE - DO NOT EDIT.
// Generated by {script_name} using data from {input_file_name}.
//
// Copyright {copyright_year} The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// {out_file_name}:
// Autogenerated overlay widget declarations.
#include "libANGLE/renderer/driver_utils.h"
#include "libANGLE/Overlay.h"
#include "libANGLE/OverlayWidgets.h"
#include "libANGLE/Overlay_font_autogen.h"
namespace gl
{{
using namespace overlay;
namespace
{{
int GetFontSize(int fontSize, bool largeFont)
{{
if (largeFont && fontSize > 0)
{{
return fontSize - 1;
}}
return fontSize;
}}
}} // anonymous namespace
void Overlay::initOverlayWidgets()
{{
const bool kLargeFont = rx::IsAndroid();
{init_widgets}
}}
}} // namespace gl
"""
template_init_widget = u"""{{
const int32_t fontSize = GetFontSize({font_size}, kLargeFont);
const int32_t offsetX = {offset_x};
const int32_t offsetY = {offset_y};
const int32_t width = {width};
const int32_t height = {height};
widget->{subwidget}type = WidgetType::{type};
widget->{subwidget}fontSize = fontSize;
widget->{subwidget}coords[0] = {coord0};
widget->{subwidget}coords[1] = {coord1};
widget->{subwidget}coords[2] = {coord2};
widget->{subwidget}coords[3] = {coord3};
widget->{subwidget}color[0] = {color_r};
widget->{subwidget}color[1] = {color_g};
widget->{subwidget}color[2] = {color_b};
widget->{subwidget}color[3] = {color_a};
}}
"""
def extract_type_and_constructor(properties):
constructor = properties['type']
args_separated = constructor.split('(', 1)
if len(args_separated) == 1:
return constructor, constructor
type_no_constructor = args_separated[0]
return type_no_constructor, constructor
def get_font_size_constant(properties):
return 'kFontLayer' + properties['font'].capitalize()
def is_graph_type(type):
return type == 'RunningGraph' or type == 'RunningHistogram'
def is_text_type(type):
return not is_graph_type(type)
class OverlayWidget:
def __init__(self, properties, is_graph_description=False):
if not is_graph_description:
self.name = properties['name']
self.type, self.constructor = extract_type_and_constructor(properties)
self.extract_common(properties)
if is_graph_type(self.type):
description_properties = properties['description']
description_properties['type'] = 'Text'
self.description = OverlayWidget(description_properties, True)
def extract_common(self, properties):
self.color = properties['color']
self.coords = properties['coords']
if is_graph_type(self.type):
self.bar_width = properties['bar_width']
self.height = properties['height']
else:
self.font = get_font_size_constant(properties)
self.length = properties['length']
self.negative_alignment = [False, False]
def is_negative_coord(coords, axis, widgets_so_far):
if isinstance(coords[axis], unicode):
coord_split = coords[axis].split('.')
# The coordinate is in the form other_widget.edge.mode
# We simply need to know if other_widget's coordinate is negative or not.
return widgets_so_far[coord_split[0]].negative_alignment[axis]
return coords[axis] < 0
def set_alignment_flags(overlay_widget, widgets_so_far):
overlay_widget.negative_alignment[0] = is_negative_coord(overlay_widget.coords, 0,
widgets_so_far)
overlay_widget.negative_alignment[1] = is_negative_coord(overlay_widget.coords, 1,
widgets_so_far)
if is_graph_type(overlay_widget.type):
set_alignment_flags(overlay_widget.description, widgets_so_far)
def get_offset_helper(widget, axis, smaller_coord_side):
# Assume axis is X. This function returns two values:
# - An offset where the bounding box is placed at,
# - Whether this offset is for the left or right edge.
#
# The input coordinate (widget.coord[axis]) is either:
#
# - a number: in this case, the offset is that number, and its sign determines whether this refers to the left or right edge of the bounding box.
# - other_widget.edge.mode: this has multiple possibilities:
# * edge=left, mode=align: the offset is other_widget.left, the edge is left.
# * edge=left, mode=adjacent: the offset is other_widget.left, the edge is right.
# * edge=right, mode=align: the offset is other_widget.right, the edge is right.
# * edge=right, mode=adjacent: the offset is other_widget.right, the edge is left.
#
# The case for the Y axis is similar, with the edge values being top or bottom.
coord = widget.coords[axis]
if not isinstance(coord, unicode):
is_left = coord >= 0
return coord, is_left
coord_split = coord.split('.')
is_left = coord_split[1] == smaller_coord_side
is_align = coord_split[2] == 'align'
other_widget_coords = 'mState.mOverlayWidgets[WidgetId::' + coord_split[0] + ']->coords'
other_widget_coord_index = axis + (0 if is_left else 2)
offset = other_widget_coords + '[' + str(other_widget_coord_index) + ']'
return offset, is_left == is_align
def get_offset_x(widget):
return get_offset_helper(widget, 0, 'left')
def get_offset_y(widget):
return get_offset_helper(widget, 1, 'top')
def get_bounding_box_coords(offset, width, offset_is_left, is_left_aligned):
# See comment in generate_widget_init_helper. This function is implementing the following:
#
# - offset_is_left && is_left_aligned: [offset, offset + width]
# - offset_is_left && !is_left_aligned: [offset, std::min(offset + width, -1)]
# - !offset_is_left && is_left_aligned: [std::max(1, offset - width), offset]
# - !offset_is_left && !is_left_aligned: [offset - width, offset]
coord_left = offset if offset_is_left else (offset + ' - ' + width)
coord_right = (offset + ' + ' + width) if offset_is_left else offset
if offset_is_left and not is_left_aligned:
coord_right = 'std::min(' + coord_right + ', -1)'
if not offset_is_left and is_left_aligned:
coord_left = 'std::max(' + coord_left + ', 1)'
return coord_left, coord_right
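# Illustrative example (hypothetical inputs): with offset 'offsetX', width
# 'width', offset_is_left=True and is_left_aligned=False, this returns
# ('offsetX', 'std::min(offsetX + width, -1)'); the widget extends right from
# its offset but, being right-aligned, is clamped to the right screen edge.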
def generate_widget_init_helper(widget, is_graph_description=False):
font_size = '0'
# Common attributes
color = [channel / 255.0 for channel in widget.color]
offset_x, offset_x_is_left = get_offset_x(widget)
offset_y, offset_y_is_top = get_offset_y(widget)
if is_text_type(widget.type):
        # Attributes derived from text properties
font_size = widget.font
width = str(widget.length) + ' * kFontGlyphWidths[fontSize]'
height = 'kFontGlyphHeights[fontSize]'
else:
        # Attributes derived from graph properties
width = str(widget.bar_width) + ' * static_cast<uint32_t>(widget->runningValues.size())'
height = widget.height
is_left_aligned = not widget.negative_alignment[0]
is_top_aligned = not widget.negative_alignment[1]
# We have offset_x, offset_y, width and height which together determine the bounding box. If
# offset_x_is_left, the bounding box X would be in [offset_x, offset_x + width], otherwise it
# would be in [offset_x - width, offset_x]. Similarly for y. Since we use negative values to
# mean aligned to the right side of the screen, we need to make sure that:
#
# - if left aligned: offset_x - width is at minimum 1
# - if right aligned: offset_x + width is at maximum -1
#
# We therefore have the following combinations for the X axis:
#
# - offset_x_is_left && is_left_aligned: [offset_x, offset_x + width]
# - offset_x_is_left && !is_left_aligned: [offset_x, std::min(offset_x + width, -1)]
# - !offset_x_is_left && is_left_aligned: [std::max(1, offset_x - width), offset_x]
# - !offset_x_is_left && !is_left_aligned: [offset_x - width, offset_x]
#
# Similarly for y.
coord0, coord2 = get_bounding_box_coords('offsetX', 'width', offset_x_is_left, is_left_aligned)
coord1, coord3 = get_bounding_box_coords('offsetY', 'height', offset_y_is_top, is_top_aligned)
return template_init_widget.format(
subwidget='description.' if is_graph_description else '',
offset_x=offset_x,
offset_y=offset_y,
width=width,
height=height,
type=widget.type,
font_size=font_size,
coord0=coord0,
coord1=coord1,
coord2=coord2,
coord3=coord3,
color_r=color[0],
color_g=color[1],
color_b=color[2],
color_a=color[3])
def generate_widget_init(widget):
widget_init = '{\n' + widget.type + ' *widget = new ' + widget.constructor + ';\n'
widget_init += generate_widget_init_helper(widget)
widget_init += 'mState.mOverlayWidgets[WidgetId::' + widget.name + '].reset(widget);\n'
if is_graph_type(widget.type):
widget_init += generate_widget_init_helper(widget.description, True)
widget_init += '}\n'
return widget_init
def main():
if len(sys.argv) == 2 and sys.argv[1] == 'inputs':
print(in_file)
return
if len(sys.argv) == 2 and sys.argv[1] == 'outputs':
print(out_file)
return
with open(in_file) as fin:
layout = json.loads(fin.read())
    # Read the layouts from the json file and determine the alignment of widgets
    # (as they can refer to other widgets).
overlay_widgets = {}
for widget_properties in layout['widgets']:
widget = OverlayWidget(widget_properties)
overlay_widgets[widget.name] = widget
set_alignment_flags(widget, overlay_widgets)
# Go over the widgets again and generate initialization code. Note that we need to iterate over
# the widgets in order, so we can't use the overlay_widgets dictionary for iteration.
init_widgets = []
for widget_properties in layout['widgets']:
init_widgets.append(generate_widget_init(overlay_widgets[widget_properties['name']]))
with open(out_file, 'w') as outfile:
outfile.write(
template_out_file.format(
script_name=__file__,
copyright_year=date.today().year,
input_file_name=in_file,
out_file_name=out_file,
init_widgets='\n'.join(init_widgets)))
if __name__ == '__main__':
sys.exit(main())
|
endlessm/chromium-browser
|
third_party/angle/src/libANGLE/gen_overlay_widgets.py
|
Python
|
bsd-3-clause
| 11,216
|
import json
from unittest.mock import patch
from federation.hostmeta.parsers import (
parse_nodeinfo_document, parse_nodeinfo2_document, parse_statisticsjson_document, int_or_none,
parse_mastodon_document, parse_matrix_document)
from federation.tests.fixtures.hostmeta import (
NODEINFO2_10_DOC, NODEINFO_10_DOC, NODEINFO_20_DOC, STATISTICS_JSON_DOC, MASTODON_DOC, MASTODON_ACTIVITY_DOC,
MASTODON_RC_DOC, MASTODON_DOC_NULL_CONTACT, MATRIX_SYNAPSE_DOC, PLEROMA_MASTODON_API_DOC,
NODEINFO_21_DOC_INVALID_USAGE_COUNTS, MASTODON_DOC_3)
class TestIntOrNone:
def test_returns_negative_values_as_none(self):
assert int_or_none(-1) is None
class TestParseMastodonDocument:
@patch('federation.hostmeta.fetchers.fetch_nodeinfo_document', autospec=True)
def test_calls_nodeinfo_fetcher_if_pleroma(self, mock_fetch):
parse_mastodon_document(json.loads(PLEROMA_MASTODON_API_DOC), 'example.com')
mock_fetch.assert_called_once_with('example.com')
@patch('federation.hostmeta.parsers.fetch_document')
def test_parse_mastodon_document(self, mock_fetch):
mock_fetch.return_value = MASTODON_ACTIVITY_DOC, 200, None
result = parse_mastodon_document(json.loads(MASTODON_DOC), 'example.com')
assert result == {
'organization': {
'account': 'https://mastodon.local/@Admin',
'contact': 'hello@mastodon.local',
'name': 'Admin dude',
},
'host': 'example.com',
'name': 'Mastodon',
'open_signups': True,
'protocols': ["ostatus", "activitypub"],
'relay': False,
'server_meta': {},
'services': [],
'platform': 'mastodon',
'version': '2.4.0',
'features': {},
'activity': {
'users': {
'total': 159726,
'half_year': 90774,
'monthly': 27829,
'weekly': 8779,
},
'local_posts': 6059606,
'local_comments': None,
},
}
@patch('federation.hostmeta.parsers.fetch_document')
def test_parse_mastodon_document__null_contact_account(self, mock_fetch):
mock_fetch.return_value = MASTODON_ACTIVITY_DOC, 200, None
result = parse_mastodon_document(json.loads(MASTODON_DOC_NULL_CONTACT), 'example.com')
assert result == {
'organization': {
'account': '',
'contact': '',
'name': '',
},
'host': 'example.com',
'name': 'Mastodon',
'open_signups': True,
'protocols': ["ostatus", "activitypub"],
'relay': False,
'server_meta': {},
'services': [],
'platform': 'mastodon',
'version': '2.4.0',
'features': {},
'activity': {
'users': {
'total': 159726,
'half_year': 90774,
'monthly': 27829,
'weekly': 8779,
},
'local_posts': 6059606,
'local_comments': None,
},
}
@patch('federation.hostmeta.parsers.fetch_document')
def test_parse_mastodon_document__rc_version(self, mock_fetch):
mock_fetch.return_value = MASTODON_ACTIVITY_DOC, 200, None
result = parse_mastodon_document(json.loads(MASTODON_RC_DOC), 'example.com')
assert result == {
'organization': {
'account': 'https://mastodon.local/@Admin',
'contact': 'hello@mastodon.local',
'name': 'Admin dude',
},
'host': 'example.com',
'name': 'Mastodon',
'open_signups': True,
'protocols': ["ostatus", "activitypub"],
'relay': False,
'server_meta': {},
'services': [],
'platform': 'mastodon',
'version': '2.4.1rc1',
'features': {},
'activity': {
'users': {
'total': 159726,
'half_year': 90774,
'monthly': 27829,
'weekly': 8779,
},
'local_posts': 6059606,
'local_comments': None,
},
}
@patch('federation.hostmeta.parsers.fetch_document')
def test_parse_mastodon_document__protocols(self, mock_fetch):
mock_fetch.return_value = MASTODON_ACTIVITY_DOC, 200, None
result = parse_mastodon_document(json.loads(MASTODON_DOC_3), 'example.com')
assert result == {
'organization': {
'account': 'https://mastodon.local/@Admin',
'contact': 'hello@mastodon.local',
'name': 'Admin dude',
},
'host': 'example.com',
'name': 'Mastodon',
'open_signups': True,
'protocols': ["activitypub"],
'relay': False,
'server_meta': {},
'services': [],
'platform': 'mastodon',
'version': '3.0.0',
'features': {},
'activity': {
'users': {
'total': 159726,
'half_year': 90774,
'monthly': 27829,
'weekly': 8779,
},
'local_posts': 6059606,
'local_comments': None,
},
}
class TestParseMatrixDocument:
@patch('federation.hostmeta.parsers.send_document', autospec=True, return_value=(403, None))
def test_parse_matrix_document__signups_closed(self, mock_send):
result = parse_matrix_document(json.loads(MATRIX_SYNAPSE_DOC), 'feneas.org')
assert result == {
'organization': {
'account': '',
'contact': '',
'name': '',
},
'host': 'feneas.org',
'name': 'feneas.org',
'open_signups': False,
'protocols': ['matrix'],
'relay': '',
'server_meta': {},
'services': [],
'platform': 'matrix|synapse',
'version': '0.33.8',
'features': {},
'activity': {
'users': {
'total': None,
'half_year': None,
'monthly': None,
'weekly': None,
},
'local_posts': None,
'local_comments': None,
},
}
@patch('federation.hostmeta.parsers.send_document', autospec=True, return_value=(401, None))
def test_parse_matrix_document__signups_open(self, mock_send):
result = parse_matrix_document(json.loads(MATRIX_SYNAPSE_DOC), 'feneas.org')
assert result == {
'organization': {
'account': '',
'contact': '',
'name': '',
},
'host': 'feneas.org',
'name': 'feneas.org',
'open_signups': True,
'protocols': ['matrix'],
'relay': '',
'server_meta': {},
'services': [],
'platform': 'matrix|synapse',
'version': '0.33.8',
'features': {},
'activity': {
'users': {
'total': None,
'half_year': None,
'monthly': None,
'weekly': None,
},
'local_posts': None,
'local_comments': None,
},
}
class TestParseNodeInfoDocument:
def test_parse_nodeinfo_10_document(self):
result = parse_nodeinfo_document(json.loads(NODEINFO_10_DOC), 'iliketoast.net')
assert result == {
'organization': {
'account': 'podmin@iliketoast.net',
'contact': '',
'name': '',
},
'host': 'iliketoast.net',
'name': 'I Like Toast',
'open_signups': True,
'protocols': ["diaspora"],
'relay': '',
'server_meta': {},
'services': ["tumblr", "twitter"],
'platform': 'diaspora',
'version': '0.7.4.0-pd0313756',
'features': {
"nodeName": "I Like Toast",
"xmppChat": False,
"camo": {
"markdown": False,
"opengraph": False,
"remotePods": False
},
"adminAccount": "podmin",
},
'activity': {
'users': {
'total': 348,
'half_year': 123,
'monthly': 62,
'weekly': 19,
},
'local_posts': 8522,
'local_comments': 17671,
},
}
def test_parse_nodeinfo_20_document(self):
result = parse_nodeinfo_document(json.loads(NODEINFO_20_DOC), 'iliketoast.net')
assert result == {
'organization': {
'account': 'podmin@iliketoast.net',
'contact': '',
'name': '',
},
'host': 'iliketoast.net',
'name': 'I Like Toast',
'open_signups': True,
'protocols': ["diaspora"],
'relay': '',
'server_meta': {},
'services': ["tumblr", "twitter"],
'platform': 'diaspora',
'version': '0.7.4.0-pd0313756',
'features': {
"nodeName": "I Like Toast",
"xmppChat": False,
"camo": {
"markdown": False,
"opengraph": False,
"remotePods": False
},
"adminAccount": "podmin",
},
'activity': {
'users': {
'total': 348,
'half_year': 123,
'monthly': 62,
'weekly': 19,
},
'local_posts': 8522,
'local_comments': 17671,
},
}
def test_parse_nodeinfo_21_document__invalid_usage_counts(self):
result = parse_nodeinfo_document(json.loads(NODEINFO_21_DOC_INVALID_USAGE_COUNTS), 'pleroma.local')
assert result == {
'organization': {
'account': '',
'contact': '',
'name': '',
},
'host': 'pleroma.local',
'name': 'pleroma.local',
'open_signups': True,
'protocols': ["activitypub"],
'relay': '',
'server_meta': {},
'services': [],
'platform': 'pleroma',
'version': '0.7.4.0-pd0313756',
'features': {},
'activity': {
'users': {
'total': 348,
'half_year': None,
'monthly': None,
'weekly': None,
},
'local_posts': None,
'local_comments': None,
},
}
class TestParseNodeInfo2Document:
def test_parse_nodeinfo2_10_document(self):
result = parse_nodeinfo2_document(json.loads(NODEINFO2_10_DOC), 'example.com')
assert result == {
'organization': {
'account': 'https://example.com/u/admin',
'contact': 'foobar@example.com',
'name': 'Example organization',
},
'host': 'example.com',
'name': 'Example server',
'open_signups': True,
'protocols': ["diaspora", "zot"],
'relay': "tags",
'server_meta': {},
'services': ["facebook", "gnusocial", "twitter"],
'platform': 'example',
'version': '0.5.0',
'features': {},
'activity': {
'users': {
'total': 123,
'half_year': 42,
'monthly': 23,
'weekly': 10,
},
'local_posts': 500,
'local_comments': 1000,
},
}
def test_parse_nodeinfo2_10_document__cleans_port_from_host(self):
response = json.loads(NODEINFO2_10_DOC)
response["server"]["baseUrl"] = "https://example.com:5221"
result = parse_nodeinfo2_document(response, 'example.com')
assert result == {
'organization': {
'account': 'https://example.com/u/admin',
'contact': 'foobar@example.com',
'name': 'Example organization',
},
'host': 'example.com',
'name': 'Example server',
'open_signups': True,
'protocols': ["diaspora", "zot"],
'relay': "tags",
'server_meta': {},
'services': ["facebook", "gnusocial", "twitter"],
'platform': 'example',
'version': '0.5.0',
'features': {},
'activity': {
'users': {
'total': 123,
'half_year': 42,
'monthly': 23,
'weekly': 10,
},
'local_posts': 500,
'local_comments': 1000,
},
}
class TestParseStatisticsJSONDocument:
def test_parse_statisticsjson_document(self):
result = parse_statisticsjson_document(json.loads(STATISTICS_JSON_DOC), 'example.com')
assert result == {
'organization': {
'account': '',
'contact': '',
'name': '',
},
'host': 'example.com',
'name': 'diaspora*',
'open_signups': True,
'protocols': ["diaspora"],
'relay': '',
'server_meta': {},
'services': [],
'platform': 'diaspora',
'version': '0.5.7.0-p56ebcc76',
'features': {},
'activity': {
'users': {
'total': None,
'half_year': None,
'monthly': None,
'weekly': None,
},
'local_posts': None,
'local_comments': None,
},
}
|
jaywink/federation
|
federation/tests/hostmeta/test_parsers.py
|
Python
|
bsd-3-clause
| 14,602
|
try:
import urlparse
except ImportError:
#py3k
from urllib import parse as urlparse
import json
from .firebase_token_generator import FirebaseTokenGenerator
from .decorators import http_connection
from .multiprocess_pool import process_pool
from .jsonutil import JSONEncoder
__all__ = ['FirebaseAuthentication', 'FirebaseApplication']
@http_connection(60)
def make_get_request(url, params, headers, connection):
"""
Helper function that makes an HTTP GET request to the given firebase
endpoint. Timeout is 60 seconds.
`url`: The full URL of the firebase endpoint (DSN appended.)
`params`: Python dict that is appended to the URL like a querystring.
`headers`: Python dict. HTTP request headers.
`connection`: Predefined HTTP connection instance. If not given, it
is supplied by the `decorators.http_connection` function.
    The return value is a Python dict deserialized by the JSON decoder. However,
    if the status code is not 2xx or 403, a requests.HTTPError is raised.
connection = connection_pool.get_available_connection()
    response = make_get_request('http://firebase.localhost/users', {'print': 'silent'},
{'X_FIREBASE_SOMETHING': 'Hi'}, connection)
response => {'1': 'John Doe', '2': 'Jane Doe'}
"""
timeout = getattr(connection, 'timeout')
response = connection.get(url, params=params, headers=headers, timeout=timeout)
if response.ok or response.status_code == 403:
return response.json() if response.content else None
else:
response.raise_for_status()
@http_connection(60)
def make_put_request(url, data, params, headers, connection):
"""
Helper function that makes an HTTP PUT request to the given firebase
endpoint. Timeout is 60 seconds.
`url`: The full URL of the firebase endpoint (DSN appended.)
`data`: JSON serializable dict that will be stored in the remote storage.
`params`: Python dict that is appended to the URL like a querystring.
`headers`: Python dict. HTTP request headers.
`connection`: Predefined HTTP connection instance. If not given, it
is supplied by the `decorators.http_connection` function.
    The return value is a Python dict deserialized by the JSON decoder. However,
    if the status code is not 2xx or 403, a requests.HTTPError is raised.
connection = connection_pool.get_available_connection()
response = make_put_request('http://firebase.localhost/users',
'{"1": "Ozgur Vatansever"}',
{'X_FIREBASE_SOMETHING': 'Hi'}, connection)
response => {'1': 'Ozgur Vatansever'} or {'error': 'Permission denied.'}
"""
timeout = getattr(connection, 'timeout')
response = connection.put(url, data=data, params=params, headers=headers,
timeout=timeout)
if response.ok or response.status_code == 403:
return response.json() if response.content else None
else:
response.raise_for_status()
@http_connection(60)
def make_post_request(url, data, params, headers, connection):
"""
Helper function that makes an HTTP POST request to the given firebase
endpoint. Timeout is 60 seconds.
`url`: The full URL of the firebase endpoint (DSN appended.)
`data`: JSON serializable dict that will be stored in the remote storage.
`params`: Python dict that is appended to the URL like a querystring.
`headers`: Python dict. HTTP request headers.
`connection`: Predefined HTTP connection instance. If not given, it
is supplied by the `decorators.http_connection` function.
    The return value is a Python dict deserialized by the JSON decoder. However,
    if the status code is not 2xx or 403, a requests.HTTPError is raised.
connection = connection_pool.get_available_connection()
    response = make_post_request('http://firebase.localhost/users/',
'{"Ozgur Vatansever"}', {'X_FIREBASE_SOMETHING': 'Hi'}, connection)
response => {u'name': u'-Inw6zol_2f5ThHwVcSe'} or {'error': 'Permission denied.'}
"""
timeout = getattr(connection, 'timeout')
response = connection.post(url, data=data, params=params, headers=headers,
timeout=timeout)
if response.ok or response.status_code == 403:
return response.json() if response.content else None
else:
response.raise_for_status()
@http_connection(60)
def make_patch_request(url, data, params, headers, connection):
"""
Helper function that makes an HTTP PATCH request to the given firebase
endpoint. Timeout is 60 seconds.
`url`: The full URL of the firebase endpoint (DSN appended.)
`data`: JSON serializable dict that will be stored in the remote storage.
`params`: Python dict that is appended to the URL like a querystring.
`headers`: Python dict. HTTP request headers.
`connection`: Predefined HTTP connection instance. If not given, it
is supplied by the `decorators.http_connection` function.
    The return value is a Python dict deserialized by the JSON decoder. However,
    if the status code is not 2xx or 403, a requests.HTTPError is raised.
connection = connection_pool.get_available_connection()
    response = make_patch_request('http://firebase.localhost/users/1',
'{"Ozgur Vatansever"}', {'X_FIREBASE_SOMETHING': 'Hi'}, connection)
response => {'Ozgur Vatansever'} or {'error': 'Permission denied.'}
"""
timeout = getattr(connection, 'timeout')
response = connection.patch(url, data=data, params=params, headers=headers,
timeout=timeout)
if response.ok or response.status_code == 403:
return response.json() if response.content else None
else:
response.raise_for_status()
@http_connection(60)
def make_delete_request(url, params, headers, connection):
"""
Helper function that makes an HTTP DELETE request to the given firebase
endpoint. Timeout is 60 seconds.
`url`: The full URL of the firebase endpoint (DSN appended.)
`params`: Python dict that is appended to the URL like a querystring.
`headers`: Python dict. HTTP request headers.
`connection`: Predefined HTTP connection instance. If not given, it
is supplied by the `decorators.http_connection` function.
    The return value is None. However, if the status code is not 2xx or 403,
    a requests.HTTPError is raised.
connection = connection_pool.get_available_connection()
    response = make_delete_request('http://firebase.localhost/users/1',
{'X_FIREBASE_SOMETHING': 'Hi'}, connection)
    response => None or {'error': 'Permission denied.'}
"""
timeout = getattr(connection, 'timeout')
response = connection.delete(url, params=params, headers=headers, timeout=timeout)
if response.ok or response.status_code == 403:
return response.json() if response.content else None
else:
response.raise_for_status()
class FirebaseUser(object):
"""
Class that wraps the credentials of the authenticated user. Think of
this as a container that holds authentication related data.
"""
def __init__(self, email, firebase_auth_token, provider, id=None):
self.email = email
self.firebase_auth_token = firebase_auth_token
self.provider = provider
self.id = id
class FirebaseAuthentication(object):
"""
Class that wraps the Firebase SimpleLogin mechanism. Actually this
class does not trigger a connection, simply fakes the auth action.
In addition, the provided email and password information is totally
useless and they never appear in the ``auth`` variable at the server.
"""
def __init__(self, secret, email, debug=False, admin=False, extra=None):
self.authenticator = FirebaseTokenGenerator(secret, debug, admin)
self.email = email
self.provider = 'password'
self.extra = (extra or {}).copy()
self.extra.update({'debug': debug, 'admin': admin,
'email': self.email, 'provider': self.provider})
def get_user(self):
"""
Method that gets the authenticated user. The returning user has
the token, email and the provider data.
"""
token = self.authenticator.create_token(self.extra)
user_id = self.extra.get('id')
return FirebaseUser(self.email, token, self.provider, user_id)
class FirebaseApplication(object):
"""
Class that actually connects with the Firebase backend via HTTP calls.
    It fully implements the RESTful specifications defined by Firebase. Data
    is transmitted in JSON format in both directions. This class needs a DSN value
that defines the base URL of the backend, and if needed, authentication
credentials are accepted and then are taken into consideration while
constructing HTTP requests.
There are also the corresponding asynchronous versions of each HTTP method.
The async calls make use of the on-demand process pool defined under the
module `async`.
    auth = FirebaseAuthentication(FIREBASE_SECRET, 'firebase@firebase.com')
firebase = FirebaseApplication('https://firebase.localhost', auth)
That's all there is. Then you start connecting with the backend:
json_dict = firebase.get('/users', '1', {'print': 'pretty'})
    print(json_dict)
{'1': 'John Doe', '2': 'Jane Doe', ...}
Async version is:
firebase.get('/users', '1', {'print': 'pretty'}, callback=log_json_dict)
The callback method is fed with the returning response.
"""
NAME_EXTENSION = '.json'
URL_SEPERATOR = '/'
def __init__(self, dsn, authentication=None):
assert dsn.startswith('https://'), 'DSN must be a secure URL'
self.dsn = dsn
self.authentication = authentication
def _build_endpoint_url(self, url, name=None):
"""
Method that constructs a full url with the given url and the
snapshot name.
Example:
full_url = _build_endpoint_url('/users', '1')
            full_url => 'https://firebase.localhost/users/1.json'
"""
if not url.endswith(self.URL_SEPERATOR):
url = url + self.URL_SEPERATOR
if name is None:
name = ''
return '%s%s%s' % (urlparse.urljoin(self.dsn, url), name,
self.NAME_EXTENSION)
def _authenticate(self, params, headers):
"""
Method that simply adjusts authentication credentials for the
request.
`params` is the querystring of the request.
`headers` is the header of the request.
If auth instance is not provided to this class, this method simply
returns without doing anything.
"""
if self.authentication:
user = self.authentication.get_user()
params.update({'auth': user.firebase_auth_token})
headers.update(self.authentication.authenticator.HEADERS)
@http_connection(60)
def get(self, url, name, connection, params=None, headers=None):
"""
Synchronous GET request.
"""
if name is None: name = ''
params = params or {}
headers = headers or {}
endpoint = self._build_endpoint_url(url, name)
self._authenticate(params, headers)
return make_get_request(endpoint, params, headers, connection=connection)
def get_async(self, url, name, callback=None, params=None, headers=None):
"""
Asynchronous GET request with the process pool.
"""
if name is None: name = ''
params = params or {}
headers = headers or {}
endpoint = self._build_endpoint_url(url, name)
self._authenticate(params, headers)
process_pool.apply_async(make_get_request,
args=(endpoint, params, headers), callback=callback)
@http_connection(60)
def put(self, url, name, data, connection, params=None, headers=None):
"""
        Synchronous PUT request. There will be no return payload from
        the server, because the request is made with the ``silent``
        parameter. ``data`` must be a JSONable value.
"""
assert name, 'Snapshot name must be specified'
params = params or {}
headers = headers or {}
endpoint = self._build_endpoint_url(url, name)
self._authenticate(params, headers)
data = json.dumps(data, cls=JSONEncoder)
return make_put_request(endpoint, data, params, headers,
connection=connection)
def put_async(self, url, name, data, callback=None, params=None, headers=None):
"""
Asynchronous PUT request with the process pool.
"""
if name is None: name = ''
params = params or {}
headers = headers or {}
endpoint = self._build_endpoint_url(url, name)
self._authenticate(params, headers)
data = json.dumps(data, cls=JSONEncoder)
process_pool.apply_async(make_put_request,
args=(endpoint, data, params, headers),
callback=callback)
@http_connection(60)
def post(self, url, data, connection, params=None, headers=None):
"""
Synchronous POST request. ``data`` must be a JSONable value.
"""
params = params or {}
headers = headers or {}
endpoint = self._build_endpoint_url(url, None)
self._authenticate(params, headers)
data = json.dumps(data, cls=JSONEncoder)
return make_post_request(endpoint, data, params, headers,
connection=connection)
def post_async(self, url, data, callback=None, params=None, headers=None):
"""
Asynchronous POST request with the process pool.
"""
params = params or {}
headers = headers or {}
endpoint = self._build_endpoint_url(url, None)
self._authenticate(params, headers)
data = json.dumps(data, cls=JSONEncoder)
process_pool.apply_async(make_post_request,
args=(endpoint, data, params, headers),
callback=callback)
@http_connection(60)
def patch(self, url, data, connection, params=None, headers=None):
"""
        Synchronous PATCH request. ``data`` must be a JSONable value.
"""
params = params or {}
headers = headers or {}
endpoint = self._build_endpoint_url(url, None)
self._authenticate(params, headers)
data = json.dumps(data, cls=JSONEncoder)
return make_patch_request(endpoint, data, params, headers,
connection=connection)
def patch_async(self, url, data, callback=None, params=None, headers=None):
"""
Asynchronous PATCH request with the process pool.
"""
params = params or {}
headers = headers or {}
endpoint = self._build_endpoint_url(url, None)
self._authenticate(params, headers)
data = json.dumps(data, cls=JSONEncoder)
process_pool.apply_async(make_patch_request,
args=(endpoint, data, params, headers),
callback=callback)
@http_connection(60)
def delete(self, url, name, connection, params=None, headers=None):
"""
        Synchronous DELETE request.
"""
if not name: name = ''
params = params or {}
headers = headers or {}
endpoint = self._build_endpoint_url(url, name)
self._authenticate(params, headers)
return make_delete_request(endpoint, params, headers, connection=connection)
def delete_async(self, url, name, callback=None, params=None, headers=None):
"""
Asynchronous DELETE request with the process pool.
"""
if not name: name = ''
params = params or {}
headers = headers or {}
endpoint = self._build_endpoint_url(url, name)
self._authenticate(params, headers)
process_pool.apply_async(make_delete_request,
args=(endpoint, params, headers), callback=callback)
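# Offline sketch of endpoint construction (no network required; the DSN below
# is a placeholder):
if __name__ == '__main__':
    app = FirebaseApplication('https://example.firebaseio.com')
    print(app._build_endpoint_url('/users', '1'))
    # -> https://example.firebaseio.com/users/1.json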
|
neversun/sailfish-hackernews
|
pyPackages/python_firebase-noarch/firebase/firebase.py
|
Python
|
mit
| 16,320
|
# -*- coding: utf-8 -*-
from django.forms import fields
from django.forms import widgets
from djng.forms import field_mixins
from . import widgets as bs3widgets
class BooleanFieldMixin(field_mixins.BooleanFieldMixin):
def get_converted_widget(self):
assert(isinstance(self, fields.BooleanField))
if isinstance(self.widget, widgets.CheckboxInput):
self.widget_css_classes = None
if not isinstance(self.widget, bs3widgets.CheckboxInput):
new_widget = bs3widgets.CheckboxInput(self.label)
                # Adopt the original widget's __dict__ but keep the new widget's
                # choice_label (the right-hand side is evaluated before __dict__
                # is replaced, so the label survives the swap).
                new_widget.__dict__, new_widget.choice_label = self.widget.__dict__, new_widget.choice_label
self.label = '' # label is rendered by the widget and not by BoundField.label_tag()
return new_widget
class ChoiceFieldMixin(field_mixins.ChoiceFieldMixin):
def get_converted_widget(self):
assert(isinstance(self, fields.ChoiceField))
if isinstance(self.widget, widgets.RadioSelect):
self.widget_css_classes = None
if not isinstance(self.widget, bs3widgets.RadioSelect):
new_widget = bs3widgets.RadioSelect()
new_widget.__dict__ = self.widget.__dict__
return new_widget
class MultipleChoiceFieldMixin(field_mixins.MultipleChoiceFieldMixin):
def get_converted_widget(self):
assert(isinstance(self, fields.MultipleChoiceField))
if isinstance(self.widget, widgets.CheckboxSelectMultiple):
self.widget_css_classes = None
if not isinstance(self.widget, bs3widgets.CheckboxSelectMultiple):
new_widget = bs3widgets.CheckboxSelectMultiple()
new_widget.__dict__ = self.widget.__dict__
return new_widget
|
dpetzold/django-angular
|
djng/styling/bootstrap3/field_mixins.py
|
Python
|
mit
| 1,771
|
from democracy.enums import InitialSectionType
INITIAL_SECTION_TYPE_DATA = [
{
'identifier': InitialSectionType.MAIN,
'name_singular': 'pääosio',
'name_plural': 'pääosiot',
},
{
'identifier': InitialSectionType.CLOSURE_INFO,
'name_singular': 'sulkeutumistiedote',
'name_plural': 'sulkeutumistiedotteet',
},
{
'identifier': InitialSectionType.SCENARIO,
'name_singular': 'vaihtoehto',
'name_plural': 'vaihtoehdot',
},
{
'identifier': InitialSectionType.PART,
'name_singular': 'osa-alue',
'name_plural': 'osa-alueet',
},
]
def create_initial_section_types(section_type_model):
for section in INITIAL_SECTION_TYPE_DATA:
section_type_model.objects.update_or_create(identifier=section['identifier'], defaults=section)
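# Typical use, e.g. from a data migration (a sketch; assumes the project's
# SectionType model is the one passed in):
#   from democracy.models import SectionType
#   create_initial_section_types(SectionType)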
|
City-of-Helsinki/kerrokantasi
|
democracy/models/initial_data.py
|
Python
|
mit
| 860
|
#!/usr/bin/env python
import sys, os, os.path, signal
import jsshellhelper
from optparse import OptionParser
from subprocess import Popen, PIPE, STDOUT
# Uses jsshell https://developer.mozilla.org/en/Introduction_to_the_JavaScript_shell
class Packer(object):
toolsdir = os.path.dirname(os.path.abspath(__file__))
def run(self, jsshell, filename):
tmpFile = jsshellhelper.createEscapedFile(filename)
cmd = [jsshell,
'-f', os.path.join(self.toolsdir, 'packer.js'),
'-f', os.path.join(self.toolsdir, 'cleaner.js'),
'-f', tmpFile,
'-e', "var input = __unescape_string(); print(pack(input, 62, 1, 0));"]
proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
stdout, stderr = proc.communicate()
        if stdout:
            print(stdout)
        else:
            print(stderr)
tmpFile = jsshellhelper.cleanUp(tmpFile)
def main():
parser = OptionParser()
options, args = parser.parse_args()
if len(args) < 2:
        sys.stderr.write("Usage: %s <path to jsshell> <js file>\n" % sys.argv[0])
sys.exit(1)
packer = Packer()
packer.run(args[0], args[1])
if __name__ == '__main__':
main()
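# Example invocation (paths are illustrative):
#   python packer.py /usr/local/bin/js ../processing.js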
|
edsfault/Edsfault-processing.js
|
tools/packer.py
|
Python
|
mit
| 1,163
|
# Python test set -- part 6, built-in types
from test_support import *
print '6. Built-in types'
print '6.1 Truth value testing'
if None: raise TestFailed, 'None is true instead of false'
if 0: raise TestFailed, '0 is true instead of false'
if 0L: raise TestFailed, '0L is true instead of false'
if 0.0: raise TestFailed, '0.0 is true instead of false'
if '': raise TestFailed, '\'\' is true instead of false'
if (): raise TestFailed, '() is true instead of false'
if []: raise TestFailed, '[] is true instead of false'
if {}: raise TestFailed, '{} is true instead of false'
if not 1: raise TestFailed, '1 is false instead of true'
if not 1L: raise TestFailed, '1L is false instead of true'
if not 1.0: raise TestFailed, '1.0 is false instead of true'
if not 'x': raise TestFailed, '\'x\' is false instead of true'
if not (1, 1): raise TestFailed, '(1, 1) is false instead of true'
if not [1]: raise TestFailed, '[1] is false instead of true'
if not {'x': 1}: raise TestFailed, '{\'x\': 1} is false instead of true'
def f(): pass
class C: pass
import sys
x = C()
if not f: raise TestFailed, 'f is false instead of true'
if not C: raise TestFailed, 'C is false instead of true'
if not sys: raise TestFailed, 'sys is false instead of true'
if not x: raise TestFailed, 'x is false instead of true'
print '6.2 Boolean operations'
if 0 or 0: raise TestFailed, '0 or 0 is true instead of false'
if 1 and 1: pass
else: raise TestFailed, '1 and 1 is false instead of true'
if not 1: raise TestFailed, 'not 1 is true instead of false'
print '6.3 Comparisons'
if 0 < 1 <= 1 == 1 >= 1 > 0 != 1: pass
else: raise TestFailed, 'int comparisons failed'
if 0L < 1L <= 1L == 1L >= 1L > 0L != 1L: pass
else: raise TestFailed, 'long int comparisons failed'
if 0.0 < 1.0 <= 1.0 == 1.0 >= 1.0 > 0.0 != 1.0: pass
else: raise TestFailed, 'float comparisons failed'
if '' < 'a' <= 'a' == 'a' < 'abc' < 'abd' < 'b': pass
else: raise TestFailed, 'string comparisons failed'
if 0 in [0] and 0 not in [1]: pass
else: raise TestFailed, 'membership test failed'
if None is None and [] is not []: pass
else: raise TestFailed, 'identity test failed'
print '6.4 Numeric types (mostly conversions)'
if 0 != 0L or 0 != 0.0 or 0L != 0.0: raise TestFailed, 'mixed comparisons'
if 1 != 1L or 1 != 1.0 or 1L != 1.0: raise TestFailed, 'mixed comparisons'
if -1 != -1L or -1 != -1.0 or -1L != -1.0:
raise TestFailed, 'int/long/float value not equal'
if int(1.9) == 1 == int(1.1) and int(-1.1) == -1 == int(-1.9): pass
else: raise TestFailed, 'int() does not round properly'
if long(1.9) == 1L == long(1.1) and long(-1.1) == -1L == long(-1.9): pass
else: raise TestFailed, 'long() does not round properly'
if float(1) == 1.0 and float(-1) == -1.0 and float(0) == 0.0: pass
else: raise TestFailed, 'float() does not work properly'
print '6.4.1 32-bit integers'
if 12 + 24 != 36: raise TestFailed, 'int op'
if 12 + (-24) != -12: raise TestFailed, 'int op'
if (-12) + 24 != 12: raise TestFailed, 'int op'
if (-12) + (-24) != -36: raise TestFailed, 'int op'
if not 12 < 24: raise TestFailed, 'int op'
if not -24 < -12: raise TestFailed, 'int op'
# Test for a particular bug in integer multiply
xsize, ysize, zsize = 238, 356, 4
if not (xsize*ysize*zsize == zsize*xsize*ysize == 338912):
raise TestFailed, 'int mul commutativity'
# And another.
m = -sys.maxint - 1
for divisor in 1, 2, 4, 8, 16, 32:
j = m // divisor
prod = divisor * j
if prod != m:
raise TestFailed, "%r * %r == %r != %r" % (divisor, j, prod, m)
if type(prod) is not int:
raise TestFailed, ("expected type(prod) to be int, not %r" %
type(prod))
# Check for expected * overflow to long.
for divisor in 1, 2, 4, 8, 16, 32:
j = m // divisor - 1
prod = divisor * j
if type(prod) is not long:
raise TestFailed, ("expected type(%r) to be long, not %r" %
(prod, type(prod)))
# Check for expected * overflow to long.
m = sys.maxint
for divisor in 1, 2, 4, 8, 16, 32:
j = m // divisor + 1
prod = divisor * j
if type(prod) is not long:
raise TestFailed, ("expected type(%r) to be long, not %r" %
(prod, type(prod)))
print '6.4.2 Long integers'
if 12L + 24L != 36L: raise TestFailed, 'long op'
if 12L + (-24L) != -12L: raise TestFailed, 'long op'
if (-12L) + 24L != 12L: raise TestFailed, 'long op'
if (-12L) + (-24L) != -36L: raise TestFailed, 'long op'
if not 12L < 24L: raise TestFailed, 'long op'
if not -24L < -12L: raise TestFailed, 'long op'
x = sys.maxint
if int(long(x)) != x: raise TestFailed, 'long op'
try: int(long(x)+1L)
except OverflowError: pass
else:raise TestFailed, 'long op'
x = -x
if int(long(x)) != x: raise TestFailed, 'long op'
x = x-1
if int(long(x)) != x: raise TestFailed, 'long op'
try: int(long(x)-1L)
except OverflowError: pass
else: raise TestFailed, 'long op'
print '6.4.3 Floating point numbers'
if 12.0 + 24.0 != 36.0: raise TestFailed, 'float op'
if 12.0 + (-24.0) != -12.0: raise TestFailed, 'float op'
if (-12.0) + 24.0 != 12.0: raise TestFailed, 'float op'
if (-12.0) + (-24.0) != -36.0: raise TestFailed, 'float op'
if not 12.0 < 24.0: raise TestFailed, 'float op'
if not -24.0 < -12.0: raise TestFailed, 'float op'
print '6.5 Sequence types'
print '6.5.1 Strings'
if len('') != 0: raise TestFailed, 'len(\'\')'
if len('a') != 1: raise TestFailed, 'len(\'a\')'
if len('abcdef') != 6: raise TestFailed, 'len(\'abcdef\')'
if 'xyz' + 'abcde' != 'xyzabcde': raise TestFailed, 'string concatenation'
if 'xyz'*3 != 'xyzxyzxyz': raise TestFailed, 'string repetition *3'
if 0*'abcde' != '': raise TestFailed, 'string repetition 0*'
if min('abc') != 'a' or max('abc') != 'c': raise TestFailed, 'min/max string'
if 'a' in 'abc' and 'b' in 'abc' and 'c' in 'abc' and 'd' not in 'abc': pass
else: raise TestFailed, 'in/not in string'
x = 'x'*103
if '%s!'%x != x+'!': raise TestFailed, 'nasty string formatting bug'
print '6.5.2 Tuples'
if len(()) != 0: raise TestFailed, 'len(())'
if len((1,)) != 1: raise TestFailed, 'len((1,))'
if len((1,2,3,4,5,6)) != 6: raise TestFailed, 'len((1,2,3,4,5,6))'
if (1,2)+(3,4) != (1,2,3,4): raise TestFailed, 'tuple concatenation'
if (1,2)*3 != (1,2,1,2,1,2): raise TestFailed, 'tuple repetition *3'
if 0*(1,2,3) != (): raise TestFailed, 'tuple repetition 0*'
if min((1,2)) != 1 or max((1,2)) != 2: raise TestFailed, 'min/max tuple'
if 0 in (0,1,2) and 1 in (0,1,2) and 2 in (0,1,2) and 3 not in (0,1,2): pass
else: raise TestFailed, 'in/not in tuple'
print '6.5.3 Lists'
if len([]) != 0: raise TestFailed, 'len([])'
if len([1,]) != 1: raise TestFailed, 'len([1,])'
if len([1,2,3,4,5,6]) != 6: raise TestFailed, 'len([1,2,3,4,5,6])'
if [1,2]+[3,4] != [1,2,3,4]: raise TestFailed, 'list concatenation'
if [1,2]*3 != [1,2,1,2,1,2]: raise TestFailed, 'list repetition *3'
if [1,2]*3L != [1,2,1,2,1,2]: raise TestFailed, 'list repetition *3L'
if 0*[1,2,3] != []: raise TestFailed, 'list repetition 0*'
if 0L*[1,2,3] != []: raise TestFailed, 'list repetition 0L*'
if min([1,2]) != 1 or max([1,2]) != 2: raise TestFailed, 'min/max list'
if 0 in [0,1,2] and 1 in [0,1,2] and 2 in [0,1,2] and 3 not in [0,1,2]: pass
else: raise TestFailed, 'in/not in list'
a = [1, 2, 3, 4, 5]
a[:-1] = a
if a != [1, 2, 3, 4, 5, 5]:
raise TestFailed, "list self-slice-assign (head)"
a = [1, 2, 3, 4, 5]
a[1:] = a
if a != [1, 1, 2, 3, 4, 5]:
raise TestFailed, "list self-slice-assign (tail)"
a = [1, 2, 3, 4, 5]
a[1:-1] = a
if a != [1, 1, 2, 3, 4, 5, 5]:
raise TestFailed, "list self-slice-assign (center)"
print '6.5.3a Additional list operations'
a = [0,1,2,3,4]
a[0L] = 1
a[1L] = 2
a[2L] = 3
if a != [1,2,3,3,4]: raise TestFailed, 'list item assignment [0L], [1L], [2L]'
a[0] = 5
a[1] = 6
a[2] = 7
if a != [5,6,7,3,4]: raise TestFailed, 'list item assignment [0], [1], [2]'
a[-2L] = 88
a[-1L] = 99
if a != [5,6,7,88,99]: raise TestFailed, 'list item assignment [-2L], [-1L]'
a[-2] = 8
a[-1] = 9
if a != [5,6,7,8,9]: raise TestFailed, 'list item assignment [-2], [-1]'
a[:2] = [0,4]
a[-3:] = []
a[1:1] = [1,2,3]
if a != [0,1,2,3,4]: raise TestFailed, 'list slice assignment'
a[ 1L : 4L] = [7,8,9]
if a != [0,7,8,9,4]: raise TestFailed, 'list slice assignment using long ints'
del a[1:4]
if a != [0,4]: raise TestFailed, 'list slice deletion'
del a[0]
if a != [4]: raise TestFailed, 'list item deletion [0]'
del a[-1]
if a != []: raise TestFailed, 'list item deletion [-1]'
a=range(0,5)
del a[1L:4L]
if a != [0,4]: raise TestFailed, 'list slice deletion'
del a[0L]
if a != [4]: raise TestFailed, 'list item deletion [0]'
del a[-1L]
if a != []: raise TestFailed, 'list item deletion [-1]'
a.append(0)
a.append(1)
a.append(2)
if a != [0,1,2]: raise TestFailed, 'list append'
a.insert(0, -2)
a.insert(1, -1)
a.insert(2,0)
if a != [-2,-1,0,0,1,2]: raise TestFailed, 'list insert'
if a.count(0) != 2: raise TestFailed, 'list count'
if a.index(0) != 2: raise TestFailed, 'list index'
a.remove(0)
if a != [-2,-1,0,1,2]: raise TestFailed, 'list remove'
a.reverse()
if a != [2,1,0,-1,-2]: raise TestFailed, 'list reverse'
a.sort()
if a != [-2,-1,0,1,2]: raise TestFailed, 'list sort'
def revcmp(a, b): return cmp(b, a)
a.sort(revcmp)
if a != [2,1,0,-1,-2]: raise TestFailed, 'list sort with cmp func'
# The following dumps core in unpatched Python 1.5:
def myComparison(x,y):
return cmp(x%3, y%7)
z = range(12)
z.sort(myComparison)
# Test extreme cases with long ints
a = [0,1,2,3,4]
if a[ -pow(2,128L): 3 ] != [0,1,2]:
raise TestFailed, "list slicing with too-small long integer"
if a[ 3: pow(2,145L) ] != [3,4]:
raise TestFailed, "list slicing with too-large long integer"
print '6.6 Mappings == Dictionaries'
d = {}
if d.keys() != []: raise TestFailed, '{}.keys()'
if d.has_key('a') != 0: raise TestFailed, '{}.has_key(\'a\')'
if ('a' in d) != 0: raise TestFailed, "'a' in {}"
if ('a' not in d) != 1: raise TestFailed, "'a' not in {}"
if len(d) != 0: raise TestFailed, 'len({})'
d = {'a': 1, 'b': 2}
if len(d) != 2: raise TestFailed, 'len(dict)'
k = d.keys()
k.sort()
if k != ['a', 'b']: raise TestFailed, 'dict keys()'
if d.has_key('a') and d.has_key('b') and not d.has_key('c'): pass
else: raise TestFailed, 'dict keys()'
if 'a' in d and 'b' in d and 'c' not in d: pass
else: raise TestFailed, 'dict keys() # in/not in version'
if d['a'] != 1 or d['b'] != 2: raise TestFailed, 'dict item'
d['c'] = 3
d['a'] = 4
if d['c'] != 3 or d['a'] != 4: raise TestFailed, 'dict item assignment'
del d['b']
if d != {'a': 4, 'c': 3}: raise TestFailed, 'dict item deletion'
# dict.clear()
d = {1:1, 2:2, 3:3}
d.clear()
if d != {}: raise TestFailed, 'dict clear'
# dict.update()
d.update({1:100})
d.update({2:20})
d.update({1:1, 2:2, 3:3})
if d != {1:1, 2:2, 3:3}: raise TestFailed, 'dict update'
d.clear()
try: d.update(None)
except AttributeError: pass
else: raise TestFailed, 'dict.update(None), AttributeError expected'
class SimpleUserDict:
def __init__(self):
self.d = {1:1, 2:2, 3:3}
def keys(self):
return self.d.keys()
def __getitem__(self, i):
return self.d[i]
d.update(SimpleUserDict())
if d != {1:1, 2:2, 3:3}: raise TestFailed, 'dict.update(instance)'
d.clear()
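# dict.update() with mapping-like objects: exceptions raised from keys(),
# from iterating the keys, and from __getitem__ must all propagate.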
class FailingUserDict:
def keys(self):
raise ValueError
try: d.update(FailingUserDict())
except ValueError: pass
else: raise TestFailed, 'dict.keys() expected ValueError'
class FailingUserDict:
def keys(self):
class BogonIter:
def __iter__(self):
raise ValueError
return BogonIter()
try: d.update(FailingUserDict())
except ValueError: pass
else: raise TestFailed, 'iter(dict.keys()) expected ValueError'
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = 1
def __iter__(self):
return self
def next(self):
if self.i:
self.i = 0
return 'a'
raise ValueError
return BogonIter()
def __getitem__(self, key):
return key
try: d.update(FailingUserDict())
except ValueError: pass
else: raise TestFailed, 'iter(dict.keys()).next() expected ValueError'
class FailingUserDict:
def keys(self):
class BogonIter:
def __init__(self):
self.i = ord('a')
def __iter__(self):
return self
def next(self):
if self.i <= ord('z'):
rtn = chr(self.i)
self.i += 1
return rtn
raise StopIteration
return BogonIter()
def __getitem__(self, key):
raise ValueError
try: d.update(FailingUserDict())
except ValueError: pass
else: raise TestFailed, 'dict.update(), __getitem__ expected ValueError'
# dict.copy()
d = {1:1, 2:2, 3:3}
if d.copy() != {1:1, 2:2, 3:3}: raise TestFailed, 'dict copy'
if {}.copy() != {}: raise TestFailed, 'empty dict copy'
# dict.get()
d = {}
if d.get('c') is not None: raise TestFailed, 'missing {} get, no 2nd arg'
if d.get('c', 3) != 3: raise TestFailed, 'missing {} get, w/ 2nd arg'
d = {'a' : 1, 'b' : 2}
if d.get('c') is not None: raise TestFailed, 'missing dict get, no 2nd arg'
if d.get('c', 3) != 3: raise TestFailed, 'missing dict get, w/ 2nd arg'
if d.get('a') != 1: raise TestFailed, 'present dict get, no 2nd arg'
if d.get('a', 3) != 1: raise TestFailed, 'present dict get, w/ 2nd arg'
# dict.setdefault()
d = {}
if d.setdefault('key0') is not None:
raise TestFailed, 'missing {} setdefault, no 2nd arg'
if d.setdefault('key0') is not None:
raise TestFailed, 'present {} setdefault, no 2nd arg'
d.setdefault('key', []).append(3)
if d['key'][0] != 3:
raise TestFailed, 'missing {} setdefault, w/ 2nd arg'
d.setdefault('key', []).append(4)
if len(d['key']) != 2:
raise TestFailed, 'present {} setdefault, w/ 2nd arg'
# dict.popitem()
for copymode in -1, +1:
# -1: b has same structure as a
# +1: b is a.copy()
for log2size in range(12):
size = 2**log2size
a = {}
b = {}
for i in range(size):
a[`i`] = i
if copymode < 0:
b[`i`] = i
if copymode > 0:
b = a.copy()
for i in range(size):
ka, va = ta = a.popitem()
if va != int(ka): raise TestFailed, "a.popitem: %s" % str(ta)
kb, vb = tb = b.popitem()
if vb != int(kb): raise TestFailed, "b.popitem: %s" % str(tb)
if copymode < 0 and ta != tb:
raise TestFailed, "a.popitem != b.popitem: %s, %s" % (
str(ta), str(tb))
if a: raise TestFailed, 'a not empty after popitems: %s' % str(a)
if b: raise TestFailed, 'b not empty after popitems: %s' % str(b)
try: type(1, 2)
except TypeError: pass
else: raise TestFailed, 'type(), w/2 args expected TypeError'
try: type(1, 2, 3, 4)
except TypeError: pass
else: raise TestFailed, 'type(), w/4 args expected TypeError'
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-2.2/Lib/test/test_types.py
|
Python
|
mit
| 14,942
|
import pytest
from mitmproxy.test import taddons
from mitmproxy.test import tflow
from mitmproxy import io
from mitmproxy import exceptions
from mitmproxy.addons import save
from mitmproxy.addons import view
def test_configure(tmpdir):
sa = save.Save()
with taddons.context(sa) as tctx:
with pytest.raises(exceptions.OptionsError):
tctx.configure(sa, save_stream_file=str(tmpdir))
with pytest.raises(Exception, match="Invalid filter"):
tctx.configure(
sa, save_stream_file=str(tmpdir.join("foo")), save_stream_filter="~~"
)
tctx.configure(sa, save_stream_filter="foo")
assert sa.filt
tctx.configure(sa, save_stream_filter=None)
assert not sa.filt
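# Helper: read a saved flow file back and return the flows it contains.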
def rd(p):
with open(p, "rb") as f:
x = io.FlowReader(f)
return list(x.stream())
def test_tcp(tmpdir):
sa = save.Save()
with taddons.context(sa) as tctx:
p = str(tmpdir.join("foo"))
tctx.configure(sa, save_stream_file=p)
tt = tflow.ttcpflow()
sa.tcp_start(tt)
sa.tcp_end(tt)
tt = tflow.ttcpflow()
sa.tcp_start(tt)
sa.tcp_error(tt)
tctx.configure(sa, save_stream_file=None)
assert len(rd(p)) == 2
def test_websocket(tmpdir):
sa = save.Save()
with taddons.context(sa) as tctx:
p = str(tmpdir.join("foo"))
tctx.configure(sa, save_stream_file=p)
f = tflow.twebsocketflow()
sa.request(f)
sa.websocket_end(f)
f = tflow.twebsocketflow()
sa.request(f)
sa.websocket_end(f)
tctx.configure(sa, save_stream_file=None)
assert len(rd(p)) == 2
def test_save_command(tmpdir):
sa = save.Save()
with taddons.context() as tctx:
p = str(tmpdir.join("foo"))
sa.save([tflow.tflow(resp=True)], p)
assert len(rd(p)) == 1
sa.save([tflow.tflow(resp=True)], p)
assert len(rd(p)) == 1
sa.save([tflow.tflow(resp=True)], "+" + p)
assert len(rd(p)) == 2
with pytest.raises(exceptions.CommandError):
sa.save([tflow.tflow(resp=True)], str(tmpdir))
v = view.View()
tctx.master.addons.add(v)
tctx.master.addons.add(sa)
tctx.master.commands.execute("save.file @shown %s" % p)
def test_simple(tmpdir):
sa = save.Save()
with taddons.context(sa) as tctx:
p = str(tmpdir.join("foo"))
tctx.configure(sa, save_stream_file=p)
f = tflow.tflow(resp=True)
sa.request(f)
sa.response(f)
tctx.configure(sa, save_stream_file=None)
assert rd(p)[0].response
tctx.configure(sa, save_stream_file="+" + p)
f = tflow.tflow(err=True)
sa.request(f)
sa.error(f)
tctx.configure(sa, save_stream_file=None)
assert rd(p)[1].error
tctx.configure(sa, save_stream_file="+" + p)
f = tflow.tflow()
sa.request(f)
tctx.configure(sa, save_stream_file=None)
assert not rd(p)[2].response
|
Kriechi/mitmproxy
|
test/mitmproxy/addons/test_save.py
|
Python
|
mit
| 3,060
|
from __future__ import absolute_import
from __future__ import print_function
import sys, os, yaml, glob
import subprocess
import pandas as pd
import re
import shutil
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from nougat import common, align
from itertools import groupby
from collections import OrderedDict
def run(global_config, sample_config):
sorted_libraries_by_insert = \
common._sort_libraries_by_insert(sample_config)
_check_libraries(sorted_libraries_by_insert)
computeAssemblyStats(sample_config)
# filter out short contigs
sample_config = _build_new_reference(sample_config)
if "tools" in sample_config:
"""If so, execute them one after the other in the specified order \
(might not work)"""
for command in sample_config["tools"]:
"""with this I pick up at run time the correct function in the \
current module"""
command_fn = getattr(sys.modules[__name__],
"_run_{}".format(command))
"""Update sample config, each command return sample_config and \
if necessary it modifies it"""
sample_config = command_fn(global_config, sample_config,
sorted_libraries_by_insert)
else:
#run default pipeline for QC
sample_config = _run_align(global_config, sample_config,
sorted_libraries_by_insert)
sample_config = _run_qaTools(global_config, sample_config,
sorted_libraries_by_insert)
sample_config = _run_FRC(global_config, sample_config,
sorted_libraries_by_insert)
def _run_align(global_config, sample_config,sorted_libraries_by_insert):
if "reference" not in sample_config:
print("reference sequence not provided, skypping alignment step.",
"Please provide a reference if you are intrested in aligning",
"the reads against a reference")
return sample_config
if not os.path.exists("alignments"):
os.makedirs("alignments")
os.chdir("alignments")
sorted_libraries_by_insert = align._align_reads(global_config,
sample_config, sorted_libraries_by_insert) # align reads
sorted_alignments_by_insert = align._merge_bam_files(global_config,
sample_config, sorted_libraries_by_insert) # merge alignments
sorted_alignments_by_insert = align.picard_CGbias(global_config,
sample_config,sorted_alignments_by_insert) # compute picard stats
sorted_alignments_by_insert = align.picard_collectInsertSizeMetrics(
global_config, sample_config,sorted_alignments_by_insert)
sorted_alignments_by_insert = align.picard_markDuplicates(global_config,
sample_config,sorted_alignments_by_insert)
os.chdir("..")
sample_config["alignments"] = sorted_alignments_by_insert
return sample_config
def _check_libraries(sorted_libraries_by_insert):
different_inserts = 0
current_insert = -1
orientation = ""
for library, libraryInfo in sorted_libraries_by_insert:
if current_insert == -1:
current_insert = libraryInfo["insert"]
different_inserts = 1
else :
if current_insert != libraryInfo["insert"]:
current_insert = libraryInfo["insert"]
different_inserts += 1
if different_inserts > 2:
sys.exit("error: in valiadation only two libraries are admitted "
"usually a PE and a MP, sometimes 2 PE)")
return
def _build_new_reference(sample_config):
minCtgLength = 500
if "minCtgLength" in sample_config:
minCtgLength = sample_config["minCtgLength"]
if minCtgLength < 500:
sys.exit("min contig length must be higher than 500bp, lower "
"values will complicate the job of valiadation tools and "
"make results difficult to interpret. For mammalian "
"genomes minCtgLength > 1Kbp is strongly suggested")
reference = sample_config["reference"]
reference_dir = os.path.abspath("reference")
if not os.path.exists(reference_dir):
os.makedirs(reference_dir)
os.chdir(reference_dir)
new_reference_name = os.path.abspath(os.path.basename(reference))
if os.path.exists(new_reference_name):
sample_config["reference"] = new_reference_name
os.chdir("..")
return sample_config # already created the new reference
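    # Stream the FASTA once, writing only records whose sequence length
    # reaches minCtgLength; the trailing record is flushed after the loop.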
with open(new_reference_name, "w") as new_ref_fd:
with open(reference, "r") as ref_fd:
fasta_header = ref_fd.readline()
sequence = ""
for line in ref_fd:
if line.startswith(">"):
if len(sequence) >= minCtgLength:
new_ref_fd.write(fasta_header)
new_ref_fd.write(sequence)
sequence = ""
fasta_header = line
else:
sequence+=line
if len(sequence) >= minCtgLength:
new_ref_fd.write(fasta_header)
new_ref_fd.write(sequence)
sample_config["reference"] = new_reference_name
os.chdir("..")
return sample_config
def _run_BUSCO(global_config, sample_config, sorted_alignments_by_insert):
program = global_config["Tools"]["BUSCO"]["bin"]
options = global_config["Tools"]["BUSCO"]["options"]
main_dir = os.getcwd()
BUSCOfolder = os.path.join(main_dir, "BUSCO")
if not os.path.exists(BUSCOfolder):
os.makedirs(BUSCOfolder)
os.chdir(BUSCOfolder)
BUSCO_data_path = os.path.expandvars(sample_config["BUSCODataPath"])
if not os.path.exists(BUSCO_data_path):
raise IOError("Path to the BUSCO data set does not exist!")
reference = sample_config["reference"]
output = sample_config["output"]
threads = sample_config.get("threads", 16)
command = [program, "-l", BUSCO_data_path, "-i", "{}".format(reference), "-o", "{}".format(output),
"-c", "{}".format(threads)]
command.extend(options)
common.print_command(command)
outfile = os.path.join(BUSCOfolder, "run_{}".format(output),
"short_summary_{}".format(output))
if not common.check_dryrun(sample_config) and not os.path.exists(outfile):
stdOut = open("BUSCO.stdOut", "a")
stdErr = open("BUSCO.stdErr", "a")
return_value = subprocess.call(command, stdout=stdOut, stderr=stdErr)
if not return_value == 0:
sys.exit("Error running BUSCO")
os.chdir("..")
def _run_FRC(global_config, sample_config, sorted_libraries_by_insert):
mainDir = os.getcwd()
FRCurveFolder = os.path.join(os.getcwd(), "FRCurve")
if not os.path.exists(FRCurveFolder):
os.makedirs(FRCurveFolder)
os.chdir("FRCurve")
program=global_config["Tools"]["FRC"]["bin"]
genomeSize = sample_config["genomeSize"]
reference = sample_config["reference"]
output = sample_config["output"]
alignments = sample_config["alignments"]
peBam = alignments[0][1]
peInsert = alignments[0][0]
peMinInsert = int(peInsert - peInsert*0.60)
peMaxInsert = int(peInsert + peInsert*0.60)
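    # NOTE: the computed PE insert bounds above are currently unused; FRC is
    # invoked with a fixed --pe-max-insert of 5000 (and --mp-max-insert of
    # 25000 for the mate-pair library below).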
command = [program, "--pe-sam", peBam, "--pe-max-insert", "5000"]
if len(alignments) > 1:
mpBam = alignments[1][1]
mpInsert = alignments[1][0]
mpMinInsert = int(mpInsert - mpInsert*0.50)
mpMaxInsert = int(mpInsert + mpInsert*0.50)
command += ["--mp-sam", mpBam, "--mp-max-insert", "25000"]
command += [ "--genome-size", "{}".format(genomeSize), "--output", output]
common.print_command(command)
if not common.check_dryrun(sample_config) and not os.path.exists(
"{}_FRC.png".format(output)):
stdOut = open("FRC.stdOut", "a")
stdErr = open("FRC.stdErr", "a")
returnValue = subprocess.call(command , stdout=stdOut , stderr=stdErr)
if not returnValue == 0:
sys.exit("error, while running FRCurve: {}".format(command))
plotFRCurve(output)
os.chdir("..")
return sample_config
def plotFRCurve(output):
names = ["_FRC", "COMPR_MP_FRC", "COMPR_PE_FRC", "HIGH_COV_PE_FRC",
"HIGH_NORM_COV_PE_FRC", "HIGH_OUTIE_MP_FRC", "HIGH_OUTIE_PE_FRC",
"HIGH_SINGLE_MP_FRC", "HIGH_SINGLE_PE_FRC", "HIGH_SPAN_MP_FRC",
"HIGH_SPAN_PE_FRC", "LOW_COV_PE_FRC", "LOW_NORM_COV_PE_FRC",
"STRECH_MP_FRC", "STRECH_PE_FRC"]
for name in names:
FRC_data = pd.io.parsers.read_csv("{}{}.txt".format(output, name),
sep=' ', header=None)
FRC_features = FRC_data[FRC_data.columns[0]].tolist()
FRC_coverage = FRC_data[FRC_data.columns[1]].tolist()
plt.plot(FRC_features, FRC_coverage)
if name == "_FRC":
plt.title('Feature Resonse Curve -- All Features')
else:
plt.title('Feature Resonse Curve -- {}'.format(name))
plt.plot(FRC_features, FRC_coverage)
plt.savefig("{}{}.png".format(output, name))
plt.clf()
return 0
def _run_qaTools(global_config, sample_config, sorted_libraries_by_insert):
mainDir = os.getcwd()
qaToolsFolder = os.path.join(os.getcwd(), "QAstats")
if not os.path.exists(qaToolsFolder):
os.makedirs(qaToolsFolder)
os.chdir("QAstats")
program=global_config["Tools"]["qaTools"]["bin"]
genomeSize = sample_config["genomeSize"]
reference = sample_config["reference"]
output = sample_config["output"]
alignments = sample_config["alignments"][0]
BAMfile = alignments[1]
command = ["{}".format(program), "-m", "-q", "0", "-i", BAMfile,
"{}.cov".format(os.path.basename(BAMfile))]
common.print_command(command)
if not common.check_dryrun(sample_config) and not os.path.exists(
"{}.cov".format(os.path.basename(BAMfile))):
stdOut = open("QAtools.stdOut", "a")
stdErr = open("QAtools.stdErr", "a")
returnValue = subprocess.call(command , stdout=stdOut , stderr=stdErr)
if not returnValue == 0:
sys.exit("error, while running QAtools: {}".format(command))
#now add GC content
QAtools_dict = {}
header = ""
with open( "{}.cov".format(os.path.basename(BAMfile)), "r") as QA_csv:
header = QA_csv.readline().rstrip()
for line in QA_csv:
line = line.strip().split("\t")
QAtools_dict[line[0]] = [line[1],line[2],line[3]]
QA_GC_file = "{}.cov.gc".format(os.path.basename(BAMfile))
with open(QA_GC_file, "w") as QA_GC_fd:
QA_GC_fd.write("{}\tGCperc\n".format(header))
with open(reference, "r") as ref_fd:
fasta_raw_header = ref_fd.readline().strip()
fasta_raw_header = fasta_raw_header.split(" ")[0]
fasta_raw_header = fasta_raw_header.split("\t")[0]
fasta_header = fasta_raw_header.split(">")[1]
sequence = ""
for line in ref_fd:
line = line.strip()
if line.startswith(">"):
GC = computeGC(sequence)
if fasta_header not in QAtools_dict:
sys.exit("error while parsing QAcompute output: "
"probably some wired contig name is "
"present in your assmebly file")
QA_GC_fd.write("{}\t{}\t{}\t{}\t{}\n".format(
fasta_header, QAtools_dict[fasta_header][0],
QAtools_dict[fasta_header][1],
QAtools_dict[fasta_header][2], GC))
sequence = ""
fasta_raw_header = line.split(" ")[0]
fasta_raw_header = fasta_raw_header.split("\t")[0]
fasta_header = fasta_raw_header.split(">")[1]
else:
sequence+=line
GC = computeGC(sequence)
if fasta_header not in QAtools_dict:
sys.exit("error while parsing QAcompute output: probably "
"some wired contig name is present in your "
"assmebly file")
QA_GC_fd.write("{}\t{}\t{}\t{}\t{}\n".format(fasta_header,
QAtools_dict[fasta_header][0],
QAtools_dict[fasta_header][1],
QAtools_dict[fasta_header][2], GC))
plotQA(QA_GC_file)
os.chdir("..")
return sample_config
def plotQA(QA_GC_file):
#QA_GC_file="lib_500.bam.cov.gc"
import shutil as sh
sh.copy(QA_GC_file, "Contigs_Cov_SeqLen_GC.csv")
QA_data = pd.io.parsers.read_csv("Contigs_Cov_SeqLen_GC.csv",
sep='\t', header=0)
GCperc = QA_data['GCperc'].tolist()
MedianCov = QA_data['Median_Cov'].tolist()
SeqLen = QA_data['Seq_len'].tolist()
Mean_MedianCov = sum(MedianCov) / float(len(MedianCov))
Max_MedianCov = max(MedianCov)
if Max_MedianCov > 2.5* Mean_MedianCov:
Max_MedianCov = Mean_MedianCov*2
#GC_vs_Median Coverage
plt.plot(GCperc, MedianCov, 'or')
plt.title('GC content vs Median Coverage')
plt.xlabel('%GC')
plt.ylabel('Coverage')
plotname = "GC_vs_Coverage.png"
plt.savefig(plotname)
plt.clf()
# GC_vs_median eliminate outliers
plt.plot(GCperc, MedianCov, 'or')
plt.ylim((10, Max_MedianCov))
plt.title('GC content vs Median Coverage')
plt.xlabel('%GC')
plt.ylabel('Coverage')
plotname = "GC_vs_Coverage_noOutliers.png"
plt.savefig(plotname)
plt.clf()
#Coverage Distribution Histogram
try:
n, bins, patches = plt.hist(MedianCov, 100, facecolor='g')
except ValueError:
n, bins, patches = plt.hist(MedianCov, bins=range(0, max(MedianCov)+100, 10), facecolor='g')
plt.xlabel('Coverage')
plt.ylabel('Frequency')
plt.title('Coverage Distribution')
plotname = "Coverage_distribution.png"
plt.savefig(plotname)
plt.clf()
#Coverage Distribution Histogram eliminate outliers
n, bins, patches = plt.hist(MedianCov, 100, facecolor='g',
range=(4,Max_MedianCov))
plt.xlabel('Coverage')
plt.ylabel('Frequency')
plt.title('Coverage Distribution')
plotname = "Coverage_distribution_noOutliers.png"
plt.savefig(plotname)
plt.clf()
#Median Cov vs Sequence Length
    plt.plot(MedianCov, [x / 1000 for x in SeqLen], 'ro')
plt.title('Median Coverage vs Contig Length')
plt.xlabel('Median Coverage')
plt.ylabel('Contig Length (Kbp)')
plotname = "MedianCov_vs_CtgLength.png"
plt.savefig(plotname)
plt.clf()
#Median Cov vs Sequence Length eliminate outliers
    plt.plot(MedianCov, [x / 1000 for x in SeqLen], 'ro')
plt.xlim((10, Max_MedianCov))
plt.title('Median Coverage vs Contig Length')
plt.xlabel('Median Coverage')
plt.ylabel('Contig Length (Kbp)')
plotname = "MedianCov_vs_CtgLength_noOutliers.png"
plt.savefig(plotname)
plt.clf()
#GC content vs Contig length
    plt.plot(GCperc, [x / 1000 for x in SeqLen], 'ro')
plt.title('%GC vs Contig Length')
plt.xlabel('%GC')
plt.ylabel('Contig Length (Kbp)')
plotname = "GC_vs_CtgLength.png"
plt.savefig(plotname)
plt.clf()
return 0
def computeGC(sequence):
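    # GC fraction over unambiguous bases only: Ns and other IUPAC codes are
    # excluded from both the numerator and the denominator.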
gcCount = len(re.findall("[GC]", sequence)) + len(
re.findall("[gc]", sequence))
totalBaseCount = len(re.findall("[GCTA]", sequence)) + len(
re.findall("[gcta]", sequence))
gcFraction = float(gcCount) / totalBaseCount
return gcFraction
def computeAssemblyStats(sample_config):
outfile = os.path.join("contig_stats", "contiguity.out")
if not os.path.exists("contig_stats"):
os.makedirs("contig_stats")
minlength = sample_config.get("minCtgLength", 1000)
sequence = sample_config["reference"]
genomesize = sample_config["genomeSize"]
ctg = re.sub("scf.fasta$", "ctg.fasta", sequence)
scf = re.sub("ctg.fasta$", "scf.fasta", sequence)
def asm_stats(sequence):
stats = OrderedDict()
stats["assembly type"] = ""
stats["# sequences"] = 0
stats["assembly length"] = 0
stats["trim shorter than(bp)"] = minlength
stats["# trimmed sequences"] = 0
stats["trimmed assembly length"] = 0
stats["N50"] = 0
stats["N80"] = 0
stats["NG50"] = 0
stats["NG80"] = 0
stats["longest seq"] = 0
sequence_lengths = []
with open(sequence, "r") as seq_file:
# Groupby iterator. Should work for fasta of any column width
fai = groupby(seq_file, lambda x: x.startswith(">"))
while True:
try:
_, header = next(fai)
_, sequence = next(fai)
except StopIteration:
break
# Collect fasta sequence stats
seq_len = sum([len(i.strip()) for i in sequence])
sequence_lengths.append(seq_len)
stats["# sequences"] += 1
stats["assembly length"] += seq_len
if seq_len > minlength:
stats["# trimmed sequences"] += 1
stats["trimmed assembly length"] += seq_len
if seq_len > stats["longest seq"]:
stats["longest seq"] = seq_len
sequence_lengths = sorted(sequence_lengths, reverse=True)
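        # Walk lengths from longest to shortest: N50/N80 are taken against
        # the total assembly length, NG50/NG80 against the expected genome
        # size.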
test_sum = 0
for sequence in sequence_lengths:
test_sum += sequence
if stats["assembly length"] * 0.5 < test_sum and stats["N50"] is 0:
stats["N50"] = sequence
if stats["assembly length"] * 0.8 < test_sum and stats["N80"] is 0:
stats["N80"] = sequence
if genomesize * 0.5 < test_sum and stats["NG50"] is 0:
stats["NG50"] = sequence
if genomesize * 0.8 < test_sum and stats["NG80"] is 0:
stats["NG80"] = sequence
return stats
ctg_stats = asm_stats(ctg)
ctg_stats["assembly type"] = "contigs"
scf_stats = asm_stats(scf)
scf_stats["assembly type"] = "scaffolds"
with open(outfile, "w") as out:
out.write('\t'.join(ctg_stats.keys()))
out.write('\n')
for asm in [ctg_stats, scf_stats]:
out.write('\t'.join(map(str, asm.values())))
out.write('\n')
|
senthil10/NouGAT
|
nougat/evaluete.py
|
Python
|
mit
| 18,678
|
"""
Django settings for huts project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '1&1@xh%(guq+b#1&jv$e6pa9n6sm_w#9cia1)(+idj1)omok(*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
CORS_ORIGIN_ALLOW_ALL = True
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'hut',
'rest_framework',
'corsheaders',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
MIDDLEWARE_CLASSES = []
ROOT_URLCONF = 'huts.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'huts.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
'default': dj_database_url.config(default='postgres://nxhxalrlqkckbd:1f8179624d9a773c8de38b1303b149283dfd58238fb10d0509cb85be49edcc2a@ec2-54-247-99-159.eu-west-1.compute.amazonaws.com:5432/d9tipol4jem759')
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# # Static files (CSS, JavaScript, Images)
# # https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static")
]
STATIC_ROOT = os.path.join(os.path.dirname(__file__), '../static_cdn')
MEDIA_ROOT = os.path.join(os.path.dirname(__file__), '../media_cdn')
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS':
'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 2,
}
|
nikolaystanishev/hut
|
huts/settings.py
|
Python
|
mit
| 4,118
|
# Copyright (c) 2013 Oscar Campos <oscar.campos@member.fsf.org>
# See LICENSE for more details
"""
.. module:: decorators
:platform: Unix, Windows
:synopsis: Decorators for SublimePython plugin
.. moduleauthor:: Oscar Campos <oscar.campos@member.fsf.org>
"""
import os
import functools
def debug(f):
    @functools.wraps(f)
def wrapped(*args, **kwargs):
try:
return f(*args, **kwargs)
except:
import traceback
with open(os.path.expanduser("~/trace"), "w") as fl:
traceback.print_exc(file=fl)
return wrapped
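# Minimal usage sketch (hypothetical handler name, not part of this module):
#
#     @debug
#     def on_activated(view):
#         ...  # any exception raised here is written to ~/trace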
|
leonth/private-configs
|
sublime-text-3/Packages/SublimePythonIDE/server/decorators.py
|
Python
|
mit
| 598
|
import unittest
from programy.processors.post.denormalize import DenormalizePostProcessor
from programy.bot import Bot
from programy.brain import Brain
from programy.config.brain import BrainConfiguration
from programy.config.bot import BotConfiguration
class DenormalizeTests(unittest.TestCase):
def setUp(self):
self.bot = Bot(Brain(BrainConfiguration()), config=BotConfiguration())
self.bot.brain.denormals.process_splits([" dot com ",".com"])
def test_denormalize(self):
processor = DenormalizePostProcessor ()
result = processor.process(self.bot, "testid", "Hello")
self.assertIsNotNone(result)
self.assertEqual("Hello", result)
result = processor.process(self.bot, "testid", "hello dot com")
self.assertIsNotNone(result)
self.assertEqual("hello.com", result)
|
dkamotsky/program-y
|
src/test/processors/post/test_denormalize.py
|
Python
|
mit
| 851
|
# Generated by Django 2.2.15 on 2020-11-24 06:44
from decimal import Decimal
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("assignments", "0015_assignmentvote_delegated_user"),
]
operations = [
migrations.AddField(
model_name="assignmentpoll",
name="db_amount_global_yes",
field=models.DecimalField(
blank=True,
decimal_places=6,
default=Decimal("0"),
max_digits=15,
null=True,
validators=[django.core.validators.MinValueValidator(Decimal("-2"))],
),
),
migrations.AddField(
model_name="assignmentpoll",
name="global_yes",
field=models.BooleanField(default=True),
),
migrations.AlterField(
model_name="assignmentpoll",
name="pollmethod",
field=models.CharField(
choices=[
("votes", "Yes per candidate"),
("N", "No per candidate"),
("YN", "Yes/No per candidate"),
("YNA", "Yes/No/Abstain per candidate"),
],
max_length=5,
),
),
migrations.AlterField(
model_name="assignmentpoll",
name="onehundred_percent_base",
field=models.CharField(
choices=[
("YN", "Yes/No per candidate"),
("YNA", "Yes/No/Abstain per candidate"),
("Y", "Sum of votes including general No/Abstain"),
("valid", "All valid ballots"),
("cast", "All casted ballots"),
("disabled", "Disabled (no percents)"),
],
max_length=8,
),
),
migrations.AlterField(
model_name="assignmentpoll",
name="pollmethod",
field=models.CharField(
choices=[
("Y", "Yes per candidate"),
("N", "No per candidate"),
("YN", "Yes/No per candidate"),
("YNA", "Yes/No/Abstain per candidate"),
],
max_length=5,
),
),
]
|
FinnStutzenstein/OpenSlides
|
server/openslides/assignments/migrations/0016_negative_votes.py
|
Python
|
mit
| 2,395
|
import frappe
from frappe.utils import cstr
def execute():
# Update Social Logins in User
run_patch()
# Create Social Login Key(s) from Social Login Keys
frappe.reload_doc("integrations", "doctype", "social_login_key", force=True)
if not frappe.db.exists('DocType', 'Social Login Keys'):
return
social_login_keys = frappe.get_doc("Social Login Keys", "Social Login Keys")
if social_login_keys.get("facebook_client_id") or social_login_keys.get("facebook_client_secret"):
facebook_login_key = frappe.new_doc("Social Login Key")
facebook_login_key.get_social_login_provider("Facebook", initialize=True)
facebook_login_key.social_login_provider = "Facebook"
facebook_login_key.client_id = social_login_keys.get("facebook_client_id")
facebook_login_key.client_secret = social_login_keys.get("facebook_client_secret")
if not (facebook_login_key.client_secret and facebook_login_key.client_id):
facebook_login_key.enable_social_login = 0
facebook_login_key.save()
if social_login_keys.get("frappe_server_url"):
frappe_login_key = frappe.new_doc("Social Login Key")
frappe_login_key.get_social_login_provider("Frappe", initialize=True)
frappe_login_key.social_login_provider = "Frappe"
frappe_login_key.base_url = social_login_keys.get("frappe_server_url")
frappe_login_key.client_id = social_login_keys.get("frappe_client_id")
frappe_login_key.client_secret = social_login_keys.get("frappe_client_secret")
if not (frappe_login_key.client_secret and frappe_login_key.client_id and frappe_login_key.base_url):
frappe_login_key.enable_social_login = 0
frappe_login_key.save()
if social_login_keys.get("github_client_id") or social_login_keys.get("github_client_secret"):
github_login_key = frappe.new_doc("Social Login Key")
github_login_key.get_social_login_provider("GitHub", initialize=True)
github_login_key.social_login_provider = "GitHub"
github_login_key.client_id = social_login_keys.get("github_client_id")
github_login_key.client_secret = social_login_keys.get("github_client_secret")
if not (github_login_key.client_secret and github_login_key.client_id):
github_login_key.enable_social_login = 0
github_login_key.save()
if social_login_keys.get("google_client_id") or social_login_keys.get("google_client_secret"):
google_login_key = frappe.new_doc("Social Login Key")
google_login_key.get_social_login_provider("Google", initialize=True)
google_login_key.social_login_provider = "Google"
google_login_key.client_id = social_login_keys.get("google_client_id")
google_login_key.client_secret = social_login_keys.get("google_client_secret")
if not (google_login_key.client_secret and google_login_key.client_id):
google_login_key.enable_social_login = 0
google_login_key.save()
frappe.delete_doc("DocType", "Social Login Keys")
def run_patch():
frappe.reload_doc("core", "doctype", "user", force=True)
frappe.reload_doc("core", "doctype", "user_social_login", force=True)
users = frappe.get_all("User", fields=["*"], filters={"name":("not in", ["Administrator", "Guest"])})
for user in users:
idx = 0
if user.frappe_userid:
insert_user_social_login(user.name, user.modified_by, 'frappe', idx, userid=user.frappe_userid)
idx += 1
if user.fb_userid or user.fb_username:
insert_user_social_login(user.name, user.modified_by, 'facebook', idx, userid=user.fb_userid, username=user.fb_username)
idx += 1
if user.github_userid or user.github_username:
insert_user_social_login(user.name, user.modified_by, 'github', idx, userid=user.github_userid, username=user.github_username)
idx += 1
if user.google_userid:
insert_user_social_login(user.name, user.modified_by, 'google', idx, userid=user.google_userid)
idx += 1
def insert_user_social_login(user, modified_by, provider, idx, userid=None, username=None):
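	# Build one `tabUser Social Login` child row with a raw INSERT (values
	# escaped via frappe.db.escape), since the parent User doc is not loaded.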
source_cols = get_standard_cols()
creation_time = frappe.utils.get_datetime_str(frappe.utils.get_datetime())
values = [
frappe.generate_hash(length=10),
creation_time,
creation_time,
user,
modified_by,
user,
"User",
"social_logins",
cstr(idx),
provider
]
if userid:
source_cols.append("userid")
values.append(userid)
if username:
source_cols.append("username")
values.append(username)
query = """INSERT INTO `tabUser Social Login` (`{source_cols}`)
VALUES ({values})
""".format(
source_cols = "`, `".join(source_cols),
values= ", ".join([frappe.db.escape(d) for d in values])
)
frappe.db.sql(query)
def get_provider_field_map():
return frappe._dict({
"frappe": ["frappe_userid"],
"facebook": ["fb_userid", "fb_username"],
"github": ["github_userid", "github_username"],
"google": ["google_userid"],
})
def get_provider_fields(provider):
return get_provider_field_map().get(provider)
def get_standard_cols():
return ["name", "creation", "modified", "owner", "modified_by", "parent", "parenttype", "parentfield", "idx", "provider"]
|
frappe/frappe
|
frappe/patches/v10_0/refactor_social_login_keys.py
|
Python
|
mit
| 4,935
|
from .extensions import db, resizer
class Upload(db.Model):
__tablename__ = 'upload'
id = db.Column(db.Integer, autoincrement=True, primary_key=True)
name = db.Column(db.Unicode(255), nullable=False)
url = db.Column(db.Unicode(255), nullable=False)
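# If a resizer is configured, attach name/url columns for each registered
# size to the Upload model at import time (e.g. a hypothetical 'thumb' size
# yields thumb_name and thumb_url).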
if resizer:
for size in resizer.sizes.iterkeys():
setattr(Upload, size + '_name', db.Column(db.Unicode(255)))
setattr(Upload, size + '_url', db.Column(db.Unicode(255)))
|
FelixLoether/flask-uploads
|
flask_uploads/models.py
|
Python
|
mit
| 458
|
# coding: utf8
# OeQ autogenerated lookup function for 'Non Residential Building Base U-Value of Buildings in correlation to year of construction', based on GEMOD
import math
import numpy as np
import oeqLookuptable as oeq
def get(*xin):
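    # Pairs of (year of construction, base U-value in W/(m2*K)); the value
    # steps down at 1979, 1984, 2021 and 2050.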
l_lookup = oeq.lookuptable(
[0,1.2,
1849,1.2,
1850,1.2,
1851,1.2,
1852,1.2,
1853,1.2,
1854,1.2,
1855,1.2,
1856,1.2,
1857,1.2,
1858,1.2,
1859,1.2,
1860,1.2,
1861,1.2,
1862,1.2,
1863,1.2,
1864,1.2,
1865,1.2,
1866,1.2,
1867,1.2,
1868,1.2,
1869,1.2,
1870,1.2,
1871,1.2,
1872,1.2,
1873,1.2,
1874,1.2,
1875,1.2,
1876,1.2,
1877,1.2,
1878,1.2,
1879,1.2,
1880,1.2,
1881,1.2,
1882,1.2,
1883,1.2,
1884,1.2,
1885,1.2,
1886,1.2,
1887,1.2,
1888,1.2,
1889,1.2,
1890,1.2,
1891,1.2,
1892,1.2,
1893,1.2,
1894,1.2,
1895,1.2,
1896,1.2,
1897,1.2,
1898,1.2,
1899,1.2,
1900,1.2,
1901,1.2,
1902,1.2,
1903,1.2,
1904,1.2,
1905,1.2,
1906,1.2,
1907,1.2,
1908,1.2,
1909,1.2,
1910,1.2,
1911,1.2,
1912,1.2,
1913,1.2,
1914,1.2,
1915,1.2,
1916,1.2,
1917,1.2,
1918,1.2,
1919,1.2,
1920,1.2,
1921,1.2,
1922,1.2,
1923,1.2,
1924,1.2,
1925,1.2,
1926,1.2,
1927,1.2,
1928,1.2,
1929,1.2,
1930,1.2,
1931,1.2,
1932,1.2,
1933,1.2,
1934,1.2,
1935,1.2,
1936,1.2,
1937,1.2,
1938,1.2,
1939,1.2,
1940,1.2,
1941,1.2,
1942,1.2,
1943,1.2,
1944,1.2,
1945,1.2,
1946,1.2,
1947,1.2,
1948,1.2,
1949,1.2,
1950,1.2,
1951,1.2,
1952,1.2,
1953,1.2,
1954,1.2,
1955,1.2,
1956,1.2,
1957,1.2,
1958,1.2,
1959,1.2,
1960,1.2,
1961,1.2,
1962,1.2,
1963,1.2,
1964,1.2,
1965,1.2,
1966,1.2,
1967,1.2,
1968,1.2,
1969,1.2,
1970,1.2,
1971,1.2,
1972,1.2,
1973,1.2,
1974,1.2,
1975,1.2,
1976,1.2,
1977,1.2,
1978,1.2,
1979,0.85,
1980,0.85,
1981,0.85,
1982,0.85,
1983,0.85,
1984,0.4,
1985,0.4,
1986,0.4,
1987,0.4,
1988,0.4,
1989,0.4,
1990,0.4,
1991,0.4,
1992,0.4,
1993,0.4,
1994,0.4,
1995,0.4,
1996,0.4,
1997,0.4,
1998,0.4,
1999,0.4,
2000,0.4,
2001,0.4,
2002,0.4,
2003,0.4,
2004,0.4,
2005,0.4,
2006,0.4,
2007,0.4,
2008,0.4,
2009,0.4,
2010,0.4,
2011,0.4,
2012,0.4,
2013,0.4,
2014,0.4,
2015,0.4,
2016,0.4,
2017,0.4,
2018,0.4,
2019,0.4,
2020,0.4,
2021,0.15,
2022,0.15,
2023,0.15,
2024,0.15,
2025,0.15,
2026,0.15,
2027,0.15,
2028,0.15,
2029,0.15,
2030,0.15,
2031,0.15,
2032,0.15,
2033,0.15,
2034,0.15,
2035,0.15,
2036,0.15,
2037,0.15,
2038,0.15,
2039,0.15,
2040,0.15,
2041,0.15,
2042,0.15,
2043,0.15,
2044,0.15,
2045,0.15,
2046,0.15,
2047,0.15,
2048,0.15,
2049,0.15,
2050,0.1,
2051,0.1])
return(l_lookup.lookup(xin))
|
UdK-VPT/Open_eQuarter
|
mole/stat_corr/nrb_contemporary_base_uvalue_by_building_age_lookup.py
|
Python
|
gpl-2.0
| 2,378
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
FileSelectionPanel.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt import uic
from qgis.PyQt.QtWidgets import QFileDialog
from qgis.PyQt.QtCore import QSettings
from processing.tools.system import isWindows
pluginPath = os.path.split(os.path.dirname(__file__))[0]
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'widgetBaseSelector.ui'))
class FileSelectionPanel(BASE, WIDGET):
def __init__(self, isFolder, ext=None):
super(FileSelectionPanel, self).__init__(None)
self.setupUi(self)
self.ext = ext or '*'
self.isFolder = isFolder
self.btnSelect.clicked.connect(self.showSelectionDialog)
def showSelectionDialog(self):
# Find the file dialog's working directory
settings = QSettings()
text = self.leText.text()
if os.path.isdir(text):
path = text
elif os.path.isdir(os.path.dirname(text)):
path = os.path.dirname(text)
elif settings.contains('/Processing/LastInputPath'):
path = settings.value('/Processing/LastInputPath')
else:
path = ''
if self.isFolder:
folder = QFileDialog.getExistingDirectory(self,
self.tr('Select folder'), path)
if folder:
self.leText.setText(folder)
settings.setValue('/Processing/LastInputPath',
os.path.dirname(folder))
else:
filenames = QFileDialog.getOpenFileNames(self,
self.tr('Select file'), path, '*.' + self.ext)
if filenames:
self.leText.setText(u';'.join(filenames))
settings.setValue('/Processing/LastInputPath',
os.path.dirname(filenames[0]))
def getValue(self):
s = self.leText.text()
if isWindows():
s = s.replace('\\', '/')
return s
def setText(self, text):
self.leText.setText(text)
|
alexbruy/QGIS
|
python/plugins/processing/gui/FileSelectionPanel.py
|
Python
|
gpl-2.0
| 3,127
|
#!/usr/bin/env python
# -*- noplot -*-
"""
N Classic Base renderer Ext renderer
20 0.22 0.14 0.14
100 0.16 0.14 0.13
1000 0.45 0.26 0.17
10000 3.30 1.31 0.53
50000 19.30 6.53 1.98
"""
from pylab import *
import time
for N in (20,100,1000,10000,50000):
tstart = time.time()
x = 0.9*rand(N)
y = 0.9*rand(N)
s = 20*rand(N)
scatter(x,y,s)
print '%d symbols in %1.2f s' % (N, time.time()-tstart)
|
ryandougherty/mwa-capstone
|
MWA_Tools/build/matplotlib/lib/mpl_examples/pylab_examples/scatter_profile.py
|
Python
|
gpl-2.0
| 556
|
#!/bin/env python
""" Showing last hour history of FTS transfers. """
import sys
import DIRAC
from DIRAC import gLogger, gConfig, S_OK
from DIRAC.Core.Base import Script
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.ConfigurationSystem.Client import PathFinder
__RCSID__ = "$Id$"
colors = { "yellow" : "\033[93m%s\033[0m",
"red" : "\033[91m%s\033[0m" }
gProblematic = False
def showChannels():
""" print info about the last hour performance of FTS system """
global gProblematic
taSection = PathFinder.getAgentSection("DataManagement/TransferAgent")
timeScale = gConfig.getOption( taSection + "/ThroughputTimescale", 3600 )
if not timeScale["OK"]:
gLogger.error( timeScale["Message"] )
DIRAC.exit(1)
timeScale = int( timeScale["Value"] )
  accFailureRate = gConfig.getOption( taSection + "/StrategyHandler/AcceptableFailureRate", 75 )
  if not accFailureRate["OK"]:
    gLogger.error( accFailureRate["Message"] )
    DIRAC.exit(1)
  accFailureRate = int( accFailureRate["Value"] )
  accFailedFiles = gConfig.getOption( taSection + "/StrategyHandler/AcceptableFailedFiles", 5 )
  if not accFailedFiles["OK"]:
    gLogger.error( accFailedFiles["Message"] )
    DIRAC.exit(1)
  accFailedFiles = int( accFailedFiles["Value"] )
scInfo = "timescale = %s s\nacc failure rate = %s %%\nacc distinct failed files = %s" % ( timeScale,
accFailureRate,
accFailedFiles )
## db monitor
transferDB = RPCClient( "DataManagement/TransferDBMonitoring" )
## get channels
channels = transferDB.getChannelQueues()
if not channels["OK"]:
gLogger.error( channels["Message"] )
DIRAC.exit(1)
channels = channels["Value"]
  ## get bandwidths
bands = transferDB.getChannelObservedThroughput( timeScale )
if not bands["OK"]:
gLogger.error( bands["Message"] )
DIRAC.exit(1)
bands = bands["Value"]
## get failed files
badFiles = transferDB.getCountFileToFTS( timeScale, "Failed" )
if not badFiles["OK"]:
gLogger.error( badFiles["Message"] )
DIRAC.exit(1)
badFiles = badFiles["Value"] if badFiles["Value"] else {}
colorize = sys.stdout.isatty()
header = " %2s | %-15s | %8s | %8s | %8s | %8s | %8s | %12s | %8s | %8s" % ( "Id", "Name", "Status",
"Waiting", "Success", "Failed",
"FilePut", "ThroughPut", "FailRate",
"FailedFiles" )
dashLine = "-"*len(header)
lineTemplate = " %2s | %-15s | %8s | %8d | %8d | %8d | %8.2f | %12.2f | %8.2f | %8d"
printOut = []
for chId, channel in channels.items():
name = channel["ChannelName"]
color = None
status = channel["Status"]
if status == "Active":
status = "OK"
waitingFiles = channel["Files"]
waitingSize = channel["Size"]
    failedFiles = successFiles = filePut = throughPut = fRate = 0
fFiles = 0
if chId in badFiles:
fFiles = int(badFiles[chId])
if chId in bands:
band = bands[chId]
failedFiles = int(band["FailedFiles"])
successFiles = int(band["SuccessfulFiles"])
filePut = band["Fileput"]
throughPut = band["Throughput"]
if failedFiles or successFiles:
fRate = 100.0 * float( failedFiles ) / ( float(failedFiles) + float( successFiles) )
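    # Classify the channel: any failures mark it "Poor"; exceeding both the
    # acceptable failure rate and the failed-files threshold closes it.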
if fRate > 0 and colorize:
color = "yellow"
status = "Poor"
if fRate > accFailureRate and fFiles > accFailedFiles:
status = "Closed"
if colorize:
color = "red"
if gProblematic and not fRate:
continue
if colorize and color:
line = colors[color] % lineTemplate
else:
line = lineTemplate
printOut.append( line % ( chId, name, status,
waitingFiles if waitingFiles else 0,
successFiles if successFiles else 0,
failedFiles if failedFiles else 0,
filePut if filePut else 0,
                              throughPut if throughPut else 0,
fRate if fRate else 0,
fFiles if fFiles else 0 ) )
if printOut:
printOut = [ scInfo, header, dashLine ] + printOut
for line in printOut:
gLogger.always( line )
else:
gLogger.always("Noting to display...")
def setProblematic( problematic=False ):
""" callback for showing only problematic channels """
global gProblematic
gProblematic = True
return S_OK()
## script execution
if __name__ == "__main__":
Script.registerSwitch( "p", "problematic", "show only problematic channels", setProblematic )
Script.parseCommandLine()
showChannels()
|
sposs/DIRAC
|
DataManagementSystem/scripts/dirac-dms-show-fts-status.py
|
Python
|
gpl-3.0
| 5,027
|
from couchpotato import get_session
from couchpotato.api import addApiView
from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
from couchpotato.core.helpers.encoding import simplifyString, toUnicode
from couchpotato.core.helpers.request import jsonified, getParam
from couchpotato.core.helpers.variable import md5, getTitle, splitString, \
possibleTitles
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.settings.model import Movie, Release, ReleaseInfo
from couchpotato.environment import Env
from inspect import ismethod, isfunction
from sqlalchemy.exc import InterfaceError
import datetime
import random
import re
import time
import traceback
log = CPLog(__name__)
class Searcher(Plugin):
in_progress = False
def __init__(self):
addEvent('searcher.all', self.allMovies)
addEvent('searcher.single', self.single)
addEvent('searcher.correct_movie', self.correctMovie)
addEvent('searcher.download', self.download)
addEvent('searcher.try_next_release', self.tryNextRelease)
addEvent('searcher.could_be_released', self.couldBeReleased)
addApiView('searcher.try_next', self.tryNextReleaseView, docs = {
'desc': 'Marks the snatched results as ignored and try the next best release',
'params': {
'id': {'desc': 'The id of the movie'},
},
})
addApiView('searcher.full_search', self.allMoviesView, docs = {
'desc': 'Starts a full search for all wanted movies',
})
addApiView('searcher.progress', self.getProgress, docs = {
'desc': 'Get the progress of current full search',
'return': {'type': 'object', 'example': """{
'progress': False || object, total & to_go,
}"""},
})
addEvent('app.load', self.setCrons)
addEvent('setting.save.searcher.cron_day.after', self.setCrons)
addEvent('setting.save.searcher.cron_hour.after', self.setCrons)
addEvent('setting.save.searcher.cron_minute.after', self.setCrons)
def setCrons(self):
fireEvent('schedule.cron', 'searcher.all', self.allMovies, day = self.conf('cron_day'), hour = self.conf('cron_hour'), minute = self.conf('cron_minute'))
def allMoviesView(self):
in_progress = self.in_progress
if not in_progress:
fireEventAsync('searcher.all')
fireEvent('notify.frontend', type = 'searcher.started', data = True, message = 'Full search started')
else:
fireEvent('notify.frontend', type = 'searcher.already_started', data = True, message = 'Full search already in progress')
return jsonified({
'success': not in_progress
})
def getProgress(self):
return jsonified({
'progress': self.in_progress
})
def allMovies(self):
if self.in_progress:
log.info('Search already in progress')
return
self.in_progress = True
db = get_session()
movies = db.query(Movie).filter(
Movie.status.has(identifier = 'active')
).all()
random.shuffle(movies)
self.in_progress = {
'total': len(movies),
'to_go': len(movies),
}
try:
search_types = self.getSearchTypes()
for movie in movies:
movie_dict = movie.to_dict({
'profile': {'types': {'quality': {}}},
'releases': {'status': {}, 'quality': {}},
'library': {'titles': {}, 'files':{}},
'files': {}
})
try:
self.single(movie_dict, search_types)
except IndexError:
log.error('Forcing library update for %s, if you see this often, please report: %s', (movie_dict['library']['identifier'], traceback.format_exc()))
fireEvent('library.update', movie_dict['library']['identifier'], force = True)
except:
log.error('Search failed for %s: %s', (movie_dict['library']['identifier'], traceback.format_exc()))
self.in_progress['to_go'] -= 1
# Break if CP wants to shut down
if self.shuttingDown():
break
except SearchSetupError:
pass
self.in_progress = False
def single(self, movie, search_types = None):
# Find out search type
try:
if not search_types:
search_types = self.getSearchTypes()
except SearchSetupError:
return
done_status = fireEvent('status.get', 'done', single = True)
if not movie['profile'] or movie['status_id'] == done_status.get('id'):
log.debug('Movie doesn\'t have a profile or already done, assuming in manage tab.')
return
db = get_session()
pre_releases = fireEvent('quality.pre_releases', single = True)
release_dates = fireEvent('library.update_release_date', identifier = movie['library']['identifier'], merge = True)
available_status, ignored_status = fireEvent('status.get', ['available', 'ignored'], single = True)
found_releases = []
default_title = getTitle(movie['library'])
if not default_title:
            log.error('No proper info found for movie, removing it from library to prevent it from having more issues.')
fireEvent('movie.delete', movie['id'], single = True)
return
fireEvent('notify.frontend', type = 'searcher.started.%s' % movie['id'], data = True, message = 'Searching for "%s"' % default_title)
ret = False
for quality_type in movie['profile']['types']:
if not self.conf('always_search') and not self.couldBeReleased(quality_type['quality']['identifier'] in pre_releases, release_dates):
log.info('Too early to search for %s, %s', (quality_type['quality']['identifier'], default_title))
continue
has_better_quality = 0
# See if better quality is available
for release in movie['releases']:
if release['quality']['order'] <= quality_type['quality']['order'] and release['status_id'] not in [available_status.get('id'), ignored_status.get('id')]:
has_better_quality += 1
# Don't search for quality lower then already available.
            if has_better_quality == 0:
log.info('Search for %s in %s', (default_title, quality_type['quality']['label']))
quality = fireEvent('quality.single', identifier = quality_type['quality']['identifier'], single = True)
results = []
for search_type in search_types:
type_results = fireEvent('%s.search' % search_type, movie, quality, merge = True)
if type_results:
results += type_results
sorted_results = sorted(results, key = lambda k: k['score'], reverse = True)
if len(sorted_results) == 0:
log.debug('Nothing found for %s in %s', (default_title, quality_type['quality']['label']))
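                    # Bias the ordering by release type when one download
                    # method is preferred: torrents first for 'torrent',
                    # NZBs first otherwise; 'both' keeps the score order.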
download_preference = self.conf('preferred_method')
if download_preference != 'both':
sorted_results = sorted(sorted_results, key = lambda k: k['type'], reverse = (download_preference == 'torrent'))
# Check if movie isn't deleted while searching
if not db.query(Movie).filter_by(id = movie.get('id')).first():
break
# Add them to this movie releases list
for nzb in sorted_results:
nzb_identifier = md5(nzb['url'])
found_releases.append(nzb_identifier)
rls = db.query(Release).filter_by(identifier = nzb_identifier).first()
if not rls:
rls = Release(
identifier = nzb_identifier,
movie_id = movie.get('id'),
quality_id = quality_type.get('quality_id'),
status_id = available_status.get('id')
)
db.add(rls)
else:
[db.delete(old_info) for old_info in rls.info]
rls.last_edit = int(time.time())
db.commit()
for info in nzb:
try:
if not isinstance(nzb[info], (str, unicode, int, long, float)):
continue
rls_info = ReleaseInfo(
identifier = info,
value = toUnicode(nzb[info])
)
rls.info.append(rls_info)
except InterfaceError:
log.debug('Couldn\'t add %s to ReleaseInfo: %s', (info, traceback.format_exc()))
db.commit()
nzb['status_id'] = rls.status_id
for nzb in sorted_results:
if not quality_type.get('finish', False) and quality_type.get('wait_for', 0) > 0 and nzb.get('age') <= quality_type.get('wait_for', 0):
log.info('Ignored, waiting %s days: %s', (quality_type.get('wait_for'), nzb['name']))
continue
if nzb['status_id'] == ignored_status.get('id'):
log.info('Ignored: %s', nzb['name'])
continue
if nzb['score'] <= 0:
                    log.info('Ignored, score too low: %s', nzb['name'])
continue
downloaded = self.download(data = nzb, movie = movie)
if downloaded is True:
ret = True
break
elif downloaded != 'try_next':
break
# Remove releases that aren't found anymore
for release in movie.get('releases', []):
if release.get('status_id') == available_status.get('id') and release.get('identifier') not in found_releases:
fireEvent('release.delete', release.get('id'), single = True)
else:
log.info('Better quality (%s) already available or snatched for %s', (quality_type['quality']['label'], default_title))
fireEvent('movie.restatus', movie['id'])
break
# Break if CP wants to shut down
if self.shuttingDown() or ret:
break
fireEvent('notify.frontend', type = 'searcher.ended.%s' % movie['id'], data = True)
return ret
def download(self, data, movie, manual = False):
# Test to see if any downloaders are enabled for this type
downloader_enabled = fireEvent('download.enabled', manual, data, single = True)
if downloader_enabled:
snatched_status = fireEvent('status.get', 'snatched', single = True)
# Download movie to temp
filedata = None
if data.get('download') and (ismethod(data.get('download')) or isfunction(data.get('download'))):
filedata = data.get('download')(url = data.get('url'), nzb_id = data.get('id'))
if filedata == 'try_next':
return filedata
download_result = fireEvent('download', data = data, movie = movie, manual = manual, filedata = filedata, single = True)
log.debug('Downloader result: %s', download_result)
if download_result:
try:
# Mark release as snatched
db = get_session()
rls = db.query(Release).filter_by(identifier = md5(data['url'])).first()
if rls:
renamer_enabled = Env.setting('enabled', 'renamer')
done_status = fireEvent('status.get', 'done', single = True)
rls.status_id = done_status.get('id') if not renamer_enabled else snatched_status.get('id')
# Save download-id info if returned
if isinstance(download_result, dict):
for key in download_result:
rls_info = ReleaseInfo(
identifier = 'download_%s' % key,
value = toUnicode(download_result.get(key))
)
rls.info.append(rls_info)
db.commit()
log_movie = '%s (%s) in %s' % (getTitle(movie['library']), movie['library']['year'], rls.quality.label)
snatch_message = 'Snatched "%s": %s' % (data.get('name'), log_movie)
log.info(snatch_message)
fireEvent('movie.snatched', message = snatch_message, data = rls.to_dict())
# If renamer isn't used, mark movie done
if not renamer_enabled:
active_status = fireEvent('status.get', 'active', single = True)
done_status = fireEvent('status.get', 'done', single = True)
try:
if movie['status_id'] == active_status.get('id'):
for profile_type in movie['profile']['types']:
if profile_type['quality_id'] == rls.quality.id and profile_type['finish']:
log.info('Renamer disabled, marking movie as finished: %s', log_movie)
# Mark release done
rls.status_id = done_status.get('id')
rls.last_edit = int(time.time())
db.commit()
# Mark movie done
mvie = db.query(Movie).filter_by(id = movie['id']).first()
mvie.status_id = done_status.get('id')
mvie.last_edit = int(time.time())
db.commit()
except:
log.error('Failed marking movie finished, renamer disabled: %s', traceback.format_exc())
except:
log.error('Failed marking movie finished: %s', traceback.format_exc())
return True
log.info('Tried to download, but none of the "%s" downloaders are enabled, or they returned an error', (data.get('type', '')))
return False
def getSearchTypes(self):
download_types = fireEvent('download.enabled_types', merge = True)
provider_types = fireEvent('provider.enabled_types', merge = True)
if download_types and len(list(set(provider_types) & set(download_types))) == 0:
log.error('There aren\'t any providers enabled for your downloader (%s). Check your settings.', ','.join(download_types))
raise NoProviders
for useless_provider in list(set(provider_types) - set(download_types)):
log.debug('Provider for "%s" enabled, but no downloader.', useless_provider)
search_types = download_types
if len(search_types) == 0:
log.error('There aren\'t any downloaders enabled. Please pick one in settings.')
raise NoDownloaders
return search_types
def correctMovie(self, nzb = None, movie = None, quality = None, **kwargs):
imdb_results = kwargs.get('imdb_results', False)
retention = Env.setting('retention', section = 'nzb')
if nzb.get('seeders') is None and 0 < retention < nzb.get('age', 0):
log.info2('Wrong: Outside retention, age is %s, needs %s or lower: %s', (nzb['age'], retention, nzb['name']))
return False
movie_name = getTitle(movie['library'])
movie_words = re.split('\W+', simplifyString(movie_name))
nzb_name = simplifyString(nzb['name'])
nzb_words = re.split('\W+', nzb_name)
# Make sure it has required words
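# required_words is comma-separated; words joined with '&' inside one
# group must all appear in the release name for that group to match.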
required_words = splitString(self.conf('required_words').lower())
req_match = 0
for req_set in required_words:
req = splitString(req_set, '&')
req_match += len(list(set(nzb_words) & set(req))) == len(req)
if self.conf('required_words') and req_match == 0:
log.info2('Wrong: Required word missing: %s', nzb['name'])
return False
# Ignore releases
ignored_words = splitString(self.conf('ignored_words').lower())
ignored_match = 0
for ignored_set in ignored_words:
ignored = splitString(ignored_set, '&')
ignored_match += len(list(set(nzb_words) & set(ignored))) == len(ignored)
if self.conf('ignored_words') and ignored_match:
log.info2("Wrong: '%s' contains 'ignored words'", (nzb['name']))
return False
# Ignore porn stuff
pron_tags = ['xxx', 'sex', 'anal', 'tits', 'fuck', 'porn', 'orgy', 'milf', 'boobs', 'erotica', 'erotic']
pron_words = list(set(nzb_words) & set(pron_tags) - set(movie_words))
if pron_words:
log.info('Wrong: %s, probably pr0n', (nzb['name']))
return False
preferred_quality = fireEvent('quality.single', identifier = quality['identifier'], single = True)
# Contains lower quality string
if self.containsOtherQuality(nzb, movie_year = movie['library']['year'], preferred_quality = preferred_quality):
log.info2('Wrong: %s, looking for %s', (nzb['name'], quality['label']))
return False
# File too small
if nzb['size'] and preferred_quality['size_min'] > nzb['size']:
log.info2('Wrong: "%s" is too small to be %s. %sMB instead of the minimum of %sMB.', (nzb['name'], preferred_quality['label'], nzb['size'], preferred_quality['size_min']))
return False
# File too large
if nzb['size'] and preferred_quality.get('size_max') < nzb['size']:
log.info2('Wrong: "%s" is too large to be %s. %sMB instead of the maximum of %sMB.', (nzb['name'], preferred_quality['label'], nzb['size'], preferred_quality['size_max']))
return False
# Provider specific functions
get_more = nzb.get('get_more_info')
if get_more:
get_more(nzb)
extra_check = nzb.get('extra_check')
if extra_check and not extra_check(nzb):
return False
if imdb_results:
return True
# Check if nzb contains imdb link
if self.checkIMDB([nzb.get('description', '')], movie['library']['identifier']):
return True
for raw_title in movie['library']['titles']:
for movie_title in possibleTitles(raw_title['title']):
movie_words = re.split('\W+', simplifyString(movie_title))
if self.correctName(nzb['name'], movie_title):
# if no IMDB link, at least check that the year matches within a range of 1
if len(movie_words) > 2 and self.correctYear([nzb['name']], movie['library']['year'], 1):
return True
# if no IMDB link, at least check that the year matches exactly
if len(movie_words) <= 2 and self.correctYear([nzb['name']], movie['library']['year'], 0):
return True
log.info("Wrong: %s, undetermined naming. Looking for '%s (%s)'", (nzb['name'], movie_name, movie['library']['year']))
return False
def containsOtherQuality(self, nzb, movie_year = None, preferred_quality = {}):
name = nzb['name']
size = nzb.get('size', 0)
nzb_words = re.split('\W+', simplifyString(name))
qualities = fireEvent('quality.all', single = True)
found = {}
for quality in qualities:
# Main in words
if quality['identifier'] in nzb_words:
found[quality['identifier']] = True
# Alt in words
if list(set(nzb_words) & set(quality['alternative'])):
found[quality['identifier']] = True
# Try guessing via quality tags
guess = fireEvent('quality.guess', [nzb.get('name')], single = True)
if guess:
found[guess['identifier']] = True
# Hack for older movies that don't contain a quality tag
year_name = fireEvent('scanner.name_year', name, single = True)
if len(found) == 0 and movie_year < datetime.datetime.now().year - 3 and not year_name.get('year', None):
if size > 3000: # Assume dvdr
log.info('Quality was missing in name, assuming it\'s a DVD-R based on the size: %s', (size))
found['dvdr'] = True
else: # Assume dvdrip
log.info('Quality was missing in name, assuming it\'s a DVD-Rip based on the size: %s', (size))
found['dvdrip'] = True
# Allow other qualities
for allowed in preferred_quality.get('allow', []):
if found.get(allowed):
del found[allowed]
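# The name is clean only when exactly the preferred quality remains in
# 'found'; anything else counts as containing another quality.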
return not (found.get(preferred_quality['identifier']) and len(found) == 1)
def checkIMDB(self, haystack, imdbId):
for string in haystack:
if 'imdb.com/title/' + imdbId in string:
return True
return False
def correctYear(self, haystack, year, year_range):
for string in haystack:
year_name = fireEvent('scanner.name_year', string, single = True)
if year_name and ((year - year_range) <= year_name.get('year') <= (year + year_range)):
log.debug('Movie year matches range: %s, looking for %s', (year_name.get('year'), year))
return True
log.debug('Movie year doesn\'t match range: %s, looking for %s', (year_name.get('year'), year))
return False
def correctName(self, check_name, movie_name):
check_names = [check_name]
# Match names between "
try: check_names.append(re.search(r'([\'"])[^\1]*\1', check_name).group(0))
except: pass
# Match longest name between []
try: check_names.append(max(check_name.split('['), key = len))
except: pass
for check_name in list(set(check_names)):
check_movie = fireEvent('scanner.name_year', check_name, single = True)
try:
check_words = filter(None, re.split('\W+', check_movie.get('name', '')))
movie_words = filter(None, re.split('\W+', simplifyString(movie_name)))
if len(check_words) > 0 and len(movie_words) > 0 and len(list(set(check_words) - set(movie_words))) == 0:
return True
except:
pass
return False
def couldBeReleased(self, is_pre_release, dates):
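# Decide whether it makes sense to search yet, based on theater/DVD
# release dates (all timestamps and offsets below are in seconds).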
now = int(time.time())
if not dates or (dates.get('theater', 0) == 0 and dates.get('dvd', 0) == 0):
return True
else:
# For movies before 1972
if dates.get('theater', 0) < 0 or dates.get('dvd', 0) < 0:
return True
if is_pre_release:
# Prerelease 1 week before theaters
if dates.get('theater') - 604800 < now:
return True
else:
# 12 weeks after theater release
if dates.get('theater') > 0 and dates.get('theater') + 7257600 < now:
return True
if dates.get('dvd') > 0:
# 4 weeks before dvd release
if dates.get('dvd') - 2419200 < now:
return True
# Dvd should be released
if dates.get('dvd') < now:
return True
return False
def tryNextReleaseView(self):
trynext = self.tryNextRelease(getParam('id'))
return jsonified({
'success': trynext
})
def tryNextRelease(self, movie_id, manual = False):
snatched_status = fireEvent('status.get', 'snatched', single = True)
ignored_status = fireEvent('status.get', 'ignored', single = True)
try:
db = get_session()
rels = db.query(Release).filter_by(
status_id = snatched_status.get('id'),
movie_id = movie_id
).all()
for rel in rels:
rel.status_id = ignored_status.get('id')
db.commit()
movie_dict = fireEvent('movie.get', movie_id, single = True)
log.info('Trying next release for: %s', getTitle(movie_dict['library']))
fireEvent('searcher.single', movie_dict)
return True
except:
log.error('Failed searching for next release: %s', traceback.format_exc())
return False
class SearchSetupError(Exception):
pass
class NoDownloaders(SearchSetupError):
pass
class NoProviders(SearchSetupError):
pass
|
mozvip/CouchPotatoServer
|
couchpotato/core/plugins/searcher/main.py
|
Python
|
gpl-3.0
| 25,667
|
'''
SASSIE Copyright (C) 2011 Joseph E. Curtis
This program comes with ABSOLUTELY NO WARRANTY;
This is free software, and you are welcome to redistribute it under certain
conditions; see http://www.gnu.org/licenses/gpl-3.0.html for details.
'''
# System imports
from distutils.core import *
from distutils import sysconfig
# Third-party modules - we depend on numpy for everything
import numpy
from numpy.distutils.core import Extension, setup
# Obtain the numpy include directory. This logic works across numpy versions.
try:
numpy_include = numpy.get_include()
except AttributeError:
numpy_include = numpy.get_numpy_include()
# simple extension module
matrix_math = Extension(name="matrix_math",sources=['./matrix_math.f'],
include_dirs = [numpy_include],
)
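# numpy.distutils compiles Fortran sources with f2py, so the resulting
# matrix_math module can be imported directly from Python.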
# NumPy typemap tests setup
setup( name = "matrix_math",
description = "Module calculates matrix product",
author = "Joseph E. Curtis",
version = "0.1",
ext_modules = [matrix_math]
)
|
StevenCHowell/zazmol
|
src/python/extensions/matrix_math/setup_matrix_multiply.py
|
Python
|
gpl-3.0
| 1,074
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from PIL import Image
from treemap.images import save_uploaded_image
from treemap.tests import LocalMediaTestCase, media_dir
class SaveImageTest(LocalMediaTestCase):
@media_dir
def test_rotates_image(self):
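# Saving is expected to rotate the sideways source upright, so the
# resulting width and height are swapped relative to the original.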
sideways_file = self.load_resource('tree_sideways.jpg')
img_file, _ = save_uploaded_image(sideways_file, 'test')
expected_width, expected_height = Image.open(sideways_file).size
actual_width, actual_height = Image.open(img_file).size
self.assertEquals(expected_width, actual_height)
self.assertEquals(expected_height, actual_width)
|
ctaylo37/OTM2
|
opentreemap/treemap/tests/test_images.py
|
Python
|
gpl-3.0
| 735
|
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2017 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
import numpy as np
from horton import * # pylint: disable=wildcard-import,unused-wildcard-import
from horton.io.test.common import compute_mulliken_charges, compute_hf_energy
def test_load_wfn_low_he_s():
fn_wfn = context.get_fn('test/he_s_orbital.wfn')
title, numbers, coordinates, centers, type_assignment, exponents, \
mo_count, occ_num, mo_energy, coefficients, energy = load_wfn_low(fn_wfn)
assert title == 'He atom - decontracted 6-31G basis set'
assert numbers.shape == (1,)
assert numbers == [2]
assert coordinates.shape == (1, 3)
assert (coordinates == [0.00, 0.00, 0.00]).all()
assert centers.shape == (4,)
assert (centers == [0, 0, 0, 0]).all()
assert type_assignment.shape == (4,)
assert (type_assignment == [1, 1, 1, 1]).all()
assert exponents.shape == (4,)
assert (exponents == [0.3842163E+02, 0.5778030E+01, 0.1241774E+01, 0.2979640E+00]).all()
assert mo_count.shape == (1,)
assert mo_count == [1]
assert occ_num.shape == (1,)
assert occ_num == [2.0]
assert mo_energy.shape == (1,)
assert mo_energy == [-0.914127]
assert coefficients.shape == (4, 1)
expected = np.array([0.26139500E+00, 0.41084277E+00, 0.39372947E+00, 0.14762025E+00])
assert (coefficients == expected.reshape(4, 1)).all()
assert abs(energy - (-2.855160426155)) < 1.e-5
def test_load_wfn_low_h2o():
fn_wfn = context.get_fn('test/h2o_sto3g.wfn')
title, numbers, coordinates, centers, type_assignment, exponents, \
mo_count, occ_num, mo_energy, coefficients, energy = load_wfn_low(fn_wfn)
assert title == 'H2O Optimization'
assert numbers.shape == (3,)
assert (numbers == np.array([8, 1, 1])).all()
assert coordinates.shape == (3, 3)
assert (coordinates[0] == [-4.44734101, 3.39697999, 0.00000000]).all()
assert (coordinates[1] == [-2.58401495, 3.55136194, 0.00000000]).all()
assert (coordinates[2] == [-4.92380519, 5.20496220, 0.00000000]).all()
assert centers.shape == (21,)
assert (centers[:15] == np.zeros(15, int)).all()
assert (centers[15:] == np.array([1, 1, 1, 2, 2, 2])).all()
assert type_assignment.shape == (21,)
assert (type_assignment[:6] == np.ones(6)).all()
assert (type_assignment[6:15] == np.array([2, 2, 2, 3, 3, 3, 4, 4, 4])).all()
assert (type_assignment[15:] == np.ones(6)).all()
assert exponents.shape == (21,)
assert (exponents[:3] == [0.1307093E+03, 0.2380887E+02, 0.6443608E+01]).all()
assert (exponents[5:8] == [0.3803890E+00, 0.5033151E+01, 0.1169596E+01]).all()
assert (exponents[13:16] == [0.1169596E+01, 0.3803890E+00, 0.3425251E+01]).all()
assert exponents[-1] == 0.1688554E+00
assert mo_count.shape == (5,)
assert (mo_count == [1, 2, 3, 4, 5]).all()
assert occ_num.shape == (5,)
assert np.sum(occ_num) == 10.0
assert (occ_num == [2.0, 2.0, 2.0, 2.0, 2.0]).all()
assert mo_energy.shape == (5,)
assert (mo_energy == np.sort(mo_energy)).all()
assert (mo_energy[:3] == [-20.251576, -1.257549, -0.593857]).all()
assert (mo_energy[3:] == [-0.459729, -0.392617]).all()
assert coefficients.shape == (21, 5)
expected = [0.42273517E+01, -0.99395832E+00, 0.19183487E-11, 0.44235381E+00, -0.57941668E-14]
assert (coefficients[0] == expected).all()
assert coefficients[6, 2] == 0.83831599E+00
assert coefficients[10, 3] == 0.65034846E+00
assert coefficients[17, 1] == 0.12988055E-01
assert coefficients[-1, 0] == -0.46610858E-03
assert coefficients[-1, -1] == -0.33277355E-15
assert abs(energy - (-74.965901217080)) < 1.e-6
def test_get_permutation_orbital():
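# The integer arrays are *.wfn-style type assignments; the expected index
# arrays encode how get_permutation_orbital regroups them orbital by orbital.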
assert (get_permutation_orbital(np.array([1, 1, 1])) == [0, 1, 2]).all()
assert (get_permutation_orbital(np.array([1, 1, 2, 3, 4])) == [0, 1, 2, 3, 4]).all()
assert (get_permutation_orbital(np.array([2, 3, 4])) == [0, 1, 2]).all()
assert (get_permutation_orbital(np.array([2, 2, 3, 3, 4, 4])) == [0, 2, 4, 1, 3, 5]).all()
assign = np.array([1, 1, 2, 2, 3, 3, 4, 4, 1])
expect = [0, 1, 2, 4, 6, 3, 5, 7, 8]
assert (get_permutation_orbital(assign) == expect).all()
assign = np.array([1, 5, 6, 7, 8, 9, 10, 1])
expect = [0, 1, 2, 3, 4, 5, 6, 7]
assert (get_permutation_orbital(assign) == expect).all()
assign = np.array([5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10])
expect = [0, 2, 4, 6, 8, 10, 1, 3, 5, 7, 9, 11]
assert (get_permutation_orbital(assign) == expect).all()
assign = np.array([1, 2, 2, 3, 3, 4, 4, 5, 6, 7, 8, 9, 10])
expect = [0, 1, 3, 5, 2, 4, 6, 7, 8, 9, 10, 11, 12]
assert (get_permutation_orbital(assign) == expect).all()
# f orbitals
assign = np.array([11, 12, 13, 17, 14, 15, 18, 19, 16, 20])
assert (get_permutation_orbital(assign) == range(10)).all()
# g orbitals
assign = np.array([23, 29, 32, 27, 22, 28, 35, 34, 26, 31, 33, 30, 25, 24, 21])
assert (get_permutation_orbital(assign) == range(15)).all()
# h orbitals
assert (get_permutation_orbital(np.arange(36, 57)) == range(21)).all()
assign = np.array([1, 1, 11, 12, 13, 17, 14, 15, 18, 19, 16, 20])
assert (get_permutation_orbital(assign) == range(12)).all()
assign = np.array([2, 3, 4, 11, 12, 13, 17, 14, 15, 18, 19, 16, 20, 1, 1])
assert (get_permutation_orbital(assign) == range(15)).all()
def test_get_permutation_basis():
assert (get_permutation_basis(np.array([1, 1, 1])) == [0, 1, 2]).all()
assert (get_permutation_basis(np.array([2, 2, 3, 3, 4, 4])) == [0, 2, 4, 1, 3, 5]).all()
assert (get_permutation_basis(np.array([1, 2, 3, 4, 1])) == [0, 1, 2, 3, 4]).all()
assert (get_permutation_basis(np.array([5, 6, 7, 8, 9, 10])) == [0, 3, 4, 1, 5, 2]).all()
assign = np.repeat([5, 6, 7, 8, 9, 10], 2)
expect = [0, 6, 8, 2, 10, 4, 1, 7, 9, 3, 11, 5]
assert (get_permutation_basis(assign) == expect).all()
assert (get_permutation_basis(np.arange(1, 11)) == [0, 1, 2, 3, 4, 7, 8, 5, 9, 6]).all()
assign = np.array([1, 5, 6, 7, 8, 9, 10, 1])
expect = [0, 1, 4, 5, 2, 6, 3, 7]
assert (get_permutation_basis(assign) == expect).all()
assign = np.array([11, 12, 13, 17, 14, 15, 18, 19, 16, 20])
expect = [0, 4, 5, 3, 9, 6, 1, 8, 7, 2]
assert (get_permutation_basis(assign) == expect).all()
assign = np.array([1, 11, 12, 13, 17, 14, 15, 18, 19, 16, 20, 1])
expect = [0, 1, 5, 6, 4, 10, 7, 2, 9, 8, 3, 11]
assert (get_permutation_basis(assign) == expect).all()
assign = np.array([1, 11, 12, 13, 17, 14, 15, 18, 19, 16, 20, 2, 2, 3, 3, 4, 4])
expect = [0, 1, 5, 6, 4, 10, 7, 2, 9, 8, 3, 11, 13, 15, 12, 14, 16]
assert (get_permutation_basis(assign) == expect).all()
assign = [1, 11, 12, 13, 17, 14, 15, 18, 19, 16, 20, 2, 3, 4, 5, 6, 7, 8, 9, 10]
expect = np.array([0, 1, 5, 6, 4, 10, 7, 2, 9, 8, 3, 11, 12, 13, 14, 17, 18, 15, 19, 16])
assert (get_permutation_basis(np.array(assign)) == expect).all()
assert (get_permutation_basis(np.arange(36, 57)) == np.arange(21)[::-1]).all()
assign = [23, 29, 32, 27, 22, 28, 35, 34, 26, 31, 33, 30, 25, 24, 21]
expect = [14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]
assert (get_permutation_basis(np.array(assign)) == expect).all()
assert (get_permutation_basis(np.arange(36, 57)) == range(21)[::-1]).all()
def test_get_mask():
assert (get_mask(np.array([2, 3, 4])) == [True, False, False]).all()
expected = [True, True, False, False, True, True, False, False]
assert (get_mask(np.array([1, 2, 3, 4, 1, 2, 3, 4])) == expected).all()
expected = [True, False, False, False, False, False]
assert (get_mask(np.array([5, 6, 7, 8, 9, 10])) == expected).all()
expected = [True, False, False, True, True, False, False, False, False, False]
assert (get_mask(np.array([2, 3, 4, 1, 5, 6, 7, 8, 9, 10])) == expected).all()
expected = [True, False, False, False, False, False, False, False, False, False]
assert (get_mask(np.arange(11, 21)) == expected).all()
assert (get_mask(np.array([21, 24, 25])) == [True, False, False]).all()
assert (get_mask(np.array([11, 21, 36, 1])) == [True, True, True, True]).all()
def check_load_wfn(name):
# Load the system from the *.wfn file
mol1 = IOData.from_file(context.get_fn('test/%s.wfn' % name))
# Load the same system from the corresponding *.fchk file
mol2 = IOData.from_file(context.get_fn('test/%s.fchk' % name))
# Coordinates check:
assert (abs(mol1.coordinates - mol2.coordinates) < 1e-6).all()
# Numbers check
numbers1 = mol1.numbers
numbers2 = mol2.numbers
assert (numbers1 == numbers2).all()
# Basis Set check:
obasis1 = mol1.obasis
obasis2 = mol2.obasis
assert obasis1.nbasis == obasis2.nbasis
assert (obasis1.shell_map == obasis2.shell_map).all()
assert (obasis1.shell_types == obasis2.shell_types).all()
assert (obasis1.nprims == obasis2.nprims).all()
assert (abs(obasis1.alphas - obasis2.alphas) < 1.e-4).all()
# Comparing MOs (*.wfn might not contain virtual orbitals):
n_mo = mol1.orb_alpha.nfn
assert (abs(mol1.orb_alpha.energies - mol2.orb_alpha.energies[:n_mo]) < 1.e-5).all()
assert (mol1.orb_alpha.occupations == mol2.orb_alpha.occupations[:n_mo]).all()
assert (abs(mol1.orb_alpha.coeffs - mol2.orb_alpha.coeffs[:, :n_mo]) < 1.e-7).all()
# Check overlap
olp1 = obasis1.compute_overlap()
olp2 = obasis2.compute_overlap()
obasis2.compute_overlap(olp2)
assert (abs(olp1 - olp2) < 1e-6).all()
# Check normalization
mol1.orb_alpha.check_normalization(olp1, 1e-5)
# Check charges
dm_full1 = mol1.get_dm_full()
charges1 = compute_mulliken_charges(obasis1, numbers1, dm_full1)
dm_full2 = mol2.get_dm_full()
charges2 = compute_mulliken_charges(obasis2, numbers2, dm_full2)
assert (abs(charges1 - charges2) < 1e-6).all()
# Check energy
energy1 = compute_hf_energy(mol1)
energy2 = compute_hf_energy(mol2)
# check loaded & computed energy from wfn file
assert abs(energy1 - mol1.energy) < 1.e-5
assert abs(energy1 - energy2) < 1e-5
return energy1, charges1
def test_load_wfn_he_s_virtual():
energy, charges = check_load_wfn('he_s_virtual')
# Compare to the energy printed in wfn file
assert abs(energy - (-2.855160426155)) < 1.e-6
assert (abs(charges - [0.0]) < 1e-5).all()
def test_load_wfn_he_s():
energy, charges = check_load_wfn('he_s_orbital')
# Compare to the energy printed in wfn file
assert abs(energy - (-2.855160426155)) < 1.e-6
assert (abs(charges - [0.0]) < 1e-5).all()
def test_load_wfn_he_sp():
energy, charges = check_load_wfn('he_sp_orbital')
# Compare to the energy printed in wfn file
assert abs(energy - (-2.859895424589)) < 1.e-6
assert (abs(charges - [0.0]) < 1e-5).all()
def test_load_wfn_he_spd():
energy, charges = check_load_wfn('he_spd_orbital')
# Compare to the energy printed in wfn file
assert abs(energy - (-2.855319016184)) < 1.e-6
assert (abs(charges - [0.0]) < 1e-5).all()
def test_load_wfn_he_spdf():
energy, charges = check_load_wfn('he_spdf_orbital')
# Compare to the energy printed in wfn file
assert abs(energy - (-1.100269433080)) < 1.e-6
assert (abs(charges - [0.0]) < 1e-5).all()
def test_load_wfn_he_spdfgh():
energy, charges = check_load_wfn('he_spdfgh_orbital')
# Compare to the energy printed in wfn file
assert abs(energy - (-1.048675168346)) < 1.e-6
assert (abs(charges - [0.0]) < 1e-5).all()
def test_load_wfn_he_spdfgh_virtual():
energy, charges = check_load_wfn('he_spdfgh_virtual')
# Compare to the energy printed in wfn file
assert abs(energy - (-1.048675168346)) < 1.e-6
assert (abs(charges - [0.0]) < 1e-5).all()
def check_wfn(fn_wfn, restricted, nbasis, energy, charges):
fn_wfn = context.get_fn(fn_wfn)
mol = IOData.from_file(fn_wfn)
assert mol.obasis.nbasis == nbasis
olp = mol.obasis.compute_overlap()
if restricted:
mol.orb_alpha.check_normalization(olp, 1e-5)
assert not hasattr(mol, 'orb_beta')
else:
mol.orb_alpha.check_normalization(olp, 1e-5)
mol.orb_beta.check_normalization(olp, 1e-5)
if energy is not None:
assert abs(energy - mol.energy) < 1.e-5
myenergy = compute_hf_energy(mol)
assert abs(energy - myenergy) < 1e-5
dm_full = mol.get_dm_full()
mycharges = compute_mulliken_charges(mol.obasis, mol.numbers, dm_full)
assert (abs(charges - mycharges) < 1e-5).all()
orb_beta = getattr(mol, 'orb_beta', None)
return mol.obasis, mol.coordinates, mol.numbers, dm_full, mol.orb_alpha, orb_beta, mol.energy
def test_load_wfn_h2o_sto3g_decontracted():
check_wfn(
'test/h2o_sto3g_decontracted.wfn',
True, 21, -75.162231674351,
np.array([-0.546656, 0.273328, 0.273328]),
)
def test_load_wfn_h2_ccpvqz_virtual():
obasis, coordinates, numbers, dm_full, orb_alpha, orb_beta, energy = check_wfn(
'test/h2_ccpvqz.wfn',
True, 74, -1.133504568400,
np.array([0.0, 0.0]),
)
expect = [82.64000, 12.41000, 2.824000, 0.7977000, 0.2581000]
assert (abs(obasis.alphas[:5] - expect) < 1.e-5).all()
expect = [-0.596838, 0.144565, 0.209605, 0.460401, 0.460401]
assert (orb_alpha.energies[:5] == expect).all()
expect = [12.859067, 13.017471, 16.405834, 25.824716, 26.100443]
assert (orb_alpha.energies[-5:] == expect).all()
assert (orb_alpha.occupations[:5] == [1.0, 0.0, 0.0, 0.0, 0.0]).all()
assert abs(orb_alpha.occupations.sum() - 1.0) < 1.e-6
def test_load_wfn_h2o_sto3g():
check_wfn(
'test/h2o_sto3g.wfn',
True, 21, -74.965901217080,
np.array([-0.330532, 0.165266, 0.165266])
)
def test_load_wfn_li_sp_virtual():
obasis, coordinates, numbers, dm_full, orb_alpha, orb_beta, energy = check_wfn(
'test/li_sp_virtual.wfn',
False, 8, -3.712905542719,
np.array([0.0, 0.0])
)
assert abs(orb_alpha.occupations.sum() - 2.0) < 1.e-6
assert abs(orb_beta.occupations.sum() - 1.0) < 1.e-6
assert (orb_alpha.occupations == [1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).all()
assert (orb_beta.occupations == [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).all()
expect = [-0.087492, -0.080310, 0.158784, 0.158784, 1.078773, 1.090891, 1.090891, 49.643670]
assert (abs(orb_alpha.energies - expect) < 1.e-6).all()
expect = [-0.079905, 0.176681, 0.176681, 0.212494, 1.096631, 1.096631, 1.122821, 49.643827]
assert (abs(orb_beta.energies - expect) < 1.e-6).all()
assert orb_alpha.coeffs.shape == (8, 8)
assert orb_beta.coeffs.shape == (8, 8)
def test_load_wfn_li_sp():
fn_wfn = context.get_fn('test/li_sp_orbital.wfn')
mol = IOData.from_file(fn_wfn)
assert mol.title == 'Li atom - using s & p orbitals'
assert mol.orb_alpha.nfn == 2
assert mol.orb_beta.nfn == 1
assert abs(mol.energy - (-3.712905542719)) < 1.e-5
def test_load_wfn_o2():
obasis, coordinates, numbers, dm_full, orb_alpha, orb_beta, energy = check_wfn(
'test/o2_uhf.wfn',
False, 72, -149.664140769678,
np.array([0.0, 0.0]),
)
assert orb_alpha.nfn == 9
assert orb_beta.nfn == 7
def test_load_wfn_o2_virtual():
obasis, coordinates, numbers, dm_full, orb_alpha, orb_beta, energy = check_wfn(
'test/o2_uhf_virtual.wfn',
False, 72, -149.664140769678,
np.array([0.0, 0.0]),
)
assert abs(orb_alpha.occupations.sum() - 9.0) < 1.e-6
assert abs(orb_beta.occupations.sum() - 7.0) < 1.e-6
assert orb_alpha.occupations.shape == (44,)
assert orb_beta.occupations.shape == (44,)
assert (orb_alpha.occupations[:9] == np.ones(9)).all()
assert (orb_beta.occupations[:7] == np.ones(7)).all()
assert (orb_alpha.occupations[9:] == np.zeros(35)).all()
assert (orb_beta.occupations[7:] == np.zeros(37)).all()
assert orb_alpha.energies.shape == (44,)
assert orb_beta.energies.shape == (44,)
assert orb_alpha.energies[0] == -20.752000
assert orb_alpha.energies[10] == 0.179578
assert orb_alpha.energies[-1] == 51.503193
assert orb_beta.energies[0] == -20.697027
assert orb_beta.energies[15] == 0.322590
assert orb_beta.energies[-1] == 51.535258
assert orb_alpha.coeffs.shape == (72, 44)
assert orb_beta.coeffs.shape == (72, 44)
def test_load_wfn_lif_fci():
obasis, coordinates, numbers, dm_full, orb_alpha, orb_beta, energy = check_wfn(
'test/lif_fci.wfn',
True, 44, None,
np.array([-0.645282, 0.645282]),
)
assert orb_alpha.occupations.shape == (18,)
assert abs(orb_alpha.occupations.sum() - 6.0) < 1.e-6
assert orb_alpha.occupations[0] == 2.00000000 / 2
assert orb_alpha.occupations[10] == 0.00128021 / 2
assert orb_alpha.occupations[-1] == 0.00000054 / 2
assert orb_alpha.energies.shape == (18,)
assert orb_alpha.energies[0] == -26.09321253
assert orb_alpha.energies[15] == 1.70096290
assert orb_alpha.energies[-1] == 2.17434072
assert orb_alpha.coeffs.shape == (44, 18)
kin = obasis.compute_kinetic()
expected_kin = 106.9326884815 # FCI kinetic energy
expected_nn = 9.1130265227
assert abs(np.einsum('ab,ba', kin, dm_full) - expected_kin) < 1.e-6
assert abs(compute_nucnuc(coordinates, numbers.astype(float)) - expected_nn) < 1.e-6
points = np.array([[0.0, 0.0, -0.17008], [0.0, 0.0, 0.0], [0.0, 0.0, 0.03779]])
density = np.zeros(3)
obasis.compute_grid_density_dm(dm_full, points, density)
assert (abs(density - [0.492787, 0.784545, 0.867723]) < 1.e-4).all()
assert abs(energy - (-107.0575700853)) < 1.e-5 # FCI energy
def test_load_wfn_lih_cation_fci():
obasis, coordinates, numbers, dm_full, orb_alpha, orb_beta, energy = check_wfn(
'test/lih_cation_fci.wfn',
True, 26, None,
np.array([0.913206, 0.086794]),
)
assert (numbers == [3, 1]).all()
expected_kin = 7.7989675958 # FCI kinetic energy
expected_nn = 0.9766607347
kin = obasis.compute_kinetic()
assert abs(np.einsum('ab,ba', kin, dm_full) - expected_kin) < 1.e-6
assert abs(compute_nucnuc(coordinates, numbers.astype(float)) - expected_nn) < 1.e-6
assert orb_alpha.occupations.shape == (11,)
assert abs(orb_alpha.occupations.sum() - 1.5) < 1.e-6
assert abs(energy - (-7.7214366383)) < 1.e-5 # FCI energy
|
QuantumElephant/horton
|
horton/io/test/test_wfn.py
|
Python
|
gpl-3.0
| 19,258
|
def is_perfect_number(n):
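# A perfect number equals the sum of its proper divisors.
# Example: 6 (1+2+3) and 28 (1+2+4+7+14) are perfect; 12 is not.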
total = 0
for x in range(1, n):
if n % x == 0:
total += x
return total == n
num = int(input("Please enter a number to check if it is perfect or not: "))
print(is_perfect_number(num))
|
OpenGenus/cosmos
|
code/mathematical_algorithms/src/perfect_number/perfect_number.py
|
Python
|
gpl-3.0
| 235
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.module_name)
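# These labels let the migration reference a custom user model (Django 1.5+)
# while falling back to auth.User on older Django versions.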
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'ImageRevision.width'
db.add_column('wiki_imagerevision', 'width',
self.gf('django.db.models.fields.SmallIntegerField')(default=0),
keep_default=False)
# Adding field 'ImageRevision.height'
db.add_column('wiki_imagerevision', 'height',
self.gf('django.db.models.fields.SmallIntegerField')(default=0),
keep_default=False)
def backwards(self, orm):
# Deleting field 'ImageRevision.width'
db.delete_column('wiki_imagerevision', 'width')
# Deleting field 'ImageRevision.height'
db.delete_column('wiki_imagerevision', 'height')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'django_notify.notificationtype': {
'Meta': {'object_name': 'NotificationType', 'db_table': "'notify_notificationtype'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'django_notify.settings': {
'Meta': {'object_name': 'Settings', 'db_table': "'notify_settings'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interval': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label})
},
'django_notify.subscription': {
'Meta': {'object_name': 'Subscription', 'db_table': "'notify_subscription'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notification_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['django_notify.NotificationType']"}),
'object_id': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'send_emails': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'settings': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['django_notify.Settings']"})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'wiki.article': {
'Meta': {'object_name': 'Article'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_revision': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'current_set'", 'unique': 'True', 'null': 'True', 'to': "orm['wiki.ArticleRevision']"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'group_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'group_write': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'other_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'other_write': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_articles'", 'null': 'True', 'to': "orm['%s']" % user_orm_label})
},
'wiki.articleforobject': {
'Meta': {'unique_together': "(('content_type', 'object_id'),)", 'object_name': 'ArticleForObject'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Article']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_articleforobject'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_mptt': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'wiki.articleplugin': {
'Meta': {'object_name': 'ArticlePlugin'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Article']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'wiki.articlerevision': {
'Meta': {'ordering': "('created',)", 'unique_together': "(('article', 'revision_number'),)", 'object_name': 'ArticleRevision'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Article']"}),
'automatic_log': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'previous_revision': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.ArticleRevision']", 'null': 'True', 'blank': 'True'}),
'revision_number': ('django.db.models.fields.IntegerField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'}),
'user_message': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'wiki.articlesubscription': {
'Meta': {'object_name': 'ArticleSubscription', '_ormbases': ['wiki.ArticlePlugin', 'django_notify.Subscription']},
'articleplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['wiki.ArticlePlugin']", 'unique': 'True', 'primary_key': 'True'}),
'subscription_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['django_notify.Subscription']", 'unique': 'True'})
},
'wiki.attachment': {
'Meta': {'object_name': 'Attachment', '_ormbases': ['wiki.ReusablePlugin']},
'current_revision': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'current_set'", 'unique': 'True', 'null': 'True', 'to': "orm['wiki.AttachmentRevision']"}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'reusableplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['wiki.ReusablePlugin']", 'unique': 'True', 'primary_key': 'True'})
},
'wiki.attachmentrevision': {
'Meta': {'ordering': "('created',)", 'object_name': 'AttachmentRevision'},
'attachment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Attachment']"}),
'automatic_log': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'previous_revision': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.AttachmentRevision']", 'null': 'True', 'blank': 'True'}),
'revision_number': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'}),
'user_message': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'wiki.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['wiki.RevisionPlugin']},
'revisionplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['wiki.RevisionPlugin']", 'unique': 'True', 'primary_key': 'True'})
},
'wiki.imagerevision': {
'Meta': {'object_name': 'ImageRevision', '_ormbases': ['wiki.RevisionPluginRevision']},
'height': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '2000'}),
'revisionpluginrevision_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['wiki.RevisionPluginRevision']", 'unique': 'True', 'primary_key': 'True'}),
'width': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'})
},
'wiki.reusableplugin': {
'Meta': {'object_name': 'ReusablePlugin', '_ormbases': ['wiki.ArticlePlugin']},
'articleplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['wiki.ArticlePlugin']", 'unique': 'True', 'primary_key': 'True'}),
'articles': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'shared_plugins_set'", 'symmetrical': 'False', 'to': "orm['wiki.Article']"})
},
'wiki.revisionplugin': {
'Meta': {'object_name': 'RevisionPlugin', '_ormbases': ['wiki.ArticlePlugin']},
'articleplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['wiki.ArticlePlugin']", 'unique': 'True', 'primary_key': 'True'}),
'current_revision': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'plugin_set'", 'unique': 'True', 'null': 'True', 'to': "orm['wiki.RevisionPluginRevision']"})
},
'wiki.revisionpluginrevision': {
'Meta': {'object_name': 'RevisionPluginRevision'},
'automatic_log': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'plugin': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revision_set'", 'to': "orm['wiki.RevisionPlugin']"}),
'previous_revision': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.RevisionPluginRevision']", 'null': 'True', 'blank': 'True'}),
'revision_number': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'}),
'user_message': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'wiki.simpleplugin': {
'Meta': {'object_name': 'SimplePlugin', '_ormbases': ['wiki.ArticlePlugin']},
'article_revision': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.ArticleRevision']"}),
'articleplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['wiki.ArticlePlugin']", 'unique': 'True', 'primary_key': 'True'})
},
'wiki.urlpath': {
'Meta': {'unique_together': "(('site', 'parent', 'slug'),)", 'object_name': 'URLPath'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Article']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['wiki.URLPath']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
}
}
complete_apps = ['wiki']
|
habibmasuro/django-wiki
|
wiki/migrations/0009_auto__add_field_imagerevision_width__add_field_imagerevision_height.py
|
Python
|
gpl-3.0
| 18,186
|
PROJECT_DEFAULTS = 'Project Defaults'
PATHS = 'Paths'
_from_config = {
'author': None,
'email': None,
'license': None,
'language': None,
'type': None,
'parent': None,
'vcs': None,
'footprints': None
}
_from_args = {
'name': None,
'author': None,
'email': None,
'license': None,
'language': None,
'type': None,
'parent': None,
'vcs': None,
'footprint': None
}
def load_args(args):
from_args = _from_args.copy()
keys = _from_args.keys()
for key in keys:
if hasattr(args, key):
from_args[key] = getattr(args, key)
return from_args
def load_config(config):
from_config = _from_config.copy()
keys = _from_config.keys()
if config:
if config.has_section(PROJECT_DEFAULTS):
for key in keys:
if config.has_option(PROJECT_DEFAULTS, key):
from_config[key] = config.get(PROJECT_DEFAULTS, key)
if config.has_section(PATHS):
for key in keys:
if config.has_option(PATHS, key):
from_config[key] = config.get(PATHS, key)
return from_config
def merge_configged_argged(configged, argged):
merged = configged.copy()
for key in argged.keys():
if key in configged:
# Only a non-None arg value overwrites an existing configged key;
# this lets command-line args take precedence over the config file.
if argged[key] is not None:
merged[key] = argged[key]
else:
# If the key is not already here, then it must be 'footprint', in
# which case we definitely want to include it since that is our
# highest priority and requires fewer args to generate a project
merged[key] = argged[key]
return merged
def footprint_requires(merged):
required = ['name', 'parent']
passed = 0
pass_requires = len(required)
for r in required:
if r in merged.keys():
if merged[r] is not None:
passed += 1
return passed == pass_requires
def solo_args_requires(args):
required = ['name', 'parent', 'language', 'type']
passed = 0
pass_requires = len(required)
for r in required:
if r in args.keys():
if args[r] is not None:
passed += 1
return passed == pass_requires
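# validate_args returns (is_valid, merged): config values are loaded
# first and then overridden by any non-None command-line arguments.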
def validate_args(args, config):
if config is not None:
configged = load_config(config)
argged = load_args(args)
merged = merge_configged_argged(configged, argged)
# If footprint is provided, we only need name and parent
if merged['footprint'] is not None:
return footprint_requires(merged), merged
# If no footprint, we need name, parent, language, and type to perform
# footprint lookups
if None not in [merged['name'], merged['parent'], merged['language'],
merged['type']]:
return True, merged
return False, merged
argged = load_args(args)
return solo_args_requires(argged), argged
|
shaggytwodope/progeny
|
validators.py
|
Python
|
gpl-3.0
| 3,149
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 the BabelFish authors. All rights reserved.
# Use of this source code is governed by the 3-clause BSD license
# that can be found in the LICENSE file.
#
import sys
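# Provide a string type alias that covers both Python 2 (basestring) and 3 (str).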
if sys.version_info[0] >= 3:
basestr = str
else:
basestr = basestring
from .converters import (LanguageConverter, LanguageReverseConverter, LanguageEquivalenceConverter, CountryConverter,
CountryReverseConverter)
from .country import country_converters, COUNTRIES, COUNTRY_MATRIX, Country
from .exceptions import Error, LanguageConvertError, LanguageReverseError, CountryConvertError, CountryReverseError
from .language import language_converters, LANGUAGES, LANGUAGE_MATRIX, Language
from .script import SCRIPTS, SCRIPT_MATRIX, Script
|
clinton-hall/nzbToMedia
|
libs/common/babelfish/__init__.py
|
Python
|
gpl-3.0
| 761
|
# Lookup Bitcoin value from exchanges
from exchanges.bitfinex import Bitfinex
import re
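# Bitfinex().get_current_price() is assumed to return the latest BTC/USD
# price as a number; it is formatted with thousands separators below.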
def bitcoinValue(msg):
val = Bitfinex().get_current_price()
formattedVal = "$" + "{:,.2f}".format(val)
if re.search(r"(?i)moon", msg):
return "To the moon! " + formattedVal
else:
return "Bitcoin: " + formattedVal
|
bhipple/brobot
|
currency.py
|
Python
|
gpl-3.0
| 336
|
def caught(pyn, fpyn):
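# fpyn.xy() gives the chaser's position; the pynguin is caught when the
# distance between the two is at most one unit.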
fx, fy = fpyn.xy()
return pyn.distance(fx, fy) <= 1
|
aresnick/pynguin
|
doc/examples_src/threaded_pynd/00015.py
|
Python
|
gpl-3.0
| 83
|
#!/usr/pkg/bin/python
import os, sys, time
from reportlab.graphics.barcode.common import *
from reportlab.graphics.barcode.code39 import *
from reportlab.graphics.barcode.code93 import *
from reportlab.graphics.barcode.code128 import *
from reportlab.graphics.barcode.usps import *
from reportlab.graphics.barcode.usps4s import USPS_4State
from reportlab.platypus import Spacer, SimpleDocTemplate, Table, TableStyle, Preformatted, PageBreak
from reportlab.lib.units import inch, cm
from reportlab.lib import colors
from reportlab.pdfgen.canvas import Canvas
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.platypus.paragraph import Paragraph
from reportlab.platypus.frames import Frame
from reportlab.platypus.flowables import XBox, KeepTogether
from reportlab.graphics.shapes import Drawing
from reportlab.graphics.barcode import getCodes, getCodeNames, createBarcodeDrawing, createBarcodeImageInMemory
def run():
styles = getSampleStyleSheet()
styleN = styles['Normal']
styleH = styles['Heading1']
story = []
#for codeNames in code
story.append(Paragraph('I2of5', styleN))
story.append(I2of5(1234, barWidth = inch*0.02, checksum=0))
story.append(Paragraph('MSI', styleN))
story.append(MSI(1234))
story.append(Paragraph('Codabar', styleN))
story.append(Codabar("A012345B", barWidth = inch*0.02))
story.append(Paragraph('Code 11', styleN))
story.append(Code11("01234545634563"))
story.append(Paragraph('Code 39', styleN))
story.append(Standard39("A012345B%R"))
story.append(Paragraph('Extended Code 39', styleN))
story.append(Extended39("A012345B}"))
story.append(Paragraph('Code93', styleN))
story.append(Standard93("CODE 93"))
story.append(Paragraph('Extended Code93', styleN))
story.append(Extended93("L@@K! Code 93 :-)")) #, barWidth=0.005 * inch))
story.append(Paragraph('Code 128', styleN))
c=Code128("AB-12345678") #, barWidth=0.005 * inch)
#print 'WIDTH =', (c.width / inch), 'barWidth =', (c.barWidth / inch)
#print 'LQ =', (c.lquiet / inch), 'RQ =', (c.rquiet / inch)
story.append(c)
story.append(Paragraph('USPS FIM', styleN))
story.append(FIM("A"))
story.append(Paragraph('USPS POSTNET', styleN))
story.append(POSTNET('78247-1043'))
story.append(Paragraph('USPS 4 State', styleN))
story.append(USPS_4State('01234567094987654321','01234567891'))
from reportlab.graphics.barcode import createBarcodeDrawing
story.append(Paragraph('EAN13', styleN))
bcd = createBarcodeDrawing('EAN13', value='123456789012')
story.append(bcd)
story.append(Paragraph('EAN8', styleN))
bcd = createBarcodeDrawing('EAN8', value='1234567')
story.append(bcd)
story.append(Paragraph('UPCA', styleN))
bcd = createBarcodeDrawing('UPCA', value='03600029145')
story.append(bcd)
story.append(Paragraph('USPS_4State', styleN))
bcd = createBarcodeDrawing('USPS_4State', value='01234567094987654321',routing='01234567891')
story.append(bcd)
story.append(Paragraph('Label Size', styleN))
story.append(XBox((2.0 + 5.0/8.0)*inch, 1 * inch, '1x2-5/8"'))
story.append(Paragraph('Label Size', styleN))
story.append(XBox((1.75)*inch, .5 * inch, '1/2x1-3/4"'))
c = Canvas('out.pdf')
f = Frame(inch, inch, 6*inch, 9*inch, showBoundary=1)
f.addFromList(story, c)
c.save()
print 'saved out.pdf'
def fullTest(fileName="test_full.pdf"):
"""Creates large-ish test document with a variety of parameters"""
story = []
styles = getSampleStyleSheet()
styleN = styles['Normal']
styleH = styles['Heading1']
styleH2 = styles['Heading2']
story = []
story.append(Paragraph('ReportLab Barcode Test Suite - full output', styleH))
story.append(Paragraph('Generated on %s' % time.ctime(time.time()), styleN))
story.append(Paragraph('', styleN))
story.append(Paragraph('Repository information for this build:', styleN))
#see if we can figure out where it was built, if we're running in source
if os.path.split(os.getcwd())[-1] == 'barcode' and os.path.isdir('.svn'):
#running in a filesystem svn copy
infoLines = os.popen('svn info').read()
story.append(Preformatted(infoLines, styles["Code"]))
story.append(Paragraph('About this document', styleH2))
story.append(Paragraph('History and Status', styleH2))
story.append(Paragraph("""
This is the test suite and documentation for the ReportLab open source barcode API,
being re-released as part of the forthcoming ReportLab 2.0 release.
""", styleN))
story.append(Paragraph("""
Several years ago Ty Sarna contributed a barcode module to the ReportLab community.
Several of the codes were used by him in his work and to the best of our knowledge
this was correct. These were written as flowable objects and were available in PDFs,
but not in our graphics framework. However, we had no knowledge of barcodes ourselves
and did not advertise or extend the package.
""", styleN))
story.append(Paragraph("""
We "wrapped" the barcodes to be usable within our graphics framework; they are now available
as Drawing objects which can be rendered to EPS files or bitmaps. For the last 2 years this
has been available in our Diagra and Report Markup Language products. However, we did not
charge separately and use was on an "as is" basis.
""", styleN))
story.append(Paragraph("""
A major licensee of our technology has kindly agreed to part-fund proper productisation
of this code on an open source basis in Q1 2006. This has involved addition of EAN codes
as well as a proper testing program. Henceforth we intend to publicise the code more widely,
gather feedback, accept contributions of code and treat it as "supported".
""", styleN))
story.append(Paragraph("""
This involved making available both downloads and testing resources. This PDF document
is the output of the current test suite. It contains codes you can scan (if you use a nice sharp
laser printer!), and will be extended over coming weeks to include usage examples and notes on
each barcode and how widely tested they are. This is being done through documentation strings in
the barcode objects themselves so should always be up to date.
""", styleN))
story.append(Paragraph('Usage examples', styleH2))
story.append(Paragraph("""
To be completed
""", styleN))
story.append(Paragraph('The codes', styleH2))
story.append(Paragraph("""
Below we show a scannable code from each barcode, with and without human-readable text.
These are magnified to about 2x the natural size chosen by the original author, to aid
inspection. This will be expanded to include several test cases per code, and to add
explanations of checksums. Be aware that (a) if you enter numeric codes which are too
short they may be prefixed for you (e.g. "123" for an 8-digit code becomes "00000123"),
and that the scanned results and readable text will generally include extra checksums
at the end.
""", styleN))
codeNames = getCodeNames()
from reportlab.lib.utils import flatten
width = [float(x[8:]) for x in sys.argv if x.startswith('--width=')]
height = [float(x[9:]) for x in sys.argv if x.startswith('--height=')]
isoScale = [int(x[11:]) for x in sys.argv if x.startswith('--isoscale=')]
options = {}
if width: options['width'] = width[0]
if height: options['height'] = height[0]
if isoScale: options['isoScale'] = isoScale[0]
scales = [x[8:].split(',') for x in sys.argv if x.startswith('--scale=')]
scales = map(float,scales and flatten(scales) or [1])
for scale in scales:
story.append(PageBreak())
story.append(Paragraph('Scale = %.1f'%scale, styleH2))
story.append(Spacer(36, 12))
for codeName in codeNames:
s = [Paragraph('Code: ' + codeName, styleH2)]
for hr in (0,1):
s.append(Spacer(36, 12))
dr = createBarcodeDrawing(codeName, humanReadable=hr,**options)
dr.renderScale = scale
s.append(dr)
s.append(Spacer(36, 12))
s.append(Paragraph('Barcode should say: ' + dr._bc.value, styleN))
story.append(KeepTogether(s))
SimpleDocTemplate(fileName).build(story)
print 'created', fileName
if __name__=='__main__':
run()
fullTest()
def createSample(name,memory):
f = open(name,'wb')
f.write(memory)
f.close()
createSample('test_cbcim.png',createBarcodeImageInMemory('EAN13', value='123456789012'))
createSample('test_cbcim.gif',createBarcodeImageInMemory('EAN8', value='1234567', format='gif'))
createSample('test_cbcim.pdf',createBarcodeImageInMemory('UPCA', value='03600029145',format='pdf'))
createSample('test_cbcim.tiff',createBarcodeImageInMemory('USPS_4State', value='01234567094987654321',routing='01234567891',format='tiff'))
|
TaskEvolution/Task-Coach-Evolution
|
taskcoach/taskcoachlib/thirdparty/src/reportlab/graphics/barcode/test.py
|
Python
|
gpl-3.0
| 9,268
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('libreosteoweb', '0021_therapeutsettings_siret'),
]
operations = [
migrations.AddField(
model_name='therapeutsettings',
name='invoice_footer',
field=models.TextField(null=True, verbose_name='Invoice footer', blank=True),
preserve_default=True,
),
]
|
littlejo/Libreosteo
|
libreosteoweb/migrations/0022_therapeutsettings_invoice_footer.py
|
Python
|
gpl-3.0
| 504
|
#! /usr/bin/env python3
"""
poller-wrapper A small tool which wraps around the poller and tries to
guide the polling process with a more modern approach using a
Queue and workers
Authors: Job Snijders <job.snijders@atrato.com>
Orsiris de Jong <contact@netpower.fr>
Date: Oct 2019
Usage: This program accepts one command line argument: the number of threads
that should run simultaneously. If no argument is given it will assume
a default of 16 threads.
Ubuntu Linux: apt-get install python-mysqldb
FreeBSD: cd /usr/ports/*/py-MySQLdb && make install clean
RHEL 7: yum install MySQL-python
RHEL 8: dnf install mariadb-connector-c-devel gcc && python -m pip install mysqlclient
Tested on: Python 3.6.8 / PHP 7.2.11 / CentOS 8.0
License: To the extent possible under law, Job Snijders has waived all
copyright and related or neighboring rights to this script.
This script has been put into the Public Domain. This work is
published from: The Netherlands.
"""
import LibreNMS.library as LNMS
try:
import json
import os
import queue
import subprocess
import sys
import threading
import time
from optparse import OptionParser
except ImportError as exc:
print('ERROR: missing one or more of the following python modules:')
print('threading, queue, sys, subprocess, time, os, json')
print('ERROR: %s' % exc)
sys.exit(2)
APP_NAME = "poller_wrapper"
LOG_FILE = "logs/" + APP_NAME + ".log"
_DEBUG = False
distpoll = False
real_duration = 0
polled_devices = 0
"""
Threading helper functions
"""
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC0
def memc_alive():
try:
global memc
key = str(uuid.uuid4())
memc.set('poller.ping.' + key, key, 60)
if memc.get('poller.ping.' + key) == key:
memc.delete('poller.ping.' + key)
return True
else:
return False
except:
return False
def memc_touch(key, time):
try:
global memc
val = memc.get(key)
memc.set(key, val, time)
except:
pass
def get_time_tag(step):
ts = int(time.time())
return ts - ts % step
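# For example, with step=300 any call between ts=1570000200 and ts=1570000499
# returns 1570000200, so every poller in the same interval shares one tag.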
#EOC0
"""
A separate queue and a single worker for printing information to the screen prevent
the good old joke:
Some people, when confronted with a problem, think,
"I know, I'll use threads," and then two they hav erpoblesms.
"""
def printworker():
nodeso = 0
while True:
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC4
global IsNode
global distpoll
if distpoll:
if not IsNode:
memc_touch(master_tag, 10)
nodes = memc.get(nodes_tag)
if nodes is None and not memc_alive():
print("WARNING: Lost Memcached. Taking over all devices. Nodes will quit shortly.")
distpoll = False
nodes = nodeso
if nodes != nodeso:
print("INFO: %s Node(s) Total" % (nodes))
nodeso = nodes
else:
memc_touch(nodes_tag, 10)
try:
worker_id, device_id, elapsed_time = print_queue.get(False)
except:
pass
try:
time.sleep(1)
except:
pass
continue
else:
worker_id, device_id, elapsed_time = print_queue.get()
# EOC4
global real_duration
global per_device_duration
global polled_devices
real_duration += elapsed_time
per_device_duration[device_id] = elapsed_time
polled_devices += 1
if elapsed_time < step:
print("INFO: worker %s finished device %s in %s seconds" % (worker_id, device_id, elapsed_time))
else:
print("WARNING: worker %s finished device %s in %s seconds" % (worker_id, device_id, elapsed_time))
print_queue.task_done()
"""
This class will fork off single instances of the poller.php process, record
how long it takes, and push the resulting reports to the printer queue
"""
def poll_worker():
while True:
device_id = poll_queue.get()
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC5
if not distpoll or memc.get('poller.device.%s.%s' % (device_id, time_tag)) is None:
if distpoll:
result = memc.add('poller.device.%s.%s' % (device_id, time_tag), config['distributed_poller_name'],
step)
if not result:
print("This device (%s) appears to be being polled by another poller" % (device_id))
poll_queue.task_done()
continue
if not memc_alive() and IsNode:
print("Lost Memcached, Not polling Device %s as Node. Master will poll it." % device_id)
poll_queue.task_done()
continue
# EOC5
try:
start_time = time.time()
output = "-d >> %s/poll_device_%s.log" % (log_dir, device_id) if debug else ">> /dev/null"
command = "/usr/bin/env php %s -h %s %s 2>&1" % (poller_path, device_id, output)
# TODO: replace with command_runner
subprocess.check_call(command, shell=True)
elapsed_time = int(time.time() - start_time)
print_queue.put([threading.current_thread().name, device_id, elapsed_time])
except (KeyboardInterrupt, SystemExit):
raise
except:
pass
poll_queue.task_done()
if __name__ == '__main__':
logger = LNMS.logger_get_logger(LOG_FILE, debug=_DEBUG)
install_dir = os.path.dirname(os.path.realpath(__file__))
LNMS.check_for_file(install_dir + '/.env')
config = json.loads(LNMS.get_config_data(install_dir))
poller_path = config['install_dir'] + '/poller.php'
log_dir = config['log_dir']
if 'rrd' in config and 'step' in config['rrd']:
step = config['rrd']['step']
else:
step = 300
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC1
if 'distributed_poller_group' in config:
poller_group = str(config['distributed_poller_group'])
else:
poller_group = False
if ('distributed_poller' in config and
'distributed_poller_memcached_host' in config and
'distributed_poller_memcached_port' in config and
config['distributed_poller']):
time_tag = str(get_time_tag(step))
master_tag = "poller.master." + time_tag
nodes_tag = "poller.nodes." + time_tag
try:
import memcache
import uuid
memc = memcache.Client([config['distributed_poller_memcached_host'] + ':' +
str(config['distributed_poller_memcached_port'])])
if str(memc.get(master_tag)) == config['distributed_poller_name']:
print("This system is already joined as the poller master.")
sys.exit(2)
if memc_alive():
if memc.get(master_tag) is None:
print("Registered as Master")
memc.set(master_tag, config['distributed_poller_name'], 10)
memc.set(nodes_tag, 0, step)
IsNode = False
else:
print("Registered as Node joining Master %s" % memc.get(master_tag))
IsNode = True
memc.incr(nodes_tag)
distpoll = True
else:
print("Could not connect to memcached, disabling distributed poller.")
distpoll = False
IsNode = False
except SystemExit:
raise
except ImportError:
print("ERROR: missing memcache python module:")
print("On deb systems: apt-get install python3-memcache")
print("On other systems: pip3 install python-memcached")
print("Disabling distributed poller.")
distpoll = False
else:
distpoll = False
# EOC1
s_time = time.time()
real_duration = 0
per_device_duration = {}
polled_devices = 0
"""
Take the amount of threads we want to run in parallel from the commandline
if None are given or the argument was garbage, fall back to default of 16
"""
usage = "usage: %prog [options] <workers> (Default: 16 (Do not set too high)"
description = "Spawn multiple poller.php processes in parallel."
parser = OptionParser(usage=usage, description=description)
parser.add_option('-d', '--debug', action='store_true', default=False,
help="Enable debug output. WARNING: Leaving this enabled will consume a lot of disk space.")
(options, args) = parser.parse_args()
debug = options.debug
try:
amount_of_workers = int(args[0])
except (IndexError, ValueError):
amount_of_workers = 16
devices_list = []
"""
This query specifically orders the results by the last_polled_timetaken variable,
because this way we put the devices likely to be slow at the top of the queue,
thus increasing our chances of completing _all_ the work in roughly the time it takes to
poll the slowest device! Cool stuff, eh?
"""
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC2
if poller_group is not False:
query = 'select device_id from devices where poller_group IN(' + poller_group + \
') and disabled = 0 order by last_polled_timetaken desc'
else:
query = 'select device_id from devices where disabled = 0 order by last_polled_timetaken desc'
# EOC2
db = LNMS.db_open(config['db_socket'], config['db_host'], config['db_port'], config['db_user'], config['db_pass'], config['db_name'])
cursor = db.cursor()
cursor.execute(query)
devices = cursor.fetchall()
for row in devices:
devices_list.append(int(row[0]))
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC3
if distpoll and not IsNode:
query = "select max(device_id),min(device_id) from devices"
cursor.execute(query)
devices = cursor.fetchall()
maxlocks = devices[0][0] or 0
minlocks = devices[0][1] or 0
# EOC3
db.close()
poll_queue = queue.Queue()
print_queue = queue.Queue()
print(
"INFO: starting the poller at %s with %s threads, slowest devices first" % (time.strftime("%Y-%m-%d %H:%M:%S"),
amount_of_workers))
for device_id in devices_list:
poll_queue.put(device_id)
for i in range(amount_of_workers):
t = threading.Thread(target=poll_worker)
t.setDaemon(True)
t.start()
p = threading.Thread(target=printworker)
p.setDaemon(True)
p.start()
try:
poll_queue.join()
print_queue.join()
except (KeyboardInterrupt, SystemExit):
raise
total_time = int(time.time() - s_time)
print("INFO: poller-wrapper polled %s devices in %s seconds with %s workers" % (
polled_devices, total_time, amount_of_workers))
# (c) 2015, GPLv3, Daniel Preussker <f0o@devilcode.org> <<<EOC6
if distpoll or memc_alive():
master = memc.get(master_tag)
if master == config['distributed_poller_name'] and not IsNode:
print("Wait for all poller-nodes to finish")
nodes = memc.get(nodes_tag)
while nodes is not None and nodes > 0:
try:
time.sleep(1)
nodes = memc.get(nodes_tag)
except:
pass
print("Clearing Locks for %s" % time_tag)
x = minlocks
while x <= maxlocks:
res = memc.delete('poller.device.%s.%s' % (x, time_tag))
x += 1
print("%s Locks Cleared" % x)
print("Clearing Nodes")
memc.delete(master_tag)
memc.delete(nodes_tag)
else:
memc.decr(nodes_tag)
print("Finished %.3fs after interval start." % (time.time() - int(time_tag)))
# EOC6
show_stopper = False
db = LNMS.db_open(config['db_socket'], config['db_host'], config['db_port'], config['db_user'], config['db_pass'], config['db_name'])
cursor = db.cursor()
query = "update pollers set last_polled=NOW(), devices='%d', time_taken='%d' where poller_name='%s'" % (
polled_devices,
total_time,
config['distributed_poller_name'])
response = cursor.execute(query)
if response == 1:
db.commit()
else:
query = "insert into pollers set poller_name='%s', last_polled=NOW(), devices='%d', time_taken='%d'" % (
config['distributed_poller_name'], polled_devices, total_time)
cursor.execute(query)
db.commit()
db.close()
if total_time > step:
print(
"WARNING: the process took more than %s seconds to finish, you need faster hardware or more threads" % step)
print("INFO: in sequential style polling the elapsed time would have been: %s seconds" % real_duration)
for device in per_device_duration:
if per_device_duration[device] > step:
print("WARNING: device %s is taking too long: %s seconds" % (device, per_device_duration[device]))
show_stopper = True
if show_stopper:
print(
"ERROR: Some devices are taking more than %s seconds, the script cannot recommend you what to do." % step)
else:
recommend = int(total_time / step * amount_of_workers + 1)
print(
"WARNING: Consider setting a minimum of %d threads. (This does not constitute professional advice!)" % recommend)
sys.exit(2)
|
crcro/librenms
|
poller-wrapper.py
|
Python
|
gpl-3.0
| 14,267
|
import copy
import secrets
races = {}
colors = {
'🐶': 0xccd6dd,
'🐱': 0xffcb4e,
'🐭': 0x99aab5,
'🐰': 0x99aab5,
'🐙': 0x9266cc,
'🐠': 0xffcc4d,
'🦊': 0xf4900c,
'🦀': 0xbe1931,
'🐸': 0x77b255,
'🐧': 0xf5f8fa
}
names = {
'🐶': 'dog',
'🐱': 'cat',
'🐭': 'mouse',
'🐰': 'rabbit',
'🐙': 'octopus',
'🐠': 'fish',
'🦊': 'fox',
'🦀': 'crab',
'🐸': 'frog',
'🐧': 'penguin'
}
participant_icons = ['🐶', '🐱', '🐭', '🐰', '🐙', '🐠', '🦊', '🦀', '🐸', '🐧']
def make_race(channel_id, buyin):
icon_copy = copy.deepcopy(participant_icons)
race_data = {
'icons': icon_copy,
'users': [],
'buyin': buyin
}
races.update({channel_id: race_data})
def add_participant(channel_id, user):
race = races[channel_id]
icons = race['icons']
users = race['users']
usr_icon = secrets.choice(icons)
icons.remove(usr_icon)
race.update({'icons': icons})
participant_data = {
'user': user,
'icon': usr_icon
}
users.append(participant_data)
race.update({'users': users})
races.update({channel_id: race})
return usr_icon
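# A minimal usage sketch (the channel id and user below are hypothetical):
#
# make_race('1234', buyin=50)
# icon = add_participant('1234', 'some_user')
# assert races['1234']['buyin'] == 50
# assert icon in participant_icons
# assert icon not in races['1234']['icons'] # a taken icon is removed from the pool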
|
AXAz0r/apex-sigma-core
|
sigma/modules/minigames/racing/nodes/race_storage.py
|
Python
|
gpl-3.0
| 1,238
|
# $File: _ext_type.py
# $Date: Wed Feb 22 15:04:06 2012 +0800
#
# Copyright (C) 2012 the pynojo development team <see AUTHORS file>
#
# Contributors to this file:
# Kai Jia <jia.kai66@gmail.com>
#
# This file is part of pynojo
#
# pynojo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pynojo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pynojo. If not, see <http://www.gnu.org/licenses/>.
#
"""Extra SQLAlchemy ORM types"""
__all__ = ['JSONEncodeDict']
import cjson
from sqlalchemy.types import TypeDecorator, String
from sqlalchemy.ext.mutable import Mutable
from pynojo.exc import PynojoRuntimeError
class JSONEncodeDict(TypeDecorator):
"""Represents an mutable python *dict* as a json-encoded string."""
# pylint: disable=W0223
impl = String
def process_bind_param(self, value, dialect):
if value is not None:
value = cjson.encode(value)
if len(value) > self.length:
raise PynojoRuntimeError(_(
'{class_name}: encoded string too long',
class_name = self.__class__.__name__))
return value
def process_result_value(self, value, dialect):
if value is not None:
value = cjson.decode(value)
return value
class _JSONEncodeDictMutabilize(Mutable, dict):
@classmethod
def coerce(cls, key, value):
if not isinstance(value, _JSONEncodeDictMutabilize):
if isinstance(value, dict):
return _JSONEncodeDictMutabilize(value)
return Mutable.coerce(key, value)
else:
return value
def __setitem__(self, key, value):
dict.__setitem__(self, key, value)
self.changed()
def __delitem__(self, key):
dict.__delitem__(self, key)
self.changed()
_JSONEncodeDictMutabilize.associate_with(JSONEncodeDict)
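# A minimal sketch of how this type might be declared in a model (the table
# and column names below are illustrative, not part of pynojo):
#
# from sqlalchemy import Column, Integer
# from sqlalchemy.ext.declarative import declarative_base
#
# Base = declarative_base()
#
# class Example(Base):
#     __tablename__ = 'example'
#     id = Column(Integer, primary_key=True)
#     data = Column(JSONEncodeDict(1024))
#
# Because _JSONEncodeDictMutabilize is associated with the type, in-place
# mutations such as obj.data['k'] = 'v' mark the attribute as changed.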
|
zxytim/pynojo
|
pynojo/model/_ext_type.py
|
Python
|
gpl-3.0
| 2,330
|
"""An FTP client class and some helper functions.
Based on RFC 959: File Transfer Protocol (FTP), by J. Postel and J. Reynolds
Example:
>>> from ftplib import FTP
>>> ftp = FTP('ftp.python.org') # connect to host, default port
>>> ftp.login() # default, i.e.: user anonymous, passwd anonymous@
'230 Guest login ok, access restrictions apply.'
>>> ftp.retrlines('LIST') # list directory contents
total 9
drwxr-xr-x 8 root wheel 1024 Jan 3 1994 .
drwxr-xr-x 8 root wheel 1024 Jan 3 1994 ..
drwxr-xr-x 2 root wheel 1024 Jan 3 1994 bin
drwxr-xr-x 2 root wheel 1024 Jan 3 1994 etc
d-wxrwxr-x 2 ftp wheel 1024 Sep 5 13:43 incoming
drwxr-xr-x 2 root wheel 1024 Nov 17 1993 lib
drwxr-xr-x 6 1094 wheel 1024 Sep 13 19:07 pub
drwxr-xr-x 3 root wheel 1024 Jan 3 1994 usr
-rw-r--r-- 1 root root 312 Aug 1 1994 welcome.msg
'226 Transfer complete.'
>>> ftp.quit()
'221 Goodbye.'
>>>
A nice test that reveals some of the network dialogue would be:
python ftplib.py -d localhost -l -p -l
"""
#
# Changes and improvements suggested by Steve Majewski.
# Modified by Jack to work on the mac.
# Modified by Siebren to support docstrings and PASV.
# Modified by Phil Schwartz to add storbinary and storlines callbacks.
#
import os
import sys
# Import SOCKS module if it exists, else standard socket module socket
try:
import SOCKS; socket = SOCKS; del SOCKS # import SOCKS as socket
from socket import getfqdn; socket.getfqdn = getfqdn; del getfqdn
except ImportError:
import socket
from socket import _GLOBAL_DEFAULT_TIMEOUT
__all__ = ["FTP","Netrc"]
# Magic number from <socket.h>
MSG_OOB = 0x1 # Process data out of band
# The standard FTP server control port
FTP_PORT = 21
# Exception raised when an error or invalid response is received
class Error(Exception): pass
class error_reply(Error): pass # unexpected [123]xx reply
class error_temp(Error): pass # 4xx errors
class error_perm(Error): pass # 5xx errors
class error_proto(Error): pass # response does not begin with [1-5]
# All exceptions (hopefully) that may be raised here and that aren't
# (always) programming errors on our side
all_errors = (Error, IOError, EOFError)
# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
CRLF = '\r\n'
# The class itself
class FTP:
'''An FTP client class.
To create a connection, call the class using these arguments:
host, user, passwd, acct, timeout
The first four arguments are all strings, and have default value ''.
timeout must be numeric and defaults to None if not passed,
meaning that no timeout will be set on any ftp socket(s)
If a timeout is passed, then this is now the default timeout for all ftp
socket operations for this instance.
Then use self.connect() with optional host and port argument.
To download a file, use ftp.retrlines('RETR ' + filename),
or ftp.retrbinary() with slightly different arguments.
To upload a file, use ftp.storlines() or ftp.storbinary(),
which have an open file as argument (see their definitions
below for details).
The download/upload functions first issue appropriate TYPE
and PORT or PASV commands.
'''
debugging = 0
host = ''
port = FTP_PORT
sock = None
file = None
welcome = None
passiveserver = 1
# Initialization method (called by class instantiation).
# Initialize host to localhost, port to standard ftp port
# Optional arguments are host (for connect()),
# and user, passwd, acct (for login())
def __init__(self, host='', user='', passwd='', acct='',
timeout=_GLOBAL_DEFAULT_TIMEOUT):
self.timeout = timeout
if host:
self.connect(host)
if user:
self.login(user, passwd, acct)
def connect(self, host='', port=0, timeout=-999):
'''Connect to host. Arguments are:
- host: hostname to connect to (string, default previous host)
- port: port to connect to (integer, default previous port)
'''
if host != '':
self.host = host
if port > 0:
self.port = port
if timeout != -999:
self.timeout = timeout
self.sock = socket.create_connection((self.host, self.port), self.timeout)
self.af = self.sock.family
self.file = self.sock.makefile('rb')
self.welcome = self.getresp()
return self.welcome
def getwelcome(self):
'''Get the welcome message from the server.
(this is read and squirreled away by connect())'''
if self.debugging:
print '*welcome*', self.sanitize(self.welcome)
return self.welcome
def set_debuglevel(self, level):
'''Set the debugging level.
The required argument level means:
0: no debugging output (default)
1: print commands and responses but not body text etc.
2: also print raw lines read and sent before stripping CR/LF'''
self.debugging = level
debug = set_debuglevel
def set_pasv(self, val):
'''Use passive or active mode for data transfers.
With a false argument, use the normal PORT mode,
With a true argument, use the PASV command.'''
self.passiveserver = val
# Internal: "sanitize" a string for printing
def sanitize(self, s):
if s[:5] == 'pass ' or s[:5] == 'PASS ':
i = len(s)
while i > 5 and s[i-1] in '\r\n':
i = i-1
s = s[:5] + '*'*(i-5) + s[i:]
return repr(s)
# Internal: send one line to the server, appending CRLF
def putline(self, line):
line = line + CRLF
if self.debugging > 1: print '*put*', self.sanitize(line)
self.sock.sendall(line)
# Internal: send one command to the server (through putline())
def putcmd(self, line):
if self.debugging: print '*cmd*', self.sanitize(line)
self.putline(line)
# Internal: return one line from the server, stripping CRLF.
# Raise EOFError if the connection is closed
def getline(self):
line = self.file.readline()
if self.debugging > 1:
print '*get*', self.sanitize(line)
if not line: raise EOFError
if line[-2:] == CRLF: line = line[:-2]
elif line[-1:] in CRLF: line = line[:-1]
return line
# Internal: get a response from the server, which may possibly
# consist of multiple lines. Return a single string with no
# trailing CRLF. If the response consists of multiple lines,
# these are separated by '\n' characters in the string
def getmultiline(self):
line = self.getline()
if line[3:4] == '-':
code = line[:3]
while 1:
nextline = self.getline()
line = line + ('\n' + nextline)
if nextline[:3] == code and \
nextline[3:4] != '-':
break
return line
# Internal: get a response from the server.
# Raise various errors if the response indicates an error
def getresp(self):
resp = self.getmultiline()
if self.debugging: print '*resp*', self.sanitize(resp)
self.lastresp = resp[:3]
c = resp[:1]
if c in ('1', '2', '3'):
return resp
if c == '4':
raise error_temp, resp
if c == '5':
raise error_perm, resp
raise error_proto, resp
def voidresp(self):
"""Expect a response beginning with '2'."""
resp = self.getresp()
if resp[0] != '2':
raise error_reply, resp
return resp
def abort(self):
'''Abort a file transfer. Uses out-of-band data.
This does not follow the procedure from the RFC to send Telnet
IP and Synch; that doesn't seem to work with the servers I've
tried. Instead, just send the ABOR command as OOB data.'''
line = 'ABOR' + CRLF
if self.debugging > 1: print '*put urgent*', self.sanitize(line)
self.sock.sendall(line, MSG_OOB)
resp = self.getmultiline()
if resp[:3] not in ('426', '226'):
raise error_proto, resp
def sendcmd(self, cmd):
'''Send a command and return the response.'''
self.putcmd(cmd)
return self.getresp()
def voidcmd(self, cmd):
"""Send a command and expect a response beginning with '2'."""
self.putcmd(cmd)
return self.voidresp()
def sendport(self, host, port):
'''Send a PORT command with the current host and the given
port number.
'''
hbytes = host.split('.')
pbytes = [repr(port//256), repr(port%256)]
bytes = hbytes + pbytes
cmd = 'PORT ' + ','.join(bytes)
return self.voidcmd(cmd)
def sendeprt(self, host, port):
'''Send a EPRT command with the current host and the given port number.'''
af = 0
if self.af == socket.AF_INET:
af = 1
if self.af == socket.AF_INET6:
af = 2
if af == 0:
raise error_proto, 'unsupported address family'
fields = ['', repr(af), host, repr(port), '']
cmd = 'EPRT ' + '|'.join(fields)
return self.voidcmd(cmd)
def makeport(self):
'''Create a new socket and send a PORT command for it.'''
msg = "getaddrinfo returns an empty list"
sock = None
for res in socket.getaddrinfo(None, 0, self.af, socket.SOCK_STREAM, 0, socket.AI_PASSIVE):
af, socktype, proto, canonname, sa = res
try:
sock = socket.socket(af, socktype, proto)
sock.bind(sa)
except socket.error, msg:
if sock:
sock.close()
sock = None
continue
break
if not sock:
raise socket.error, msg
sock.listen(1)
port = sock.getsockname()[1] # Get proper port
host = self.sock.getsockname()[0] # Get proper host
if self.af == socket.AF_INET:
resp = self.sendport(host, port)
else:
resp = self.sendeprt(host, port)
return sock
def makepasv(self):
if self.af == socket.AF_INET:
host, port = parse227(self.sendcmd('PASV'))
else:
host, port = parse229(self.sendcmd('EPSV'), self.sock.getpeername())
return host, port
def ntransfercmd(self, cmd, rest=None):
"""Initiate a transfer over the data connection.
If the transfer is active, send a port command and the
transfer command, and accept the connection. If the server is
passive, send a pasv command, connect to it, and start the
transfer command. Either way, return the socket for the
connection and the expected size of the transfer. The
expected size may be None if it could not be determined.
Optional `rest' argument can be a string that is sent as the
argument to a REST command. This is essentially a server
marker used to tell the server to skip over any data up to the
given marker.
"""
size = None
if self.passiveserver:
host, port = self.makepasv()
conn = socket.create_connection((host, port), self.timeout)
if rest is not None:
self.sendcmd("REST %s" % rest)
resp = self.sendcmd(cmd)
# Some servers apparently send a 200 reply to
# a LIST or STOR command, before the 150 reply
# (and way before the 226 reply). This seems to
# be in violation of the protocol (which only allows
# 1xx or error messages for LIST), so we just discard
# this response.
if resp[0] == '2':
resp = self.getresp()
if resp[0] != '1':
raise error_reply, resp
else:
sock = self.makeport()
if rest is not None:
self.sendcmd("REST %s" % rest)
resp = self.sendcmd(cmd)
# See above.
if resp[0] == '2':
resp = self.getresp()
if resp[0] != '1':
raise error_reply, resp
conn, sockaddr = sock.accept()
if resp[:3] == '150':
# this is conditional in case we received a 125
size = parse150(resp)
return conn, size
def transfercmd(self, cmd, rest=None):
"""Like ntransfercmd() but returns only the socket."""
return self.ntransfercmd(cmd, rest)[0]
def login(self, user = '', passwd = '', acct = ''):
'''Login, default anonymous.'''
if not user: user = 'anonymous'
if not passwd: passwd = ''
if not acct: acct = ''
if user == 'anonymous' and passwd in ('', '-'):
# If there is no anonymous ftp password specified
# then we'll just use anonymous@
# We don't send any other thing because:
# - We want to remain anonymous
# - We want to stop SPAM
# - We don't want to let ftp sites to discriminate by the user,
# host or country.
passwd = passwd + 'anonymous@'
resp = self.sendcmd('USER ' + user)
if resp[0] == '3': resp = self.sendcmd('PASS ' + passwd)
if resp[0] == '3': resp = self.sendcmd('ACCT ' + acct)
if resp[0] != '2':
raise error_reply, resp
return resp
def retrbinary(self, cmd, callback, blocksize=8192, rest=None):
"""Retrieve data in binary mode. A new port is created for you.
Args:
cmd: A RETR command.
callback: A single parameter callable to be called on each
block of data read.
blocksize: The maximum number of bytes to read from the
socket at one time. [default: 8192]
rest: Passed to transfercmd(). [default: None]
Returns:
The response code.
"""
self.voidcmd('TYPE I')
conn = self.transfercmd(cmd, rest)
while 1:
data = conn.recv(blocksize)
if not data:
break
callback(data)
conn.close()
return self.voidresp()
def retrlines(self, cmd, callback = None):
"""Retrieve data in line mode. A new port is created for you.
Args:
cmd: A RETR, LIST, NLST, or MLSD command.
callback: An optional single parameter callable that is called
for each line with the trailing CRLF stripped.
[default: print_line()]
Returns:
The response code.
"""
if callback is None: callback = print_line
resp = self.sendcmd('TYPE A')
conn = self.transfercmd(cmd)
fp = conn.makefile('rb')
while 1:
line = fp.readline()
if self.debugging > 2: print '*retr*', repr(line)
if not line:
break
if line[-2:] == CRLF:
line = line[:-2]
elif line[-1:] == '\n':
line = line[:-1]
callback(line)
fp.close()
conn.close()
return self.voidresp()
def storbinary(self, cmd, fp, blocksize=8192, callback=None):
"""Store a file in binary mode. A new port is created for you.
Args:
cmd: A STOR command.
fp: A file-like object with a read(num_bytes) method.
blocksize: The maximum data size to read from fp and send over
the connection at once. [default: 8192]
callback: An optional single parameter callable that is called on
on each block of data after it is sent. [default: None]
Returns:
The response code.
"""
self.voidcmd('TYPE I')
conn = self.transfercmd(cmd)
while 1:
buf = fp.read(blocksize)
if not buf: break
conn.sendall(buf)
if callback: callback(buf)
conn.close()
return self.voidresp()
def storlines(self, cmd, fp, callback=None):
"""Store a file in line mode. A new port is created for you.
Args:
cmd: A STOR command.
fp: A file-like object with a readline() method.
callback: An optional single parameter callable that is called on
on each line after it is sent. [default: None]
Returns:
The response code.
"""
self.voidcmd('TYPE A')
conn = self.transfercmd(cmd)
while 1:
buf = fp.readline()
if not buf: break
if buf[-2:] != CRLF:
if buf[-1] in CRLF: buf = buf[:-1]
buf = buf + CRLF
conn.sendall(buf)
if callback: callback(buf)
conn.close()
return self.voidresp()
def acct(self, password):
'''Send new account name.'''
cmd = 'ACCT ' + password
return self.voidcmd(cmd)
def nlst(self, *args):
'''Return a list of files in a given directory (default the current).'''
cmd = 'NLST'
for arg in args:
cmd = cmd + (' ' + arg)
files = []
self.retrlines(cmd, files.append)
return files
def dir(self, *args):
'''List a directory in long form.
By default list current directory to stdout.
Optional last argument is callback function; all
non-empty arguments before it are concatenated to the
LIST command. (This *should* only be used for a pathname.)'''
cmd = 'LIST'
func = None
if args[-1:] and type(args[-1]) != type(''):
args, func = args[:-1], args[-1]
for arg in args:
if arg:
cmd = cmd + (' ' + arg)
self.retrlines(cmd, func)
def rename(self, fromname, toname):
'''Rename a file.'''
resp = self.sendcmd('RNFR ' + fromname)
if resp[0] != '3':
raise error_reply, resp
return self.voidcmd('RNTO ' + toname)
def delete(self, filename):
'''Delete a file.'''
resp = self.sendcmd('DELE ' + filename)
if resp[:3] in ('250', '200'):
return resp
elif resp[:1] == '5':
raise error_perm, resp
else:
raise error_reply, resp
def cwd(self, dirname):
'''Change to a directory.'''
if dirname == '..':
try:
return self.voidcmd('CDUP')
except error_perm, msg:
if msg.args[0][:3] != '500':
raise
elif dirname == '':
dirname = '.' # does nothing, but could return error
cmd = 'CWD ' + dirname
return self.voidcmd(cmd)
def size(self, filename):
'''Retrieve the size of a file.'''
# The SIZE command is defined in RFC-3659
resp = self.sendcmd('SIZE ' + filename)
if resp[:3] == '213':
s = resp[3:].strip()
try:
return int(s)
except (OverflowError, ValueError):
return long(s)
def mkd(self, dirname):
'''Make a directory, return its full pathname.'''
resp = self.sendcmd('MKD ' + dirname)
return parse257(resp)
def rmd(self, dirname):
'''Remove a directory.'''
return self.voidcmd('RMD ' + dirname)
def pwd(self):
'''Return current working directory.'''
resp = self.sendcmd('PWD')
return parse257(resp)
def quit(self):
'''Quit, and close the connection.'''
resp = self.voidcmd('QUIT')
self.close()
return resp
def close(self):
'''Close the connection without assuming anything about it.'''
if self.file:
self.file.close()
self.sock.close()
self.file = self.sock = None
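# A minimal binary download sketch using the class above (host and file name
# are illustrative):
#
# ftp = FTP('ftp.example.com')
# ftp.login() # anonymous
# f = open('welcome.msg', 'wb')
# ftp.retrbinary('RETR welcome.msg', f.write)
# f.close()
# ftp.quit()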
_150_re = None
def parse150(resp):
'''Parse the '150' response for a RETR request.
Returns the expected transfer size or None; size is not guaranteed to
be present in the 150 message.
'''
if resp[:3] != '150':
raise error_reply, resp
global _150_re
if _150_re is None:
import re
_150_re = re.compile("150 .* \((\d+) bytes\)", re.IGNORECASE)
m = _150_re.match(resp)
if not m:
return None
s = m.group(1)
try:
return int(s)
except (OverflowError, ValueError):
return long(s)
_227_re = None
def parse227(resp):
'''Parse the '227' response for a PASV request.
Raises error_proto if it does not contain '(h1,h2,h3,h4,p1,p2)'
Return ('host.addr.as.numbers', port#) tuple.'''
if resp[:3] != '227':
raise error_reply, resp
global _227_re
if _227_re is None:
import re
_227_re = re.compile(r'(\d+),(\d+),(\d+),(\d+),(\d+),(\d+)')
m = _227_re.search(resp)
if not m:
raise error_proto, resp
numbers = m.groups()
host = '.'.join(numbers[:4])
port = (int(numbers[4]) << 8) + int(numbers[5])
return host, port
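# For example, parse227('227 Entering Passive Mode (192,168,1,2,19,137)')
# returns ('192.168.1.2', 5001), since the port is 19 * 256 + 137.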
def parse229(resp, peer):
'''Parse the '229' response for a EPSV request.
Raises error_proto if it does not contain '(|||port|)'
Return ('host.addr.as.numbers', port#) tuple.'''
if resp[:3] != '229':
raise error_reply, resp
left = resp.find('(')
if left < 0: raise error_proto, resp
right = resp.find(')', left + 1)
if right < 0:
raise error_proto, resp # should contain '(|||port|)'
if resp[left + 1] != resp[right - 1]:
raise error_proto, resp
parts = resp[left + 1:right].split(resp[left+1])
if len(parts) != 5:
raise error_proto, resp
host = peer[0]
port = int(parts[3])
return host, port
def parse257(resp):
'''Parse the '257' response for a MKD or PWD request.
This is a response to a MKD or PWD request: a directory name.
Returns the directoryname in the 257 reply.'''
if resp[:3] != '257':
raise error_reply, resp
if resp[3:5] != ' "':
return '' # Not compliant to RFC 959, but UNIX ftpd does this
dirname = ''
i = 5
n = len(resp)
while i < n:
c = resp[i]
i = i+1
if c == '"':
if i >= n or resp[i] != '"':
break
i = i+1
dirname = dirname + c
return dirname
def print_line(line):
'''Default retrlines callback to print a line.'''
print line
def ftpcp(source, sourcename, target, targetname = '', type = 'I'):
'''Copy file from one FTP-instance to another.'''
if not targetname: targetname = sourcename
type = 'TYPE ' + type
source.voidcmd(type)
target.voidcmd(type)
sourcehost, sourceport = parse227(source.sendcmd('PASV'))
target.sendport(sourcehost, sourceport)
# RFC 959: the user must "listen" [...] BEFORE sending the
# transfer request.
# So: STOR before RETR, because here the target is a "user".
treply = target.sendcmd('STOR ' + targetname)
if treply[:3] not in ('125', '150'): raise error_proto # RFC 959
sreply = source.sendcmd('RETR ' + sourcename)
if sreply[:3] not in ('125', '150'): raise error_proto # RFC 959
source.voidresp()
target.voidresp()
class Netrc:
"""Class to parse & provide access to 'netrc' format files.
See the netrc(4) man page for information on the file format.
WARNING: This class is obsolete -- use module netrc instead.
"""
__defuser = None
__defpasswd = None
__defacct = None
def __init__(self, filename=None):
if filename is None:
if "HOME" in os.environ:
filename = os.path.join(os.environ["HOME"],
".netrc")
else:
raise IOError, \
"specify file to load or set $HOME"
self.__hosts = {}
self.__macros = {}
fp = open(filename, "r")
in_macro = 0
while 1:
line = fp.readline()
if not line: break
if in_macro and line.strip():
macro_lines.append(line)
continue
elif in_macro:
self.__macros[macro_name] = tuple(macro_lines)
in_macro = 0
words = line.split()
host = user = passwd = acct = None
default = 0
i = 0
while i < len(words):
w1 = words[i]
if i+1 < len(words):
w2 = words[i + 1]
else:
w2 = None
if w1 == 'default':
default = 1
elif w1 == 'machine' and w2:
host = w2.lower()
i = i + 1
elif w1 == 'login' and w2:
user = w2
i = i + 1
elif w1 == 'password' and w2:
passwd = w2
i = i + 1
elif w1 == 'account' and w2:
acct = w2
i = i + 1
elif w1 == 'macdef' and w2:
macro_name = w2
macro_lines = []
in_macro = 1
break
i = i + 1
if default:
self.__defuser = user or self.__defuser
self.__defpasswd = passwd or self.__defpasswd
self.__defacct = acct or self.__defacct
if host:
if host in self.__hosts:
ouser, opasswd, oacct = \
self.__hosts[host]
user = user or ouser
passwd = passwd or opasswd
acct = acct or oacct
self.__hosts[host] = user, passwd, acct
fp.close()
def get_hosts(self):
"""Return a list of hosts mentioned in the .netrc file."""
return self.__hosts.keys()
def get_account(self, host):
"""Returns login information for the named host.
The return value is a triple containing userid,
password, and the accounting field.
"""
host = host.lower()
user = passwd = acct = None
if host in self.__hosts:
user, passwd, acct = self.__hosts[host]
user = user or self.__defuser
passwd = passwd or self.__defpasswd
acct = acct or self.__defacct
return user, passwd, acct
def get_macros(self):
"""Return a list of all defined macro names."""
return self.__macros.keys()
def get_macro(self, macro):
"""Return a sequence of lines which define a named macro."""
return self.__macros[macro]
def test():
'''Test program.
Usage: ftp [-d] [-r[file]] host [-l[dir]] [-d[dir]] [-p] [file] ...
-d dir
-l list
-p password
'''
if len(sys.argv) < 2:
print test.__doc__
sys.exit(0)
debugging = 0
rcfile = None
while sys.argv[1] == '-d':
debugging = debugging+1
del sys.argv[1]
if sys.argv[1][:2] == '-r':
# get name of alternate ~/.netrc file:
rcfile = sys.argv[1][2:]
del sys.argv[1]
host = sys.argv[1]
ftp = FTP(host)
ftp.set_debuglevel(debugging)
userid = passwd = acct = ''
try:
netrc = Netrc(rcfile)
except IOError:
if rcfile is not None:
sys.stderr.write("Could not open account file"
" -- using anonymous login.")
else:
try:
userid, passwd, acct = netrc.get_account(host)
except KeyError:
# no account for host
sys.stderr.write(
"No account -- using anonymous login.")
ftp.login(userid, passwd, acct)
for file in sys.argv[2:]:
if file[:2] == '-l':
ftp.dir(file[2:])
elif file[:2] == '-d':
cmd = 'CWD'
if file[2:]: cmd = cmd + ' ' + file[2:]
resp = ftp.sendcmd(cmd)
elif file == '-p':
ftp.set_pasv(not ftp.passiveserver)
else:
ftp.retrbinary('RETR ' + file, \
sys.stdout.write, 1024)
ftp.quit()
if __name__ == '__main__':
test()
|
deanhiller/databus
|
webapp/play1.3.x/python/Lib/ftplib.py
|
Python
|
mpl-2.0
| 29,449
|
# Copyright 2014-2017 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
from __future__ import division
import numpy as np
import pytest
import odl
from odl.trafos.backends import pyfftw_call, PYFFTW_AVAILABLE
from odl.util import (
is_real_dtype, complex_dtype)
from odl.util.testutils import (
all_almost_equal, simple_fixture)
pytestmark = pytest.mark.skipif(not PYFFTW_AVAILABLE,
reason='`pyfftw` backend not available')
# --- pytest fixtures --- #
planning = simple_fixture('planning', ['estimate', 'measure', 'patient',
'exhaustive'])
direction = simple_fixture('direction', ['forward', 'backward'])
# --- helper functions --- #
def _random_array(shape, dtype):
if is_real_dtype(dtype):
return np.random.rand(*shape).astype(dtype)
else:
return (np.random.rand(*shape).astype(dtype) +
1j * np.random.rand(*shape).astype(dtype))
def _params_from_dtype(dtype):
if is_real_dtype(dtype):
halfcomplex = True
else:
halfcomplex = False
return halfcomplex, complex_dtype(dtype)
def _halfcomplex_shape(shape, axes=None):
if axes is None:
axes = tuple(range(len(shape)))
try:
axes = (int(axes),)
except TypeError:
pass
shape = list(shape)
shape[axes[-1]] = shape[axes[-1]] // 2 + 1
return shape
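# For example, _halfcomplex_shape((3, 4, 5)) == [3, 4, 3], and
# _halfcomplex_shape((3, 4, 5), axes=(0, 1)) == [3, 3, 5], since axis 1 is
# then the last transformed axis.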
# ---- pyfftw_call ---- #
def test_pyfftw_call_forward(odl_floating_dtype):
# Test against Numpy's FFT
dtype = odl_floating_dtype
if dtype == np.dtype('float16'): # not supported, skipping
return
halfcomplex, out_dtype = _params_from_dtype(dtype)
for shape in [(10,), (3, 4, 5)]:
arr = _random_array(shape, dtype)
if halfcomplex:
true_dft = np.fft.rfftn(arr)
dft_arr = np.empty(_halfcomplex_shape(shape), dtype=out_dtype)
else:
true_dft = np.fft.fftn(arr)
dft_arr = np.empty(shape, dtype=out_dtype)
pyfftw_call(arr, dft_arr, direction='forward',
halfcomplex=halfcomplex, preserve_input=False)
assert all_almost_equal(dft_arr, true_dft)
def test_pyfftw_call_threads():
shape = (3, 4, 5)
arr = _random_array(shape, dtype='complex64')
true_dft = np.fft.fftn(arr)
dft_arr = np.empty(shape, dtype='complex64')
pyfftw_call(arr, dft_arr, direction='forward', preserve_input=False,
threads=4)
assert all_almost_equal(dft_arr, true_dft)
shape = (1000,) # Trigger cpu_count() as number of threads
arr = _random_array(shape, dtype='complex64')
true_dft = np.fft.fftn(arr)
dft_arr = np.empty(shape, dtype='complex64')
pyfftw_call(arr, dft_arr, direction='forward', preserve_input=False)
assert all_almost_equal(dft_arr, true_dft)
def test_pyfftw_call_backward(odl_floating_dtype):
# Test against Numpy's IFFT, no normalization
dtype = odl_floating_dtype
if dtype == np.dtype('float16'): # not supported, skipping
return
halfcomplex, in_dtype = _params_from_dtype(dtype)
for shape in [(10,), (3, 4, 5)]:
# Scaling happens wrt output (large) shape
idft_scaling = np.prod(shape)
if halfcomplex:
arr = _random_array(_halfcomplex_shape(shape), in_dtype)
true_idft = np.fft.irfftn(arr, shape) * idft_scaling
else:
arr = _random_array(shape, in_dtype)
true_idft = np.fft.ifftn(arr) * idft_scaling
idft_arr = np.empty(shape, dtype=dtype)
pyfftw_call(arr, idft_arr, direction='backward',
halfcomplex=halfcomplex)
assert all_almost_equal(idft_arr, true_idft)
def test_pyfftw_call_bad_input(direction):
# Complex
# Bad dtype
dtype_in = np.dtype('complex128')
arr_in = np.empty(3, dtype=dtype_in)
bad_dtypes_out = np.sctypes['float'] + np.sctypes['complex']
if dtype_in in bad_dtypes_out:
# This one is correct, so we remove it
bad_dtypes_out.remove(dtype_in)
for bad_dtype in bad_dtypes_out:
arr_out = np.empty(3, dtype=bad_dtype)
with pytest.raises(ValueError):
pyfftw_call(arr_in, arr_out, halfcomplex=False,
direction=direction)
# Bad shape
shape = (3, 4)
arr_in = np.empty(shape, dtype='complex128')
bad_shapes_out = [(3, 3), (3,), (4,), (3, 4, 5), ()]
for bad_shape in bad_shapes_out:
arr_out = np.empty(bad_shape, dtype='complex128')
with pytest.raises(ValueError):
pyfftw_call(arr_in, arr_out, halfcomplex=False,
direction=direction)
# Duplicate axes
arr_in = np.empty((3, 4, 5), dtype='complex128')
arr_out = np.empty_like(arr_in)
bad_axes_list = [(0, 0, 1), (1, 1, 1), (-1, -1)]
for bad_axes in bad_axes_list:
with pytest.raises(ValueError):
pyfftw_call(arr_in, arr_out, axes=bad_axes,
direction=direction)
# Axis entry out of range
arr_in = np.empty((3, 4, 5), dtype='complex128')
arr_out = np.empty_like(arr_in)
bad_axes_list = [(0, 3), (-4,)]
for bad_axes in bad_axes_list:
with pytest.raises(ValueError):
pyfftw_call(arr_in, arr_out, axes=bad_axes,
direction=direction)
# Halfcomplex not possible for complex data
arr_in = np.empty((3, 4, 5), dtype='complex128')
arr_out = np.empty_like(arr_in)
with pytest.raises(ValueError):
pyfftw_call(arr_in, arr_out, halfcomplex=True,
direction=direction)
# Data type mismatch
arr_in = np.empty((3, 4, 5), dtype='complex128')
arr_out = np.empty_like(arr_in, dtype='complex64')
with pytest.raises(ValueError):
pyfftw_call(arr_in, arr_out, direction=direction)
# Halfcomplex
# Bad dtype
dtype_in = 'float64'
arr_in = np.empty(10, dtype=dtype_in)
bad_dtypes_out = np.sctypes['float'] + np.sctypes['complex']
try:
# This one is correct, so we remove it
bad_dtypes_out.remove(np.dtype('complex128'))
except ValueError:
pass
for bad_dtype in bad_dtypes_out:
arr_out = np.empty(6, dtype=bad_dtype)
with pytest.raises(ValueError):
if direction == 'forward':
pyfftw_call(arr_in, arr_out, halfcomplex=True,
direction='forward')
else:
pyfftw_call(arr_out, arr_in, halfcomplex=True,
direction='backward')
# Bad shape
shape = (3, 4, 5)
axes_list = [None, (0, 1), (1,), (1, 2), (2, 1), (-1, -2, -3)]
arr_in = np.empty(shape, dtype='float64')
# Correct shapes:
# [(3, 4, 3), (3, 3, 5), (3, 3, 5), (3, 4, 3), (3, 3, 5), (2, 4, 5)]
bad_shapes_out = [(3, 4, 2), (3, 4, 3), (2, 3, 5), (3, 2, 3),
(3, 4, 3), (3, 4, 3)]
always_bad_shapes = [(3, 4), (3, 4, 5)]
for bad_shape, axes in zip(bad_shapes_out, axes_list):
for always_bad_shape in always_bad_shapes:
arr_out = np.empty(always_bad_shape, dtype='complex128')
with pytest.raises(ValueError):
if direction == 'forward':
pyfftw_call(arr_in, arr_out, axes=axes, halfcomplex=True,
direction='forward')
else:
pyfftw_call(arr_out, arr_in, axes=axes, halfcomplex=True,
direction='backward')
arr_out = np.empty(bad_shape, dtype='complex128')
with pytest.raises(ValueError):
if direction == 'forward':
pyfftw_call(arr_in, arr_out, axes=axes, halfcomplex=True,
direction='forward')
else:
pyfftw_call(arr_out, arr_in, axes=axes, halfcomplex=True,
direction='backward')
def test_pyfftw_call_forward_real_not_halfcomplex():
# Test against Numpy's FFT
for shape in [(10,), (3, 4, 5)]:
arr = _random_array(shape, dtype='float64')
true_dft = np.fft.fftn(arr)
dft_arr = np.empty(shape, dtype='complex128')
pyfftw_call(arr, dft_arr, direction='forward', halfcomplex=False)
assert all_almost_equal(dft_arr, true_dft)
def test_pyfftw_call_backward_real_not_halfcomplex():
# Test against Numpy's IFFT, no normalization
for shape in [(10,), (3, 4, 5)]:
# Scaling happens wrt output (large) shape
idft_scaling = np.prod(shape)
arr = _random_array(shape, dtype='float64')
true_idft = np.fft.ifftn(arr) * idft_scaling
idft_arr = np.empty(shape, dtype='complex128')
pyfftw_call(arr, idft_arr, direction='backward', halfcomplex=False)
assert all_almost_equal(idft_arr, true_idft)
def test_pyfftw_call_plan_preserve_input(planning):
for shape in [(10,), (3, 4)]:
arr = _random_array(shape, dtype='complex128')
arr_cpy = arr.copy()
idft_scaling = np.prod(shape)
true_idft = np.fft.ifftn(arr) * idft_scaling
idft_arr = np.empty(shape, dtype='complex128')
pyfftw_call(arr, idft_arr, direction='backward', halfcomplex=False,
planning=planning)
assert all_almost_equal(arr, arr_cpy) # Input preserved
assert all_almost_equal(idft_arr, true_idft)
def test_pyfftw_call_forward_with_axes(odl_floating_dtype):
dtype = odl_floating_dtype
if dtype == np.dtype('float16'): # not supported, skipping
return
halfcomplex, out_dtype = _params_from_dtype(dtype)
shape = (3, 4, 5)
test_axes = [(0, 1), [1], (-1,), (1, 0), (-1, -2, -3)]
for axes in test_axes:
arr = _random_array(shape, dtype)
if halfcomplex:
true_dft = np.fft.rfftn(arr, axes=axes)
dft_arr = np.empty(_halfcomplex_shape(shape, axes),
dtype=out_dtype)
else:
true_dft = np.fft.fftn(arr, axes=axes)
dft_arr = np.empty(shape, dtype=out_dtype)
pyfftw_call(arr, dft_arr, direction='forward', axes=axes,
halfcomplex=halfcomplex)
assert all_almost_equal(dft_arr, true_dft)
def test_pyfftw_call_backward_with_axes(odl_floating_dtype):
dtype = odl_floating_dtype
if dtype == np.dtype('float16'): # not supported, skipping
return
halfcomplex, in_dtype = _params_from_dtype(dtype)
shape = (3, 4, 5)
test_axes = [(0, 1), [1], (-1,), (1, 0), (-1, -2, -3)]
for axes in test_axes:
# Only the shape indexed by axes counts for the scaling
active_shape = np.take(shape, axes)
idft_scaling = np.prod(active_shape)
if halfcomplex:
arr = _random_array(_halfcomplex_shape(shape, axes), in_dtype)
true_idft = (np.fft.irfftn(arr, s=active_shape, axes=axes) *
idft_scaling)
else:
arr = _random_array(shape, in_dtype)
true_idft = (np.fft.ifftn(arr, s=active_shape, axes=axes) *
idft_scaling)
idft_arr = np.empty(shape, dtype=dtype)
pyfftw_call(arr, idft_arr, direction='backward', axes=axes,
halfcomplex=halfcomplex)
assert all_almost_equal(idft_arr, true_idft)
def test_pyfftw_call_forward_with_plan():
for shape in [(10,), (3, 4, 5)]:
arr = _random_array(shape, dtype='complex128')
arr_cpy = arr.copy()
true_dft = np.fft.fftn(arr)
# First run, create plan
dft_arr = np.empty(shape, dtype='complex128')
plan = pyfftw_call(arr, dft_arr, direction='forward',
halfcomplex=False, planning_effort='measure')
# Second run, reuse with fresh output array
dft_arr = np.empty(shape, dtype='complex128')
pyfftw_call(arr, dft_arr, direction='forward', fftw_plan=plan,
halfcomplex=False)
assert all_almost_equal(arr, arr_cpy) # Input preserved
assert all_almost_equal(dft_arr, true_dft)
def test_pyfftw_call_backward_with_plan():
for shape in [(10,), (3, 4, 5)]:
arr = _random_array(shape, dtype='complex128')
arr_cpy = arr.copy()
idft_scaling = np.prod(shape)
true_idft = np.fft.ifftn(arr) * idft_scaling
# First run, create plan
idft_arr = np.empty(shape, dtype='complex128')
plan = pyfftw_call(arr, idft_arr, direction='backward',
halfcomplex=False, planning_effort='measure')
# Second run, reuse with fresh output array
idft_arr = np.empty(shape, dtype='complex128')
pyfftw_call(arr, idft_arr, direction='backward', fftw_plan=plan,
halfcomplex=False)
assert all_almost_equal(arr, arr_cpy) # Input preserved
assert all_almost_equal(idft_arr, true_idft)
if __name__ == '__main__':
odl.util.test_file(__file__)
|
kohr-h/odl
|
odl/test/trafos/backends/pyfftw_bindings_test.py
|
Python
|
mpl-2.0
| 13,174
|
from __future__ import unicode_literals
import base64
import calendar
import datetime
import re
import sys
import unicodedata
from binascii import Error as BinasciiError
from email.utils import formatdate
from django.utils import six
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_bytes, force_str, force_text
from django.utils.functional import allow_lazy
from django.utils.six.moves.urllib.parse import (
quote, quote_plus, unquote, unquote_plus, urlencode as original_urlencode,
urlparse,
)
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
RFC3986_GENDELIMS = str(":/?#[]@")
RFC3986_SUBDELIMS = str("!$&'()*+,;=")
PROTOCOL_TO_PORT = {
'http': 80,
'https': 443,
}
def urlquote(url, safe='/'):
"""
A version of Python's urllib.quote() function that can operate on unicode
strings. The url is first UTF-8 encoded before quoting. The returned string
can safely be used as part of an argument to a subsequent iri_to_uri() call
without double-quoting occurring.
"""
return force_text(quote(force_str(url), force_str(safe)))
urlquote = allow_lazy(urlquote, six.text_type)
def urlquote_plus(url, safe=''):
"""
A version of Python's urllib.quote_plus() function that can operate on
unicode strings. The url is first UTF-8 encoded before quoting. The
returned string can safely be used as part of an argument to a subsequent
iri_to_uri() call without double-quoting occurring.
"""
return force_text(quote_plus(force_str(url), force_str(safe)))
urlquote_plus = allow_lazy(urlquote_plus, six.text_type)
def urlunquote(quoted_url):
"""
A wrapper for Python's urllib.unquote() function that can operate on
the result of django.utils.http.urlquote().
"""
return force_text(unquote(force_str(quoted_url)))
urlunquote = allow_lazy(urlunquote, six.text_type)
def urlunquote_plus(quoted_url):
"""
A wrapper for Python's urllib.unquote_plus() function that can operate on
the result of django.utils.http.urlquote_plus().
"""
return force_text(unquote_plus(force_str(quoted_url)))
urlunquote_plus = allow_lazy(urlunquote_plus, six.text_type)
def urlencode(query, doseq=0):
"""
A version of Python's urllib.urlencode() function that can operate on
unicode strings. The parameters are first cast to UTF-8 encoded strings and
then encoded as per normal.
"""
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, 'items'):
query = query.items()
return original_urlencode(
[(force_str(k),
[force_str(i) for i in v] if isinstance(v, (list, tuple)) else force_str(v))
for k, v in query],
doseq)
def cookie_date(epoch_seconds=None):
"""
Formats the time to ensure compatibility with Netscape's cookie standard.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])
def http_date(epoch_seconds=None):
"""
Formats the time to match the RFC1123 date format as specified by HTTP
RFC2616 section 3.3.1.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
return formatdate(epoch_seconds, usegmt=True)
def parse_http_date(date):
"""
Parses a date format as specified by HTTP RFC2616 section 3.3.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Returns an integer expressed in seconds since the epoch, in UTC.
"""
# email.utils.parsedate does the job for RFC1123 dates; unfortunately
# RFC2616 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
year = int(m.group('year'))
if year < 100:
if year < 70:
year += 2000
else:
year += 1900
month = MONTHS.index(m.group('mon').lower()) + 1
day = int(m.group('day'))
hour = int(m.group('hour'))
min = int(m.group('min'))
sec = int(m.group('sec'))
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple())
except Exception:
six.reraise(ValueError, ValueError("%r is not a valid date" % date), sys.exc_info()[2])
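# For example, all three RFC-allowed formats parse to the same timestamp:
# parse_http_date('Sun, 06 Nov 1994 08:49:37 GMT') # RFC 1123
# parse_http_date('Sunday, 06-Nov-94 08:49:37 GMT') # RFC 850
# parse_http_date('Sun Nov  6 08:49:37 1994') # asctime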
def parse_http_date_safe(date):
"""
Same as parse_http_date, but returns None if the input is invalid.
"""
try:
return parse_http_date(date)
except Exception:
pass
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
Converts a base 36 string to an ``int``. Raises ``ValueError`` if the
input won't fit into an int.
"""
# To prevent overconsumption of server resources, reject any
# base36 string that is longer than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
value = int(s, 36)
# ... then do a final check that the value will fit into an int to avoid
# returning a long (#15067). The long type was removed in Python 3.
if six.PY2 and value > sys.maxint:
raise ValueError("Base36 input too large")
return value
def int_to_base36(i):
"""
Converts an integer to a base36 string
"""
char_set = '0123456789abcdefghijklmnopqrstuvwxyz'
if i < 0:
raise ValueError("Negative base36 conversion input.")
if six.PY2:
if not isinstance(i, six.integer_types):
raise TypeError("Non-integer base36 conversion input.")
if i > sys.maxint:
raise ValueError("Base36 conversion input too large.")
if i < 36:
return char_set[i]
b36 = ''
while i != 0:
i, n = divmod(i, 36)
b36 = char_set[n] + b36
return b36
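# Round trip example: int_to_base36(1000000) == 'lfls' and
# base36_to_int('lfls') == 1000000.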
def urlsafe_base64_encode(s):
"""
Encodes a bytestring in base64 for use in URLs, stripping any trailing
equal signs.
"""
return base64.urlsafe_b64encode(s).rstrip(b'\n=')
def urlsafe_base64_decode(s):
"""
Decodes a base64 encoded string, adding back any trailing equal signs that
might have been stripped.
"""
s = force_bytes(s)
try:
return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b'='))
except (LookupError, BinasciiError) as e:
raise ValueError(e)
def parse_etags(etag_str):
"""
Parses a string with one or several etags passed in If-None-Match and
If-Match headers by the rules in RFC 2616. Returns a list of etags
without surrounding double quotes (") and unescaped from \<CHAR>.
"""
etags = ETAG_MATCH.findall(etag_str)
if not etags:
        # etag_str has the wrong format, so treat it as an opaque string
return [etag_str]
etags = [e.encode('ascii').decode('unicode_escape') for e in etags]
return etags
def quote_etag(etag):
"""
Wraps a string in double quotes escaping contents as necessary.
"""
return '"%s"' % etag.replace('\\', '\\\\').replace('"', '\\"')
def same_origin(url1, url2):
"""
Checks if two URLs are 'same-origin'
"""
p1, p2 = urlparse(url1), urlparse(url2)
try:
o1 = (p1.scheme, p1.hostname, p1.port or PROTOCOL_TO_PORT[p1.scheme])
o2 = (p2.scheme, p2.hostname, p2.port or PROTOCOL_TO_PORT[p2.scheme])
return o1 == o2
except (ValueError, KeyError):
return False
def is_safe_url(url, host=None):
"""
Return ``True`` if the url is a safe redirection (i.e. it doesn't point to
a different host and uses a safe scheme).
Always returns ``False`` on an empty url.
"""
if url is not None:
url = url.strip()
if not url:
return False
if six.PY2:
try:
url = force_text(url)
except UnicodeDecodeError:
return False
# Chrome treats \ completely as / in paths but it could be part of some
# basic auth credentials so we need to check both URLs.
return _is_safe_url(url, host) and _is_safe_url(url.replace('\\', '/'), host)
def _is_safe_url(url, host):
# Chrome considers any URL with more than two slashes to be absolute, but
# urlparse is not so flexible. Treat any url with three slashes as unsafe.
if url.startswith('///'):
return False
url_info = urlparse(url)
# Forbid URLs like http:///example.com - with a scheme, but without a hostname.
    # In that URL, example.com is not the hostname but a path component. However,
# Chrome will still consider example.com to be the hostname, so we must not
# allow this syntax.
if not url_info.netloc and url_info.scheme:
return False
# Forbid URLs that start with control characters. Some browsers (like
# Chrome) ignore quite a few control characters at the start of a
# URL and might consider the URL as scheme relative.
if unicodedata.category(url[0])[0] == 'C':
return False
return ((not url_info.netloc or url_info.netloc == host) and
(not url_info.scheme or url_info.scheme in ['http', 'https']))
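# Illustrative example (editor's addition): relative paths count as safe,
# while a redirect to a different host does not.
#
#     >>> is_safe_url('/next/page', host='example.com')
#     True
#     >>> is_safe_url('http://attacker.example/next', host='example.com')
#     False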
|
devs1991/test_edx_docmode
|
venv/lib/python2.7/site-packages/django/utils/http.py
|
Python
|
agpl-3.0
| 10,279
|
from datetime import date, timedelta
from django.conf import settings
date_in_near_future = date.today() + timedelta(days=14)
FOUR_YEARS_IN_DAYS = 1462
election_date_before = lambda r: {
'DATE_TODAY': date.today()
}
election_date_on_election_day = lambda r: {
'DATE_TODAY': date_in_near_future
}
election_date_after = lambda r: {
'DATE_TODAY': date.today() + timedelta(days=28)
}
processors = settings.TEMPLATE_CONTEXT_PROCESSORS
processors_before = processors + \
("candidates.tests.dates.election_date_before",)
processors_on_election_day = processors + \
("candidates.tests.dates.election_date_on_election_day",)
processors_after = processors + \
("candidates.tests.dates.election_date_after",)
|
mysociety/yournextmp-popit
|
candidates/tests/dates.py
|
Python
|
agpl-3.0
| 728
|
"""
XBlock runtime services for LibraryContentModule
"""
from django.core.exceptions import PermissionDenied
from opaque_keys.edx.locator import LibraryLocator
from xmodule.library_content_module import ANY_CAPA_TYPE_VALUE
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.capa_module import CapaDescriptor
class LibraryToolsService(object):
"""
Service that allows LibraryContentModule to interact with libraries in the
modulestore.
"""
def __init__(self, modulestore):
self.store = modulestore
def _get_library(self, library_key):
"""
Given a library key like "library-v1:ProblemX+PR0B", return the
'library' XBlock with meta-information about the library.
Returns None on error.
"""
if not isinstance(library_key, LibraryLocator):
library_key = LibraryLocator.from_string(library_key)
assert library_key.version_guid is None
try:
return self.store.get_library(library_key, remove_version=False, remove_branch=False)
except ItemNotFoundError:
return None
def get_library_version(self, lib_key):
"""
Get the version (an ObjectID) of the given library.
Returns None if the library does not exist.
"""
library = self._get_library(lib_key)
if library:
# We need to know the library's version so ensure it's set in library.location.library_key.version_guid
assert library.location.library_key.version_guid is not None
return library.location.library_key.version_guid
return None
def create_block_analytics_summary(self, course_key, block_keys):
"""
Given a CourseKey and a list of (block_type, block_id) pairs,
prepare the JSON-ready metadata needed for analytics logging.
This is [
{"usage_key": x, "original_usage_key": y, "original_usage_version": z, "descendants": [...]}
]
where the main list contains all top-level blocks, and descendants contains a *flat* list of all
descendants of the top level blocks, if any.
"""
def summarize_block(usage_key):
""" Basic information about the given block """
orig_key, orig_version = self.store.get_block_original_usage(usage_key)
return {
"usage_key": unicode(usage_key),
"original_usage_key": unicode(orig_key) if orig_key else None,
"original_usage_version": unicode(orig_version) if orig_version else None,
}
result_json = []
for block_key in block_keys:
key = course_key.make_usage_key(*block_key)
info = summarize_block(key)
info['descendants'] = []
try:
block = self.store.get_item(key, depth=None) # Load the item and all descendants
children = list(getattr(block, "children", []))
while children:
child_key = children.pop()
child = self.store.get_item(child_key)
info['descendants'].append(summarize_block(child_key))
children.extend(getattr(child, "children", []))
except ItemNotFoundError:
pass # The block has been deleted
result_json.append(info)
return result_json
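    # Illustrative result shape (editor's addition; the keys and IDs below
    # are hypothetical examples, not real course data):
    #
    #     [{"usage_key": "block-v1:Org+Course+run+type@problem+block@p1",
    #       "original_usage_key": "lib-block-v1:Org+Lib+type@problem+block@p1",
    #       "original_usage_version": "5836...",
    #       "descendants": []}]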
def _filter_child(self, usage_key, capa_type):
"""
Filters children by CAPA problem type, if configured
"""
if capa_type == ANY_CAPA_TYPE_VALUE:
return True
if usage_key.block_type != "problem":
return False
descriptor = self.store.get_item(usage_key, depth=0)
assert isinstance(descriptor, CapaDescriptor)
return capa_type in descriptor.problem_types
def can_use_library_content(self, block):
"""
Determines whether a modulestore holding a course_id supports libraries.
"""
return self.store.check_supports(block.location.course_key, 'copy_from_template')
def update_children(self, dest_block, user_id, user_perms=None):
"""
This method is to be used when the library that a LibraryContentModule
references has been updated. It will re-fetch all matching blocks from
the libraries, and copy them as children of dest_block. The children
will be given new block_ids, but the definition ID used should be the
exact same definition ID used in the library.
This method will update dest_block's 'source_library_version' field to
        store the version number of the libraries used, so we can easily
        determine whether dest_block is up to date or not.
"""
if user_perms and not user_perms.can_write(dest_block.location.course_key):
raise PermissionDenied()
if not dest_block.source_library_id:
dest_block.source_library_version = ""
return
source_blocks = []
library_key = dest_block.source_library_key
library = self._get_library(library_key)
if library is None:
raise ValueError("Requested library not found.")
if user_perms and not user_perms.can_read(library_key):
raise PermissionDenied()
filter_children = (dest_block.capa_type != ANY_CAPA_TYPE_VALUE)
if filter_children:
# Apply simple filtering based on CAPA problem types:
source_blocks.extend([key for key in library.children if self._filter_child(key, dest_block.capa_type)])
else:
source_blocks.extend(library.children)
with self.store.bulk_operations(dest_block.location.course_key):
dest_block.source_library_version = unicode(library.location.library_key.version_guid)
self.store.update_item(dest_block, user_id)
dest_block.children = self.store.copy_from_template(source_blocks, dest_block.location, user_id)
# ^-- copy_from_template updates the children in the DB
# but we must also set .children here to avoid overwriting the DB again
def list_available_libraries(self):
"""
List all known libraries.
Returns tuples of (LibraryLocator, display_name)
"""
return [
(lib.location.library_key.replace(version_guid=None, branch=None), lib.display_name)
for lib in self.store.get_libraries()
]
|
eestay/edx-platform
|
common/lib/xmodule/xmodule/library_tools.py
|
Python
|
agpl-3.0
| 6,493
|
from PyQt4 import QtCore, QtGui
import os
class ConfigPage(QtGui.QWizardPage):
def __init__(self, templates, parent=None):
super(ConfigPage, self).__init__(parent)
#self.setTitle("Configuration")
#self.setSubTitle("Alter configuration and build your own platform.")
#self.setPixmap(QtGui.QWizard.WatermarkPixmap,
# QtGui.QPixmap(':/images/watermark1.png'))
self.templates = templates
self.view = QtGui.QTreeView()
self.panel = QtGui.QWidget()
self.info = QtGui.QTextBrowser()
self.hsplit = QtGui.QSplitter(QtCore.Qt.Vertical)
self.vsplit = QtGui.QSplitter(QtCore.Qt.Horizontal)
self.hsplit.addWidget(self.panel)
self.hsplit.addWidget(self.info)
self.vsplit.addWidget(self.view)
self.vsplit.addWidget(self.hsplit)
def click(index):
item = index.internalPointer()
self.info.setText(QtCore.QVariant(item.description).toString())
self.model.clicked(item)
self.view.activated.connect(click)
self.view.entered.connect(click)
self.view.clicked.connect(click)
#self.view.setModel(model)
self.layout = QtGui.QGridLayout()
self.layout.addWidget(self.vsplit)
#self.setStyleSheet("* { background: yellow }")
#self.setMaximumHeight(0xFFFFFF)
#self.vsplit.setMaximumHeight(0xFFFFFF)
#self.hsplit.setMaximumHeight(0xFFFFFF)
#self.view.setMaximumHeight(0xFFFFFF)
self.setLayout(self.layout)
#self.hsplit.moveSplitter(340,0)
def initializePage(self):
self.panel.setParent(None)
self.panel = QtGui.QWidget()
self.hsplit.insertWidget(0, self.panel)
self.model = self.templates.getModel(self.panel)
self.view.setModel(self.model)
self.view.expandAll()
self.view.setColumnWidth(0, 220)
self.view.setColumnWidth(1, 20)
self.setLayout(self.layout)
#self.vsplit.moveSplitter(280,1)
#self.hsplit.moveSplitter(120,1)
|
hoangt/core
|
core/tools/generator/wizard/config.py
|
Python
|
agpl-3.0
| 2,087
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import platform
import shutil
import sys
import os
from spack import *
class Namd(MakefilePackage):
"""NAMDis a parallel molecular dynamics code designed for
high-performance simulation of large biomolecular systems."""
homepage = "http://www.ks.uiuc.edu/Research/namd/"
url = "file://{0}/NAMD_2.12_Source.tar.gz".format(os.getcwd())
version('2.12', '2a1191909b1ab03bf0205971ad4d8ee9')
variant('fftw', default='3', values=('none', '2', '3', 'mkl'),
description='Enable the use of FFTW/FFTW3/MKL FFT')
variant('interface', default='none', values=('none', 'tcl', 'python'),
description='Enables TCL and/or python interface')
depends_on('charm')
depends_on('fftw@:2.99', when="fftw=2")
depends_on('fftw@3:', when="fftw=3")
depends_on('intel-mkl', when="fftw=mkl")
depends_on('tcl', when='interface=tcl')
depends_on('tcl', when='interface=python')
depends_on('python', when='interface=python')
def _copy_arch_file(self, lib):
config_filename = 'arch/{0}.{1}'.format(self.arch, lib)
shutil.copy('arch/Linux-x86_64.{0}'.format(lib),
config_filename)
if lib == 'tcl':
filter_file(r'-ltcl8\.5',
'-ltcl{0}'.format(self.spec['tcl'].version.up_to(2)),
config_filename)
def _append_option(self, opts, lib):
if lib != 'python':
self._copy_arch_file(lib)
spec = self.spec
opts.extend([
'--with-{0}'.format(lib),
'--{0}-prefix'.format(lib), spec[lib].prefix
])
@property
def arch(self):
plat = sys.platform
if plat.startswith("linux"):
plat = "linux"
march = platform.machine()
return '{0}-{1}'.format(plat, march)
@property
def build_directory(self):
return '{0}-spack'.format(self.arch)
def edit(self, spec, prefix):
with working_dir('arch'):
with open('{0}.arch'.format(self.build_directory), 'w') as fh:
                # these options are taken from the default configuration
                # files provided with NAMD
optims_opts = {
'gcc': '-m64 -O3 -fexpensive-optimizations -ffast-math',
'intel': '-O2 -ip'
}
                optim_opts = optims_opts.get(self.compiler.name, '')
fh.write('\n'.join([
'NAMD_ARCH = {0}'.format(self.arch),
'CHARMARCH = ',
'CXX = {0.cxx} {0.cxx11_flag}'.format(
self.compiler),
'CXXOPTS = {0}'.format(optim_opts),
'CC = {0}'.format(self.compiler.cc),
'COPTS = {0}'.format(optim_opts),
''
]))
self._copy_arch_file('base')
opts = ['--charm-base', spec['charm'].prefix]
fftw_version = spec.variants['fftw'].value
if fftw_version == 'none':
opts.append('--without-fftw')
elif fftw_version == 'mkl':
self._append_option(opts, 'mkl')
else:
_fftw = 'fftw{0}'.format('' if fftw_version == '2' else '3')
self._copy_arch_file(_fftw)
opts.extend(['--with-{0}'.format(_fftw),
'--fftw-prefix', spec['fftw'].prefix])
interface_type = spec.variants['interface'].value
if interface_type != 'none':
self._append_option(opts, 'tcl')
if interface_type == 'python':
self._append_option(opts, 'python')
else:
opts.extend([
'--without-tcl',
'--without-python'
])
config = Executable('./config')
config(self.build_directory, *opts)
def install(self, spec, prefix):
with working_dir(self.build_directory):
mkdirp(prefix.bin)
install('namd2', prefix.bin)
            # I'm not sure whether this is a good idea, or whether an
            # autoload of the charm module would be better.
install('charmrun', prefix.bin)
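# Example spec (illustrative; spack expects NAMD_2.12_Source.tar.gz in the
# current working directory because of the file:// url above):
#
#     spack install namd fftw=3 interface=tcl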
|
TheTimmy/spack
|
var/spack/repos/builtin/packages/namd/package.py
|
Python
|
lgpl-2.1
| 5,455
|
# -*- Mode: Python; test-case-name:flumotion.test.test_worker_worker -*-
# vi:si:et:sw=4:sts=4:ts=4
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007,2008,2009 Fluendo, S.L.
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
#
# This file may be distributed and/or modified under the terms of
# the GNU Lesser General Public License version 2.1 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.LGPL" in the source distribution for more information.
#
# Headers in this file shall remain intact.
from twisted.internet import defer
from twisted.spread import pb
from flumotion.common import testsuite
from flumotion.test import realm
from flumotion.twisted import pb as fpb
from flumotion.worker import medium
class TestWorkerAvatar(fpb.PingableAvatar):
def __init__(self, avatarId, mind):
fpb.PingableAvatar.__init__(self, avatarId)
self.setMind(mind)
class TestWorkerRealm(realm.TestRealm):
deferredAvatar = None
deferredLogout = None
def getDeferredAvatar(self):
if self.deferredAvatar is None:
self.deferredAvatar = defer.Deferred()
return self.deferredAvatar
def getDeferredLogout(self):
if self.deferredLogout is None:
self.deferredLogout = defer.Deferred()
return self.deferredLogout
def requestAvatar(self, avatarId, keycard, mind, *ifaces):
avatar = TestWorkerAvatar(avatarId, mind)
self.getDeferredAvatar().callback(avatar)
return (pb.IPerspective, avatar,
lambda: self.avatarLogout(avatar))
def avatarLogout(self, avatar):
self.debug('worker logged out: %s', avatar.avatarId)
self.getDeferredLogout().callback(avatar)
class TestWorkerMedium(testsuite.TestCase):
def setUp(self):
self.realm = TestWorkerRealm()
def tearDown(self):
return self.realm.shutdown()
def testConnect(self):
m = medium.WorkerMedium(None)
connectionInfo = self.realm.getConnectionInfo()
connectionInfo.authenticator.avatarId = 'foo'
m.startConnecting(connectionInfo)
def connected(avatar):
m.stopConnecting()
return self.realm.getDeferredLogout()
def disconnected(avatar):
self.assertEquals(avatar.avatarId, 'foo')
d = self.realm.getDeferredAvatar()
d.addCallback(connected)
d.addCallback(disconnected)
return d
|
timvideos/flumotion
|
flumotion/test/test_worker_medium.py
|
Python
|
lgpl-2.1
| 2,617
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyLocalcider(PythonPackage):
"""Tools for calculating sequence properties of disordered proteins"""
homepage = "http://pappulab.github.io/localCIDER"
url = "https://pypi.io/packages/source/l/localcider/localcider-0.1.14.tar.gz"
version('0.1.14', sha256='54ff29e8a011947cca5df79e96f3c69a76c49c4db41dcf1608663992be3e3f5f')
depends_on('py-setuptools', type='build')
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-matplotlib', type=('build', 'run'))
depends_on('py-scipy', type=('build', 'run'))
|
rspavel/spack
|
var/spack/repos/builtin/packages/py-localcider/package.py
|
Python
|
lgpl-2.1
| 788
|
"""distutils.unixccompiler
Contains the UnixCCompiler class, a subclass of CCompiler that handles
the "typical" Unix-style command-line C compiler:
* macros defined with -Dname[=value]
* macros undefined with -Uname
* include search directories specified with -Idir
  * libraries specified with -llib
* library search directories specified with -Ldir
* compile handled by 'cc' (or similar) executable with -c option:
compiles .c to .o
* link static library handled by 'ar' command (possibly with 'ranlib')
* link shared library handled by 'cc -shared'
"""
import os, sys, re
from distutils import sysconfig
from distutils.dep_util import newer
from distutils.ccompiler import \
CCompiler, gen_preprocess_options, gen_lib_options
from distutils.errors import \
DistutilsExecError, CompileError, LibError, LinkError
from distutils import log
if sys.platform == 'darwin':
import _osx_support
# XXX Things not currently handled:
# * optimization/debug/warning flags; we just use whatever's in Python's
# Makefile and live with it. Is this adequate? If not, we might
# have to have a bunch of subclasses GNUCCompiler, SGICCompiler,
# SunCCompiler, and I suspect down that road lies madness.
# * even if we don't know a warning flag from an optimization flag,
# we need some way for outsiders to feed preprocessor/compiler/linker
# flags in to us -- eg. a sysadmin might want to mandate certain flags
# via a site config file, or a user might want to set something for
# compiling this module distribution only via the setup.py command
# line, whatever. As long as these options come from something on the
# current system, they can be as system-dependent as they like, and we
# should just happily stuff them into the preprocessor/compiler/linker
# options and carry on.
class UnixCCompiler(CCompiler):
compiler_type = 'unix'
# These are used by CCompiler in two places: the constructor sets
# instance attributes 'preprocessor', 'compiler', etc. from them, and
# 'set_executable()' allows any of these to be set. The defaults here
# are pretty generic; they will probably have to be set by an outsider
# (eg. using information discovered by the sysconfig about building
# Python extensions).
executables = {'preprocessor' : None,
'compiler' : ["cc"],
'compiler_so' : ["cc"],
'compiler_cxx' : ["cc"],
'linker_so' : ["cc", "-shared"],
'linker_exe' : ["cc"],
'archiver' : ["ar", "-cr"],
'ranlib' : None,
}
if sys.platform[:6] == "darwin":
executables['ranlib'] = ["ranlib"]
# Needed for the filename generation methods provided by the base
# class, CCompiler. NB. whoever instantiates/uses a particular
# UnixCCompiler instance should set 'shared_lib_ext' -- we set a
# reasonable common default here, but it's not necessarily used on all
# Unices!
src_extensions = [".c",".C",".cc",".cxx",".cpp",".m"]
obj_extension = ".o"
static_lib_extension = ".a"
shared_lib_extension = ".so"
dylib_lib_extension = ".dylib"
static_lib_format = shared_lib_format = dylib_lib_format = "lib%s%s"
if sys.platform == "cygwin":
exe_extension = ".exe"
def preprocess(self, source, output_file=None, macros=None,
include_dirs=None, extra_preargs=None, extra_postargs=None):
fixed_args = self._fix_compile_args(None, macros, include_dirs)
ignore, macros, include_dirs = fixed_args
pp_opts = gen_preprocess_options(macros, include_dirs)
pp_args = self.preprocessor + pp_opts
if output_file:
pp_args.extend(['-o', output_file])
if extra_preargs:
pp_args[:0] = extra_preargs
if extra_postargs:
pp_args.extend(extra_postargs)
pp_args.append(source)
# We need to preprocess: either we're being forced to, or we're
# generating output to stdout, or there's a target output file and
# the source file is newer than the target (or the target doesn't
# exist).
if self.force or output_file is None or newer(source, output_file):
if output_file:
self.mkpath(os.path.dirname(output_file))
try:
self.spawn(pp_args)
except DistutilsExecError as msg:
raise CompileError(msg)
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
compiler_so = self.compiler_so
if sys.platform == 'darwin':
compiler_so = _osx_support.compiler_fixup(compiler_so,
cc_args + extra_postargs)
try:
self.spawn(compiler_so + cc_args + [src, '-o', obj] +
extra_postargs)
except DistutilsExecError as msg:
raise CompileError(msg)
def create_static_lib(self, objects, output_libname,
output_dir=None, debug=0, target_lang=None):
objects, output_dir = self._fix_object_args(objects, output_dir)
output_filename = \
self.library_filename(output_libname, output_dir=output_dir)
if self._need_link(objects, output_filename):
self.mkpath(os.path.dirname(output_filename))
self.spawn(self.archiver +
[output_filename] +
objects + self.objects)
            # Not many Unices require ranlib anymore -- SunOS 4.x is, I
            # think, the only major Unix that does.  Maybe we need some
# platform intelligence here to skip ranlib if it's not
# needed -- or maybe Python's configure script took care of
# it for us, hence the check for leading colon.
if self.ranlib:
try:
self.spawn(self.ranlib + [output_filename])
except DistutilsExecError as msg:
raise LibError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
def link(self, target_desc, objects,
output_filename, output_dir=None, libraries=None,
library_dirs=None, runtime_library_dirs=None,
export_symbols=None, debug=0, extra_preargs=None,
extra_postargs=None, build_temp=None, target_lang=None):
objects, output_dir = self._fix_object_args(objects, output_dir)
fixed_args = self._fix_lib_args(libraries, library_dirs,
runtime_library_dirs)
libraries, library_dirs, runtime_library_dirs = fixed_args
lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs,
libraries)
if not isinstance(output_dir, (str, type(None))):
raise TypeError("'output_dir' must be a string or None")
if output_dir is not None:
output_filename = os.path.join(output_dir, output_filename)
if self._need_link(objects, output_filename):
ld_args = (objects + self.objects +
lib_opts + ['-o', output_filename])
if debug:
ld_args[:0] = ['-g']
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath(os.path.dirname(output_filename))
try:
if target_desc == CCompiler.EXECUTABLE:
linker = self.linker_exe[:]
else:
linker = self.linker_so[:]
if target_lang == "c++" and self.compiler_cxx:
# skip over environment variable settings if /usr/bin/env
# is used to set up the linker's environment.
# This is needed on OSX. Note: this assumes that the
# normal and C++ compiler have the same environment
# settings.
i = 0
if os.path.basename(linker[0]) == "env":
i = 1
while '=' in linker[i]:
i += 1
linker[i] = self.compiler_cxx[i]
if sys.platform == 'darwin':
linker = _osx_support.compiler_fixup(linker, ld_args)
self.spawn(linker + ld_args)
except DistutilsExecError as msg:
raise LinkError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
# -- Miscellaneous methods -----------------------------------------
    # These are all used by the 'gen_lib_options()' function, in
# ccompiler.py.
def library_dir_option(self, dir):
return "-L" + dir
def _is_gcc(self, compiler_name):
return "gcc" in compiler_name or "g++" in compiler_name
def runtime_library_dir_option(self, dir):
# XXX Hackish, at the very least. See Python bug #445902:
# http://sourceforge.net/tracker/index.php
# ?func=detail&aid=445902&group_id=5470&atid=105470
# Linkers on different platforms need different options to
# specify that directories need to be added to the list of
# directories searched for dependencies when a dynamic library
# is sought. GCC on GNU systems (Linux, FreeBSD, ...) has to
# be told to pass the -R option through to the linker, whereas
# other compilers and gcc on other systems just know this.
# Other compilers may need something slightly different. At
# this time, there's no way to determine this information from
# the configuration data stored in the Python installation, so
# we use this hack.
compiler = os.path.basename(sysconfig.get_config_var("CC"))
if sys.platform[:6] == "darwin":
# MacOSX's linker doesn't understand the -R flag at all
return "-L" + dir
elif sys.platform[:5] == "hp-ux":
if self._is_gcc(compiler):
return ["-Wl,+s", "-L" + dir]
return ["+s", "-L" + dir]
elif sys.platform[:7] == "irix646" or sys.platform[:6] == "osf1V5":
return ["-rpath", dir]
else:
if self._is_gcc(compiler):
# gcc on non-GNU systems does not need -Wl, but can
# use it anyway. Since distutils has always passed in
# -Wl whenever gcc was used in the past it is probably
# safest to keep doing so.
if sysconfig.get_config_var("GNULD") == "yes":
# GNU ld needs an extra option to get a RUNPATH
# instead of just an RPATH.
return "-Wl,--enable-new-dtags,-R" + dir
else:
return "-Wl,-R" + dir
else:
# No idea how --enable-new-dtags would be passed on to
# ld if this system was using GNU ld. Don't know if a
# system like this even exists.
return "-R" + dir
def library_option(self, lib):
return "-l" + lib
def find_library_file(self, dirs, lib, debug=0):
shared_f = self.library_filename(lib, lib_type='shared')
dylib_f = self.library_filename(lib, lib_type='dylib')
static_f = self.library_filename(lib, lib_type='static')
if sys.platform == 'darwin':
# On OSX users can specify an alternate SDK using
# '-isysroot', calculate the SDK root if it is specified
# (and use it further on)
cflags = sysconfig.get_config_var('CFLAGS')
m = re.search(r'-isysroot\s+(\S+)', cflags)
if m is None:
sysroot = '/'
else:
sysroot = m.group(1)
for dir in dirs:
shared = os.path.join(dir, shared_f)
dylib = os.path.join(dir, dylib_f)
static = os.path.join(dir, static_f)
if sys.platform == 'darwin' and (
dir.startswith('/System/') or (
dir.startswith('/usr/') and not dir.startswith('/usr/local/'))):
shared = os.path.join(sysroot, dir[1:], shared_f)
dylib = os.path.join(sysroot, dir[1:], dylib_f)
static = os.path.join(sysroot, dir[1:], static_f)
# We're second-guessing the linker here, with not much hard
# data to go on: GCC seems to prefer the shared library, so I'm
# assuming that *all* Unix C compilers do. And of course I'm
# ignoring even GCC's "-static" option. So sue me.
if os.path.exists(dylib):
return dylib
elif os.path.exists(shared):
return shared
elif os.path.exists(static):
return static
# Oops, didn't find it in *any* of 'dirs'
return None
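# Illustrative example (editor's addition): the option helpers above map
# directly onto familiar cc flags.
#
#     >>> cc = UnixCCompiler()
#     >>> cc.library_option('m')
#     '-lm'
#     >>> cc.library_dir_option('/usr/local/lib')
#     '-L/usr/local/lib'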
|
Orav/kbengine
|
kbe/src/lib/python/Lib/distutils/unixccompiler.py
|
Python
|
lgpl-3.0
| 13,419
|
# -*- coding: utf-8 -*-
from __future__ import division
# -*- coding: utf-8 -*-
#
# Sphinx documentation build configuration file, created by
# sphinx-quickstart.py on Sat Mar 8 21:47:50 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os, re
# If your extensions are in another directory, add it here.
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.addons.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'SpiffWorkflow'
copyright = '2012 ' + ', '.join(open('../AUTHORS').read().splitlines())
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
import SpiffWorkflow
version = SpiffWorkflow.__version__
# The full version, including alpha/beta/rc tags.
release = version
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'sphinxdoc.css'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['figures']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Content template for the index page.
html_index = 'index.html'
# Custom sidebar templates, maps page names to templates.
html_sidebars = {'index': 'indexsidebar.html'}
# Additional templates that should be rendered to pages, maps page names to
# templates.
html_additional_pages = {'index': 'index.html'}
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
html_use_opensearch = 'http://sphinx.pocoo.org'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Sphinxdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [('contents', 'sphinx.tex', 'Sphinx Documentation',
'Georg Brandl', 'manual', 1)]
latex_logo = '_static/sphinx.png'
#latex_use_parts = True
# Additional stuff for the LaTeX preamble.
latex_elements = {
'fontpkg': '\\usepackage{palatino}'
}
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# Extension interface
# -------------------
from sphinx import addnodes
dir_sig_re = re.compile(r'\.\. ([^:]+)::(.*)$')
def parse_directive(env, sig, signode):
if not sig.startswith('.'):
dec_sig = '.. %s::' % sig
signode += addnodes.desc_name(dec_sig, dec_sig)
return sig
m = dir_sig_re.match(sig)
if not m:
signode += addnodes.desc_name(sig, sig)
return sig
name, args = m.groups()
dec_name = '.. %s::' % name
signode += addnodes.desc_name(dec_name, dec_name)
signode += addnodes.desc_addname(args, args)
return name
def parse_role(env, sig, signode):
signode += addnodes.desc_name(':%s:' % sig, ':%s:' % sig)
return sig
event_sig_re = re.compile(r'([a-zA-Z-]+)\s*\((.*)\)')
def parse_event(env, sig, signode):
m = event_sig_re.match(sig)
if not m:
signode += addnodes.desc_name(sig, sig)
return sig
name, args = m.groups()
signode += addnodes.desc_name(name, name)
plist = addnodes.desc_parameterlist()
for arg in args.split(','):
arg = arg.strip()
plist += addnodes.desc_parameter(arg, arg)
signode += plist
return name
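# Illustrative example (editor's addition): event_sig_re splits an event
# signature into its name and argument list.
#
#     >>> event_sig_re.match('builder-inited (app)').groups()
#     ('builder-inited', 'app')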
def setup(app):
from sphinx.ext.autodoc import cut_lines
app.connect('autodoc-process-docstring', cut_lines(4, what=['module']))
app.add_description_unit('directive', 'dir', 'pair: %s; directive', parse_directive)
app.add_description_unit('role', 'role', 'pair: %s; role', parse_role)
app.add_description_unit('confval', 'confval', 'pair: %s; configuration value')
app.add_description_unit('event', 'event', 'pair: %s; event', parse_event)
|
zetaops/SpiffWorkflow
|
doc/conf.py
|
Python
|
lgpl-3.0
| 5,907
|
# Copyright (c) 2010 Cloud.com, Inc
# Copyright 2012 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for basic VM operations.
"""
import functools
import os
from oslo.config import cfg
from nova.api.metadata import base as instance_metadata
from nova import exception
from nova.openstack.common import excutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova.openstack.common import units
from nova import utils
from nova.virt import configdrive
from nova.virt.hyperv import constants
from nova.virt.hyperv import imagecache
from nova.virt.hyperv import utilsfactory
from nova.virt.hyperv import vhdutilsv2
from nova.virt.hyperv import vmutils
from nova.virt.hyperv import volumeops
LOG = logging.getLogger(__name__)
hyperv_opts = [
cfg.BoolOpt('limit_cpu_features',
default=False,
help='Required for live migration among '
'hosts with different CPU features'),
cfg.BoolOpt('config_drive_inject_password',
default=False,
help='Sets the admin password in the config drive image'),
cfg.StrOpt('qemu_img_cmd',
default="qemu-img.exe",
help='Path of qemu-img command which is used to convert '
'between different image types'),
cfg.BoolOpt('config_drive_cdrom',
default=False,
help='Attaches the Config Drive image as a cdrom drive '
'instead of a disk drive'),
cfg.BoolOpt('enable_instance_metrics_collection',
default=False,
                help='Enables metrics collection for an instance by using '
                     'Hyper-V\'s metric APIs. Collected data can be retrieved '
                     'by other apps and services, e.g.: Ceilometer. '
                     'Requires Hyper-V / Windows Server 2012 and above'),
cfg.FloatOpt('dynamic_memory_ratio',
default=1.0,
help='Enables dynamic memory allocation (ballooning) when '
'set to a value greater than 1. The value expresses '
'the ratio between the total RAM assigned to an '
'instance and its startup RAM amount. For example a '
'ratio of 2.0 for an instance with 1024MB of RAM '
'implies 512MB of RAM allocated at startup')
]
CONF = cfg.CONF
CONF.register_opts(hyperv_opts, 'hyperv')
CONF.import_opt('use_cow_images', 'nova.virt.driver')
CONF.import_opt('network_api_class', 'nova.network')
def check_admin_permissions(function):
@functools.wraps(function)
def wrapper(self, *args, **kwds):
# Make sure the windows account has the required admin permissions.
self._vmutils.check_admin_permissions()
return function(self, *args, **kwds)
return wrapper
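# Illustrative note (editor's addition): because the wrapper uses
# functools.wraps, decorated methods keep their metadata.
#
#     >>> VMOps.spawn.__name__
#     'spawn'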
class VMOps(object):
_vif_driver_class_map = {
'nova.network.neutronv2.api.API':
'nova.virt.hyperv.vif.HyperVNeutronVIFDriver',
'nova.network.api.API':
'nova.virt.hyperv.vif.HyperVNovaNetworkVIFDriver',
}
def __init__(self):
self._vmutils = utilsfactory.get_vmutils()
self._vhdutils = utilsfactory.get_vhdutils()
self._pathutils = utilsfactory.get_pathutils()
self._volumeops = volumeops.VolumeOps()
self._imagecache = imagecache.ImageCache()
self._vif_driver = None
self._load_vif_driver_class()
def _load_vif_driver_class(self):
try:
class_name = self._vif_driver_class_map[CONF.network_api_class]
self._vif_driver = importutils.import_object(class_name)
except KeyError:
raise TypeError(_("VIF driver not found for "
"network_api_class: %s") %
CONF.network_api_class)
def list_instances(self):
return self._vmutils.list_instances()
def get_info(self, instance):
"""Get information about the VM."""
LOG.debug(_("get_info called for instance"), instance=instance)
instance_name = instance['name']
if not self._vmutils.vm_exists(instance_name):
raise exception.InstanceNotFound(instance_id=instance['uuid'])
info = self._vmutils.get_vm_summary_info(instance_name)
state = constants.HYPERV_POWER_STATE[info['EnabledState']]
return {'state': state,
'max_mem': info['MemoryUsage'],
'mem': info['MemoryUsage'],
'num_cpu': info['NumberOfProcessors'],
'cpu_time': info['UpTime']}
def _create_root_vhd(self, context, instance):
base_vhd_path = self._imagecache.get_cached_image(context, instance)
format_ext = base_vhd_path.split('.')[-1]
root_vhd_path = self._pathutils.get_root_vhd_path(instance['name'],
format_ext)
try:
if CONF.use_cow_images:
LOG.debug(_("Creating differencing VHD. Parent: "
"%(base_vhd_path)s, Target: %(root_vhd_path)s"),
{'base_vhd_path': base_vhd_path,
'root_vhd_path': root_vhd_path})
self._vhdutils.create_differencing_vhd(root_vhd_path,
base_vhd_path)
else:
LOG.debug(_("Copying VHD image %(base_vhd_path)s to target: "
"%(root_vhd_path)s"),
{'base_vhd_path': base_vhd_path,
'root_vhd_path': root_vhd_path})
self._pathutils.copyfile(base_vhd_path, root_vhd_path)
base_vhd_info = self._vhdutils.get_vhd_info(base_vhd_path)
base_vhd_size = base_vhd_info['MaxInternalSize']
root_vhd_size = instance['root_gb'] * units.Gi
# NOTE(lpetrut): Checking the namespace is needed as the
# following method is not yet implemented in vhdutilsv2.
if not isinstance(self._vhdutils, vhdutilsv2.VHDUtilsV2):
root_vhd_internal_size = (
self._vhdutils.get_internal_vhd_size_by_file_size(
root_vhd_path, root_vhd_size))
else:
root_vhd_internal_size = root_vhd_size
if root_vhd_internal_size < base_vhd_size:
error_msg = _("Cannot resize a VHD to a smaller size, the"
" original size is %(base_vhd_size)s, the"
" newer size is %(root_vhd_size)s"
) % {'base_vhd_size': base_vhd_size,
'root_vhd_size': root_vhd_internal_size}
raise vmutils.HyperVException(error_msg)
elif root_vhd_internal_size > base_vhd_size:
LOG.debug(_("Resizing VHD %(root_vhd_path)s to new "
"size %(root_vhd_size)s"),
{'root_vhd_size': root_vhd_internal_size,
'root_vhd_path': root_vhd_path})
self._vhdutils.resize_vhd(root_vhd_path, root_vhd_size)
except Exception:
with excutils.save_and_reraise_exception():
if self._pathutils.exists(root_vhd_path):
self._pathutils.remove(root_vhd_path)
return root_vhd_path
def create_ephemeral_vhd(self, instance):
eph_vhd_size = instance.get('ephemeral_gb', 0) * units.Gi
if eph_vhd_size:
vhd_format = self._vhdutils.get_best_supported_vhd_format()
eph_vhd_path = self._pathutils.get_ephemeral_vhd_path(
instance['name'], vhd_format)
self._vhdutils.create_dynamic_vhd(eph_vhd_path, eph_vhd_size,
vhd_format)
return eph_vhd_path
@check_admin_permissions
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info=None):
"""Create a new VM and start it."""
LOG.info(_("Spawning new instance"), instance=instance)
instance_name = instance['name']
if self._vmutils.vm_exists(instance_name):
raise exception.InstanceExists(name=instance_name)
# Make sure we're starting with a clean slate.
self._delete_disk_files(instance_name)
if self._volumeops.ebs_root_in_block_devices(block_device_info):
root_vhd_path = None
else:
root_vhd_path = self._create_root_vhd(context, instance)
eph_vhd_path = self.create_ephemeral_vhd(instance)
try:
self.create_instance(instance, network_info, block_device_info,
root_vhd_path, eph_vhd_path)
if configdrive.required_by(instance):
self._create_config_drive(instance, injected_files,
admin_password)
self.power_on(instance)
except Exception as ex:
LOG.exception(ex)
self.destroy(instance)
raise vmutils.HyperVException(_('Spawn instance failed'))
def create_instance(self, instance, network_info, block_device_info,
root_vhd_path, eph_vhd_path):
instance_name = instance['name']
self._vmutils.create_vm(instance_name,
instance['memory_mb'],
instance['vcpus'],
CONF.hyperv.limit_cpu_features,
CONF.hyperv.dynamic_memory_ratio)
ctrl_disk_addr = 0
if root_vhd_path:
self._vmutils.attach_ide_drive(instance_name,
root_vhd_path,
0,
ctrl_disk_addr,
constants.IDE_DISK)
ctrl_disk_addr += 1
if eph_vhd_path:
self._vmutils.attach_ide_drive(instance_name,
eph_vhd_path,
0,
ctrl_disk_addr,
constants.IDE_DISK)
self._vmutils.create_scsi_controller(instance_name)
self._volumeops.attach_volumes(block_device_info,
instance_name,
root_vhd_path is None)
for vif in network_info:
LOG.debug(_('Creating nic for instance: %s'), instance_name)
self._vmutils.create_nic(instance_name,
vif['id'],
vif['address'])
self._vif_driver.plug(instance, vif)
if CONF.hyperv.enable_instance_metrics_collection:
self._vmutils.enable_vm_metrics_collection(instance_name)
def _create_config_drive(self, instance, injected_files, admin_password):
        if CONF.config_drive_format != 'iso9660':
            raise vmutils.HyperVException(
                _('Invalid config_drive_format "%s"') %
                CONF.config_drive_format)
        LOG.info(_('Using config drive for instance'), instance=instance)
extra_md = {}
if admin_password and CONF.hyperv.config_drive_inject_password:
extra_md['admin_pass'] = admin_password
inst_md = instance_metadata.InstanceMetadata(instance,
content=injected_files,
extra_md=extra_md)
instance_path = self._pathutils.get_instance_dir(
instance['name'])
configdrive_path_iso = os.path.join(instance_path, 'configdrive.iso')
LOG.info(_('Creating config drive at %(path)s'),
{'path': configdrive_path_iso}, instance=instance)
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
try:
cdb.make_drive(configdrive_path_iso)
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
LOG.error(_('Creating config drive failed with error: %s'),
e, instance=instance)
if not CONF.hyperv.config_drive_cdrom:
drive_type = constants.IDE_DISK
configdrive_path = os.path.join(instance_path,
'configdrive.vhd')
utils.execute(CONF.hyperv.qemu_img_cmd,
'convert',
'-f',
'raw',
'-O',
'vpc',
configdrive_path_iso,
configdrive_path,
attempts=1)
self._pathutils.remove(configdrive_path_iso)
else:
drive_type = constants.IDE_DVD
configdrive_path = configdrive_path_iso
self._vmutils.attach_ide_drive(instance['name'], configdrive_path,
1, 0, drive_type)
def _disconnect_volumes(self, volume_drives):
for volume_drive in volume_drives:
self._volumeops.disconnect_volume(volume_drive)
def _delete_disk_files(self, instance_name):
self._pathutils.get_instance_dir(instance_name,
create_dir=False,
remove_dir=True)
def destroy(self, instance, network_info=None, block_device_info=None,
destroy_disks=True):
instance_name = instance['name']
LOG.info(_("Got request to destroy instance: %s"), instance_name)
try:
if self._vmutils.vm_exists(instance_name):
                # Stop the VM first.
self.power_off(instance)
storage = self._vmutils.get_vm_storage_paths(instance_name)
(disk_files, volume_drives) = storage
self._vmutils.destroy_vm(instance_name)
self._disconnect_volumes(volume_drives)
else:
LOG.debug(_("Instance not found: %s"), instance_name)
if destroy_disks:
self._delete_disk_files(instance_name)
except Exception as ex:
LOG.exception(ex)
raise vmutils.HyperVException(_('Failed to destroy instance: %s') %
instance_name)
def reboot(self, instance, network_info, reboot_type):
"""Reboot the specified instance."""
LOG.debug(_("reboot instance"), instance=instance)
self._set_vm_state(instance['name'],
constants.HYPERV_VM_STATE_REBOOT)
def pause(self, instance):
"""Pause VM instance."""
LOG.debug(_("Pause instance"), instance=instance)
self._set_vm_state(instance["name"],
constants.HYPERV_VM_STATE_PAUSED)
def unpause(self, instance):
"""Unpause paused VM instance."""
LOG.debug(_("Unpause instance"), instance=instance)
self._set_vm_state(instance["name"],
constants.HYPERV_VM_STATE_ENABLED)
def suspend(self, instance):
"""Suspend the specified instance."""
LOG.debug(_("Suspend instance"), instance=instance)
self._set_vm_state(instance["name"],
constants.HYPERV_VM_STATE_SUSPENDED)
def resume(self, instance):
"""Resume the suspended VM instance."""
LOG.debug(_("Resume instance"), instance=instance)
self._set_vm_state(instance["name"],
constants.HYPERV_VM_STATE_ENABLED)
def power_off(self, instance):
"""Power off the specified instance."""
LOG.debug(_("Power off instance"), instance=instance)
self._set_vm_state(instance["name"],
constants.HYPERV_VM_STATE_DISABLED)
def power_on(self, instance):
"""Power on the specified instance."""
LOG.debug(_("Power on instance"), instance=instance)
self._set_vm_state(instance["name"],
constants.HYPERV_VM_STATE_ENABLED)
def _set_vm_state(self, vm_name, req_state):
try:
self._vmutils.set_vm_state(vm_name, req_state)
LOG.debug(_("Successfully changed state of VM %(vm_name)s"
" to: %(req_state)s"),
{'vm_name': vm_name, 'req_state': req_state})
except Exception as ex:
LOG.exception(ex)
msg = (_("Failed to change vm state of %(vm_name)s"
" to %(req_state)s") %
{'vm_name': vm_name, 'req_state': req_state})
raise vmutils.HyperVException(msg)
|
petrutlucian94/nova_dev
|
nova/virt/hyperv/vmops.py
|
Python
|
apache-2.0
| 17,820
|
from foam.sfa.util.xrn import urn_to_hrn
from foam.sfa.trust.credential import Credential
from foam.sfa.trust.auth import Auth
class Start:
def __init__(self, xrn, creds, **kwargs):
hrn, type = urn_to_hrn(xrn)
valid_creds = Auth().checkCredentials(creds, 'startslice', hrn)
origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
return
|
dana-i2cat/felix
|
ofam/src/src/foam/sfa/methods/Start.py
|
Python
|
apache-2.0
| 402
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# IkaLog
# ======
# Copyright (C) 2015 Takeshi HASEGAWA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# This module is still a proof of concept, and subject to change.
#
from datetime import datetime
# IkaLog Output Plugin: Write 'Alive Squids' CSV data
#
class AliveSquidsCSV(object):
##
    # Append a record (text) to a file.
    # @param self The Object Pointer.
    # @param file Path of the file to append to.
    # @param record Record (text)
    #
def write_record(self, file, record):
        try:
            with open(file, "a") as csv_file:
                csv_file.write(record)
        except Exception:
            print("CSV: Failed to write CSV File")
def write_alive_squids_csv(self, context, basename="ikabattle_log", debug=False):
csv = ["tick,y\n", "tick,y\n"]
for sample in context['game']['livesTrack']:
if debug:
                print('lives sample = %s' % sample)
time = sample[0]
del sample[0]
num_team = 0
for team in sample:
num_squid = 0
for alive in team:
num_squid = num_squid + 1
if alive:
csv[num_team] = "%s%d, %d\n" % (
csv[num_team], time, num_squid)
num_team = num_team + 1
num_team = 0
t = datetime.now()
t_str = t.strftime("%Y%m%d_%H%M")
for f in csv:
self.write_record('%s/%s_team%d.csv' %
(self.dest_dir, basename, num_team), f)
num_team = num_team + 1
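    # Illustrative output (editor's addition): each per-team CSV starts with
    # a "tick,y" header followed by one "<time>, <index>" row for every
    # squid that was alive at that tick, e.g.:
    #
    #     tick,y
    #     12, 1
    #     12, 2
    #     13, 1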
def write_flags_csv(self, context, basename="ikabattle_log", debug=False):
        # Don't write anything if there is no data
if len(context['game']['towerTrack']) == 0:
return
csv = "tick,pos,max,min\n"
for sample in context['game']['towerTrack']:
if debug:
                print('tower sample = %s' % sample)
time = sample[0]
sample = sample[1]
csv = "%s%d, %d, %d, %d\n" % (
csv, time, sample['pos'], sample['max'], sample['min'])
self.write_record('%s/%s_tower.csv' % (self.dest_dir, basename), csv)
##
# on_game_individual_result Hook
# @param self The Object Pointer
# @param context IkaLog context
#
def on_game_individual_result(self, context):
t = datetime.now()
basename = t.strftime("ikabattle_log_%Y%m%d_%H%M")
self.write_alive_squids_csv(context, basename=basename, debug=self.debug)
self.write_flags_csv(context, basename=basename, debug=self.debug)
##
# Constructor
# @param self The Object Pointer.
    # @param dir Destination directory (relative or absolute path)
def __init__(self, dir='./log/', debug=False):
self.dest_dir = dir
self.debug = debug
|
kshimo69/IkaLog
|
ikalog/outputs/alive_squids_csv.py
|
Python
|
apache-2.0
| 3,445
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
# Script which generates a collage of provider logos from multiple provider
# logo files.
#
# It works in two steps:
#
# 1. Resize all the provider logo files (reduce the dimensions)
# 2. Assemble a final image from the resized images
import os
import sys
import argparse
import subprocess
import random
from os.path import join as pjoin
DIMENSIONS = '150x150' # Dimensions of the resized image (<width>x<height>)
GEOMETRY = '+4+4'  # Spacing between images (+<x offset>+<y offset>)
TO_CREATE_DIRS = ['resized/', 'final/']
def setup(output_path):
"""
Create missing directories.
"""
for directory in TO_CREATE_DIRS:
final_path = pjoin(output_path, directory)
if not os.path.exists(final_path):
os.makedirs(final_path)
def get_logo_files(input_path):
logo_files = os.listdir(input_path)
logo_files = [name for name in logo_files if
'resized' not in name and name.endswith('png')]
logo_files = [pjoin(input_path, name) for name in logo_files]
return logo_files
def resize_images(logo_files, output_path):
resized_images = []
for logo_file in logo_files:
name, ext = os.path.splitext(os.path.basename(logo_file))
new_name = '%s%s' % (name, ext)
out_name = pjoin(output_path, 'resized/', new_name)
print('Resizing image: %(name)s' % {'name': logo_file})
values = {'name': logo_file, 'out_name': out_name,
'dimensions': DIMENSIONS}
cmd = 'convert %(name)s -resize %(dimensions)s %(out_name)s'
cmd = cmd % values
subprocess.call(cmd, shell=True)
resized_images.append(out_name)
return resized_images
def assemble_final_image(resized_images, output_path):
final_name = pjoin(output_path, 'final/logos.png')
random.shuffle(resized_images)
values = {'images': ' '.join(resized_images), 'geometry': GEOMETRY,
'out_name': final_name}
cmd = 'montage %(images)s -geometry %(geometry)s %(out_name)s'
cmd = cmd % values
print('Generating final image: %(name)s' % {'name': final_name})
subprocess.call(cmd, shell=True)
def main(input_path, output_path):
if not os.path.exists(input_path):
print('Path doesn\'t exist: %s' % (input_path))
sys.exit(2)
if not os.path.exists(output_path):
print('Path doesn\'t exist: %s' % (output_path))
sys.exit(2)
logo_files = get_logo_files(input_path=input_path)
setup(output_path=output_path)
resized_images = resize_images(logo_files=logo_files,
output_path=output_path)
assemble_final_image(resized_images=resized_images,
output_path=output_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Assemble provider logos '
' in a single image')
parser.add_argument('--input-path', action='store',
help='Path to directory which contains provider '
'logo files')
parser.add_argument('--output-path', action='store',
help='Path where the new files will be written')
args = parser.parse_args()
input_path = os.path.abspath(args.input_path)
output_path = os.path.abspath(args.output_path)
main(input_path=input_path, output_path=output_path)
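# Example invocation (illustrative; requires ImageMagick's `convert` and
# `montage` commands on the PATH):
#
#     python generate_provider_logos_collage_image.py \
#         --input-path=logos/ --output-path=out/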
|
Kami/libcloud
|
contrib/generate_provider_logos_collage_image.py
|
Python
|
apache-2.0
| 4,224
|
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System.Core")
AddReference("QuantConnect.Common")
AddReference("QuantConnect.Algorithm")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import QCAlgorithm
from QuantConnect.Data.UniverseSelection import *
### <summary>
### Demonstration of using coarse and fine universe selection together to filter down a smaller universe of stocks.
### </summary>
### <meta name="tag" content="using data" />
### <meta name="tag" content="universes" />
### <meta name="tag" content="coarse universes" />
### <meta name="tag" content="fine universes" />
class CoarseFundamentalTop3Algorithm(QCAlgorithm):
def Initialize(self):
        '''Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must be initialized.'''
self.SetStartDate(2014,3,24) #Set Start Date
self.SetEndDate(2014,4,7) #Set End Date
self.SetCash(50000) #Set Strategy Cash
# what resolution should the data *added* to the universe be?
self.UniverseSettings.Resolution = Resolution.Daily
# this add universe method accepts a single parameter that is a function that
# accepts an IEnumerable<CoarseFundamental> and returns IEnumerable<Symbol>
self.AddUniverse(self.CoarseSelectionFunction)
self.__numberOfSymbols = 3
self._changes = None
# sort the data by daily dollar volume and take the top 'NumberOfSymbols'
def CoarseSelectionFunction(self, coarse):
# sort descending by daily dollar volume
sortedByDollarVolume = sorted(coarse, key=lambda x: x.DollarVolume, reverse=True)
# return the symbol objects of the top entries from our sorted collection
return [ x.Symbol for x in sortedByDollarVolume[:self.__numberOfSymbols] ]
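        # Sketch (symbols and volumes assumed for illustration): if coarse held
        # AAPL, MSFT, SPY and IBM with dollar volumes 9, 7, 5 and 3, this
        # returns the Symbol objects for AAPL, MSFT and SPY.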
def OnData(self, data):
self.Log(f"OnData({self.UtcTime}): Keys: {', '.join([key.Value for key in data.Keys])}")
# if we have no changes, do nothing
if self._changes is None: return
# liquidate removed securities
for security in self._changes.RemovedSecurities:
if security.Invested:
self.Liquidate(security.Symbol)
# we want 1/N allocation in each security in our universe
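        # (with self.__numberOfSymbols == 3 each added security targets 1/3 of
        # total portfolio value)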
for security in self._changes.AddedSecurities:
self.SetHoldings(security.Symbol, 1 / self.__numberOfSymbols)
self._changes = None
# this event fires whenever we have changes to our universe
def OnSecuritiesChanged(self, changes):
self._changes = changes
self.Log(f"OnSecuritiesChanged({self.UtcTime}):: {changes}")
def OnOrderEvent(self, fill):
self.Log(f"OnOrderEvent({self.UtcTime}):: {fill}")
|
AnshulYADAV007/Lean
|
Algorithm.Python/CoarseFundamentalTop3Algorithm.py
|
Python
|
apache-2.0
| 3,479
|
# -*- coding: utf-8 -*-
import json
import os
import re
import cherrypy
import mako
from girder import constants
from girder.models.setting import Setting
from girder.settings import SettingKey
from girder.utility import config
class WebrootBase:
"""
Serves a template file in response to GET requests.
This will typically be the base class of any non-API endpoints.
"""
exposed = True
def __init__(self, templatePath):
self.vars = {}
self.config = config.getConfig()
self._templateDirs = []
self.setTemplatePath(templatePath)
def updateHtmlVars(self, vars):
"""
If any of the variables in the index html need to change, call this
with the updated set of variables to render the template with.
"""
self.vars.update(vars)
def setTemplatePath(self, templatePath):
"""
Set the path to a template file to render instead of the default template.
The default template remains available so that custom templates can
inherit from it. To do so, save the default template filename from
the templateFilename attribute before calling this function, pass
it as a variable to the custom template using updateHtmlVars(), and
reference that variable in an <%inherit> directive like:
<%inherit file="${context.get('defaultTemplateFilename')}"/>
"""
templateDir, templateFilename = os.path.split(templatePath)
self._templateDirs.append(templateDir)
self.templateFilename = templateFilename
# Reset TemplateLookup instance so that it will be instantiated lazily,
# with the latest template directories, on the next GET request
self._templateLookup = None
@staticmethod
def _escapeJavascript(string):
# Per the advice at:
# https://www.owasp.org/index.php/XSS_(Cross_Site_Scripting)_Prevention_Cheat_Sheet#Output_Encoding_Rules_Summary
# replace all non-alphanumeric characters with "\0uXXXX" unicode escaping:
# https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Lexical_grammar#Unicode_escape_sequences
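        # For illustration: _escapeJavascript('a-b') returns the text a\u002Db
        # (ord('-') == 0x2D); alphanumeric characters pass through unchanged.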
return re.sub(
r'[^a-zA-Z0-9]',
lambda match: '\\u%04X' % ord(match.group()),
string
)
def _renderHTML(self):
if self._templateLookup is None:
self._templateLookup = mako.lookup.TemplateLookup(directories=self._templateDirs)
template = self._templateLookup.get_template(self.templateFilename)
return template.render(js=self._escapeJavascript, json=json.dumps, **self.vars)
def GET(self, **params):
return self._renderHTML()
def DELETE(self, **params):
raise cherrypy.HTTPError(405)
def PATCH(self, **params):
raise cherrypy.HTTPError(405)
def POST(self, **params):
raise cherrypy.HTTPError(405)
def PUT(self, **params):
raise cherrypy.HTTPError(405)
class Webroot(WebrootBase):
"""
The webroot endpoint simply serves the main index HTML file.
"""
def __init__(self, templatePath=None):
if not templatePath:
templatePath = os.path.join(constants.PACKAGE_DIR, 'utility', 'webroot.mako')
super().__init__(templatePath)
self.vars = {}
def _renderHTML(self):
from girder.utility import server
from girder.plugin import loadedPlugins
self.vars['plugins'] = loadedPlugins()
self.vars['pluginCss'] = []
self.vars['pluginJs'] = []
builtDir = os.path.join(constants.STATIC_ROOT_DIR, 'built', 'plugins')
for plugin in self.vars['plugins']:
if os.path.exists(os.path.join(builtDir, plugin, 'plugin.min.css')):
self.vars['pluginCss'].append(plugin)
if os.path.exists(os.path.join(builtDir, plugin, 'plugin.min.js')):
self.vars['pluginJs'].append(plugin)
self.vars['apiRoot'] = server.getApiRoot()
self.vars['staticPublicPath'] = server.getStaticPublicPath()
self.vars['brandName'] = Setting().get(SettingKey.BRAND_NAME)
self.vars['contactEmail'] = Setting().get(SettingKey.CONTACT_EMAIL_ADDRESS)
self.vars['privacyNoticeHref'] = Setting().get(SettingKey.PRIVACY_NOTICE)
self.vars['bannerColor'] = Setting().get(SettingKey.BANNER_COLOR)
self.vars['registrationPolicy'] = Setting().get(SettingKey.REGISTRATION_POLICY)
self.vars['enablePasswordLogin'] = Setting().get(SettingKey.ENABLE_PASSWORD_LOGIN)
return super()._renderHTML()
|
RafaelPalomar/girder
|
girder/utility/webroot.py
|
Python
|
apache-2.0
| 4,603
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.service.edp import base_engine
from sahara.utils import edp
class FakeJobEngine(base_engine.JobEngine):
def cancel_job(self, job_execution):
pass
def get_job_status(self, job_execution):
pass
def run_job(self, job_execution):
return 'engine_job_id', edp.JOB_STATUS_SUCCEEDED, None
def run_scheduled_job(self, job_execution):
pass
def validate_job_execution(self, cluster, job, data):
pass
@staticmethod
def get_possible_job_config(job_type):
return None
@staticmethod
def get_supported_job_types():
return edp.JOB_TYPES_ALL
|
tellesnobrega/sahara
|
sahara/plugins/fake/edp_engine.py
|
Python
|
apache-2.0
| 1,219
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import properties
from heat.engine.resources.openstack.neutron import neutron
from heat.engine import support
class Net(neutron.NeutronResource):
PROPERTIES = (
NAME, VALUE_SPECS, ADMIN_STATE_UP, TENANT_ID, SHARED,
DHCP_AGENT_IDS, PORT_SECURITY_ENABLED,
) = (
'name', 'value_specs', 'admin_state_up', 'tenant_id', 'shared',
'dhcp_agent_ids', 'port_security_enabled',
)
ATTRIBUTES = (
STATUS, NAME_ATTR, SUBNETS, ADMIN_STATE_UP_ATTR, TENANT_ID_ATTR,
PORT_SECURITY_ENABLED_ATTR, MTU_ATTR,
) = (
"status", "name", "subnets", "admin_state_up", "tenant_id",
"port_security_enabled", "mtu",
)
properties_schema = {
NAME: properties.Schema(
properties.Schema.STRING,
_('A string specifying a symbolic name for the network, which is '
'not required to be unique.'),
update_allowed=True
),
VALUE_SPECS: properties.Schema(
properties.Schema.MAP,
_('Extra parameters to include in the "network" object in the '
'creation request. Parameters are often specific to installed '
'hardware or extensions.'),
default={},
update_allowed=True
),
ADMIN_STATE_UP: properties.Schema(
properties.Schema.BOOLEAN,
_('A boolean value specifying the administrative status of the '
'network.'),
default=True,
update_allowed=True
),
TENANT_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of the tenant which will own the network. Only '
'administrative users can set the tenant identifier; this '
'cannot be changed using authorization policies.')
),
SHARED: properties.Schema(
properties.Schema.BOOLEAN,
_('Whether this network should be shared across all tenants. '
'Note that the default policy setting restricts usage of this '
'attribute to administrative users only.'),
default=False,
update_allowed=True
),
DHCP_AGENT_IDS: properties.Schema(
properties.Schema.LIST,
            _('The IDs of the DHCP agents to schedule the network on. Note that '
'the default policy setting in Neutron restricts usage of this '
'property to administrative users only.'),
update_allowed=True
),
PORT_SECURITY_ENABLED: properties.Schema(
properties.Schema.BOOLEAN,
_('Flag to enable/disable port security on the network. It '
'provides the default value for the attribute of the ports '
              'created on this network.'),
update_allowed=True,
support_status=support.SupportStatus(version='5.0.0')
),
}
attributes_schema = {
STATUS: attributes.Schema(
_("The status of the network."),
type=attributes.Schema.STRING
),
NAME_ATTR: attributes.Schema(
_("The name of the network."),
type=attributes.Schema.STRING
),
SUBNETS: attributes.Schema(
_("Subnets of this network."),
type=attributes.Schema.LIST
),
ADMIN_STATE_UP_ATTR: attributes.Schema(
_("The administrative status of the network."),
type=attributes.Schema.STRING
),
TENANT_ID_ATTR: attributes.Schema(
_("The tenant owning this network."),
type=attributes.Schema.STRING
),
PORT_SECURITY_ENABLED_ATTR: attributes.Schema(
_("Port security enabled of the network."),
support_status=support.SupportStatus(version='5.0.0'),
type=attributes.Schema.BOOLEAN
),
MTU_ATTR: attributes.Schema(
_("The maximum transmission unit size(in bytes) for the network."),
support_status=support.SupportStatus(version='5.0.0'),
type=attributes.Schema.INTEGER
),
}
def handle_create(self):
props = self.prepare_properties(
self.properties,
self.physical_resource_name())
dhcp_agent_ids = props.pop(self.DHCP_AGENT_IDS, None)
net = self.neutron().create_network({'network': props})['network']
self.resource_id_set(net['id'])
if dhcp_agent_ids:
self._replace_dhcp_agents(dhcp_agent_ids)
def _show_resource(self):
return self.neutron().show_network(
self.resource_id)['network']
def check_create_complete(self, *args):
attributes = self._show_resource()
return self.is_built(attributes)
def handle_delete(self):
client = self.neutron()
try:
client.delete_network(self.resource_id)
except Exception as ex:
self.client_plugin().ignore_not_found(ex)
else:
return True
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
props = self.prepare_update_properties(json_snippet)
dhcp_agent_ids = props.pop(self.DHCP_AGENT_IDS, None)
if self.DHCP_AGENT_IDS in prop_diff:
if dhcp_agent_ids is not None:
self._replace_dhcp_agents(dhcp_agent_ids)
del prop_diff[self.DHCP_AGENT_IDS]
if len(prop_diff) > 0:
self.neutron().update_network(
self.resource_id, {'network': props})
def check_update_complete(self, *args):
attributes = self._show_resource()
return self.is_built(attributes)
def _replace_dhcp_agents(self, dhcp_agent_ids):
ret = self.neutron().list_dhcp_agent_hosting_networks(
self.resource_id)
old = set([agent['id'] for agent in ret['agents']])
new = set(dhcp_agent_ids)
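        # e.g. old = {'agent-1', 'agent-2'} and new = {'agent-2', 'agent-3'}
        # adds the network to 'agent-3' and removes it from 'agent-1'
        # (illustrative IDs)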
for dhcp_agent_id in new - old:
try:
self.neutron().add_network_to_dhcp_agent(
dhcp_agent_id, {'network_id': self.resource_id})
except Exception as ex:
# if 409 is happened, the agent is already associated.
if not self.client_plugin().is_conflict(ex):
raise
for dhcp_agent_id in old - new:
try:
self.neutron().remove_network_from_dhcp_agent(
dhcp_agent_id, self.resource_id)
except Exception as ex:
# assume 2 patterns about status_code following:
# 404: the network or agent is already gone
# 409: the network isn't scheduled by the dhcp_agent
if not (self.client_plugin().is_conflict(ex) or
self.client_plugin().is_not_found(ex)):
raise
def resource_mapping():
return {
'OS::Neutron::Net': Net,
}
|
miguelgrinberg/heat
|
heat/engine/resources/openstack/neutron/net.py
|
Python
|
apache-2.0
| 7,583
|
input = """
% Guess colours.
chosenColour(N,C) | notChosenColour(N,C) :- node(N), colour(C).
% Exactly one colour per node.
:- #count{ C : chosenColour(X,C) } > 1, node(X).
:- #count{ C : chosenColour(X,C) } < 1, node(X).
% No two adjacent nodes have the same colour.
:- link(X,Y), X<Y, chosenColour(X,C), chosenColour(Y,C).
node(1).
node(2).
node(3).
node(4).
node(5).
link(1,2).
link(2,1).
link(1,3).
link(3,1).
link(2,3).
link(3,2).
link(3,5).
link(5,3).
link(4,5).
link(5,4).
colour(red0).
colour(green0).
colour(blue0).
"""
output = """
{chosenColour(1,blue0), chosenColour(2,green0), chosenColour(3,red0), chosenColour(4,blue0), chosenColour(5,green0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,green0), notChosenColour(1,red0), notChosenColour(2,blue0), notChosenColour(2,red0), notChosenColour(3,blue0), notChosenColour(3,green0), notChosenColour(4,green0), notChosenColour(4,red0), notChosenColour(5,blue0), notChosenColour(5,red0)}
{chosenColour(1,blue0), chosenColour(2,green0), chosenColour(3,red0), chosenColour(4,green0), chosenColour(5,blue0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,green0), notChosenColour(1,red0), notChosenColour(2,blue0), notChosenColour(2,red0), notChosenColour(3,blue0), notChosenColour(3,green0), notChosenColour(4,blue0), notChosenColour(4,red0), notChosenColour(5,green0), notChosenColour(5,red0)}
{chosenColour(1,blue0), chosenColour(2,green0), chosenColour(3,red0), chosenColour(4,red0), chosenColour(5,blue0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,green0), notChosenColour(1,red0), notChosenColour(2,blue0), notChosenColour(2,red0), notChosenColour(3,blue0), notChosenColour(3,green0), notChosenColour(4,blue0), notChosenColour(4,green0), notChosenColour(5,green0), notChosenColour(5,red0)}
{chosenColour(1,blue0), chosenColour(2,green0), chosenColour(3,red0), chosenColour(4,red0), chosenColour(5,green0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,green0), notChosenColour(1,red0), notChosenColour(2,blue0), notChosenColour(2,red0), notChosenColour(3,blue0), notChosenColour(3,green0), notChosenColour(4,blue0), notChosenColour(4,green0), notChosenColour(5,blue0), notChosenColour(5,red0)}
{chosenColour(1,blue0), chosenColour(2,red0), chosenColour(3,green0), chosenColour(4,blue0), chosenColour(5,red0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,green0), notChosenColour(1,red0), notChosenColour(2,blue0), notChosenColour(2,green0), notChosenColour(3,blue0), notChosenColour(3,red0), notChosenColour(4,green0), notChosenColour(4,red0), notChosenColour(5,blue0), notChosenColour(5,green0)}
{chosenColour(1,blue0), chosenColour(2,red0), chosenColour(3,green0), chosenColour(4,green0), chosenColour(5,blue0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,green0), notChosenColour(1,red0), notChosenColour(2,blue0), notChosenColour(2,green0), notChosenColour(3,blue0), notChosenColour(3,red0), notChosenColour(4,blue0), notChosenColour(4,red0), notChosenColour(5,green0), notChosenColour(5,red0)}
{chosenColour(1,blue0), chosenColour(2,red0), chosenColour(3,green0), chosenColour(4,green0), chosenColour(5,red0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,green0), notChosenColour(1,red0), notChosenColour(2,blue0), notChosenColour(2,green0), notChosenColour(3,blue0), notChosenColour(3,red0), notChosenColour(4,blue0), notChosenColour(4,red0), notChosenColour(5,blue0), notChosenColour(5,green0)}
{chosenColour(1,blue0), chosenColour(2,red0), chosenColour(3,green0), chosenColour(4,red0), chosenColour(5,blue0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,green0), notChosenColour(1,red0), notChosenColour(2,blue0), notChosenColour(2,green0), notChosenColour(3,blue0), notChosenColour(3,red0), notChosenColour(4,blue0), notChosenColour(4,green0), notChosenColour(5,green0), notChosenColour(5,red0)}
{chosenColour(1,green0), chosenColour(2,blue0), chosenColour(3,red0), chosenColour(4,blue0), chosenColour(5,green0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,red0), notChosenColour(2,green0), notChosenColour(2,red0), notChosenColour(3,blue0), notChosenColour(3,green0), notChosenColour(4,green0), notChosenColour(4,red0), notChosenColour(5,blue0), notChosenColour(5,red0)}
{chosenColour(1,green0), chosenColour(2,blue0), chosenColour(3,red0), chosenColour(4,green0), chosenColour(5,blue0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,red0), notChosenColour(2,green0), notChosenColour(2,red0), notChosenColour(3,blue0), notChosenColour(3,green0), notChosenColour(4,blue0), notChosenColour(4,red0), notChosenColour(5,green0), notChosenColour(5,red0)}
{chosenColour(1,green0), chosenColour(2,blue0), chosenColour(3,red0), chosenColour(4,red0), chosenColour(5,blue0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,red0), notChosenColour(2,green0), notChosenColour(2,red0), notChosenColour(3,blue0), notChosenColour(3,green0), notChosenColour(4,blue0), notChosenColour(4,green0), notChosenColour(5,green0), notChosenColour(5,red0)}
{chosenColour(1,green0), chosenColour(2,blue0), chosenColour(3,red0), chosenColour(4,red0), chosenColour(5,green0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,red0), notChosenColour(2,green0), notChosenColour(2,red0), notChosenColour(3,blue0), notChosenColour(3,green0), notChosenColour(4,blue0), notChosenColour(4,green0), notChosenColour(5,blue0), notChosenColour(5,red0)}
{chosenColour(1,green0), chosenColour(2,red0), chosenColour(3,blue0), chosenColour(4,blue0), chosenColour(5,green0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,red0), notChosenColour(2,blue0), notChosenColour(2,green0), notChosenColour(3,green0), notChosenColour(3,red0), notChosenColour(4,green0), notChosenColour(4,red0), notChosenColour(5,blue0), notChosenColour(5,red0)}
{chosenColour(1,green0), chosenColour(2,red0), chosenColour(3,blue0), chosenColour(4,blue0), chosenColour(5,red0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,red0), notChosenColour(2,blue0), notChosenColour(2,green0), notChosenColour(3,green0), notChosenColour(3,red0), notChosenColour(4,green0), notChosenColour(4,red0), notChosenColour(5,blue0), notChosenColour(5,green0)}
{chosenColour(1,green0), chosenColour(2,red0), chosenColour(3,blue0), chosenColour(4,green0), chosenColour(5,red0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,red0), notChosenColour(2,blue0), notChosenColour(2,green0), notChosenColour(3,green0), notChosenColour(3,red0), notChosenColour(4,blue0), notChosenColour(4,red0), notChosenColour(5,blue0), notChosenColour(5,green0)}
{chosenColour(1,green0), chosenColour(2,red0), chosenColour(3,blue0), chosenColour(4,red0), chosenColour(5,green0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,red0), notChosenColour(2,blue0), notChosenColour(2,green0), notChosenColour(3,green0), notChosenColour(3,red0), notChosenColour(4,blue0), notChosenColour(4,green0), notChosenColour(5,blue0), notChosenColour(5,red0)}
{chosenColour(1,red0), chosenColour(2,blue0), chosenColour(3,green0), chosenColour(4,blue0), chosenColour(5,red0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,green0), notChosenColour(2,green0), notChosenColour(2,red0), notChosenColour(3,blue0), notChosenColour(3,red0), notChosenColour(4,green0), notChosenColour(4,red0), notChosenColour(5,blue0), notChosenColour(5,green0)}
{chosenColour(1,red0), chosenColour(2,blue0), chosenColour(3,green0), chosenColour(4,green0), chosenColour(5,blue0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,green0), notChosenColour(2,green0), notChosenColour(2,red0), notChosenColour(3,blue0), notChosenColour(3,red0), notChosenColour(4,blue0), notChosenColour(4,red0), notChosenColour(5,green0), notChosenColour(5,red0)}
{chosenColour(1,red0), chosenColour(2,blue0), chosenColour(3,green0), chosenColour(4,green0), chosenColour(5,red0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,green0), notChosenColour(2,green0), notChosenColour(2,red0), notChosenColour(3,blue0), notChosenColour(3,red0), notChosenColour(4,blue0), notChosenColour(4,red0), notChosenColour(5,blue0), notChosenColour(5,green0)}
{chosenColour(1,red0), chosenColour(2,blue0), chosenColour(3,green0), chosenColour(4,red0), chosenColour(5,blue0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,green0), notChosenColour(2,green0), notChosenColour(2,red0), notChosenColour(3,blue0), notChosenColour(3,red0), notChosenColour(4,blue0), notChosenColour(4,green0), notChosenColour(5,green0), notChosenColour(5,red0)}
{chosenColour(1,red0), chosenColour(2,green0), chosenColour(3,blue0), chosenColour(4,blue0), chosenColour(5,green0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,green0), notChosenColour(2,blue0), notChosenColour(2,red0), notChosenColour(3,green0), notChosenColour(3,red0), notChosenColour(4,green0), notChosenColour(4,red0), notChosenColour(5,blue0), notChosenColour(5,red0)}
{chosenColour(1,red0), chosenColour(2,green0), chosenColour(3,blue0), chosenColour(4,blue0), chosenColour(5,red0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,green0), notChosenColour(2,blue0), notChosenColour(2,red0), notChosenColour(3,green0), notChosenColour(3,red0), notChosenColour(4,green0), notChosenColour(4,red0), notChosenColour(5,blue0), notChosenColour(5,green0)}
{chosenColour(1,red0), chosenColour(2,green0), chosenColour(3,blue0), chosenColour(4,green0), chosenColour(5,red0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,green0), notChosenColour(2,blue0), notChosenColour(2,red0), notChosenColour(3,green0), notChosenColour(3,red0), notChosenColour(4,blue0), notChosenColour(4,red0), notChosenColour(5,blue0), notChosenColour(5,green0)}
{chosenColour(1,red0), chosenColour(2,green0), chosenColour(3,blue0), chosenColour(4,red0), chosenColour(5,green0), colour(blue0), colour(green0), colour(red0), link(1,2), link(1,3), link(2,1), link(2,3), link(3,1), link(3,2), link(3,5), link(4,5), link(5,3), link(5,4), node(1), node(2), node(3), node(4), node(5), notChosenColour(1,blue0), notChosenColour(1,green0), notChosenColour(2,blue0), notChosenColour(2,red0), notChosenColour(3,green0), notChosenColour(3,red0), notChosenColour(4,blue0), notChosenColour(4,green0), notChosenColour(5,blue0), notChosenColour(5,red0)}
"""
|
Yarrick13/hwasp
|
tests/wasp1/AllAnswerSets/3col_aggregates_1_5_enc2.test.py
|
Python
|
apache-2.0
| 14,386
|
# Copyright (c) Frederick Dean
# See LICENSE for details.
"""
Unit tests for :py:obj:`OpenSSL.rand`.
"""
from unittest import main
import os
import stat
from OpenSSL.test.util import TestCase, b
from OpenSSL import rand
class RandTests(TestCase):
def test_bytes_wrong_args(self):
"""
:py:obj:`OpenSSL.rand.bytes` raises :py:obj:`TypeError` if called with the wrong
number of arguments or with a non-:py:obj:`int` argument.
"""
self.assertRaises(TypeError, rand.bytes)
self.assertRaises(TypeError, rand.bytes, None)
self.assertRaises(TypeError, rand.bytes, 3, None)
# XXX Test failure of the malloc() in rand_bytes.
def test_bytes(self):
"""
Verify that we can obtain bytes from rand_bytes() and
that they are different each time. Test the parameter
of rand_bytes() for bad values.
"""
b1 = rand.bytes(50)
self.assertEqual(len(b1), 50)
b2 = rand.bytes(num_bytes=50) # parameter by name
        self.assertNotEqual(b1, b2)  # Hip, Hip, Hooray! FIPS compliance
b3 = rand.bytes(num_bytes=0)
self.assertEqual(len(b3), 0)
exc = self.assertRaises(ValueError, rand.bytes, -1)
self.assertEqual(str(exc), "num_bytes must not be negative")
def test_add_wrong_args(self):
"""
When called with the wrong number of arguments, or with arguments not of
type :py:obj:`str` and :py:obj:`int`, :py:obj:`OpenSSL.rand.add` raises :py:obj:`TypeError`.
"""
self.assertRaises(TypeError, rand.add)
self.assertRaises(TypeError, rand.add, b("foo"), None)
self.assertRaises(TypeError, rand.add, None, 3)
self.assertRaises(TypeError, rand.add, b("foo"), 3, None)
def test_add(self):
"""
:py:obj:`OpenSSL.rand.add` adds entropy to the PRNG.
"""
rand.add(b('hamburger'), 3)
def test_seed_wrong_args(self):
"""
When called with the wrong number of arguments, or with a non-:py:obj:`str`
argument, :py:obj:`OpenSSL.rand.seed` raises :py:obj:`TypeError`.
"""
self.assertRaises(TypeError, rand.seed)
self.assertRaises(TypeError, rand.seed, None)
self.assertRaises(TypeError, rand.seed, b("foo"), None)
def test_seed(self):
"""
:py:obj:`OpenSSL.rand.seed` adds entropy to the PRNG.
"""
rand.seed(b('milk shake'))
def test_status_wrong_args(self):
"""
:py:obj:`OpenSSL.rand.status` raises :py:obj:`TypeError` when called with any
arguments.
"""
self.assertRaises(TypeError, rand.status, None)
def test_status(self):
"""
:py:obj:`OpenSSL.rand.status` returns :py:obj:`True` if the PRNG has sufficient
entropy, :py:obj:`False` otherwise.
"""
# It's hard to know what it is actually going to return. Different
# OpenSSL random engines decide differently whether they have enough
# entropy or not.
self.assertTrue(rand.status() in (1, 2))
def test_egd_wrong_args(self):
"""
:py:obj:`OpenSSL.rand.egd` raises :py:obj:`TypeError` when called with the wrong
number of arguments or with arguments not of type :py:obj:`str` and :py:obj:`int`.
"""
self.assertRaises(TypeError, rand.egd)
self.assertRaises(TypeError, rand.egd, None)
self.assertRaises(TypeError, rand.egd, "foo", None)
self.assertRaises(TypeError, rand.egd, None, 3)
self.assertRaises(TypeError, rand.egd, "foo", 3, None)
def test_egd_missing(self):
"""
:py:obj:`OpenSSL.rand.egd` returns :py:obj:`0` or :py:obj:`-1` if the
EGD socket passed to it does not exist.
"""
result = rand.egd(self.mktemp())
expected = (-1, 0)
self.assertTrue(
result in expected,
"%r not in %r" % (result, expected))
def test_cleanup_wrong_args(self):
"""
:py:obj:`OpenSSL.rand.cleanup` raises :py:obj:`TypeError` when called with any
arguments.
"""
self.assertRaises(TypeError, rand.cleanup, None)
def test_cleanup(self):
"""
:py:obj:`OpenSSL.rand.cleanup` releases the memory used by the PRNG and returns
:py:obj:`None`.
"""
self.assertIdentical(rand.cleanup(), None)
def test_load_file_wrong_args(self):
"""
        :py:obj:`OpenSSL.rand.load_file` raises :py:obj:`TypeError` when called with the
        wrong number of arguments or with arguments not of type :py:obj:`str` and :py:obj:`int`.
"""
self.assertRaises(TypeError, rand.load_file)
self.assertRaises(TypeError, rand.load_file, "foo", None)
self.assertRaises(TypeError, rand.load_file, None, 1)
self.assertRaises(TypeError, rand.load_file, "foo", 1, None)
def test_write_file_wrong_args(self):
"""
:py:obj:`OpenSSL.rand.write_file` raises :py:obj:`TypeError` when called with the
wrong number of arguments or a non-:py:obj:`str` argument.
"""
self.assertRaises(TypeError, rand.write_file)
self.assertRaises(TypeError, rand.write_file, None)
self.assertRaises(TypeError, rand.write_file, "foo", None)
def test_files(self):
"""
Test reading and writing of files via rand functions.
"""
# Write random bytes to a file
tmpfile = self.mktemp()
# Make sure it exists (so cleanup definitely succeeds)
fObj = open(tmpfile, 'w')
fObj.close()
try:
rand.write_file(tmpfile)
# Verify length of written file
size = os.stat(tmpfile)[stat.ST_SIZE]
            self.assertEqual(size, 1024)
# Read random bytes from file
rand.load_file(tmpfile)
rand.load_file(tmpfile, 4) # specify a length
finally:
# Cleanup
os.unlink(tmpfile)
if __name__ == '__main__':
main()
|
msabramo/pyOpenSSL
|
OpenSSL/test/test_rand.py
|
Python
|
apache-2.0
| 6,054
|
# Copyright 2016 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from binascii import hexlify
import mock
import socket
import unittest
from networking_cisco.plugins.cisco.cpnr.cpnr_client import UnexpectedError
from networking_cisco.plugins.cisco.cpnr.cpnr_dns_relay_agent import (
DnsRelayAgent)
from networking_cisco.plugins.cisco.cpnr.cpnr_dns_relay_agent import cfg
from networking_cisco.plugins.cisco.cpnr.cpnr_dns_relay_agent import DnsPacket
from networking_cisco.plugins.cisco.cpnr.cpnr_dns_relay_agent import OPTS
class TestDnsRelayAgent(unittest.TestCase):
@mock.patch('networking_cisco.plugins.cisco.'
'cpnr.cpnr_dns_relay_agent.netns')
@mock.patch('socket.socket')
def test_open_dns_ext_socket(self,
mock_socket,
mock_netns):
cfg.CONF.register_opts(OPTS, 'cisco_pnr')
relay = DnsRelayAgent()
mock_netns.iflist.return_value = []
mock_netns.iflist.return_value.append(('lo', '127.0.0.1', '255.0.0.0'))
sock = mock_socket.return_value
sock.getsockname.return_value = ('127.0.0.1', 123456)
sock, addr, port = relay._open_dns_ext_socket()
mock_socket.assert_has_calls([
mock.call(socket.AF_INET, socket.SOCK_DGRAM),
mock.call().bind(('127.0.0.1', 0)),
mock.call().getsockname(),
mock.call().connect(('127.0.0.1', 53))]
)
# check exception thrown if no interfaces
with self.assertRaises(UnexpectedError):
mock_netns.iflist.return_value = []
sock, addr, port = relay._open_dns_ext_socket()
# check exception thrown if no matching interfaces
with self.assertRaises(UnexpectedError):
mock_netns.iflist.return_value = []
mock_netns.iflist.return_value.append(('eth0', '10.0.0.10',
'255.255.255.0'))
sock, addr, port = relay._open_dns_ext_socket()
# check matching interface found if not first in list
mock_netns.iflist.return_value = []
mock_netns.iflist.return_value.append(('eth0', '10.0.0.10',
'255.255.255.0'))
mock_netns.iflist.return_value.append(('lo', '127.0.0.1', '255.0.0.0'))
sock, addr, port = relay._open_dns_ext_socket()
@mock.patch('networking_cisco.plugins.cisco.'
'cpnr.cpnr_dns_relay_agent.netns')
@mock.patch('socket.socket')
def test_open_dns_int_socket(self,
mock_socket,
mock_netns):
cfg.CONF.register_opts(OPTS, 'cisco_pnr')
relay = DnsRelayAgent()
mock_netns.iflist.return_value = []
mock_netns.iflist.return_value.append(('eth0', '10.21.1.13',
'255.255.255.0'))
sock, addr, port = relay._open_dns_int_socket()
self.assertTrue(mock_netns.iflist.called, "Failed to call iflist.")
mock_socket.assert_has_calls([
mock.call(socket.AF_INET, socket.SOCK_DGRAM),
mock.call().setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1),
mock.call().bind(('10.21.1.13', 53))]
)
# check exception thrown if no interfaces
with self.assertRaises(UnexpectedError):
mock_netns.iflist.return_value = []
sock, addr, port = relay._open_dns_int_socket()
def test_convert_namespace_to_viewid(self):
cfg.CONF.register_opts(OPTS, 'cisco_pnr')
relay = DnsRelayAgent()
namespace = 'qdhcp-d7c31f74-5d9e-47b7-86f2-64879023c04d'
viewid = relay._convert_namespace_to_viewid(namespace)
tmp = 0x64879023c04d & 0x7fffffff
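        # 0x64879023c04d is the trailing 12 hex digits of the namespace UUID;
        # masking with 0x7fffffff keeps the low 31 bits, so the expected view
        # id is a non-negative 32-bit integer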
self.assertEqual(viewid, str(tmp))
class TestDnsPacket(unittest.TestCase):
def test_parse(self):
# test regular DNS request
line = ('84 a5 01 00 00 01 00 00 00 00 00 00 06 72 '
'65 64 68 61 74 03 63 6f 6d 00 00 01 00 01')
buf = bytearray.fromhex(line)
pkt = DnsPacket.parse(buf, 28)
self.assertEqual(0x84a5, pkt.get_msgid())
self.assertTrue(pkt.isreq)
self.assertEqual(0, pkt.arcnt)
self.assertEqual(0, pkt.optlen)
self.assertEqual(28, pkt.txt_insert_pos)
# test DNS request with EDNS0
line = ('81 71 01 20 00 01 00 00 00 00 00 01 06 72 65 '
'64 68 61 74 03 63 6f 6d 00 00 01 00 01 00 00 '
'29 10 00 00 00 00 00 00 00')
buf = bytearray.fromhex(line)
pkt = DnsPacket.parse(buf, 38)
self.assertEqual(0x8171, pkt.get_msgid())
self.assertTrue(pkt.isreq)
self.assertEqual(1, pkt.arcnt)
self.assertEqual(10, pkt.optlen)
self.assertEqual(28, pkt.txt_insert_pos)
# test regular DNS response
line = ('b6 5e 81 80 00 01 00 01 00 00 00 00 06 72 65 '
'64 68 61 74 03 63 6f 6d 00 00 01 00 01 c0 0c '
'00 01 00 01 00 00 00 08 00 04 d1 84 b7 69')
buf = bytearray.fromhex(line)
pkt = DnsPacket.parse(buf, 44)
self.assertEqual(0xb65e, pkt.get_msgid())
self.assertFalse(pkt.isreq)
self.assertEqual(0, pkt.arcnt)
self.assertEqual(0, pkt.optlen)
self.assertEqual(-1, pkt.txt_insert_pos)
def test_set_viewid(self):
pkt = DnsPacket()
pkt.set_viewid('123456789')
self.assertEqual(pkt.viewid, '123456789')
def test_data(self):
# call with regular DNS request
line = ('84 a5 01 00 00 01 00 00 00 00 00 00 06 72 '
'65 64 68 61 74 03 63 6f 6d 00 00 01 00 01')
buf = bytearray.fromhex(line)
pktbuf = bytearray(4096)
pktbuf[0:len(buf)] = buf
pkt = DnsPacket.parse(pktbuf, 28)
pkt.set_viewid('123456')
mod_buf = pkt.data()
self.assertEqual(pkt.arcnt, 1)
hextxtstr = hexlify(DnsPacket.TXT_RR)
hexstr = hexlify(mod_buf)
self.assertNotEqual(-1, hexstr.find(hextxtstr))
# call with DNS request with EDNS0
line = ('81 71 01 20 00 01 00 00 00 00 00 01 06 72 65 '
'64 68 61 74 03 63 6f 6d 00 00 01 00 01 00 00 '
'29 10 00 00 00 00 00 00 00')
buf = bytearray.fromhex(line)
pktbuf = bytearray(4096)
pktbuf[0:len(buf)] = buf
pkt = DnsPacket.parse(pktbuf, 38)
pkt.set_viewid('123456')
mod_buf = pkt.data()
self.assertEqual(2, pkt.arcnt)
hexstr = hexlify(mod_buf)
self.assertNotEqual(-1, hexstr.find(hextxtstr))
def test_skip_over_domain_name(self):
# test skip over name at beginning, end up on ^
# 4test5cisco3com0^
bytes = bytearray(b'\x04\x74\x65\x73\x74\x05\x63\x69\x73\x63'
b'\x6f\x03\x63\x6f\x6d\x00\x5e')
pos = DnsPacket.skip_over_domain_name(bytes, 0)
self.assertEqual(16, pos)
self.assertEqual('^', chr(bytes[pos]))
# test skip over name in the middle, end up on ^
# 2552552552554test5cisco3com0^
bytes = bytearray(b'\xff\xff\xff\xff\x04\x74\x65\x73\x74\x05\x63'
b'\x69\x73\x63\x6f\x03\x63\x6f\x6d\x00\x5e')
pos = DnsPacket.skip_over_domain_name(bytes, 4)
self.assertEqual(20, pos)
self.assertEqual('^', chr(bytes[pos]))
# test skip over length and pointer at beginning, end up on ^
bytes = bytearray(b'\xc0\x55\x5e')
pos = DnsPacket.skip_over_domain_name(bytes, 0)
self.assertEqual(2, pos)
self.assertEqual('^', chr(bytes[pos]))
# test skip over length and pointer in the middle, end up on ^
bytes = bytearray(b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xc0\x55\x5e')
pos = DnsPacket.skip_over_domain_name(bytes, 9)
self.assertEqual(11, pos)
self.assertEqual('^', chr(bytes[pos]))
|
Tehsmash/networking-cisco
|
networking_cisco/tests/unit/cisco/cpnr/test_dns_relay.py
|
Python
|
apache-2.0
| 8,534
|
import os
# toolchains options
ARCH='arm'
CPU='cortex-m4'
CROSS_TOOL='gcc'
# bsp lib config
BSP_LIBRARY_TYPE = None
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
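# e.g. `set RTT_CC=keil` (Windows) or `export RTT_CC=keil` selects the armcc
# branch below; RTT_EXEC_PATH can override EXEC_PATH in the same way.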
if os.getenv('RTT_ROOT'):
RTT_ROOT = os.getenv('RTT_ROOT')
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'C:\Users\XXYYZZ'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = r'C:/Keil_v5'
elif CROSS_TOOL == 'iar':
PLATFORM = 'iar'
EXEC_PATH = r'C:/Program Files (x86)/IAR Systems/Embedded Workbench 8.0'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
CXX = PREFIX + 'g++'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m4 -mthumb -mfpu=fpv4-sp-d16 -mfloat-abi=hard -ffunction-sections -fdata-sections'
CFLAGS = DEVICE + ' -Dgcc'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rt-thread.map,-cref,-u,Reset_Handler -T board/linker_scripts/link.lds'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2 -g'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
CXX = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --cpu Cortex-M4.fp '
CFLAGS = '-c ' + DEVICE + ' --apcs=interwork --c99'
AFLAGS = DEVICE + ' --apcs=interwork '
    LFLAGS = DEVICE + r' --info sizes --info totals --info unused --info veneers --list rtthread.map --strict --scatter "board\linker_scripts\link.sct"'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/ARMCC/include'
LFLAGS += ' --libpath=' + EXEC_PATH + '/ARM/ARMCC/lib'
CFLAGS += ' -D__MICROLIB '
AFLAGS += ' --pd "__MICROLIB SETA 1" '
LFLAGS += ' --library_type=microlib '
EXEC_PATH += '/ARM/ARMCC/bin/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
CFLAGS += ' -std=c99'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
# toolchains
CC = 'iccarm'
CXX = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = '-Dewarm'
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=Cortex-M4'
CFLAGS += ' -e'
CFLAGS += ' --fpu=VFPv4_sp'
CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' --silent'
AFLAGS = DEVICE
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu Cortex-M4'
AFLAGS += ' --fpu VFPv4_sp'
AFLAGS += ' -S'
if BUILD == 'debug':
CFLAGS += ' --debug'
CFLAGS += ' -On'
else:
CFLAGS += ' -Oh'
LFLAGS = ' --config "board/linker_scripts/link.icf"'
LFLAGS += ' --entry __iar_program_start'
CXXFLAGS = CFLAGS
EXEC_PATH = EXEC_PATH + '/arm/bin/'
POST_ACTION = 'ielftool --bin $TARGET rtthread.bin'
|
FlyLu/rt-thread
|
bsp/stm32/stm32l475-st-discovery/rtconfig.py
|
Python
|
apache-2.0
| 3,831
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Unique operator"""
from tvm import te, tir
from ..te import hybrid
from .scan import cumsum
from .sort import sort, argsort
def _calc_adjacent_diff_ir(data, output, binop=tir.Sub):
"""Low level IR to calculate adjacent difference in an 1-D array.
Parameters
----------
data : Buffer
Input 1-D Buffer.
output: Buffer
A buffer to store adjacent difference, of the same shape as data. The adjacent difference
is defined as: output[0] = 0, output[i] = binop(data[i], data[i-1])
where i > 0 and i < len(data).
binop: function, optional
A binary associative op to use for calculating adjacent difference. The function takes two
TIR expressions and produce a new TIR expression. By default it uses tvm.tir.Sub to
compute the adjacent difference.
"""
ib = tir.ir_builder.create()
data_ptr = ib.buffer_ptr(data)
output_ptr = ib.buffer_ptr(output)
with ib.for_range(0, data.shape[0], kind="parallel") as i:
with ib.if_scope(i == 0):
output_ptr[0] = 0
with ib.else_scope():
output_ptr[i] = tir.Cast(output.dtype, binop(data_ptr[i], data_ptr[i - 1]))
return ib.get()
def _calc_adjacent_diff(data, out_dtype="int32", binop=tir.Sub):
"""Function calculate adjacent difference in an 1-D array.
Parameters
----------
data : tvm.te.Tensor
Input 1-D tensor.
output_dtype : str
The output tensor data type.
binop: function, optional
A binary associative op to use for calculating difference. The function takes two
TIR expressions and produce a new TIR expression. By default it uses tvm.tir.Sub to
compute the adjacent difference.
Returns
-------
output : tvm.te.Tensor
1-D tensor storing the adjacent difference of the input tensor. The adjacent difference
is defined as: output[0] = 0, output[i] = binop(data[i], data[i-1])
where i > 0 and i < len(data).
"""
return te.extern(
[data.shape],
[data],
lambda ins, outs: _calc_adjacent_diff_ir(ins[0], outs[0], binop=binop),
dtype=[out_dtype],
name="_calc_adjacent_diff",
tag="_calc_adjacent_diff_cpu",
)
@hybrid.script
def _calc_num_unique(inc_scan):
"""Helper function to get the number of unique elements fron inc_scan tensor"""
output = output_tensor((1,), "int32")
output[0] = inc_scan[inc_scan.shape[0] - 1] + int32(1)
return output
def _calc_unique_ir(
data, argsorted_indices, inc_scan, index_converter, unique_elements, inverse_indices, counts
):
"""Low level IR to calculate unique elements, inverse indices, and counts (optional) of
unique elements of 1-D array.
Parameters
----------
data : Buffer
Input 1-D Buffer.
argsorted_indices : Buffer
A buffer that stores the argsorted indices of the input data.
inc_scan : Buffer
A buffer that stores the inclusive scan of the binary tir.NE adjacent difference
of the sorted data.
index_converter (optional) : Buffer
An optional index converter that transforms the unique element index
such that new_idx = index_converter[old_idx].
unique_elements : Buffer
A buffer that stores the unique elements.
inverse_indices : Buffer
        A buffer that stores the index of each input data element in the unique element array.
counts (optional) : Buffer
A buffer that stores the count of each unique element.
"""
ib = tir.ir_builder.create()
data_ptr = ib.buffer_ptr(data)
argsorted_indices_ptr = ib.buffer_ptr(argsorted_indices)
inc_scan_ptr = ib.buffer_ptr(inc_scan)
unique_elements_ptr = ib.buffer_ptr(unique_elements)
inverse_indices_ptr = ib.buffer_ptr(inverse_indices)
index_converter_ptr = None
if isinstance(index_converter, tir.Buffer):
index_converter_ptr = ib.buffer_ptr(index_converter)
if isinstance(counts, tir.Buffer):
counts_ptr = ib.buffer_ptr(counts)
    # reuse the inverse_indices buffer as a tmp buffer to store tids where inc_scan[tid] != inc_scan[tid-1]
unique_seq_indices_ptr = ib.buffer_ptr(inverse_indices)
data_length = data.shape[0]
# if need to return counts
if isinstance(counts, tir.Buffer):
num_unique = inc_scan_ptr[inc_scan.shape[0] - 1] + 1
num_elements = data.shape[0]
unique_seq_indices_ptr[num_unique - 1] = num_elements
with ib.new_scope():
with ib.for_range(0, data_length, kind="parallel") as i:
with ib.if_scope(i > 0):
with ib.if_scope(inc_scan_ptr[i] != inc_scan_ptr[i - 1]):
unique_seq_indices_ptr[inc_scan_ptr[i] - 1] = i
with ib.new_scope():
with ib.for_range(0, num_unique, kind="parallel") as i:
unique_idx = i if not index_converter_ptr else index_converter_ptr[i]
with ib.if_scope(i == 0):
counts_ptr[unique_idx] = unique_seq_indices_ptr[i]
with ib.else_scope():
counts_ptr[unique_idx] = (
unique_seq_indices_ptr[i] - unique_seq_indices_ptr[i - 1]
)
# calculate unique elements and inverse indices
with ib.new_scope():
with ib.for_range(0, data_length, kind="parallel") as i:
data_idx = argsorted_indices_ptr[i]
unique_idx = (
inc_scan_ptr[i] if not index_converter_ptr else index_converter_ptr[inc_scan_ptr[i]]
)
inverse_indices_ptr[data_idx] = unique_idx
with ib.if_scope(i == 0):
unique_elements_ptr[unique_idx] = data_ptr[data_idx]
with ib.else_scope():
with ib.if_scope(inc_scan_ptr[i] != inc_scan_ptr[i - 1]):
unique_elements_ptr[unique_idx] = data_ptr[data_idx]
return ib.get()
@hybrid.script
def _calc_first_occurence(argsorted_indices, inc_scan):
"""Hybrid script to calculate the first occurence of each unique element in the input data.
Parameters
----------
argsorted_indices : tvm.te.Tensor
A tensor that stores the argsorted indices of the input data.
inc_scan : tvm.te.Tensor
A tensor that stores the inclusive scan of the binary tir.NE adjacent difference
of the sorted data.
    Returns
    -------
    first_occurence : tvm.te.Tensor
        A tensor that stores the first occurrence of each unique element in the input data.
"""
first_occurence = output_tensor(argsorted_indices.shape, "int32")
for i in parallel(argsorted_indices.shape[0]):
first_occurence[i] = argsorted_indices.shape[0]
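    # only sorted positions where a new unique value starts write below, so
    # each unique id records the argsorted index of its first occurrence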
for i in parallel(argsorted_indices.shape[0]):
if i == 0 or inc_scan[i] != inc_scan[i - 1]:
first_occurence[inc_scan[i]] = argsorted_indices[i]
return first_occurence
def unique(data, is_sorted=True, return_counts=False):
"""
    Find the unique elements of a 1-D tensor. Note that `output` and `counts` are padded to
    the same length as `data`; elements with index >= num_unique[0] have undefined values.
Parameters
----------
data : tvm.te.Tensor
A 1-D tensor of integers.
    is_sorted : bool
Whether to sort the unique elements in ascending order before returning as output.
return_counts : bool
Whether to return the count of each unique element.
Returns
-------
unique : tvm.te.Tensor
        A 1-D tensor containing the unique elements of the input data tensor. The same size as
        the input data. If there are fewer unique elements than input elements, the end of the
        tensor is padded with zeros.
indices : tvm.te.Tensor
A 1-D tensor. The same size as output. For each entry in output, it contains
        the index of its first occurrence in the input data. The end of the tensor is padded
with the length of the input data.
inverse_indices : tvm.te.Tensor
A 1-D tensor. For each entry in data, it contains the index of that data element in
the unique array. (Note that inverse_indices is very similar to indices if output is not
sorted.)
num_unique : tvm.te.Tensor
A 1-D tensor with size=1 containing the number of unique elements in the input data tensor.
counts (optional) : tvm.te.Tensor
A 1-D tensor containing the count of each unique element in the output.
Examples
--------
.. code-block:: python
[output, indices, num_unique] = unique([4, 5, 1, 2, 3, 3, 4, 5], False, False)
output = [4, 5, 1, 2, 3, _, _, _]
indices = [0, 1, 2, 3, 4, _, _, _]
inverse_indices = [0, 1, 2, 3, 4, 4, 0, 1]
num_unique = [5]
[output, indices, num_unique, counts] = unique([4, 5, 1, 2, 3, 3, 4, 5], False, True)
output = [4, 5, 1, 2, 3, _, _, _]
indices = [0, 1, 2, 3, 4, _, _, _]
inverse_indices = [0, 1, 2, 3, 4, 4, 0, 1]
num_unique = [5]
counts = [2, 2, 1, 1, 2, _, _, _]
[output, indices, num_unique] = unique([4, 5, 1, 2, 3, 3, 4, 5], True)
output = [1, 2, 3, 4, 5, _, _, _]
indices = [2, 3, 4, 0, 1, _, _, _]
inverse_indices = [3, 4, 0, 1, 2, 2, 3, 4]
num_unique = [5]
"""
sorted_data = sort(data)
argsorted_indices = argsort(data, dtype="int32")
# adjacent difference
adjacent_diff = _calc_adjacent_diff(sorted_data, "int32", tir.NE)
# inclusive scan
inc_scan = cumsum(adjacent_diff, dtype="int32", exclusive=0)
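    # Worked example (mirrors the docstring above): for data = [4, 5, 1, 2, 3, 3, 4, 5]
    #   sorted_data   = [1, 2, 3, 3, 4, 4, 5, 5]
    #   adjacent_diff = [0, 1, 1, 0, 1, 0, 1, 0]   (tir.NE of neighbours)
    #   inc_scan      = [0, 1, 2, 2, 3, 3, 4, 4]
    # so the number of unique elements is inc_scan[-1] + 1 = 5.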
# total number of unique elements
num_unique_elements = _calc_num_unique(inc_scan)
# prepare outputs
if return_counts:
out_data_shape = [data.shape] * 3
out_dtypes = [data.dtype, "int32", "int32"]
else:
out_data_shape = [data.shape] * 2
out_dtypes = [data.dtype, "int32"]
# prepare inputs and fcompute
first_occurence = _calc_first_occurence(argsorted_indices, inc_scan)
if is_sorted:
in_data = [data, argsorted_indices, inc_scan]
if return_counts:
fcompute = lambda ins, outs: _calc_unique_ir(*ins, None, *outs)
else:
fcompute = lambda ins, outs: _calc_unique_ir(*ins, None, *outs, None)
indices = first_occurence
else:
        # calculate index converter by sorting unique elements by their first occurrence
argsorted_first_occurence = argsort(first_occurence, dtype="int32")
index_converter = argsort(argsorted_first_occurence, dtype="int32")
in_data = [data, argsorted_indices, inc_scan, index_converter]
if return_counts:
fcompute = lambda ins, outs: _calc_unique_ir(*ins, *outs)
else:
fcompute = lambda ins, outs: _calc_unique_ir(*ins, *outs, None)
        # first_occurence is in the order of the sorted unique output, so sorting the
        # first_occurence array gives the correct result
indices = sort(first_occurence)
outs = te.extern(
out_data_shape,
in_data,
fcompute,
dtype=out_dtypes,
name="_calc_unique",
tag="_calc_unique_cpu",
)
if return_counts:
return [outs[0], indices, outs[1], num_unique_elements, outs[2]]
return [outs[0], indices, outs[1], num_unique_elements]
|
Laurawly/tvm-1
|
python/tvm/topi/unique.py
|
Python
|
apache-2.0
| 12,249
|
def this_is_the_outer_lib():
    print('For imports test')
|
fingeronthebutton/RIDE
|
utest/resources/robotdata/imports/outer_lib.py
|
Python
|
apache-2.0
| 58
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.orchestration import base
from tempest.common.utils import data_utils
from tempest.openstack.common import log as logging
from tempest.test import attr
LOG = logging.getLogger(__name__)
class StacksTestJSON(base.BaseOrchestrationTest):
_interface = 'json'
empty_template = "HeatTemplateFormatVersion: '2012-12-12'\n"
@classmethod
def setUpClass(cls):
super(StacksTestJSON, cls).setUpClass()
cls.client = cls.orchestration_client
@attr(type='smoke')
def test_stack_list_responds(self):
resp, stacks = self.client.list_stacks()
self.assertEqual('200', resp['status'])
self.assertIsInstance(stacks, list)
@attr(type='smoke')
def test_stack_crud_no_resources(self):
stack_name = data_utils.rand_name('heat')
# create the stack
stack_identifier = self.create_stack(
stack_name, self.empty_template)
stack_id = stack_identifier.split('/')[1]
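        # the identifier has the form '<stack_name>/<stack_id>' (as the split
        # above assumes), so index 1 is the bare id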
# wait for create complete (with no resources it should be instant)
self.client.wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
# check for stack in list
resp, stacks = self.client.list_stacks()
list_ids = list([stack['id'] for stack in stacks])
self.assertIn(stack_id, list_ids)
# fetch the stack
resp, stack = self.client.get_stack(stack_identifier)
self.assertEqual('CREATE_COMPLETE', stack['stack_status'])
# fetch the stack by name
resp, stack = self.client.get_stack(stack_name)
self.assertEqual('CREATE_COMPLETE', stack['stack_status'])
# fetch the stack by id
resp, stack = self.client.get_stack(stack_id)
self.assertEqual('CREATE_COMPLETE', stack['stack_status'])
# delete the stack
resp = self.client.delete_stack(stack_identifier)
self.assertEqual('204', resp[0]['status'])
|
eltonkevani/tempest_el_env
|
tempest/api/orchestration/stacks/test_stacks.py
|
Python
|
apache-2.0
| 2,542
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.amazon.aws.operators.emr_terminate_job_flow`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.amazon.aws.operators.emr_terminate_job_flow import EmrTerminateJobFlowOperator # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.amazon.aws.operators.emr_terminate_job_flow`.",
DeprecationWarning,
stacklevel=2,
)
|
airbnb/airflow
|
airflow/contrib/operators/emr_terminate_job_flow_operator.py
|
Python
|
apache-2.0
| 1,226
|
"""
WSGI config for mdotproject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
application = get_wsgi_application()
|
uw-it-aca/mdot-rest
|
docker/wsgi.py
|
Python
|
apache-2.0
| 395
|
#!/usr/bin/env python3.4
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64  # needed by getJSON() below for Basic auth encoding
import os, sys, re, urllib, json, subprocess
import time
import urllib.request
import smtplib
from email.mime.text import MIMEText
# Function for fetching JSON via HTTPS
def getJSON(url, creds = None, cookie = None):
headers = {}
if creds and len(creds) > 0:
xcreds = creds.encode(encoding='ascii', errors='replace')
auth = base64.encodebytes(xcreds).decode('ascii', errors='replace').replace("\n", '')
headers = {"Content-type": "application/json",
"Accept": "*/*",
"Authorization": "Basic %s" % auth
}
request = urllib.request.Request(url, headers = headers)
result = urllib.request.urlopen(request)
return json.loads(result.read().decode('utf-8', errors = 'replace'))
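# Example (credentials hypothetical): getJSON(url, creds='user:pass')
# base64-encodes the credentials and sends them as an HTTP Basic
# Authorization header ('Basic dXNlcjpwYXNz' for this input).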
# Get the current queue
js = getJSON("https://reporeq.apache.org/queue.json")
created = 0
# If queue is valid:
if js:
print("analysing %u items" % len(js))
# For each item:
# - Check that it hasn't been mirrored yet
# - Check that a repo with this name doesn't exist already
# - Check that name is valid
# - Mirror repo if all is okay
for item in js:
        # Make sure this is a GH integration request AND it was mirrored more than a day ago, so GH has caught up.
if not 'githubbed' in item and item['github'] == True and 'mirrordate' in item and item['mirrordate'] < (time.time()-86400):
reponame = item['name']
# Check valid name
if len(reponame) < 5 or reponame.find("..") != -1 or reponame.find("/") != -1:
print("Invalid repo name!")
continue
# Set some vars
notify = item['notify']
description = item['description'] if 'description' in item else "Unknown"
# Make sure the repo exists!
if os.path.exists("/x1/git/mirrors/%s" % reponame):
print("%s is there, adding web hooks" % reponame)
try:
xreponame = reponame.replace(".git", "") # Cut off the .git part, so GH will not bork
inp = subprocess.check_output("/usr/local/etc/git_self_serve/add-webhook.sh %s" % xreponame, shell = True).decode('ascii', 'replace')
except subprocess.CalledProcessError as err:
print("Borked: %s" % err.output)
continue
else:
print("Repo doesn't exist, ignoring this request...sort of")
# Notify reporeq that we've GH'ed this repository!
print("Notifying https://reporeq.apache.org/ss.lua?githubbed=%s" % reponame)
request = urllib.request.Request("https://reporeq.apache.org/ss.lua?githubbed=%s" % reponame)
result = urllib.request.urlopen(request)
# Inform infra@ and private@$pmc that the mirror has been set up
msg = MIMEText("New repository %s has now had GitHub integration enabled!\n\nWith regards,\nApache Infrastructure." % (reponame))
msg['Subject'] = 'Github integration set up: %s' % reponame
msg['From'] = "git@apache.org"
msg['Reply-To'] = "users@infra.apache.org"
msg['To'] = "users@infra.apache.org, private@%s.apache.org" % item['pmc']
s = smtplib.SMTP(host='mail.apache.org', port=2025)
s.send_message(msg)
s.quit()
# We made a thing!
created += 1
print("All done for today! Made %u new repos" % created)
|
sebbASF/infrastructure-puppet
|
modules/git_self_serve/files/githubcron.py
|
Python
|
apache-2.0
| 4,335
|
#!/usr/bin/env python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# mockredis
#
# This module helps start and stop redis instances for unit-testing
# redis must be pre-installed for this to work
#
import os
import signal
import subprocess
import logging
import socket
import time
import redis
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
redis_ver = '2.6.13'
redis_bdir = '/tmp/cache/' + os.environ['USER'] + '/systemless_test'
redis_url = redis_bdir + '/redis-'+redis_ver+'.tar.gz'
redis_exe = redis_bdir + '/bin/redis-server'
def install_redis():
if not os.path.exists(redis_url):
process = subprocess.Popen(['wget', '-P', redis_bdir,
'https://redis.googlecode.com/files/redis-'\
+ redis_ver + '.tar.gz'],
cwd=redis_bdir)
process.wait()
        if process.returncode != 0:
raise SystemError('wget '+redis_url)
if not os.path.exists(redis_bdir + '/redis-'+redis_ver):
process = subprocess.Popen(['tar', 'xzvf', redis_url],
cwd=redis_bdir)
process.wait()
        if process.returncode != 0:
raise SystemError('untar '+redis_url)
if not os.path.exists(redis_exe):
process = subprocess.Popen(['make', 'PREFIX=' + redis_bdir, 'install'],
cwd=redis_bdir + '/redis-'+redis_ver)
process.wait()
        if process.returncode != 0:
raise SystemError('install '+redis_url)
def get_redis_path():
if not os.path.exists(redis_exe):
install_redis()
return redis_exe
def redis_version():
'''
Determine redis-server version
'''
return 2.6
'''
command = "redis-server --version"
logging.info('redis_version call 1')
process = subprocess.Popen(command.split(' '), stdout=subprocess.PIPE)
logging.info('redis_version call 2')
output, _ = process.communicate()
if "v=2.6" in output[0]:
return 2.6
else:
return 2.4
'''
def start_redis(port, password=None):
'''
Client uses this function to start an instance of redis
Arguments:
    port : An unused TCP port for redis to use as the client port
'''
exe = get_redis_path()
version = redis_version()
if version == 2.6:
redis_conf = "redis.26.conf"
else:
redis_conf = "redis.24.conf"
conftemplate = os.path.dirname(os.path.abspath(__file__)) + "/" +\
redis_conf
redisbase = "/tmp/redis.%s.%d/" % (os.getenv('USER', 'None'), port)
output, _ = call_command_("rm -rf " + redisbase)
output, _ = call_command_("mkdir " + redisbase)
output, _ = call_command_("mkdir " + redisbase + "cache")
logging.info('Redis Port %d' % port)
output, _ = call_command_("cp " + conftemplate + " " + redisbase +
redis_conf)
replace_string_(redisbase + redis_conf,
[("/var/run/redis_6379.pid", redisbase + "pid"),
("port 6379", "port " + str(port)),
("/var/log/redis_6379.log", redisbase + "log"),
("/var/lib/redis/6379", redisbase + "cache")])
if password:
replace_string_(redisbase + redis_conf,[("# requirepass foobared","requirepass " + password)])
command = exe + " " + redisbase + redis_conf
subprocess.Popen(command.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
r = redis.StrictRedis(host='localhost', port=port, db=0, password=password)
done = False
    # getenv returns a string when the variable is set; cast to int so the
    # numeric comparison with cnt below behaves as intended.
    start_wait = int(os.getenv('CONTRIAL_ANALYTICS_TEST_MAX_START_WAIT_TIME', 15))
cnt = 0
while not done:
try:
r.ping()
        except Exception:
cnt += 1
if cnt > start_wait:
logging.info('Redis Failed. Logs below: ')
with open(redisbase + "log", 'r') as fin:
logging.info(fin.read())
return False
logging.info('Redis not ready')
time.sleep(1)
else:
done = True
logging.info('Redis ready')
return True
def stop_redis(port, password=None):
'''
Client uses this function to stop an instance of redis
This will only work for redis instances that were started by this module
Arguments:
    port : The client port of the redis instance to be stopped
'''
r = redis.StrictRedis(host='localhost', port=port, db=0, password=password)
r.shutdown()
del r
redisbase = "/tmp/redis.%s.%d/" % (os.getenv('USER', 'None'), port)
output, _ = call_command_("rm -rf " + redisbase)
def replace_string_(filePath, findreplace):
"replaces all findStr by repStr in file filePath"
print filePath
tempName = filePath + '~~~'
input = open(filePath)
output = open(tempName, 'w')
s = input.read()
for couple in findreplace:
outtext = s.replace(couple[0], couple[1])
s = outtext
output.write(outtext)
output.close()
input.close()
os.rename(tempName, filePath)
def call_command_(command):
process = subprocess.Popen(command.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return process.communicate()
if __name__ == "__main__":
cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
cs.bind(("", 0))
cport = cs.getsockname()[1]
cs.close()
start_redis(cport)
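# A minimal usage sketch (values hypothetical): pair start_redis() with
# stop_redis() so the temporary instance and its /tmp working directory
# are cleaned up when the test finishes.
#
# if start_redis(cport, password='s3cret'):
#     r = redis.StrictRedis(host='localhost', port=cport,
#                           password='s3cret')
#     r.set('probe', 'ok')
#     assert r.get('probe') == 'ok'
#     stop_redis(cport, password='s3cret')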
|
facetothefate/contrail-controller
|
src/analytics/test/utils/mockredis/mockredis/mockredis.py
|
Python
|
apache-2.0
| 5,634
|
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from shotgun.settings import LOG_FILE
def configure_logger():
"""Configures shotgun logger
"""
logger = logging.getLogger('shotgun')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s %(levelname)s %(process)d (%(module)s) %(message)s',
"%Y-%m-%d %H:%M:%S")
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
stream_handler.setFormatter(formatter)
file_handler = logging.FileHandler(LOG_FILE)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
logger.addHandler(file_handler)
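# Usage sketch: call configure_logger() once at startup; afterwards the
# 'shotgun' logger (and any child, such as logging.getLogger(
# 'shotgun.driver')) emits DEBUG and above to both stderr and LOG_FILE.
#
# configure_logger()
# logging.getLogger('shotgun').info('snapshot started')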
|
prmtl/fuel-web
|
shotgun/shotgun/logger.py
|
Python
|
apache-2.0
| 1,285
|
# Copyright 2016 Huawei Technologies India Pvt. Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_config import cfg
from ryu.services.protocols.bgp import bgpspeaker
from ryu.services.protocols.bgp.rtconf.neighbors import CONNECT_MODE_ACTIVE
from neutron.services.bgp.agent import config as bgp_config
from neutron.services.bgp.driver import exceptions as bgp_driver_exc
from neutron.services.bgp.driver.ryu import driver as ryu_driver
from neutron.tests import base
# Test variables for BGP Speaker
FAKE_LOCAL_AS1 = 12345
FAKE_LOCAL_AS2 = 23456
FAKE_ROUTER_ID = '1.1.1.1'
# Test variables for BGP Peer
FAKE_PEER_AS = 45678
FAKE_PEER_IP = '2.2.2.5'
FAKE_AUTH_TYPE = 'md5'
FAKE_PEER_PASSWORD = 'awesome'
# Test variables for Route
FAKE_ROUTE = '2.2.2.0/24'
FAKE_NEXTHOP = '5.5.5.5'
class TestRyuBgpDriver(base.BaseTestCase):
def setUp(self):
super(TestRyuBgpDriver, self).setUp()
cfg.CONF.register_opts(bgp_config.BGP_PROTO_CONFIG_OPTS, 'BGP')
cfg.CONF.set_override('bgp_router_id', FAKE_ROUTER_ID, 'BGP')
self.ryu_bgp_driver = ryu_driver.RyuBgpDriver(cfg.CONF.BGP)
mock_ryu_speaker_p = mock.patch.object(bgpspeaker, 'BGPSpeaker')
self.mock_ryu_speaker = mock_ryu_speaker_p.start()
def test_add_new_bgp_speaker(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.mock_ryu_speaker.assert_called_once_with(
as_number=FAKE_LOCAL_AS1, router_id=FAKE_ROUTER_ID,
bgp_server_port=0,
best_path_change_handler=ryu_driver.best_path_change_cb,
peer_down_handler=ryu_driver.bgp_peer_down_cb,
peer_up_handler=ryu_driver.bgp_peer_up_cb)
def test_remove_bgp_speaker(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
self.ryu_bgp_driver.delete_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(0,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.assertEqual(1, speaker.shutdown.call_count)
def test_add_bgp_peer_without_password(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.ryu_bgp_driver.add_bgp_peer(FAKE_LOCAL_AS1,
FAKE_PEER_IP,
FAKE_PEER_AS)
speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
speaker.neighbor_add.assert_called_once_with(
address=FAKE_PEER_IP,
remote_as=FAKE_PEER_AS,
password=None,
connect_mode=CONNECT_MODE_ACTIVE)
def test_add_bgp_peer_with_password(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.ryu_bgp_driver.add_bgp_peer(FAKE_LOCAL_AS1,
FAKE_PEER_IP,
FAKE_PEER_AS,
FAKE_AUTH_TYPE,
FAKE_PEER_PASSWORD)
speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
speaker.neighbor_add.assert_called_once_with(
address=FAKE_PEER_IP,
remote_as=FAKE_PEER_AS,
password=FAKE_PEER_PASSWORD,
connect_mode=CONNECT_MODE_ACTIVE)
def test_remove_bgp_peer(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.ryu_bgp_driver.delete_bgp_peer(FAKE_LOCAL_AS1, FAKE_PEER_IP)
speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
speaker.neighbor_del.assert_called_once_with(address=FAKE_PEER_IP)
def test_advertise_route(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.ryu_bgp_driver.advertise_route(FAKE_LOCAL_AS1,
FAKE_ROUTE,
FAKE_NEXTHOP)
speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
speaker.prefix_add.assert_called_once_with(prefix=FAKE_ROUTE,
next_hop=FAKE_NEXTHOP)
def test_withdraw_route(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.ryu_bgp_driver.withdraw_route(FAKE_LOCAL_AS1, FAKE_ROUTE)
speaker = self.ryu_bgp_driver.cache.get_bgp_speaker(FAKE_LOCAL_AS1)
speaker.prefix_del.assert_called_once_with(prefix=FAKE_ROUTE)
def test_add_same_bgp_speakers_twice(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertRaises(bgp_driver_exc.BgpSpeakerAlreadyScheduled,
self.ryu_bgp_driver.add_bgp_speaker, FAKE_LOCAL_AS1)
def test_add_different_bgp_speakers_when_one_already_added(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertRaises(bgp_driver_exc.BgpSpeakerMaxScheduled,
self.ryu_bgp_driver.add_bgp_speaker,
FAKE_LOCAL_AS2)
def test_add_bgp_speaker_with_invalid_asnum_paramtype(self):
self.assertRaises(bgp_driver_exc.InvalidParamType,
self.ryu_bgp_driver.add_bgp_speaker, '12345')
def test_add_bgp_speaker_with_invalid_asnum_range(self):
self.assertRaises(bgp_driver_exc.InvalidParamRange,
self.ryu_bgp_driver.add_bgp_speaker, -1)
self.assertRaises(bgp_driver_exc.InvalidParamRange,
self.ryu_bgp_driver.add_bgp_speaker, 65536)
def test_add_bgp_peer_with_invalid_paramtype(self):
# Test with an invalid asnum data-type
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertRaises(bgp_driver_exc.InvalidParamType,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, '12345')
# Test with an invalid auth-type and an invalid password
self.assertRaises(bgp_driver_exc.InvalidParamType,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS,
'sha-1', 1234)
# Test with an invalid auth-type and a valid password
self.assertRaises(bgp_driver_exc.InvaildAuthType,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS,
'hmac-md5', FAKE_PEER_PASSWORD)
# Test with none auth-type and a valid password
self.assertRaises(bgp_driver_exc.InvaildAuthType,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS,
'none', FAKE_PEER_PASSWORD)
# Test with none auth-type and an invalid password
self.assertRaises(bgp_driver_exc.InvalidParamType,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS,
'none', 1234)
# Test with a valid auth-type and no password
self.assertRaises(bgp_driver_exc.PasswordNotSpecified,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS,
FAKE_AUTH_TYPE, None)
def test_add_bgp_peer_with_invalid_asnum_range(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertRaises(bgp_driver_exc.InvalidParamRange,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, -1)
self.assertRaises(bgp_driver_exc.InvalidParamRange,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, 65536)
def test_add_bgp_peer_without_adding_speaker(self):
self.assertRaises(bgp_driver_exc.BgpSpeakerNotAdded,
self.ryu_bgp_driver.add_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP, FAKE_PEER_AS)
def test_remove_bgp_peer_with_invalid_paramtype(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertRaises(bgp_driver_exc.InvalidParamType,
self.ryu_bgp_driver.delete_bgp_peer,
FAKE_LOCAL_AS1, 12345)
def test_remove_bgp_peer_without_adding_speaker(self):
self.assertRaises(bgp_driver_exc.BgpSpeakerNotAdded,
self.ryu_bgp_driver.delete_bgp_peer,
FAKE_LOCAL_AS1, FAKE_PEER_IP)
def test_advertise_route_with_invalid_paramtype(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertRaises(bgp_driver_exc.InvalidParamType,
self.ryu_bgp_driver.advertise_route,
FAKE_LOCAL_AS1, 12345, FAKE_NEXTHOP)
self.assertRaises(bgp_driver_exc.InvalidParamType,
self.ryu_bgp_driver.advertise_route,
FAKE_LOCAL_AS1, FAKE_ROUTE, 12345)
def test_advertise_route_without_adding_speaker(self):
self.assertRaises(bgp_driver_exc.BgpSpeakerNotAdded,
self.ryu_bgp_driver.advertise_route,
FAKE_LOCAL_AS1, FAKE_ROUTE, FAKE_NEXTHOP)
def test_withdraw_route_with_invalid_paramtype(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertRaises(bgp_driver_exc.InvalidParamType,
self.ryu_bgp_driver.withdraw_route,
FAKE_LOCAL_AS1, 12345)
self.assertRaises(bgp_driver_exc.InvalidParamType,
self.ryu_bgp_driver.withdraw_route,
FAKE_LOCAL_AS1, 12345)
def test_withdraw_route_without_adding_speaker(self):
self.assertRaises(bgp_driver_exc.BgpSpeakerNotAdded,
self.ryu_bgp_driver.withdraw_route,
FAKE_LOCAL_AS1, FAKE_ROUTE)
def test_add_multiple_bgp_speakers(self):
self.ryu_bgp_driver.add_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.assertRaises(bgp_driver_exc.BgpSpeakerMaxScheduled,
self.ryu_bgp_driver.add_bgp_speaker,
FAKE_LOCAL_AS2)
self.assertRaises(bgp_driver_exc.BgpSpeakerNotAdded,
self.ryu_bgp_driver.delete_bgp_speaker,
FAKE_LOCAL_AS2)
self.assertEqual(1,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
self.ryu_bgp_driver.delete_bgp_speaker(FAKE_LOCAL_AS1)
self.assertEqual(0,
self.ryu_bgp_driver.cache.get_hosted_bgp_speakers_count())
|
wolverineav/neutron
|
neutron/tests/unit/services/bgp/driver/ryu/test_driver.py
|
Python
|
apache-2.0
| 12,381
|
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import optparse
import StringIO
import webkitpy.thirdparty.unittest2 as unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.layout_tests import lint_test_expectations
class FakePort(object):
def __init__(self, host, name, path):
self.host = host
self.name = name
self.path = path
def test_configuration(self):
return None
def expectations_dict(self):
self.host.ports_parsed.append(self.name)
return {self.path: ''}
def bot_expectations(self):
return {}
def skipped_layout_tests(self, _):
return set([])
def all_test_configurations(self):
return []
def configuration_specifier_macros(self):
return []
def get_option(self, _, val):
return val
def path_to_generic_test_expectations_file(self):
return ''
class FakeFactory(object):
def __init__(self, host, ports):
self.host = host
self.ports = {}
for port in ports:
self.ports[port.name] = port
def get(self, port_name, *args, **kwargs): # pylint: disable=W0613,E0202
return self.ports[port_name]
def all_port_names(self, platform=None): # pylint: disable=W0613,E0202
return sorted(self.ports.keys())
class LintTest(unittest.TestCase):
def test_all_configurations(self):
host = MockHost()
host.ports_parsed = []
host.port_factory = FakeFactory(host, (FakePort(host, 'a', 'path-to-a'),
FakePort(host, 'b', 'path-to-b'),
FakePort(host, 'b-win', 'path-to-b')))
logging_stream = StringIO.StringIO()
options = optparse.Values({'platform': None})
res = lint_test_expectations.lint(host, options, logging_stream)
self.assertEqual(res, 0)
self.assertEqual(host.ports_parsed, ['a', 'b', 'b-win'])
def test_lint_test_files(self):
logging_stream = StringIO.StringIO()
options = optparse.Values({'platform': 'test-mac-leopard'})
host = MockHost()
# pylint appears to complain incorrectly about the method overrides pylint: disable=E0202,C0322
# FIXME: incorrect complaints about spacing pylint: disable=C0322
host.port_factory.all_port_names = lambda platform=None: [platform]
res = lint_test_expectations.lint(host, options, logging_stream)
self.assertEqual(res, 0)
self.assertIn('Lint succeeded', logging_stream.getvalue())
def test_lint_test_files__errors(self):
options = optparse.Values({'platform': 'test', 'debug_rwt_logging': False})
host = MockHost()
# FIXME: incorrect complaints about spacing pylint: disable=C0322
port = host.port_factory.get(options.platform, options=options)
port.expectations_dict = lambda: {'foo': '-- syntax error1', 'bar': '-- syntax error2'}
host.port_factory.get = lambda platform, options=None: port
host.port_factory.all_port_names = lambda platform=None: [port.name()]
logging_stream = StringIO.StringIO()
res = lint_test_expectations.lint(host, options, logging_stream)
self.assertEqual(res, -1)
self.assertIn('Lint failed', logging_stream.getvalue())
self.assertIn('foo:1', logging_stream.getvalue())
self.assertIn('bar:1', logging_stream.getvalue())
class MainTest(unittest.TestCase):
def test_success(self):
orig_lint_fn = lint_test_expectations.lint
# unused args pylint: disable=W0613
def interrupting_lint(host, options, logging_stream):
raise KeyboardInterrupt
def successful_lint(host, options, logging_stream):
return 0
def exception_raising_lint(host, options, logging_stream):
assert False
stdout = StringIO.StringIO()
stderr = StringIO.StringIO()
try:
lint_test_expectations.lint = interrupting_lint
res = lint_test_expectations.main([], stdout, stderr)
self.assertEqual(res, lint_test_expectations.INTERRUPTED_EXIT_STATUS)
lint_test_expectations.lint = successful_lint
res = lint_test_expectations.main(['--platform', 'test'], stdout, stderr)
self.assertEqual(res, 0)
lint_test_expectations.lint = exception_raising_lint
res = lint_test_expectations.main([], stdout, stderr)
self.assertEqual(res, lint_test_expectations.EXCEPTIONAL_EXIT_STATUS)
finally:
lint_test_expectations.lint = orig_lint_fn
|
indashnet/InDashNet.Open.UN2000
|
android/external/chromium_org/third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/lint_test_expectations_unittest.py
|
Python
|
apache-2.0
| 6,145
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
cfg.CONF.import_opt('stack_scheduler_hints', 'heat.common.config')
class SchedulerHintsMixin(object):
'''
Utility class to encapsulate Scheduler Hint related logic shared
between resources.
'''
HEAT_ROOT_STACK_ID = 'heat_root_stack_id'
HEAT_STACK_ID = 'heat_stack_id'
HEAT_STACK_NAME = 'heat_stack_name'
HEAT_PATH_IN_STACK = 'heat_path_in_stack'
HEAT_RESOURCE_NAME = 'heat_resource_name'
HEAT_RESOURCE_UUID = 'heat_resource_uuid'
def _scheduler_hints(self, scheduler_hints):
'''Augment scheduler hints with supplemental content.'''
if cfg.CONF.stack_scheduler_hints:
if scheduler_hints is None:
scheduler_hints = {}
scheduler_hints[self.HEAT_ROOT_STACK_ID] = \
self.stack.root_stack_id()
scheduler_hints[self.HEAT_STACK_ID] = self.stack.id
scheduler_hints[self.HEAT_STACK_NAME] = self.stack.name
scheduler_hints[self.HEAT_PATH_IN_STACK] = \
self.stack.path_in_stack()
scheduler_hints[self.HEAT_RESOURCE_NAME] = self.name
scheduler_hints[self.HEAT_RESOURCE_UUID] = self.uuid
return scheduler_hints
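# A hypothetical sketch of how a resource might use this mixin (the class
# and method names below are illustrative, not part of this module):
#
# class MyServer(SchedulerHintsMixin, resource.Resource):
#     def _build_hints(self, user_hints):
#         # With stack_scheduler_hints enabled, the user-supplied hints
#         # come back augmented with heat_stack_id, heat_stack_name,
#         # heat_resource_name, and the other keys defined above.
#         return self._scheduler_hints(user_hints)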
|
miguelgrinberg/heat
|
heat/engine/resources/scheduler_hints.py
|
Python
|
apache-2.0
| 1,805
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities and helper functions."""
import abc
import contextlib
import datetime
import functools
import hashlib
import inspect
import logging as py_logging
import os
import pyclbr
import random
import re
import shutil
import socket
import stat
import sys
import tempfile
import time
import types
from xml.dom import minidom
from xml.parsers import expat
from xml import sax
from xml.sax import expatreader
from xml.sax import saxutils
from os_brick.initiator import connector
from oslo_concurrency import lockutils
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import importutils
from oslo_utils import strutils
from oslo_utils import timeutils
import retrying
import six
from cinder import exception
from cinder.i18n import _, _LE, _LW
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
ISO_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"
VALID_TRACE_FLAGS = {'method', 'api'}
TRACE_METHOD = False
TRACE_API = False
synchronized = lockutils.synchronized_with_prefix('cinder-')
def find_config(config_path):
"""Find a configuration file using the given hint.
:param config_path: Full or relative path to the config.
:returns: Full path of the config, if it exists.
:raises: `cinder.exception.ConfigNotFound`
"""
possible_locations = [
config_path,
os.path.join(CONF.state_path, "etc", "cinder", config_path),
os.path.join(CONF.state_path, "etc", config_path),
os.path.join(CONF.state_path, config_path),
"/etc/cinder/%s" % config_path,
]
for path in possible_locations:
if os.path.exists(path):
return os.path.abspath(path)
raise exception.ConfigNotFound(path=os.path.abspath(config_path))
def as_int(obj, quiet=True):
# Try "2" -> 2
try:
return int(obj)
except (ValueError, TypeError):
pass
# Try "2.5" -> 2
try:
return int(float(obj))
except (ValueError, TypeError):
pass
# Eck, not sure what this is then.
if not quiet:
raise TypeError(_("Can not translate %s to integer.") % (obj))
return obj
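# Worked examples: as_int('2') == 2 and as_int('2.5') == 2 (truncated via
# float), while as_int('abc') returns 'abc' unchanged because quiet=True
# by default; as_int('abc', quiet=False) raises TypeError instead.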
def is_int_like(val):
"""Check if a value looks like an int."""
try:
return str(int(val)) == str(val)
except Exception:
return False
def check_exclusive_options(**kwargs):
"""Checks that only one of the provided options is actually not-none.
    Iterates over all the kwargs passed in and checks that at most one of
    them is not None; if more than one is not None, an exception is raised
    naming the offending arguments.
"""
if not kwargs:
return
pretty_keys = kwargs.pop("pretty_keys", True)
exclusive_options = {}
for (k, v) in kwargs.items():
if v is not None:
exclusive_options[k] = True
if len(exclusive_options) > 1:
# Change the format of the names from pythonic to
# something that is more readable.
#
# Ex: 'the_key' -> 'the key'
if pretty_keys:
names = [k.replace('_', ' ') for k in kwargs.keys()]
else:
names = kwargs.keys()
names = ", ".join(sorted(names))
msg = (_("May specify only one of %s") % (names))
raise exception.InvalidInput(reason=msg)
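# Example (argument names hypothetical): check_exclusive_options(
# snapshot=snap, source_volume=None) passes silently, while passing two
# non-None values raises InvalidInput with a message such as
# "May specify only one of snapshot, source volume".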
def execute(*cmd, **kwargs):
"""Convenience wrapper around oslo's execute() method."""
if 'run_as_root' in kwargs and 'root_helper' not in kwargs:
kwargs['root_helper'] = get_root_helper()
return processutils.execute(*cmd, **kwargs)
def check_ssh_injection(cmd_list):
ssh_injection_pattern = ['`', '$', '|', '||', ';', '&', '&&', '>', '>>',
'<']
# Check whether injection attacks exist
for arg in cmd_list:
arg = arg.strip()
# Check for matching quotes on the ends
is_quoted = re.match('^(?P<quote>[\'"])(?P<quoted>.*)(?P=quote)$', arg)
if is_quoted:
# Check for unescaped quotes within the quoted argument
quoted = is_quoted.group('quoted')
if quoted:
if (re.match('[\'"]', quoted) or
re.search('[^\\\\][\'"]', quoted)):
raise exception.SSHInjectionThreat(command=cmd_list)
else:
# We only allow spaces within quoted arguments, and that
# is the only special character allowed within quotes
if len(arg.split()) > 1:
raise exception.SSHInjectionThreat(command=cmd_list)
        # Second, check whether a dangerous shell character appears in the
        # argument; any special operator must be escaped with a backslash.
for c in ssh_injection_pattern:
if c not in arg:
continue
result = arg.find(c)
if not result == -1:
if result == 0 or not arg[result - 1] == '\\':
raise exception.SSHInjectionThreat(command=cmd_list)
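# Examples (commands hypothetical):
# check_ssh_injection(['ls', '-l'])               # passes
# check_ssh_injection(['echo', '"hello world"'])  # passes: quoted spaces
# check_ssh_injection(['cat', 'f;rm'])            # raises SSHInjectionThreat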
def create_channel(client, width, height):
"""Invoke an interactive shell session on server."""
channel = client.invoke_shell()
channel.resize_pty(width, height)
return channel
def cinderdir():
import cinder
return os.path.abspath(cinder.__file__).split('cinder/__init__.py')[0]
def last_completed_audit_period(unit=None):
"""This method gives you the most recently *completed* audit period.
arguments:
    unit: string, one of 'hour', 'day', 'month', 'year'
Periods normally begin at the beginning (UTC) of the
period unit (So a 'day' period begins at midnight UTC,
    a 'month' unit on the 1st, a 'year' on Jan 1)
unit string may be appended with an optional offset
like so: 'day@18' This will begin the period at 18:00
UTC. 'month@15' starts a monthly period on the 15th,
and year@3 begins a yearly one on March 1st.
returns: 2 tuple of datetimes (begin, end)
The begin timestamp of this audit period is the same as the
end of the previous.
"""
if not unit:
unit = CONF.volume_usage_audit_period
offset = 0
if '@' in unit:
unit, offset = unit.split("@", 1)
offset = int(offset)
rightnow = timeutils.utcnow()
if unit not in ('month', 'day', 'year', 'hour'):
raise ValueError('Time period must be hour, day, month or year')
if unit == 'month':
if offset == 0:
offset = 1
end = datetime.datetime(day=offset,
month=rightnow.month,
year=rightnow.year)
if end >= rightnow:
year = rightnow.year
if 1 >= rightnow.month:
year -= 1
month = 12 + (rightnow.month - 1)
else:
month = rightnow.month - 1
end = datetime.datetime(day=offset,
month=month,
year=year)
year = end.year
if 1 >= end.month:
year -= 1
month = 12 + (end.month - 1)
else:
month = end.month - 1
begin = datetime.datetime(day=offset, month=month, year=year)
elif unit == 'year':
if offset == 0:
offset = 1
end = datetime.datetime(day=1, month=offset, year=rightnow.year)
if end >= rightnow:
end = datetime.datetime(day=1,
month=offset,
year=rightnow.year - 1)
begin = datetime.datetime(day=1,
month=offset,
year=rightnow.year - 2)
else:
begin = datetime.datetime(day=1,
month=offset,
year=rightnow.year - 1)
elif unit == 'day':
end = datetime.datetime(hour=offset,
day=rightnow.day,
month=rightnow.month,
year=rightnow.year)
if end >= rightnow:
end = end - datetime.timedelta(days=1)
begin = end - datetime.timedelta(days=1)
elif unit == 'hour':
end = rightnow.replace(minute=offset, second=0, microsecond=0)
if end >= rightnow:
end = end - datetime.timedelta(hours=1)
begin = end - datetime.timedelta(hours=1)
return (begin, end)
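# Worked example (clock value hypothetical): with unit 'day@18' and the
# current time at 2015-06-10 09:00 UTC, the end of the most recent
# *completed* period is 2015-06-09 18:00 UTC, so the function returns
# (datetime(2015, 6, 8, 18, 0), datetime(2015, 6, 9, 18, 0)).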
def list_of_dicts_to_dict(seq, key):
"""Convert list of dicts to a indexted dict.
Takes a list of dicts, and converts it a nested dict
indexed by <key>
:param seq: list of dicts
:parm key: key in dicts to index by
example:
lst = [{'id': 1, ...}, {'id': 2, ...}...]
key = 'id'
returns {1:{'id': 1, ...}, 2:{'id':2, ...}
"""
    return {d[key]: dict(d, index=d[key]) for d in seq}
class ProtectedExpatParser(expatreader.ExpatParser):
"""An expat parser which disables DTD's and entities by default."""
def __init__(self, forbid_dtd=True, forbid_entities=True,
*args, **kwargs):
# Python 2.x old style class
expatreader.ExpatParser.__init__(self, *args, **kwargs)
self.forbid_dtd = forbid_dtd
self.forbid_entities = forbid_entities
def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
raise ValueError("Inline DTD forbidden")
def entity_decl(self, entityName, is_parameter_entity, value, base,
systemId, publicId, notationName):
raise ValueError("<!ENTITY> forbidden")
def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
# expat 1.2
raise ValueError("<!ENTITY> forbidden")
def reset(self):
expatreader.ExpatParser.reset(self)
if self.forbid_dtd:
self._parser.StartDoctypeDeclHandler = self.start_doctype_decl
if self.forbid_entities:
self._parser.EntityDeclHandler = self.entity_decl
self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
def safe_minidom_parse_string(xml_string):
"""Parse an XML string using minidom safely.
"""
try:
return minidom.parseString(xml_string, parser=ProtectedExpatParser())
except sax.SAXParseException:
raise expat.ExpatError()
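# Example: safe_minidom_parse_string('<a>ok</a>') parses normally, while
# input declaring an inline DTD or an <!ENTITY> makes the protected
# handlers raise ValueError instead of expanding it.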
def xhtml_escape(value):
"""Escapes a string so it is valid within XML or XHTML."""
return saxutils.escape(value, {'"': '"', "'": '''})
def get_from_path(items, path):
"""Returns a list of items matching the specified path.
Takes an XPath-like expression e.g. prop1/prop2/prop3, and for each item
in items, looks up items[prop1][prop2][prop3]. Like XPath, if any of the
intermediate results are lists it will treat each list item individually.
    A 'None' in items or any child expressions will be ignored; this
    function will not throw because of None (anywhere) in items. The
    returned list will contain no None values.
"""
if path is None:
raise exception.Error('Invalid mini_xpath')
(first_token, sep, remainder) = path.partition('/')
if first_token == '':
raise exception.Error('Invalid mini_xpath')
results = []
if items is None:
return results
if not isinstance(items, list):
# Wrap single objects in a list
items = [items]
for item in items:
if item is None:
continue
get_method = getattr(item, 'get', None)
if get_method is None:
continue
child = get_method(first_token)
if child is None:
continue
if isinstance(child, list):
# Flatten intermediate lists
for x in child:
results.append(x)
else:
results.append(child)
if not sep:
# No more tokens
return results
else:
return get_from_path(results, remainder)
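# Worked example (data hypothetical):
#   items = {'a': [{'b': 1}, {'b': 2}, {'b': None}]}
#   get_from_path(items, 'a/b')  ->  [1, 2]   (None results are dropped)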
def is_valid_boolstr(val):
"""Check if the provided string is a valid bool string or not."""
val = str(val).lower()
return (val == 'true' or val == 'false' or
val == 'yes' or val == 'no' or
val == 'y' or val == 'n' or
val == '1' or val == '0')
def is_none_string(val):
"""Check if a string represents a None value."""
if not isinstance(val, six.string_types):
return False
return val.lower() == 'none'
def monkey_patch():
"""Patches decorators for all functions in a specified module.
    If CONF.monkey_patch is set to True, this function applies a
    decorator to all functions in the specified modules.
    You can set a decorator for each module
    using CONF.monkey_patch_modules.
    The format is "Module path:Decorator function".
    Example:
    'cinder.api.ec2.cloud:cinder.openstack.common.notifier.api.notify_decorator'
    Parameters of the decorator are as follows.
(See cinder.openstack.common.notifier.api.notify_decorator)
:param name: name of the function
:param function: object of the function
"""
    # If CONF.monkey_patch is not True, this function does nothing.
if not CONF.monkey_patch:
return
# Get list of modules and decorators
for module_and_decorator in CONF.monkey_patch_modules:
module, decorator_name = module_and_decorator.split(':')
# import decorator function
decorator = importutils.import_class(decorator_name)
__import__(module)
# Retrieve module information using pyclbr
module_data = pyclbr.readmodule_ex(module)
for key in module_data.keys():
# set the decorator for the class methods
if isinstance(module_data[key], pyclbr.Class):
clz = importutils.import_class("%s.%s" % (module, key))
for method, func in inspect.getmembers(clz, inspect.ismethod):
setattr(
clz, method,
decorator("%s.%s.%s" % (module, key, method), func))
# set the decorator for the function
if isinstance(module_data[key], pyclbr.Function):
func = importutils.import_class("%s.%s" % (module, key))
setattr(sys.modules[module], key,
decorator("%s.%s" % (module, key), func))
def make_dev_path(dev, partition=None, base='/dev'):
"""Return a path to a particular device.
>>> make_dev_path('xvdc')
/dev/xvdc
>>> make_dev_path('xvdc', 1)
/dev/xvdc1
"""
path = os.path.join(base, dev)
if partition:
path += str(partition)
return path
def sanitize_hostname(hostname):
"""Return a hostname which conforms to RFC-952 and RFC-1123 specs."""
if six.PY3:
hostname = hostname.encode('latin-1', 'ignore')
hostname = hostname.decode('latin-1')
else:
if isinstance(hostname, six.text_type):
hostname = hostname.encode('latin-1', 'ignore')
hostname = re.sub('[ _]', '-', hostname)
hostname = re.sub('[^\w.-]+', '', hostname)
hostname = hostname.lower()
hostname = hostname.strip('.-')
return hostname
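# Example: sanitize_hostname(u'My_Host Name!') -> 'my-host-name'
# (spaces and underscores become hyphens, other punctuation is dropped,
# and the result is lowercased).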
def hash_file(file_like_object):
"""Generate a hash for the contents of a file."""
checksum = hashlib.sha1()
    # Read in 32 KiB chunks; update() returns None, so any() merely
    # drives the iteration to exhaustion.
    any(map(checksum.update, iter(lambda: file_like_object.read(32768), b'')))
return checksum.hexdigest()
def service_is_up(service):
"""Check whether a service is up based on last heartbeat."""
last_heartbeat = service['updated_at'] or service['created_at']
# Timestamps in DB are UTC.
elapsed = (timeutils.utcnow(with_timezone=True) -
last_heartbeat).total_seconds()
return abs(elapsed) <= CONF.service_down_time
def read_file_as_root(file_path):
"""Secure helper to read file as root."""
try:
out, _err = execute('cat', file_path, run_as_root=True)
return out
except processutils.ProcessExecutionError:
raise exception.FileNotFound(file_path=file_path)
@contextlib.contextmanager
def temporary_chown(path, owner_uid=None):
"""Temporarily chown a path.
:params owner_uid: UID of temporary owner (defaults to current user)
"""
if owner_uid is None:
owner_uid = os.getuid()
orig_uid = os.stat(path).st_uid
if orig_uid != owner_uid:
execute('chown', owner_uid, path, run_as_root=True)
try:
yield
finally:
if orig_uid != owner_uid:
execute('chown', orig_uid, path, run_as_root=True)
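# Usage sketch (path hypothetical): temporarily take ownership of a
# root-owned file, restoring the original owner afterwards even on error.
#
# with temporary_chown('/etc/cinder/private.conf'):
#     with open('/etc/cinder/private.conf') as f:
#         data = f.read()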
@contextlib.contextmanager
def tempdir(**kwargs):
tmpdir = tempfile.mkdtemp(**kwargs)
try:
yield tmpdir
finally:
try:
shutil.rmtree(tmpdir)
except OSError as e:
LOG.debug('Could not remove tmpdir: %s',
six.text_type(e))
def walk_class_hierarchy(clazz, encountered=None):
"""Walk class hierarchy, yielding most derived classes first."""
if not encountered:
encountered = []
for subclass in clazz.__subclasses__():
if subclass not in encountered:
encountered.append(subclass)
# drill down to leaves first
for subsubclass in walk_class_hierarchy(subclass, encountered):
yield subsubclass
yield subclass
def get_root_helper():
return 'sudo cinder-rootwrap %s' % CONF.rootwrap_config
def brick_get_connector_properties(multipath=False, enforce_multipath=False):
"""Wrapper to automatically set root_helper in brick calls.
:param multipath: A boolean indicating whether the connector can
support multipath.
:param enforce_multipath: If True, it raises exception when multipath=True
is specified but multipathd is not running.
If False, it falls back to multipath=False
when multipathd is not running.
"""
root_helper = get_root_helper()
return connector.get_connector_properties(root_helper,
CONF.my_ip,
multipath,
enforce_multipath)
def brick_get_connector(protocol, driver=None,
execute=processutils.execute,
use_multipath=False,
device_scan_attempts=3,
*args, **kwargs):
"""Wrapper to get a brick connector object.
This automatically populates the required protocol as well
as the root_helper needed to execute commands.
"""
root_helper = get_root_helper()
return connector.InitiatorConnector.factory(protocol, root_helper,
driver=driver,
execute=execute,
use_multipath=use_multipath,
device_scan_attempts=
device_scan_attempts,
*args, **kwargs)
def require_driver_initialized(driver):
"""Verifies if `driver` is initialized
If the driver is not initialized, an exception will be raised.
    :param driver: The driver instance.
:raises: `exception.DriverNotInitialized`
"""
# we can't do anything if the driver didn't init
if not driver.initialized:
driver_name = driver.__class__.__name__
LOG.error(_LE("Volume driver %s not initialized"), driver_name)
raise exception.DriverNotInitialized()
def get_file_mode(path):
"""This primarily exists to make unit testing easier."""
return stat.S_IMODE(os.stat(path).st_mode)
def get_file_gid(path):
"""This primarily exists to make unit testing easier."""
return os.stat(path).st_gid
def get_file_size(path):
"""Returns the file size."""
return os.stat(path).st_size
def _get_disk_of_partition(devpath, st=None):
"""Gets a disk device path and status from partition path.
Returns a disk device path from a partition device path, and stat for
the device. If devpath is not a partition, devpath is returned as it is.
For example, '/dev/sda' is returned for '/dev/sda1', and '/dev/disk1' is
for '/dev/disk1p1' ('p' is prepended to the partition number if the disk
name ends with numbers).
"""
diskpath = re.sub('(?:(?<=\d)p)?\d+$', '', devpath)
if diskpath != devpath:
try:
st_disk = os.stat(diskpath)
if stat.S_ISBLK(st_disk.st_mode):
return (diskpath, st_disk)
except OSError:
pass
# devpath is not a partition
if st is None:
st = os.stat(devpath)
return (devpath, st)
def get_bool_param(param_string, params):
param = params.get(param_string, False)
if not is_valid_boolstr(param):
msg = _('Value %(param)s for %(param_string)s is not a '
'boolean.') % {'param': param, 'param_string': param_string}
raise exception.InvalidParameterValue(err=msg)
return strutils.bool_from_string(param, strict=True)
def get_blkdev_major_minor(path, lookup_for_file=True):
"""Get 'major:minor' number of block device.
Get the device's 'major:minor' number of a block device to control
I/O ratelimit of the specified path.
If lookup_for_file is True and the path is a regular file, lookup a disk
device which the file lies on and returns the result for the device.
"""
st = os.stat(path)
if stat.S_ISBLK(st.st_mode):
path, st = _get_disk_of_partition(path, st)
return '%d:%d' % (os.major(st.st_rdev), os.minor(st.st_rdev))
elif stat.S_ISCHR(st.st_mode):
# No I/O ratelimit control is provided for character devices
return None
elif lookup_for_file:
# lookup the mounted disk which the file lies on
out, _err = execute('df', path)
devpath = out.split("\n")[1].split()[0]
        if devpath[0] != '/':
# the file is on a network file system
return None
return get_blkdev_major_minor(devpath, False)
else:
msg = _("Unable to get a block device for file \'%s\'") % path
raise exception.Error(msg)
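# Example (device numbers hypothetical): for a partition such as
# /dev/sda1 whose parent disk's stat reports major 8 and minor 0, the
# function resolves the partition to /dev/sda and returns '8:0'.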
def check_string_length(value, name, min_length=0, max_length=None):
"""Check the length of specified string.
:param value: the value of the string
:param name: the name of the string
:param min_length: the min_length of the string
:param max_length: the max_length of the string
"""
if not isinstance(value, six.string_types):
msg = _("%s is not a string or unicode") % name
raise exception.InvalidInput(message=msg)
if len(value) < min_length:
msg = _("%(name)s has a minimum character requirement of "
"%(min_length)s.") % {'name': name, 'min_length': min_length}
raise exception.InvalidInput(message=msg)
if max_length and len(value) > max_length:
msg = _("%(name)s has more than %(max_length)s "
"characters.") % {'name': name, 'max_length': max_length}
raise exception.InvalidInput(message=msg)
_visible_admin_metadata_keys = ['readonly', 'attached_mode']
def add_visible_admin_metadata(volume):
"""Add user-visible admin metadata to regular metadata.
Extracts the admin metadata keys that are to be made visible to
non-administrators, and adds them to the regular metadata structure for the
passed-in volume.
"""
visible_admin_meta = {}
if volume.get('volume_admin_metadata'):
if isinstance(volume['volume_admin_metadata'], dict):
volume_admin_metadata = volume['volume_admin_metadata']
for key in volume_admin_metadata:
if key in _visible_admin_metadata_keys:
visible_admin_meta[key] = volume_admin_metadata[key]
else:
for item in volume['volume_admin_metadata']:
if item['key'] in _visible_admin_metadata_keys:
visible_admin_meta[item['key']] = item['value']
# avoid circular ref when volume is a Volume instance
elif (volume.get('admin_metadata') and
isinstance(volume.get('admin_metadata'), dict)):
for key in _visible_admin_metadata_keys:
if key in volume['admin_metadata'].keys():
visible_admin_meta[key] = volume['admin_metadata'][key]
if not visible_admin_meta:
return
# NOTE(zhiyan): update visible administration metadata to
# volume metadata, administration metadata will rewrite existing key.
if volume.get('volume_metadata'):
orig_meta = list(volume.get('volume_metadata'))
for item in orig_meta:
if item['key'] in visible_admin_meta.keys():
item['value'] = visible_admin_meta.pop(item['key'])
for key, value in visible_admin_meta.items():
orig_meta.append({'key': key, 'value': value})
volume['volume_metadata'] = orig_meta
# avoid circular ref when vol is a Volume instance
elif (volume.get('metadata') and
isinstance(volume.get('metadata'), dict)):
volume['metadata'].update(visible_admin_meta)
else:
volume['metadata'] = visible_admin_meta
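# Sketch (volume dict hypothetical): given
#   volume = {'volume_admin_metadata': [{'key': 'readonly',
#                                        'value': 'True'}]}
# the call leaves volume['metadata'] == {'readonly': 'True'}, since
# 'readonly' is in _visible_admin_metadata_keys.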
def remove_invalid_filter_options(context, filters,
allowed_search_options):
"""Remove search options that are not valid for non-admin API/context."""
if context.is_admin:
# Allow all options
return
# Otherwise, strip out all unknown options
unknown_options = [opt for opt in filters
if opt not in allowed_search_options]
bad_options = ", ".join(unknown_options)
LOG.debug("Removing options '%s' from query.", bad_options)
for opt in unknown_options:
del filters[opt]
def is_blk_device(dev):
try:
if stat.S_ISBLK(os.stat(dev).st_mode):
return True
return False
except Exception:
LOG.debug('Path %s not found in is_blk_device check', dev)
return False
def retry(exceptions, interval=1, retries=3, backoff_rate=2,
wait_random=False):
def _retry_on_exception(e):
return isinstance(e, exceptions)
def _backoff_sleep(previous_attempt_number, delay_since_first_attempt_ms):
exp = backoff_rate ** previous_attempt_number
wait_for = interval * exp
if wait_random:
random.seed()
wait_val = random.randrange(interval * 1000.0, wait_for * 1000.0)
else:
wait_val = wait_for * 1000.0
LOG.debug("Sleeping for %s seconds", (wait_val / 1000.0))
return wait_val
def _print_stop(previous_attempt_number, delay_since_first_attempt_ms):
delay_since_first_attempt = delay_since_first_attempt_ms / 1000.0
LOG.debug("Failed attempt %s", previous_attempt_number)
LOG.debug("Have been at this for %s seconds",
delay_since_first_attempt)
return previous_attempt_number == retries
if retries < 1:
raise ValueError('Retries must be greater than or '
'equal to 1 (received: %s). ' % retries)
def _decorator(f):
@six.wraps(f)
def _wrapper(*args, **kwargs):
r = retrying.Retrying(retry_on_exception=_retry_on_exception,
wait_func=_backoff_sleep,
stop_func=_print_stop)
return r.call(f, *args, **kwargs)
return _wrapper
return _decorator
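# Usage sketch (exception type illustrative):
# @retry(processutils.ProcessExecutionError, interval=2, retries=5)
# def _flaky_call():
#     ...
# Each failed attempt sleeps roughly interval * backoff_rate**attempt
# before retrying, and the last exception propagates after `retries`
# attempts.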
def convert_version_to_int(version):
try:
if isinstance(version, six.string_types):
version = convert_version_to_tuple(version)
if isinstance(version, tuple):
return six.moves.reduce(lambda x, y: (x * 1000) + y, version)
except Exception:
msg = _("Version %s is invalid.") % version
raise exception.CinderException(msg)
def convert_version_to_str(version_int):
version_numbers = []
factor = 1000
while version_int != 0:
version_number = version_int - (version_int // factor * factor)
version_numbers.insert(0, six.text_type(version_number))
version_int = version_int // factor
return '.'.join(map(str, version_numbers))
def convert_version_to_tuple(version_str):
return tuple(int(part) for part in version_str.split('.'))
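# Worked example: the parts are packed base 1000, so
# convert_version_to_int('1.2.3') == 1 * 1000**2 + 2 * 1000 + 3 == 1002003
# and convert_version_to_str(1002003) == '1.2.3'.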
def convert_str(text):
"""Convert to native string.
Convert bytes and Unicode strings to native strings:
* convert to bytes on Python 2:
encode Unicode using encodeutils.safe_encode()
* convert to Unicode on Python 3: decode bytes from UTF-8
"""
if six.PY2:
return encodeutils.safe_encode(text)
else:
if isinstance(text, bytes):
return text.decode('utf-8')
else:
return text
def trace_method(f):
"""Decorates a function if TRACE_METHOD is true."""
@functools.wraps(f)
def trace_method_logging_wrapper(*args, **kwargs):
if TRACE_METHOD:
return trace(f)(*args, **kwargs)
return f(*args, **kwargs)
return trace_method_logging_wrapper
def trace_api(f):
"""Decorates a function if TRACE_API is true."""
@functools.wraps(f)
def trace_api_logging_wrapper(*args, **kwargs):
if TRACE_API:
return trace(f)(*args, **kwargs)
return f(*args, **kwargs)
return trace_api_logging_wrapper
def trace(f):
"""Trace calls to the decorated function.
This decorator should always be defined as the outermost decorator so it
is defined last. This is important so it does not interfere
with other decorators.
Using this decorator on a function will cause its execution to be logged at
`DEBUG` level with arguments, return values, and exceptions.
    :returns: a function decorator
"""
func_name = f.__name__
@functools.wraps(f)
def trace_logging_wrapper(*args, **kwargs):
if len(args) > 0:
maybe_self = args[0]
else:
maybe_self = kwargs.get('self', None)
if maybe_self and hasattr(maybe_self, '__module__'):
logger = logging.getLogger(maybe_self.__module__)
else:
logger = LOG
# NOTE(ameade): Don't bother going any further if DEBUG log level
# is not enabled for the logger.
if not logger.isEnabledFor(py_logging.DEBUG):
return f(*args, **kwargs)
all_args = inspect.getcallargs(f, *args, **kwargs)
logger.debug('==> %(func)s: call %(all_args)r',
{'func': func_name, 'all_args': all_args})
start_time = time.time() * 1000
try:
result = f(*args, **kwargs)
except Exception as exc:
total_time = int(round(time.time() * 1000)) - start_time
logger.debug('<== %(func)s: exception (%(time)dms) %(exc)r',
{'func': func_name,
'time': total_time,
'exc': exc})
raise
total_time = int(round(time.time() * 1000)) - start_time
logger.debug('<== %(func)s: return (%(time)dms) %(result)r',
{'func': func_name,
'time': total_time,
'result': result})
return result
return trace_logging_wrapper
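# With DEBUG enabled, a traced call produces paired log lines of the form
# (values illustrative):
#   ==> create_volume: call {'size': 1}
#   <== create_volume: return (12ms) None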
class TraceWrapperMetaclass(type):
"""Metaclass that wraps all methods of a class with trace_method.
This metaclass will cause every function inside of the class to be
decorated with the trace_method decorator.
To use the metaclass you define a class like so:
@six.add_metaclass(utils.TraceWrapperMetaclass)
class MyClass(object):
"""
def __new__(meta, classname, bases, classDict):
newClassDict = {}
for attributeName, attribute in classDict.items():
if isinstance(attribute, types.FunctionType):
# replace it with a wrapped version
attribute = functools.update_wrapper(trace_method(attribute),
attribute)
newClassDict[attributeName] = attribute
return type.__new__(meta, classname, bases, newClassDict)
class TraceWrapperWithABCMetaclass(abc.ABCMeta, TraceWrapperMetaclass):
"""Metaclass that wraps all methods of a class with trace."""
pass
def setup_tracing(trace_flags):
"""Set global variables for each trace flag.
Sets variables TRACE_METHOD and TRACE_API, which represent
whether to log method and api traces.
:param trace_flags: a list of strings
"""
global TRACE_METHOD
global TRACE_API
try:
trace_flags = [flag.strip() for flag in trace_flags]
except TypeError: # Handle when trace_flags is None or a test mock
trace_flags = []
for invalid_flag in (set(trace_flags) - VALID_TRACE_FLAGS):
LOG.warning(_LW('Invalid trace flag: %s'), invalid_flag)
TRACE_METHOD = 'method' in trace_flags
TRACE_API = 'api' in trace_flags
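# Example: setup_tracing(['method']) turns on method tracing only, while
# setup_tracing(['method', 'bogus']) does the same and logs a warning for
# the unrecognized 'bogus' flag.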
def resolve_hostname(hostname):
"""Resolves host name to IP address.
Resolves a host name (my.data.point.com) to an IP address (10.12.143.11).
This routine also works if the data passed in hostname is already an IP.
In this case, the same IP address will be returned.
:param hostname: Host name to resolve.
:return: IP Address for Host name.
"""
result = socket.getaddrinfo(hostname, None)[0]
(family, socktype, proto, canonname, sockaddr) = result
LOG.debug('Asked to resolve hostname %(host)s and got IP %(ip)s.',
{'host': hostname, 'ip': sockaddr[0]})
return sockaddr[0]
|
nikesh-mahalka/cinder
|
cinder/utils.py
|
Python
|
apache-2.0
| 34,822
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import bottle
import commands
from bottle import route, send_file, template
@route('/')
def index():
bottle.TEMPLATES.clear() # For rapid development
return template("index", master_port = master_port)
@route('/framework/:id#[0-9-]*#')
def framework(id):
bottle.TEMPLATES.clear() # For rapid development
return template("framework", master_port = master_port, framework_id = id)
@route('/static/:filename#.*#')
def static(filename):
send_file(filename, root = './webui/static')
@route('/log/:level#[A-Z]*#')
def log_full(level):
send_file('mesos-master.' + level, root = log_dir,
guessmime = False, mimetype = 'text/plain')
@route('/log/:level#[A-Z]*#/:lines#[0-9]*#')
def log_tail(level, lines):
bottle.response.content_type = 'text/plain'
command = 'tail -%s %s/mesos-master.%s' % (lines, log_dir, level)
return commands.getoutput(command)
bottle.TEMPLATE_PATH.append('./webui/master/')
# TODO(*): Add an assert to confirm that all the arguments we are
# expecting have been passed to us, which will give us a better error
# message when they aren't!
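# A minimal sketch of the check the TODO above asks for (kept as a
# comment so behavior is unchanged; usage wording is illustrative):
# if len(sys.argv) != 4:
#     sys.exit("Usage: webui.py <master_port> <webui_port> <log_dir>")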
master_port = sys.argv[1]
webui_port = sys.argv[2]
log_dir = sys.argv[3]
bottle.debug(True)
bottle.run(host = '0.0.0.0', port = webui_port)
|
charlescearl/VirtualMesos
|
src/webui/master/webui.py
|
Python
|
apache-2.0
| 2,037
|
# Copyright 2009-2015 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from requestbuilder import Arg
from requestbuilder.response import PaginatedResponse
from euca2ools.commands.iam import IAMRequest, arg_account_name
from euca2ools.commands.iam.getaccountpolicy import GetAccountPolicy
class ListAccountPolicies(IAMRequest):
    DESCRIPTION = ('[Eucalyptus only] List one or all policies '
                   'attached to an account')
ARGS = [arg_account_name(help='''name or ID of the account owning
the policies to list (required)'''),
Arg('-p', '--policy-name', metavar='POLICY', route_to=None,
help='display a specific policy'),
Arg('-v', '--verbose', action='store_true', route_to=None,
help='''display the contents of the resulting policies (in
addition to their names)'''),
Arg('--pretty-print', action='store_true', route_to=None,
help='''when printing the contents of policies, reformat them
for easier reading''')]
LIST_TAGS = ['PolicyNames']
def main(self):
return PaginatedResponse(self, (None,), ('PolicyNames',))
def prepare_for_page(self, page):
# Pages are defined by markers
self.params['Marker'] = page
def get_next_page(self, response):
if response.get('IsTruncated') == 'true':
return response['Marker']
def print_result(self, result):
if self.args.get('policy_name'):
# Look for the specific policy the user asked for
for policy_name in result.get('PolicyNames', []):
if policy_name == self.args['policy_name']:
if self.args['verbose']:
self.print_policy(policy_name)
else:
print policy_name
break
else:
for policy_name in result.get('PolicyNames', []):
print policy_name
if self.args['verbose']:
self.print_policy(policy_name)
def print_policy(self, policy_name):
req = GetAccountPolicy(
service=self.service, AccountName=self.args['AccountName'],
PolicyName=policy_name, pretty_print=self.args['pretty_print'])
response = req.main()
req.print_result(response)
|
vasiliykochergin/euca2ools
|
euca2ools/commands/iam/listaccountpolicies.py
|
Python
|
bsd-2-clause
| 3,688
|
# Copyright (c) 2017, MD2K Center of Excellence
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import List
import numpy as np
from scipy import signal
from cerebralcortex.data_processor.signalprocessing.dataquality import Quality
from cerebralcortex.kernel.datatypes.datapoint import DataPoint
from cerebralcortex.kernel.datatypes.datastream import DataStream
def filter_bad_ecg(ecg: DataStream,
ecg_quality: DataStream) -> DataStream:
"""
    This function combines the raw ecg datastream with its data quality
    datastream and keeps only those datapoints whose quality is marked
    acceptable
:param ecg: raw ecg datastream
:param ecg_quality: ecg quality datastream
:return: filtered ecg datastream
"""
ecg_filtered = DataStream.from_datastream([ecg])
ecg_quality_array = ecg_quality.data
ecg_raw_timestamp_array = np.array([i.start_time.timestamp() for i in ecg.data])
ecg_filtered_array = []
initial_index = 0
for item in ecg_quality_array:
if item.sample == Quality.ACCEPTABLE:
final_index = initial_index
for i in range(initial_index, len(ecg.data)):
if item.start_time.timestamp() <= ecg_raw_timestamp_array[i] <= item.end_time.timestamp():
ecg_filtered_array.append(ecg.data[i])
final_index = i
initial_index = final_index
ecg_filtered.data = ecg_filtered_array
return ecg_filtered
def compute_rr_intervals(ecg: DataStream,
ecg_quality: DataStream,
fs: float) -> DataStream:
"""
filter ecg datastream first and compute rr-interval datastream from the ecg datastream
    :param ecg: ecg datastream
    :param ecg_quality: ecg quality annotated datastream
:param fs: sampling frequency
:return: rr-interval datastream
"""
ecg_filtered = filter_bad_ecg(ecg, ecg_quality)
# compute the r-peak array
ecg_rpeak = detect_rpeak(ecg_filtered, fs)
return ecg_rpeak
def rr_interval_update(rpeak_temp1: List[DataPoint],
rr_ave: float,
min_size: int = 8) -> float:
"""
:param min_size: 8 last R-peaks are checked to compute the running rr interval average
:param rpeak_temp1: R peak locations
:param rr_ave: previous rr-interval average
:return: the new rr-interval average of the previously detected 8 R peak locations
"""
peak_interval = np.diff([0] + rpeak_temp1) # TODO: rpeak_temp1 is a datapoint, what should this be converted to?
return rr_ave if len(peak_interval) < min_size else np.sum(peak_interval[-min_size:]) / min_size
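# Worked example (hypothetical index-based peak locations): for
# rpeak_temp1 = [10, 20, 30, 40, 50, 60, 70, 80], peak_interval becomes
# np.diff([0, 10, ..., 80]) = [10] * 8, so the function returns 80 / 8 = 10.
# With fewer than min_size peaks the previous rr_ave is returned unchanged.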
def compute_moving_window_int(sample: np.ndarray,
fs: float,
blackman_win_length: int,
filter_length: int = 257,
delta: float = .02) -> np.ndarray:
"""
:param sample: ecg sample array
:param fs: sampling frequency
:param blackman_win_length: length of the blackman window on which to compute the moving window integration
:param filter_length: length of the FIR bandpass filter on which filtering is done on ecg sample array
:param delta: to compute the weights of each band in FIR filter
:return: the Moving window integration of the sample array
"""
# I believe these constants can be kept in a file
# filter edges
filter_edges = [0, 4.5 * 2 / fs, 5 * 2 / fs, 20 * 2 / fs, 20.5 * 2 / fs, 1]
# gains at filter band edges
gains = [0, 0, 1, 1, 0, 0]
# weights
weights = [500 / delta, 1 / delta, 500 / delta]
# length of the FIR filter
# FIR filter coefficients for bandpass filtering
filter_coeff = signal.firls(filter_length, filter_edges, gains, weights)
# bandpass filtered signal
bandpass_signal = signal.convolve(sample, filter_coeff, 'same')
bandpass_signal /= np.percentile(bandpass_signal, 90)
# derivative array
derivative_array = (np.array([-1.0, -2.0, 0, 2.0, 1.0])) * (1 / 8)
# derivative signal (differentiation of the bandpass)
derivative_signal = signal.convolve(bandpass_signal, derivative_array, 'same')
derivative_signal /= np.percentile(derivative_signal, 90)
# squared derivative signal
derivative_squared_signal = derivative_signal ** 2
derivative_squared_signal /= np.percentile(derivative_squared_signal, 90)
# blackman window
blackman_window = np.blackman(blackman_win_length)
# moving window Integration of squared derivative signal
mov_win_int_signal = signal.convolve(derivative_squared_signal, blackman_window, 'same')
mov_win_int_signal /= np.percentile(mov_win_int_signal, 90)
return mov_win_int_signal
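# Minimal usage sketch (synthetic values, not a real ECG); the 0.2 * fs
# blackman window length mirrors the default used by detect_rpeak below:
#   fs = 64.0
#   t = np.arange(0, 10, 1 / fs)
#   synthetic = np.sin(2 * np.pi * 1.2 * t)
#   mwi = compute_moving_window_int(synthetic, fs, int(np.ceil(0.2 * fs)))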
def check_peak(data: List[DataPoint]) -> bool:
"""
This is a function to check the condition of a simple peak of signal y in index i
:param data:
:return:
"""
if len(data) < 3:
return False
midpoint = int(len(data) / 2)
test_value = data[0]
for i in data[1:midpoint + 1]:
if test_value < i:
test_value = i
else:
return False
for i in data[midpoint + 1:]:
if test_value > i:
test_value = i
else:
return False
return True
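# Worked examples: check_peak([1, 2, 3, 2, 1]) is True (values strictly rise
# to the midpoint, then strictly fall); check_peak([1, 3, 2, 4, 1]) is False
# (the rise is broken before the midpoint); fewer than 3 samples gives False.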
# TODO: CODE_REVIEW: Justify in the method documentation string the justification of the default values
# TODO: CODE_REVIEW: Make hard-coded constants default method parameter
def compute_r_peaks(threshold_1: float,
rr_ave: float,
mov_win_int_signal: np.ndarray,
peak_tuple_array: List[tuple]) -> list:
"""
This function does the adaptive thresholding of the signal to get the R-peak locations
:param threshold_1: Thr1 is the threshold above which the R peak
:param rr_ave: running RR-interval average
:param mov_win_int_signal: signal sample array
:param peak_tuple_array: A tuple array containing location and values of the simple peaks detected in the process before
:returns rpeak_array_indices: The location of the R peaks in the signal sample array once found this is returned
"""
peak_location_in_signal_array = [i[0] for i in peak_tuple_array] # location of the simple peaks in signal array
amplitude_in_peak_locations = [i[1] for i in peak_tuple_array] # simple peak's amplitude in signal array
threshold_2 = 0.5 * threshold_1 # any signal value between threshold_2 and threshold_1 is a noise peak
    sig_lev = 4 * threshold_1  # current signal level - any signal above three times this level is discarded as spurious
noise_lev = 0.1 * sig_lev # current noise level of the signal
ind_rpeak = 0
rpeak_array_indices = []
rpeak_inds_in_peak_array = []
while ind_rpeak < len(peak_location_in_signal_array):
# if for 166 percent of the present RR interval no peak is detected as R peak then threshold_2 is taken as the
# R peak threshold and the maximum of the range is taken as a R peak and RR interval is updated accordingly
if len(rpeak_array_indices) >= 1 and peak_location_in_signal_array[ind_rpeak] - peak_location_in_signal_array[
rpeak_inds_in_peak_array[-1]] > 1.66 * rr_ave and ind_rpeak - rpeak_inds_in_peak_array[-1] > 1:
# values and indexes of previous peaks discarded as not an R peak whose magnitude is above threshold_2
searchback_array = [(k - rpeak_inds_in_peak_array[-1], amplitude_in_peak_locations[k]) for k in
range(rpeak_inds_in_peak_array[-1] + 1, ind_rpeak) if
3 * sig_lev > amplitude_in_peak_locations[k] > threshold_2]
if len(searchback_array) > 0:
# maximum inside the range calculated beforehand is taken as R peak
searchback_array_inrange_values = [x[1] for x in searchback_array]
searchback_max_index = np.argmax(searchback_array_inrange_values)
rpeak_array_indices.append(peak_location_in_signal_array[
rpeak_inds_in_peak_array[-1] + searchback_array[searchback_max_index][
0]])
rpeak_inds_in_peak_array.append(
rpeak_inds_in_peak_array[-1] + searchback_array[searchback_max_index][0])
sig_lev = ewma(sig_lev, mov_win_int_signal[peak_location_in_signal_array[ind_rpeak]],
.125) # update the current signal level
threshold_1 = noise_lev + 0.25 * (sig_lev - noise_lev)
threshold_2 = 0.5 * threshold_1
rr_ave = rr_interval_update(rpeak_array_indices, rr_ave)
ind_rpeak = rpeak_inds_in_peak_array[-1] + 1
else:
threshold_1 = noise_lev + 0.25 * (sig_lev - noise_lev)
threshold_2 = 0.5 * threshold_1
ind_rpeak += 1
else:
# R peak checking
if threshold_1 <= mov_win_int_signal[peak_location_in_signal_array[ind_rpeak]] < 3 * sig_lev:
rpeak_array_indices.append(peak_location_in_signal_array[ind_rpeak])
rpeak_inds_in_peak_array.append(ind_rpeak)
sig_lev = ewma(sig_lev, mov_win_int_signal[peak_location_in_signal_array[ind_rpeak]],
.125) # update the signal level
# noise peak checking
elif threshold_1 > mov_win_int_signal[peak_location_in_signal_array[ind_rpeak]] > threshold_2:
noise_lev = ewma(noise_lev, mov_win_int_signal[peak_location_in_signal_array[ind_rpeak]],
.125) # update the noise level
threshold_1 = noise_lev + 0.25 * (sig_lev - noise_lev)
threshold_2 = 0.5 * threshold_1
ind_rpeak += 1
rr_ave = rr_interval_update(rpeak_array_indices, rr_ave)
return rpeak_array_indices
def ewma(value: float, new_value: float, alpha: float) -> float:
"""
:param value:
:param new_value:
:param alpha:
:return:
"""
return alpha * new_value + (1 - alpha) * value
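# Worked example: ewma(1.0, 2.0, 0.125) = 0.125 * 2.0 + 0.875 * 1.0 = 1.125,
# the same alpha = .125 update used for sig_lev and noise_lev in
# compute_r_peaks above.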
# TODO: CODE_REVIEW: Justify in the method documentation string the justification of the default values
# TODO: CODE_REVIEW: Make hard-coded constants default method parameter
def remove_close_peaks(rpeak_temp1: list,
sample: np.ndarray,
fs: float,
min_range: float = .5) -> list:
"""
    This function removes one of two consecutive R peaks
    if the difference between them is less than the minimum possible RR interval
    :param min_range: minimum allowed separation between consecutive R peaks, in seconds
:param rpeak_temp1: R peak array containing the index of the R peaks
:param sample: sample array
:param fs: sampling frequency
:return: R peak array with no close R peaks
"""
difference = 0
    rpeak_temp2 = list(rpeak_temp1)  # copy so the caller's list is not mutated
while difference != 1:
length_rpeak_temp2 = len(rpeak_temp2)
temp = np.diff(rpeak_temp2)
comp_index1 = [rpeak_temp2[i] for i in range(len(temp)) if temp[i] < min_range * fs]
comp_index2 = [rpeak_temp2[i + 1] for i in range(len(temp)) if temp[i] < min_range * fs]
comp1 = sample[comp_index1]
comp2 = sample[comp_index2]
checkmin = np.matrix([comp1, comp2])
temp_ind1 = [i for i in range(len(temp)) if temp[i] < min_range * fs]
temp_ind2 = np.argmin(np.array(checkmin), axis=0)
temp_ind = temp_ind1 + temp_ind2
temp_ind = np.unique(temp_ind)
count = 0
for i in temp_ind:
rpeak_temp2.remove(rpeak_temp2[i - count])
count = count + 1
difference = length_rpeak_temp2 - len(rpeak_temp2) + 1
return rpeak_temp2
def confirm_peaks(rpeak_temp1: list,
sample: np.ndarray,
fs: float,
range_for_checking: float = 1 / 10) -> np.ndarray:
"""
This function does the final check on the R peaks detected and
finds the maximum in a range of fs/10 of the detected peak location and assigns it to be the peak
:param rpeak_temp1: R peak array containing the index of the R peaks
:param sample: sample array
:param fs: sampling frequency
:param range_for_checking : The peaks are checked within a range of fs/10 to get the maximum value within that range
:return: final R peak array
"""
for i in range(1, len(rpeak_temp1) - 1):
start_index = int(rpeak_temp1[i] - np.ceil(range_for_checking * fs))
end_index = int(rpeak_temp1[i] + np.ceil(range_for_checking * fs) + 1)
index = np.argmax(sample[start_index:end_index])
rpeak_temp1[i] = rpeak_temp1[i] - np.ceil(range_for_checking * fs) + index
return np.array(rpeak_temp1).astype(np.int64)
# TODO: CODE_REVIEW: Make hard-coded constants default method parameter
def detect_rpeak(ecg: DataStream,
fs: float = 64,
threshold: float = 0.5,
blackman_win_len_range: float = 0.2) -> DataStream:
"""
This program implements the Pan Tomkins algorithm on ECG signal to detect the R peaks
Since the ecg array can have discontinuity in the timestamp arrays the rr-interval calculated
in the algorithm is calculated in terms of the index in the sample array
The algorithm consists of some major steps
1. computation of the moving window integration of the signal in terms of blackman window of a prescribed length
2. compute all the peaks of the moving window integration signal
3. adaptive thresholding with dynamic signal and noise thresholds applied to filter out the R peak locations
4. confirm the R peaks through differentiation from the nearby peaks and remove the false peaks
:param ecg: ecg array of tuples (timestamp,value)
:param fs: sampling frequency
:param threshold: initial threshold to detect the R peak in a signal normalized by the 90th percentile. .5 is default.
:param blackman_win_len_range : the range to calculate blackman window length
:return: R peak array of tuples (timestamp, Rpeak interval)
"""
data = ecg.data
result = DataStream.from_datastream([ecg])
if len(data) == 0:
result.data = []
return result
sample = np.array([i.sample for i in data])
timestamp = np.array([i.start_time for i in data])
# computes the moving window integration of the signal
blackman_win_len = np.ceil(fs * blackman_win_len_range)
y = compute_moving_window_int(sample, fs, blackman_win_len)
peak_location_values = [(i, y[i]) for i in range(2, len(y) - 1) if check_peak(y[i - 2:i + 3])]
# initial RR interval average
peak_location = [i[0] for i in peak_location_values]
running_rr_avg = sum(np.diff(peak_location)) / (len(peak_location) - 1)
rpeak_temp1 = compute_r_peaks(threshold, running_rr_avg, y, peak_location_values)
rpeak_temp2 = remove_close_peaks(rpeak_temp1, sample, fs)
index = confirm_peaks(rpeak_temp2, sample, fs)
rpeak_timestamp = timestamp[index]
rpeak_value = np.diff(rpeak_timestamp)
rpeak_timestamp = rpeak_timestamp[1:]
result_data = []
for k in range(len(rpeak_value)):
result_data.append(
DataPoint.from_tuple(rpeak_timestamp[k], rpeak_value[k].seconds + rpeak_value[k].microseconds / 1e6))
# Create resulting datastream to be returned
result.data = result_data
return result
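# End-to-end usage sketch (hypothetical datastreams; constructing DataStream /
# DataPoint objects depends on the surrounding CerebralCortex pipeline):
#   rr = compute_rr_intervals(ecg_datastream, ecg_quality_datastream, fs=64.0)
#   rr.data is then a list of DataPoints carrying (timestamp, RR interval in seconds).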
|
nasirali1/CerebralCortex
|
cerebralcortex/data_processor/signalprocessing/ecg.py
|
Python
|
bsd-2-clause
| 16,830
|
# ------------------------------------------------------------------------------
# Copyright (c) 2010-2013, EVEthing team
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
# ------------------------------------------------------------------------------
from .apitask import APITask
from thing.models import RefType
class RefTypes(APITask):
name = 'thing.ref_types'
def run(self, url, taskstate_id, apikey_id, zero):
if self.init(taskstate_id, apikey_id) is False:
return
# Fetch the API data
if self.fetch_api(url, {}, use_auth=False) is False or self.root is None:
return
# Build a refTypeID:row dictionary
bulk_data = {}
for row in self.root.findall('result/rowset/row'):
bulk_data[int(row.attrib['refTypeID'])] = row
        # Bulk retrieve all of those ref types that exist
rt_map = RefType.objects.in_bulk(bulk_data.keys())
new = []
for refTypeID, row in bulk_data.items():
reftype = rt_map.get(refTypeID)
# RefType does not exist, make a new one
if reftype is None:
new.append(RefType(
id=refTypeID,
name=row.attrib['refTypeName'],
))
# RefType exists and name has changed, update it
elif reftype.name != row.attrib['refTypeName']:
reftype.name = row.attrib['refTypeName']
reftype.save()
        # Create any new ref types
if new:
RefType.objects.bulk_create(new)
return True
# ---------------------------------------------------------------------------
|
madcowfred/evething
|
thing/tasks/reftypes.py
|
Python
|
bsd-2-clause
| 2,946
|
# -*- coding: UTF-8 -*-
from django.conf import settings as dsettings
from django.contrib.auth import models as authModels
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.http import HttpResponse, Http404
from django.shortcuts import render, render_to_response, get_object_or_404
from django.template import RequestContext
from django.template.defaultfilters import slugify
from microblog import models, settings
from taggit.models import Tag, TaggedItem
from decorator import decorator
try:
import json
except ImportError:
import simplejson as json
def render_json(f):
"""
    Decorator to apply to a view in order to serialize its result as JSON.
"""
if dsettings.DEBUG:
ct = 'text/plain'
j = lambda d: json.dumps(d, indent=2)
else:
ct = 'application/json'
j = json.dumps
def wrapper(func, *args, **kw):
try:
result = func(*args, **kw)
except Exception, e:
result = j(str(e))
status = 500
else:
if isinstance(result, HttpResponse):
return result
else:
result = j(result)
status = 200
return HttpResponse(content=result, content_type=ct, status=status)
return decorator(wrapper, f)
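# Usage sketch (hypothetical view): the decorated view may return any
# json-serializable value, or an HttpResponse to bypass serialization, e.g.
#
# @render_json
# def api_ping(request):
#     return {'pong': True}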
def post_list(request):
return render(request, 'microblog/post_list.html', {})
def category(request, category):
category = get_object_or_404(models.Category, name=category)
return render_to_response(
'microblog/category.html',
{
'category': category,
},
context_instance=RequestContext(request)
)
def post_list_by_year(request, year, month=None):
return render_to_response(
'microblog/list_by_year.html',
{
'year': year,
'month': month,
},
context_instance=RequestContext(request)
)
def tag(request, tag):
tag = get_object_or_404(Tag, name=tag)
return render_to_response(
'microblog/tag.html',
{
'tag': tag,
},
context_instance=RequestContext(request)
)
def author(request, author):
user = [
u for u in authModels.User.objects.all()
if slugify('%s-%s' % (u.first_name, u.last_name)) == author
]
if not user:
raise Http404()
else:
user = user[0]
return render_to_response(
'microblog/author.html',
{
'author': user,
},
context_instance=RequestContext(request)
)
def _paginate_posts(post_list, request):
if settings.MICROBLOG_POST_LIST_PAGINATION:
paginator = Paginator(post_list, settings.MICROBLOG_POST_PER_PAGE)
try:
page = int(request.GET.get("page", "1"))
except ValueError:
page = 1
try:
posts = paginator.page(page)
except (EmptyPage, InvalidPage):
posts = paginator.page(1)
else:
paginator = Paginator(post_list, len(post_list) or 1)
posts = paginator.page(1)
return posts
def _posts_list(request, featured=False):
if settings.MICROBLOG_LANGUAGE_FALLBACK_ON_POST_LIST:
lang = None
else:
lang = request.LANGUAGE_CODE
return models.Post.objects\
.byLanguage(lang)\
.byFeatured(featured)\
.published()
def _post_detail(request, content):
if not settings.MICROBLOG_POST_FILTER([content.post], request.user):
raise Http404()
return render_to_response(
'microblog/post_detail.html',
{
'post': content.post,
'content': content
},
context_instance=RequestContext(request)
)
def _trackback_ping(request, content):
def success():
x = ('<?xml version="1.0" encoding="utf-8"?>\n'
'<response><error>0</error></response>')
return HttpResponse(content=x, content_type='text/xml')
def failure(message=''):
x = ('<?xml version="1.0" encoding="utf-8"?>\n'
'<response><error>1</error><message>%s</message></response>') % message
return HttpResponse(content=x, content_type='text/xml', status=400)
if request.method != 'POST':
return failure('only POST method is supported')
if not request.POST.get('url'):
return failure('url argument is mandatory')
t = {
'url': request.POST['url'],
'blog_name': request.POST.get('blog_name', ''),
'title': request.POST.get('title', ''),
'excerpt': request.POST.get('excerpt', ''),
}
from microblog.moderation import moderate
if not moderate(request, 'trackback', t['title'], url=t['url']):
return failure('moderated')
content.new_trackback(**t)
return success()
@render_json
def _comment_count(request, content):
post = content.post
if settings.MICROBLOG_COMMENT == 'comment':
import django_comments as comments
from django.contrib.contenttypes.models import ContentType
model = comments.get_model()
q = model.objects.filter(
content_type=ContentType.objects.get_for_model(post),
object_pk=post.id,
is_public=True
)
return q.count()
else:
import httplib2
from urllib import quote
h = httplib2.Http()
params = {
'forum_api_key': settings.MICROBLOG_COMMENT_DISQUS_FORUM_KEY,
'url': content.get_url(),
}
args = '&'.join('%s=%s' % (k, quote(v)) for k, v in params.items())
url = settings.MICROBLOG_COMMENT_DISQUS_API_URL + 'get_thread_by_url?%s' % args
resp, page = h.request(url)
if resp.status != 200:
return -1
page = json.loads(page)
if not page['succeeded']:
return -1
elif page['message'] is None:
return 0
else:
return page['message']['num_comments']
def _post404(f):
def wrapper(*args, **kw):
try:
return f(*args, **kw)
except models.PostContent.DoesNotExist:
raise Http404()
return wrapper
if settings.MICROBLOG_URL_STYLE == 'date':
def _get(slug, year, month, day):
return models.PostContent.objects\
.select_related('post')\
.getBySlugAndDate(slug, year, month, day)
@_post404
def post_detail(request, year, month, day, slug):
return _post_detail(
request,
content=_get(slug, year, month, day)
)
@_post404
def trackback_ping(request, year, month, day, slug):
return _trackback_ping(
request,
content=_get(slug, year, month, day)
)
@_post404
def comment_count(request, year, month, day, slug):
return _comment_count(
request,
content = _get(slug, year, month, day)
)
elif settings.MICROBLOG_URL_STYLE == 'category':
def _get(slug, category):
return models.PostContent.objects\
.select_related('post')\
.getBySlugAndCategory(slug, category)
@_post404
def post_detail(request, category, slug):
return _post_detail(
request,
content=_get(slug, category),
)
@_post404
def trackback_ping(request, category, slug):
return _trackback_ping(
request,
content=_get(slug, category),
)
@_post404
def comment_count(request, category, slug):
return _comment_count(
request,
content=_get(slug, category),
)
|
barrachri/epcon
|
microblog/views.py
|
Python
|
bsd-2-clause
| 7,631
|
#!/usr/bin/env python
import sys
from collections import namedtuple
import poppler
import cairo
from os.path import abspath
Point = namedtuple('Point', ['x', 'y'])
Line = namedtuple('Line', ['start', 'end'])
Polygon = namedtuple('Polygon', 'points')
Rectangle = namedtuple('Rectangle', ['top_left', 'bottom_right'])
AnnotationGroup = namedtuple('AnnotationGroup', ['name', 'color', 'shapes'])
Color = namedtuple('Color', ['red', 'green', 'blue'])
__all__ = [
'render_page',
'make_annotations',
]
def draw_line(context, line):
context.move_to(line.start.x, line.start.y)
context.line_to(line.end.x, line.end.y)
context.stroke()
def draw_polygon(context, polygon):
if len(polygon.points) == 0:
return
first_point = polygon.points[0]
context.move_to(first_point.x, first_point.y)
for line in polygon.points[1:]:
context.line_to(line.x, line.y)
context.stroke()
def draw_rectangle(context, rectangle):
width = abs(rectangle.bottom_right.x - rectangle.top_left.x)
height = abs(rectangle.bottom_right.y - rectangle.top_left.y)
context.rectangle(rectangle.top_left.x,
rectangle.top_left.y,
width,
height)
context.stroke()
RENDERERS = {}
RENDERERS[Line] = draw_line
RENDERERS[Rectangle] = draw_rectangle
RENDERERS[Polygon] = draw_polygon
class CairoPdfPageRenderer(object):
def __init__(self, pdf_page, svg_filename, png_filename):
self._svg_filename = abspath(svg_filename)
self._png_filename = abspath(png_filename) if png_filename else None
self._context, self._surface = self._get_context(
svg_filename, *pdf_page.get_size())
white = poppler.Color()
white.red = white.green = white.blue = 65535
black = poppler.Color()
black.red = black.green = black.blue = 0
# red = poppler.Color()
# red.red = red.green = red.blue = 0
# red.red = 65535
width = pdf_page.get_size()[0]
# We render everything 3 times, moving
# one page-width to the right each time.
self._offset_colors = [
(0, white, white, True),
(width, black, white, True),
(2 * width, black, black, False)
]
for offset, fg_color, bg_color, render_graphics in self._offset_colors:
# Render into context, with a different offset
# each time.
self._context.save()
self._context.translate(offset, 0)
sel = poppler.Rectangle()
sel.x1, sel.y1 = (0, 0)
sel.x2, sel.y2 = pdf_page.get_size()
if render_graphics:
pdf_page.render(self._context)
pdf_page.render_selection(
self._context, sel, sel, poppler.SELECTION_GLYPH,
fg_color, bg_color)
self._context.restore()
@staticmethod
def _get_context(filename, width, height):
SCALE = 1
# left, middle, right
N_RENDERINGS = 3
surface = cairo.SVGSurface(
filename, N_RENDERINGS * width * SCALE, height * SCALE)
# srf = cairo.ImageSurface(
# cairo.FORMAT_RGB24, int(w*SCALE), int(h*SCALE))
context = cairo.Context(surface)
context.scale(SCALE, SCALE)
# Set background color to white
context.set_source_rgb(1, 1, 1)
context.paint()
return context, surface
def draw(self, shape, color):
self._context.save()
self._context.set_line_width(1)
self._context.set_source_rgba(color.red,
color.green,
color.blue,
0.5)
self._context.translate(self._offset_colors[1][0], 0)
RENDERERS[type(shape)](self._context, shape)
self._context.restore()
def flush(self):
if self._png_filename is not None:
self._surface.write_to_png(self._png_filename)
# NOTE! The flush is rather expensive, since it writes out the svg
# data. The profile will show a large amount of time spent inside it.
# Removing it won't help the execution time at all, it will just move
# it somewhere that the profiler can't see it
# (at garbage collection time)
self._surface.flush()
self._surface.finish()
def render_page(pdf_filename, page_number, annotations, svg_file=None,
png_file=None):
"""
Render a single page of a pdf with graphical annotations added.
"""
page = extract_pdf_page(pdf_filename, page_number)
renderer = CairoPdfPageRenderer(page, svg_file, png_file)
for annotation in annotations:
assert isinstance(annotation, AnnotationGroup), (
"annotations: {0}, annotation: {1}".format(
annotations, annotation))
for shape in annotation.shapes:
renderer.draw(shape, annotation.color)
renderer.flush()
def extract_pdf_page(filename, page_number):
file_uri = "file://{0}".format(abspath(filename))
doc = poppler.document_new_from_file(file_uri, "")
page = doc.get_page(page_number)
return page
def make_annotations(table_container):
"""
Take the output of the table-finding algorithm (TableFinder) and create
AnnotationGroups. These can be drawn on top of the original PDF page to
visualise how the algorithm arrived at its output.
"""
annotations = []
annotations.append(
AnnotationGroup(
name='all_glyphs',
color=Color(0, 1, 0),
shapes=convert_rectangles(table_container.all_glyphs)))
annotations.append(
AnnotationGroup(
name='all_words',
color=Color(0, 0, 1),
shapes=convert_rectangles(table_container.all_words)))
annotations.append(
AnnotationGroup(
name='text_barycenters',
color=Color(0, 0, 1),
shapes=convert_barycenters(table_container.all_glyphs)))
annotations.append(
AnnotationGroup(
name='hat_graph_vertical',
color=Color(0, 1, 0),
shapes=make_hat_graph(
table_container._y_point_values,
table_container._center_lines,
direction="vertical")))
for table in table_container:
annotations.append(
AnnotationGroup(
name='row_edges',
color=Color(1, 0, 0),
shapes=convert_horizontal_lines(
table.row_edges, table.bounding_box)))
annotations.append(
AnnotationGroup(
name='column_edges',
color=Color(1, 0, 0),
shapes=convert_vertical_lines(
table.column_edges, table.bounding_box)))
annotations.append(
AnnotationGroup(
name='glyph_histogram_horizontal',
color=Color(1, 0, 0),
shapes=make_glyph_histogram(
table._x_glyph_histogram, table.bounding_box,
direction="horizontal")))
annotations.append(
AnnotationGroup(
name='glyph_histogram_vertical',
color=Color(1, 0, 0),
shapes=make_glyph_histogram(
table._y_glyph_histogram, table.bounding_box,
direction="vertical")))
annotations.append(
AnnotationGroup(
name='horizontal_glyph_above_threshold',
color=Color(0, 0, 0),
shapes=make_thresholds(
table._x_threshold_segs, table.bounding_box,
direction="horizontal")))
annotations.append(
AnnotationGroup(
name='vertical_glyph_above_threshold',
color=Color(0, 0, 0),
shapes=make_thresholds(
table._y_threshold_segs, table.bounding_box,
direction="vertical")))
# Draw bounding boxes last so that they appear on top
annotations.append(
AnnotationGroup(
name='table_bounding_boxes',
color=Color(0, 0, 1),
shapes=convert_rectangles(table_container.bounding_boxes)))
return annotations
def make_thresholds(segments, box, direction):
lines = []
for segment in segments:
if direction == "horizontal":
lines.append(Line(Point(segment.start, box.bottom + 10),
Point(segment.end, box.bottom + 10)))
else:
lines.append(Line(Point(10, segment.start),
Point(10, segment.end)))
return lines
def make_hat_graph(hats, center_lines, direction):
"""
Draw estimated text barycenter
"""
max_value = max(v for _, v in hats)
DISPLAY_WIDTH = 25
points = []
polygon = Polygon(points)
def point(x, y):
points.append(Point(x, y))
for position, value in hats:
point(((value / max_value - 1) * DISPLAY_WIDTH), position)
lines = []
for position in center_lines:
lines.append(Line(Point(-DISPLAY_WIDTH, position),
Point(0, position)))
return [polygon] + lines
def make_glyph_histogram(histogram, box, direction):
# if direction == "vertical":
# return []
bin_edges, bin_values = histogram
if not bin_edges:
# There are no glyphs, and nothing to render!
return []
points = []
polygon = Polygon(points)
def point(x, y):
points.append(Point(x, y))
# def line(*args):
# lines.append(Line(*args))
previous_value = 0 if direction == "horizontal" else box.bottom
x = zip(bin_edges, bin_values)
for edge, value in x:
if direction == "horizontal":
value *= 0.75
value = box.bottom - value
point(edge, previous_value)
point(edge, value)
else:
value *= 0.25
value += 7 # shift pixels to the right
point(previous_value, edge)
point(value, edge)
previous_value = value
# Final point is at 0
if direction == "horizontal":
point(edge, 0)
else:
point(box.bottom, edge)
# Draw edge density plot (not terribly interesting, should probably be
# deleted)
# lines = []
# if direction == "horizontal":
# for edge in bin_edges:
# lines.append(Line(Point(edge, box.bottom),
# Point(edge, box.bottom + 5)))
# else:
# for edge in bin_edges:
# lines.append(Line(Point(0, edge), Point(5, edge)))
return [polygon] # + lines
def convert_rectangles(boxes):
return [Rectangle(Point(b.left, b.top), Point(b.right, b.bottom))
for b in boxes]
def convert_barycenters(boxes):
return [Line(Point(b.left, b.barycenter.midpoint),
Point(b.right, b.barycenter.midpoint))
for b in boxes if b.barycenter is not None]
def convert_horizontal_lines(y_edges, bbox):
return [Line(Point(bbox.left, y), Point(bbox.right, y))
for y in y_edges]
def convert_vertical_lines(x_edges, bbox):
return [Line(Point(x, bbox.top), Point(x, bbox.bottom))
for x in x_edges]
if __name__ == '__main__':
annotations = [
AnnotationGroup(
name='',
color=Color(1, 0, 0),
shapes=[Rectangle(Point(100, 100), Point(200, 200))])
]
render_page(sys.argv[1], 0, annotations)
|
drj11/pdftables
|
pdftables/diagnostics.py
|
Python
|
bsd-2-clause
| 11,691
|
"""
Maximum likelihood covariance estimator.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
# avoid division truncation
from __future__ import division
import warnings
import numpy as np
from scipy import linalg
from ..base import BaseEstimator
from ..utils import check_array
from ..utils.extmath import fast_logdet
from ..metrics.pairwise import pairwise_distances
def log_likelihood(emp_cov, precision):
"""Computes the sample mean of the log_likelihood under a covariance model
computes the empirical expected log-likelihood (accounting for the
normalization terms and scaling), allowing for universal comparison (beyond
this software package)
Parameters
----------
emp_cov : 2D ndarray (n_features, n_features)
Maximum Likelihood Estimator of covariance
precision : 2D ndarray (n_features, n_features)
The precision matrix of the covariance model to be tested
Returns
-------
sample mean of the log-likelihood
"""
p = precision.shape[0]
log_likelihood_ = - np.sum(emp_cov * precision) + fast_logdet(precision)
log_likelihood_ -= p * np.log(2 * np.pi)
log_likelihood_ /= 2.
return log_likelihood_
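# Worked example (2x2 identity matrices): -np.sum(I * I) + fast_logdet(I) = -2,
# then -2 - 2 * np.log(2 * np.pi) ~= -5.6758, halved to ~= -2.8379, so
# log_likelihood(np.eye(2), np.eye(2)) ~= -2.8379.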
def empirical_covariance(X, assume_centered=False):
"""Computes the Maximum likelihood covariance estimator
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate
assume_centered : boolean
If True, data will not be centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data will be centered before computation.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
Empirical covariance (Maximum Likelihood Estimator).
"""
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
if X.shape[0] == 1:
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
if assume_centered:
covariance = np.dot(X.T, X) / X.shape[0]
else:
covariance = np.cov(X.T, bias=1)
if covariance.ndim == 0:
covariance = np.array([[covariance]])
return covariance
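# Minimal usage sketch (synthetic data, assuming only the numpy API above):
#   rng = np.random.RandomState(0)
#   X = rng.randn(100, 3)
#   cov = empirical_covariance(X)                         # centered internally
#   cov0 = empirical_covariance(X, assume_centered=True)  # no centering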
class EmpiricalCovariance(BaseEstimator):
"""Maximum likelihood covariance estimator
Read more in the :ref:`User Guide <covariance>`.
Parameters
----------
store_precision : bool
Specifies if the estimated precision is stored.
assume_centered : bool
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False (default), data are centered before computation.
Attributes
----------
location_ : array-like, shape (n_features,)
Estimated location, i.e. the estimated mean.
covariance_ : 2D ndarray, shape (n_features, n_features)
Estimated covariance matrix
precision_ : 2D ndarray, shape (n_features, n_features)
Estimated pseudo-inverse matrix.
(stored only if store_precision is True)
Examples
--------
>>> import numpy as np
>>> from sklearn.covariance import EmpiricalCovariance
>>> from sklearn.datasets import make_gaussian_quantiles
>>> real_cov = np.array([[.8, .3],
... [.3, .4]])
>>> rng = np.random.RandomState(0)
>>> X = rng.multivariate_normal(mean=[0, 0],
... cov=real_cov,
... size=500)
>>> cov = EmpiricalCovariance().fit(X)
>>> cov.covariance_
array([[0.7569..., 0.2818...],
[0.2818..., 0.3928...]])
>>> cov.location_
array([0.0622..., 0.0193...])
"""
def __init__(self, store_precision=True, assume_centered=False):
self.store_precision = store_precision
self.assume_centered = assume_centered
def _set_covariance(self, covariance):
"""Saves the covariance and precision estimates
Storage is done accordingly to `self.store_precision`.
Precision stored only if invertible.
Parameters
----------
covariance : 2D ndarray, shape (n_features, n_features)
Estimated covariance matrix to be stored, and from which precision
is computed.
"""
covariance = check_array(covariance)
# set covariance
self.covariance_ = covariance
# set precision
if self.store_precision:
self.precision_ = linalg.pinvh(covariance)
else:
self.precision_ = None
def get_precision(self):
"""Getter for the precision matrix.
Returns
-------
precision_ : array-like
The precision matrix associated to the current covariance object.
"""
if self.store_precision:
precision = self.precision_
else:
precision = linalg.pinvh(self.covariance_)
return precision
def fit(self, X, y=None):
"""Fits the Maximum Likelihood Estimator covariance model
according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples and
n_features is the number of features.
y
            not used, present for API consistency purposes.
Returns
-------
self : object
"""
X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance = empirical_covariance(
X, assume_centered=self.assume_centered)
self._set_covariance(covariance)
return self
def score(self, X_test, y=None):
"""Computes the log-likelihood of a Gaussian data set with
`self.covariance_` as an estimator of its covariance matrix.
Parameters
----------
X_test : array-like, shape = [n_samples, n_features]
Test data of which we compute the likelihood, where n_samples is
the number of samples and n_features is the number of features.
            X_test is assumed to be drawn from the same distribution as
the data used in fit (including centering).
y
            not used, present for API consistency purposes.
Returns
-------
res : float
The likelihood of the data set with `self.covariance_` as an
estimator of its covariance matrix.
"""
# compute empirical covariance of the test set
test_cov = empirical_covariance(
X_test - self.location_, assume_centered=True)
# compute log likelihood
res = log_likelihood(test_cov, self.get_precision())
return res
def error_norm(self, comp_cov, norm='frobenius', scaling=True,
squared=True):
"""Computes the Mean Squared Error between two covariance estimators.
(In the sense of the Frobenius norm).
Parameters
----------
comp_cov : array-like, shape = [n_features, n_features]
The covariance to compare with.
norm : str
The type of norm used to compute the error. Available error types:
- 'frobenius' (default): sqrt(tr(A^t.A))
            - 'spectral': sqrt(max(eigenvalues(A^t.A)))
where A is the error ``(comp_cov - self.covariance_)``.
scaling : bool
If True (default), the squared error norm is divided by n_features.
If False, the squared error norm is not rescaled.
squared : bool
Whether to compute the squared error norm or the error norm.
If True (default), the squared error norm is returned.
If False, the error norm is returned.
Returns
-------
The Mean Squared Error (in the sense of the Frobenius norm) between
`self` and `comp_cov` covariance estimators.
"""
# compute the error
error = comp_cov - self.covariance_
# compute the error norm
if norm == "frobenius":
squared_norm = np.sum(error ** 2)
elif norm == "spectral":
squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error)))
else:
raise NotImplementedError(
"Only spectral and frobenius norms are implemented")
# optionally scale the error norm
if scaling:
squared_norm = squared_norm / error.shape[0]
# finally get either the squared norm or the norm
if squared:
result = squared_norm
else:
result = np.sqrt(squared_norm)
return result
def mahalanobis(self, X):
"""Computes the squared Mahalanobis distances of given observations.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            The observations, for which we compute the Mahalanobis
            distances. Observations are assumed to be drawn from the same
            distribution as the data used in fit.
Returns
-------
dist : array, shape = [n_samples,]
Squared Mahalanobis distances of the observations.
"""
precision = self.get_precision()
# compute mahalanobis distances
dist = pairwise_distances(X, self.location_[np.newaxis, :],
metric='mahalanobis', VI=precision)
return np.reshape(dist, (len(X),)) ** 2
|
chrsrds/scikit-learn
|
sklearn/covariance/empirical_covariance_.py
|
Python
|
bsd-3-clause
| 9,848
|
#!/usr/bin/env python
import sys
bsd = '''
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the Willow Garage, Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
skip_check_tag = "Willow Garage BSD License not applicable"
nerrors = 0
import os
autofix = False
if "ECTO_LICENSE_AUTOFIX" in os.environ:
autofix = True
files = sys.argv[1:]
commentchars = { '.cpp' : '//',
'.hpp' : '//',
'.py' : '#',
'.cmake' : '#',
'.txt' : '#'
}
for filename in files:
txt = open(filename).read()
thiserror = False
result = filename + "..."
if skip_check_tag in txt:
result += "ok"
else:
for l in bsd.split('\n'):
if l not in txt:
result += "missing: " + l + "\n"
thiserror = True
if thiserror:
nerrors += 1
else:
result += "ok"
if thiserror and autofix:
newf = open(filename, "w")
        cmt = '#'  # fall back to hash comments if the extension is unknown
        for k, v in commentchars.iteritems():
            if filename.endswith(k):
                cmt = v
if txt.startswith('#!'):
hashbang, rest = txt.split('\n', 1)
print >>newf, hashbang
else:
rest = txt
print >>newf, cmt, bsd.replace('\n', '\n' + cmt + ' ')
print >>newf, rest
newf.close()
        result += " AUTOFIXED"
print result
sys.exit(nerrors)
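# Invocation sketch (grounded in the sys.argv / os.environ reads above):
#   python check_new_bsd_license.py src/foo.cpp src/bar.py
#   ECTO_LICENSE_AUTOFIX=1 python check_new_bsd_license.py src/foo.cpp
# The exit status is the number of files with missing license lines.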
|
drmateo/ecto
|
test/compile/check_new_bsd_license.py
|
Python
|
bsd-3-clause
| 2,820
|
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that files include headers from allowed directories.
Checks DEPS files in the source tree for rules, and applies those rules to
"#include" commands in source files. Any source file including something not
permitted by the DEPS files will fail.
The format of the deps file:
First you have the normal module-level deps. These are the ones used by
gclient. An example would be:
deps = {
"base":"http://foo.bar/trunk/base"
}
DEPS files not in the top-level of a module won't need this. Then you have
any additional include rules. You can add (using "+") or subtract (using "-")
from the previously specified rules (including module-level deps).
include_rules = [
# Code should be able to use base (it's specified in the module-level
# deps above), but nothing in "base/evil" because it's evil.
"-base/evil",
# But this one subdirectory of evil is OK.
"+base/evil/not",
# And it can include files from this other directory even though there is
# no deps rule for it.
"+tools/crime_fighter"
]
DEPS files may be placed anywhere in the tree. Each one applies to all
subdirectories, where there may be more DEPS files that provide additions or
subtractions for their own sub-trees.
There is an implicit rule for the current directory (where the DEPS file lives)
and all of its subdirectories. This prevents you from having to explicitly
allow the current directory everywhere. This implicit rule is applied first,
so you can modify or remove it using the normal include rules.
The rules are processed in order. This means you can explicitly allow a higher
directory and then take away permissions from sub-parts, or the reverse.
Note that all directory separators must be slashes (Unix-style) and not
backslashes. All directories should be relative to the source root and use
only lowercase.
"""
import os
import optparse
import pipes
import re
import sys
import copy
# Variable name used in the DEPS file to specify module-level deps.
DEPS_VAR_NAME = "deps"
# Variable name used in the DEPS file to add or subtract include files from
# the module-level deps.
INCLUDE_RULES_VAR_NAME = "include_rules"
# Optionally present in the DEPS file to list subdirectories which should not
# be checked. This allows us to skip third party code, for example.
SKIP_SUBDIRS_VAR_NAME = "skip_child_includes"
# The maximum number of lines to check in each source file before giving up.
MAX_LINES = 150
# The maximum line length, this is to be efficient in the case of very long
# lines (which can't be #includes).
MAX_LINE_LENGTH = 128
# Set to true for more output. This is set by the command line options.
VERBOSE = False
# This regular expression will be used to extract filenames from include
# statements.
EXTRACT_INCLUDE_PATH = re.compile('[ \t]*#[ \t]*(?:include|import)[ \t]+"(.*)"')
# In lowercase, using forward slashes as directory separators, ending in a
# forward slash. Set by the command line options.
BASE_DIRECTORY = ""
# The directories which contain the sources managed by git.
GIT_SOURCE_DIRECTORY = set()
# Specifies a single rule for an include, which can be either allow or disallow.
class Rule(object):
def __init__(self, allow, dir, source):
self._allow = allow
self._dir = dir
self._source = source
def __str__(self):
if (self._allow):
return '"+%s" from %s.' % (self._dir, self._source)
return '"-%s" from %s.' % (self._dir, self._source)
def ParentOrMatch(self, other):
"""Returns true if the input string is an exact match or is a parent
of the current rule. For example, the input "foo" would match "foo/bar"."""
return self._dir == other or self._dir.startswith(other + "/")
def ChildOrMatch(self, other):
"""Returns true if the input string would be covered by this rule. For
example, the input "foo/bar" would match the rule "foo"."""
return self._dir == other or other.startswith(self._dir + "/")
def ParseRuleString(rule_string, source):
"""Returns a tuple of a boolean indicating whether the directory is an allow
rule, and a string holding the directory name.
"""
if len(rule_string) < 1:
raise Exception('The rule string "%s" is too short\nin %s' %
(rule_string, source))
if rule_string[0] == "+":
return (True, rule_string[1:])
if rule_string[0] == "-":
return (False, rule_string[1:])
raise Exception('The rule string "%s" does not begin with a "+" or a "-"' %
rule_string)
class Rules:
def __init__(self):
"""Initializes the current rules with an empty rule list."""
self._rules = []
def __str__(self):
ret = "Rules = [\n"
ret += "\n".join([" %s" % x for x in self._rules])
ret += "]\n"
return ret
def AddRule(self, rule_string, source):
"""Adds a rule for the given rule string.
Args:
rule_string: The include_rule string read from the DEPS file to apply.
source: A string representing the location of that string (filename, etc.)
so that we can give meaningful errors.
"""
(add_rule, rule_dir) = ParseRuleString(rule_string, source)
# Remove any existing rules or sub-rules that apply. For example, if we're
# passed "foo", we should remove "foo", "foo/bar", but not "foobar".
self._rules = [x for x in self._rules if not x.ParentOrMatch(rule_dir)]
self._rules.insert(0, Rule(add_rule, rule_dir, source))
def DirAllowed(self, allowed_dir):
"""Returns a tuple (success, message), where success indicates if the given
directory is allowed given the current set of rules, and the message tells
why if the comparison failed."""
for rule in self._rules:
if rule.ChildOrMatch(allowed_dir):
# This rule applies.
if rule._allow:
return (True, "")
return (False, rule.__str__())
# No rules apply, fail.
return (False, "no rule applying")
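# Worked example of rule evaluation (hypothetical rule sources):
#   rules = Rules()
#   rules.AddRule("+base", "example")
#   rules.AddRule("-base/evil", "example")
#   rules.DirAllowed("base/ok")    -> (True, "")
#   rules.DirAllowed("base/evil")  -> (False, '"-base/evil" from example.')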
def ApplyRules(existing_rules, deps, includes, cur_dir):
"""Applies the given deps and include rules, returning the new rules.
Args:
existing_rules: A set of existing rules that will be combined.
deps: The list of imports from the "deps" section of the DEPS file.
include: The list of rules from the "include_rules" section of DEPS.
cur_dir: The current directory. We will create an implicit rule that
allows inclusion from this directory.
Returns: A new set of rules combining the existing_rules with the other
arguments.
"""
rules = copy.copy(existing_rules)
# First apply the implicit "allow" rule for the current directory.
if cur_dir.lower().startswith(BASE_DIRECTORY):
relative_dir = cur_dir[len(BASE_DIRECTORY) + 1:]
# Normalize path separators to slashes.
relative_dir = relative_dir.replace("\\", "/")
source = relative_dir
if len(source) == 0:
source = "top level" # Make the help string a little more meaningful.
rules.AddRule("+" + relative_dir, "Default rule for " + source)
else:
raise Exception("Internal error: base directory is not at the beginning" +
" for\n %s and base dir\n %s" %
(cur_dir, BASE_DIRECTORY))
# Next apply the DEPS additions, these are all allowed. Note that DEPS start
# out with "src/" which we want to trim.
for (index, key) in enumerate(deps):
if key.startswith("src/"):
key = key[4:]
rules.AddRule("+" + key, relative_dir + "'s deps for " + key)
# Last, apply the additional explicit rules.
for (index, rule_str) in enumerate(includes):
if not len(relative_dir):
rule_description = "the top level include_rules"
else:
rule_description = relative_dir + "'s include_rules"
rules.AddRule(rule_str, rule_description)
return rules
def ApplyDirectoryRules(existing_rules, dir_name):
"""Combines rules from the existing rules and the new directory.
Any directory can contain a DEPS file. Toplevel DEPS files can contain
module dependencies which are used by gclient. We use these, along with
additional include rules and implicit rules for the given directory, to
come up with a combined set of rules to apply for the directory.
Args:
existing_rules: The rules for the parent directory. We'll add-on to these.
dir_name: The directory name that the deps file may live in (if it exists).
This will also be used to generate the implicit rules.
Returns: A tuple containing: (1) the combined set of rules to apply to the
sub-tree, and (2) a list of all subdirectories that should NOT be
checked, as specified in the DEPS file (if any).
"""
  # Check for a .svn directory in this directory, or check whether this
  # directory is contained in the git source directories. This will tell us
  # if it's a source directory and should be checked.
if not (os.path.exists(os.path.join(dir_name, ".svn")) or
(dir_name.lower() in GIT_SOURCE_DIRECTORY)):
return (None, [])
# Check the DEPS file in this directory.
if VERBOSE:
print "Applying rules from", dir_name
def FromImpl(unused, unused2):
pass # NOP function so "From" doesn't fail.
def FileImpl(unused):
pass # NOP function so "File" doesn't fail.
class _VarImpl:
def __init__(self, local_scope):
self._local_scope = local_scope
def Lookup(self, var_name):
"""Implements the Var syntax."""
if var_name in self._local_scope.get("vars", {}):
return self._local_scope["vars"][var_name]
      raise Exception("Var is not defined: %s" % var_name)
local_scope = {}
global_scope = {
"File": FileImpl,
"From": FromImpl,
"Var": _VarImpl(local_scope).Lookup,
}
deps_file = os.path.join(dir_name, "DEPS")
if os.path.isfile(deps_file):
execfile(deps_file, global_scope, local_scope)
elif VERBOSE:
print " No deps file found in", dir_name
# Even if a DEPS file does not exist we still invoke ApplyRules
# to apply the implicit "allow" rule for the current directory
deps = local_scope.get(DEPS_VAR_NAME, {})
include_rules = local_scope.get(INCLUDE_RULES_VAR_NAME, [])
skip_subdirs = local_scope.get(SKIP_SUBDIRS_VAR_NAME, [])
return (ApplyRules(existing_rules, deps, include_rules, dir_name),
skip_subdirs)
def ShouldCheckFile(file_name):
"""Returns True if the given file is a type we want to check."""
checked_extensions = [
'.h',
'.cc',
'.m',
'.mm',
]
basename, extension = os.path.splitext(file_name)
return extension in checked_extensions
def CheckLine(rules, line):
"""Checks the given file with the given rule set. If the line is an #include
directive and is illegal, a string describing the error will be returned.
Otherwise, None will be returned."""
found_item = EXTRACT_INCLUDE_PATH.match(line)
if not found_item:
return None # Not a match
include_path = found_item.group(1)
# Fix up backslashes in case somebody accidentally used them.
  include_path = include_path.replace("\\", "/")
if include_path.find("/") < 0:
# Don't fail when no directory is specified. We may want to be more
# strict about this in the future.
if VERBOSE:
print " WARNING: directory specified with no path: " + include_path
return None
(allowed, why_failed) = rules.DirAllowed(include_path)
if not allowed:
if VERBOSE:
retval = "\nFor " + rules.__str__()
else:
retval = ""
return retval + ('Illegal include: "%s"\n Because of %s' %
(include_path, why_failed))
return None
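# A hedged usage sketch. EXTRACT_INCLUDE_PATH and the Rules class are
# defined earlier in this file; the include regex is assumed to match
# C-style directives like '#include "base/logging.h"'.
#
#   rules = Rules()
#   rules.AddRule("+base", "example rule")
#   CheckLine(rules, '#include "base/logging.h"')  # -> None if allowed
#   CheckLine(rules, 'int x = 0;')                 # -> None (not an include)
#   CheckLine(rules, '#include "chrome/foo.h"')    # -> error string if the
#                                                  #    rule set forbids it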
def CheckFile(rules, file_name):
"""Checks the given file with the given rule set.
Args:
rules: The set of rules that apply to files in this directory.
file_name: The source file to check.
Returns: Either a string describing the error if there was one, or None if
the file checked out OK.
"""
if VERBOSE:
print "Checking: " + file_name
ret_val = "" # We'll collect the error messages in here
try:
cur_file = open(file_name, "r")
in_if0 = 0
for _ in range(MAX_LINES):
cur_line = cur_file.readline(MAX_LINE_LENGTH).strip()
# Check to see if we're at / inside a #if 0 block
if cur_line == '#if 0':
in_if0 += 1
continue
if in_if0 > 0:
if cur_line.startswith('#if'):
in_if0 += 1
elif cur_line == '#endif':
in_if0 -= 1
continue
line_status = CheckLine(rules, cur_line)
if line_status is not None:
if len(line_status) > 0: # Add newline to separate messages.
line_status += "\n"
ret_val += line_status
cur_file.close()
except IOError:
if VERBOSE:
print "Unable to open file: " + file_name
# Nothing to close here: if open() itself failed, cur_file was never bound.
# Map empty string to None for easier checking.
if len(ret_val) == 0:
return None
return ret_val
def CheckDirectory(parent_rules, dir_name):
(rules, skip_subdirs) = ApplyDirectoryRules(parent_rules, dir_name)
if rules is None:
return True
# Collect a list of all files and directories to check.
files_to_check = []
dirs_to_check = []
success = True
contents = os.listdir(dir_name)
for cur in contents:
if cur in skip_subdirs:
continue # Don't check children that DEPS has asked us to skip.
full_name = os.path.join(dir_name, cur)
if os.path.isdir(full_name):
dirs_to_check.append(full_name)
elif ShouldCheckFile(full_name):
files_to_check.append(full_name)
# First check all files in this directory.
for cur in files_to_check:
file_status = CheckFile(rules, cur)
if file_status is not None:
print "ERROR in " + cur + "\n" + file_status
success = False
# Next recurse into the subdirectories.
for cur in dirs_to_check:
if not CheckDirectory(rules, cur):
success = False
return success
def GetGitSourceDirectory(root):
"""Returns a set of the directories to be checked.
Args:
root: The repository root where .git directory exists.
Returns:
A set of directories which contain sources managed by git.
"""
git_source_directory = set()
popen_out = os.popen("cd %s && git ls-files --full-name ." %
pipes.quote(root))
for line in popen_out.readlines():
dir_name = os.path.join(root, os.path.dirname(line))
# Add the directory as well as all the parent directories.
while dir_name != root:
git_source_directory.add(dir_name)
dir_name = os.path.dirname(dir_name)
git_source_directory.add(root)
return git_source_directory
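# Worked example: for a repository at /repo that tracks only src/a/b.cc,
# this returns {'/repo', '/repo/src', '/repo/src/a'} (the file's directory
# plus all of its parents up to and including the root).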
def PrintUsage():
print """Usage: python checkdeps.py [--root <root>] [tocheck]
--root Specifies the repository root. This defaults to "../../.." relative
to the script file. This will be correct given the normal location
of the script in "<root>/tools/checkdeps".
tocheck Specifies the directory, relative to root, to check. This defaults
to "." so it checks everything. Only one level deep is currently
supported, so you can say "chrome" but not "chrome/browser".
Examples:
python checkdeps.py
python checkdeps.py --root c:\\source chrome"""
def checkdeps(options, args):
global VERBOSE
if options.verbose:
VERBOSE = True
# Optional base directory of the repository.
global BASE_DIRECTORY
if not options.base_directory:
BASE_DIRECTORY = os.path.abspath(
os.path.join(os.path.abspath(os.path.dirname(__file__)), "../.."))
else:
BASE_DIRECTORY = os.path.abspath(options.base_directory)
# Figure out which directory we have to check.
if len(args) == 0:
# No directory to check specified, use the repository root.
start_dir = BASE_DIRECTORY
elif len(args) == 1:
# Directory specified. Start here. It's supposed to be relative to the
# base directory.
start_dir = os.path.abspath(os.path.join(BASE_DIRECTORY, args[0]))
else:
# More than one argument, we don't handle this.
PrintUsage()
return 1
print "Using base directory:", BASE_DIRECTORY
print "Checking:", start_dir
base_rules = Rules()
# The base directory should be lower case from here on since it will be used
# for substring matching on the includes, and we compile on case-insensitive
# systems. Plus, we always use slashes here since the include parsing code
# will also normalize to slashes.
BASE_DIRECTORY = BASE_DIRECTORY.lower()
BASE_DIRECTORY = BASE_DIRECTORY.replace("\\", "/")
start_dir = start_dir.replace("\\", "/")
if os.path.exists(os.path.join(BASE_DIRECTORY, ".git")):
global GIT_SOURCE_DIRECTORY
GIT_SOURCE_DIRECTORY = GetGitSourceDirectory(BASE_DIRECTORY)
success = CheckDirectory(base_rules, start_dir)
if not success:
print "\nFAILED\n"
return 1
print "\nSUCCESS\n"
return 0
def main():
option_parser = optparse.OptionParser()
option_parser.add_option("", "--root", default="", dest="base_directory",
help='Specifies the repository root. This defaults '
'to "../../.." relative to the script file, which '
'will normally be the repository root.')
option_parser.add_option("-v", "--verbose", action="store_true",
default=False, help="Print debug logging")
options, args = option_parser.parse_args()
return checkdeps(options, args)
if '__main__' == __name__:
sys.exit(main())
|
rogerwang/chromium
|
tools/checkdeps/checkdeps.py
|
Python
|
bsd-3-clause
| 17591
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with url parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottlepy.org/
Copyright (c) 2014, Marcel Hellkamp.
License: MIT (see LICENSE for details)
"""
from __future__ import with_statement
__author__ = 'Marcel Hellkamp'
__version__ = '0.13-dev'
__license__ = 'MIT'
# The gevent server adapter needs to patch some modules before they are imported
# This is why we parse the commandline parameters here but handle them later
if __name__ == '__main__':
from optparse import OptionParser
_cmd_parser = OptionParser(usage="usage: %prog [options] package.module:app")
_opt = _cmd_parser.add_option
_opt("--version", action="store_true", help="show version number.")
_opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.")
_opt("-s", "--server", default='wsgiref', help="use SERVER as backend.")
_opt("-p", "--plugin", action="append", help="install additional plugin/s.")
_opt("--debug", action="store_true", help="start server in debug mode.")
_opt("--reload", action="store_true", help="auto-reload on file changes.")
_cmd_options, _cmd_args = _cmd_parser.parse_args()
if _cmd_options.server and _cmd_options.server.startswith('gevent'):
import gevent.monkey; gevent.monkey.patch_all()
import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\
os, re, subprocess, sys, tempfile, threading, time, warnings
from datetime import date as datedate, datetime, timedelta
from tempfile import TemporaryFile
from traceback import format_exc, print_exc
from inspect import getargspec
from unicodedata import normalize
try: from simplejson import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
try: from json import dumps as json_dumps, loads as json_lds
except ImportError:
try: from django.utils.simplejson import dumps as json_dumps, loads as json_lds
except ImportError:
def json_dumps(data):
raise ImportError("JSON support requires Python 2.6 or simplejson.")
json_lds = json_dumps
# We now try to fix 2.5/2.6/3.1/3.2 incompatibilities.
# It ain't pretty but it works... Sorry for the mess.
py = sys.version_info
py3k = py >= (3, 0, 0)
py25 = py < (2, 6, 0)
py31 = (3, 1, 0) <= py < (3, 2, 0)
# Workaround for the missing "as" keyword in py3k.
def _e(): return sys.exc_info()[1]
# Workaround for the "print is a keyword/function" Python 2/3 dilemma
# and a fallback for mod_wsgi (restricts stdout/err attribute access)
try:
_stdout, _stderr = sys.stdout.write, sys.stderr.write
except IOError:
_stdout = lambda x: sys.stdout.write(x)
_stderr = lambda x: sys.stderr.write(x)
# Lots of stdlib and builtin differences.
if py3k:
import http.client as httplib
import _thread as thread
from urllib.parse import urljoin, SplitResult as UrlSplitResult
from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote
urlunquote = functools.partial(urlunquote, encoding='latin1')
from http.cookies import SimpleCookie
from collections import MutableMapping as DictMixin
import pickle
from io import BytesIO
from configparser import ConfigParser
basestring = str
unicode = str
json_loads = lambda s: json_lds(touni(s))
callable = lambda x: hasattr(x, '__call__')
imap = map
def _raise(*a): raise a[0](a[1]).with_traceback(a[2])
else: # 2.x
import httplib
import thread
from urlparse import urljoin, SplitResult as UrlSplitResult
from urllib import urlencode, quote as urlquote, unquote as urlunquote
from Cookie import SimpleCookie
from itertools import imap
import cPickle as pickle
from StringIO import StringIO as BytesIO
from ConfigParser import SafeConfigParser as ConfigParser
if py25:
msg = "Python 2.5 support may be dropped in future versions of Bottle."
warnings.warn(msg, DeprecationWarning)
from UserDict import DictMixin
def next(it): return it.next()
bytes = str
else: # 2.6, 2.7
from collections import MutableMapping as DictMixin
unicode = unicode
json_loads = json_lds
eval(compile('def _raise(*a): raise a[0], a[1], a[2]', '<py3fix>', 'exec'))
# Some helpers for string/byte handling
def tob(s, enc='utf8'):
return s.encode(enc) if isinstance(s, unicode) else bytes(s)
def touni(s, enc='utf8', err='strict'):
if isinstance(s, bytes):
return s.decode(enc, err)
else:
return unicode(s or ("" if s is None else s))
tonat = touni if py3k else tob
# 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense).
# 3.1 needs a workaround.
if py31:
from io import TextIOWrapper
class NCTextIOWrapper(TextIOWrapper):
def close(self): pass # Keep wrapped buffer open.
# A bug in functools causes it to break if the wrapper is an instance method
def update_wrapper(wrapper, wrapped, *a, **ka):
try: functools.update_wrapper(wrapper, wrapped, *a, **ka)
except AttributeError: pass
# These helpers are used at module level and need to be defined first.
# And yes, I know PEP-8, but sometimes a lower-case classname makes more sense.
def depr(message, hard=False):
warnings.warn(message, DeprecationWarning, stacklevel=3)
def makelist(data): # This is just too handy
if isinstance(data, (tuple, list, set, dict)): return list(data)
elif data: return [data]
else: return []
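# Quick examples of the normalization makelist performs:
#   makelist('abc')      -> ['abc']
#   makelist(('a', 'b')) -> ['a', 'b']
#   makelist(None)       -> []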
class DictProperty(object):
''' Property that maps to a key in a local dict-like attribute. '''
def __init__(self, attr, key=None, read_only=False):
self.attr, self.key, self.read_only = attr, key, read_only
def __call__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter, self.key = func, self.key or func.__name__
return self
def __get__(self, obj, cls):
if obj is None: return self
key, storage = self.key, getattr(obj, self.attr)
if key not in storage: storage[key] = self.getter(obj)
return storage[key]
def __set__(self, obj, value):
if self.read_only: raise AttributeError("Read-Only property.")
getattr(obj, self.attr)[self.key] = value
def __delete__(self, obj):
if self.read_only: raise AttributeError("Read-Only property.")
del getattr(obj, self.attr)[self.key]
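# A minimal usage sketch for DictProperty (the names below are illustrative,
# not part of bottle's API). The decorated method is called once and its
# result is cached under `key` in the instance's dict-like attribute:
#
#   class Example(object):
#       def __init__(self):
#           self.storage = {}
#       @DictProperty('storage', 'answer', read_only=True)
#       def answer(self):
#           return 42
#
#   e = Example()
#   e.answer  # -> 42, computed once and cached in e.storage['answer']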
class cached_property(object):
''' A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property. '''
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None: return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
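# Usage sketch: the first access runs the function and stores the result in
# the instance __dict__, which shadows the descriptor on later lookups.
#
#   class Thing(object):
#       @cached_property
#       def expensive(self):
#           return compute()   # `compute` is a hypothetical expensive call
#
#   t = Thing()
#   t.expensive       # calls compute() and caches the result
#   t.expensive       # served from t.__dict__, no recomputation
#   del t.expensive   # resets the cache; the next access recomputes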
class lazy_attribute(object):
''' A property that caches itself to the class object. '''
def __init__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter = func
def __get__(self, obj, cls):
value = self.getter(cls)
setattr(cls, self.__name__, value)
return value
###############################################################################
# Exceptions and Events ########################################################
###############################################################################
class BottleException(Exception):
""" A base class for exceptions used by bottle. """
pass
###############################################################################
# Routing ######################################################################
###############################################################################
class RouteError(BottleException):
""" This is a base class for all routing related exceptions """
class RouteReset(BottleException):
""" If raised by a plugin or request handler, the route is reset and all
plugins are re-applied. """
class RouterUnknownModeError(RouteError): pass
class RouteSyntaxError(RouteError):
""" The route parser found something not supported by this router. """
class RouteBuildError(RouteError):
""" The route could not be built. """
def _re_flatten(p):
''' Turn all capturing groups in a regular expression pattern into
non-capturing groups. '''
if '(' not in p: return p
return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))',
lambda m: m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:', p)
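# Two quick examples of the flattening:
#   _re_flatten(r'/(foo)/(?P<bar>x)') -> '/(?:foo)/(?:x)'
#   _re_flatten(r'/(?:a)\(b')         -> r'/(?:a)\(b'   (escaped and
#                                        non-capturing groups are left alone)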
class Router(object):
''' A Router is an ordered collection of route->target pairs. It is used to
efficiently match WSGI requests against a number of routes and return
the first target that satisfies the request. The target may be anything,
usually a string, ID or callable object. A route consists of a path-rule
and a HTTP method.
The path-rule is either a static path (e.g. `/contact`) or a dynamic
path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax
and details on the matching order are described in docs:`routing`.
'''
default_pattern = '[^/]+'
default_filter = 're'
#: The current CPython regexp implementation does not allow more
#: than 99 matching groups per regular expression.
_MAX_GROUPS_PER_PATTERN = 99
def __init__(self, strict=False):
self.rules = [] # All rules in order
self._groups = {} # index of regexes to find them in dyna_routes
self.builder = {} # Data structure for the url builder
self.static = {} # Search structure for static routes
self.dyna_routes = {}
self.dyna_regexes = {} # Search structure for dynamic routes
#: If true, static routes are no longer checked first.
self.strict_order = strict
self.filters = {
're': lambda conf:
(_re_flatten(conf or self.default_pattern), None, None),
'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))),
'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))),
'path': lambda conf: (r'.+?', None, None)}
def add_filter(self, name, func):
''' Add a filter. The provided function is called with the configuration
string as parameter and must return a (regexp, to_python, to_url) tuple.
The first element is a string, the last two are callables or None. '''
self.filters[name] = func
rule_syntax = re.compile('(\\\\*)'\
'(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'\
'|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'\
'(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))')
def _itertokens(self, rule):
offset, prefix = 0, ''
for match in self.rule_syntax.finditer(rule):
prefix += rule[offset:match.start()]
g = match.groups()
if len(g[0])%2: # Escaped wildcard
prefix += match.group(0)[len(g[0]):]
offset = match.end()
continue
if prefix:
yield prefix, None, None
name, filtr, conf = g[4:7] if g[2] is None else g[1:4]
yield name, filtr or 'default', conf or None
offset, prefix = match.end(), ''
if offset <= len(rule) or prefix:
yield prefix+rule[offset:], None, None
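# Tokenization sketch: for the rule '/wiki/<page>' this yields, in order,
#   ('/wiki/', None, None)      # static prefix
#   ('page', 'default', None)   # wildcard using the default filter
#   ('', None, None)            # trailing (empty) literal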
def add(self, rule, method, target, name=None):
''' Add a new rule or replace the target for an existing rule. '''
anons = 0 # Number of anonymous wildcards found
keys = [] # Names of keys
pattern = '' # Regular expression pattern with named groups
filters = [] # Lists of wildcard input filters
builder = [] # Data structure for the URL builder
is_static = True
for key, mode, conf in self._itertokens(rule):
if mode:
is_static = False
if mode == 'default': mode = self.default_filter
mask, in_filter, out_filter = self.filters[mode](conf)
if not key:
pattern += '(?:%s)' % mask
key = 'anon%d' % anons
anons += 1
else:
pattern += '(?P<%s>%s)' % (key, mask)
keys.append(key)
if in_filter: filters.append((key, in_filter))
builder.append((key, out_filter or str))
elif key:
pattern += re.escape(key)
builder.append((None, key))
self.builder[rule] = builder
if name: self.builder[name] = builder
if is_static and not self.strict_order:
self.static.setdefault(method, {})
self.static[method][self.build(rule)] = (target, None)
return
try:
re_pattern = re.compile('^(%s)$' % pattern)
re_match = re_pattern.match
except re.error:
raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, _e()))
if filters:
def getargs(path):
url_args = re_match(path).groupdict()
for name, wildcard_filter in filters:
try:
url_args[name] = wildcard_filter(url_args[name])
except ValueError:
raise HTTPError(400, 'Path has wrong format.')
return url_args
elif re_pattern.groupindex:
def getargs(path):
return re_match(path).groupdict()
else:
getargs = None
flatpat = _re_flatten(pattern)
whole_rule = (rule, flatpat, target, getargs)
if (flatpat, method) in self._groups:
if DEBUG:
msg = 'Route <%s %s> overwrites a previously defined route'
warnings.warn(msg % (method, rule), RuntimeWarning)
self.dyna_routes[method][self._groups[flatpat, method]] = whole_rule
else:
self.dyna_routes.setdefault(method, []).append(whole_rule)
self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1
self._compile(method)
def _compile(self, method):
all_rules = self.dyna_routes[method]
comborules = self.dyna_regexes[method] = []
maxgroups = self._MAX_GROUPS_PER_PATTERN
for x in range(0, len(all_rules), maxgroups):
some = all_rules[x:x+maxgroups]
combined = (flatpat for (_, flatpat, _, _) in some)
combined = '|'.join('(^%s$)' % flatpat for flatpat in combined)
combined = re.compile(combined).match
rules = [(target, getargs) for (_, _, target, getargs) in some]
comborules.append((combined, rules))
def build(self, _name, *anons, **query):
''' Build a URL by filling the wildcards in a rule. '''
builder = self.builder.get(_name)
if not builder: raise RouteBuildError("No route with that name.", _name)
try:
for i, value in enumerate(anons): query['anon%d'%i] = value
url = ''.join([f(query.pop(n)) if n else f for (n,f) in builder])
return url if not query else url+'?'+urlencode(query)
except KeyError:
raise RouteBuildError('Missing URL argument: %r' % _e().args[0])
def match(self, environ):
''' Return a (target, url_args) tuple or raise HTTPError(400/404/405). '''
verb = environ['REQUEST_METHOD'].upper()
path = environ['PATH_INFO'] or '/'
target = None
if verb == 'HEAD':
methods = ['PROXY', verb, 'GET', 'ANY']
else:
methods = ['PROXY', verb, 'ANY']
for method in methods:
if method in self.static and path in self.static[method]:
target, getargs = self.static[method][path]
return target, getargs(path) if getargs else {}
elif method in self.dyna_regexes:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
target, getargs = rules[match.lastindex - 1]
return target, getargs(path) if getargs else {}
# No matching route found. Collect alternative methods for 405 response
allowed = set([])
nocheck = set(methods)
for method in set(self.static) - nocheck:
if path in self.static[method]:
allowed.add(method)
for method in set(self.dyna_regexes) - allowed - nocheck:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
allowed.add(method)
if allowed:
allow_header = ",".join(sorted(allowed))
raise HTTPError(405, "Method not allowed.", Allow=allow_header)
# No matching route and no alternative method found. We give up
raise HTTPError(404, "Not found: " + repr(path))
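# A minimal end-to-end sketch of the Router API. Targets are opaque to the
# router; a plain string is used here for illustration:
#
#   r = Router()
#   r.add('/hello/<name>', 'GET', 'hello-target', name='hello')
#   r.build('hello', name='world')
#   # -> '/hello/world'
#   r.match({'REQUEST_METHOD': 'GET', 'PATH_INFO': '/hello/world'})
#   # -> ('hello-target', {'name': 'world'})
#   r.match({'REQUEST_METHOD': 'POST', 'PATH_INFO': '/hello/world'})
#   # raises HTTPError(405) with an "Allow: GET" header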
class Route(object):
''' This class wraps a route callback along with route specific metadata and
configuration and applies Plugins on demand. It is also responsible for
turning a URL path rule into a regular expression usable by the Router.
'''
def __init__(self, app, rule, method, callback, name=None,
plugins=None, skiplist=None, **config):
#: The application this route is installed to.
self.app = app
#: The path-rule string (e.g. ``/wiki/:page``).
self.rule = rule
#: The HTTP method as a string (e.g. ``GET``).
self.method = method
#: The original callback with no plugins applied. Useful for introspection.
self.callback = callback
#: The name of the route (if specified) or ``None``.
self.name = name or None
#: A list of route-specific plugins (see :meth:`Bottle.route`).
self.plugins = plugins or []
#: A list of plugins to not apply to this route (see :meth:`Bottle.route`).
self.skiplist = skiplist or []
#: Additional keyword arguments passed to the :meth:`Bottle.route`
#: decorator are stored in this dictionary. Used for route-specific
#: plugin configuration and meta-data.
self.config = ConfigDict().load_dict(config)
@cached_property
def call(self):
''' The route callback with all plugins applied. This property is
created on demand and then cached to speed up subsequent requests.'''
return self._make_callback()
def reset(self):
''' Forget any cached values. The next time :attr:`call` is accessed,
all plugins are re-applied. '''
self.__dict__.pop('call', None)
def prepare(self):
''' Do all on-demand work immediately (useful for debugging).'''
self.call
def all_plugins(self):
''' Yield all Plugins affecting this route. '''
unique = set()
for p in reversed(self.app.plugins + self.plugins):
if True in self.skiplist: break
name = getattr(p, 'name', False)
if name and (name in self.skiplist or name in unique): continue
if p in self.skiplist or type(p) in self.skiplist: continue
if name: unique.add(name)
yield p
def _make_callback(self):
callback = self.callback
for plugin in self.all_plugins():
try:
if hasattr(plugin, 'apply'):
callback = plugin.apply(callback, self)
else:
callback = plugin(callback)
except RouteReset: # Try again with changed configuration.
return self._make_callback()
if callback is not self.callback:
update_wrapper(callback, self.callback)
return callback
def get_undecorated_callback(self):
''' Return the callback. If the callback is a decorated function, try to
recover the original function. '''
func = self.callback
func = getattr(func, '__func__' if py3k else 'im_func', func)
closure_attr = '__closure__' if py3k else 'func_closure'
while hasattr(func, closure_attr) and getattr(func, closure_attr):
func = getattr(func, closure_attr)[0].cell_contents
return func
def get_callback_args(self):
''' Return a list of argument names the callback (most likely) accepts
as keyword arguments. If the callback is a decorated function, try
to recover the original function before inspection. '''
return getargspec(self.get_undecorated_callback())[0]
def get_config(self, key, default=None):
''' Lookup a config field and return its value, first checking the
route.config, then route.app.config.'''
for conf in (self.config, self.app.config):
if key in conf: return conf[key]
return default
def __repr__(self):
cb = self.get_undecorated_callback()
return '<%s %r %r>' % (self.method, self.rule, cb)
###############################################################################
# Application Object ###########################################################
###############################################################################
class Bottle(object):
""" Each Bottle object represents a single, distinct web application and
consists of routes, callbacks, plugins, resources and configuration.
Instances are callable WSGI applications.
:param catchall: If true (default), handle all exceptions. Turn off to
let debugging middleware handle exceptions.
"""
def __init__(self, catchall=True, autojson=True):
#: A :class:`ConfigDict` for app specific configuration.
self.config = ConfigDict()
self.config._on_change = functools.partial(self.trigger_hook, 'config')
self.config.meta_set('autojson', 'validate', bool)
self.config.meta_set('catchall', 'validate', bool)
self.config['catchall'] = catchall
self.config['autojson'] = autojson
#: A :class:`ResourceManager` for application files
self.resources = ResourceManager()
self.routes = [] # List of installed :class:`Route` instances.
self.router = Router() # Maps requests to :class:`Route` instances.
self.error_handler = {}
# Core plugins
self.plugins = [] # List of installed plugins.
if self.config['autojson']:
self.install(JSONPlugin())
self.install(TemplatePlugin())
#: If true, most exceptions are caught and returned as :exc:`HTTPError`
catchall = DictProperty('config', 'catchall')
__hook_names = 'before_request', 'after_request', 'app_reset', 'config'
__hook_reversed = 'after_request'
@cached_property
def _hooks(self):
return dict((name, []) for name in self.__hook_names)
def add_hook(self, name, func):
''' Attach a callback to a hook. Three hooks are currently implemented:
before_request
Executed once before each request. The request context is
available, but no routing has happened yet.
after_request
Executed once after each request regardless of its outcome.
app_reset
Called whenever :meth:`Bottle.reset` is called.
'''
if name in self.__hook_reversed:
self._hooks[name].insert(0, func)
else:
self._hooks[name].append(func)
def remove_hook(self, name, func):
''' Remove a callback from a hook. '''
if name in self._hooks and func in self._hooks[name]:
self._hooks[name].remove(func)
return True
def trigger_hook(self, __name, *args, **kwargs):
''' Trigger a hook and return a list of results. '''
return [hook(*args, **kwargs) for hook in self._hooks[__name][:]]
def hook(self, name):
""" Return a decorator that attaches a callback to a hook. See
:meth:`add_hook` for details."""
def decorator(func):
self.add_hook(name, func)
return func
return decorator
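# Usage sketch for the hook decorator (standard bottle idiom):
#
#   app = Bottle()
#
#   @app.hook('before_request')
#   def audit():
#       print(request.path)   # `request` is the module-level thread-local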
def mount(self, prefix, app, **options):
''' Mount an application (:class:`Bottle` or plain WSGI) to a specific
URL prefix. Example::
root_app.mount('/admin/', admin_app)
:param prefix: path prefix or `mount-point`. If it ends in a slash,
that slash is mandatory.
:param app: an instance of :class:`Bottle` or a WSGI application.
All other parameters are passed to the underlying :meth:`route` call.
'''
segments = [p for p in prefix.split('/') if p]
if not segments: raise ValueError('Empty path prefix.')
path_depth = len(segments)
def mountpoint_wrapper():
try:
request.path_shift(path_depth)
rs = HTTPResponse([])
def start_response(status, headerlist, exc_info=None):
if exc_info:
try:
_raise(*exc_info)
finally:
exc_info = None
rs.status = status
for name, value in headerlist: rs.add_header(name, value)
return rs.body.append
body = app(request.environ, start_response)
if body and rs.body: body = itertools.chain(rs.body, body)
rs.body = body or rs.body
return rs
finally:
request.path_shift(-path_depth)
options.setdefault('skip', True)
options.setdefault('method', 'PROXY')
options.setdefault('mountpoint', {'prefix': prefix, 'target': app})
options['callback'] = mountpoint_wrapper
self.route('/%s/<:re:.*>' % '/'.join(segments), **options)
if not prefix.endswith('/'):
self.route('/' + '/'.join(segments), **options)
def merge(self, routes):
''' Merge the routes of another :class:`Bottle` application or a list of
:class:`Route` objects into this application. The routes keep their
'owner', meaning that the :data:`Route.app` attribute is not
changed. '''
if isinstance(routes, Bottle):
routes = routes.routes
for route in routes:
self.add_route(route)
def install(self, plugin):
''' Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
'''
if hasattr(plugin, 'setup'): plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin
def uninstall(self, plugin):
''' Uninstall plugins. Pass an instance to remove a specific plugin, a type
object to remove all plugins that match that type, a string to remove
all plugins with a matching ``name`` attribute or ``True`` to remove all
plugins. Return the list of removed plugins. '''
removed, remove = [], plugin
for i, plugin in list(enumerate(self.plugins))[::-1]:
if remove is True or remove is plugin or remove is type(plugin) \
or getattr(plugin, 'name', True) == remove:
removed.append(plugin)
del self.plugins[i]
if hasattr(plugin, 'close'): plugin.close()
if removed: self.reset()
return removed
def reset(self, route=None):
''' Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. '''
if route is None: routes = self.routes
elif isinstance(route, Route): routes = [route]
else: routes = [self.routes[route]]
for route in routes: route.reset()
if DEBUG:
for route in routes: route.prepare()
self.trigger_hook('app_reset')
def close(self):
''' Close the application and all installed plugins. '''
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
self.stopped = True
def run(self, **kwargs):
''' Calls :func:`run` with the same parameters. '''
run(self, **kwargs)
def match(self, environ):
""" Search for a matching route and return a (:class:`Route` , urlargs)
tuple. The second value is a dictionary with parameters extracted
from the URL. Raise :exc:`HTTPError` (404/405) on a non-match."""
return self.router.match(environ)
def get_url(self, routename, **kargs):
""" Return a string that matches a named route """
scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
location = self.router.build(routename, **kargs).lstrip('/')
return urljoin(urljoin('/', scriptname), location)
def add_route(self, route):
''' Add a route object, but do not change the :data:`Route.app`
attribute.'''
self.routes.append(route)
self.router.add(route.rule, route.method, route, name=route.name)
if DEBUG: route.prepare()
def route(self, path=None, method='GET', callback=None, name=None,
apply=None, skip=None, **config):
""" A decorator to bind a function to a request URL. Example::
@app.route('/hello/:name')
def hello(name):
return 'Hello %s' % name
The ``:name`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
"""
if callable(path): path, callback = None, path
plugins = makelist(apply)
skiplist = makelist(skip)
def decorator(callback):
# TODO: Documentation and tests
if isinstance(callback, basestring): callback = load(callback)
for rule in makelist(path) or yieldroutes(callback):
for verb in makelist(method):
verb = verb.upper()
route = Route(self, rule, verb, callback, name=name,
plugins=plugins, skiplist=skiplist, **config)
self.add_route(route)
return callback
return decorator(callback) if callback else decorator
def get(self, path=None, method='GET', **options):
""" Equals :meth:`route`. """
return self.route(path, method, **options)
def post(self, path=None, method='POST', **options):
""" Equals :meth:`route` with a ``POST`` method parameter. """
return self.route(path, method, **options)
def put(self, path=None, method='PUT', **options):
""" Equals :meth:`route` with a ``PUT`` method parameter. """
return self.route(path, method, **options)
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options)
def error(self, code=500):
""" Decorator: Register an output handler for a HTTP error code"""
def wrapper(handler):
self.error_handler[int(code)] = handler
return handler
return wrapper
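# Usage sketch (standard bottle idiom): register a custom 404 page.
#
#   @app.error(404)
#   def not_found(error):
#       return 'Nothing here, sorry (%s)' % error.status_line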
def default_error_handler(self, res):
return tob(template(ERROR_PAGE_TEMPLATE, e=res))
def _handle(self, environ):
path = environ['bottle.raw_path'] = environ['PATH_INFO']
if py3k:
try:
environ['PATH_INFO'] = path.encode('latin1').decode('utf8')
except UnicodeError:
return HTTPError(400, 'Invalid path string. Expected UTF-8')
try:
environ['bottle.app'] = self
request.bind(environ)
response.bind()
try:
self.trigger_hook('before_request')
route, args = self.router.match(environ)
environ['route.handle'] = route
environ['bottle.route'] = route
environ['route.url_args'] = args
return route.call(**args)
finally:
self.trigger_hook('after_request')
except HTTPResponse:
return _e()
except RouteReset:
route.reset()
return self._handle(environ)
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
stacktrace = format_exc()
environ['wsgi.errors'].write(stacktrace)
return HTTPError(500, "Internal Server Error", _e(), stacktrace)
def _cast(self, out, peek=None):
""" Try to convert the parameter into something WSGI compatible and set
correct HTTP headers when possible.
Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
iterable of strings and iterable of unicodes
"""
# Empty output is done here
if not out:
if 'Content-Length' not in response:
response['Content-Length'] = 0
return []
# Join lists of byte or unicode strings. Mixed lists are NOT supported
if isinstance(out, (tuple, list))\
and isinstance(out[0], (bytes, unicode)):
out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
# Encode unicode strings
if isinstance(out, unicode):
out = out.encode(response.charset)
# Byte Strings are just returned
if isinstance(out, bytes):
if 'Content-Length' not in response:
response['Content-Length'] = len(out)
return [out]
# HTTPError or HTTPException (recursive, because they may wrap anything)
# TODO: Handle these explicitly in handle() or make them iterable.
if isinstance(out, HTTPError):
out.apply(response)
out = self.error_handler.get(out.status_code, self.default_error_handler)(out)
return self._cast(out)
if isinstance(out, HTTPResponse):
out.apply(response)
return self._cast(out.body)
# File-like objects.
if hasattr(out, 'read'):
if 'wsgi.file_wrapper' in request.environ:
return request.environ['wsgi.file_wrapper'](out)
elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
return WSGIFileWrapper(out)
# Handle Iterables. We peek into them to detect their inner type.
try:
iout = iter(out)
first = next(iout)
while not first:
first = next(iout)
except StopIteration:
return self._cast('')
except HTTPResponse:
first = _e()
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
first = HTTPError(500, 'Unhandled exception', _e(), format_exc())
# These are the inner types allowed in iterator or generator objects.
if isinstance(first, HTTPResponse):
return self._cast(first)
elif isinstance(first, bytes):
new_iter = itertools.chain([first], iout)
elif isinstance(first, unicode):
encoder = lambda x: x.encode(response.charset)
new_iter = imap(encoder, itertools.chain([first], iout))
else:
msg = 'Unsupported response type: %s' % type(first)
return self._cast(HTTPError(500, msg))
if hasattr(out, 'close'):
new_iter = _closeiter(new_iter, out.close)
return new_iter
def wsgi(self, environ, start_response):
""" The bottle WSGI-interface. """
try:
out = self._cast(self._handle(environ))
# rfc2616 section 4.3
if response._status_code in (100, 101, 204, 304)\
or environ['REQUEST_METHOD'] == 'HEAD':
if hasattr(out, 'close'): out.close()
out = []
start_response(response._status_line, response.headerlist)
return out
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
err = '<h1>Critical error while processing request: %s</h1>' \
% html_escape(environ.get('PATH_INFO', '/'))
if DEBUG:
err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
'<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
% (html_escape(repr(_e())), html_escape(format_exc()))
environ['wsgi.errors'].write(err)
headers = [('Content-Type', 'text/html; charset=UTF-8')]
start_response('500 INTERNAL SERVER ERROR', headers, sys.exc_info())
return [tob(err)]
def __call__(self, environ, start_response):
''' Each instance of :class:`Bottle` is a WSGI application. '''
return self.wsgi(environ, start_response)
def __enter__(self):
''' Use this application as default for all module-level shortcuts. '''
default_app.push(self)
return self
def __exit__(self, exc_type, exc_value, traceback):
default_app.pop()
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
class BaseRequest(object):
""" A wrapper for WSGI environment dictionaries that adds a lot of
convenient access methods and properties. Most of them are read-only.
Adding new attributes to a request actually adds them to the environ
dictionary (as 'bottle.request.ext.<name>'). This is the recommended
way to store and access request-specific data.
"""
__slots__ = ('environ')
#: Maximum size of memory buffer for :attr:`body` in bytes.
MEMFILE_MAX = 102400
def __init__(self, environ=None):
""" Wrap a WSGI environ dictionary. """
#: The wrapped WSGI environ dictionary. This is the only real attribute.
#: All other attributes actually are read-only properties.
self.environ = {} if environ is None else environ
self.environ['bottle.request'] = self
@DictProperty('environ', 'bottle.app', read_only=True)
def app(self):
''' Bottle application handling this request. '''
raise RuntimeError('This request is not connected to an application.')
@DictProperty('environ', 'bottle.route', read_only=True)
def route(self):
""" The bottle :class:`Route` object that matches this request. """
raise RuntimeError('This request is not connected to a route.')
@DictProperty('environ', 'route.url_args', read_only=True)
def url_args(self):
""" The arguments extracted from the URL. """
raise RuntimeError('This request is not connected to a route.')
@property
def path(self):
''' The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
broken clients and avoid the "empty path" edge case). '''
return '/' + self.environ.get('PATH_INFO','').lstrip('/')
@property
def method(self):
''' The ``REQUEST_METHOD`` value as an uppercase string. '''
return self.environ.get('REQUEST_METHOD', 'GET').upper()
@DictProperty('environ', 'bottle.request.headers', read_only=True)
def headers(self):
''' A :class:`WSGIHeaderDict` that provides case-insensitive access to
HTTP request headers. '''
return WSGIHeaderDict(self.environ)
def get_header(self, name, default=None):
''' Return the value of a request header, or a given default value. '''
return self.headers.get(name, default)
@DictProperty('environ', 'bottle.request.cookies', read_only=True)
def cookies(self):
""" Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
decoded. Use :meth:`get_cookie` if you expect signed cookies. """
cookies = SimpleCookie(self.environ.get('HTTP_COOKIE','')).values()
return FormsDict((c.key, c.value) for c in cookies)
def get_cookie(self, key, default=None, secret=None):
""" Return the content of a cookie. To read a `Signed Cookie`, the
`secret` must match the one used to create the cookie (see
:meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
cookie or wrong signature), return a default value. """
value = self.cookies.get(key)
if secret and value:
dec = cookie_decode(value, secret) # (key, value) tuple or None
return dec[1] if dec and dec[0] == key else default
return value or default
@DictProperty('environ', 'bottle.request.query', read_only=True)
def query(self):
''' The :attr:`query_string` parsed into a :class:`FormsDict`. These
values are sometimes called "URL arguments" or "GET parameters", but
should not be confused with the "URL wildcards" provided by the
:class:`Router`. '''
get = self.environ['bottle.get'] = FormsDict()
pairs = _parse_qsl(self.environ.get('QUERY_STRING', ''))
for key, value in pairs:
get[key] = value
return get
@DictProperty('environ', 'bottle.request.forms', read_only=True)
def forms(self):
""" Form values parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The result is returned as a
:class:`FormsDict`. All keys and values are strings. File uploads
are stored separately in :attr:`files`. """
forms = FormsDict()
for name, item in self.POST.allitems():
if not isinstance(item, FileUpload):
forms[name] = item
return forms
@DictProperty('environ', 'bottle.request.params', read_only=True)
def params(self):
""" A :class:`FormsDict` with the combined values of :attr:`query` and
:attr:`forms`. File uploads are stored in :attr:`files`. """
params = FormsDict()
for key, value in self.query.allitems():
params[key] = value
for key, value in self.forms.allitems():
params[key] = value
return params
@DictProperty('environ', 'bottle.request.files', read_only=True)
def files(self):
""" File uploads parsed from `multipart/form-data` encoded POST or PUT
request body. The values are instances of :class:`FileUpload`.
"""
files = FormsDict()
for name, item in self.POST.allitems():
if isinstance(item, FileUpload):
files[name] = item
return files
@DictProperty('environ', 'bottle.request.json', read_only=True)
def json(self):
''' If the ``Content-Type`` header is ``application/json``, this
property holds the parsed content of the request body. Only requests
smaller than :attr:`MEMFILE_MAX` are processed to avoid memory
exhaustion. '''
if 'application/json' in self.environ.get('CONTENT_TYPE', ''):
return json_loads(self._get_body_string())
return None
def _iter_body(self, read, bufsize):
maxread = max(0, self.content_length)
while maxread:
part = read(min(maxread, bufsize))
if not part: break
yield part
maxread -= len(part)
def _iter_chunked(self, read, bufsize):
err = HTTPError(400, 'Error while parsing chunked transfer body.')
rn, sem, bs = tob('\r\n'), tob(';'), tob('')
while True:
header = read(1)
while header[-2:] != rn:
c = read(1)
header += c
if not c: raise err
if len(header) > bufsize: raise err
size, _, _ = header.partition(sem)
try:
maxread = int(tonat(size.strip()), 16)
except ValueError:
raise err
if maxread == 0: break
buff = bs
while maxread > 0:
if not buff:
buff = read(min(maxread, bufsize))
part, buff = buff[:maxread], buff[maxread:]
if not part: raise err
yield part
maxread -= len(part)
if read(2) != rn:
raise err
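# Wire-format sketch: a chunked body such as
#   b'4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n'
# is yielded as the parts b'Wiki' and b'pedia'. Each size line is a hex
# length, optionally followed by ';extension', and a zero-length chunk
# terminates the stream.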
@DictProperty('environ', 'bottle.request.body', read_only=True)
def _body(self):
body_iter = self._iter_chunked if self.chunked else self._iter_body
read_func = self.environ['wsgi.input'].read
body, body_size, is_temp_file = BytesIO(), 0, False
for part in body_iter(read_func, self.MEMFILE_MAX):
body.write(part)
body_size += len(part)
if not is_temp_file and body_size > self.MEMFILE_MAX:
body, tmp = TemporaryFile(mode='w+b'), body
body.write(tmp.getvalue())
del tmp
is_temp_file = True
self.environ['wsgi.input'] = body
body.seek(0)
return body
def _get_body_string(self):
''' Read the request body into a string, up to content-length or
MEMFILE_MAX. Raise HTTPError(413) on requests that are too large. '''
clen = self.content_length
if clen > self.MEMFILE_MAX:
raise HTTPError(413, 'Request too large')
if clen < 0: clen = self.MEMFILE_MAX + 1
data = self.body.read(clen)
if len(data) > self.MEMFILE_MAX: # Fail fast
raise HTTPError(413, 'Request too large')
return data
@property
def body(self):
""" The HTTP request body as a seek-able file-like object. Depending on
:attr:`MEMFILE_MAX`, this is either a temporary file or a
:class:`io.BytesIO` instance. Accessing this property for the first
time reads and replaces the ``wsgi.input`` environ variable.
Subsequent accesses just do a `seek(0)` on the file object. """
self._body.seek(0)
return self._body
@property
def chunked(self):
''' True if chunked transfer encoding was used for the request body. '''
return 'chunked' in self.environ.get('HTTP_TRANSFER_ENCODING', '').lower()
#: An alias for :attr:`query`.
GET = query
@DictProperty('environ', 'bottle.request.post', read_only=True)
def POST(self):
""" The values of :attr:`forms` and :attr:`files` combined into a single
:class:`FormsDict`. Values are either strings (form values) or
instances of :class:`cgi.FieldStorage` (file uploads).
"""
post = FormsDict()
# We default to application/x-www-form-urlencoded for everything that
# is not multipart and take the fast path (also: 3.1 workaround)
if not self.content_type.startswith('multipart/'):
pairs = _parse_qsl(tonat(self._get_body_string(), 'latin1'))
for key, value in pairs:
post[key] = value
return post
safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ: safe_env[key] = self.environ[key]
args = dict(fp=self.body, environ=safe_env, keep_blank_values=True)
if py31:
args['fp'] = NCTextIOWrapper(args['fp'], encoding='utf8',
newline='\n')
elif py3k:
args['encoding'] = 'utf8'
data = cgi.FieldStorage(**args)
data = data.list or []
for item in data:
if item.filename:
post[item.name] = FileUpload(item.file, item.name,
item.filename, item.headers)
else:
post[item.name] = item.value
return post
@property
def url(self):
""" The full request URI including hostname and scheme. If your app
lives behind a reverse proxy or load balancer and you get confusing
results, make sure that the ``X-Forwarded-Host`` header is set
correctly. """
return self.urlparts.geturl()
@DictProperty('environ', 'bottle.request.urlparts', read_only=True)
def urlparts(self):
''' The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
The tuple contains (scheme, host, path, query_string and fragment),
but the fragment is always empty because it is not visible to the
server. '''
env = self.environ
http = env.get('HTTP_X_FORWARDED_PROTO') or env.get('wsgi.url_scheme', 'http')
host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
if not host:
# HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
host = env.get('SERVER_NAME', '127.0.0.1')
port = env.get('SERVER_PORT')
if port and port != ('80' if http == 'http' else '443'):
host += ':' + port
path = urlquote(self.fullpath)
return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')
@property
def fullpath(self):
""" Request path including :attr:`script_name` (if present). """
return urljoin(self.script_name, self.path.lstrip('/'))
@property
def query_string(self):
""" The raw :attr:`query` part of the URL (everything in between ``?``
and ``#``) as a string. """
return self.environ.get('QUERY_STRING', '')
@property
def script_name(self):
''' The initial portion of the URL's `path` that was removed by a higher
level (server or routing middleware) before the application was
called. This script path is returned with leading and trailing
slashes. '''
script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
return '/' + script_name + '/' if script_name else '/'
def path_shift(self, shift=1):
''' Shift path segments from :attr:`path` to :attr:`script_name` and
vice versa.
:param shift: The number of path segments to shift. May be negative
to change the shift direction. (default: 1)
'''
script = self.environ.get('SCRIPT_NAME','/')
self['SCRIPT_NAME'], self['PATH_INFO'] = path_shift(script, self.path, shift)
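# Behavior sketch (this method delegates to the module-level path_shift
# helper defined later in this file):
#
#   # starting from SCRIPT_NAME='/a', PATH_INFO='/b/c'
#   request.path_shift(1)    # -> SCRIPT_NAME='/a/b', PATH_INFO='/c'
#   request.path_shift(-1)   # -> back to SCRIPT_NAME='/a', PATH_INFO='/b/c'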
@property
def content_length(self):
''' The request body length as an integer. The client is responsible
for setting this header. Otherwise, the real length of the body is
unknown and -1 is returned. In this case, :attr:`body` will be empty. '''
return int(self.environ.get('CONTENT_LENGTH') or -1)
@property
def content_type(self):
''' The Content-Type header as a lowercase-string (default: empty). '''
return self.environ.get('CONTENT_TYPE', '').lower()
@property
def is_xhr(self):
''' True if the request was triggered by an XMLHttpRequest. This only
works with JavaScript libraries that support the `X-Requested-With`
header (most of the popular libraries do). '''
requested_with = self.environ.get('HTTP_X_REQUESTED_WITH','')
return requested_with.lower() == 'xmlhttprequest'
@property
def is_ajax(self):
''' Alias for :attr:`is_xhr`. "Ajax" is not the right term. '''
return self.is_xhr
@property
def auth(self):
""" HTTP authentication data as a (user, password) tuple. This
implementation currently supports basic (not digest) authentication
only. If the authentication happened at a higher level (e.g. in the
front web-server or a middleware), the password field is None, but
the user field is looked up from the ``REMOTE_USER`` environ
variable. On any errors, None is returned. """
basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION',''))
if basic: return basic
ruser = self.environ.get('REMOTE_USER')
if ruser: return (ruser, None)
return None
@property
def remote_route(self):
""" A list of all IPs that were involved in this request, starting with
the client IP and followed by zero or more proxies. This only works
if all proxies support the ``X-Forwarded-For`` header. Note
that this information can be forged by malicious clients. """
proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
if proxy: return [ip.strip() for ip in proxy.split(',')]
remote = self.environ.get('REMOTE_ADDR')
return [remote] if remote else []
@property
def remote_addr(self):
""" The client IP as a string. Note that this information can be forged
by malicious clients. """
route = self.remote_route
return route[0] if route else None
def copy(self):
""" Return a new :class:`Request` with a shallow :attr:`environ` copy. """
return Request(self.environ.copy())
def get(self, value, default=None): return self.environ.get(value, default)
def __getitem__(self, key): return self.environ[key]
def __delitem__(self, key): self[key] = ""; del(self.environ[key])
def __iter__(self): return iter(self.environ)
def __len__(self): return len(self.environ)
def keys(self): return self.environ.keys()
def __setitem__(self, key, value):
""" Change an environ value and clear all caches that depend on it. """
if self.environ.get('bottle.request.readonly'):
raise KeyError('The environ dictionary is read-only.')
self.environ[key] = value
todelete = ()
if key == 'wsgi.input':
todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
elif key == 'QUERY_STRING':
todelete = ('query', 'params')
elif key.startswith('HTTP_'):
todelete = ('headers', 'cookies')
for key in todelete:
self.environ.pop('bottle.request.'+key, None)
def __repr__(self):
return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)
def __getattr__(self, name):
''' Search in self.environ for additional user defined attributes. '''
try:
var = self.environ['bottle.request.ext.%s'%name]
return var.__get__(self) if hasattr(var, '__get__') else var
except KeyError:
raise AttributeError('Attribute %r not defined.' % name)
def __setattr__(self, name, value):
if name == 'environ': return object.__setattr__(self, name, value)
self.environ['bottle.request.ext.%s'%name] = value
def _hkey(s):
return s.title().replace('_','-')
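# Header-key normalization examples:
#   _hkey('content_type')    -> 'Content-Type'
#   _hkey('X_FORWARDED_FOR') -> 'X-Forwarded-For'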
class HeaderProperty(object):
def __init__(self, name, reader=None, writer=str, default=''):
self.name, self.default = name, default
self.reader, self.writer = reader, writer
self.__doc__ = 'Current value of the %r header.' % name.title()
def __get__(self, obj, cls):
if obj is None: return self
value = obj.headers.get(self.name, self.default)
return self.reader(value) if self.reader else value
def __set__(self, obj, value):
obj.headers[self.name] = self.writer(value)
def __delete__(self, obj):
del obj.headers[self.name]
class BaseResponse(object):
""" Storage class for a response body as well as headers and cookies.
This class does support dict-like case-insensitive item-access to
headers, but is NOT a dict. Most notably, iterating over a response
yields parts of the body and not the headers.
:param body: The response body as one of the supported types.
:param status: Either an HTTP status code (e.g. 200) or a status line
including the reason phrase (e.g. '200 OK').
:param headers: A dictionary or a list of name-value pairs.
Additional keyword arguments are added to the list of headers.
Underscores in the header name are replaced with dashes.
"""
default_status = 200
default_content_type = 'text/html; charset=UTF-8'
# Header blacklist for specific response codes
# (rfc2616 section 10.2.3 and 10.3.5)
bad_headers = {
204: set(('Content-Type',)),
304: set(('Allow', 'Content-Encoding', 'Content-Language',
'Content-Length', 'Content-Range', 'Content-Type',
'Content-Md5', 'Last-Modified'))}
def __init__(self, body='', status=None, headers=None, **more_headers):
self._cookies = None
self._headers = {}
self.body = body
self.status = status or self.default_status
if headers:
if isinstance(headers, dict):
headers = headers.items()
for name, value in headers:
self.add_header(name, value)
if more_headers:
for name, value in more_headers.items():
self.add_header(name, value)
def copy(self, cls=None):
''' Returns a copy of self. '''
cls = cls or BaseResponse
assert issubclass(cls, BaseResponse)
copy = cls()
copy.status = self.status
copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
if self._cookies:
copy._cookies = SimpleCookie()
copy._cookies.load(self._cookies.output())
return copy
def __iter__(self):
return iter(self.body)
def close(self):
if hasattr(self.body, 'close'):
self.body.close()
@property
def status_line(self):
''' The HTTP status line as a string (e.g. ``404 Not Found``).'''
return self._status_line
@property
def status_code(self):
''' The HTTP status code as an integer (e.g. 404).'''
return self._status_code
def _set_status(self, status):
if isinstance(status, int):
code, status = status, _HTTP_STATUS_LINES.get(status)
elif ' ' in status:
status = status.strip()
code = int(status.split()[0])
else:
raise ValueError('String status line without a reason phrase.')
if not 100 <= code <= 999: raise ValueError('Status code out of range.')
self._status_code = code
self._status_line = str(status or ('%d Unknown' % code))
def _get_status(self):
return self._status_line
status = property(_get_status, _set_status, None,
''' A writeable property to change the HTTP response status. It accepts
either a numeric code (100-999) or a string with a custom reason
phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
:data:`status_code` are updated accordingly. The return value is
always a status string. ''')
del _get_status, _set_status
@property
def headers(self):
''' An instance of :class:`HeaderDict`, a case-insensitive dict-like
view on the response headers. '''
hdict = HeaderDict()
hdict.dict = self._headers
return hdict
def __contains__(self, name): return _hkey(name) in self._headers
def __delitem__(self, name): del self._headers[_hkey(name)]
def __getitem__(self, name): return self._headers[_hkey(name)][-1]
def __setitem__(self, name, value): self._headers[_hkey(name)] = [str(value)]
def get_header(self, name, default=None):
''' Return the value of a previously defined header. If there is no
header with that name, return a default value. '''
return self._headers.get(_hkey(name), [default])[-1]
def set_header(self, name, value):
''' Create a new response header, replacing any previously defined
headers with the same name. '''
self._headers[_hkey(name)] = [str(value)]
def add_header(self, name, value):
''' Add an additional response header, not removing duplicates. '''
self._headers.setdefault(_hkey(name), []).append(str(value))
def iter_headers(self):
''' Yield (header, value) tuples, skipping headers that are not
allowed with the current response status code. '''
return self.headerlist
@property
def headerlist(self):
''' WSGI conform list of (header, value) tuples. '''
out = []
headers = list(self._headers.items())
if 'Content-Type' not in self._headers:
headers.append(('Content-Type', [self.default_content_type]))
if self._status_code in self.bad_headers:
bad_headers = self.bad_headers[self._status_code]
headers = [h for h in headers if h[0] not in bad_headers]
out += [(name, val) for name, vals in headers for val in vals]
if self._cookies:
for c in self._cookies.values():
out.append(('Set-Cookie', c.OutputString()))
return out
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int)
expires = HeaderProperty('Expires',
reader=lambda x: datetime.utcfromtimestamp(parse_date(x)),
writer=lambda x: http_date(x))
@property
def charset(self, default='UTF-8'):
""" Return the charset specified in the content-type header (default: utf8). """
if 'charset=' in self.content_type:
return self.content_type.split('charset=')[-1].split(';')[0].strip()
return default
def set_cookie(self, name, value, secret=None, **options):
''' Create a new cookie or replace an old one. If the `secret` parameter is
set, create a `Signed Cookie` (described below).
:param name: the name of the cookie.
:param value: the value of the cookie.
:param secret: a signature key required for signed cookies.
Additionally, this method accepts all RFC 2109 attributes that are
supported by :class:`cookie.Morsel`, including:
:param max_age: maximum age in seconds. (default: None)
:param expires: a datetime object or UNIX timestamp. (default: None)
:param domain: the domain that is allowed to read the cookie.
(default: current domain)
:param path: limits the cookie to a given path (default: current path)
:param secure: limit the cookie to HTTPS connections (default: off).
            :param httponly: prevents client-side JavaScript from reading this
               cookie (default: off, requires Python 2.6 or newer).
If neither `expires` nor `max_age` is set (default), the cookie will
expire at the end of the browser session (as soon as the browser
window is closed).
Signed cookies may store any pickle-able object and are
cryptographically signed to prevent manipulation. Keep in mind that
cookies are limited to 4kb in most browsers.
Warning: Signed cookies are not encrypted (the client can still see
the content) and not copy-protected (the client can restore an old
            cookie). The main intention is to make pickling and unpickling
            safe, not to store secret information on the client side.
'''
if not self._cookies:
self._cookies = SimpleCookie()
if secret:
value = touni(cookie_encode((name, value), secret))
elif not isinstance(value, basestring):
raise TypeError('Secret key missing for non-string Cookie.')
        if len(value) > 4096: raise ValueError('Cookie value too long.')
self._cookies[name] = value
for key, value in options.items():
if key == 'max_age':
if isinstance(value, timedelta):
value = value.seconds + value.days * 24 * 3600
if key == 'expires':
if isinstance(value, (datedate, datetime)):
value = value.timetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
self._cookies[name][key.replace('_', '-')] = value
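    # Usage sketch for ``set_cookie`` (names and secret are hypothetical).
    # The second call stores a signed cookie that can later be read back
    # with ``request.get_cookie('session', secret='s3cr3t')``.
    #
    #     response.set_cookie('visits', '42', max_age=3600, path='/')
    #     response.set_cookie('session', ('alice', 42), secret='s3cr3t')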
def delete_cookie(self, key, **kwargs):
''' Delete a cookie. Be sure to use the same `domain` and `path`
settings as used to create the cookie. '''
kwargs['max_age'] = -1
kwargs['expires'] = 0
self.set_cookie(key, '', **kwargs)
def __repr__(self):
out = ''
for name, value in self.headerlist:
out += '%s: %s\n' % (name.title(), value.strip())
return out
def _local_property():
ls = threading.local()
def fget(self):
try: return ls.var
except AttributeError:
raise RuntimeError("Request context not initialized.")
def fset(self, value): ls.var = value
def fdel(self): del ls.var
return property(fget, fset, fdel, 'Thread-local property')
class LocalRequest(BaseRequest):
''' A thread-local subclass of :class:`BaseRequest` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`request`). If accessed during a
request/response cycle, this instance always refers to the *current*
request (even on a multithreaded server). '''
bind = BaseRequest.__init__
environ = _local_property()
class LocalResponse(BaseResponse):
''' A thread-local subclass of :class:`BaseResponse` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`response`). Its attributes are used
to build the HTTP response at the end of the request/response cycle.
'''
bind = BaseResponse.__init__
_status_line = _local_property()
_status_code = _local_property()
_cookies = _local_property()
_headers = _local_property()
body = _local_property()
Request = BaseRequest
Response = BaseResponse
class HTTPResponse(Response, BottleException):
def __init__(self, body='', status=None, headers=None, **more_headers):
super(HTTPResponse, self).__init__(body, status, headers, **more_headers)
def apply(self, response):
response._status_code = self._status_code
response._status_line = self._status_line
response._headers = self._headers
response._cookies = self._cookies
response.body = self.body
class HTTPError(HTTPResponse):
default_status = 500
def __init__(self, status=None, body=None, exception=None, traceback=None,
**options):
self.exception = exception
self.traceback = traceback
super(HTTPError, self).__init__(body, status, **options)
###############################################################################
# Plugins ######################################################################
###############################################################################
class PluginError(BottleException): pass
class JSONPlugin(object):
name = 'json'
api = 2
def __init__(self, json_dumps=json_dumps):
self.json_dumps = json_dumps
def apply(self, callback, route):
dumps = self.json_dumps
if not dumps: return callback
def wrapper(*a, **ka):
try:
rv = callback(*a, **ka)
except HTTPError:
rv = _e()
if isinstance(rv, dict):
                # Attempt to serialize; raises an exception on failure
                json_response = dumps(rv)
                # Set content type only if serialization was successful
                response.content_type = 'application/json'
return json_response
elif isinstance(rv, HTTPResponse) and isinstance(rv.body, dict):
rv.body = dumps(rv.body)
rv.content_type = 'application/json'
return rv
return wrapper
class TemplatePlugin(object):
''' This plugin applies the :func:`view` decorator to all routes with a
`template` config parameter. If the parameter is a tuple, the second
element must be a dict with additional options (e.g. `template_engine`)
or default variables for the template. '''
name = 'template'
api = 2
def apply(self, callback, route):
conf = route.config.get('template')
if isinstance(conf, (tuple, list)) and len(conf) == 2:
return view(conf[0], **conf[1])(callback)
elif isinstance(conf, str):
return view(conf)(callback)
else:
return callback
#: Not a plugin, but part of the plugin API. TODO: Find a better place.
class _ImportRedirect(object):
def __init__(self, name, impmask):
''' Create a virtual package that redirects imports (see PEP 302). '''
self.name = name
self.impmask = impmask
self.module = sys.modules.setdefault(name, imp.new_module(name))
self.module.__dict__.update({'__file__': __file__, '__path__': [],
'__all__': [], '__loader__': self})
sys.meta_path.append(self)
def find_module(self, fullname, path=None):
if '.' not in fullname: return
packname = fullname.rsplit('.', 1)[0]
if packname != self.name: return
return self
def load_module(self, fullname):
if fullname in sys.modules: return sys.modules[fullname]
modname = fullname.rsplit('.', 1)[1]
realname = self.impmask % modname
__import__(realname)
module = sys.modules[fullname] = sys.modules[realname]
setattr(self.module, modname, module)
module.__loader__ = self
return module
###############################################################################
# Common Utilities #############################################################
###############################################################################
class MultiDict(DictMixin):
""" This dict stores multiple values per key, but behaves exactly like a
normal dict in that it returns only the newest value for any given key.
There are special methods available to access the full list of values.
"""
def __init__(self, *a, **k):
self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items())
def __len__(self): return len(self.dict)
def __iter__(self): return iter(self.dict)
def __contains__(self, key): return key in self.dict
def __delitem__(self, key): del self.dict[key]
def __getitem__(self, key): return self.dict[key][-1]
def __setitem__(self, key, value): self.append(key, value)
def keys(self): return self.dict.keys()
if py3k:
def values(self): return (v[-1] for v in self.dict.values())
def items(self): return ((k, v[-1]) for k, v in self.dict.items())
def allitems(self):
return ((k, v) for k, vl in self.dict.items() for v in vl)
iterkeys = keys
itervalues = values
iteritems = items
iterallitems = allitems
else:
def values(self): return [v[-1] for v in self.dict.values()]
def items(self): return [(k, v[-1]) for k, v in self.dict.items()]
def iterkeys(self): return self.dict.iterkeys()
def itervalues(self): return (v[-1] for v in self.dict.itervalues())
def iteritems(self):
return ((k, v[-1]) for k, v in self.dict.iteritems())
def iterallitems(self):
return ((k, v) for k, vl in self.dict.iteritems() for v in vl)
def allitems(self):
return [(k, v) for k, vl in self.dict.iteritems() for v in vl]
def get(self, key, default=None, index=-1, type=None):
''' Return the most recent value for a key.
:param default: The default value to be returned if the key is not
present or the type conversion fails.
:param index: An index for the list of available values.
:param type: If defined, this callable is used to cast the value
               into a specific type. Exceptions are suppressed and result in
               the default value being returned.
'''
try:
val = self.dict[key][index]
return type(val) if type else val
except Exception:
pass
return default
def append(self, key, value):
''' Add a new value to the list of values for this key. '''
self.dict.setdefault(key, []).append(value)
def replace(self, key, value):
''' Replace the list of values with a single value. '''
self.dict[key] = [value]
def getall(self, key):
''' Return a (possibly empty) list of values for a key. '''
return self.dict.get(key) or []
#: Aliases for WTForms to mimic other multi-dict APIs (Django)
getone = get
getlist = getall
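# A minimal sketch of MultiDict semantics (names are illustrative):
#
#     d = MultiDict(a=1)
#     d['a'] = 2              # item assignment appends instead of replacing
#     d['a']                  # -> 2 (the newest value)
#     d.getall('a')           # -> [1, 2]
#     d.get('a', type=str)    # -> '2' (cast via the `type` callable)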
class FormsDict(MultiDict):
''' This :class:`MultiDict` subclass is used to store request form data.
Additionally to the normal dict-like item access methods (which return
unmodified data as native strings), this container also supports
attribute-like access to its values. Attributes are automatically de-
or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing
attributes default to an empty string. '''
#: Encoding used for attribute values.
input_encoding = 'utf8'
#: If true (default), unicode strings are first encoded with `latin1`
#: and then decoded to match :attr:`input_encoding`.
recode_unicode = True
def _fix(self, s, encoding=None):
if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI
return s.encode('latin1').decode(encoding or self.input_encoding)
elif isinstance(s, bytes): # Python 2 WSGI
return s.decode(encoding or self.input_encoding)
else:
return s
def decode(self, encoding=None):
''' Returns a copy with all keys and values de- or recoded to match
:attr:`input_encoding`. Some libraries (e.g. WTForms) want a
unicode dictionary. '''
copy = FormsDict()
enc = copy.input_encoding = encoding or self.input_encoding
copy.recode_unicode = False
for key, value in self.allitems():
copy.append(self._fix(key, enc), self._fix(value, enc))
return copy
def getunicode(self, name, default=None, encoding=None):
''' Return the value as a unicode string, or the default. '''
try:
return self._fix(self[name], encoding)
except (UnicodeError, KeyError):
return default
def __getattr__(self, name, default=unicode()):
# Without this guard, pickle generates a cryptic TypeError:
if name.startswith('__') and name.endswith('__'):
return super(FormsDict, self).__getattr__(name)
return self.getunicode(name, default=default)
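# Sketch of FormsDict attribute access (values are illustrative):
#
#     form = FormsDict(name='alice')
#     form.name                 # -> 'alice' (decoded to unicode)
#     form.missing              # -> '' (missing attributes are empty strings)
#     form.getunicode('name')   # explicit variant with a default parameter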
class HeaderDict(MultiDict):
""" A case-insensitive version of :class:`MultiDict` that defaults to
replace the old value instead of appending it. """
def __init__(self, *a, **ka):
self.dict = {}
if a or ka: self.update(*a, **ka)
def __contains__(self, key): return _hkey(key) in self.dict
def __delitem__(self, key): del self.dict[_hkey(key)]
def __getitem__(self, key): return self.dict[_hkey(key)][-1]
def __setitem__(self, key, value): self.dict[_hkey(key)] = [str(value)]
def append(self, key, value):
self.dict.setdefault(_hkey(key), []).append(str(value))
def replace(self, key, value): self.dict[_hkey(key)] = [str(value)]
def getall(self, key): return self.dict.get(_hkey(key)) or []
def get(self, key, default=None, index=-1):
return MultiDict.get(self, _hkey(key), default, index)
def filter(self, names):
for name in [_hkey(n) for n in names]:
if name in self.dict:
del self.dict[name]
class WSGIHeaderDict(DictMixin):
''' This dict-like class wraps a WSGI environ dict and provides convenient
access to HTTP_* fields. Keys and values are native strings
(2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
environment contains non-native string values, these are de- or encoded
using a lossless 'latin1' character set.
The API will remain stable even on changes to the relevant PEPs.
Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
that uses non-native strings.)
'''
#: List of keys that do not have a ``HTTP_`` prefix.
cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')
def __init__(self, environ):
self.environ = environ
def _ekey(self, key):
''' Translate header field name to CGI/WSGI environ key. '''
key = key.replace('-','_').upper()
if key in self.cgikeys:
return key
return 'HTTP_' + key
def raw(self, key, default=None):
''' Return the header value as is (may be bytes or unicode). '''
return self.environ.get(self._ekey(key), default)
def __getitem__(self, key):
return tonat(self.environ[self._ekey(key)], 'latin1')
def __setitem__(self, key, value):
raise TypeError("%s is read-only." % self.__class__)
def __delitem__(self, key):
raise TypeError("%s is read-only." % self.__class__)
def __iter__(self):
for key in self.environ:
if key[:5] == 'HTTP_':
yield key[5:].replace('_', '-').title()
elif key in self.cgikeys:
yield key.replace('_', '-').title()
def keys(self): return [x for x in self]
def __len__(self): return len(self.keys())
def __contains__(self, key): return self._ekey(key) in self.environ
class ConfigDict(dict):
''' A dict-like configuration storage with additional support for
namespaces, validators, meta-data, on_change listeners and more.
'''
__slots__ = ('_meta', '_on_change')
def __init__(self):
self._meta = {}
self._on_change = lambda name, value: None
def load_config(self, filename):
''' Load values from an *.ini style config file.
If the config file contains sections, their names are used as
namespaces for the values within. The two special sections
``DEFAULT`` and ``bottle`` refer to the root namespace (no prefix).
'''
conf = ConfigParser()
conf.read(filename)
for section in conf.sections():
for key, value in conf.items(section):
if section not in ('DEFAULT', 'bottle'):
key = section + '.' + key
self[key] = value
return self
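    # Example mapping for ``load_config`` (file name and values are
    # hypothetical). An ``app.ini`` such as:
    #
    #     [bottle]
    #     debug = true
    #
    #     [sqlite]
    #     db = /tmp/test.db
    #
    # results in the keys 'debug' and 'sqlite.db' after
    # ``config.load_config('app.ini')``.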
def load_dict(self, source, namespace=''):
''' Load values from a dictionary structure. Nesting can be used to
represent namespaces.
>>> c.load_dict({'some': {'namespace': {'key': 'value'} } })
{'some.namespace.key': 'value'}
'''
for key, value in source.items():
if isinstance(key, str):
nskey = (namespace + '.' + key).strip('.')
if isinstance(value, dict):
self.load_dict(value, namespace=nskey)
else:
self[nskey] = value
else:
raise TypeError('Key has type %r (not a string)' % type(key))
return self
def update(self, *a, **ka):
''' If the first parameter is a string, all keys are prefixed with this
namespace. Apart from that it works just as the usual dict.update().
Example: ``update('some.namespace', key='value')`` '''
prefix = ''
if a and isinstance(a[0], str):
prefix = a[0].strip('.') + '.'
a = a[1:]
for key, value in dict(*a, **ka).items():
self[prefix+key] = value
def setdefault(self, key, value):
if key not in self:
self[key] = value
def __setitem__(self, key, value):
if not isinstance(key, str):
raise TypeError('Key has type %r (not a string)' % type(key))
value = self.meta_get(key, 'filter', lambda x: x)(value)
if key in self and self[key] is value:
return
self._on_change(key, value)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
self._on_change(key, None)
dict.__delitem__(self, key)
def meta_get(self, key, metafield, default=None):
''' Return the value of a meta field for a key. '''
return self._meta.get(key, {}).get(metafield, default)
def meta_set(self, key, metafield, value):
''' Set the meta field for a key to a new value. This triggers the
on-change handler for existing keys. '''
self._meta.setdefault(key, {})[metafield] = value
if key in self:
self[key] = self[key]
def meta_list(self, key):
''' Return an iterable of meta field names defined for a key. '''
return self._meta.get(key, {}).keys()
class AppStack(list):
""" A stack-like list. Calling it returns the head of the stack. """
def __call__(self):
""" Return the current default application. """
return self[-1]
def push(self, value=None):
""" Add a new :class:`Bottle` instance to the stack """
if not isinstance(value, Bottle):
value = Bottle()
self.append(value)
return value
class WSGIFileWrapper(object):
def __init__(self, fp, buffer_size=1024*64):
self.fp, self.buffer_size = fp, buffer_size
for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'):
if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr))
def __iter__(self):
buff, read = self.buffer_size, self.read
while True:
part = read(buff)
if not part: return
yield part
class _closeiter(object):
''' This only exists to be able to attach a .close method to iterators that
do not support attribute assignment (most of itertools). '''
def __init__(self, iterator, close=None):
self.iterator = iterator
self.close_callbacks = makelist(close)
def __iter__(self):
return iter(self.iterator)
def close(self):
for func in self.close_callbacks:
func()
class ResourceManager(object):
''' This class manages a list of search paths and helps to find and open
application-bound resources (files).
:param base: default value for :meth:`add_path` calls.
:param opener: callable used to open resources.
:param cachemode: controls which lookups are cached. One of 'all',
'found' or 'none'.
'''
def __init__(self, base='./', opener=open, cachemode='all'):
        self.opener = opener
self.base = base
self.cachemode = cachemode
#: A list of search paths. See :meth:`add_path` for details.
self.path = []
#: A cache for resolved paths. ``res.cache.clear()`` clears the cache.
self.cache = {}
def add_path(self, path, base=None, index=None, create=False):
''' Add a new path to the list of search paths. Return False if the
path does not exist.
:param path: The new search path. Relative paths are turned into
an absolute and normalized form. If the path looks like a file
(not ending in `/`), the filename is stripped off.
:param base: Path used to absolutize relative search paths.
Defaults to :attr:`base` which defaults to ``os.getcwd()``.
:param index: Position within the list of search paths. Defaults
to last index (appends to the list).
The `base` parameter makes it easy to reference files installed
along with a python module or package::
res.add_path('./resources/', __file__)
'''
base = os.path.abspath(os.path.dirname(base or self.base))
path = os.path.abspath(os.path.join(base, os.path.dirname(path)))
path += os.sep
if path in self.path:
self.path.remove(path)
if create and not os.path.isdir(path):
os.makedirs(path)
if index is None:
self.path.append(path)
else:
self.path.insert(index, path)
self.cache.clear()
return os.path.exists(path)
def __iter__(self):
''' Iterate over all existing files in all registered paths. '''
search = self.path[:]
while search:
path = search.pop()
if not os.path.isdir(path): continue
for name in os.listdir(path):
full = os.path.join(path, name)
if os.path.isdir(full): search.append(full)
else: yield full
def lookup(self, name):
''' Search for a resource and return an absolute file path, or `None`.
The :attr:`path` list is searched in order. The first match is
            returned. Symlinks are followed. The result is cached to speed up
future lookups. '''
if name not in self.cache or DEBUG:
for path in self.path:
fpath = os.path.join(path, name)
if os.path.isfile(fpath):
if self.cachemode in ('all', 'found'):
self.cache[name] = fpath
return fpath
if self.cachemode == 'all':
self.cache[name] = None
return self.cache[name]
def open(self, name, mode='r', *args, **kwargs):
''' Find a resource and return a file object, or raise IOError. '''
fname = self.lookup(name)
if not fname: raise IOError("Resource %r not found." % name)
return self.opener(fname, mode=mode, *args, **kwargs)
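# A minimal ResourceManager sketch (paths and file names are hypothetical):
#
#     rm = ResourceManager()
#     rm.add_path('./data/')               # register a search path
#     rm.lookup('config.json')             # absolute path or None
#     with rm.open('config.json') as fp:   # raises IOError if not found
#         text = fp.read()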
class FileUpload(object):
def __init__(self, fileobj, name, filename, headers=None):
''' Wrapper for file uploads. '''
#: Open file(-like) object (BytesIO buffer or temporary file)
self.file = fileobj
#: Name of the upload form field
self.name = name
#: Raw filename as sent by the client (may contain unsafe characters)
self.raw_filename = filename
#: A :class:`HeaderDict` with additional headers (e.g. content-type)
self.headers = HeaderDict(headers) if headers else HeaderDict()
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int, default=-1)
@cached_property
def filename(self):
''' Name of the file on the client file system, but normalized to ensure
file system compatibility. An empty filename is returned as 'empty'.
Only ASCII letters, digits, dashes, underscores and dots are
allowed in the final filename. Accents are removed, if possible.
            Whitespace is replaced by a single dash. Leading or trailing dots
or dashes are removed. The filename is limited to 255 characters.
'''
fname = self.raw_filename
if not isinstance(fname, unicode):
fname = fname.decode('utf8', 'ignore')
fname = normalize('NFKD', fname).encode('ASCII', 'ignore').decode('ASCII')
fname = os.path.basename(fname.replace('\\', os.path.sep))
fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip()
fname = re.sub(r'[-\s]+', '-', fname).strip('.-')
return fname[:255] or 'empty'
def _copy_file(self, fp, chunk_size=2**16):
read, write, offset = self.file.read, fp.write, self.file.tell()
while 1:
buf = read(chunk_size)
if not buf: break
write(buf)
self.file.seek(offset)
def save(self, destination, overwrite=False, chunk_size=2**16):
''' Save file to disk or copy its content to an open file(-like) object.
If *destination* is a directory, :attr:`filename` is added to the
path. Existing files are not overwritten by default (IOError).
:param destination: File path, directory or file(-like) object.
:param overwrite: If True, replace existing files. (default: False)
:param chunk_size: Bytes to read at a time. (default: 64kb)
'''
if isinstance(destination, basestring): # Except file-likes here
if os.path.isdir(destination):
destination = os.path.join(destination, self.filename)
if not overwrite and os.path.exists(destination):
raise IOError('File exists.')
with open(destination, 'wb') as fp:
self._copy_file(fp, chunk_size)
else:
self._copy_file(destination, chunk_size)
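# Sketch: saving an upload inside a request handler (route path and form
# field name are hypothetical). Passing a directory appends the sanitized
# :attr:`filename` automatically.
#
#     @route('/upload', method='POST')
#     def do_upload():
#         upload = request.files.get('data')
#         upload.save('/tmp/uploads')   # raises IOError if the file exists
#         return 'OK'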
###############################################################################
# Application Helper ###########################################################
###############################################################################
def abort(code=500, text='Unknown Error.'):
""" Aborts execution and causes a HTTP error. """
raise HTTPError(code, text)
def redirect(url, code=None):
""" Aborts execution and causes a 303 or 302 redirect, depending on
the HTTP protocol version. """
if not code:
code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
res = response.copy(cls=HTTPResponse)
res.status = code
res.body = ""
res.set_header('Location', urljoin(request.url, url))
raise res
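# Usage sketch (route paths are hypothetical): ``redirect`` raises the
# response, so no explicit ``return`` is needed afterwards.
#
#     @route('/old')
#     def old():
#         redirect('/new')   # 303 on HTTP/1.1, 302 otherwise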
def _file_iter_range(fp, offset, bytes, maxread=1024*1024):
''' Yield chunks from a range in a file. No chunk is bigger than maxread.'''
fp.seek(offset)
while bytes > 0:
part = fp.read(min(bytes, maxread))
if not part: break
bytes -= len(part)
yield part
def static_file(filename, root, mimetype='auto', download=False, charset='UTF-8'):
""" Open a file in a safe way and return :exc:`HTTPResponse` with status
code 200, 305, 403 or 404. The ``Content-Type``, ``Content-Encoding``,
``Content-Length`` and ``Last-Modified`` headers are set if possible.
Special support for ``If-Modified-Since``, ``Range`` and ``HEAD``
requests.
:param filename: Name or path of the file to send.
:param root: Root path for file lookups. Should be an absolute directory
path.
:param mimetype: Defines the content-type header (default: guess from
file extension)
:param download: If True, ask the browser to open a `Save as...` dialog
instead of opening the file with the associated program. You can
specify a custom filename as a string. If not specified, the
original filename is used (default: False).
:param charset: The charset to use for files with a ``text/*``
mime-type. (default: UTF-8)
"""
root = os.path.abspath(root) + os.sep
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
headers = dict()
if not filename.startswith(root):
return HTTPError(403, "Access denied.")
if not os.path.exists(filename) or not os.path.isfile(filename):
return HTTPError(404, "File does not exist.")
if not os.access(filename, os.R_OK):
return HTTPError(403, "You do not have permission to access this file.")
if mimetype == 'auto':
mimetype, encoding = mimetypes.guess_type(filename)
if encoding: headers['Content-Encoding'] = encoding
if mimetype:
if mimetype[:5] == 'text/' and charset and 'charset' not in mimetype:
mimetype += '; charset=%s' % charset
headers['Content-Type'] = mimetype
if download:
download = os.path.basename(filename if download == True else download)
headers['Content-Disposition'] = 'attachment; filename="%s"' % download
stats = os.stat(filename)
headers['Content-Length'] = clen = stats.st_size
lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
headers['Last-Modified'] = lm
ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
if ims:
ims = parse_date(ims.split(";")[0].strip())
if ims is not None and ims >= int(stats.st_mtime):
headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
return HTTPResponse(status=304, **headers)
body = '' if request.method == 'HEAD' else open(filename, 'rb')
headers["Accept-Ranges"] = "bytes"
    ranges = request.environ.get('HTTP_RANGE')
    if ranges is not None:
        ranges = list(parse_range_header(ranges, clen))
if not ranges:
return HTTPError(416, "Requested Range Not Satisfiable")
offset, end = ranges[0]
headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end-1, clen)
headers["Content-Length"] = str(end-offset)
if body: body = _file_iter_range(body, offset, end-offset)
return HTTPResponse(body, status=206, **headers)
return HTTPResponse(body, **headers)
###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################
def debug(mode=True):
""" Change the debug level.
There is only one debug level supported at the moment."""
global DEBUG
if mode: warnings.simplefilter('default')
DEBUG = bool(mode)
def http_date(value):
if isinstance(value, (datedate, datetime)):
value = value.utctimetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
if not isinstance(value, basestring):
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
return value
def parse_date(ims):
""" Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
try:
ts = email.utils.parsedate_tz(ims)
return time.mktime(ts[:8] + (0,)) - (ts[9] or 0) - time.timezone
except (TypeError, ValueError, IndexError, OverflowError):
return None
def parse_auth(header):
""" Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
try:
method, data = header.split(None, 1)
if method.lower() == 'basic':
user, pwd = touni(base64.b64decode(tob(data))).split(':',1)
return user, pwd
except (KeyError, ValueError):
return None
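# Example: parse_auth('Basic YWxpY2U6c2VjcmV0') -> ('alice', 'secret').
# Missing or malformed headers yield None.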
def parse_range_header(header, maxlen=0):
''' Yield (start, end) ranges parsed from a HTTP Range header. Skip
unsatisfiable ranges. The end index is non-inclusive.'''
if not header or header[:6] != 'bytes=': return
ranges = [r.split('-', 1) for r in header[6:].split(',') if '-' in r]
for start, end in ranges:
try:
if not start: # bytes=-100 -> last 100 bytes
start, end = max(0, maxlen-int(end)), maxlen
            elif not end: # bytes=100- -> all but the first 100 bytes
start, end = int(start), maxlen
else: # bytes=100-200 -> bytes 100-200 (inclusive)
start, end = int(start), min(int(end)+1, maxlen)
if 0 <= start < end <= maxlen:
yield start, end
except ValueError:
pass
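# Example: for a 1000 byte resource,
#     list(parse_range_header('bytes=0-99,500-,-100', 1000))
# yields [(0, 100), (500, 1000), (900, 1000)] (end indices are exclusive).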
def _parse_qsl(qs):
r = []
for pair in qs.replace(';','&').split('&'):
if not pair: continue
nv = pair.split('=', 1)
if len(nv) != 2: nv.append('')
key = urlunquote(nv[0].replace('+', ' '))
value = urlunquote(nv[1].replace('+', ' '))
r.append((key, value))
return r
def _lscmp(a, b):
''' Compares two strings in a cryptographically safe way:
Runtime is not affected by length of common prefix. '''
return not sum(0 if x==y else 1 for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key):
''' Encode and sign a pickle-able object. Return a (byte) string '''
msg = base64.b64encode(pickle.dumps(data, -1))
sig = base64.b64encode(hmac.new(tob(key), msg).digest())
return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key):
''' Verify and decode an encoded string. Return an object or None.'''
data = tob(data)
if cookie_is_encoded(data):
sig, msg = data.split(tob('?'), 1)
if _lscmp(sig[1:], base64.b64encode(hmac.new(tob(key), msg).digest())):
return pickle.loads(base64.b64decode(msg))
return None
def cookie_is_encoded(data):
    ''' Return True if the argument looks like an encoded cookie.'''
return bool(data.startswith(tob('!')) and tob('?') in data)
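# Round-trip sketch (secret key is hypothetical):
#
#     token = cookie_encode(('name', 42), 'my-secret')
#     cookie_is_encoded(token)            # -> True
#     cookie_decode(token, 'my-secret')   # -> ('name', 42)
#     cookie_decode(token, 'wrong-key')   # -> None (signature mismatch)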
def html_escape(string):
''' Escape HTML special characters ``&<>`` and quotes ``'"``. '''
return string.replace('&','&').replace('<','<').replace('>','>')\
.replace('"','"').replace("'",''')
def html_quote(string):
''' Escape and quote a string to be used as an HTTP attribute.'''
return '"%s"' % html_escape(string).replace('\n',' ')\
.replace('\r',' ').replace('\t','	')
def yieldroutes(func):
""" Return a generator for routes that match the signature (name, args)
of the func parameter. This may yield more than one route if the function
takes optional keyword arguments. The output is best described by example::
a() -> '/a'
b(x, y) -> '/b/<x>/<y>'
c(x, y=5) -> '/c/<x>' and '/c/<x>/<y>'
d(x=5, y=6) -> '/d' and '/d/<x>' and '/d/<x>/<y>'
"""
path = '/' + func.__name__.replace('__','/').lstrip('/')
spec = getargspec(func)
argc = len(spec[0]) - len(spec[3] or [])
path += ('/<%s>' * argc) % tuple(spec[0][:argc])
yield path
for arg in spec[0][argc:]:
path += '/<%s>' % arg
yield path
def path_shift(script_name, path_info, shift=1):
''' Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
:return: The modified paths.
:param script_name: The SCRIPT_NAME path.
        :param path_info: The PATH_INFO path.
:param shift: The number of path fragments to shift. May be negative to
change the shift direction. (default: 1)
'''
if shift == 0: return script_name, path_info
pathlist = path_info.strip('/').split('/')
scriptlist = script_name.strip('/').split('/')
if pathlist and pathlist[0] == '': pathlist = []
if scriptlist and scriptlist[0] == '': scriptlist = []
if shift > 0 and shift <= len(pathlist):
moved = pathlist[:shift]
scriptlist = scriptlist + moved
pathlist = pathlist[shift:]
elif shift < 0 and shift >= -len(scriptlist):
moved = scriptlist[shift:]
pathlist = moved + pathlist
scriptlist = scriptlist[:shift]
else:
empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
raise AssertionError("Cannot shift. Nothing left from %s" % empty)
new_script_name = '/' + '/'.join(scriptlist)
new_path_info = '/' + '/'.join(pathlist)
if path_info.endswith('/') and pathlist: new_path_info += '/'
return new_script_name, new_path_info
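# Example: path_shift('/script', '/a/b/c', shift=1)
# returns ('/script/a', '/b/c'); a negative shift moves fragments back
# from SCRIPT_NAME to PATH_INFO.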
def auth_basic(check, realm="private", text="Access denied"):
''' Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. '''
def decorator(func):
@functools.wraps(func)
def wrapper(*a, **ka):
user, password = request.auth or (None, None)
if user is None or not check(user, password):
err = HTTPError(401, text)
err.add_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
return err
return func(*a, **ka)
return wrapper
return decorator
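# Sketch: protecting a route with HTTP basic auth (credentials are
# hypothetical):
#
#     def check(user, password):
#         return user == 'admin' and password == 's3cr3t'
#
#     @route('/admin')
#     @auth_basic(check)
#     def admin_page():
#         return 'hello admin'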
# Shortcuts for common Bottle methods.
# They all refer to the current default application.
def make_default_app_wrapper(name):
''' Return a callable that relays calls to the current default app. '''
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
route = make_default_app_wrapper('route')
get = make_default_app_wrapper('get')
post = make_default_app_wrapper('post')
put = make_default_app_wrapper('put')
delete = make_default_app_wrapper('delete')
error = make_default_app_wrapper('error')
mount = make_default_app_wrapper('mount')
hook = make_default_app_wrapper('hook')
install = make_default_app_wrapper('install')
uninstall = make_default_app_wrapper('uninstall')
url = make_default_app_wrapper('get_url')
###############################################################################
# Server Adapter ###############################################################
###############################################################################
class ServerAdapter(object):
quiet = False
def __init__(self, host='127.0.0.1', port=8080, **options):
self.options = options
self.host = host
self.port = int(port)
def run(self, handler): # pragma: no cover
pass
def __repr__(self):
args = ', '.join(['%s=%s'%(k,repr(v)) for k, v in self.options.items()])
return "%s(%s)" % (self.__class__.__name__, args)
class CGIServer(ServerAdapter):
quiet = True
def run(self, handler): # pragma: no cover
from wsgiref.handlers import CGIHandler
def fixed_environ(environ, start_response):
environ.setdefault('PATH_INFO', '')
return handler(environ, start_response)
CGIHandler().run(fixed_environ)
class FlupFCGIServer(ServerAdapter):
def run(self, handler): # pragma: no cover
import flup.server.fcgi
self.options.setdefault('bindAddress', (self.host, self.port))
flup.server.fcgi.WSGIServer(handler, **self.options).run()
class WSGIRefServer(ServerAdapter):
def run(self, app): # pragma: no cover
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
from wsgiref.simple_server import make_server
import socket
class FixedHandler(WSGIRequestHandler):
def address_string(self): # Prevent reverse DNS lookups please.
return self.client_address[0]
def log_request(*args, **kw):
if not self.quiet:
return WSGIRequestHandler.log_request(*args, **kw)
handler_cls = self.options.get('handler_class', FixedHandler)
server_cls = self.options.get('server_class', WSGIServer)
if ':' in self.host: # Fix wsgiref for IPv6 addresses.
if getattr(server_cls, 'address_family') == socket.AF_INET:
class server_cls(server_cls):
address_family = socket.AF_INET6
srv = make_server(self.host, self.port, app, server_cls, handler_cls)
srv.serve_forever()
class CherryPyServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from cherrypy import wsgiserver
self.options['bind_addr'] = (self.host, self.port)
self.options['wsgi_app'] = handler
certfile = self.options.get('certfile')
if certfile:
del self.options['certfile']
keyfile = self.options.get('keyfile')
if keyfile:
del self.options['keyfile']
ssl_module = self.options.get('ssl_module')
if ssl_module:
del self.options['ssl_module']
server = wsgiserver.CherryPyWSGIServer(**self.options)
if certfile:
server.ssl_certificate = certfile
if keyfile:
server.ssl_private_key = keyfile
if ssl_module:
print("Setting SSL module = %s" % ssl_module)
adapterClass = wsgiserver.get_ssl_adapter_class(ssl_module)
adapter = adapterClass(certfile, keyfile)
server.ssl_adapter = adapter
try:
server.start()
finally:
server.stop()
class WaitressServer(ServerAdapter):
def run(self, handler):
from waitress import serve
serve(handler, host=self.host, port=self.port)
class PasteServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from paste import httpserver
from paste.translogger import TransLogger
handler = TransLogger(handler, setup_console_handler=(not self.quiet))
httpserver.serve(handler, host=self.host, port=str(self.port),
**self.options)
class MeinheldServer(ServerAdapter):
def run(self, handler):
from meinheld import server
server.listen((self.host, self.port))
server.run(handler)
class FapwsServer(ServerAdapter):
""" Extremely fast webserver using libev. See http://www.fapws.org/ """
def run(self, handler): # pragma: no cover
import fapws._evwsgi as evwsgi
from fapws import base, config
port = self.port
if float(config.SERVER_IDENT[-2:]) > 0.4:
# fapws3 silently changed its API in 0.5
port = str(port)
evwsgi.start(self.host, port)
# fapws3 never releases the GIL. Complain upstream. I tried. No luck.
if 'BOTTLE_CHILD' in os.environ and not self.quiet:
_stderr("WARNING: Auto-reloading does not work with Fapws3.\n")
_stderr(" (Fapws3 breaks python thread support)\n")
evwsgi.set_base_module(base)
def app(environ, start_response):
environ['wsgi.multiprocess'] = False
return handler(environ, start_response)
evwsgi.wsgi_cb(('', app))
evwsgi.run()
class TornadoServer(ServerAdapter):
""" The super hyped asynchronous server by facebook. Untested. """
def run(self, handler): # pragma: no cover
import tornado.wsgi, tornado.httpserver, tornado.ioloop
container = tornado.wsgi.WSGIContainer(handler)
server = tornado.httpserver.HTTPServer(container)
server.listen(port=self.port,address=self.host)
tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
""" Adapter for Google App Engine. """
quiet = True
def run(self, handler):
from google.appengine.ext.webapp import util
# A main() function in the handler script enables 'App Caching'.
        # Let's make sure it is there. This _really_ improves performance.
module = sys.modules.get('__main__')
if module and not hasattr(module, 'main'):
module.main = lambda: util.run_wsgi_app(handler)
util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from twisted.web import server, wsgi
from twisted.python.threadpool import ThreadPool
from twisted.internet import reactor
thread_pool = ThreadPool()
thread_pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
reactor.listenTCP(self.port, factory, interface=self.host)
reactor.run()
class DieselServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from diesel.protocols.wsgi import WSGIApplication
app = WSGIApplication(handler, port=self.port)
app.run()
class GeventServer(ServerAdapter):
""" Untested. Options:
* `fast` (default: False) uses libevent's http server, but has some
issues: No streaming, no pipelining, no SSL.
* See gevent.wsgi.WSGIServer() documentation for more options.
"""
def run(self, handler):
from gevent import wsgi, pywsgi, local
if not isinstance(threading.local(), local.local):
msg = "Bottle requires gevent.monkey.patch_all() (before import)"
raise RuntimeError(msg)
if not self.options.pop('fast', None): wsgi = pywsgi
self.options['log'] = None if self.quiet else 'default'
address = (self.host, self.port)
server = wsgi.WSGIServer(address, handler, **self.options)
if 'BOTTLE_CHILD' in os.environ:
import signal
signal.signal(signal.SIGINT, lambda s, f: server.stop())
server.serve_forever()
class GeventSocketIOServer(ServerAdapter):
def run(self,handler):
from socketio import server
address = (self.host, self.port)
server.SocketIOServer(address, handler, **self.options).serve_forever()
class GunicornServer(ServerAdapter):
""" Untested. See http://gunicorn.org/configure.html for options. """
def run(self, handler):
from gunicorn.app.base import Application
config = {'bind': "%s:%d" % (self.host, int(self.port))}
config.update(self.options)
class GunicornApplication(Application):
def init(self, parser, opts, args):
return config
def load(self):
return handler
GunicornApplication().run()
class EventletServer(ServerAdapter):
""" Untested """
def run(self, handler):
from eventlet import wsgi, listen
try:
wsgi.server(listen((self.host, self.port)), handler,
log_output=(not self.quiet))
except TypeError:
# Fallback, if we have old version of eventlet
wsgi.server(listen((self.host, self.port)), handler)
class RocketServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from rocket import Rocket
server = Rocket((self.host, self.port), 'wsgi', { 'wsgi_app' : handler })
server.start()
class BjoernServer(ServerAdapter):
""" Fast server written in C: https://github.com/jonashaag/bjoern """
def run(self, handler):
from bjoern import run
run(handler, self.host, self.port)
class AutoServer(ServerAdapter):
""" Untested. """
adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer, WSGIRefServer]
def run(self, handler):
for sa in self.adapters:
try:
return sa(self.host, self.port, **self.options).run(handler)
except ImportError:
pass
server_names = {
'cgi': CGIServer,
'flup': FlupFCGIServer,
'wsgiref': WSGIRefServer,
'waitress': WaitressServer,
'cherrypy': CherryPyServer,
'paste': PasteServer,
'fapws3': FapwsServer,
'tornado': TornadoServer,
'gae': AppEngineServer,
'twisted': TwistedServer,
'diesel': DieselServer,
'meinheld': MeinheldServer,
'gunicorn': GunicornServer,
'eventlet': EventletServer,
'gevent': GeventServer,
'geventSocketIO':GeventSocketIOServer,
'rocket': RocketServer,
'bjoern' : BjoernServer,
'auto': AutoServer,
}
###############################################################################
# Application Control ##########################################################
###############################################################################
def load(target, **namespace):
""" Import a module or fetch an object from a module.
* ``package.module`` returns `module` as a module object.
* ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
* ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.
The last form accepts not only function calls, but any type of
expression. Keyword arguments passed to this function are available as
local variables. Example: ``import_string('re:compile(x)', x='[a-z]')``
"""
module, target = target.split(":", 1) if ':' in target else (target, None)
if module not in sys.modules: __import__(module)
if not target: return sys.modules[module]
if target.isalnum(): return getattr(sys.modules[module], target)
package_name = module.split('.')[0]
namespace[package_name] = sys.modules[package_name]
return eval('%s.%s' % (module, target), namespace)
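# Examples (standard library targets; the last result is shown for POSIX):
#
#     load('os.path')                            # -> the os.path module
#     load('os.path:join')                       # -> the join function
#     load('os.path:join(a, b)', a='x', b='y')   # -> 'x/y'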
def load_app(target):
""" Load a bottle application from a module and make sure that the import
does not affect the current default application, but returns a separate
application object. See :func:`load` for the target parameter. """
global NORUN; NORUN, nr_old = True, NORUN
try:
tmp = default_app.push() # Create a new "default application"
rv = load(target) # Import the target module
return rv if callable(rv) else tmp
finally:
default_app.remove(tmp) # Remove the temporary added default application
NORUN = nr_old
_debug = debug
def run(app=None, server='wsgiref', host='127.0.0.1', port=8080,
interval=1, reloader=False, quiet=False, plugins=None,
debug=None, **kargs):
""" Start a server instance. This method blocks until the server terminates.
:param app: WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`)
:param server: Server adapter to use. See :data:`server_names` keys
for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`)
        :param host: Server address to bind to. Pass ``0.0.0.0`` to listen on
all interfaces including the external one. (default: 127.0.0.1)
:param port: Server port to bind to. Values below 1024 require root
privileges. (default: 8080)
:param reloader: Start auto-reloading server? (default: False)
:param interval: Auto-reloader interval in seconds (default: 1)
:param quiet: Suppress output to stdout and stderr? (default: False)
:param options: Options passed to the server adapter.
"""
if NORUN: return
if reloader and not os.environ.get('BOTTLE_CHILD'):
try:
lockfile = None
fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
os.close(fd) # We only need this file to exist. We never write to it
while os.path.exists(lockfile):
args = [sys.executable] + sys.argv
environ = os.environ.copy()
environ['BOTTLE_CHILD'] = 'true'
environ['BOTTLE_LOCKFILE'] = lockfile
p = subprocess.Popen(args, env=environ)
while p.poll() is None: # Busy wait...
os.utime(lockfile, None) # I am alive!
time.sleep(interval)
if p.poll() != 3:
if os.path.exists(lockfile): os.unlink(lockfile)
sys.exit(p.poll())
except KeyboardInterrupt:
pass
finally:
if os.path.exists(lockfile):
os.unlink(lockfile)
return
try:
if debug is not None: _debug(debug)
app = app or default_app()
if isinstance(app, basestring):
app = load_app(app)
if not callable(app):
raise ValueError("Application is not callable: %r" % app)
for plugin in plugins or []:
app.install(plugin)
if server in server_names:
server = server_names.get(server)
if isinstance(server, basestring):
server = load(server)
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise ValueError("Unknown or unsupported server: %r" % server)
server.quiet = server.quiet or quiet
if not server.quiet:
_stderr("Bottle v%s server starting up (using %s)...\n" % (__version__, repr(server)))
_stderr("Listening on http://%s:%d/\n" % (server.host, server.port))
_stderr("Hit Ctrl-C to quit.\n\n")
if reloader:
lockfile = os.environ.get('BOTTLE_LOCKFILE')
bgcheck = FileCheckerThread(lockfile, interval)
with bgcheck:
server.run(app)
if bgcheck.status == 'reload':
sys.exit(3)
else:
server.run(app)
except KeyboardInterrupt:
pass
except (SystemExit, MemoryError):
raise
except:
if not reloader: raise
if not getattr(server, 'quiet', quiet):
print_exc()
time.sleep(interval)
sys.exit(3)
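# Minimal usage sketch (host and port are illustrative):
#
#     if __name__ == '__main__':
#         run(host='localhost', port=8080, reloader=True)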
class FileCheckerThread(threading.Thread):
    ''' Interrupt the main thread as soon as a changed module file is detected,
        the lockfile gets deleted or gets too old. '''
def __init__(self, lockfile, interval):
threading.Thread.__init__(self)
self.lockfile, self.interval = lockfile, interval
#: Is one of 'reload', 'error' or 'exit'
self.status = None
def run(self):
exists = os.path.exists
mtime = lambda path: os.stat(path).st_mtime
files = dict()
for module in list(sys.modules.values()):
path = getattr(module, '__file__', '')
if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
if path and exists(path): files[path] = mtime(path)
while not self.status:
if not exists(self.lockfile)\
or mtime(self.lockfile) < time.time() - self.interval - 5:
self.status = 'error'
thread.interrupt_main()
for path, lmtime in list(files.items()):
if not exists(path) or mtime(path) > lmtime:
self.status = 'reload'
thread.interrupt_main()
break
time.sleep(self.interval)
def __enter__(self):
self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.status: self.status = 'exit' # silent exit
self.join()
return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)
###############################################################################
# Template Adapters ############################################################
###############################################################################
class TemplateError(HTTPError):
def __init__(self, message):
HTTPError.__init__(self, 500, message)
class BaseTemplate(object):
""" Base class and minimal API for template adapters """
extensions = ['tpl','html','thtml','stpl']
settings = {} #used in prepare()
defaults = {} #used in render()
def __init__(self, source=None, name=None, lookup=[], encoding='utf8', **settings):
""" Create a new template.
If the source parameter (str or buffer) is missing, the name argument
is used to guess a template filename. Subclasses can assume that
self.source and/or self.filename are set. Both are strings.
The lookup, encoding and settings parameters are stored as instance
variables.
The lookup parameter stores a list containing directory paths.
The encoding parameter should be used to decode byte strings or files.
The settings parameter contains a dict for engine-specific settings.
"""
self.name = name
self.source = source.read() if hasattr(source, 'read') else source
self.filename = source.filename if hasattr(source, 'filename') else None
self.lookup = [os.path.abspath(x) for x in lookup]
self.encoding = encoding
self.settings = self.settings.copy() # Copy from class variable
self.settings.update(settings) # Apply
if not self.source and self.name:
self.filename = self.search(self.name, self.lookup)
if not self.filename:
raise TemplateError('Template %s not found.' % repr(name))
if not self.source and not self.filename:
raise TemplateError('No template specified.')
self.prepare(**self.settings)
@classmethod
def search(cls, name, lookup=[]):
""" Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. """
if not lookup:
depr('The template lookup path list should not be empty.', True) #0.12
lookup = ['.']
if os.path.isabs(name) and os.path.isfile(name):
depr('Absolute template path names are deprecated.', True) #0.12
return os.path.abspath(name)
for spath in lookup:
spath = os.path.abspath(spath) + os.sep
fname = os.path.abspath(os.path.join(spath, name))
if not fname.startswith(spath): continue
if os.path.isfile(fname): return fname
for ext in cls.extensions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext)
@classmethod
def global_config(cls, key, *args):
''' This reads or sets the global settings stored in class.settings. '''
if args:
cls.settings = cls.settings.copy() # Make settings local to class
cls.settings[key] = args[0]
else:
return cls.settings[key]
def prepare(self, **options):
""" Run preparations (parsing, caching, ...).
It should be possible to call this again to refresh a template or to
update settings.
"""
raise NotImplementedError
def render(self, *args, **kwargs):
""" Render the template with the specified local variables and return
a single byte or unicode string. If it is a byte string, the encoding
must match self.encoding. This method must be thread-safe!
Local variables may be provided in dictionaries (args)
or directly, as keywords (kwargs).
"""
raise NotImplementedError
class MakoTemplate(BaseTemplate):
def prepare(self, **options):
from mako.template import Template
from mako.lookup import TemplateLookup
options.update({'input_encoding':self.encoding})
options.setdefault('format_exceptions', bool(DEBUG))
lookup = TemplateLookup(directories=self.lookup, **options)
if self.source:
self.tpl = Template(self.source, lookup=lookup, **options)
else:
self.tpl = Template(uri=self.name, filename=self.filename, lookup=lookup, **options)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
def prepare(self, **options):
from Cheetah.Template import Template
self.context = threading.local()
self.context.vars = {}
options['searchList'] = [self.context.vars]
if self.source:
self.tpl = Template(source=self.source, **options)
else:
self.tpl = Template(file=self.filename, **options)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
self.context.vars.update(self.defaults)
self.context.vars.update(kwargs)
out = str(self.tpl)
self.context.vars.clear()
return out
class Jinja2Template(BaseTemplate):
def prepare(self, filters=None, tests=None, globals={}, **kwargs):
from jinja2 import Environment, FunctionLoader
self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
if filters: self.env.filters.update(filters)
if tests: self.env.tests.update(tests)
if globals: self.env.globals.update(globals)
if self.source:
self.tpl = self.env.from_string(self.source)
else:
self.tpl = self.env.get_template(self.filename)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
def loader(self, name):
fname = self.search(name, self.lookup)
if not fname: return
with open(fname, "rb") as f:
return f.read().decode(self.encoding)
class SimpleTemplate(BaseTemplate):
# TODO: Figure out how to pass the arguments for this correctly
def prepare(self, escape_func=html_escape, noescape=True, syntax=None, **ka):
self.cache = {}
enc = self.encoding
self._str = lambda x: touni(x, enc)
self._escape = lambda x: escape_func(touni(x, enc))
self.syntax = syntax
if noescape:
self._str, self._escape = self._escape, self._str
@cached_property
def co(self):
return compile(self.code, self.filename or '<string>', 'exec')
@cached_property
def code(self):
source = self.source or open(self.filename, 'rb').read()
try:
source, encoding = touni(source), 'utf8'
except UnicodeError:
depr('Template encodings other than utf8 are no longer supported.') #0.11
source, encoding = touni(source, 'latin1'), 'latin1'
parser = StplParser(source, encoding=encoding, syntax=self.syntax)
code = parser.translate()
self.encoding = parser.encoding
return code
def _rebase(self, _env, _name=None, **kwargs):
_env['_rebase'] = (_name, kwargs)
def _include(self, _env, _name=None, **kwargs):
env = _env.copy()
env.update(kwargs)
if _name not in self.cache:
self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
return self.cache[_name].execute(env['_stdout'], env)
def execute(self, _stdout, kwargs):
env = self.defaults.copy()
env.update(kwargs)
env.update({'_stdout': _stdout, '_printlist': _stdout.extend,
'include': functools.partial(self._include, env),
'rebase': functools.partial(self._rebase, env), '_rebase': None,
'_str': self._str, '_escape': self._escape, 'get': env.get,
'setdefault': env.setdefault, 'defined': env.__contains__ })
eval(self.co, env)
if env.get('_rebase'):
subtpl, rargs = env.pop('_rebase')
rargs['base'] = ''.join(_stdout) #copy stdout
del _stdout[:] # clear stdout
return self._include(env, subtpl, **rargs)
return env
def render(self, *args, **kwargs):
""" Render the template using keyword arguments as local variables. """
env = {}; stdout = []
for dictarg in args: env.update(dictarg)
env.update(kwargs)
self.execute(stdout, env)
return ''.join(stdout)
class StplSyntaxError(TemplateError): pass
class StplParser(object):
''' Parser for stpl templates. '''
_re_cache = {} #: Cache for compiled re patterns
# This huge pile of voodoo magic splits python code into 8 different tokens.
# 1: All kinds of python strings (trust me, it works)
_re_tok = '((?m)[urbURB]?(?:\'\'(?!\')|""(?!")|\'{6}|"{6}' \
'|\'(?:[^\\\\\']|\\\\.)+?\'|"(?:[^\\\\"]|\\\\.)+?"' \
'|\'{3}(?:[^\\\\]|\\\\.|\\n)+?\'{3}' \
'|"{3}(?:[^\\\\]|\\\\.|\\n)+?"{3}))'
_re_inl = _re_tok.replace('|\\n','') # We re-use this string pattern later
# 2: Comments (until end of line, but not the newline itself)
_re_tok += '|(#.*)'
# 3,4: Keywords that start or continue a python block (only start of line)
_re_tok += '|^([ \\t]*(?:if|for|while|with|try|def|class)\\b)' \
'|^([ \\t]*(?:elif|else|except|finally)\\b)'
# 5: Our special 'end' keyword (but only if it stands alone)
_re_tok += '|((?:^|;)[ \\t]*end[ \\t]*(?=(?:%(block_close)s[ \\t]*)?\\r?$|;|#))'
# 6: A customizable end-of-code-block template token (only end of line)
_re_tok += '|(%(block_close)s[ \\t]*(?=$))'
# 7: And finally, a single newline. The 8th token is 'everything else'
_re_tok += '|(\\r?\\n)'
# Match the start tokens of code areas in a template
_re_split = '(?m)^[ \t]*(\\\\?)((%(line_start)s)|(%(block_start)s))'
# Match inline statements (may contain python strings)
_re_inl = '%%(inline_start)s((?:%s|[^\'"\n]*?)+)%%(inline_end)s' % _re_inl
default_syntax = '<% %> % {{ }}'
def __init__(self, source, syntax=None, encoding='utf8'):
self.source, self.encoding = touni(source, encoding), encoding
self.set_syntax(syntax or self.default_syntax)
self.code_buffer, self.text_buffer = [], []
self.lineno, self.offset = 1, 0
self.indent, self.indent_mod = 0, 0
def get_syntax(self):
''' Tokens as a space separated string (default: <% %> % {{ }}) '''
return self._syntax
def set_syntax(self, syntax):
self._syntax = syntax
self._tokens = syntax.split()
        if syntax not in self._re_cache:
names = 'block_start block_close line_start inline_start inline_end'
etokens = map(re.escape, self._tokens)
pattern_vars = dict(zip(names.split(), etokens))
patterns = (self._re_split, self._re_tok, self._re_inl)
patterns = [re.compile(p%pattern_vars) for p in patterns]
self._re_cache[syntax] = patterns
self.re_split, self.re_tok, self.re_inl = self._re_cache[syntax]
syntax = property(get_syntax, set_syntax)
def translate(self):
if self.offset: raise RuntimeError('Parser is a one time instance.')
while True:
m = self.re_split.search(self.source[self.offset:])
if m:
text = self.source[self.offset:self.offset+m.start()]
self.text_buffer.append(text)
self.offset += m.end()
if m.group(1): # Escape syntax
line, sep, _ = self.source[self.offset:].partition('\n')
self.text_buffer.append(m.group(2)+line+sep)
self.offset += len(line+sep)+1
continue
self.flush_text()
self.read_code(multiline=bool(m.group(4)))
else: break
self.text_buffer.append(self.source[self.offset:])
self.flush_text()
return ''.join(self.code_buffer)
def read_code(self, multiline):
code_line, comment = '', ''
while True:
m = self.re_tok.search(self.source[self.offset:])
if not m:
code_line += self.source[self.offset:]
self.offset = len(self.source)
self.write_code(code_line.strip(), comment)
return
code_line += self.source[self.offset:self.offset+m.start()]
self.offset += m.end()
_str, _com, _blk1, _blk2, _end, _cend, _nl = m.groups()
if _str: # Python string
code_line += _str
elif _com: # Python comment (up to EOL)
comment = _com
if multiline and _com.strip().endswith(self._tokens[1]):
multiline = False # Allow end-of-block in comments
elif _blk1: # Start-block keyword (if/for/while/def/try/...)
code_line, self.indent_mod = _blk1, -1
self.indent += 1
elif _blk2: # Continue-block keyword (else/elif/except/...)
code_line, self.indent_mod = _blk2, -1
elif _end: # The non-standard 'end'-keyword (ends a block)
self.indent -= 1
elif _cend: # The end-code-block template token (usually '%>')
if multiline: multiline = False
else: code_line += _cend
else: # \n
self.write_code(code_line.strip(), comment)
self.lineno += 1
code_line, comment, self.indent_mod = '', '', 0
if not multiline:
break
def flush_text(self):
text = ''.join(self.text_buffer)
del self.text_buffer[:]
if not text: return
parts, pos, nl = [], 0, '\\\n'+' '*self.indent
for m in self.re_inl.finditer(text):
prefix, pos = text[pos:m.start()], m.end()
if prefix:
parts.append(nl.join(map(repr, prefix.splitlines(True))))
if prefix.endswith('\n'): parts[-1] += nl
parts.append(self.process_inline(m.group(1).strip()))
if pos < len(text):
prefix = text[pos:]
lines = prefix.splitlines(True)
if lines[-1].endswith('\\\\\n'): lines[-1] = lines[-1][:-3]
elif lines[-1].endswith('\\\\\r\n'): lines[-1] = lines[-1][:-4]
parts.append(nl.join(map(repr, lines)))
code = '_printlist((%s,))' % ', '.join(parts)
self.lineno += code.count('\n')+1
self.write_code(code)
def process_inline(self, chunk):
if chunk[0] == '!': return '_str(%s)' % chunk[1:]
return '_escape(%s)' % chunk
def write_code(self, line, comment=''):
code = ' ' * (self.indent+self.indent_mod)
code += line.lstrip() + comment + '\n'
self.code_buffer.append(code)
def template(*args, **kwargs):
'''
    Get a rendered template as a string.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
'''
tpl = args[0] if args else None
adapter = kwargs.pop('template_adapter', SimpleTemplate)
lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
tplid = (id(lookup), tpl)
if tplid not in TEMPLATES or DEBUG:
settings = kwargs.pop('template_settings', {})
if isinstance(tpl, adapter):
TEMPLATES[tplid] = tpl
if settings: TEMPLATES[tplid].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tplid]:
abort(500, 'Template (%s) not found' % tpl)
for dictarg in args[1:]: kwargs.update(dictarg)
return TEMPLATES[tplid].render(kwargs)
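# Editor's example (illustrative, not part of the original source): the first
# argument may be an inline template string or a template name resolved via
# TEMPLATE_PATH, typically as './views/<name>.tpl':
#
#   template('Hello {{name}}!', name='World')   # -> 'Hello World!'
#   template('index', dict(title='Home'))       # loads and renders index.tpl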
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template, template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
def view(tpl_name, **defaults):
    ''' Decorator: renders a template for a handler.
        The handler can control its behavior as follows:
          - return a dict of template vars to fill out the template
          - return something other than a dict, and the view decorator will
            not process the template but return the handler result as-is.
            This includes returning an HTTPResponse(dict) to get,
            for instance, JSON with autojson or other castfilters.
    '''
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return template(tpl_name, **tplvars)
elif result is None:
return template(tpl_name, defaults)
return result
return wrapper
return decorator
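# Editor's example (illustrative; the route and template name are
# hypothetical):
#
#   @route('/hello/<name>')
#   @view('hello_template')
#   def hello(name):
#       return dict(name=name)   # the dict fills the template's variables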
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
###############################################################################
# Constants and Globals ########################################################
###############################################################################
TEMPLATE_PATH = ['./', './views/']
TEMPLATES = {}
DEBUG = False
NORUN = False # If set, run() does nothing. Used by load_app()
#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
HTTP_CODES[428] = "Precondition Required"
HTTP_CODES[429] = "Too Many Requests"
HTTP_CODES[431] = "Request Header Fields Too Large"
HTTP_CODES[511] = "Network Authentication Required"
_HTTP_STATUS_LINES = dict((k, '%d %s'%(k,v)) for (k,v) in HTTP_CODES.items())
#: The default template used for error pages. Override with @error()
ERROR_PAGE_TEMPLATE = """
%%try:
%%from %s import DEBUG, HTTP_CODES, request, touni
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error: {{e.status}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans;}
body {background-color: #fff; border: 1px solid #ddd;
padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error: {{e.status}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
caused an error:</p>
<pre>{{e.body}}</pre>
%%if DEBUG and e.exception:
<h2>Exception:</h2>
<pre>{{repr(e.exception)}}</pre>
%%end
%%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%%end
</body>
</html>
%%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to
the import path.
%%end
""" % __name__
#: A thread-safe instance of :class:`LocalRequest`. If accessed from within a
#: request callback, this instance always refers to the *current* request
#: (even on a multithreaded server).
request = LocalRequest()
#: A thread-safe instance of :class:`LocalResponse`. It is used to change the
#: HTTP response for the *current* request.
response = LocalResponse()
#: A thread-safe namespace. Not used by Bottle.
local = threading.local()
# Initialize app stack (create first empty Bottle app)
# BC: 0.6.4 and needed for run()
app = default_app = AppStack()
app.push()
#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else __name__+".ext", 'bottle_%s').module
if __name__ == '__main__':
opt, args, parser = _cmd_options, _cmd_args, _cmd_parser
if opt.version:
_stdout('Bottle %s\n'%__version__)
sys.exit(0)
if not args:
parser.print_help()
_stderr('\nError: No application specified.\n')
sys.exit(1)
sys.path.insert(0, '.')
sys.modules.setdefault('bottle', sys.modules['__main__'])
host, port = (opt.bind or 'localhost'), 8080
if ':' in host and host.rfind(']') < host.rfind(':'):
host, port = host.rsplit(':', 1)
host = host.strip('[]')
run(args[0], host=host, port=int(port), server=opt.server,
reloader=opt.reload, plugins=opt.plugin, debug=opt.debug)
# THE END
|
joshzarrabi/e-mission-server
|
emission/net/api/bottle.py
|
Python
|
bsd-3-clause
| 141,855
|
#
# Copyright (C) 2003-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
""" lazy generator of 2D pharmacophore signature data
"""
from __future__ import print_function
from rdkit import Chem
from rdkit.Chem.Pharm2D import SigFactory, Matcher
raise NotImplementedError('not finished yet')
class Generator(object):
"""
Important attributes:
   - mol: the molecule whose signature is being worked with
   - sigFactory: the SigFactory object with signature parameters.
     NOTE: no preprocessing is carried out for _sigFactory_; it *must* be
     pre-initialized.
"""
def __init__(self, sigFactory, mol, dMat=None, bitCache=True):
""" constructor
**Arguments**
- sigFactory: a signature factory, see class docs
- mol: a molecule, see class docs
- dMat: (optional) a distance matrix for the molecule. If this
is not provided, one will be calculated
- bitCache: (optional) if nonzero, a local cache of which bits
have been queried will be maintained. Otherwise things must
        be recalculated each time a bit is queried.
"""
if not isinstance(sigFactory, SigFactory.SigFactory):
raise ValueError('bad factory')
    self.sigFactory = sigFactory
    self.sig = sigFactory.GetSignature()  # bit container used by GetBit() and __len__()
self.mol = mol
if dMat is None:
useBO = sigFactory.includeBondOrder
dMat = Chem.GetDistanceMatrix(mol, useBO)
self.dMat = dMat
if bitCache:
self.bits = {}
else:
self.bits = None
featFamilies = [fam for fam in sigFactory.featFactory.GetFeatureFamilies()
if fam not in sigFactory.skipFeats]
nFeats = len(featFamilies)
featMatches = {}
for fam in featFamilies:
featMatches[fam] = []
feats = sigFactory.featFactory.GetFeaturesForMol(mol)
for feat in feats:
if feat.GetFamily() not in sigFactory.skipFeats:
featMatches[feat.GetFamily()].append(feat.GetAtomIds())
    # per-family lists of matching atom-id tuples; consumed via the
    # matchingAtoms argument of Matcher.GetAtomsMatchingBit() in GetBit()
    self.pattMatches = featMatches
def GetBit(self, idx):
""" returns a bool indicating whether or not the bit is set
"""
if idx < 0 or idx >= self.sig.GetSize():
raise IndexError('Index %d invalid' % (idx))
if self.bits is not None and idx in self.bits:
return self.bits[idx]
tmp = Matcher.GetAtomsMatchingBit(self.sig, idx, self.mol, dMat=self.dMat, justOne=1,
matchingAtoms=self.pattMatches)
    res = 1 if tmp else 0
if self.bits is not None:
self.bits[idx] = res
return res
def __len__(self):
""" allows class to support len()
"""
return self.sig.GetSize()
def __getitem__(self, itm):
""" allows class to support random access.
Calls self.GetBit()
"""
return self.GetBit(itm)
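# Editor's note: the benchmark below shows the intended API; construct
# Generator(factory, mol) once, then gen[i] lazily computes (and, when
# bitCache is enabled, caches) whether pharmacophore bit i is set. The
# module-level NotImplementedError above marks the file as unfinished.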
if __name__ == '__main__':
import time
from rdkit import RDConfig, Chem
from rdkit.Chem.Pharm2D import Gobbi_Pharm2D, Generate
import random
factory = Gobbi_Pharm2D.factory
nToDo = 100
inD = open(RDConfig.RDDataDir + "/NCI/first_5K.smi", 'r').readlines()[:nToDo]
mols = [None] * len(inD)
for i in range(len(inD)):
smi = inD[i].split('\t')[0]
    smi = smi.strip()
mols[i] = Chem.MolFromSmiles(smi)
sig = factory.GetSignature()
nBits = 300
random.seed(23)
bits = [random.randint(0, sig.GetSize() - 1) for x in range(nBits)]
print('Using the Lazy Generator')
t1 = time.time()
for i in range(len(mols)):
if not i % 10:
print('done mol %d of %d' % (i, len(mols)))
gen = Generator(factory, mols[i])
for bit in bits:
v = gen[bit]
t2 = time.time()
print('\tthat took %4.2f seconds' % (t2 - t1))
print('Generating and checking signatures')
t1 = time.time()
for i in range(len(mols)):
if not i % 10:
print('done mol %d of %d' % (i, len(mols)))
sig = Generate.Gen2DFingerprint(mols[i], factory)
for bit in bits:
v = sig[bit]
t2 = time.time()
print('\tthat took %4.2f seconds' % (t2 - t1))
|
rvianello/rdkit
|
rdkit/Chem/Pharm2D/LazyGenerator.py
|
Python
|
bsd-3-clause
| 4,307
|
import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDClassifier.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDClassifier.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDClassifier.decision_function(self, X, *args, **kw)
def predict_proba(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDClassifier.predict_proba(self, X, *args, **kw)
def predict_log_proba(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDClassifier.predict_log_proba(self, X, *args, **kw)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
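# Editor's note: the two thin wrappers above let the dense test suites below
# run unchanged against scipy.sparse input by converting X to CSR on every
# call; e.g. SparseSGDClassifier().fit(X, Y) behaves like
# SGDClassifier().fit(sp.csr_matrix(X), Y).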
##
## Test Data
##
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
##
## Classification Test Case
##
class CommonTest(object):
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
#... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
"""Input format tests. """
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
"""Test whether clone works ok. """
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory = SGDClassifier
def test_sgd(self):
"""Check that SGD gives any results :-)"""
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
#assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
"""Check whether expected ValueError on bad l1_ratio"""
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
"""Check whether expected ValueError on bad learning_rate"""
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
"""Check whether expected ValueError on bad eta0"""
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
"""Check whether expected ValueError on bad alpha"""
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
"""Check whether expected ValueError on bad penalty"""
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
"""Check whether expected ValueError on bad loss"""
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
"""Test parameter validity check"""
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
"""Test parameter validity check"""
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
"""Checks coef_init not allowed as model argument (only fit)"""
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
"""Checks coef_init shape for the warm starts"""
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
"""Checks intercept_ shape for the warm starts"""
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
"""Checks intercept_ shape for the warm starts in binary case"""
self.factory().fit(X5, Y5, intercept_init=0)
def test_set_intercept_to_intercept(self):
"""Checks intercept_ shape consistency for the warm starts"""
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
"""Target must have at least two labels"""
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_sgd_multiclass(self):
"""Multi-class test case"""
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_with_init_coef(self):
"""Multi-class test case"""
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
        assert_equal(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
"""Multi-class test case with multi-core support"""
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
"""Checks coef_init and intercept_init shape for for multi-class
problems"""
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
"""Check SGD.predict_proba"""
# hinge loss does not allow for conditional prob estimate
clf = self.factory(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_raises(NotImplementedError, clf.predict_proba, [3, 2])
# log and modified_huber losses can output probability estimates
# binary case
        for loss in ["log", "modified_huber"]:
            clf = self.factory(loss=loss, alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([3, 2])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([-1, -1])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([3, 2])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([-1, -1])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([-1, -1])
d = clf.decision_function([-1, -1])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([3, 2])
p = clf.predict_proba([3, 2])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([-1, -1])
p = clf.predict_proba([-1, -1])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([3, 2])
p = clf.predict_proba([3, 2])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function(x)
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba(x)
assert_array_almost_equal(p[0], [1/3.] * 3)
def test_sgd_l1(self):
"""Test L1 regularization"""
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
"""
Test class weights.
"""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # we give a small weight to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
        # now the hyperplane should rotate clockwise and
        # the prediction for this point should flip
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
"""Test if equal class weights approx. equals no class weights. """
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
"""ValueError due to not existing class label."""
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
"""ValueError due to wrong class_weight argument type."""
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_auto_weight(self):
"""Test class weights for imbalanced data"""
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X)), 0.96,
decimal=1)
# make the same prediction using automated class_weight
clf_auto = self.factory(alpha=0.0001, n_iter=1000,
class_weight="auto").fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_auto.predict(X)), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "auto"
assert_array_almost_equal(clf.coef_, clf_auto.coef_, 6)
        # build a very imbalanced dataset out of the iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred), 0.96)
# fit a model with auto class_weight enabled
clf = self.factory(n_iter=1000, class_weight="auto")
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred), 0.96)
        # fit again with the same auto class_weight; the improvement should
        # be reproducible
clf = self.factory(n_iter=1000, class_weight="auto")
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred), 0.96)
def test_sample_weights(self):
"""Test weights on individual samples"""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
        # we give small weights to the three samples of class 1
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
        # now the hyperplane should rotate clockwise and
        # the prediction for this point should flip
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
"""Test if ValueError is raised if sample_weight has wrong shape"""
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
        assert_equal(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([0, 0]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
        assert_equal(id1, id2)
def test_fit_then_partial_fit(self):
"""Partial_fit should work after initial fit in the multiclass case.
Non-regression test for #2496; fit would previously produce a
Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
"""
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
"""Test multiple calls of fit w/ different shaped inputs."""
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory = SGDRegressor
def test_sgd(self):
"""Check that SGD gives any results."""
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
"""Check whether expected ValueError on bad penalty"""
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
"""Check whether expected ValueError on bad loss"""
self.factory(loss="foobar")
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
        n_samples = 100
        rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.99)
# simple linear function with noise
        y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
"""Check that the SGD output is consistent with coordinate descent"""
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
        X = rng.randn(n_samples, n_features)
        # ground-truth linear model that generates y from X and to which the
        # models should converge if the regularizer were set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([0, 0]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
        # check that coef_ hasn't been re-allocated
        assert_equal(id1, id2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions['huber'][1] == 0.1
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory = SparseSGDRegressor
def test_l1_ratio():
"""Test if l1 ratio extremes match L1 and L2 penalty settings. """
X, y = datasets.make_classification(n_samples=1000, n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet', l1_ratio=0.9999999999).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1').fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet', l1_ratio=0.0000000001).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2').fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
|
Tong-Chen/scikit-learn
|
sklearn/linear_model/tests/test_sgd.py
|
Python
|
bsd-3-clause
| 30,538
|
from w3lib.url import parse_data_uri
from scrapy.http import TextResponse
from scrapy.responsetypes import responsetypes
from scrapy.utils.decorators import defers
class DataURIDownloadHandler(object):
lazy = False
def __init__(self, settings):
super(DataURIDownloadHandler, self).__init__()
@defers
def download_request(self, request, spider):
uri = parse_data_uri(request.url)
respcls = responsetypes.from_mimetype(uri.media_type)
resp_kwargs = {}
if (issubclass(respcls, TextResponse) and
uri.media_type.split('/')[0] == 'text'):
charset = uri.media_type_parameters.get('charset')
resp_kwargs['encoding'] = charset
return respcls(url=request.url, body=uri.data, **resp_kwargs)
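# Editor's example (illustrative): downloading
# 'data:text/plain;charset=utf-8,hello' produces a TextResponse whose body is
# b'hello' with encoding 'utf-8'; non-text media types get whatever response
# class responsetypes maps the MIME type to.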
|
kmike/scrapy
|
scrapy/core/downloader/handlers/datauri.py
|
Python
|
bsd-3-clause
| 791
|
from functools import partial
from .primitives import EMPTY
__all__ = ['identity', 'constantly', 'caller',
'partial', 'rpartial', 'func_partial',
'curry', 'rcurry', 'autocurry',
'iffy']
def identity(x):
return x
def constantly(x):
return lambda *a, **kw: x
# an operator.methodcaller() brother
def caller(*a, **kw):
return lambda f: f(*a, **kw)
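# Editor's example: list(map(caller(10), [abs, str])) == [10, '10']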
# not using functools.partial, so that we get a real function
def func_partial(func, *args, **kwargs):
"""
A functools.partial alternative, which returns a real function.
Can be used to construct methods.
"""
return lambda *a, **kw: func(*(args + a), **dict(kwargs, **kw))
def rpartial(func, *args):
return lambda *a: func(*(a + args))
def curry(func, n=EMPTY):
if n is EMPTY:
n = func.__code__.co_argcount
if n <= 1:
return func
elif n == 2:
return lambda x: lambda y: func(x, y)
else:
return lambda x: curry(partial(func, x), n - 1)
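# Editor's example: curry(lambda x, y, z: x + y + z)(1)(2)(3) == 6; the arity
# is read from __code__.co_argcount unless n is passed explicitly.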
def rcurry(func, n=EMPTY):
if n is EMPTY:
n = func.__code__.co_argcount
if n <= 1:
return func
elif n == 2:
return lambda x: lambda y: func(y, x)
else:
return lambda x: rcurry(rpartial(func, x), n - 1)
def autocurry(func, n=EMPTY, _args=(), _kwargs={}):
if n is EMPTY:
n = func.__code__.co_argcount
def autocurried(*a, **kw):
args = _args + a
kwargs = _kwargs.copy()
kwargs.update(kw)
if len(args) + len(kwargs) >= n:
return func(*args, **kwargs)
else:
return autocurry(func, n, _args=args, _kwargs=kwargs)
return autocurried
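# Editor's example: given add = autocurry(lambda x, y, z: x + y + z),
# add(1)(2, z=3) == add(1, 2, 3) == 6; arguments accumulate in any grouping
# until the declared arity is reached.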
def iffy(pred, action=EMPTY, default=identity):
if action is EMPTY:
return iffy(bool, pred)
else:
return lambda v: action(v) if pred(v) else \
default(v) if callable(default) else \
default
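# Editor's example: iffy(lambda x: x < 0, abs)(-3) == 3, while values failing
# the predicate pass through the default (identity) unchanged.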
|
musicpax/funcy
|
funcy/simple_funcs.py
|
Python
|
bsd-3-clause
| 1,941
|
"""Tools for solving inequalities and systems of inequalities. """
from __future__ import print_function, division
from sympy.core import Symbol
from sympy.sets import Interval
from sympy.core.relational import Relational, Eq, Ge, Lt
from sympy.sets.sets import FiniteSet, Union
from sympy.core.singleton import S
from sympy.assumptions import ask, AppliedPredicate, Q
from sympy.functions import re, im, Abs
from sympy.logic import And
from sympy.polys import Poly, PolynomialError, parallel_poly_from_expr
def solve_poly_inequality(poly, rel):
"""Solve a polynomial inequality with rational coefficients.
Examples
========
>>> from sympy import Poly
>>> from sympy.abc import x
>>> from sympy.solvers.inequalities import solve_poly_inequality
>>> solve_poly_inequality(Poly(x, x, domain='ZZ'), '==')
[{0}]
>>> solve_poly_inequality(Poly(x**2 - 1, x, domain='ZZ'), '!=')
[(-oo, -1), (-1, 1), (1, oo)]
>>> solve_poly_inequality(Poly(x**2 - 1, x, domain='ZZ'), '==')
[{-1}, {1}]
See Also
========
solve_poly_inequalities
"""
reals, intervals = poly.real_roots(multiple=False), []
if rel == '==':
for root, _ in reals:
interval = Interval(root, root)
intervals.append(interval)
elif rel == '!=':
left = S.NegativeInfinity
for right, _ in reals + [(S.Infinity, 1)]:
interval = Interval(left, right, True, True)
intervals.append(interval)
left = right
else:
if poly.LC() > 0:
sign = +1
else:
sign = -1
eq_sign, equal = None, False
if rel == '>':
eq_sign = +1
elif rel == '<':
eq_sign = -1
elif rel == '>=':
eq_sign, equal = +1, True
elif rel == '<=':
eq_sign, equal = -1, True
else:
raise ValueError("'%s' is not a valid relation" % rel)
right, right_open = S.Infinity, True
reals.sort(key=lambda w: w[0], reverse=True)
for left, multiplicity in reals:
if multiplicity % 2:
if sign == eq_sign:
intervals.insert(
0, Interval(left, right, not equal, right_open))
sign, right, right_open = -sign, left, not equal
else:
if sign == eq_sign and not equal:
intervals.insert(
0, Interval(left, right, True, right_open))
right, right_open = left, True
elif sign != eq_sign and equal:
intervals.insert(0, Interval(left, left))
if sign == eq_sign:
intervals.insert(
0, Interval(S.NegativeInfinity, right, True, right_open))
return intervals
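# Editor's note on the strict/loose branch above: the real roots are scanned
# from largest to smallest; the polynomial's sign flips only at roots of odd
# multiplicity, so an interval is emitted whenever the tracked sign matches
# the requested one, while even-multiplicity roots merely puncture the line
# (strict relations) or contribute an isolated point (loose relations).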
def solve_poly_inequalities(polys):
"""Solve polynomial inequalities with rational coefficients.
Examples
========
>>> from sympy.solvers.inequalities import solve_poly_inequalities
>>> from sympy.polys import Poly
>>> from sympy.abc import x
>>> solve_poly_inequalities(((
... Poly(x**2 - 3), ">"), (
... Poly(-x**2 + 1), ">")))
(-oo, -sqrt(3)) U (-1, 1) U (sqrt(3), oo)
"""
from sympy import Union
return Union(*[solve_poly_inequality(*p) for p in polys])
def solve_rational_inequalities(eqs):
"""Solve a system of rational inequalities with rational coefficients.
Examples
========
>>> from sympy.abc import x
>>> from sympy import Poly
>>> from sympy.solvers.inequalities import solve_rational_inequalities
>>> solve_rational_inequalities([[
... ((Poly(-x + 1), Poly(1, x)), '>='),
... ((Poly(-x + 1), Poly(1, x)), '<=')]])
{1}
>>> solve_rational_inequalities([[
... ((Poly(x), Poly(1, x)), '!='),
... ((Poly(-x + 1), Poly(1, x)), '>=')]])
(-oo, 0) U (0, 1]
See Also
========
solve_poly_inequality
"""
result = S.EmptySet
for _eqs in eqs:
global_intervals = None
for (numer, denom), rel in _eqs:
numer_intervals = solve_poly_inequality(numer*denom, rel)
denom_intervals = solve_poly_inequality(denom, '==')
if global_intervals is None:
global_intervals = numer_intervals
else:
intervals = []
for numer_interval in numer_intervals:
for global_interval in global_intervals:
interval = numer_interval.intersect(global_interval)
if interval is not S.EmptySet:
intervals.append(interval)
global_intervals = intervals
intervals = []
for global_interval in global_intervals:
for denom_interval in denom_intervals:
global_interval -= denom_interval
if global_interval is not S.EmptySet:
intervals.append(global_interval)
global_intervals = intervals
if not global_intervals:
break
for interval in global_intervals:
result = result.union(interval)
return result
def reduce_rational_inequalities(exprs, gen, assume=True, relational=True):
"""Reduce a system of rational inequalities with rational coefficients.
Examples
========
>>> from sympy import Poly, Symbol
>>> from sympy.solvers.inequalities import reduce_rational_inequalities
>>> x = Symbol('x', real=True)
>>> reduce_rational_inequalities([[x**2 <= 0]], x)
x == 0
>>> reduce_rational_inequalities([[x + 2 > 0]], x)
x > -2
>>> reduce_rational_inequalities([[(x + 2, ">")]], x)
x > -2
>>> reduce_rational_inequalities([[x + 2]], x)
x == -2
"""
exact = True
eqs = []
for _exprs in exprs:
_eqs = []
for expr in _exprs:
if isinstance(expr, tuple):
expr, rel = expr
else:
if expr.is_Relational:
expr, rel = expr.lhs - expr.rhs, expr.rel_op
else:
                    rel = '=='
try:
(numer, denom), opt = parallel_poly_from_expr(
expr.together().as_numer_denom(), gen)
except PolynomialError:
raise PolynomialError("only polynomials and "
"rational functions are supported in this context")
if not opt.domain.is_Exact:
numer, denom, exact = numer.to_exact(), denom.to_exact(), False
domain = opt.domain.get_exact()
if not (domain.is_ZZ or domain.is_QQ):
raise NotImplementedError(
"inequality solving is not supported over %s" % opt.domain)
_eqs.append(((numer, denom), rel))
eqs.append(_eqs)
solution = solve_rational_inequalities(eqs)
if not exact:
solution = solution.evalf()
if not relational:
return solution
real = ask(Q.real(gen), assumptions=assume)
if not real:
result = And(solution.as_relational(re(gen)), Eq(im(gen), 0))
else:
result = solution.as_relational(gen)
return result
def reduce_abs_inequality(expr, rel, gen, assume=True):
"""Reduce an inequality with nested absolute values.
Examples
========
>>> from sympy import Q, Abs
>>> from sympy.abc import x
>>> from sympy.solvers.inequalities import reduce_abs_inequality
>>> reduce_abs_inequality(Abs(x - 5) - 3, '<', x, assume=Q.real(x))
And(2 < x, x < 8)
>>> reduce_abs_inequality(Abs(x + 2)*3 - 13, '<', x, assume=Q.real(x))
And(-19/3 < x, x < 7/3)
See Also
========
reduce_abs_inequalities
"""
if not ask(Q.real(gen), assumptions=assume):
raise NotImplementedError("can't solve inequalities with absolute "
"values of a complex variable")
def _bottom_up_scan(expr):
exprs = []
if expr.is_Add or expr.is_Mul:
op = expr.__class__
for arg in expr.args:
_exprs = _bottom_up_scan(arg)
if not exprs:
exprs = _exprs
else:
args = []
for expr, conds in exprs:
for _expr, _conds in _exprs:
args.append((op(expr, _expr), conds + _conds))
exprs = args
elif expr.is_Pow:
n = expr.exp
if not n.is_Integer or n < 0:
raise ValueError(
"only non-negative integer powers are allowed")
_exprs = _bottom_up_scan(expr.base)
for expr, conds in _exprs:
exprs.append((expr**n, conds))
elif isinstance(expr, Abs):
_exprs = _bottom_up_scan(expr.args[0])
for expr, conds in _exprs:
exprs.append(( expr, conds + [Ge(expr, 0)]))
exprs.append((-expr, conds + [Lt(expr, 0)]))
else:
exprs = [(expr, [])]
return exprs
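    # Editor's note: _bottom_up_scan expands every Abs(e) into two signed
    # branches, (e, conds + [e >= 0]) and (-e, conds + [e < 0]), and takes the
    # cartesian product across Add/Mul arguments, so each returned
    # (expr, conds) pair is free of absolute values.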
exprs = _bottom_up_scan(expr)
mapping = {'<': '>', '<=': '>='}
inequalities = []
for expr, conds in exprs:
if rel not in mapping.keys():
expr = Relational( expr, 0, rel)
else:
expr = Relational(-expr, 0, mapping[rel])
inequalities.append([expr] + conds)
return reduce_rational_inequalities(inequalities, gen, assume)
def reduce_abs_inequalities(exprs, gen, assume=True):
"""Reduce a system of inequalities with nested absolute values.
Examples
========
>>> from sympy import Q, Abs
>>> from sympy.abc import x
>>> from sympy.solvers.inequalities import reduce_abs_inequalities
>>> reduce_abs_inequalities([(Abs(3*x - 5) - 7, '<'),
... (Abs(x + 25) - 13, '>')], x, assume=Q.real(x))
And(-2/3 < x, Or(x < -38, x > -12), x < 4)
>>> reduce_abs_inequalities([(Abs(x - 4) + Abs(3*x - 5) - 7, '<')], x,
... assume=Q.real(x))
And(1/2 < x, x < 4)
See Also
========
reduce_abs_inequality
"""
return And(*[ reduce_abs_inequality(expr, rel, gen, assume)
for expr, rel in exprs ])
def solve_univariate_inequality(expr, gen, assume=True, relational=True):
"""Solves a real univariate inequality.
Examples
========
>>> from sympy.solvers.inequalities import solve_univariate_inequality
>>> from sympy.core.symbol import Symbol
>>> x = Symbol('x', real=True)
>>> solve_univariate_inequality(x**2 >= 4, x)
Or(x <= -2, x >= 2)
>>> solve_univariate_inequality(x**2 >= 4, x, relational=False)
(-oo, -2] U [2, oo)
"""
    # Implementation for continuous functions
from sympy.solvers.solvers import solve
solns = solve(expr.lhs - expr.rhs, gen, assume=assume)
oo = S.Infinity
start = -oo
sol_sets = [S.EmptySet]
for x in sorted(s for s in solns if s.is_real):
end = x
if expr.subs(gen, (start + end)/2 if start != -oo else end - 1):
sol_sets.append(Interval(start, end, True, True))
if expr.subs(gen, x):
sol_sets.append(FiniteSet(x))
start = end
end = oo
if expr.subs(gen, start + 1):
sol_sets.append(Interval(start, end, True, True))
rv = Union(*sol_sets)
return rv if not relational else rv.as_relational(gen)
def _solve_inequality(ie, s, assume=True):
""" A hacky replacement for solve, since the latter only works for
univariate inequalities. """
    if ie.rel_op not in ('>', '>=', '<', '<='):
raise NotImplementedError
expr = ie.lhs - ie.rhs
try:
p = Poly(expr, s)
if p.degree() != 1:
raise NotImplementedError
except (PolynomialError, NotImplementedError):
try:
            return reduce_rational_inequalities([[ie]], s, assume=assume)
except PolynomialError:
return solve_univariate_inequality(ie, s, assume=assume)
a, b = p.all_coeffs()
if a.is_positive:
return ie.func(s, -b/a)
elif a.is_negative:
return ie.func(-b/a, s)
else:
raise NotImplementedError
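# Editor's example for the linear fast path above:
#   _solve_inequality(2*x + 3 > 1, x)  ->  x > -1
# since Poly(2*x + 2, x) has degree 1 and a positive leading coefficient.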
def reduce_inequalities(inequalities, assume=True, symbols=[]):
"""Reduce a system of inequalities with rational coefficients.
Examples
========
>>> from sympy import Q, sympify as S
>>> from sympy.abc import x, y
>>> from sympy.solvers.inequalities import reduce_inequalities
>>> reduce_inequalities(S(0) <= x + 3, Q.real(x), [])
x >= -3
>>> reduce_inequalities(S(0) <= x + y*2 - 1, True, [x])
-2*y + 1 <= x
"""
if not hasattr(inequalities, '__iter__'):
inequalities = [inequalities]
if len(inequalities) == 1 and len(symbols) == 1 \
and inequalities[0].is_Relational:
try:
return _solve_inequality(inequalities[0], symbols[0],
assume=assume)
except NotImplementedError:
pass
poly_part, abs_part, extra_assume = {}, {}, []
for inequality in inequalities:
if inequality == True:
continue
elif inequality == False:
return False
if isinstance(inequality, AppliedPredicate):
extra_assume.append(inequality)
continue
if inequality.is_Relational:
expr, rel = inequality.lhs - inequality.rhs, inequality.rel_op
else:
expr, rel = inequality, '=='
gens = expr.free_symbols
if not gens:
return False
elif len(gens) == 1:
gen = gens.pop()
else:
raise NotImplementedError(
"only univariate inequalities are supported")
components = expr.find(lambda u: u.is_Function)
if not components:
if gen in poly_part:
poly_part[gen].append((expr, rel))
else:
poly_part[gen] = [(expr, rel)]
else:
if all(isinstance(comp, Abs) for comp in components):
if gen in abs_part:
abs_part[gen].append((expr, rel))
else:
abs_part[gen] = [(expr, rel)]
else:
raise NotImplementedError("can't reduce %s" % inequalities)
extra_assume = And(*extra_assume)
if assume is not None:
assume = And(assume, extra_assume)
else:
assume = extra_assume
poly_reduced = []
abs_reduced = []
for gen, exprs in poly_part.items():
poly_reduced.append(reduce_rational_inequalities([exprs], gen, assume))
for gen, exprs in abs_part.items():
abs_reduced.append(reduce_abs_inequalities(exprs, gen, assume))
return And(*(poly_reduced + abs_reduced))
|
wdv4758h/ZipPy
|
edu.uci.python.benchmark/src/benchmarks/sympy/sympy/solvers/inequalities.py
|
Python
|
bsd-3-clause
| 14,915
|
import Tkinter as tk
root = tk.Tk()
def noop(): pass
menubar = tk.Menu(root)
# create a pulldown menu, and add it to the menu bar
filemenu = tk.Menu(menubar)
filemenu.add_command(label="Open", command=noop)
filemenu.add_command(label="Save", command=noop)
filemenu.add_separator()
filemenu.add_command(label="Exit", command=root.quit)
menubar.add_cascade(label="File", menu=filemenu)
# create more pulldown menus
editmenu = tk.Menu(menubar)
editmenu.add_command(label="Cut", command=noop)
editmenu.add_command(label="Copy", command=noop)
editmenu.add_command(label="Paste", command=noop)
menubar.add_cascade(label="Edit", menu=editmenu)
helpmenu = tk.Menu(menubar)
helpmenu.add_command(label="About", command=noop)
menubar.add_cascade(label="Help", menu=helpmenu)
# display the menu
root.config(menu=menubar)
root.mainloop()
|
ynonp/python-examples-verint-2016-07
|
30_gui_widgets/05_menus.py
|
Python
|
mit
| 833
|
"""Test interact and interactive."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
from collections import OrderedDict
import nose.tools as nt
import IPython.testing.tools as tt
from IPython.kernel.comm import Comm
from IPython.html import widgets
from IPython.html.widgets import interact, interactive, Widget, interaction
from IPython.utils.py3compat import annotate
#-----------------------------------------------------------------------------
# Utility stuff
#-----------------------------------------------------------------------------
class DummyComm(Comm):
comm_id = 'a-b-c-d'
def open(self, *args, **kwargs):
pass
def send(self, *args, **kwargs):
pass
def close(self, *args, **kwargs):
pass
_widget_attrs = {}
displayed = []
undefined = object()
def setup():
_widget_attrs['_comm_default'] = getattr(Widget, '_comm_default', undefined)
Widget._comm_default = lambda self: DummyComm()
_widget_attrs['_ipython_display_'] = Widget._ipython_display_
def raise_not_implemented(*args, **kwargs):
raise NotImplementedError()
Widget._ipython_display_ = raise_not_implemented
def teardown():
for attr, value in _widget_attrs.items():
if value is undefined:
delattr(Widget, attr)
else:
setattr(Widget, attr, value)
def f(**kwargs):
pass
def clear_display():
global displayed
displayed = []
def record_display(*args):
displayed.extend(args)
#-----------------------------------------------------------------------------
# Actual tests
#-----------------------------------------------------------------------------
def check_widget(w, **d):
"""Check a single widget against a dict"""
for attr, expected in d.items():
if attr == 'cls':
nt.assert_is(w.__class__, expected)
else:
value = getattr(w, attr)
nt.assert_equal(value, expected,
"%s.%s = %r != %r" % (w.__class__.__name__, attr, value, expected)
)
def check_widgets(container, **to_check):
"""Check that widgets are created as expected"""
# build a widget dictionary, so it matches
widgets = {}
for w in container.children:
widgets[w.description] = w
for key, d in to_check.items():
nt.assert_in(key, widgets)
check_widget(widgets[key], **d)
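# Editor's example:
#   check_widgets(container, a=dict(cls=widgets.IntSlider, value=5))
# asserts that the child whose description is 'a' is an IntSlider whose
# current value equals 5.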
def test_single_value_string():
a = u'hello'
c = interactive(f, a=a)
w = c.children[0]
check_widget(w,
cls=widgets.Text,
description='a',
value=a,
)
def test_single_value_bool():
for a in (True, False):
c = interactive(f, a=a)
w = c.children[0]
check_widget(w,
cls=widgets.Checkbox,
description='a',
value=a,
)
def test_single_value_dict():
for d in [
dict(a=5),
dict(a=5, b='b', c=dict),
]:
c = interactive(f, d=d)
w = c.children[0]
check_widget(w,
cls=widgets.Dropdown,
description='d',
options=d,
value=next(iter(d.values())),
)
def test_single_value_float():
for a in (2.25, 1.0, -3.5):
c = interactive(f, a=a)
w = c.children[0]
check_widget(w,
cls=widgets.FloatSlider,
description='a',
value=a,
min= -a if a > 0 else 3*a,
max= 3*a if a > 0 else -a,
step=0.1,
readout=True,
)
def test_single_value_int():
for a in (1, 5, -3):
c = interactive(f, a=a)
nt.assert_equal(len(c.children), 1)
w = c.children[0]
check_widget(w,
cls=widgets.IntSlider,
description='a',
value=a,
min= -a if a > 0 else 3*a,
max= 3*a if a > 0 else -a,
step=1,
readout=True,
)
def test_list_tuple_2_int():
with nt.assert_raises(ValueError):
c = interactive(f, tup=(1,1))
with nt.assert_raises(ValueError):
c = interactive(f, tup=(1,-1))
for min, max in [ (0,1), (1,10), (1,2), (-5,5), (-20,-19) ]:
c = interactive(f, tup=(min, max), lis=[min, max])
nt.assert_equal(len(c.children), 2)
d = dict(
cls=widgets.IntSlider,
min=min,
max=max,
step=1,
readout=True,
)
check_widgets(c, tup=d, lis=d)
def test_list_tuple_3_int():
with nt.assert_raises(ValueError):
c = interactive(f, tup=(1,2,0))
with nt.assert_raises(ValueError):
c = interactive(f, tup=(1,2,-1))
for min, max, step in [ (0,2,1), (1,10,2), (1,100,2), (-5,5,4), (-100,-20,4) ]:
c = interactive(f, tup=(min, max, step), lis=[min, max, step])
nt.assert_equal(len(c.children), 2)
d = dict(
cls=widgets.IntSlider,
min=min,
max=max,
step=step,
readout=True,
)
check_widgets(c, tup=d, lis=d)
def test_list_tuple_2_float():
with nt.assert_raises(ValueError):
c = interactive(f, tup=(1.0,1.0))
with nt.assert_raises(ValueError):
c = interactive(f, tup=(0.5,-0.5))
for min, max in [ (0.5, 1.5), (1.1,10.2), (1,2.2), (-5.,5), (-20,-19.) ]:
c = interactive(f, tup=(min, max), lis=[min, max])
nt.assert_equal(len(c.children), 2)
d = dict(
cls=widgets.FloatSlider,
min=min,
max=max,
step=.1,
readout=True,
)
check_widgets(c, tup=d, lis=d)
def test_list_tuple_3_float():
with nt.assert_raises(ValueError):
c = interactive(f, tup=(1,2,0.0))
with nt.assert_raises(ValueError):
c = interactive(f, tup=(-1,-2,1.))
with nt.assert_raises(ValueError):
c = interactive(f, tup=(1,2.,-1.))
for min, max, step in [ (0.,2,1), (1,10.,2), (1,100,2.), (-5.,5.,4), (-100,-20.,4.) ]:
c = interactive(f, tup=(min, max, step), lis=[min, max, step])
nt.assert_equal(len(c.children), 2)
d = dict(
cls=widgets.FloatSlider,
min=min,
max=max,
step=step,
readout=True,
)
check_widgets(c, tup=d, lis=d)
def test_list_tuple_str():
values = ['hello', 'there', 'guy']
first = values[0]
c = interactive(f, tup=tuple(values), lis=list(values))
nt.assert_equal(len(c.children), 2)
d = dict(
cls=widgets.Dropdown,
value=first,
options=values
)
check_widgets(c, tup=d, lis=d)
def test_list_tuple_invalid():
for bad in [
(),
(5, 'hi'),
('hi', 5),
({},),
(None,),
]:
with nt.assert_raises(ValueError):
print(bad) # because there is no custom message in assert_raises
c = interactive(f, tup=bad)
def test_defaults():
@annotate(n=10)
def f(n, f=4.5, g=1):
pass
c = interactive(f)
check_widgets(c,
n=dict(
cls=widgets.IntSlider,
value=10,
),
f=dict(
cls=widgets.FloatSlider,
value=4.5,
),
g=dict(
cls=widgets.IntSlider,
value=1,
),
)
def test_default_values():
@annotate(n=10, f=(0, 10.), g=5, h={'a': 1, 'b': 2}, j=['hi', 'there'])
def f(n, f=4.5, g=1, h=2, j='there'):
pass
c = interactive(f)
check_widgets(c,
n=dict(
cls=widgets.IntSlider,
value=10,
),
f=dict(
cls=widgets.FloatSlider,
value=4.5,
),
g=dict(
cls=widgets.IntSlider,
value=5,
),
h=dict(
cls=widgets.Dropdown,
options={'a': 1, 'b': 2},
value=2
),
j=dict(
cls=widgets.Dropdown,
options=['hi', 'there'],
value='there'
),
)
def test_default_out_of_bounds():
@annotate(f=(0, 10.), h={'a': 1}, j=['hi', 'there'])
def f(f='hi', h=5, j='other'):
pass
c = interactive(f)
check_widgets(c,
f=dict(
cls=widgets.FloatSlider,
value=5.,
),
h=dict(
cls=widgets.Dropdown,
options={'a': 1},
value=1,
),
j=dict(
cls=widgets.Dropdown,
options=['hi', 'there'],
value='hi',
),
)
def test_annotations():
@annotate(n=10, f=widgets.FloatText())
def f(n, f):
pass
c = interactive(f)
check_widgets(c,
n=dict(
cls=widgets.IntSlider,
value=10,
),
f=dict(
cls=widgets.FloatText,
),
)
def test_priority():
@annotate(annotate='annotate', kwarg='annotate')
def f(kwarg='default', annotate='default', default='default'):
pass
c = interactive(f, kwarg='kwarg')
check_widgets(c,
kwarg=dict(
cls=widgets.Text,
value='kwarg',
),
annotate=dict(
cls=widgets.Text,
value='annotate',
),
)
@nt.with_setup(clear_display)
def test_decorator_kwarg():
with tt.monkeypatch(interaction, 'display', record_display):
@interact(a=5)
def foo(a):
pass
nt.assert_equal(len(displayed), 1)
w = displayed[0].children[0]
check_widget(w,
cls=widgets.IntSlider,
value=5,
)
@nt.with_setup(clear_display)
def test_interact_instancemethod():
class Foo(object):
def show(self, x):
print(x)
f = Foo()
with tt.monkeypatch(interaction, 'display', record_display):
g = interact(f.show, x=(1,10))
nt.assert_equal(len(displayed), 1)
w = displayed[0].children[0]
check_widget(w,
cls=widgets.IntSlider,
value=5,
)
@nt.with_setup(clear_display)
def test_decorator_no_call():
with tt.monkeypatch(interaction, 'display', record_display):
@interact
def foo(a='default'):
pass
nt.assert_equal(len(displayed), 1)
w = displayed[0].children[0]
check_widget(w,
cls=widgets.Text,
value='default',
)
@nt.with_setup(clear_display)
def test_call_interact():
def foo(a='default'):
pass
with tt.monkeypatch(interaction, 'display', record_display):
ifoo = interact(foo)
nt.assert_equal(len(displayed), 1)
w = displayed[0].children[0]
check_widget(w,
cls=widgets.Text,
value='default',
)
@nt.with_setup(clear_display)
def test_call_interact_kwargs():
def foo(a='default'):
pass
with tt.monkeypatch(interaction, 'display', record_display):
ifoo = interact(foo, a=10)
nt.assert_equal(len(displayed), 1)
w = displayed[0].children[0]
check_widget(w,
cls=widgets.IntSlider,
value=10,
)
@nt.with_setup(clear_display)
def test_call_decorated_on_trait_change():
"""test calling @interact decorated functions"""
d = {}
with tt.monkeypatch(interaction, 'display', record_display):
@interact
def foo(a='default'):
d['a'] = a
return a
nt.assert_equal(len(displayed), 1)
w = displayed[0].children[0]
check_widget(w,
cls=widgets.Text,
value='default',
)
# test calling the function directly
a = foo('hello')
nt.assert_equal(a, 'hello')
nt.assert_equal(d['a'], 'hello')
# test that setting trait values calls the function
w.value = 'called'
nt.assert_equal(d['a'], 'called')
@nt.with_setup(clear_display)
def test_call_decorated_kwargs_on_trait_change():
"""test calling @interact(foo=bar) decorated functions"""
d = {}
with tt.monkeypatch(interaction, 'display', record_display):
@interact(a='kwarg')
def foo(a='default'):
d['a'] = a
return a
nt.assert_equal(len(displayed), 1)
w = displayed[0].children[0]
check_widget(w,
cls=widgets.Text,
value='kwarg',
)
# test calling the function directly
a = foo('hello')
nt.assert_equal(a, 'hello')
nt.assert_equal(d['a'], 'hello')
# test that setting trait values calls the function
w.value = 'called'
nt.assert_equal(d['a'], 'called')
def test_fixed():
c = interactive(f, a=widgets.fixed(5), b='text')
nt.assert_equal(len(c.children), 1)
w = c.children[0]
check_widget(w,
cls=widgets.Text,
value='text',
description='b',
)
def test_default_description():
c = interactive(f, b='text')
w = c.children[0]
check_widget(w,
cls=widgets.Text,
value='text',
description='b',
)
def test_custom_description():
d = {}
def record_kwargs(**kwargs):
d.clear()
d.update(kwargs)
c = interactive(record_kwargs, b=widgets.Text(value='text', description='foo'))
w = c.children[0]
check_widget(w,
cls=widgets.Text,
value='text',
description='foo',
)
w.value = 'different text'
nt.assert_equal(d, {'b': 'different text'})
def test_interact_manual_button():
c = interactive(f, __manual=True)
w = c.children[0]
check_widget(w, cls=widgets.Button)
def test_interact_manual_nocall():
    callcount = [0] # a list so the closure below can mutate it (no nonlocal in Python 2)
    def calltest(testarg):
        callcount[0] += 1
    c = interactive(calltest, testarg=5, __manual=True)
    c.children[0].value = 10
    nt.assert_equal(callcount[0], 0)
def test_int_range_logic():
irsw = widgets.IntRangeSlider
w = irsw(value=(2, 4), min=0, max=6)
check_widget(w, cls=irsw, value=(2, 4), min=0, max=6)
w.value = (4, 2)
check_widget(w, cls=irsw, value=(2, 4), min=0, max=6)
w.value = (-1, 7)
check_widget(w, cls=irsw, value=(0, 6), min=0, max=6)
w.min = 3
check_widget(w, cls=irsw, value=(3, 6), min=3, max=6)
w.max = 3
check_widget(w, cls=irsw, value=(3, 3), min=3, max=3)
w.min = 0
w.max = 6
w.lower = 2
w.upper = 4
check_widget(w, cls=irsw, value=(2, 4), min=0, max=6)
w.value = (0, 1) #lower non-overlapping range
check_widget(w, cls=irsw, value=(0, 1), min=0, max=6)
w.value = (5, 6) #upper non-overlapping range
check_widget(w, cls=irsw, value=(5, 6), min=0, max=6)
w.value = (-1, 4) #semi out-of-range
check_widget(w, cls=irsw, value=(0, 4), min=0, max=6)
w.lower = 2
check_widget(w, cls=irsw, value=(2, 4), min=0, max=6)
w.value = (-2, -1) #wholly out of range
check_widget(w, cls=irsw, value=(0, 0), min=0, max=6)
w.value = (7, 8)
check_widget(w, cls=irsw, value=(6, 6), min=0, max=6)
with nt.assert_raises(ValueError):
w.min = 7
with nt.assert_raises(ValueError):
w.max = -1
with nt.assert_raises(ValueError):
w.lower = 5
with nt.assert_raises(ValueError):
w.upper = 1
w = irsw(min=2, max=3)
check_widget(w, min=2, max=3)
w = irsw(min=100, max=200)
check_widget(w, lower=125, upper=175, value=(125, 175))
with nt.assert_raises(ValueError):
irsw(value=(2, 4), lower=3)
with nt.assert_raises(ValueError):
irsw(value=(2, 4), upper=3)
with nt.assert_raises(ValueError):
irsw(value=(2, 4), lower=3, upper=3)
with nt.assert_raises(ValueError):
irsw(min=2, max=1)
with nt.assert_raises(ValueError):
irsw(lower=5)
with nt.assert_raises(ValueError):
irsw(upper=5)
def test_float_range_logic():
frsw = widgets.FloatRangeSlider
w = frsw(value=(.2, .4), min=0., max=.6)
check_widget(w, cls=frsw, value=(.2, .4), min=0., max=.6)
w.value = (.4, .2)
check_widget(w, cls=frsw, value=(.2, .4), min=0., max=.6)
w.value = (-.1, .7)
check_widget(w, cls=frsw, value=(0., .6), min=0., max=.6)
w.min = .3
check_widget(w, cls=frsw, value=(.3, .6), min=.3, max=.6)
w.max = .3
check_widget(w, cls=frsw, value=(.3, .3), min=.3, max=.3)
w.min = 0.
w.max = .6
w.lower = .2
w.upper = .4
check_widget(w, cls=frsw, value=(.2, .4), min=0., max=.6)
w.value = (0., .1) #lower non-overlapping range
check_widget(w, cls=frsw, value=(0., .1), min=0., max=.6)
w.value = (.5, .6) #upper non-overlapping range
check_widget(w, cls=frsw, value=(.5, .6), min=0., max=.6)
w.value = (-.1, .4) #semi out-of-range
check_widget(w, cls=frsw, value=(0., .4), min=0., max=.6)
w.lower = .2
check_widget(w, cls=frsw, value=(.2, .4), min=0., max=.6)
w.value = (-.2, -.1) #wholly out of range
check_widget(w, cls=frsw, value=(0., 0.), min=0., max=.6)
w.value = (.7, .8)
check_widget(w, cls=frsw, value=(.6, .6), min=.0, max=.6)
with nt.assert_raises(ValueError):
w.min = .7
with nt.assert_raises(ValueError):
w.max = -.1
with nt.assert_raises(ValueError):
w.lower = .5
with nt.assert_raises(ValueError):
w.upper = .1
w = frsw(min=2, max=3)
check_widget(w, min=2, max=3)
w = frsw(min=1., max=2.)
check_widget(w, lower=1.25, upper=1.75, value=(1.25, 1.75))
with nt.assert_raises(ValueError):
frsw(value=(2, 4), lower=3)
with nt.assert_raises(ValueError):
frsw(value=(2, 4), upper=3)
with nt.assert_raises(ValueError):
frsw(value=(2, 4), lower=3, upper=3)
with nt.assert_raises(ValueError):
frsw(min=.2, max=.1)
with nt.assert_raises(ValueError):
frsw(lower=5)
with nt.assert_raises(ValueError):
frsw(upper=5)
def test_multiple_selection():
smw = widgets.SelectMultiple
# degenerate multiple select
w = smw()
check_widget(w, value=tuple(), options=None, selected_labels=tuple())
# don't accept random other value when no options
with nt.assert_raises(KeyError):
w.value = (2,)
check_widget(w, value=tuple(), selected_labels=tuple())
# basic multiple select
w = smw(options=[(1, 1)], value=[1])
check_widget(w, cls=smw, value=(1,), options=[(1, 1)])
# don't accept random other value
with nt.assert_raises(KeyError):
w.value = w.value + (2,)
check_widget(w, value=(1,), selected_labels=(1,))
# change options
w.options = w.options + [(2, 2)]
check_widget(w, options=[(1, 1), (2,2)])
# change value
w.value = w.value + (2,)
check_widget(w, value=(1, 2), selected_labels=(1, 2))
# change value name
w.selected_labels = (1,)
check_widget(w, value=(1,))
# don't accept random other names when no options
with nt.assert_raises(KeyError):
w.selected_labels = (3,)
check_widget(w, value=(1,))
# don't accept selected_label (from superclass)
with nt.assert_raises(AttributeError):
w.selected_label = 3
# don't return selected_label (from superclass)
with nt.assert_raises(AttributeError):
print(w.selected_label)
# dict style
w.options = {1: 1}
check_widget(w, options={1: 1})
# updating
with nt.assert_raises(KeyError):
w.value = (2,)
check_widget(w, options={1: 1})
|
wolfram74/numerical_methods_iserles_notes
|
venv/lib/python2.7/site-packages/IPython/html/widgets/tests/test_interaction.py
|
Python
|
mit
| 19,299
|
##
# Copyright 2009-2021 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for MyMediaLite, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
from distutils.version import LooseVersion
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.run import run_cmd
class EB_MyMediaLite(ConfigureMake):
"""Support for building/installing MyMediaLite."""
def configure_step(self):
"""Custom configure step for MyMediaLite, using "make CONFIGURE_OPTIONS='...' configure"."""
if LooseVersion(self.version) < LooseVersion('3'):
cmd = "make CONFIGURE_OPTIONS='--prefix=%s' configure" % self.installdir
run_cmd(cmd, log_all=True, simple=True)
else:
self.cfg.update('installopts', "PREFIX=%s" % self.installdir)
def build_step(self):
"""Custom build step for MyMediaLite, using 'make all' in 'src' directory."""
cmd = "cd src && make all && cd .."
run_cmd(cmd, log_all=True, simple=True)
def sanity_check_step(self):
"""Custom sanity check for MyMediaLite."""
if LooseVersion(self.version) < LooseVersion('3'):
bin_files = ["bin/%s_prediction" % x for x in ['item', 'mapping_item', 'mapping_rating', 'rating']]
else:
bin_files = ["bin/item_recommendation", "bin/rating_based_ranking", "bin/rating_prediction"]
custom_paths = {
'files': bin_files,
'dirs': ["lib/mymedialite"],
}
super(EB_MyMediaLite, self).sanity_check_step(custom_paths=custom_paths)
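# Hedged illustration of the version switch above (the version strings and the
# install prefix are assumed values, not taken from a real build). LooseVersion
# drives the branch: pre-3.x releases run
#   make CONFIGURE_OPTIONS='--prefix=/opt/MyMediaLite' configure
# while 3.x releases skip configure and extend installopts with
#   PREFIX=/opt/MyMediaLite
# which the generic ConfigureMake install step is expected to pass on to
# 'make install'.
assert LooseVersion('2.99') < LooseVersion('3') # takes the pre-3.x branch
assert not LooseVersion('3.11') < LooseVersion('3') # takes the 3.x branch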
|
boegel/easybuild-easyblocks
|
easybuild/easyblocks/m/mymedialite.py
|
Python
|
gpl-2.0
| 2,788
|
#
# Written by Luke Kenneth Casson Leighton <lkcl@lkcl.net>
# This theme demonstrates how to receive data from another theme (see getIncomingData below)
#this import statement allows access to the karamba functions
import karamba
drop_txt = None
#this is called when you widget is initialized
def initWidget(widget):
# this resets the text to "" so we know we've never
# received anything yet from the other theme
name = karamba.getPrettyThemeName(widget)
print "2.py name: ", name
karamba.setIncomingData(widget, name, "")
karamba.redrawWidget(widget)
# this is a pain. in order to avoid memory-related threading problems,
# and also locking, with the communication between themes, the
# communication is done asynchronously. so you have to POLL for the
# information, by reading getIncomingData().
#
# therefore, you must set an interval=time_in_ms in your receiving .theme
# file (see 2.theme) and then call getIncomingData() in here.
#
# it's not ideal - but it works.
#
# NOTE: the data received - by getIncomingData - is NOT, i repeat NOT
# deleted when you call getIncomingData.
# so, obviously, you need to take care to not activate repeatedly.
# you could do this in several ways. one of them is to send, in
# the calling theme (the one that calls setIncomingData) a sequential
# number as part of the message.
#
# alternatively, you could reset the text to "" (see above)
expected_seq = 0
def widgetUpdated(widget):
global expected_seq # i hate globals. please write better code than this example.
# get the "message"...
disp = karamba.getIncomingData(widget)
if disp == "":
return
# decode it...
(seq, x, y, button) = eval(disp)
    # if it's been seen before, skip it... (and remember the newest seq:
    # getIncomingData keeps returning the same message until a new one
    # arrives, so we must not process it twice)
    if seq <= expected_seq:
        return
    expected_seq = seq
message = "seq:%d x:%d y:%d btn:%d" % (seq, x, y, button)
# delete previous text if exists.
global drop_txt
if drop_txt is not None:
karamba.deleteText(widget, drop_txt)
# display it...
drop_txt = karamba.createText(widget, 0, 20, 300, 20, message)
karamba.changeTextColor(widget, drop_txt, 252,252,252)
karamba.redrawWidget(widget)
pass
# This will be printed when the widget loads.
print "Loaded my python 2.py extension!"
|
serghei/kde3-kdeutils
|
superkaramba/examples/setIncomingData/2.py
|
Python
|
gpl-2.0
| 2,246
|
import sys
import os
print "-------------------------"
print "StegHide Options"
print "-------------------------"
print "Usage Example :"
print ""
print"To embed emb.txt in cvr.jpg: steghide embed -cf cvr.jpg -ef emb.txt"
print ""
print "To extract embedded data from stg.jpg: steghide extract -sf stg.jpg"
cmd1 = os.system("xterm")
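# Hedged note (not part of the original script): os.system hands the string
# to a shell; the stdlib subprocess module avoids that, e.g.
#   import subprocess
#   subprocess.call(["xterm"])
# and the steghide invocations printed above could be run the same way:
#   subprocess.call(["steghide", "extract", "-sf", "stg.jpg"])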
|
krintoxi/NoobSec-Toolkit
|
NoobSecToolkit /scripts/pySteg/pysteg.py
|
Python
|
gpl-2.0
| 340
|
# This file is part of Invenio.
# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibObject Module providing BibObject prividing features for documents containing text (not necessarily as the main part of the content)"""
import os
import re
from datetime import datetime
from invenio.config import CFG_BIBINDEX_PERFORM_OCR_ON_DOCNAMES
from invenio.legacy.bibdocfile.api import BibDoc, InvenioBibDocFileError
from invenio.legacy.dbquery import run_sql
from invenio.ext.logging import register_exception
_RE_PERFORM_OCR = re.compile(CFG_BIBINDEX_PERFORM_OCR_ON_DOCNAMES)
class BibTextDoc(BibDoc):
def get_text(self, version=None):
"""
@param version: the requested version. If not set, the latest version
will be used.
@type version: integer
@return: the textual content corresponding to the specified version
of the document.
@rtype: string
"""
if version is None:
version = self.get_latest_version()
if self.has_text(version):
return open(os.path.join(self.basedir, '.text;%i' % version)).read()
else:
return ""
def is_ocr_required(self):
"""
Return True if this document require OCR in order to extract text from it.
"""
for bibrec_link in self.bibrec_links:
if _RE_PERFORM_OCR.match(bibrec_link['docname']):
return True
return False
def get_text_path(self, version=None):
"""
@param version: the requested version. If not set, the latest version
will be used.
@type version: int
@return: the full path to the textual content corresponding to the specified version
of the document.
@rtype: string
"""
if version is None:
version = self.get_latest_version()
if self.has_text(version):
return os.path.join(self.basedir, '.text;%i' % version)
else:
return ""
def extract_text(self, version=None, perform_ocr=False, ln='en'):
"""
Try what is necessary to extract the textual information of a document.
@param version: the version of the document for which text is required.
If not specified the text will be retrieved from the last version.
@type version: integer
@param perform_ocr: whether to perform OCR.
@type perform_ocr: bool
@param ln: a two letter language code to give as a hint to the OCR
procedure.
@type ln: string
@raise InvenioBibDocFileError: in case of error.
@note: the text is extracted and cached for later use. Use L{get_text}
to retrieve it.
"""
raise RuntimeError("Text extraction is not implemented.")
def pdf_a_p(self):
"""
@return: True if this document contains a PDF in PDF/A format.
@rtype: bool"""
return self.has_flag('PDF/A', 'pdf')
def has_text(self, require_up_to_date=False, version=None):
"""
Return True if the text of this document has already been extracted.
@param require_up_to_date: if True check the text was actually
extracted after the most recent format of the given version.
@type require_up_to_date: bool
@param version: a version for which the text should have been
extracted. If not specified the latest version is considered.
@type version: integer
@return: True if the text has already been extracted.
@rtype: bool
"""
if version is None:
version = self.get_latest_version()
if os.path.exists(os.path.join(self.basedir, '.text;%i' % version)):
if not require_up_to_date:
return True
else:
docfiles = self.list_version_files(version)
text_md = datetime.fromtimestamp(os.path.getmtime(os.path.join(self.basedir, '.text;%i' % version)))
for docfile in docfiles:
if text_md <= docfile.md:
return False
return True
return False
def __repr__(self):
return 'BibTextDoc(%s, %s, %s)' % (repr(self.id), repr(self.doctype), repr(self.human_readable))
def supports(doctype, extensions):
    return doctype == "Fulltext" or any(ext.startswith(".pdf") or ext.startswith(".ps") for ext in extensions)
def create_instance(docid=None, doctype='Main', human_readable=False, # pylint: disable=W0613
                    initial_data=None):
    return BibTextDoc(docid=docid, human_readable=human_readable,
                      initial_data=initial_data)
|
ludmilamarian/invenio
|
invenio/legacy/bibdocfile/plugins/bom_textdoc.py
|
Python
|
gpl-2.0
| 5,427
|
#
# Copyright 2014 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Rest alarm notifier with trusted authentication."""
from keystoneclient.v3 import client as keystone_client
from oslo.config import cfg
from six.moves.urllib import parse
from ceilometer.alarm.notifier import rest
cfg.CONF.import_opt('http_timeout', 'ceilometer.service')
cfg.CONF.import_group('service_credentials', 'ceilometer.service')
class TrustRestAlarmNotifier(rest.RestAlarmNotifier):
"""Notifier supporting keystone trust authentication.
This alarm notifier is intended to be used to call an endpoint using
keystone authentication. It uses the ceilometer service user to
authenticate using the trust ID provided.
The URL must be in the form trust+http://trust-id@host/action.
"""
@staticmethod
def notify(action, alarm_id, previous, current, reason, reason_data):
trust_id = action.username
auth_url = cfg.CONF.service_credentials.os_auth_url.replace(
"v2.0", "v3")
client = keystone_client.Client(
username=cfg.CONF.service_credentials.os_username,
password=cfg.CONF.service_credentials.os_password,
cacert=cfg.CONF.service_credentials.os_cacert,
auth_url=auth_url,
region_name=cfg.CONF.service_credentials.os_region_name,
insecure=cfg.CONF.service_credentials.insecure,
timeout=cfg.CONF.http_timeout,
trust_id=trust_id)
# Remove the fake user
netloc = action.netloc.split("@")[1]
# Remove the trust prefix
scheme = action.scheme[6:]
action = parse.SplitResult(scheme, netloc, action.path, action.query,
action.fragment)
headers = {'X-Auth-Token': client.auth_token}
rest.RestAlarmNotifier.notify(
action, alarm_id, previous, current, reason, reason_data, headers)
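# Hedged walkthrough of the URL rewrite above with an assumed action URL
# (values are illustrative, not from a real deployment):
#
# >>> from six.moves.urllib import parse
# >>> action = parse.urlsplit("trust+http://my-trust-id@example.com:8080/alarm")
# >>> action.username # the trust ID rides along as the "fake user"
# 'my-trust-id'
# >>> action.netloc.split("@")[1] # fake user removed
# 'example.com:8080'
# >>> action.scheme[6:] # the 6-character 'trust+' prefix removed
# 'http'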
|
ChinaMassClouds/copenstack-server
|
openstack/src/ceilometer-2014.2.2/ceilometer/alarm/notifier/trust.py
|
Python
|
gpl-2.0
| 2,433
|
# vim: set fileencoding=utf-8 :
# ***********************IMPORTANT NMAP LICENSE TERMS************************
# * *
# * The Nmap Security Scanner is (C) 1996-2013 Insecure.Com LLC. Nmap is *
# * also a registered trademark of Insecure.Com LLC. This program is free *
# * software; you may redistribute and/or modify it under the terms of the *
# * GNU General Public License as published by the Free Software *
# * Foundation; Version 2 ("GPL"), BUT ONLY WITH ALL OF THE CLARIFICATIONS *
# * AND EXCEPTIONS DESCRIBED HEREIN. This guarantees your right to use, *
# * modify, and redistribute this software under certain conditions. If *
# * you wish to embed Nmap technology into proprietary software, we sell *
# * alternative licenses (contact sales@nmap.com). Dozens of software *
# * vendors already license Nmap technology such as host discovery, port *
# * scanning, OS detection, version detection, and the Nmap Scripting *
# * Engine. *
# * *
# * Note that the GPL places important restrictions on "derivative works", *
# * yet it does not provide a detailed definition of that term. To avoid *
# * misunderstandings, we interpret that term as broadly as copyright law *
# * allows. For example, we consider an application to constitute a *
# * derivative work for the purpose of this license if it does any of the *
# * following with any software or content covered by this license *
# * ("Covered Software"): *
# * *
# * o Integrates source code from Covered Software. *
# * *
# * o Reads or includes copyrighted data files, such as Nmap's nmap-os-db *
# * or nmap-service-probes. *
# * *
# * o Is designed specifically to execute Covered Software and parse the *
# * results (as opposed to typical shell or execution-menu apps, which will *
# * execute anything you tell them to). *
# * *
# * o Includes Covered Software in a proprietary executable installer. The *
# * installers produced by InstallShield are an example of this. Including *
# * Nmap with other software in compressed or archival form does not *
# * trigger this provision, provided appropriate open source decompression *
# * or de-archiving software is widely available for no charge. For the *
# * purposes of this license, an installer is considered to include Covered *
# * Software even if it actually retrieves a copy of Covered Software from *
# * another source during runtime (such as by downloading it from the *
# * Internet). *
# * *
# * o Links (statically or dynamically) to a library which does any of the *
# * above. *
# * *
# * o Executes a helper program, module, or script to do any of the above. *
# * *
# * This list is not exclusive, but is meant to clarify our interpretation *
# * of derived works with some common examples. Other people may interpret *
# * the plain GPL differently, so we consider this a special exception to *
# * the GPL that we apply to Covered Software. Works which meet any of *
# * these conditions must conform to all of the terms of this license, *
# * particularly including the GPL Section 3 requirements of providing *
# * source code and allowing free redistribution of the work as a whole. *
# * *
# * As another special exception to the GPL terms, Insecure.Com LLC grants *
# * permission to link the code of this program with any version of the *
# * OpenSSL library which is distributed under a license identical to that *
# * listed in the included docs/licenses/OpenSSL.txt file, and distribute *
# * linked combinations including the two. *
# * *
# * Any redistribution of Covered Software, including any derived works, *
# * must obey and carry forward all of the terms of this license, including *
# * obeying all GPL rules and restrictions. For example, source code of *
# * the whole work must be provided and free redistribution must be *
# * allowed. All GPL references to "this License", are to be treated as *
# * including the terms and conditions of this license text as well. *
# * *
# * Because this license imposes special exceptions to the GPL, Covered *
# * Work may not be combined (even as part of a larger work) with plain GPL *
# * software. The terms, conditions, and exceptions of this license must *
# * be included as well. This license is incompatible with some other open *
# * source licenses as well. In some cases we can relicense portions of *
# * Nmap or grant special permissions to use it in other open source *
# * software. Please contact fyodor@nmap.org with any such requests. *
# * Similarly, we don't incorporate incompatible open source software into *
# * Covered Software without special permission from the copyright holders. *
# * *
# * If you have any questions about the licensing restrictions on using *
# * Nmap in other works, we are happy to help. As mentioned above, we also *
# * offer alternative license to integrate Nmap into proprietary *
# * applications and appliances. These contracts have been sold to dozens *
# * of software vendors, and generally include a perpetual license as well *
# * as providing for priority support and updates. They also fund the *
# * continued development of Nmap. Please email sales@nmap.com for further *
# * information. *
# * *
# * If you have received a written license agreement or contract for *
# * Covered Software stating terms other than these, you may choose to use *
# * and redistribute Covered Software under those terms instead of these. *
# * *
# * Source is provided to this software because we believe users have a *
# * right to know exactly what a program is going to do before they run it. *
# * This also allows you to audit the software for security holes (none *
# * have been found so far). *
# * *
# * Source code also allows you to port Nmap to new platforms, fix bugs, *
# * and add new features. You are highly encouraged to send your changes *
# * to the dev@nmap.org mailing list for possible incorporation into the *
# * main distribution. By sending these changes to Fyodor or one of the *
# * Insecure.Org development mailing lists, or checking them into the Nmap *
# * source code repository, it is understood (unless you specify otherwise) *
# * that you are offering the Nmap Project (Insecure.Com LLC) the *
# * unlimited, non-exclusive right to reuse, modify, and relicense the *
# * code. Nmap will always be available Open Source, but this is important *
# * because the inability to relicense code has caused devastating problems *
# * for other Free Software projects (such as KDE and NASM). We also *
# * occasionally relicense the code to third parties as discussed above. *
# * If you wish to specify special license conditions of your *
# * contributions, just say so when you send them. *
# * *
# * This program is distributed in the hope that it will be useful, but *
# * WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the Nmap *
# * license file for more details (it's in a COPYING file included with *
# * Nmap, and also available from https://svn.nmap.org/nmap/COPYING *
# * *
# ***************************************************************************/
import gtk
import pango
import gobject
from radialnet.bestwidgets.boxes import *
from radialnet.bestwidgets.expanders import BWExpander
from radialnet.bestwidgets.labels import *
from radialnet.bestwidgets.textview import *
import zenmapCore.I18N
PORTS_HEADER = [
_('Port'), _('Protocol'), _('State'), _('Service'), _('Method')]
EXTRAPORTS_HEADER = [_('Count'), _('State'), _('Reasons')]
SERVICE_COLORS = {'open': '#ffd5d5',
'closed': '#d5ffd5',
'filtered': '#ffffd5',
'unfiltered': '#ffd5d5',
'open|filtered': '#ffd5d5',
'closed|filtered': '#d5ffd5'}
UNKNOWN_SERVICE_COLOR = '#d5d5d5'
TRACE_HEADER = [_('TTL'), _('RTT'), _('IP'), _('Hostname')]
TRACE_TEXT = _(
"Traceroute on port <b>%s/%s</b> totalized <b>%d</b> known hops.")
NO_TRACE_TEXT = _("No traceroute information available.")
HOP_COLOR = {'known': '#ffffff',
'unknown': '#cccccc'}
SYSTEM_ADDRESS_TEXT = "[%s] %s"
OSMATCH_HEADER = ['%', _('Name'), _('DB Line')]
OSCLASS_HEADER = ['%', _('Vendor'), _('Type'), _('Family'), _('Version')]
USED_PORTS_TEXT = "%d/%s %s"
TCP_SEQ_NOTE = _("""\
<b>*</b> TCP sequence <i>index</i> equal to %d and <i>difficulty</i> is "%s".\
""")
def get_service_color(state):
color = SERVICE_COLORS.get(state)
if color is None:
color = UNKNOWN_SERVICE_COLOR
return color
class NodeNotebook(gtk.Notebook):
"""
"""
def __init__(self, node):
"""
"""
gtk.Notebook.__init__(self)
self.set_tab_pos(gtk.POS_TOP)
self.__node = node
self.__create_widgets()
def __create_widgets(self):
"""
"""
# create body elements
self.__services_page = ServicesPage(self.__node)
self.__system_page = SystemPage(self.__node)
self.__trace_page = TraceroutePage(self.__node)
# packing notebook elements
self.append_page(self.__system_page, BWLabel(_('General')))
self.append_page(self.__services_page, BWLabel(_('Services')))
self.append_page(self.__trace_page, BWLabel(_('Traceroute')))
class ServicesPage(gtk.Notebook):
"""
"""
def __init__(self, node):
"""
"""
gtk.Notebook.__init__(self)
self.set_border_width(6)
self.set_tab_pos(gtk.POS_TOP)
self.__node = node
self.__font = pango.FontDescription('Monospace')
self.__create_widgets()
def __create_widgets(self):
"""
"""
self.__cell = gtk.CellRendererText()
# texteditor widgets
self.__texteditor = BWTextEditor()
self.__texteditor.bw_modify_font(self.__font)
self.__texteditor.bw_set_editable(False)
self.__texteditor.set_border_width(0)
self.__select_combobox = gtk.combo_box_new_text()
self.__select_combobox.connect('changed', self.__change_text_value)
self.__viewer = BWVBox(spacing=6)
self.__viewer.set_border_width(6)
self.__viewer.bw_pack_start_noexpand_nofill(self.__select_combobox)
self.__viewer.bw_pack_start_expand_fill(self.__texteditor)
self.__text = list()
# ports information
number_of_ports = len(self.__node.get_info('ports'))
self.__ports_label = BWLabel(_('Ports (%s)') % number_of_ports)
self.__ports_scroll = BWScrolledWindow()
self.__ports_store = gtk.TreeStore(gobject.TYPE_INT,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_BOOLEAN)
self.__ports_treeview = gtk.TreeView(self.__ports_store)
for port in self.__node.get_info('ports'):
color = get_service_color(port['state']['state'])
service_name = port['service'].get('name', _('<unknown>'))
service_method = port['service'].get('method', _('<none>'))
reference = self.__ports_store.append(None,
[port['id'],
port['protocol'],
port['state']['state'],
service_name,
service_method,
color,
True])
for key in port['state']:
self.__ports_store.append(reference,
[port['id'],
'state',
key,
port['state'][key],
'',
'white',
True])
for key in port['service']:
if key in ['servicefp']:
text = _('[%d] service: %s') % (port['id'], key)
self.__select_combobox.append_text(text)
self.__text.append(port['service'][key])
value = _('<special field>')
else:
value = port['service'][key]
self.__ports_store.append(reference,
[port['id'],
'service',
key,
value,
'',
'white',
True])
#for script in port['scripts']:
# text = _('[%d] script: %s') % (port['id'], script['id'])
# self.__select_combobox.append_text(text)
# self.__text.append(script['output'])
#
# self.__ports_store.append(reference,
# [port['id'],
# 'script',
# 'id',
# script['id'],
# _('<special field>'),
# 'white',
# True])
self.__ports_column = list()
for i in range(len(PORTS_HEADER)):
column = gtk.TreeViewColumn(PORTS_HEADER[i],
self.__cell,
text=i)
self.__ports_column.append(column)
self.__ports_column[i].set_reorderable(True)
self.__ports_column[i].set_resizable(True)
self.__ports_column[i].set_sort_column_id(i)
self.__ports_column[i].set_attributes(self.__cell,
text=i,
background=5,
editable=6)
self.__ports_treeview.append_column(self.__ports_column[i])
self.__ports_scroll.add_with_viewport(self.__ports_treeview)
# extraports information
number_of_xports = 0
self.__xports_scroll = BWScrolledWindow()
self.__xports_store = gtk.TreeStore(gobject.TYPE_INT,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_BOOLEAN)
self.__xports_treeview = gtk.TreeView(self.__xports_store)
for xports in self.__node.get_info('extraports'):
color = get_service_color(xports['state'])
number_of_xports += xports['count']
reference = self.__xports_store.append(
None, [xports['count'], xports['state'],
", ".join(xports['reason']), color, True])
for xreason in xports['all_reason']:
self.__xports_store.append(reference,
[xreason['count'],
xports['state'],
xreason['reason'],
'white',
True])
self.__xports_column = list()
for i in range(len(EXTRAPORTS_HEADER)):
column = gtk.TreeViewColumn(EXTRAPORTS_HEADER[i],
self.__cell,
text=i)
self.__xports_column.append(column)
self.__xports_column[i].set_reorderable(True)
self.__xports_column[i].set_resizable(True)
self.__xports_column[i].set_sort_column_id(i)
self.__xports_column[i].set_attributes(self.__cell,
text=i,
background=3,
editable=4)
self.__xports_treeview.append_column(self.__xports_column[i])
xports_label_text = _('Extraports (%s)') % number_of_xports
self.__xports_label = BWLabel(xports_label_text)
self.__xports_scroll.add_with_viewport(self.__xports_treeview)
self.append_page(self.__ports_scroll, self.__ports_label)
self.append_page(self.__xports_scroll, self.__xports_label)
self.append_page(self.__viewer, BWLabel(_('Special fields')))
if len(self.__text) > 0:
self.__select_combobox.set_active(0)
def __change_text_value(self, widget):
"""
"""
id = self.__select_combobox.get_active()
self.__texteditor.bw_set_text(self.__text[id])
class SystemPage(BWScrolledWindow):
"""
"""
def __init__(self, node):
"""
"""
BWScrolledWindow.__init__(self)
self.__node = node
self.__font = pango.FontDescription('Monospace')
self.__create_widgets()
def __create_widgets(self):
"""
"""
self.__vbox = BWVBox()
self.__vbox.set_border_width(6)
self.__cell = gtk.CellRendererText()
self.__general_frame = BWExpander(_('General information'))
self.__sequences_frame = BWExpander(_('Sequences'))
self.__os_frame = BWExpander(_('Operating System'))
self.__sequences_frame.bw_add(gtk.Label(_('No sequence information.')))
self.__os_frame.bw_add(gtk.Label(_('No OS information.')))
# general information widgets
self.__general = BWTable(3, 2)
self.__address_label = BWSectionLabel(_('Address:'))
self.__address_list = gtk.combo_box_entry_new_text()
self.__address_list.child.set_editable(False)
for address in self.__node.get_info('addresses'):
params = address['type'], address['addr']
address_text = SYSTEM_ADDRESS_TEXT % params
if address['vendor'] is not None and address['vendor'] != '':
address_text += " (%s)" % address['vendor']
self.__address_list.append_text(address_text)
self.__address_list.set_active(0)
self.__general.bw_attach_next(self.__address_label,
yoptions=gtk.FILL,
xoptions=gtk.FILL)
self.__general.bw_attach_next(self.__address_list, yoptions=gtk.FILL)
if self.__node.get_info('hostnames') is not None:
self.__hostname_label = BWSectionLabel(_('Hostname:'))
self.__hostname_list = gtk.combo_box_entry_new_text()
self.__hostname_list.child.set_editable(False)
for hostname in self.__node.get_info('hostnames'):
params = hostname['type'], hostname['name']
self.__hostname_list.append_text(SYSTEM_ADDRESS_TEXT % params)
self.__hostname_list.set_active(0)
self.__general.bw_attach_next(self.__hostname_label,
yoptions=gtk.FILL,
xoptions=gtk.FILL)
self.__general.bw_attach_next(self.__hostname_list,
yoptions=gtk.FILL)
if self.__node.get_info('uptime') is not None:
self.__uptime_label = BWSectionLabel(_('Last boot:'))
seconds = self.__node.get_info('uptime')['seconds']
lastboot = self.__node.get_info('uptime')['lastboot']
text = _('%s (%s seconds).') % (lastboot, seconds)
self.__uptime_value = BWLabel(text)
self.__uptime_value.set_selectable(True)
self.__uptime_value.set_line_wrap(False)
self.__general.bw_attach_next(self.__uptime_label,
yoptions=gtk.FILL,
xoptions=gtk.FILL)
self.__general.bw_attach_next(self.__uptime_value,
yoptions=gtk.FILL)
self.__general_frame.bw_add(self.__general)
self.__general_frame.set_expanded(True)
sequences = self.__node.get_info('sequences')
if len(sequences) > 0:
self.__sequences_frame.bw_add(
self.__create_sequences_widget(sequences))
# operating system information widgets
self.__os = gtk.Notebook()
os = self.__node.get_info('os')
if os is not None:
if 'matches' in os:
self.__match_scroll = BWScrolledWindow()
self.__match_store = gtk.ListStore(gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_INT,
gobject.TYPE_BOOLEAN)
self.__match_treeview = gtk.TreeView(self.__match_store)
for os_match in os['matches']:
self.__match_store.append([os_match['accuracy'],
os_match['name'],
#os_match['db_line'],
0, # unsupported
True])
self.__match_column = list()
for i in range(len(OSMATCH_HEADER)):
column = gtk.TreeViewColumn(OSMATCH_HEADER[i],
self.__cell,
text=i)
self.__match_column.append(column)
self.__match_column[i].set_reorderable(True)
self.__match_column[i].set_resizable(True)
self.__match_column[i].set_attributes(self.__cell,
text=i,
editable=3)
self.__match_column[i].set_sort_column_id(i)
self.__match_treeview.append_column(self.__match_column[i])
self.__match_scroll.add_with_viewport(self.__match_treeview)
self.__os.append_page(self.__match_scroll, BWLabel(_('Match')))
if 'classes' in os:
self.__class_scroll = BWScrolledWindow()
self.__class_store = gtk.ListStore(gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_BOOLEAN)
self.__class_treeview = gtk.TreeView(self.__class_store)
for os_class in os['classes']:
os_gen = os_class.get('os_gen', '')
self.__class_store.append([os_class['accuracy'],
os_class['vendor'],
os_class['type'],
os_class['os_family'],
os_gen,
True])
self.__class_column = list()
for i in range(len(OSCLASS_HEADER)):
column = gtk.TreeViewColumn(OSCLASS_HEADER[i],
self.__cell,
text=i)
self.__class_column.append(column)
self.__class_column[i].set_reorderable(True)
self.__class_column[i].set_resizable(True)
self.__class_column[i].set_attributes(self.__cell,
text=i,
editable=5)
self.__class_column[i].set_sort_column_id(i)
self.__class_treeview.append_column(self.__class_column[i])
self.__class_scroll.add_with_viewport(self.__class_treeview)
self.__os.append_page(self.__class_scroll, BWLabel(_('Class')))
self.__fp_viewer = BWTextEditor()
self.__fp_viewer.bw_modify_font(self.__font)
self.__fp_viewer.bw_set_editable(False)
self.__fp_viewer.bw_set_text(os['fingerprint'])
self.__fp_ports = BWHBox()
self.__fp_label = BWSectionLabel(_('Used ports:'))
self.__fp_ports_list = gtk.combo_box_entry_new_text()
self.__fp_ports_list.child.set_editable(False)
self.__fp_vbox = BWVBox()
if 'used_ports' in os:
used_ports = os['used_ports']
for port in used_ports:
params = port['id'], port['protocol'], port['state']
self.__fp_ports_list.append_text(USED_PORTS_TEXT % params)
self.__fp_ports_list.set_active(0)
self.__fp_ports.bw_pack_start_noexpand_nofill(self.__fp_label)
self.__fp_ports.bw_pack_start_expand_fill(self.__fp_ports_list)
self.__fp_vbox.bw_pack_start_noexpand_nofill(self.__fp_ports)
self.__os.append_page(self.__fp_viewer, BWLabel(_('Fingerprint')))
self.__fp_vbox.bw_pack_start_expand_fill(self.__os)
self.__os_frame.bw_add(self.__fp_vbox)
self.__os_frame.set_expanded(True)
self.__vbox.bw_pack_start_noexpand_nofill(self.__general_frame)
self.__vbox.bw_pack_start_expand_fill(self.__os_frame)
self.__vbox.bw_pack_start_noexpand_nofill(self.__sequences_frame)
self.add_with_viewport(self.__vbox)
def __create_sequences_widget(self, sequences):
"""Return a widget representing various OS detection sequences. The
sequences argument is a dict with zero or more of the keys 'tcp',
'ip_id', and 'tcp_ts'."""
# sequences information widgets
table = BWTable(5, 3)
table.attach(BWSectionLabel(_('Class')), 1, 2, 0, 1)
table.attach(BWSectionLabel(_('Values')), 2, 3, 0, 1)
table.attach(BWSectionLabel(_('TCP *')), 0, 1, 1, 2)
table.attach(BWSectionLabel(_('IP ID')), 0, 1, 2, 3)
table.attach(BWSectionLabel(_('TCP Timestamp')), 0, 1, 3, 4)
tcp = sequences.get('tcp')
if tcp is not None:
tcp_class = BWLabel(tcp['class'])
tcp_class.set_selectable(True)
table.attach(tcp_class, 1, 2, 1, 2)
tcp_values = gtk.combo_box_entry_new_text()
for value in tcp['values']:
tcp_values.append_text(value)
tcp_values.set_active(0)
table.attach(tcp_values, 2, 3, 1, 2)
tcp_note = BWLabel()
tcp_note.set_selectable(True)
tcp_note.set_line_wrap(False)
tcp_note.set_alignment(1.0, 0.5)
tcp_note.set_markup(
TCP_SEQ_NOTE % (tcp['index'], tcp['difficulty']))
table.attach(tcp_note, 0, 3, 4, 5)
ip_id = sequences.get('ip_id')
if ip_id is not None:
ip_id_class = BWLabel(ip_id['class'])
ip_id_class.set_selectable(True)
table.attach(ip_id_class, 1, 2, 2, 3)
ip_id_values = gtk.combo_box_entry_new_text()
for value in ip_id['values']:
ip_id_values.append_text(value)
ip_id_values.set_active(0)
table.attach(ip_id_values, 2, 3, 2, 3)
tcp_ts = sequences.get('tcp_ts')
if tcp_ts is not None:
tcp_ts_class = BWLabel(tcp_ts['class'])
tcp_ts_class.set_selectable(True)
table.attach(tcp_ts_class, 1, 2, 3, 4)
if tcp_ts['values'] is not None:
tcp_ts_values = gtk.combo_box_entry_new_text()
for value in tcp_ts['values']:
tcp_ts_values.append_text(value)
tcp_ts_values.set_active(0)
table.attach(tcp_ts_values, 2, 3, 3, 4)
return table
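    # Hedged illustration of the 'sequences' dict shape consumed above (all
    # values are assumed/illustrative; every top-level key is optional, and
    # tcp_ts['values'] may also be None):
    #   {'tcp':    {'class': 'random positive increments',
    #               'values': ['A1B2C3', '9F0E1D'],
    #               'index': 4917, 'difficulty': 'Worthy challenge'},
    #    'ip_id':  {'class': 'All zeros', 'values': ['0', '0']},
    #    'tcp_ts': {'class': '1000HZ', 'values': ['3F7A', '3F8B']}}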
class TraceroutePage(BWVBox):
"""
"""
def __init__(self, node):
"""
"""
BWVBox.__init__(self)
self.set_border_width(6)
self.__node = node
self.__create_widgets()
def __create_widgets(self):
"""
"""
trace = self.__node.get_info('trace')
hops = None
if trace is not None:
hops = trace.get("hops")
if hops is None or len(hops) == 0:
self.__trace_label = gtk.Label(NO_TRACE_TEXT)
self.pack_start(self.__trace_label, True, True)
else:
# add hops
hops = self.__node.get_info('trace')['hops']
ttls = [int(i['ttl']) for i in hops]
self.__cell = gtk.CellRendererText()
self.__trace_scroll = BWScrolledWindow()
self.__trace_scroll.set_border_width(0)
self.__trace_store = gtk.ListStore(gobject.TYPE_INT,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_STRING,
gobject.TYPE_BOOLEAN)
self.__trace_treeview = gtk.TreeView(self.__trace_store)
count = 0
for i in range(1, max(ttls) + 1):
if i in ttls:
hop = hops[count]
count += 1
self.__trace_store.append([hop['ttl'],
hop['rtt'],
hop['ip'],
hop['hostname'],
HOP_COLOR['known'],
True])
else:
self.__trace_store.append([i,
'',
_('<unknown>'),
'',
HOP_COLOR['unknown'],
True])
self.__trace_column = list()
for i in range(len(TRACE_HEADER)):
column = gtk.TreeViewColumn(TRACE_HEADER[i],
self.__cell,
text=i)
self.__trace_column.append(column)
self.__trace_column[i].set_reorderable(True)
self.__trace_column[i].set_resizable(True)
self.__trace_column[i].set_attributes(self.__cell,
text=i,
background=4,
editable=5)
self.__trace_treeview.append_column(self.__trace_column[i])
self.__trace_column[0].set_sort_column_id(0)
self.__trace_scroll.add_with_viewport(self.__trace_treeview)
self.__trace_info = (self.__node.get_info('trace')['port'],
self.__node.get_info('trace')['protocol'],
len(self.__node.get_info('trace')['hops']))
self.__trace_label = BWLabel(TRACE_TEXT % self.__trace_info)
self.__trace_label.set_use_markup(True)
self.bw_pack_start_expand_fill(self.__trace_scroll)
self.bw_pack_start_noexpand_nofill(self.__trace_label)
|
markofu/scripts
|
nmap/nmap/zenmap/radialnet/gui/NodeNotebook.py
|
Python
|
gpl-2.0
| 34,656
|
from robottelo.decorators.func_shared.shared import ( # noqa
shared,
SharedFunctionError,
SharedFunctionException,
)
|
ldjebran/robottelo
|
robottelo/decorators/func_shared/__init__.py
|
Python
|
gpl-3.0
| 130
|
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os.path
import time
import urllib
import re
import threading
import datetime
import random
import locale
from Cheetah.Template import Template
import cherrypy.lib
import sickbeard
from sickbeard import config, sab
from sickbeard import clients
from sickbeard import history, notifiers, processTV
from sickbeard import ui
from sickbeard import logger, helpers, exceptions, classes, db
from sickbeard import encodingKludge as ek
from sickbeard import search_queue
from sickbeard import image_cache
from sickbeard import scene_exceptions
from sickbeard import naming
from sickbeard import subtitles
from sickbeard.providers import newznab
from sickbeard.common import Quality, Overview, statusStrings
from sickbeard.common import SNATCHED, SKIPPED, UNAIRED, IGNORED, ARCHIVED, WANTED
from sickbeard.exceptions import ex
from sickbeard.webapi import Api
from lib.tvdb_api import tvdb_api
from lib.dateutil import tz
import network_timezones
import subliminal
try:
import json
except ImportError:
from lib import simplejson as json
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
from sickbeard import browser
class PageTemplate (Template):
def __init__(self, *args, **KWs):
KWs['file'] = os.path.join(sickbeard.PROG_DIR, "data/interfaces/default/", KWs['file'])
super(PageTemplate, self).__init__(*args, **KWs)
self.sbRoot = sickbeard.WEB_ROOT
self.sbHttpPort = sickbeard.WEB_PORT
self.sbHttpsPort = sickbeard.WEB_PORT
self.sbHttpsEnabled = sickbeard.ENABLE_HTTPS
if cherrypy.request.headers['Host'][0] == '[':
self.sbHost = re.match("^\[.*\]", cherrypy.request.headers['Host'], re.X|re.M|re.S).group(0)
else:
self.sbHost = re.match("^[^:]+", cherrypy.request.headers['Host'], re.X|re.M|re.S).group(0)
self.projectHomePage = "http://code.google.com/p/sickbeard/"
if sickbeard.NZBS and sickbeard.NZBS_UID and sickbeard.NZBS_HASH:
logger.log(u"NZBs.org has been replaced, please check the config to configure the new provider!", logger.ERROR)
ui.notifications.error("NZBs.org Config Update", "NZBs.org has a new site. Please <a href=\""+sickbeard.WEB_ROOT+"/config/providers\">update your config</a> with the api key from <a href=\"http://nzbs.org/login\">http://nzbs.org</a> and then disable the old NZBs.org provider.")
if "X-Forwarded-Host" in cherrypy.request.headers:
self.sbHost = cherrypy.request.headers['X-Forwarded-Host']
if "X-Forwarded-Port" in cherrypy.request.headers:
self.sbHttpPort = cherrypy.request.headers['X-Forwarded-Port']
self.sbHttpsPort = self.sbHttpPort
if "X-Forwarded-Proto" in cherrypy.request.headers:
self.sbHttpsEnabled = True if cherrypy.request.headers['X-Forwarded-Proto'] == 'https' else False
logPageTitle = 'Logs & Errors'
if len(classes.ErrorViewer.errors):
logPageTitle += ' ('+str(len(classes.ErrorViewer.errors))+')'
self.logPageTitle = logPageTitle
self.sbPID = str(sickbeard.PID)
self.menu = [
{ 'title': 'Home', 'key': 'home' },
{ 'title': 'Coming Episodes', 'key': 'comingEpisodes' },
{ 'title': 'History', 'key': 'history' },
{ 'title': 'Manage', 'key': 'manage' },
{ 'title': 'Config', 'key': 'config' },
{ 'title': logPageTitle, 'key': 'errorlogs' },
]
def redirect(abspath, *args, **KWs):
assert abspath[0] == '/'
raise cherrypy.HTTPRedirect(sickbeard.WEB_ROOT + abspath, *args, **KWs)
class TVDBWebUI:
def __init__(self, config, log=None):
self.config = config
self.log = log
def selectSeries(self, allSeries):
searchList = ",".join([x['id'] for x in allSeries])
showDirList = ""
for curShowDir in self.config['_showDir']:
showDirList += "showDir="+curShowDir+"&"
redirect("/home/addShows/addShow?" + showDirList + "seriesList=" + searchList)
def _munge(string):
return unicode(string).encode('utf-8', 'xmlcharrefreplace')
def _genericMessage(subject, message):
t = PageTemplate(file="genericMessage.tmpl")
t.submenu = HomeMenu()
t.subject = subject
t.message = message
return _munge(t)
def _getEpisode(show, season, episode):
    if show is None or season is None or episode is None:
        return "Invalid parameters"
    showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
    if showObj is None:
        return "Show not in show list"
    epObj = showObj.getEpisode(int(season), int(episode))
    if epObj is None:
        return "Episode couldn't be retrieved"
    return epObj
ManageMenu = [
{ 'title': 'Backlog Overview', 'path': 'manage/backlogOverview' },
{ 'title': 'Manage Searches', 'path': 'manage/manageSearches' },
{ 'title': 'Episode Status Management', 'path': 'manage/episodeStatuses' },
]
if sickbeard.USE_SUBTITLES:
ManageMenu.append({ 'title': 'Missed Subtitle Management', 'path': 'manage/subtitleMissed' })
class ManageSearches:
@cherrypy.expose
def index(self):
t = PageTemplate(file="manage_manageSearches.tmpl")
#t.backlogPI = sickbeard.backlogSearchScheduler.action.getProgressIndicator()
t.backlogPaused = sickbeard.searchQueueScheduler.action.is_backlog_paused() #@UndefinedVariable
t.backlogRunning = sickbeard.searchQueueScheduler.action.is_backlog_in_progress() #@UndefinedVariable
t.searchStatus = sickbeard.currentSearchScheduler.action.amActive #@UndefinedVariable
t.submenu = ManageMenu
return _munge(t)
@cherrypy.expose
def forceSearch(self):
# force it to run the next time it looks
result = sickbeard.currentSearchScheduler.forceRun()
if result:
logger.log(u"Search forced")
ui.notifications.message('Episode search started',
'Note: RSS feeds may not be updated if retrieved recently')
redirect("/manage/manageSearches")
@cherrypy.expose
def pauseBacklog(self, paused=None):
if paused == "1":
sickbeard.searchQueueScheduler.action.pause_backlog() #@UndefinedVariable
else:
sickbeard.searchQueueScheduler.action.unpause_backlog() #@UndefinedVariable
redirect("/manage/manageSearches")
@cherrypy.expose
def forceVersionCheck(self):
# force a check to see if there is a new version
result = sickbeard.versionCheckScheduler.action.check_for_new_version(force=True) #@UndefinedVariable
if result:
logger.log(u"Forcing version check")
redirect("/manage/manageSearches")
class Manage:
manageSearches = ManageSearches()
@cherrypy.expose
def index(self):
t = PageTemplate(file="manage.tmpl")
t.submenu = ManageMenu
return _munge(t)
@cherrypy.expose
def showEpisodeStatuses(self, tvdb_id, whichStatus):
myDB = db.DBConnection()
status_list = [int(whichStatus)]
if status_list[0] == SNATCHED:
status_list = Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.SNATCHED_FRENCH
cur_show_results = myDB.select("SELECT season, episode, name FROM tv_episodes WHERE showid = ? AND season != 0 AND status IN ("+','.join(['?']*len(status_list))+")", [int(tvdb_id)] + status_list)
result = {}
for cur_result in cur_show_results:
cur_season = int(cur_result["season"])
cur_episode = int(cur_result["episode"])
if cur_season not in result:
result[cur_season] = {}
result[cur_season][cur_episode] = cur_result["name"]
return json.dumps(result)
@cherrypy.expose
def episodeStatuses(self, whichStatus=None):
if whichStatus:
whichStatus = int(whichStatus)
status_list = [whichStatus]
if status_list[0] == SNATCHED:
status_list = Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.SNATCHED_FRENCH
else:
status_list = []
t = PageTemplate(file="manage_episodeStatuses.tmpl")
t.submenu = ManageMenu
t.whichStatus = whichStatus
# if we have no status then this is as far as we need to go
if not status_list:
return _munge(t)
myDB = db.DBConnection()
status_results = myDB.select("SELECT show_name, tv_shows.tvdb_id as tvdb_id FROM tv_episodes, tv_shows WHERE tv_episodes.status IN ("+','.join(['?']*len(status_list))+") AND season != 0 AND tv_episodes.showid = tv_shows.tvdb_id ORDER BY show_name", status_list)
ep_counts = {}
show_names = {}
sorted_show_ids = []
for cur_status_result in status_results:
cur_tvdb_id = int(cur_status_result["tvdb_id"])
if cur_tvdb_id not in ep_counts:
ep_counts[cur_tvdb_id] = 1
else:
ep_counts[cur_tvdb_id] += 1
show_names[cur_tvdb_id] = cur_status_result["show_name"]
if cur_tvdb_id not in sorted_show_ids:
sorted_show_ids.append(cur_tvdb_id)
t.show_names = show_names
t.ep_counts = ep_counts
t.sorted_show_ids = sorted_show_ids
return _munge(t)
@cherrypy.expose
def changeEpisodeStatuses(self, oldStatus, newStatus, *args, **kwargs):
status_list = [int(oldStatus)]
if status_list[0] == SNATCHED:
status_list = Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.SNATCHED_FRENCH
to_change = {}
# make a list of all shows and their associated args
for arg in kwargs:
tvdb_id, what = arg.split('-')
# we don't care about unchecked checkboxes
if kwargs[arg] != 'on':
continue
if tvdb_id not in to_change:
to_change[tvdb_id] = []
to_change[tvdb_id].append(what)
myDB = db.DBConnection()
for cur_tvdb_id in to_change:
# get a list of all the eps we want to change if they just said "all"
if 'all' in to_change[cur_tvdb_id]:
all_eps_results = myDB.select("SELECT season, episode FROM tv_episodes WHERE status IN ("+','.join(['?']*len(status_list))+") AND season != 0 AND showid = ?", status_list + [cur_tvdb_id])
all_eps = [str(x["season"])+'x'+str(x["episode"]) for x in all_eps_results]
to_change[cur_tvdb_id] = all_eps
Home().setStatus(cur_tvdb_id, '|'.join(to_change[cur_tvdb_id]), newStatus, direct=True)
redirect('/manage/episodeStatuses')
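    # AJAX helper: list downloaded episodes (status LIKE '%4') of one show that
    # still lack the requested subtitle language ('all' means any wanted language).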
@cherrypy.expose
def showSubtitleMissed(self, tvdb_id, whichSubs):
myDB = db.DBConnection()
cur_show_results = myDB.select("SELECT season, episode, name, subtitles FROM tv_episodes WHERE showid = ? AND season != 0 AND status LIKE '%4'", [int(tvdb_id)])
result = {}
for cur_result in cur_show_results:
if whichSubs == 'all':
if len(set(cur_result["subtitles"].split(',')).intersection(set(subtitles.wantedLanguages()))) >= len(subtitles.wantedLanguages()):
continue
elif whichSubs in cur_result["subtitles"].split(','):
continue
cur_season = int(cur_result["season"])
cur_episode = int(cur_result["episode"])
if cur_season not in result:
result[cur_season] = {}
if cur_episode not in result[cur_season]:
result[cur_season][cur_episode] = {}
result[cur_season][cur_episode]["name"] = cur_result["name"]
result[cur_season][cur_episode]["subtitles"] = ",".join(subliminal.language.Language(subtitle).alpha2 for subtitle in cur_result["subtitles"].split(',')) if not cur_result["subtitles"] == '' else ''
return json.dumps(result)
@cherrypy.expose
def subtitleMissed(self, whichSubs=None):
t = PageTemplate(file="manage_subtitleMissed.tmpl")
t.submenu = ManageMenu
t.whichSubs = whichSubs
if not whichSubs:
return _munge(t)
myDB = db.DBConnection()
status_results = myDB.select("SELECT show_name, tv_shows.tvdb_id as tvdb_id, tv_episodes.subtitles subtitles FROM tv_episodes, tv_shows WHERE tv_shows.subtitles = 1 AND tv_episodes.status LIKE '%4' AND tv_episodes.season != 0 AND tv_episodes.showid = tv_shows.tvdb_id ORDER BY show_name")
ep_counts = {}
show_names = {}
sorted_show_ids = []
for cur_status_result in status_results:
if whichSubs == 'all':
                if set(subtitles.wantedLanguages()).issubset(cur_status_result["subtitles"].split(',')):
continue
elif whichSubs in cur_status_result["subtitles"].split(','):
continue
cur_tvdb_id = int(cur_status_result["tvdb_id"])
if cur_tvdb_id not in ep_counts:
ep_counts[cur_tvdb_id] = 1
else:
ep_counts[cur_tvdb_id] += 1
show_names[cur_tvdb_id] = cur_status_result["show_name"]
if cur_tvdb_id not in sorted_show_ids:
sorted_show_ids.append(cur_tvdb_id)
t.show_names = show_names
t.ep_counts = ep_counts
t.sorted_show_ids = sorted_show_ids
return _munge(t)
@cherrypy.expose
def downloadSubtitleMissed(self, *args, **kwargs):
to_download = {}
# make a list of all shows and their associated args
for arg in kwargs:
tvdb_id, what = arg.split('-')
# we don't care about unchecked checkboxes
if kwargs[arg] != 'on':
continue
if tvdb_id not in to_download:
to_download[tvdb_id] = []
to_download[tvdb_id].append(what)
for cur_tvdb_id in to_download:
# get a list of all the eps we want to download subtitles if they just said "all"
if 'all' in to_download[cur_tvdb_id]:
myDB = db.DBConnection()
all_eps_results = myDB.select("SELECT season, episode FROM tv_episodes WHERE status LIKE '%4' AND season != 0 AND showid = ?", [cur_tvdb_id])
to_download[cur_tvdb_id] = [str(x["season"])+'x'+str(x["episode"]) for x in all_eps_results]
for epResult in to_download[cur_tvdb_id]:
                season, episode = epResult.split('x')
                show = sickbeard.helpers.findCertainShow(sickbeard.showList, int(cur_tvdb_id))
                # don't bind the result to a local named "subtitles" -- that shadows the subtitles module
                show.getEpisode(int(season), int(episode)).downloadSubtitles()
redirect('/manage/subtitleMissed')
@cherrypy.expose
def backlogShow(self, tvdb_id):
show_obj = helpers.findCertainShow(sickbeard.showList, int(tvdb_id))
if show_obj:
sickbeard.backlogSearchScheduler.action.searchBacklog([show_obj]) #@UndefinedVariable
redirect("/manage/backlogOverview")
@cherrypy.expose
def backlogOverview(self):
t = PageTemplate(file="manage_backlogOverview.tmpl")
t.submenu = ManageMenu
myDB = db.DBConnection()
showCounts = {}
showCats = {}
showSQLResults = {}
for curShow in sickbeard.showList:
epCounts = {}
epCats = {}
epCounts[Overview.SKIPPED] = 0
epCounts[Overview.WANTED] = 0
epCounts[Overview.QUAL] = 0
epCounts[Overview.GOOD] = 0
epCounts[Overview.UNAIRED] = 0
epCounts[Overview.SNATCHED] = 0
sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? ORDER BY season DESC, episode DESC", [curShow.tvdbid])
for curResult in sqlResults:
curEpCat = curShow.getOverview(int(curResult["status"]))
epCats[str(curResult["season"]) + "x" + str(curResult["episode"])] = curEpCat
epCounts[curEpCat] += 1
showCounts[curShow.tvdbid] = epCounts
showCats[curShow.tvdbid] = epCats
showSQLResults[curShow.tvdbid] = sqlResults
t.showCounts = showCounts
t.showCats = showCats
t.showSQLResults = showSQLResults
return _munge(t)
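    # Build the mass-edit form: an attribute is pre-filled only when every
    # selected show already agrees on its value, otherwise it is left unset.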
@cherrypy.expose
def massEdit(self, toEdit=None):
t = PageTemplate(file="manage_massEdit.tmpl")
t.submenu = ManageMenu
if not toEdit:
redirect("/manage")
showIDs = toEdit.split("|")
showList = []
for curID in showIDs:
curID = int(curID)
showObj = helpers.findCertainShow(sickbeard.showList, curID)
if showObj:
showList.append(showObj)
flatten_folders_all_same = True
last_flatten_folders = None
paused_all_same = True
last_paused = None
frenched_all_same = True
last_frenched = None
quality_all_same = True
last_quality = None
subtitles_all_same = True
last_subtitles = None
lang_all_same = True
        last_lang_metadata = None
lang_audio_all_same = True
last_lang_audio = None
root_dir_list = []
for curShow in showList:
cur_root_dir = ek.ek(os.path.dirname, curShow._location)
if cur_root_dir not in root_dir_list:
root_dir_list.append(cur_root_dir)
# if we know they're not all the same then no point even bothering
if paused_all_same:
# if we had a value already and this value is different then they're not all the same
if last_paused not in (curShow.paused, None):
paused_all_same = False
else:
last_paused = curShow.paused
if frenched_all_same:
# if we had a value already and this value is different then they're not all the same
if last_frenched not in (curShow.frenchsearch, None):
frenched_all_same = False
else:
last_frenched = curShow.frenchsearch
if flatten_folders_all_same:
if last_flatten_folders not in (None, curShow.flatten_folders):
flatten_folders_all_same = False
else:
last_flatten_folders = curShow.flatten_folders
if quality_all_same:
if last_quality not in (None, curShow.quality):
quality_all_same = False
else:
last_quality = curShow.quality
if subtitles_all_same:
if last_subtitles not in (None, curShow.subtitles):
subtitles_all_same = False
else:
last_subtitles = curShow.subtitles
if lang_all_same:
if last_lang_metadata not in (None, curShow.lang):
lang_all_same = False
else:
last_lang_metadata = curShow.lang
if lang_audio_all_same:
if last_lang_audio not in (None, curShow.audio_lang):
lang_audio_all_same = False
else:
last_lang_audio = curShow.audio_lang
t.showList = toEdit
t.paused_value = last_paused if paused_all_same else None
t.frenched_value = last_frenched if frenched_all_same else None
t.flatten_folders_value = last_flatten_folders if flatten_folders_all_same else None
t.quality_value = last_quality if quality_all_same else None
t.subtitles_value = last_subtitles if subtitles_all_same else None
t.root_dir_list = root_dir_list
t.lang_value = last_lang_metadata if lang_all_same else None
t.audio_value = last_lang_audio if lang_audio_all_same else None
return _munge(t)
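    # Apply the mass-edit form: 'keep' preserves a show's current value, and
    # root-directory moves come in as orig_root_dir_N/new_root_dir_N kwargs.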
@cherrypy.expose
def massEditSubmit(self, paused=None, frenched=None, flatten_folders=None, quality_preset=False, subtitles=None,
                       anyQualities=[], bestQualities=[], tvdbLang=None, audioLang=None, toEdit=None, *args, **kwargs):
dir_map = {}
for cur_arg in kwargs:
if not cur_arg.startswith('orig_root_dir_'):
continue
which_index = cur_arg.replace('orig_root_dir_', '')
end_dir = kwargs['new_root_dir_'+which_index]
dir_map[kwargs[cur_arg]] = end_dir
showIDs = toEdit.split("|")
errors = []
for curShow in showIDs:
curErrors = []
showObj = helpers.findCertainShow(sickbeard.showList, int(curShow))
if not showObj:
continue
cur_root_dir = ek.ek(os.path.dirname, showObj._location)
cur_show_dir = ek.ek(os.path.basename, showObj._location)
if cur_root_dir in dir_map and cur_root_dir != dir_map[cur_root_dir]:
new_show_dir = ek.ek(os.path.join, dir_map[cur_root_dir], cur_show_dir)
logger.log(u"For show "+showObj.name+" changing dir from "+showObj._location+" to "+new_show_dir)
else:
new_show_dir = showObj._location
            if paused == 'keep':
                new_paused = showObj.paused
            else:
                new_paused = paused == 'enable'
            new_paused = 'on' if new_paused else 'off'
            if frenched == 'keep':
                new_frenched = showObj.frenchsearch
            else:
                new_frenched = frenched == 'enable'
            new_frenched = 'on' if new_frenched else 'off'
            if flatten_folders == 'keep':
                new_flatten_folders = showObj.flatten_folders
            else:
                new_flatten_folders = flatten_folders == 'enable'
            new_flatten_folders = 'on' if new_flatten_folders else 'off'
            if subtitles == 'keep':
                new_subtitles = showObj.subtitles
            else:
                new_subtitles = subtitles == 'enable'
            new_subtitles = 'on' if new_subtitles else 'off'
if quality_preset == 'keep':
anyQualities, bestQualities = Quality.splitQuality(showObj.quality)
if tvdbLang == 'None':
new_lang = 'en'
else:
new_lang = tvdbLang
if audioLang == 'keep':
                new_audio_lang = showObj.audio_lang
else:
new_audio_lang = audioLang
exceptions_list = []
curErrors += Home().editShow(curShow, new_show_dir, anyQualities, bestQualities, exceptions_list, new_flatten_folders, new_paused, new_frenched, subtitles=new_subtitles, tvdbLang=new_lang, audio_lang=new_audio_lang, directCall=True)
if curErrors:
logger.log(u"Errors: "+str(curErrors), logger.ERROR)
errors.append('<b>%s:</b>\n<ul>' % showObj.name + ' '.join(['<li>%s</li>' % error for error in curErrors]) + "</ul>")
if len(errors) > 0:
ui.notifications.error('%d error%s while saving changes:' % (len(errors), "" if len(errors) == 1 else "s"),
" ".join(errors))
redirect("/manage")
@cherrypy.expose
def massUpdate(self, toUpdate=None, toRefresh=None, toRename=None, toDelete=None, toMetadata=None, toSubtitle=None):
        if toUpdate is not None:
            toUpdate = toUpdate.split('|')
        else:
            toUpdate = []
        if toRefresh is not None:
            toRefresh = toRefresh.split('|')
        else:
            toRefresh = []
        if toRename is not None:
            toRename = toRename.split('|')
        else:
            toRename = []
        if toSubtitle is not None:
            toSubtitle = toSubtitle.split('|')
        else:
            toSubtitle = []
        if toDelete is not None:
            toDelete = toDelete.split('|')
        else:
            toDelete = []
        if toMetadata is not None:
            toMetadata = toMetadata.split('|')
        else:
            toMetadata = []
errors = []
refreshes = []
updates = []
renames = []
subtitles = []
for curShowID in set(toUpdate+toRefresh+toRename+toSubtitle+toDelete+toMetadata):
if curShowID == '':
continue
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(curShowID))
            if showObj is None:
continue
if curShowID in toDelete:
showObj.deleteShow()
# don't do anything else if it's being deleted
continue
if curShowID in toUpdate:
try:
sickbeard.showQueueScheduler.action.updateShow(showObj, True) #@UndefinedVariable
updates.append(showObj.name)
except exceptions.CantUpdateException, e:
errors.append("Unable to update show "+showObj.name+": "+ex(e))
# don't bother refreshing shows that were updated anyway
if curShowID in toRefresh and curShowID not in toUpdate:
try:
sickbeard.showQueueScheduler.action.refreshShow(showObj) #@UndefinedVariable
refreshes.append(showObj.name)
except exceptions.CantRefreshException, e:
errors.append("Unable to refresh show "+showObj.name+": "+ex(e))
if curShowID in toRename:
sickbeard.showQueueScheduler.action.renameShowEpisodes(showObj) #@UndefinedVariable
renames.append(showObj.name)
if curShowID in toSubtitle:
sickbeard.showQueueScheduler.action.downloadSubtitles(showObj) #@UndefinedVariable
subtitles.append(showObj.name)
if len(errors) > 0:
ui.notifications.error("Errors encountered",
'<br >\n'.join(errors))
messageDetail = ""
if len(updates) > 0:
messageDetail += "<br /><b>Updates</b><br /><ul><li>"
messageDetail += "</li><li>".join(updates)
messageDetail += "</li></ul>"
if len(refreshes) > 0:
messageDetail += "<br /><b>Refreshes</b><br /><ul><li>"
messageDetail += "</li><li>".join(refreshes)
messageDetail += "</li></ul>"
if len(renames) > 0:
messageDetail += "<br /><b>Renames</b><br /><ul><li>"
messageDetail += "</li><li>".join(renames)
messageDetail += "</li></ul>"
if len(subtitles) > 0:
messageDetail += "<br /><b>Subtitles</b><br /><ul><li>"
messageDetail += "</li><li>".join(subtitles)
messageDetail += "</li></ul>"
if len(updates+refreshes+renames+subtitles) > 0:
ui.notifications.message("The following actions were queued:",
messageDetail)
redirect("/manage")
class History:
@cherrypy.expose
def index(self, limit=100):
myDB = db.DBConnection()
# sqlResults = myDB.select("SELECT h.*, show_name, name FROM history h, tv_shows s, tv_episodes e WHERE h.showid=s.tvdb_id AND h.showid=e.showid AND h.season=e.season AND h.episode=e.episode ORDER BY date DESC LIMIT "+str(numPerPage*(p-1))+", "+str(numPerPage))
if limit == "0":
sqlResults = myDB.select("SELECT h.*, show_name FROM history h, tv_shows s WHERE h.showid=s.tvdb_id ORDER BY date DESC")
else:
sqlResults = myDB.select("SELECT h.*, show_name FROM history h, tv_shows s WHERE h.showid=s.tvdb_id ORDER BY date DESC LIMIT ?", [limit])
t = PageTemplate(file="history.tmpl")
t.historyResults = sqlResults
t.limit = limit
t.submenu = [
{ 'title': 'Clear History', 'path': 'history/clearHistory' },
{ 'title': 'Trim History', 'path': 'history/trimHistory' },
            { 'title': 'Truncate Episode Links', 'path': 'history/truncEplinks' },
            { 'title': 'Truncate Processed Files List', 'path': 'history/truncEpListProc' },
]
return _munge(t)
@cherrypy.expose
def clearHistory(self):
myDB = db.DBConnection()
myDB.action("DELETE FROM history WHERE 1=1")
ui.notifications.message('History cleared')
redirect("/history")
@cherrypy.expose
def trimHistory(self):
myDB = db.DBConnection()
myDB.action("DELETE FROM history WHERE date < "+str((datetime.datetime.today()-datetime.timedelta(days=30)).strftime(history.dateFormat)))
        ui.notifications.message('Removed history entries older than 30 days')
redirect("/history")
@cherrypy.expose
def truncEplinks(self):
myDB = db.DBConnection()
        nbep = myDB.select("SELECT count(*) FROM episode_links")
        myDB.action("DELETE FROM episode_links WHERE 1=1")
        messnum = str(nbep[0][0]) + ' episode links deleted'
        ui.notifications.message('All Episode Links Removed', messnum)
redirect("/history")
@cherrypy.expose
def truncEpListProc(self):
myDB = db.DBConnection()
        nbep = myDB.select("SELECT count(*) FROM processed_files")
        myDB.action("DELETE FROM processed_files WHERE 1=1")
        messnum = str(nbep[0][0]) + ' processed-file records deleted'
        ui.notifications.message('Cleared list of processed files', messnum)
redirect("/history")
ConfigMenu = [
{ 'title': 'General', 'path': 'config/general/' },
{ 'title': 'Search Settings', 'path': 'config/search/' },
{ 'title': 'Search Providers', 'path': 'config/providers/' },
{ 'title': 'Subtitles Settings','path': 'config/subtitles/' },
{ 'title': 'Post Processing', 'path': 'config/postProcessing/' },
{ 'title': 'Notifications', 'path': 'config/notifications/' },
]
class ConfigGeneral:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config_general.tmpl")
t.submenu = ConfigMenu
return _munge(t)
@cherrypy.expose
def saveRootDirs(self, rootDirString=None):
sickbeard.ROOT_DIRS = rootDirString
sickbeard.save_config()
@cherrypy.expose
def saveAddShowDefaults(self, defaultFlattenFolders, defaultStatus, anyQualities, bestQualities, audio_lang, subtitles=None):
if anyQualities:
anyQualities = anyQualities.split(',')
else:
anyQualities = []
if bestQualities:
bestQualities = bestQualities.split(',')
else:
bestQualities = []
newQuality = Quality.combineQualities(map(int, anyQualities), map(int, bestQualities))
sickbeard.STATUS_DEFAULT = int(defaultStatus)
sickbeard.QUALITY_DEFAULT = int(newQuality)
sickbeard.AUDIO_SHOW_DEFAULT = str(audio_lang)
if defaultFlattenFolders == "true":
defaultFlattenFolders = 1
else:
defaultFlattenFolders = 0
sickbeard.FLATTEN_FOLDERS_DEFAULT = int(defaultFlattenFolders)
if subtitles == "true":
subtitles = 1
else:
subtitles = 0
sickbeard.SUBTITLES_DEFAULT = int(subtitles)
sickbeard.save_config()
@cherrypy.expose
def generateKey(self):
""" Return a new randomized API_KEY
"""
try:
from hashlib import md5
except ImportError:
from md5 import md5
# Create some values to seed md5
t = str(time.time())
r = str(random.random())
# Create the md5 instance and give it the current time
m = md5(t)
# Update the md5 instance with the random variable
m.update(r)
# Return a hex digest of the md5, eg 49f68a5c8493ec2c0bf489821c21fc3b
logger.log(u"New API generated")
return m.hexdigest()
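    # Persist the General config tab; HTML checkbox values arrive as "on" or None.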
@cherrypy.expose
def saveGeneral(self, log_dir=None, web_port=None, web_log=None, web_ipv6=None,
                    update_shows_on_start=None, launch_browser=None, web_username=None, use_api=None, api_key=None,
web_password=None, version_notify=None, enable_https=None, https_cert=None, https_key=None, sort_article=None, french_column=None):
results = []
if web_ipv6 == "on":
web_ipv6 = 1
else:
web_ipv6 = 0
if web_log == "on":
web_log = 1
else:
web_log = 0
if launch_browser == "on":
launch_browser = 1
else:
launch_browser = 0
if update_shows_on_start == "on":
update_shows_on_start = 1
else:
update_shows_on_start = 0
if sort_article == "on":
sort_article = 1
else:
sort_article = 0
if french_column == "on":
french_column = 1
else:
            french_column = 0
if version_notify == "on":
version_notify = 1
else:
version_notify = 0
if not config.change_LOG_DIR(log_dir):
results += ["Unable to create directory " + os.path.normpath(log_dir) + ", log dir not changed."]
sickbeard.UPDATE_SHOWS_ON_START = update_shows_on_start
sickbeard.LAUNCH_BROWSER = launch_browser
sickbeard.SORT_ARTICLE = sort_article
sickbeard.FRENCH_COLUMN = french_column
sickbeard.WEB_PORT = int(web_port)
sickbeard.WEB_IPV6 = web_ipv6
sickbeard.WEB_LOG = web_log
sickbeard.WEB_USERNAME = web_username
sickbeard.WEB_PASSWORD = web_password
if use_api == "on":
use_api = 1
else:
use_api = 0
sickbeard.USE_API = use_api
sickbeard.API_KEY = api_key
if enable_https == "on":
enable_https = 1
else:
enable_https = 0
sickbeard.ENABLE_HTTPS = enable_https
if not config.change_HTTPS_CERT(https_cert):
results += ["Unable to create directory " + os.path.normpath(https_cert) + ", https cert dir not changed."]
if not config.change_HTTPS_KEY(https_key):
results += ["Unable to create directory " + os.path.normpath(https_key) + ", https key dir not changed."]
config.change_VERSION_NOTIFY(version_notify)
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE) )
redirect("/config/general/")
class ConfigSearch:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config_search.tmpl")
t.submenu = ConfigMenu
return _munge(t)
@cherrypy.expose
def saveSearch(self, use_nzbs=None, use_torrents=None, nzb_dir=None, sab_username=None, sab_password=None,
sab_apikey=None, sab_category=None, sab_host=None, nzbget_password=None, nzbget_category=None, nzbget_host=None,
                   torrent_dir=None, torrent_method=None, nzb_method=None, usenet_retention=None, search_frequency=None, french_delay=None,
download_propers=None, download_french=None, torrent_username=None, torrent_password=None, torrent_host=None,
torrent_label=None, torrent_path=None, torrent_custom_url=None, torrent_ratio=None, torrent_paused=None, ignore_words=None,
                   prefered_method=None, torrent_use_ftp=None, ftp_host=None, ftp_port=None, ftp_timeout=None, ftp_passive=None, ftp_login=None,
ftp_password=None, ftp_remotedir=None):
results = []
if not config.change_NZB_DIR(nzb_dir):
results += ["Unable to create directory " + os.path.normpath(nzb_dir) + ", dir not changed."]
if not config.change_TORRENT_DIR(torrent_dir):
results += ["Unable to create directory " + os.path.normpath(torrent_dir) + ", dir not changed."]
config.change_SEARCH_FREQUENCY(search_frequency)
if download_propers == "on":
download_propers = 1
else:
download_propers = 0
if download_french == "on":
download_french = 1
else:
download_french = 0
if use_nzbs == "on":
use_nzbs = 1
else:
use_nzbs = 0
if use_torrents == "on":
use_torrents = 1
else:
use_torrents = 0
        if usenet_retention is None:
            usenet_retention = 200
        if french_delay is None:
            french_delay = 120
        if ignore_words is None:
            ignore_words = ""
        if ftp_port is None:
            ftp_port = 21
        if ftp_timeout is None:
            ftp_timeout = 120
sickbeard.USE_NZBS = use_nzbs
sickbeard.USE_TORRENTS = use_torrents
sickbeard.NZB_METHOD = nzb_method
sickbeard.PREFERED_METHOD = prefered_method
sickbeard.TORRENT_METHOD = torrent_method
sickbeard.USENET_RETENTION = int(usenet_retention)
sickbeard.FRENCH_DELAY = int(french_delay)
sickbeard.IGNORE_WORDS = ignore_words
sickbeard.DOWNLOAD_PROPERS = download_propers
sickbeard.DOWNLOAD_FRENCH = download_french
sickbeard.SAB_USERNAME = sab_username
sickbeard.SAB_PASSWORD = sab_password
sickbeard.SAB_APIKEY = sab_apikey.strip()
sickbeard.SAB_CATEGORY = sab_category
if sab_host and not re.match('https?://.*', sab_host):
sab_host = 'http://' + sab_host
if not sab_host.endswith('/'):
sab_host = sab_host + '/'
sickbeard.SAB_HOST = sab_host
sickbeard.NZBGET_PASSWORD = nzbget_password
sickbeard.NZBGET_CATEGORY = nzbget_category
sickbeard.NZBGET_HOST = nzbget_host
sickbeard.TORRENT_USERNAME = torrent_username
sickbeard.TORRENT_PASSWORD = torrent_password
sickbeard.TORRENT_LABEL = torrent_label
sickbeard.TORRENT_PATH = torrent_path
if torrent_custom_url == "on":
torrent_custom_url = 1
else:
torrent_custom_url = 0
sickbeard.TORRENT_CUSTOM_URL = torrent_custom_url
sickbeard.TORRENT_RATIO = torrent_ratio
if torrent_paused == "on":
torrent_paused = 1
else:
torrent_paused = 0
sickbeard.TORRENT_PAUSED = torrent_paused
if torrent_host and not re.match('https?://.*', torrent_host):
torrent_host = 'http://' + torrent_host
if not torrent_host.endswith('/'):
torrent_host = torrent_host + '/'
sickbeard.TORRENT_HOST = torrent_host
if torrent_use_ftp == "on":
torrent_use_ftp = 1
else:
torrent_use_ftp = 0
sickbeard.USE_TORRENT_FTP = torrent_use_ftp
sickbeard.FTP_HOST = ftp_host
sickbeard.FTP_PORT = ftp_port
sickbeard.FTP_TIMEOUT = ftp_timeout
if ftp_passive == "on":
ftp_passive = 1
else:
ftp_passive = 0
sickbeard.FTP_PASSIVE = ftp_passive
sickbeard.FTP_LOGIN = ftp_login
sickbeard.FTP_PASSWORD = ftp_password
sickbeard.FTP_DIR = ftp_remotedir
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE) )
redirect("/config/search/")
class ConfigPostProcessing:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config_postProcessing.tmpl")
t.submenu = ConfigMenu
return _munge(t)
@cherrypy.expose
def savePostProcessing(self, naming_pattern=None, naming_multi_ep=None,
xbmc_data=None, xbmc__frodo__data=None, mediabrowser_data=None, synology_data=None, sony_ps3_data=None, wdtv_data=None, tivo_data=None,
use_banner=None, keep_processed_dir=None, process_method=None, process_automatically=None, process_automatically_torrent=None, rename_episodes=None,
move_associated_files=None, tv_download_dir=None, torrent_download_dir=None, naming_custom_abd=None, naming_abd_pattern=None):
results = []
if not config.change_TV_DOWNLOAD_DIR(tv_download_dir):
results += ["Unable to create directory " + os.path.normpath(tv_download_dir) + ", dir not changed."]
if not config.change_TORRENT_DOWNLOAD_DIR(torrent_download_dir):
results += ["Unable to create directory " + os.path.normpath(torrent_download_dir) + ", dir not changed."]
if use_banner == "on":
use_banner = 1
else:
use_banner = 0
if process_automatically == "on":
process_automatically = 1
else:
process_automatically = 0
if process_automatically_torrent == "on":
process_automatically_torrent = 1
else:
process_automatically_torrent = 0
if rename_episodes == "on":
rename_episodes = 1
else:
rename_episodes = 0
if keep_processed_dir == "on":
keep_processed_dir = 1
else:
keep_processed_dir = 0
if move_associated_files == "on":
move_associated_files = 1
else:
move_associated_files = 0
if naming_custom_abd == "on":
naming_custom_abd = 1
else:
naming_custom_abd = 0
sickbeard.PROCESS_AUTOMATICALLY = process_automatically
sickbeard.PROCESS_AUTOMATICALLY_TORRENT = process_automatically_torrent
sickbeard.KEEP_PROCESSED_DIR = keep_processed_dir
sickbeard.PROCESS_METHOD = process_method
sickbeard.RENAME_EPISODES = rename_episodes
sickbeard.MOVE_ASSOCIATED_FILES = move_associated_files
sickbeard.NAMING_CUSTOM_ABD = naming_custom_abd
sickbeard.metadata_provider_dict['XBMC'].set_config(xbmc_data)
sickbeard.metadata_provider_dict['XBMC (Frodo)'].set_config(xbmc__frodo__data)
sickbeard.metadata_provider_dict['MediaBrowser'].set_config(mediabrowser_data)
sickbeard.metadata_provider_dict['Synology'].set_config(synology_data)
sickbeard.metadata_provider_dict['Sony PS3'].set_config(sony_ps3_data)
sickbeard.metadata_provider_dict['WDTV'].set_config(wdtv_data)
sickbeard.metadata_provider_dict['TIVO'].set_config(tivo_data)
if self.isNamingValid(naming_pattern, naming_multi_ep) != "invalid":
sickbeard.NAMING_PATTERN = naming_pattern
sickbeard.NAMING_MULTI_EP = int(naming_multi_ep)
sickbeard.NAMING_FORCE_FOLDERS = naming.check_force_season_folders()
else:
results.append("You tried saving an invalid naming config, not saving your naming settings")
if self.isNamingValid(naming_abd_pattern, None, True) != "invalid":
sickbeard.NAMING_ABD_PATTERN = naming_abd_pattern
elif naming_custom_abd:
results.append("You tried saving an invalid air-by-date naming config, not saving your air-by-date settings")
sickbeard.USE_BANNER = use_banner
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE) )
redirect("/config/postProcessing/")
@cherrypy.expose
def testNaming(self, pattern=None, multi=None, abd=False):
        if multi is not None:
            multi = int(multi)
result = naming.test_name(pattern, multi, abd)
result = ek.ek(os.path.join, result['dir'], result['name'])
return result
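    # Validate a naming pattern; returns "valid", "seasonfolders" (valid only
    # when season folders are forced) or "invalid".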
@cherrypy.expose
def isNamingValid(self, pattern=None, multi=None, abd=False):
        if pattern is None:
            return "invalid"
# air by date shows just need one check, we don't need to worry about season folders
if abd:
is_valid = naming.check_valid_abd_naming(pattern)
require_season_folders = False
else:
# check validity of single and multi ep cases for the whole path
is_valid = naming.check_valid_naming(pattern, multi)
# check validity of single and multi ep cases for only the file name
require_season_folders = naming.check_force_season_folders(pattern, multi)
if is_valid and not require_season_folders:
return "valid"
elif is_valid and require_season_folders:
return "seasonfolders"
else:
return "invalid"
class ConfigProviders:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config_providers.tmpl")
t.submenu = ConfigMenu
return _munge(t)
@cherrypy.expose
def canAddNewznabProvider(self, name):
if not name:
return json.dumps({'error': 'Invalid name specified'})
providerDict = dict(zip([x.getID() for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList))
tempProvider = newznab.NewznabProvider(name, '')
if tempProvider.getID() in providerDict:
return json.dumps({'error': 'Exists as '+providerDict[tempProvider.getID()].name})
else:
return json.dumps({'success': tempProvider.getID()})
@cherrypy.expose
def saveNewznabProvider(self, name, url, key=''):
if not name or not url:
return '0'
if not url.endswith('/'):
url = url + '/'
providerDict = dict(zip([x.name for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList))
if name in providerDict:
if not providerDict[name].default:
providerDict[name].name = name
providerDict[name].url = url
providerDict[name].key = key
return providerDict[name].getID() + '|' + providerDict[name].configStr()
else:
newProvider = newznab.NewznabProvider(name, url, key)
sickbeard.newznabProviderList.append(newProvider)
return newProvider.getID() + '|' + newProvider.configStr()
@cherrypy.expose
def deleteNewznabProvider(self, id):
providerDict = dict(zip([x.getID() for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList))
if id not in providerDict or providerDict[id].default:
return '0'
# delete it from the list
sickbeard.newznabProviderList.remove(providerDict[id])
if id in sickbeard.PROVIDER_ORDER:
sickbeard.PROVIDER_ORDER.remove(id)
return '1'
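    # Persist the Search Providers tab. newznab_string packs the custom
    # providers as "name|url|key" entries joined by "!!!", and provider_order
    # is a space-separated list of "providerid:enabled" pairs.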
@cherrypy.expose
def saveProviders(self, nzbmatrix_username=None, nzbmatrix_apikey=None,
nzbs_r_us_uid=None, nzbs_r_us_hash=None, newznab_string='',
omgwtfnzbs_uid=None, omgwtfnzbs_key=None,
tvtorrents_digest=None, tvtorrents_hash=None,
torrentleech_key=None,
btn_api_key=None,
                      newzbin_username=None, newzbin_password=None, t411_username=None, t411_password=None,
                      ftdb_username=None, ftdb_password=None, addict_username=None, addict_password=None,
                      fnt_username=None, fnt_password=None, libertalia_username=None, libertalia_password=None,
                      xthor_username=None, xthor_password=None, thinkgeek_username=None, thinkgeek_password=None,
ethor_key=None,
provider_order=None):
results = []
provider_str_list = provider_order.split()
provider_list = []
newznabProviderDict = dict(zip([x.getID() for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList))
finishedNames = []
# add all the newznab info we got into our list
for curNewznabProviderStr in newznab_string.split('!!!'):
if not curNewznabProviderStr:
continue
curName, curURL, curKey = curNewznabProviderStr.split('|')
newProvider = newznab.NewznabProvider(curName, curURL, curKey)
curID = newProvider.getID()
# if it already exists then update it
if curID in newznabProviderDict:
newznabProviderDict[curID].name = curName
newznabProviderDict[curID].url = curURL
newznabProviderDict[curID].key = curKey
else:
sickbeard.newznabProviderList.append(newProvider)
finishedNames.append(curID)
        # delete anything that is missing (iterate over a copy: removing items
        # from a list while iterating it skips elements)
        for curProvider in list(sickbeard.newznabProviderList):
            if curProvider.getID() not in finishedNames:
                sickbeard.newznabProviderList.remove(curProvider)
# do the enable/disable
for curProviderStr in provider_str_list:
curProvider, curEnabled = curProviderStr.split(':')
curEnabled = int(curEnabled)
provider_list.append(curProvider)
if curProvider == 'nzbs_r_us':
sickbeard.NZBSRUS = curEnabled
elif curProvider == 'nzbs_org_old':
sickbeard.NZBS = curEnabled
elif curProvider == 'nzbmatrix':
sickbeard.NZBMATRIX = curEnabled
elif curProvider == 'newzbin':
sickbeard.NEWZBIN = curEnabled
elif curProvider == 'bin_req':
sickbeard.BINREQ = curEnabled
elif curProvider == 'womble_s_index':
sickbeard.WOMBLE = curEnabled
elif curProvider == 'nzbx':
sickbeard.NZBX = curEnabled
elif curProvider == 'omgwtfnzbs':
sickbeard.OMGWTFNZBS = curEnabled
elif curProvider == 'ezrss':
sickbeard.EZRSS = curEnabled
elif curProvider == 'tvtorrents':
sickbeard.TVTORRENTS = curEnabled
elif curProvider == 'torrentleech':
sickbeard.TORRENTLEECH = curEnabled
elif curProvider == 'btn':
sickbeard.BTN = curEnabled
elif curProvider == 'binnewz':
sickbeard.BINNEWZ = curEnabled
elif curProvider == 't411':
sickbeard.T411 = curEnabled
elif curProvider == 'ftdb':
sickbeard.FTDB = curEnabled
elif curProvider == 'addict':
sickbeard.ADDICT = curEnabled
elif curProvider == 'fnt':
sickbeard.FNT = curEnabled
elif curProvider == 'libertalia':
sickbeard.LIBERTALIA = curEnabled
elif curProvider == 'xthor':
sickbeard.XTHOR = curEnabled
elif curProvider == 'thinkgeek':
sickbeard.THINKGEEK = curEnabled
elif curProvider == 'cpasbien':
sickbeard.Cpasbien = curEnabled
elif curProvider == 'kat':
sickbeard.kat = curEnabled
elif curProvider == 'piratebay':
sickbeard.THEPIRATEBAY = curEnabled
elif curProvider == 'ethor':
sickbeard.ETHOR = curEnabled
elif curProvider in newznabProviderDict:
newznabProviderDict[curProvider].enabled = bool(curEnabled)
else:
logger.log(u"don't know what " + curProvider + " is, skipping")
sickbeard.TVTORRENTS_DIGEST = tvtorrents_digest.strip()
sickbeard.TVTORRENTS_HASH = tvtorrents_hash.strip()
sickbeard.TORRENTLEECH_KEY = torrentleech_key.strip()
sickbeard.ETHOR_KEY = ethor_key.strip()
sickbeard.BTN_API_KEY = btn_api_key.strip()
sickbeard.T411_USERNAME = t411_username
sickbeard.T411_PASSWORD = t411_password
sickbeard.FTDB_USERNAME = ftdb_username
sickbeard.FTDB_PASSWORD = ftdb_password
sickbeard.ADDICT_USERNAME = addict_username
sickbeard.ADDICT_PASSWORD = addict_password
sickbeard.FNT_USERNAME = fnt_username
sickbeard.FNT_PASSWORD = fnt_password
sickbeard.LIBERTALIA_USERNAME = libertalia_username
sickbeard.LIBERTALIA_PASSWORD = libertalia_password
sickbeard.XTHOR_USERNAME = xthor_username
sickbeard.XTHOR_PASSWORD = xthor_password
sickbeard.THINKGEEK_USERNAME = thinkgeek_username
sickbeard.THINKGEEK_PASSWORD = thinkgeek_password
sickbeard.NZBSRUS_UID = nzbs_r_us_uid.strip()
sickbeard.NZBSRUS_HASH = nzbs_r_us_hash.strip()
sickbeard.OMGWTFNZBS_UID = omgwtfnzbs_uid.strip()
sickbeard.OMGWTFNZBS_KEY = omgwtfnzbs_key.strip()
sickbeard.PROVIDER_ORDER = provider_list
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE) )
redirect("/config/providers/")
class ConfigNotifications:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config_notifications.tmpl")
t.submenu = ConfigMenu
return _munge(t)
@cherrypy.expose
def saveNotifications(self, use_xbmc=None, xbmc_notify_onsnatch=None, xbmc_notify_ondownload=None, xbmc_update_onlyfirst=None, xbmc_notify_onsubtitledownload=None,
xbmc_update_library=None, xbmc_update_full=None, xbmc_host=None, xbmc_username=None, xbmc_password=None,
use_plex=None, plex_notify_onsnatch=None, plex_notify_ondownload=None, plex_notify_onsubtitledownload=None, plex_update_library=None,
plex_server_host=None, plex_host=None, plex_username=None, plex_password=None,
use_growl=None, growl_notify_onsnatch=None, growl_notify_ondownload=None, growl_notify_onsubtitledownload=None, growl_host=None, growl_password=None,
use_prowl=None, prowl_notify_onsnatch=None, prowl_notify_ondownload=None, prowl_notify_onsubtitledownload=None, prowl_api=None, prowl_priority=0,
use_twitter=None, twitter_notify_onsnatch=None, twitter_notify_ondownload=None, twitter_notify_onsubtitledownload=None,
use_boxcar=None, boxcar_notify_onsnatch=None, boxcar_notify_ondownload=None, boxcar_notify_onsubtitledownload=None, boxcar_username=None,
use_boxcar2=None, boxcar2_notify_onsnatch=None, boxcar2_notify_ondownload=None, boxcar2_notify_onsubtitledownload=None, boxcar2_access_token=None, boxcar2_sound=None,
use_pushover=None, pushover_notify_onsnatch=None, pushover_notify_ondownload=None, pushover_notify_onsubtitledownload=None, pushover_userkey=None, pushover_prio=None,
use_libnotify=None, libnotify_notify_onsnatch=None, libnotify_notify_ondownload=None, libnotify_notify_onsubtitledownload=None,
use_nmj=None, nmj_host=None, nmj_database=None, nmj_mount=None, use_synoindex=None,
use_nmjv2=None, nmjv2_host=None, nmjv2_dbloc=None, nmjv2_database=None,
                          use_trakt=None, trakt_username=None, trakt_password=None, trakt_api=None, trakt_remove_watchlist=None, trakt_use_watchlist=None, trakt_start_paused=None, trakt_method_add=None,
use_betaseries=None, betaseries_username=None, betaseries_password=None,
use_synologynotifier=None, synologynotifier_notify_onsnatch=None, synologynotifier_notify_ondownload=None, synologynotifier_notify_onsubtitledownload=None,
use_pytivo=None, pytivo_notify_onsnatch=None, pytivo_notify_ondownload=None, pytivo_notify_onsubtitledownload=None, pytivo_update_library=None,
pytivo_host=None, pytivo_share_name=None, pytivo_tivo_name=None,
use_nma=None, nma_notify_onsnatch=None, nma_notify_ondownload=None, nma_notify_onsubtitledownload=None, nma_api=None, nma_priority=0,
use_pushalot=None, pushalot_notify_onsnatch=None, pushalot_notify_ondownload=None, pushalot_notify_onsubtitledownload=None, pushalot_authorizationtoken=None,
use_pushbullet=None, pushbullet_notify_onsnatch=None, pushbullet_notify_ondownload=None, pushbullet_notify_onsubtitledownload=None, pushbullet_api=None, pushbullet_device=None, pushbullet_device_list=None, pushbullet_channel_list=None,
                          use_mail=None, mail_username=None, mail_password=None, mail_server=None, mail_ssl=None, mail_from=None, mail_to=None, mail_notify_onsnatch=None):
results = []
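        # The run of coercions below repeats the same "on" -> 1 / otherwise -> 0
        # pattern for every checkbox. A tiny helper (a sketch only -- it is not
        # wired in here, to leave the submitted behaviour untouched) would
        # collapse each four-line block to a single line:
        #
        #     def checkbox_to_int(value):
        #         """Map an HTML checkbox value ("on" or None) to 1 or 0."""
        #         return 1 if value == "on" else 0
        #
        #     use_xbmc = checkbox_to_int(use_xbmc)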
if xbmc_notify_onsnatch == "on":
xbmc_notify_onsnatch = 1
else:
xbmc_notify_onsnatch = 0
if xbmc_notify_ondownload == "on":
xbmc_notify_ondownload = 1
else:
xbmc_notify_ondownload = 0
if xbmc_notify_onsubtitledownload == "on":
xbmc_notify_onsubtitledownload = 1
else:
xbmc_notify_onsubtitledownload = 0
if xbmc_update_library == "on":
xbmc_update_library = 1
else:
xbmc_update_library = 0
if xbmc_update_full == "on":
xbmc_update_full = 1
else:
xbmc_update_full = 0
if xbmc_update_onlyfirst == "on":
xbmc_update_onlyfirst = 1
else:
xbmc_update_onlyfirst = 0
if use_xbmc == "on":
use_xbmc = 1
else:
use_xbmc = 0
if plex_update_library == "on":
plex_update_library = 1
else:
plex_update_library = 0
if plex_notify_onsnatch == "on":
plex_notify_onsnatch = 1
else:
plex_notify_onsnatch = 0
if plex_notify_ondownload == "on":
plex_notify_ondownload = 1
else:
plex_notify_ondownload = 0
if plex_notify_onsubtitledownload == "on":
plex_notify_onsubtitledownload = 1
else:
plex_notify_onsubtitledownload = 0
if use_plex == "on":
use_plex = 1
else:
use_plex = 0
if growl_notify_onsnatch == "on":
growl_notify_onsnatch = 1
else:
growl_notify_onsnatch = 0
if growl_notify_ondownload == "on":
growl_notify_ondownload = 1
else:
growl_notify_ondownload = 0
if growl_notify_onsubtitledownload == "on":
growl_notify_onsubtitledownload = 1
else:
growl_notify_onsubtitledownload = 0
if use_growl == "on":
use_growl = 1
else:
use_growl = 0
if prowl_notify_onsnatch == "on":
prowl_notify_onsnatch = 1
else:
prowl_notify_onsnatch = 0
if prowl_notify_ondownload == "on":
prowl_notify_ondownload = 1
else:
prowl_notify_ondownload = 0
if prowl_notify_onsubtitledownload == "on":
prowl_notify_onsubtitledownload = 1
else:
prowl_notify_onsubtitledownload = 0
if use_prowl == "on":
use_prowl = 1
else:
use_prowl = 0
if twitter_notify_onsnatch == "on":
twitter_notify_onsnatch = 1
else:
twitter_notify_onsnatch = 0
if twitter_notify_ondownload == "on":
twitter_notify_ondownload = 1
else:
twitter_notify_ondownload = 0
if twitter_notify_onsubtitledownload == "on":
twitter_notify_onsubtitledownload = 1
else:
twitter_notify_onsubtitledownload = 0
if use_twitter == "on":
use_twitter = 1
else:
use_twitter = 0
if boxcar_notify_onsnatch == "on":
boxcar_notify_onsnatch = 1
else:
boxcar_notify_onsnatch = 0
if boxcar_notify_ondownload == "on":
boxcar_notify_ondownload = 1
else:
boxcar_notify_ondownload = 0
if boxcar_notify_onsubtitledownload == "on":
boxcar_notify_onsubtitledownload = 1
else:
boxcar_notify_onsubtitledownload = 0
if use_boxcar == "on":
use_boxcar = 1
else:
use_boxcar = 0
if pushover_notify_onsnatch == "on":
pushover_notify_onsnatch = 1
else:
pushover_notify_onsnatch = 0
if pushover_notify_ondownload == "on":
pushover_notify_ondownload = 1
else:
pushover_notify_ondownload = 0
if pushover_notify_onsubtitledownload == "on":
pushover_notify_onsubtitledownload = 1
else:
pushover_notify_onsubtitledownload = 0
if use_pushover == "on":
use_pushover = 1
else:
use_pushover = 0
if use_nmj == "on":
use_nmj = 1
else:
use_nmj = 0
if use_synoindex == "on":
use_synoindex = 1
else:
use_synoindex = 0
if use_synologynotifier == "on":
use_synologynotifier = 1
else:
use_synologynotifier = 0
if synologynotifier_notify_onsnatch == "on":
synologynotifier_notify_onsnatch = 1
else:
synologynotifier_notify_onsnatch = 0
if synologynotifier_notify_ondownload == "on":
synologynotifier_notify_ondownload = 1
else:
synologynotifier_notify_ondownload = 0
if synologynotifier_notify_onsubtitledownload == "on":
synologynotifier_notify_onsubtitledownload = 1
else:
synologynotifier_notify_onsubtitledownload = 0
if use_nmjv2 == "on":
use_nmjv2 = 1
else:
use_nmjv2 = 0
if use_trakt == "on":
use_trakt = 1
else:
use_trakt = 0
if trakt_remove_watchlist == "on":
trakt_remove_watchlist = 1
else:
trakt_remove_watchlist = 0
if trakt_use_watchlist == "on":
trakt_use_watchlist = 1
else:
trakt_use_watchlist = 0
if trakt_start_paused == "on":
trakt_start_paused = 1
else:
trakt_start_paused = 0
if use_betaseries == "on":
use_betaseries = 1
else:
use_betaseries = 0
if use_pytivo == "on":
use_pytivo = 1
else:
use_pytivo = 0
if pytivo_notify_onsnatch == "on":
pytivo_notify_onsnatch = 1
else:
pytivo_notify_onsnatch = 0
if pytivo_notify_ondownload == "on":
pytivo_notify_ondownload = 1
else:
pytivo_notify_ondownload = 0
if pytivo_notify_onsubtitledownload == "on":
pytivo_notify_onsubtitledownload = 1
else:
pytivo_notify_onsubtitledownload = 0
if pytivo_update_library == "on":
pytivo_update_library = 1
else:
pytivo_update_library = 0
if use_nma == "on":
use_nma = 1
else:
use_nma = 0
if nma_notify_onsnatch == "on":
nma_notify_onsnatch = 1
else:
nma_notify_onsnatch = 0
if nma_notify_ondownload == "on":
nma_notify_ondownload = 1
else:
nma_notify_ondownload = 0
if nma_notify_onsubtitledownload == "on":
nma_notify_onsubtitledownload = 1
else:
nma_notify_onsubtitledownload = 0
if use_mail == "on":
use_mail = 1
else:
use_mail = 0
if mail_ssl == "on":
mail_ssl = 1
else:
mail_ssl = 0
if mail_notify_onsnatch == "on":
mail_notify_onsnatch = 1
else:
mail_notify_onsnatch = 0
if use_pushalot == "on":
use_pushalot = 1
else:
use_pushalot = 0
if pushalot_notify_onsnatch == "on":
pushalot_notify_onsnatch = 1
else:
pushalot_notify_onsnatch = 0
if pushalot_notify_ondownload == "on":
pushalot_notify_ondownload = 1
else:
pushalot_notify_ondownload = 0
if pushalot_notify_onsubtitledownload == "on":
pushalot_notify_onsubtitledownload = 1
else:
pushalot_notify_onsubtitledownload = 0
if use_pushbullet == "on":
use_pushbullet = 1
else:
use_pushbullet = 0
if pushbullet_notify_onsnatch == "on":
pushbullet_notify_onsnatch = 1
else:
pushbullet_notify_onsnatch = 0
if pushbullet_notify_ondownload == "on":
pushbullet_notify_ondownload = 1
else:
pushbullet_notify_ondownload = 0
if pushbullet_notify_onsubtitledownload == "on":
pushbullet_notify_onsubtitledownload = 1
else:
pushbullet_notify_onsubtitledownload = 0
if use_boxcar2=="on":
use_boxcar2=1
else:
use_boxcar2=0
if boxcar2_notify_onsnatch == "on":
boxcar2_notify_onsnatch = 1
else:
boxcar2_notify_onsnatch = 0
if boxcar2_notify_ondownload == "on":
boxcar2_notify_ondownload = 1
else:
boxcar2_notify_ondownload = 0
if boxcar2_notify_onsubtitledownload == "on":
boxcar2_notify_onsubtitledownload = 1
else:
boxcar2_notify_onsubtitledownload = 0
sickbeard.USE_XBMC = use_xbmc
sickbeard.XBMC_NOTIFY_ONSNATCH = xbmc_notify_onsnatch
sickbeard.XBMC_NOTIFY_ONDOWNLOAD = xbmc_notify_ondownload
sickbeard.XBMC_NOTIFY_ONSUBTITLEDOWNLOAD = xbmc_notify_onsubtitledownload
sickbeard.XBMC_UPDATE_LIBRARY = xbmc_update_library
sickbeard.XBMC_UPDATE_FULL = xbmc_update_full
sickbeard.XBMC_UPDATE_ONLYFIRST = xbmc_update_onlyfirst
sickbeard.XBMC_HOST = xbmc_host
sickbeard.XBMC_USERNAME = xbmc_username
sickbeard.XBMC_PASSWORD = xbmc_password
sickbeard.USE_PLEX = use_plex
sickbeard.PLEX_NOTIFY_ONSNATCH = plex_notify_onsnatch
sickbeard.PLEX_NOTIFY_ONDOWNLOAD = plex_notify_ondownload
sickbeard.PLEX_NOTIFY_ONSUBTITLEDOWNLOAD = plex_notify_onsubtitledownload
sickbeard.PLEX_UPDATE_LIBRARY = plex_update_library
sickbeard.PLEX_HOST = plex_host
sickbeard.PLEX_SERVER_HOST = plex_server_host
sickbeard.PLEX_USERNAME = plex_username
sickbeard.PLEX_PASSWORD = plex_password
sickbeard.USE_GROWL = use_growl
sickbeard.GROWL_NOTIFY_ONSNATCH = growl_notify_onsnatch
sickbeard.GROWL_NOTIFY_ONDOWNLOAD = growl_notify_ondownload
sickbeard.GROWL_NOTIFY_ONSUBTITLEDOWNLOAD = growl_notify_onsubtitledownload
sickbeard.GROWL_HOST = growl_host
sickbeard.GROWL_PASSWORD = growl_password
sickbeard.USE_PROWL = use_prowl
sickbeard.PROWL_NOTIFY_ONSNATCH = prowl_notify_onsnatch
sickbeard.PROWL_NOTIFY_ONDOWNLOAD = prowl_notify_ondownload
sickbeard.PROWL_NOTIFY_ONSUBTITLEDOWNLOAD = prowl_notify_onsubtitledownload
sickbeard.PROWL_API = prowl_api
sickbeard.PROWL_PRIORITY = prowl_priority
sickbeard.USE_TWITTER = use_twitter
sickbeard.TWITTER_NOTIFY_ONSNATCH = twitter_notify_onsnatch
sickbeard.TWITTER_NOTIFY_ONDOWNLOAD = twitter_notify_ondownload
sickbeard.TWITTER_NOTIFY_ONSUBTITLEDOWNLOAD = twitter_notify_onsubtitledownload
sickbeard.USE_BOXCAR = use_boxcar
sickbeard.BOXCAR_NOTIFY_ONSNATCH = boxcar_notify_onsnatch
sickbeard.BOXCAR_NOTIFY_ONDOWNLOAD = boxcar_notify_ondownload
sickbeard.BOXCAR_NOTIFY_ONSUBTITLEDOWNLOAD = boxcar_notify_onsubtitledownload
sickbeard.BOXCAR_USERNAME = boxcar_username
sickbeard.USE_BOXCAR2 = use_boxcar2
sickbeard.BOXCAR2_NOTIFY_ONSNATCH = boxcar2_notify_onsnatch
sickbeard.BOXCAR2_NOTIFY_ONDOWNLOAD = boxcar2_notify_ondownload
sickbeard.BOXCAR2_NOTIFY_ONSUBTITLEDOWNLOAD = boxcar2_notify_onsubtitledownload
sickbeard.BOXCAR2_ACCESS_TOKEN = boxcar2_access_token
sickbeard.BOXCAR2_SOUND = boxcar2_sound
sickbeard.USE_PUSHOVER = use_pushover
sickbeard.PUSHOVER_NOTIFY_ONSNATCH = pushover_notify_onsnatch
sickbeard.PUSHOVER_NOTIFY_ONDOWNLOAD = pushover_notify_ondownload
sickbeard.PUSHOVER_NOTIFY_ONSUBTITLEDOWNLOAD = pushover_notify_onsubtitledownload
sickbeard.PUSHOVER_USERKEY = pushover_userkey
sickbeard.PUSHOVER_PRIO = pushover_prio
sickbeard.USE_LIBNOTIFY = use_libnotify == "on"
sickbeard.LIBNOTIFY_NOTIFY_ONSNATCH = libnotify_notify_onsnatch == "on"
sickbeard.LIBNOTIFY_NOTIFY_ONDOWNLOAD = libnotify_notify_ondownload == "on"
sickbeard.LIBNOTIFY_NOTIFY_ONSUBTITLEDOWNLOAD = libnotify_notify_onsubtitledownload == "on"
sickbeard.USE_NMJ = use_nmj
sickbeard.NMJ_HOST = nmj_host
sickbeard.NMJ_DATABASE = nmj_database
sickbeard.NMJ_MOUNT = nmj_mount
sickbeard.USE_SYNOINDEX = use_synoindex
sickbeard.USE_SYNOLOGYNOTIFIER = use_synologynotifier
sickbeard.SYNOLOGYNOTIFIER_NOTIFY_ONSNATCH = synologynotifier_notify_onsnatch
sickbeard.SYNOLOGYNOTIFIER_NOTIFY_ONDOWNLOAD = synologynotifier_notify_ondownload
sickbeard.SYNOLOGYNOTIFIER_NOTIFY_ONSUBTITLEDOWNLOAD = synologynotifier_notify_onsubtitledownload
sickbeard.USE_NMJv2 = use_nmjv2
sickbeard.NMJv2_HOST = nmjv2_host
sickbeard.NMJv2_DATABASE = nmjv2_database
sickbeard.NMJv2_DBLOC = nmjv2_dbloc
sickbeard.USE_TRAKT = use_trakt
sickbeard.TRAKT_USERNAME = trakt_username
sickbeard.TRAKT_PASSWORD = trakt_password
sickbeard.TRAKT_API = trakt_api
sickbeard.TRAKT_REMOVE_WATCHLIST = trakt_remove_watchlist
sickbeard.TRAKT_USE_WATCHLIST = trakt_use_watchlist
sickbeard.TRAKT_METHOD_ADD = trakt_method_add
sickbeard.TRAKT_START_PAUSED = trakt_start_paused
sickbeard.USE_BETASERIES = use_betaseries
sickbeard.BETASERIES_USERNAME = betaseries_username
sickbeard.BETASERIES_PASSWORD = betaseries_password
sickbeard.USE_PYTIVO = use_pytivo
        # these flags were already coerced to 1/0 above; comparing them against
        # "off" here would always store False
        sickbeard.PYTIVO_NOTIFY_ONSNATCH = pytivo_notify_onsnatch
        sickbeard.PYTIVO_NOTIFY_ONDOWNLOAD = pytivo_notify_ondownload
        sickbeard.PYTIVO_NOTIFY_ONSUBTITLEDOWNLOAD = pytivo_notify_onsubtitledownload
sickbeard.PYTIVO_UPDATE_LIBRARY = pytivo_update_library
sickbeard.PYTIVO_HOST = pytivo_host
sickbeard.PYTIVO_SHARE_NAME = pytivo_share_name
sickbeard.PYTIVO_TIVO_NAME = pytivo_tivo_name
sickbeard.USE_NMA = use_nma
sickbeard.NMA_NOTIFY_ONSNATCH = nma_notify_onsnatch
sickbeard.NMA_NOTIFY_ONDOWNLOAD = nma_notify_ondownload
sickbeard.NMA_NOTIFY_ONSUBTITLEDOWNLOAD = nma_notify_onsubtitledownload
sickbeard.NMA_API = nma_api
sickbeard.NMA_PRIORITY = nma_priority
sickbeard.USE_MAIL = use_mail
sickbeard.MAIL_USERNAME = mail_username
sickbeard.MAIL_PASSWORD = mail_password
sickbeard.MAIL_SERVER = mail_server
sickbeard.MAIL_SSL = mail_ssl
sickbeard.MAIL_FROM = mail_from
sickbeard.MAIL_TO = mail_to
sickbeard.MAIL_NOTIFY_ONSNATCH = mail_notify_onsnatch
sickbeard.USE_PUSHALOT = use_pushalot
sickbeard.PUSHALOT_NOTIFY_ONSNATCH = pushalot_notify_onsnatch
sickbeard.PUSHALOT_NOTIFY_ONDOWNLOAD = pushalot_notify_ondownload
sickbeard.PUSHALOT_NOTIFY_ONSUBTITLEDOWNLOAD = pushalot_notify_onsubtitledownload
sickbeard.PUSHALOT_AUTHORIZATIONTOKEN = pushalot_authorizationtoken
sickbeard.USE_PUSHBULLET = use_pushbullet
sickbeard.PUSHBULLET_NOTIFY_ONSNATCH = pushbullet_notify_onsnatch
sickbeard.PUSHBULLET_NOTIFY_ONDOWNLOAD = pushbullet_notify_ondownload
sickbeard.PUSHBULLET_NOTIFY_ONSUBTITLEDOWNLOAD = pushbullet_notify_onsubtitledownload
sickbeard.PUSHBULLET_API = pushbullet_api
sickbeard.PUSHBULLET_DEVICE = pushbullet_device_list
sickbeard.PUSHBULLET_CHANNEL = pushbullet_channel_list
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE) )
redirect("/config/notifications/")
class ConfigSubtitles:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config_subtitles.tmpl")
t.submenu = ConfigMenu
return _munge(t)
@cherrypy.expose
    def saveSubtitles(self, use_subtitles=None, subsnewasold=None, subtitles_plugins=None, subtitles_languages=None, subtitles_dir=None, subtitles_dir_sub=None, subsnolang=None, service_order=None, subtitles_history=None, subtitles_clean_hi=None, subtitles_clean_team=None, subtitles_clean_music=None, subtitles_clean_punc=None):
results = []
if use_subtitles == "on":
use_subtitles = 1
            if sickbeard.subtitlesFinderScheduler.thread is None or not sickbeard.subtitlesFinderScheduler.thread.isAlive():
sickbeard.subtitlesFinderScheduler.initThread()
else:
use_subtitles = 0
sickbeard.subtitlesFinderScheduler.abort = True
logger.log(u"Waiting for the SUBTITLESFINDER thread to exit")
try:
sickbeard.subtitlesFinderScheduler.thread.join(5)
            except Exception:
                pass
if subtitles_history == "on":
subtitles_history = 1
else:
subtitles_history = 0
if subtitles_dir_sub == "on":
subtitles_dir_sub = 1
else:
subtitles_dir_sub = 0
if subsnewasold == "on":
subsnewasold = 1
else:
subsnewasold = 0
if subsnolang == "on":
subsnolang = 1
else:
subsnolang = 0
sickbeard.USE_SUBTITLES = use_subtitles
sickbeard.SUBSNEWASOLD = subsnewasold
sickbeard.SUBTITLES_LANGUAGES = [lang.alpha2 for lang in subtitles.isValidLanguage(subtitles_languages.replace(' ', '').split(','))] if subtitles_languages != '' else ''
sickbeard.SUBTITLES_DIR = subtitles_dir
sickbeard.SUBTITLES_DIR_SUB = subtitles_dir_sub
sickbeard.SUBSNOLANG = subsnolang
sickbeard.SUBTITLES_HISTORY = subtitles_history
# Subtitles services
services_str_list = service_order.split()
subtitles_services_list = []
subtitles_services_enabled = []
for curServiceStr in services_str_list:
curService, curEnabled = curServiceStr.split(':')
subtitles_services_list.append(curService)
subtitles_services_enabled.append(int(curEnabled))
sickbeard.SUBTITLES_SERVICES_LIST = subtitles_services_list
sickbeard.SUBTITLES_SERVICES_ENABLED = subtitles_services_enabled
#Subtitles Cleansing
if subtitles_clean_hi == "on":
subtitles_clean_hi = 1
else:
subtitles_clean_hi = 0
if subtitles_clean_team == "on":
subtitles_clean_team = 1
else:
subtitles_clean_team = 0
if subtitles_clean_music == "on":
subtitles_clean_music = 1
else:
subtitles_clean_music = 0
if subtitles_clean_punc == "on":
subtitles_clean_punc = 1
else:
subtitles_clean_punc = 0
sickbeard.SUBTITLES_CLEAN_HI = subtitles_clean_hi
sickbeard.SUBTITLES_CLEAN_TEAM = subtitles_clean_team
sickbeard.SUBTITLES_CLEAN_MUSIC = subtitles_clean_music
sickbeard.SUBTITLES_CLEAN_PUNC = subtitles_clean_punc
sickbeard.save_config()
if len(results) > 0:
for x in results:
logger.log(x, logger.ERROR)
ui.notifications.error('Error(s) Saving Configuration',
'<br />\n'.join(results))
else:
ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE) )
redirect("/config/subtitles/")
class Config:
@cherrypy.expose
def index(self):
t = PageTemplate(file="config.tmpl")
t.submenu = ConfigMenu
return _munge(t)
general = ConfigGeneral()
search = ConfigSearch()
postProcessing = ConfigPostProcessing()
providers = ConfigProviders()
notifications = ConfigNotifications()
subtitles = ConfigSubtitles()
def haveXBMC():
return sickbeard.USE_XBMC and sickbeard.XBMC_UPDATE_LIBRARY
def havePLEX():
return sickbeard.USE_PLEX and sickbeard.PLEX_UPDATE_LIBRARY
def HomeMenu():
return [
{ 'title': 'Add Shows', 'path': 'home/addShows/', },
{ 'title': 'Manual Post-Processing', 'path': 'home/postprocess/' },
{ 'title': 'Update XBMC', 'path': 'home/updateXBMC/', 'requires': haveXBMC },
{ 'title': 'Update Plex', 'path': 'home/updatePLEX/', 'requires': havePLEX },
{ 'title': 'Update', 'path': 'manage/manageSearches/forceVersionCheck', 'confirm': True},
{ 'title': 'Restart', 'path': 'home/restart/?pid='+str(sickbeard.PID), 'confirm': True },
{ 'title': 'Shutdown', 'path': 'home/shutdown/?pid='+str(sickbeard.PID), 'confirm': True },
]
class HomePostProcess:
@cherrypy.expose
def index(self):
t = PageTemplate(file="home_postprocess.tmpl")
t.submenu = HomeMenu()
return _munge(t)
@cherrypy.expose
def processEpisode(self, dir=None, nzbName=None, jobName=None, quiet=None):
if not dir:
redirect("/home/postprocess")
else:
result = processTV.processDir(dir, nzbName)
            if quiet is not None and int(quiet) == 1:
return result
result = result.replace("\n","<br />\n")
return _genericMessage("Postprocessing results", result)
class NewHomeAddShows:
@cherrypy.expose
def index(self):
t = PageTemplate(file="home_addShows.tmpl")
t.submenu = HomeMenu()
return _munge(t)
@cherrypy.expose
def getTVDBLanguages(self):
result = tvdb_api.Tvdb().config['valid_languages']
# Make sure list is sorted alphabetically but 'fr' is in front
if 'fr' in result:
del result[result.index('fr')]
result.sort()
result.insert(0, 'fr')
return json.dumps({'results': result})
@cherrypy.expose
def sanitizeFileName(self, name):
return helpers.sanitizeFileName(name)
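    # Query thetvdb.com's GetSeries API once with the full show name and once
    # per word, merging results and dropping duplicate series IDs.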
@cherrypy.expose
def searchTVDBForShowName(self, name, lang="fr"):
if not lang or lang == 'null':
lang = "fr"
baseURL = "http://thetvdb.com/api/GetSeries.php?"
nameUTF8 = name.encode('utf-8')
logger.log(u"Trying to find Show on thetvdb.com with: " + nameUTF8.decode('utf-8'), logger.DEBUG)
# Use each word in the show's name as a possible search term
keywords = nameUTF8.split(' ')
# Insert the whole show's name as the first search term so best results are first
# ex: keywords = ['Some Show Name', 'Some', 'Show', 'Name']
if len(keywords) > 1:
keywords.insert(0, nameUTF8)
# Query the TVDB for each search term and build the list of results
results = []
for searchTerm in keywords:
params = {'seriesname': searchTerm,
'language': lang}
finalURL = baseURL + urllib.urlencode(params)
logger.log(u"Searching for Show with searchterm: \'" + searchTerm.decode('utf-8') + u"\' on URL " + finalURL, logger.DEBUG)
urlData = helpers.getURL(finalURL)
if urlData is None:
# When urlData is None, trouble connecting to TVDB, don't try the rest of the keywords
logger.log(u"Unable to get URL: " + finalURL, logger.ERROR)
break
else:
try:
seriesXML = etree.ElementTree(etree.XML(urlData))
series = seriesXML.getiterator('Series')
except Exception, e:
# use finalURL in log, because urlData can be too much information
logger.log(u"Unable to parse XML for some reason: " + ex(e) + " from XML: " + finalURL, logger.ERROR)
series = ''
# add each result to our list
for curSeries in series:
tvdb_id = int(curSeries.findtext('seriesid'))
# don't add duplicates
if tvdb_id in [x[0] for x in results]:
continue
results.append((tvdb_id, curSeries.findtext('SeriesName'), curSeries.findtext('FirstAired')))
lang_id = tvdb_api.Tvdb().config['langabbv_to_id'][lang]
return json.dumps({'results': results, 'langid': lang_id})
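    # Scan the selected root folders for the "add existing shows" table,
    # flagging directories already in the database and pre-filling any TVDB
    # info found in existing metadata files.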
@cherrypy.expose
def massAddTable(self, rootDir=None):
t = PageTemplate(file="home_massAddTable.tmpl")
t.submenu = HomeMenu()
myDB = db.DBConnection()
if not rootDir:
return "No folders selected."
        elif not isinstance(rootDir, list):
root_dirs = [rootDir]
else:
root_dirs = rootDir
root_dirs = [urllib.unquote_plus(x) for x in root_dirs]
default_index = int(sickbeard.ROOT_DIRS.split('|')[0])
if len(root_dirs) > default_index:
tmp = root_dirs[default_index]
if tmp in root_dirs:
root_dirs.remove(tmp)
root_dirs = [tmp]+root_dirs
dir_list = []
for root_dir in root_dirs:
try:
file_list = ek.ek(os.listdir, root_dir)
            except Exception:
                continue
for cur_file in file_list:
cur_path = ek.ek(os.path.normpath, ek.ek(os.path.join, root_dir, cur_file))
if not ek.ek(os.path.isdir, cur_path):
continue
cur_dir = {
'dir': cur_path,
'display_dir': '<b>'+ek.ek(os.path.dirname, cur_path)+os.sep+'</b>'+ek.ek(os.path.basename, cur_path),
}
# see if the folder is in XBMC already
dirResults = myDB.select("SELECT * FROM tv_shows WHERE location = ?", [cur_path])
if dirResults:
cur_dir['added_already'] = True
else:
cur_dir['added_already'] = False
dir_list.append(cur_dir)
tvdb_id = ''
show_name = ''
for cur_provider in sickbeard.metadata_provider_dict.values():
(tvdb_id, show_name) = cur_provider.retrieveShowMetadata(cur_path)
if tvdb_id and show_name:
break
cur_dir['existing_info'] = (tvdb_id, show_name)
if tvdb_id and helpers.findCertainShow(sickbeard.showList, tvdb_id):
cur_dir['added_already'] = True
t.dirList = dir_list
return _munge(t)
@cherrypy.expose
def newShow(self, show_to_add=None, other_shows=None):
"""
Display the new show page which collects a tvdb id, folder, and extra options and
posts them to addNewShow
"""
t = PageTemplate(file="home_newShow.tmpl")
t.submenu = HomeMenu()
show_dir, tvdb_id, show_name = self.split_extra_show(show_to_add)
if tvdb_id and show_name:
use_provided_info = True
else:
use_provided_info = False
# tell the template whether we're giving it show name & TVDB ID
t.use_provided_info = use_provided_info
# use the given show_dir for the tvdb search if available
if not show_dir:
t.default_show_name = ''
elif not show_name:
t.default_show_name = ek.ek(os.path.basename, ek.ek(os.path.normpath, show_dir)).replace('.',' ')
else:
t.default_show_name = show_name
# carry a list of other dirs if given
if not other_shows:
other_shows = []
elif type(other_shows) != list:
other_shows = [other_shows]
if use_provided_info:
t.provided_tvdb_id = tvdb_id
t.provided_tvdb_name = show_name
t.provided_show_dir = show_dir
t.other_shows = other_shows
return _munge(t)
@cherrypy.expose
def addNewShow(self, whichSeries=None, tvdbLang="fr", rootDir=None, defaultStatus=None,
anyQualities=None, bestQualities=None, flatten_folders=None, subtitles=None, fullShowPath=None,
other_shows=None, skipShow=None, audio_lang=None):
"""
Receive tvdb id, dir, and other options and create a show from them. If extra show dirs are
provided then it forwards back to newShow, if not it goes to /home.
"""
# grab our list of other dirs if given
if not other_shows:
other_shows = []
elif type(other_shows) != list:
other_shows = [other_shows]
def finishAddShow():
# if there are no extra shows then go home
if not other_shows:
redirect('/home')
# peel off the next one
next_show_dir = other_shows[0]
rest_of_show_dirs = other_shows[1:]
# go to add the next show
return self.newShow(next_show_dir, rest_of_show_dirs)
# if we're skipping then behave accordingly
if skipShow:
return finishAddShow()
# sanity check on our inputs
if (not rootDir and not fullShowPath) or not whichSeries:
return "Missing params, no tvdb id or folder:"+repr(whichSeries)+" and "+repr(rootDir)+"/"+repr(fullShowPath)
# figure out what show we're adding and where
series_pieces = whichSeries.partition('|')
if len(series_pieces) < 3:
return "Error with show selection."
tvdb_id = int(series_pieces[0])
show_name = series_pieces[2]
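# e.g. whichSeries = "12345|Some Show" would yield tvdb_id = 12345 and
# show_name = "Some Show" (values are illustrative, not a real TVDB id)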
# use the whole path if it's given, or else append the show name to the root dir to get the full show path
if fullShowPath:
show_dir = ek.ek(os.path.normpath, fullShowPath)
else:
show_dir = ek.ek(os.path.join, rootDir, helpers.sanitizeFileName(show_name))
# blanket policy - if the dir exists you should have used "add existing show" numbnuts
if ek.ek(os.path.isdir, show_dir) and not fullShowPath:
ui.notifications.error("Unable to add show", "Folder "+show_dir+" exists already")
redirect('/home/addShows/existingShows')
# don't create show dir if config says not to
if sickbeard.ADD_SHOWS_WO_DIR:
logger.log(u"Skipping initial creation of "+show_dir+" due to config.ini setting")
else:
dir_exists = helpers.makeDir(show_dir)
if not dir_exists:
logger.log(u"Unable to create the folder "+show_dir+", can't add the show", logger.ERROR)
ui.notifications.error("Unable to add show", "Unable to create the folder "+show_dir+", can't add the show")
redirect("/home")
else:
helpers.chmodAsParent(show_dir)
# prepare the inputs for passing along
if flatten_folders == "on":
flatten_folders = 1
else:
flatten_folders = 0
if subtitles == "on":
subtitles = 1
else:
subtitles = 0
if not anyQualities:
anyQualities = []
if not bestQualities:
bestQualities = []
if type(anyQualities) != list:
anyQualities = [anyQualities]
if type(bestQualities) != list:
bestQualities = [bestQualities]
newQuality = Quality.combineQualities(map(int, anyQualities), map(int, bestQualities))
# add the show
sickbeard.showQueueScheduler.action.addShow(tvdb_id, show_dir, int(defaultStatus), newQuality, flatten_folders, tvdbLang, subtitles, audio_lang) #@UndefinedVariable
ui.notifications.message('Show added', 'Adding the specified show into '+show_dir)
return finishAddShow()
@cherrypy.expose
def existingShows(self):
"""
Prints out the page to add existing shows from a root dir
"""
t = PageTemplate(file="home_addExistingShow.tmpl")
t.submenu = HomeMenu()
return _munge(t)
def split_extra_show(self, extra_show):
if not extra_show:
return (None, None, None)
split_vals = extra_show.split('|')
if len(split_vals) < 3:
return (extra_show, None, None)
show_dir = split_vals[0]
tvdb_id = split_vals[1]
show_name = '|'.join(split_vals[2:])
return (show_dir, tvdb_id, show_name)
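# A quick sketch of what split_extra_show returns (illustrative values only):
#   split_extra_show('/tv/Some Show|12345|Some Show')
#       -> ('/tv/Some Show', '12345', 'Some Show')
#   split_extra_show('/tv/Some Show')
#       -> ('/tv/Some Show', None, None)
#   split_extra_show(None)
#       -> (None, None, None)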
@cherrypy.expose
def addExistingShows(self, shows_to_add=None, promptForSettings=None):
"""
Receives a dir list and add them. Adds the ones with given TVDB IDs first, then forwards
along to the newShow page.
"""
# grab a list of other shows to add, if provided
if not shows_to_add:
shows_to_add = []
elif type(shows_to_add) != list:
shows_to_add = [shows_to_add]
shows_to_add = [urllib.unquote_plus(x) for x in shows_to_add]
if promptForSettings == "on":
promptForSettings = 1
else:
promptForSettings = 0
tvdb_id_given = []
dirs_only = []
# separate all the ones with TVDB IDs
for cur_dir in shows_to_add:
if not '|' in cur_dir:
dirs_only.append(cur_dir)
else:
show_dir, tvdb_id, show_name = self.split_extra_show(cur_dir)
if not show_dir or not tvdb_id or not show_name:
continue
tvdb_id_given.append((show_dir, int(tvdb_id), show_name))
# if they want me to prompt for settings then I will just carry on to the newShow page
if promptForSettings and shows_to_add:
return self.newShow(shows_to_add[0], shows_to_add[1:])
# if they don't want me to prompt for settings then I can just add all the nfo shows now
num_added = 0
for cur_show in tvdb_id_given:
show_dir, tvdb_id, show_name = cur_show
# add the show
sickbeard.showQueueScheduler.action.addShow(tvdb_id, show_dir, int(sickbeard.STATUS_DEFAULT), sickbeard.QUALITY_DEFAULT, sickbeard.FLATTEN_FOLDERS_DEFAULT,"fr", sickbeard.SUBTITLES_DEFAULT, sickbeard.AUDIO_SHOW_DEFAULT) #@UndefinedVariable
num_added += 1
if num_added:
ui.notifications.message("Shows Added", "Automatically added "+str(num_added)+" from their existing metadata files")
# if we're done then go home
if not dirs_only:
redirect('/home')
# for the remaining shows we need to prompt for each one, so forward this on to the newShow page
return self.newShow(dirs_only[0], dirs_only[1:])
ErrorLogsMenu = [
{ 'title': 'Clear Errors', 'path': 'errorlogs/clearerrors' },
#{ 'title': 'View Log', 'path': 'errorlogs/viewlog' },
]
class ErrorLogs:
@cherrypy.expose
def index(self):
t = PageTemplate(file="errorlogs.tmpl")
t.submenu = ErrorLogsMenu
return _munge(t)
@cherrypy.expose
def clearerrors(self):
classes.ErrorViewer.clear()
redirect("/errorlogs")
@cherrypy.expose
def viewlog(self, minLevel=logger.MESSAGE, maxLines=500):
t = PageTemplate(file="viewlogs.tmpl")
t.submenu = ErrorLogsMenu
minLevel = int(minLevel)
data = []
if os.path.isfile(logger.sb_log_instance.log_file):
f = open(logger.sb_log_instance.log_file)
data = f.readlines()
f.close()
regex = "^(\w+).?\-(\d\d)\s+(\d\d)\:(\d\d):(\d\d)\s+([A-Z]+)\s+(.*)$"
finalData = []
numLines = 0
lastLine = False
numToShow = min(maxLines, len(data))
for x in reversed(data):
x = x.decode('utf-8')
match = re.match(regex, x)
if match:
level = match.group(6)
if level not in logger.reverseNames:
lastLine = False
continue
if logger.reverseNames[level] >= minLevel:
lastLine = True
finalData.append(x)
else:
lastLine = False
continue
elif lastLine:
finalData.append("AA"+x)
numLines += 1
if numLines >= numToShow:
break
result = "".join(finalData)
t.logLines = result
t.minLevel = minLevel
return _munge(t)
class Home:
@cherrypy.expose
def is_alive(self, *args, **kwargs):
if 'callback' in kwargs and '_' in kwargs:
callback, _ = kwargs['callback'], kwargs['_']
else:
return "Error: Unsupported Request. Send jsonp request with 'callback' variable in the query stiring."
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
cherrypy.response.headers['Content-Type'] = 'text/javascript'
cherrypy.response.headers['Access-Control-Allow-Origin'] = '*'
cherrypy.response.headers['Access-Control-Allow-Headers'] = 'x-requested-with'
if sickbeard.started:
return callback+'('+json.dumps({"msg": str(sickbeard.PID)})+');'
else:
return callback+'('+json.dumps({"msg": "nope"})+');'
@cherrypy.expose
def index(self):
t = PageTemplate(file="home.tmpl")
t.submenu = HomeMenu()
return _munge(t)
addShows = NewHomeAddShows()
postprocess = HomePostProcess()
@cherrypy.expose
def testSABnzbd(self, host=None, username=None, password=None, apikey=None):
if not host.endswith("/"):
host = host + "/"
connection, accesMsg = sab.getSabAccesMethod(host, username, password, apikey)
if connection:
authed, authMsg = sab.testAuthentication(host, username, password, apikey) #@UnusedVariable
if authed:
return "Success. Connected and authenticated"
else:
return "Authentication failed. SABnzbd expects '"+accesMsg+"' as authentication method"
else:
return "Unable to connect to host"
@cherrypy.expose
def testTorrent(self, torrent_method=None, host=None, username=None, password=None):
if not host.endswith("/"):
host = host + "/"
client = clients.getClientIstance(torrent_method)
connection, accesMsg = client(host, username, password).testAuthentication()
return accesMsg
@cherrypy.expose
def testGrowl(self, host=None, password=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.growl_notifier.test_notify(host, password)
if password==None or password=='':
pw_append = ''
else:
pw_append = " with password: " + password
if result:
return "Registered and Tested growl successfully "+urllib.unquote_plus(host)+pw_append
else:
return "Registration and Testing of growl failed "+urllib.unquote_plus(host)+pw_append
@cherrypy.expose
def testProwl(self, prowl_api=None, prowl_priority=0):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.prowl_notifier.test_notify(prowl_api, prowl_priority)
if result:
return "Test prowl notice sent successfully"
else:
return "Test prowl notice failed"
@cherrypy.expose
def testBoxcar(self, username=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.boxcar_notifier.test_notify(username)
if result:
return "Boxcar notification succeeded. Check your Boxcar clients to make sure it worked"
else:
return "Error sending Boxcar notification"
@cherrypy.expose
def testBoxcar2(self, accessToken=None, sound=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.boxcar2_notifier.test_notify(accessToken, sound)
if result:
return "Boxcar2 notification succeeded. Check your Boxcar2 clients to make sure it worked"
else:
return "Error sending Boxcar2 notification"
@cherrypy.expose
def testPushover(self, userKey=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.pushover_notifier.test_notify(userKey)
if result:
return "Pushover notification succeeded. Check your Pushover clients to make sure it worked"
else:
return "Error sending Pushover notification"
@cherrypy.expose
def twitterStep1(self):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
return notifiers.twitter_notifier._get_authorization()
@cherrypy.expose
def twitterStep2(self, key):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.twitter_notifier._get_credentials(key)
logger.log(u"result: "+str(result))
if result:
return "Key verification successful"
else:
return "Unable to verify key"
@cherrypy.expose
def testTwitter(self):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.twitter_notifier.test_notify()
if result:
return "Tweet successful, check your twitter to make sure it worked"
else:
return "Error sending tweet"
@cherrypy.expose
def testXBMC(self, host=None, username=None, password=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
finalResult = ''
for curHost in [x.strip() for x in host.split(",")]:
curResult = notifiers.xbmc_notifier.test_notify(urllib.unquote_plus(curHost), username, password)
if len(curResult.split(":")) > 2 and 'OK' in curResult.split(":")[2]:
finalResult += "Test XBMC notice sent successfully to " + urllib.unquote_plus(curHost)
else:
finalResult += "Test XBMC notice failed to " + urllib.unquote_plus(curHost)
finalResult += "<br />\n"
return finalResult
@cherrypy.expose
def testPLEX(self, host=None, username=None, password=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
finalResult = ''
for curHost in [x.strip() for x in host.split(",")]:
curResult = notifiers.plex_notifier.test_notify(urllib.unquote_plus(curHost), username, password)
if len(curResult.split(":")) > 2 and 'OK' in curResult.split(":")[2]:
finalResult += "Test Plex notice sent successfully to " + urllib.unquote_plus(curHost)
else:
finalResult += "Test Plex notice failed to " + urllib.unquote_plus(curHost)
finalResult += "<br />\n"
return finalResult
@cherrypy.expose
def testLibnotify(self):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
if notifiers.libnotify_notifier.test_notify():
return "Tried sending desktop notification via libnotify"
else:
return notifiers.libnotify.diagnose()
@cherrypy.expose
def testNMJ(self, host=None, database=None, mount=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.nmj_notifier.test_notify(urllib.unquote_plus(host), database, mount)
if result:
return "Successfull started the scan update"
else:
return "Test failed to start the scan update"
@cherrypy.expose
def settingsNMJ(self, host=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.nmj_notifier.notify_settings(urllib.unquote_plus(host))
if result:
return '{"message": "Got settings from %(host)s", "database": "%(database)s", "mount": "%(mount)s"}' % {"host": host, "database": sickbeard.NMJ_DATABASE, "mount": sickbeard.NMJ_MOUNT}
else:
return '{"message": "Failed! Make sure your Popcorn is on and NMJ is running. (see Log & Errors -> Debug for detailed info)", "database": "", "mount": ""}'
@cherrypy.expose
def testNMJv2(self, host=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.nmjv2_notifier.test_notify(urllib.unquote_plus(host))
if result:
return "Test notice sent successfully to " + urllib.unquote_plus(host)
else:
return "Test notice failed to " + urllib.unquote_plus(host)
@cherrypy.expose
def settingsNMJv2(self, host=None, dbloc=None, instance=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.nmjv2_notifier.notify_settings(urllib.unquote_plus(host), dbloc, instance)
if result:
return '{"message": "NMJ Database found at: %(host)s", "database": "%(database)s"}' % {"host": host, "database": sickbeard.NMJv2_DATABASE}
else:
return '{"message": "Unable to find NMJ Database at location: %(dbloc)s. Is the right location selected and PCH running?", "database": ""}' % {"dbloc": dbloc}
@cherrypy.expose
def testTrakt(self, api=None, username=None, password=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.trakt_notifier.test_notify(api, username, password)
if result:
return "Test notice sent successfully to Trakt"
else:
return "Test notice failed to Trakt"
@cherrypy.expose
def testBetaSeries(self, username=None, password=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.betaseries_notifier.test_notify(username, password)
if result:
return "Test notice sent successfully to BetaSeries"
else:
return "Test notice failed to BetaSeries"
@cherrypy.expose
def testMail(self, mail_from=None, mail_to=None, mail_server=None, mail_ssl=None, mail_user=None, mail_password=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.mail_notifier.test_notify(mail_from, mail_to, mail_server, mail_ssl, mail_user, mail_password)
if result:
return "Mail sent"
else:
return "Can't sent mail."
@cherrypy.expose
def testNMA(self, nma_api=None, nma_priority=0):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.nma_notifier.test_notify(nma_api, nma_priority)
if result:
return "Test NMA notice sent successfully"
else:
return "Test NMA notice failed"
@cherrypy.expose
def testPushalot(self, authorizationToken=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.pushalot_notifier.test_notify(authorizationToken)
if result:
return "Pushalot notification succeeded. Check your Pushalot clients to make sure it worked"
else:
return "Error sending Pushalot notification"
@cherrypy.expose
def testPushbullet(self, api=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.pushbullet_notifier.test_notify(api)
if result:
return "Pushbullet notification succeeded. Check your device to make sure it worked"
else:
return "Error sending Pushbullet notification"
@cherrypy.expose
def getPushbulletDevices(self, api=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.pushbullet_notifier.get_devices(api)
if result:
return result
else:
return "Error sending Pushbullet notification"
# get channels for the given Pushbullet access token
@cherrypy.expose
def getPushbulletChannels(self, api=None):
cherrypy.response.headers['Cache-Control'] = "max-age=0,no-cache,no-store"
result = notifiers.pushbullet_notifier.get_channels(api)
if result:
return result
else:
return "Error sending Pushbullet notification"
@cherrypy.expose
def shutdown(self, pid=None):
if str(pid) != str(sickbeard.PID):
redirect("/home")
threading.Timer(2, sickbeard.invoke_shutdown).start()
title = "Shutting down"
message = "Sick Beard is shutting down..."
return _genericMessage(title, message)
@cherrypy.expose
def restart(self, pid=None):
if str(pid) != str(sickbeard.PID):
redirect("/home")
t = PageTemplate(file="restart.tmpl")
t.submenu = HomeMenu()
# do a soft restart
threading.Timer(2, sickbeard.invoke_restart, [False]).start()
return _munge(t)
@cherrypy.expose
def update(self, pid=None):
if str(pid) != str(sickbeard.PID):
redirect("/home")
updated = sickbeard.versionCheckScheduler.action.update() #@UndefinedVariable
if updated:
# do a hard restart
threading.Timer(2, sickbeard.invoke_restart, [False]).start()
t = PageTemplate(file="restart_bare.tmpl")
return _munge(t)
else:
return _genericMessage("Update Failed","Update wasn't successful, not restarting. Check your log for more information.")
@cherrypy.expose
def displayShow(self, show=None):
if show == None:
return _genericMessage("Error", "Invalid show ID")
else:
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Show not in show list")
showObj.exceptions = scene_exceptions.get_scene_exceptions(showObj.tvdbid)
myDB = db.DBConnection()
seasonResults = myDB.select(
"SELECT DISTINCT season FROM tv_episodes WHERE showid = ? ORDER BY season desc",
[showObj.tvdbid]
)
sqlResults = myDB.select(
"SELECT * FROM tv_episodes WHERE showid = ? ORDER BY season DESC, episode DESC",
[showObj.tvdbid]
)
t = PageTemplate(file="displayShow.tmpl")
t.submenu = [ { 'title': 'Edit', 'path': 'home/editShow?show=%d'%showObj.tvdbid } ]
try:
t.showLoc = (showObj.location, True)
except sickbeard.exceptions.ShowDirNotFoundException:
t.showLoc = (showObj._location, False)
show_message = ''
if sickbeard.showQueueScheduler.action.isBeingAdded(showObj): #@UndefinedVariable
show_message = 'This show is in the process of being downloaded from theTVDB.com - the info below is incomplete.'
elif sickbeard.showQueueScheduler.action.isBeingUpdated(showObj): #@UndefinedVariable
show_message = 'The information below is in the process of being updated.'
elif sickbeard.showQueueScheduler.action.isBeingRefreshed(showObj): #@UndefinedVariable
show_message = 'The episodes below are currently being refreshed from disk'
elif sickbeard.showQueueScheduler.action.isBeingSubtitled(showObj): #@UndefinedVariable
show_message = 'Currently downloading subtitles for this show'
elif sickbeard.showQueueScheduler.action.isBeingCleanedSubtitle(showObj): #@UndefinedVariable
show_message = 'Currently cleaning subtitles for this show'
elif sickbeard.showQueueScheduler.action.isInRefreshQueue(showObj): #@UndefinedVariable
show_message = 'This show is queued to be refreshed.'
elif sickbeard.showQueueScheduler.action.isInUpdateQueue(showObj): #@UndefinedVariable
show_message = 'This show is queued and awaiting an update.'
elif sickbeard.showQueueScheduler.action.isInSubtitleQueue(showObj): #@UndefinedVariable
show_message = 'This show is queued and awaiting subtitles download.'
if not sickbeard.showQueueScheduler.action.isBeingAdded(showObj): #@UndefinedVariable
if not sickbeard.showQueueScheduler.action.isBeingUpdated(showObj): #@UndefinedVariable
t.submenu.append({ 'title': 'Delete', 'path': 'home/deleteShow?show=%d'%showObj.tvdbid, 'confirm': True })
t.submenu.append({ 'title': 'Re-scan files', 'path': 'home/refreshShow?show=%d'%showObj.tvdbid })
t.submenu.append({ 'title': 'Force Full Update', 'path': 'home/updateShow?show=%d&force=1'%showObj.tvdbid })
t.submenu.append({ 'title': 'Update show in XBMC', 'path': 'home/updateXBMC?showName=%s'%urllib.quote_plus(showObj.name.encode('utf-8')), 'requires': haveXBMC })
t.submenu.append({ 'title': 'Preview Rename', 'path': 'home/testRename?show=%d'%showObj.tvdbid })
t.submenu.append({ 'title': 'French Search', 'path': 'home/frenchSearch?show=%d'%showObj.tvdbid })
if sickbeard.USE_SUBTITLES and not sickbeard.showQueueScheduler.action.isBeingSubtitled(showObj) and not sickbeard.showQueueScheduler.action.isBeingCleanedSubtitle(showObj) and showObj.subtitles:
t.submenu.append({ 'title': 'Download Subtitles', 'path': 'home/subtitleShow?show=%d'%showObj.tvdbid })
t.submenu.append({ 'title': 'Clean Subtitles', 'path': 'home/subtitleShowClean?show=%d'%showObj.tvdbid })
t.show = showObj
t.sqlResults = sqlResults
t.seasonResults = seasonResults
t.show_message = show_message
epCounts = {}
epCats = {}
epCounts[Overview.SKIPPED] = 0
epCounts[Overview.WANTED] = 0
epCounts[Overview.QUAL] = 0
epCounts[Overview.GOOD] = 0
epCounts[Overview.UNAIRED] = 0
epCounts[Overview.SNATCHED] = 0
showSceneNumberColum = False
for curResult in sqlResults:
if not showSceneNumberColum and (isinstance(curResult["scene_season"], int) and isinstance(curResult["scene_episode"], int)):
showSceneNumberColum = True
curEpCat = showObj.getOverview(int(curResult["status"]))
epCats[str(curResult["season"])+"x"+str(curResult["episode"])] = curEpCat
epCounts[curEpCat] += 1
t.showSceneNumberColum = showSceneNumberColum
def titler(x):
if not x:
return x
if x.lower().startswith('a '):
x = x[2:]
elif x.lower().startswith('the '):
x = x[4:]
return x
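# e.g. titler('The Big Show') -> 'Big Show' and titler('A Show') -> 'Show',
# so the sort below ignores leading articles (titles are made-up examples)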
t.sortedShowList = sorted(sickbeard.showList, lambda x, y: cmp(titler(x.name), titler(y.name)))
t.epCounts = epCounts
t.epCats = epCats
return _munge(t)
@cherrypy.expose
def plotDetails(self, show, season, episode):
result = db.DBConnection().action("SELECT description FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?", (show, season, episode)).fetchone()
return result['description'] if result else 'Episode not found.'
@cherrypy.expose
def editShow(self, show=None, location=None, anyQualities=[], bestQualities=[], exceptions_list=[], flatten_folders=None, paused=None, frenchsearch=None, directCall=False, air_by_date=None, tvdbLang=None, audio_lang=None, subtitles=None):
if show == None:
errString = "Invalid show ID: "+str(show)
if directCall:
return [errString]
else:
return _genericMessage("Error", errString)
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
errString = "Unable to find the specified show: "+str(show)
if directCall:
return [errString]
else:
return _genericMessage("Error", errString)
showObj.exceptions = scene_exceptions.get_scene_exceptions(showObj.tvdbid)
if not location and not anyQualities and not bestQualities and not flatten_folders:
t = PageTemplate(file="editShow.tmpl")
t.submenu = HomeMenu()
with showObj.lock:
t.show = showObj
return _munge(t)
if flatten_folders == "on":
flatten_folders = 1
else:
flatten_folders = 0
logger.log(u"flatten folders: "+str(flatten_folders))
if paused == "on":
paused = 1
else:
paused = 0
if frenchsearch == "on":
frenchsearch = 1
else:
frenchsearch = 0
if air_by_date == "on":
air_by_date = 1
else:
air_by_date = 0
if subtitles == "on":
subtitles = 1
else:
subtitles = 0
if tvdbLang and tvdbLang in tvdb_api.Tvdb().config['valid_languages']:
tvdb_lang = tvdbLang
else:
tvdb_lang = showObj.lang
# if we changed the language then kick off an update
if tvdb_lang == showObj.lang:
do_update = False
else:
do_update = True
if type(anyQualities) != list:
anyQualities = [anyQualities]
if type(bestQualities) != list:
bestQualities = [bestQualities]
if type(exceptions_list) != list:
exceptions_list = [exceptions_list]
#If directCall from mass_edit_update no scene exceptions handling
if directCall:
do_update_exceptions = False
else:
if set(exceptions_list) == set(showObj.exceptions):
do_update_exceptions = False
else:
do_update_exceptions = True
errors = []
with showObj.lock:
newQuality = Quality.combineQualities(map(int, anyQualities), map(int, bestQualities))
showObj.quality = newQuality
# reversed for now
if bool(showObj.flatten_folders) != bool(flatten_folders):
showObj.flatten_folders = flatten_folders
try:
sickbeard.showQueueScheduler.action.refreshShow(showObj) #@UndefinedVariable
except exceptions.CantRefreshException, e:
errors.append("Unable to refresh this show: "+ex(e))
showObj.paused = paused
showObj.air_by_date = air_by_date
showObj.subtitles = subtitles
showObj.frenchsearch = frenchsearch
showObj.lang = tvdb_lang
showObj.audio_lang = audio_lang
# if we change location clear the db of episodes, change it, write to db, and rescan
if os.path.normpath(showObj._location) != os.path.normpath(location):
logger.log(os.path.normpath(showObj._location)+" != "+os.path.normpath(location), logger.DEBUG)
if not ek.ek(os.path.isdir, location):
errors.append("New location <tt>%s</tt> does not exist" % location)
# don't bother if we're going to update anyway
elif not do_update:
# change it
try:
showObj.location = location
try:
sickbeard.showQueueScheduler.action.refreshShow(showObj) #@UndefinedVariable
except exceptions.CantRefreshException, e:
errors.append("Unable to refresh this show:"+ex(e))
# grab updated info from TVDB
#showObj.loadEpisodesFromTVDB()
# rescan the episodes in the new folder
except exceptions.NoNFOException:
errors.append("The folder at <tt>%s</tt> doesn't contain a tvshow.nfo - copy your files to that folder before you change the directory in Sick Beard." % location)
# save it to the DB
showObj.saveToDB()
# force the update
if do_update:
try:
sickbeard.showQueueScheduler.action.updateShow(showObj, True) #@UndefinedVariable
time.sleep(1)
except exceptions.CantUpdateException, e:
errors.append("Unable to force an update on the show.")
if do_update_exceptions:
try:
scene_exceptions.update_scene_exceptions(showObj.tvdbid, exceptions_list) #@UndefinedVariable
time.sleep(1)
except exceptions.CantUpdateException, e:
errors.append("Unable to force an update on scene exceptions of the show.")
if directCall:
return errors
if len(errors) > 0:
ui.notifications.error('%d error%s while saving changes:' % (len(errors), "" if len(errors) == 1 else "s"),
'<ul>' + '\n'.join(['<li>%s</li>' % error for error in errors]) + "</ul>")
redirect("/home/displayShow?show=" + show)
@cherrypy.expose
def deleteShow(self, show=None):
if show == None:
return _genericMessage("Error", "Invalid show ID")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Unable to find the specified show")
if sickbeard.showQueueScheduler.action.isBeingAdded(showObj) or sickbeard.showQueueScheduler.action.isBeingUpdated(showObj): #@UndefinedVariable
return _genericMessage("Error", "Shows can't be deleted while they're being added or updated.")
showObj.deleteShow()
ui.notifications.message('<b>%s</b> has been deleted' % showObj.name)
redirect("/home")
@cherrypy.expose
def refreshShow(self, show=None):
if show == None:
return _genericMessage("Error", "Invalid show ID")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Unable to find the specified show")
# force the update from the DB
try:
sickbeard.showQueueScheduler.action.refreshShow(showObj) #@UndefinedVariable
except exceptions.CantRefreshException, e:
ui.notifications.error("Unable to refresh this show.",
ex(e))
time.sleep(3)
redirect("/home/displayShow?show="+str(showObj.tvdbid))
@cherrypy.expose
def updateShow(self, show=None, force=0):
if show == None:
return _genericMessage("Error", "Invalid show ID")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Unable to find the specified show")
# force the update
try:
sickbeard.showQueueScheduler.action.updateShow(showObj, bool(force)) #@UndefinedVariable
except exceptions.CantUpdateException, e:
ui.notifications.error("Unable to update this show.",
ex(e))
# just give it some time
time.sleep(3)
redirect("/home/displayShow?show=" + str(showObj.tvdbid))
@cherrypy.expose
def subtitleShow(self, show=None, force=0):
if show == None:
return _genericMessage("Error", "Invalid show ID")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Unable to find the specified show")
# search and download subtitles
sickbeard.showQueueScheduler.action.downloadSubtitles(showObj, bool(force)) #@UndefinedVariable
time.sleep(3)
redirect("/home/displayShow?show="+str(showObj.tvdbid))
@cherrypy.expose
def subtitleShowClean(self, show=None, force=0):
if show == None:
return _genericMessage("Error", "Invalid show ID")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Unable to find the specified show")
# search and download subtitles
sickbeard.showQueueScheduler.action.cleanSubtitles(showObj, bool(force)) #@UndefinedVariable
time.sleep(3)
redirect("/home/displayShow?show="+str(showObj.tvdbid))
@cherrypy.expose
def frenchSearch(self, show=None, force=0):
if show == None:
return _genericMessage("Error", "Invalid show ID")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Unable to find the specified show")
# search and download subtitles
sickbeard.showQueueScheduler.action.searchFrench(showObj, bool(force)) #@UndefinedVariable
time.sleep(3)
redirect("/home/displayShow?show="+str(showObj.tvdbid))
@cherrypy.expose
def updateXBMC(self, showName=None):
if sickbeard.XBMC_UPDATE_ONLYFIRST:
# only send update to first host in the list -- workaround for xbmc sql backend users
host = sickbeard.XBMC_HOST.split(",")[0].strip()
else:
host = sickbeard.XBMC_HOST
if notifiers.xbmc_notifier.update_library(showName=showName):
ui.notifications.message("Library update command sent to XBMC host(s): " + host)
else:
ui.notifications.error("Unable to contact one or more XBMC host(s): " + host)
redirect('/home')
@cherrypy.expose
def updatePLEX(self):
if notifiers.plex_notifier.update_library():
ui.notifications.message("Library update command sent to Plex Media Server host: " + sickbeard.PLEX_SERVER_HOST)
else:
ui.notifications.error("Unable to contact Plex Media Server host: " + sickbeard.PLEX_SERVER_HOST)
redirect('/home')
@cherrypy.expose
def setStatus(self, show=None, eps=None, status=None, direct=False):
if show == None or eps == None or status == None:
errMsg = "You must specify a show and at least one episode"
if direct:
ui.notifications.error('Error', errMsg)
return json.dumps({'result': 'error'})
else:
return _genericMessage("Error", errMsg)
if not statusStrings.has_key(int(status)):
errMsg = "Invalid status"
if direct:
ui.notifications.error('Error', errMsg)
return json.dumps({'result': 'error'})
else:
return _genericMessage("Error", errMsg)
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
errMsg = "Error", "Show not in show list"
if direct:
ui.notifications.error('Error', errMsg)
return json.dumps({'result': 'error'})
else:
return _genericMessage("Error", errMsg)
segment_list = []
if eps != None:
for curEp in eps.split('|'):
logger.log(u"Attempting to set status on episode "+curEp+" to "+status, logger.DEBUG)
epInfo = curEp.split('x')
epObj = showObj.getEpisode(int(epInfo[0]), int(epInfo[1]))
# bail out before touching epObj if the episode couldn't be retrieved
if epObj == None:
    return _genericMessage("Error", "Episode couldn't be retrieved")
if int(status) == WANTED:
    # figure out what segment the episode is in and remember it so we can backlog it
    if epObj.show.air_by_date:
        ep_segment = str(epObj.airdate)[:7]
    else:
        ep_segment = epObj.season
    if ep_segment not in segment_list:
        segment_list.append(ep_segment)
with epObj.lock:
# don't let them mess up UNAIRED episodes
if epObj.status == UNAIRED:
logger.log(u"Refusing to change status of "+curEp+" because it is UNAIRED", logger.ERROR)
continue
if int(status) in Quality.DOWNLOADED and epObj.status not in Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.SNATCHED_FRENCH + Quality.DOWNLOADED + [IGNORED] and not ek.ek(os.path.isfile, epObj.location):
logger.log(u"Refusing to change status of "+curEp+" to DOWNLOADED because it's not SNATCHED/DOWNLOADED", logger.ERROR)
continue
epObj.status = int(status)
epObj.saveToDB()
msg = "Backlog was automatically started for the following seasons of <b>"+showObj.name+"</b>:<br />"
for cur_segment in segment_list:
msg += "<li>Season "+str(cur_segment)+"</li>"
logger.log(u"Sending backlog for "+showObj.name+" season "+str(cur_segment)+" because some eps were set to wanted")
cur_backlog_queue_item = search_queue.BacklogQueueItem(showObj, cur_segment)
sickbeard.searchQueueScheduler.action.add_item(cur_backlog_queue_item) #@UndefinedVariable
msg += "</ul>"
if segment_list:
ui.notifications.message("Backlog started", msg)
if direct:
return json.dumps({'result': 'success'})
else:
redirect("/home/displayShow?show=" + show)
@cherrypy.expose
def setAudio(self, show=None, eps=None, audio_langs=None, direct=False):
if show == None or eps == None or audio_langs == None:
errMsg = "You must specify a show and at least one episode"
if direct:
ui.notifications.error('Error', errMsg)
return json.dumps({'result': 'error'})
else:
return _genericMessage("Error", errMsg)
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Show not in show list")
try:
show_loc = showObj.location #@UnusedVariable
except exceptions.ShowDirNotFoundException:
return _genericMessage("Error", "Can't rename episodes when the show dir is missing.")
ep_obj_rename_list = []
for curEp in eps.split('|'):
logger.log(u"Attempting to set audio on episode "+curEp+" to "+audio_langs, logger.DEBUG)
epInfo = curEp.split('x')
epObj = showObj.getEpisode(int(epInfo[0]), int(epInfo[1]))
epObj.audio_langs = str(audio_langs)
epObj.saveToDB()
if direct:
return json.dumps({'result': 'success'})
else:
redirect("/home/displayShow?show=" + show)
@cherrypy.expose
def testRename(self, show=None):
if show == None:
return _genericMessage("Error", "You must specify a show")
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj == None:
return _genericMessage("Error", "Show not in show list")
try:
show_loc = showObj.location #@UnusedVariable
except exceptions.ShowDirNotFoundException:
return _genericMessage("Error", "Can't rename episodes when the show dir is missing.")
ep_obj_rename_list = []
ep_obj_list = showObj.getAllEpisodes(has_location=True)
for cur_ep_obj in ep_obj_list:
# Only want to rename if we have a location
if cur_ep_obj.location:
if cur_ep_obj.relatedEps:
# do we have one of multi-episodes in the rename list already
have_already = False
for cur_related_ep in cur_ep_obj.relatedEps + [cur_ep_obj]:
if cur_related_ep in ep_obj_rename_list:
have_already = True
break
if not have_already:
ep_obj_rename_list.append(cur_ep_obj)
else:
ep_obj_rename_list.append(cur_ep_obj)
if ep_obj_rename_list:
# present season DESC episode DESC on screen
ep_obj_rename_list.reverse()
t = PageTemplate(file="testRename.tmpl")
t.submenu = [{'title': 'Edit', 'path': 'home/editShow?show=%d' % showObj.tvdbid}]
t.ep_obj_list = ep_obj_rename_list
t.show = showObj
return _munge(t)
@cherrypy.expose
def doRename(self, show=None, eps=None):
if show == None or eps == None:
errMsg = "You must specify a show and at least one episode"
return _genericMessage("Error", errMsg)
show_obj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if show_obj == None:
errMsg = "Error", "Show not in show list"
return _genericMessage("Error", errMsg)
try:
show_loc = show_obj.location #@UnusedVariable
except exceptions.ShowDirNotFoundException:
return _genericMessage("Error", "Can't rename episodes when the show dir is missing.")
myDB = db.DBConnection()
for curEp in eps.split('|'):
epInfo = curEp.split('x')
# this is probably the worst possible way to deal with double eps but I've kinda painted myself into a corner here with this stupid database
ep_result = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?", [show, epInfo[0], epInfo[1]])
if not ep_result:
logger.log(u"Unable to find an episode for "+curEp+", skipping", logger.WARNING)
continue
related_eps_result = myDB.select("SELECT * FROM tv_episodes WHERE location = ? AND episode != ?", [ep_result[0]["location"], epInfo[1]])
root_ep_obj = show_obj.getEpisode(int(epInfo[0]), int(epInfo[1]))
for cur_related_ep in related_eps_result:
related_ep_obj = show_obj.getEpisode(int(cur_related_ep["season"]), int(cur_related_ep["episode"]))
if related_ep_obj not in root_ep_obj.relatedEps:
root_ep_obj.relatedEps.append(related_ep_obj)
root_ep_obj.rename()
redirect("/home/displayShow?show=" + show)
@cherrypy.expose
def trunchistory(self, epid):
myDB = db.DBConnection()
nbep = myDB.select("Select count(*) from episode_links where episode_id=?",[epid])
myDB.action("DELETE from episode_links where episode_id=?",[epid])
messnum = str(nbep[0][0]) + ' history links deleted'
ui.notifications.message('Episode History Truncated' , messnum)
return json.dumps({'result': 'ok'})
@cherrypy.expose
def searchEpisode(self, show=None, season=None, episode=None):
# retrieve the episode object and fail if we can't get one
ep_obj = _getEpisode(show, season, episode)
if isinstance(ep_obj, str):
return json.dumps({'result': 'failure'})
# make a queue item for it and put it on the queue
ep_queue_item = search_queue.ManualSearchQueueItem(ep_obj)
sickbeard.searchQueueScheduler.action.add_item(ep_queue_item) #@UndefinedVariable
# wait until the queue item tells us whether it worked or not
while ep_queue_item.success == None: #@UndefinedVariable
time.sleep(1)
# return the correct json value
if ep_queue_item.success:
return json.dumps({'result': statusStrings[ep_obj.status]})
return json.dumps({'result': 'failure'})
@cherrypy.expose
def searchEpisodeSubtitles(self, show=None, season=None, episode=None):
# retrieve the episode object and fail if we can't get one
ep_obj = _getEpisode(show, season, episode)
if isinstance(ep_obj, str):
return json.dumps({'result': 'failure'})
# try to download subtitles for that episode
previous_subtitles = ep_obj.subtitles
try:
subtitles = ep_obj.downloadSubtitles()
if sickbeard.SUBTITLES_DIR:
for video in subtitles:
subs_new_path = ek.ek(os.path.join, os.path.dirname(video.path), sickbeard.SUBTITLES_DIR)
dir_exists = helpers.makeDir(subs_new_path)
if not dir_exists:
logger.log(u"Unable to create subtitles folder "+subs_new_path, logger.ERROR)
else:
helpers.chmodAsParent(subs_new_path)
for subtitle in subtitles.get(video):
new_file_path = ek.ek(os.path.join, subs_new_path, os.path.basename(subtitle.path))
helpers.moveFile(subtitle.path, new_file_path)
if sickbeard.SUBSNOLANG:
helpers.copyFile(new_file_path,new_file_path[:-6]+"srt")
helpers.chmodAsParent(new_file_path[:-6]+"srt")
helpers.chmodAsParent(new_file_path)
else:
if sickbeard.SUBTITLES_DIR_SUB:
for video in subtitles:
subs_new_path = os.path.join(os.path.dirname(video.path),"Subs")
dir_exists = helpers.makeDir(subs_new_path)
if not dir_exists:
logger.log(u"Unable to create subtitles folder "+subs_new_path, logger.ERROR)
else:
helpers.chmodAsParent(subs_new_path)
for subtitle in subtitles.get(video):
new_file_path = ek.ek(os.path.join, subs_new_path, os.path.basename(subtitle.path))
helpers.moveFile(subtitle.path, new_file_path)
if sickbeard.SUBSNOLANG:
helpers.copyFile(new_file_path,new_file_path[:-6]+"srt")
helpers.chmodAsParent(new_file_path[:-6]+"srt")
helpers.chmodAsParent(new_file_path)
else:
for video in subtitles:
for subtitle in subtitles.get(video):
if sickbeard.SUBSNOLANG:
helpers.copyFile(subtitle.path,subtitle.path[:-6]+"srt")
helpers.chmodAsParent(subtitle.path[:-6]+"srt")
helpers.chmodAsParent(subtitle.path)
except:
return json.dumps({'result': 'failure'})
# return the correct json value
if previous_subtitles != ep_obj.subtitles:
status = 'New subtitles downloaded: %s' % ' '.join(["<img src='"+sickbeard.WEB_ROOT+"/images/flags/"+subliminal.language.Language(x).alpha2+".png' alt='"+subliminal.language.Language(x).name+"'/>" for x in sorted(list(set(ep_obj.subtitles).difference(previous_subtitles)))])
else:
status = 'No subtitles downloaded'
ui.notifications.message('Subtitles Search', status)
return json.dumps({'result': status, 'subtitles': ','.join([x for x in ep_obj.subtitles])})
@cherrypy.expose
def mergeEpisodeSubtitles(self, show=None, season=None, episode=None):
# retrieve the episode object and fail if we can't get one
ep_obj = _getEpisode(show, season, episode)
if isinstance(ep_obj, str):
return json.dumps({'result': 'failure'})
# try to merge subtitles for that episode
try:
ep_obj.mergeSubtitles()
except Exception as e:
return json.dumps({'result': 'failure', 'exception': str(e)})
# return the correct json value
status = 'Subtitles merged successfully'
ui.notifications.message('Merge Subtitles', status)
return json.dumps({'result': 'ok'})
class UI:
@cherrypy.expose
def add_message(self):
ui.notifications.message('Test 1', 'This is test number 1')
ui.notifications.error('Test 2', 'This is test number 2')
return "ok"
@cherrypy.expose
def get_messages(self):
messages = {}
cur_notification_num = 1
for cur_notification in ui.notifications.get_notifications():
messages['notification-'+str(cur_notification_num)] = {'title': cur_notification.title,
'message': cur_notification.message,
'type': cur_notification.type}
cur_notification_num += 1
return json.dumps(messages)
class WebInterface:
@cherrypy.expose
def index(self):
redirect("/home")
@cherrypy.expose
def showPoster(self, show=None, which=None):
#Redirect initial poster/banner thumb to default images
if which and which[0:6] == 'poster':
default_image_name = 'poster.png'
else:
default_image_name = 'banner.png'
default_image_path = ek.ek(os.path.join, sickbeard.PROG_DIR, 'data', 'images', default_image_name)
if show is None:
return cherrypy.lib.static.serve_file(default_image_path, content_type="image/png")
else:
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show))
if showObj is None:
return cherrypy.lib.static.serve_file(default_image_path, content_type="image/png")
cache_obj = image_cache.ImageCache()
if which == 'poster':
    image_file_name = cache_obj.poster_path(showObj.tvdbid)
elif which == 'poster_thumb':
    image_file_name = cache_obj.poster_thumb_path(showObj.tvdbid)
elif which == 'banner':
    image_file_name = cache_obj.banner_path(showObj.tvdbid)
elif which == 'banner_thumb':
    image_file_name = cache_obj.banner_thumb_path(showObj.tvdbid)
else:
    # unknown image type requested - serve the default straight away
    return cherrypy.lib.static.serve_file(default_image_path, content_type="image/png")
if ek.ek(os.path.isfile, image_file_name):
return cherrypy.lib.static.serve_file(image_file_name, content_type="image/jpeg")
else:
return cherrypy.lib.static.serve_file(default_image_path, content_type="image/png")
@cherrypy.expose
def setHomeLayout(self, layout):
if layout not in ('poster', 'banner', 'simple'):
layout = 'poster'
sickbeard.HOME_LAYOUT = layout
redirect("/home")
@cherrypy.expose
def setHomeSearch(self, search):
if search not in ('True', 'False'):
search = 'False'
sickbeard.TOGGLE_SEARCH = search
redirect("/home")
@cherrypy.expose
def toggleDisplayShowSpecials(self, show):
sickbeard.DISPLAY_SHOW_SPECIALS = not sickbeard.DISPLAY_SHOW_SPECIALS
redirect("/home/displayShow?show=" + show)
@cherrypy.expose
def setComingEpsLayout(self, layout):
if layout not in ('poster', 'banner', 'list'):
layout = 'banner'
sickbeard.COMING_EPS_LAYOUT = layout
redirect("/comingEpisodes")
@cherrypy.expose
def toggleComingEpsDisplayPaused(self):
sickbeard.COMING_EPS_DISPLAY_PAUSED = not sickbeard.COMING_EPS_DISPLAY_PAUSED
redirect("/comingEpisodes")
@cherrypy.expose
def setComingEpsSort(self, sort):
if sort not in ('date', 'network', 'show'):
sort = 'date'
sickbeard.COMING_EPS_SORT = sort
redirect("/comingEpisodes")
@cherrypy.expose
def comingEpisodes(self, layout="None"):
# get local timezone and load network timezones
sb_timezone = tz.tzlocal()
network_dict = network_timezones.load_network_dict()
myDB = db.DBConnection()
today1 = datetime.date.today()
today = today1.toordinal()
next_week1 = (datetime.date.today() + datetime.timedelta(days=7))
next_week = next_week1.toordinal()
recently = (datetime.date.today() - datetime.timedelta(days=sickbeard.COMING_EPS_MISSED_RANGE)).toordinal()
done_show_list = []
qualList = Quality.DOWNLOADED + Quality.SNATCHED + [ARCHIVED, IGNORED]
sql_results1 = myDB.select("SELECT *, 0 as localtime, tv_shows.status as show_status FROM tv_episodes, tv_shows WHERE season != 0 AND airdate >= ? AND airdate < ? AND tv_shows.tvdb_id = tv_episodes.showid AND tv_episodes.status NOT IN ("+','.join(['?']*len(qualList))+")", [today, next_week] + qualList)
for cur_result in sql_results1:
done_show_list.append(helpers.tryInt(cur_result["showid"]))
more_sql_results = myDB.select("SELECT *, tv_shows.status as show_status FROM tv_episodes outer_eps, tv_shows WHERE season != 0 AND showid NOT IN ("+','.join(['?']*len(done_show_list))+") AND tv_shows.tvdb_id = outer_eps.showid AND airdate IN (SELECT airdate FROM tv_episodes inner_eps WHERE inner_eps.showid = outer_eps.showid AND inner_eps.airdate >= ? AND inner_eps.status NOT IN ("+','.join(['?']*len(Quality.DOWNLOADED+Quality.SNATCHED))+") ORDER BY inner_eps.airdate ASC LIMIT 1)", done_show_list + [next_week] + Quality.DOWNLOADED + Quality.SNATCHED)
sql_results1 += more_sql_results
more_sql_results = myDB.select("SELECT *, 0 as localtime, tv_shows.status as show_status FROM tv_episodes, tv_shows WHERE season != 0 AND tv_shows.tvdb_id = tv_episodes.showid AND airdate < ? AND airdate >= ? AND tv_episodes.status = ? AND tv_episodes.status NOT IN ("+','.join(['?']*len(qualList))+")", [today, recently, WANTED] + qualList)
sql_results1 += more_sql_results
# sort by localtime
sorts = {
'date': (lambda x, y: cmp(x["localtime"], y["localtime"])),
'show': (lambda a, b: cmp((a["show_name"], a["localtime"]), (b["show_name"], b["localtime"]))),
'network': (lambda a, b: cmp((a["network"], a["localtime"]), (b["network"], b["localtime"]))),
}
# make a dict out of the sql results
sql_results = [dict(row) for row in sql_results1]
# regex to parse time (12/24 hour format)
time_regex = re.compile(r"(\d{1,2}):(\d{2,2})( [PA]M)?\b", flags=re.IGNORECASE)
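# e.g. "8:00 PM" matches as ('8', '00', ' PM') and "20:00" as ('20', '00', None)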
# add localtime to the dict
for index, item in enumerate(sql_results1):
mo = time_regex.search(item['airs'])
if mo != None and len(mo.groups()) >= 2:
try:
hr = helpers.tryInt(mo.group(1))
m = helpers.tryInt(mo.group(2))
ap = mo.group(3)
# convert am/pm to 24 hour clock
if ap != None:
if ap.lower() == u" pm" and hr != 12:
hr += 12
elif ap.lower() == u" am" and hr == 12:
hr -= 12
except:
hr = 0
m = 0
else:
hr = 0
m = 0
if hr < 0 or hr > 23 or m < 0 or m > 59:
hr = 0
m = 0
te = datetime.datetime.fromordinal(helpers.tryInt(item['airdate']))
foreign_timezone = network_timezones.get_network_timezone(item['network'], network_dict, sb_timezone)
foreign_naive = datetime.datetime(te.year, te.month, te.day, hr, m, tzinfo=foreign_timezone)
sql_results[index]['localtime'] = foreign_naive.astimezone(sb_timezone)
# Normalize/format the airing time ('en_US' is the intended locale; 'us_US' is not a valid locale name)
try:
    locale.setlocale(locale.LC_TIME, 'en_US')
    sql_results[index]['localtime_string'] = sql_results[index]['localtime'].strftime("%A %H:%M %p")
    locale.setlocale(locale.LC_ALL, '')  # Resetting to default locale
except:
    sql_results[index]['localtime_string'] = sql_results[index]['localtime'].strftime("%A %H:%M %p")
sql_results.sort(sorts[sickbeard.COMING_EPS_SORT])
t = PageTemplate(file="comingEpisodes.tmpl")
# paused_item = { 'title': '', 'path': 'toggleComingEpsDisplayPaused' }
# paused_item['title'] = 'Hide Paused' if sickbeard.COMING_EPS_DISPLAY_PAUSED else 'Show Paused'
paused_item = { 'title': 'View Paused:', 'path': {'': ''} }
paused_item['path'] = {'Hide': 'toggleComingEpsDisplayPaused'} if sickbeard.COMING_EPS_DISPLAY_PAUSED else {'Show': 'toggleComingEpsDisplayPaused'}
t.submenu = [
{ 'title': 'Sort by:', 'path': {'Date': 'setComingEpsSort/?sort=date',
'Show': 'setComingEpsSort/?sort=show',
'Network': 'setComingEpsSort/?sort=network',
}},
{ 'title': 'Layout:', 'path': {'Banner': 'setComingEpsLayout/?layout=banner',
'Poster': 'setComingEpsLayout/?layout=poster',
'List': 'setComingEpsLayout/?layout=list',
}},
paused_item,
]
t.next_week = datetime.datetime.combine(next_week1, datetime.time(tzinfo=sb_timezone))
t.today = datetime.datetime.now().replace(tzinfo=sb_timezone)
t.sql_results = sql_results
# Allow local overriding of layout parameter
if layout and layout in ('poster', 'banner', 'list'):
t.layout = layout
else:
t.layout = sickbeard.COMING_EPS_LAYOUT
return _munge(t)
# Raw iCalendar implementation by Pedro Jose Pereira Vieito (@pvieito).
#
# iCalendar (iCal) - Standard RFC 5545 <http://tools.ietf.org/html/rfc5545>
# Works with iCloud, Google Calendar and Outlook.
@cherrypy.expose
def calendar(self):
""" Provides a subscribeable URL for iCal subscriptions
"""
logger.log(u"Receiving iCal request from %s" % cherrypy.request.remote.ip)
poster_url = cherrypy.url().replace('ical', '')
# note: the am/pm suffix must be matched as alternatives ([AaPp][Mm]), not as a character class
time_re = re.compile(r'([0-9]{1,2})\:([0-9]{2})(\ |)([AaPp][Mm])')
# Create a iCal string
ical = 'BEGIN:VCALENDAR\n'
ical += 'VERSION:2.0\n'
ical += 'PRODID://Sick-Beard Upcoming Episodes//\n'
# Get shows info
myDB = db.DBConnection()
# Limit dates
past_date = (datetime.date.today() + datetime.timedelta(weeks=-2)).toordinal()
future_date = (datetime.date.today() + datetime.timedelta(weeks=52)).toordinal()
# Get all the shows that are not paused and are currently on air (from kjoconnor Fork)
calendar_shows = myDB.select("SELECT show_name, tvdb_id, network, airs, runtime FROM tv_shows WHERE status = 'Continuing' AND paused != '1'")
for show in calendar_shows:
# Get all episodes of this show airing between today and next month
episode_list = myDB.select("SELECT tvdbid, name, season, episode, description, airdate FROM tv_episodes WHERE airdate >= ? AND airdate < ? AND showid = ?", (past_date, future_date, int(show["tvdb_id"])))
# Get local timezone and load network timezones
local_zone = tz.tzlocal()
try:
network_zone = network_timezones.get_network_timezone(show['network'], network_timezones.load_network_dict(), local_zone)
except:
# Dummy network_zone for exceptions
network_zone = None
for episode in episode_list:
# Get the air date and time
air_date = datetime.datetime.fromordinal(int(episode['airdate']))
air_time = time_re.search(show["airs"])
# Parse out the air time
try:
if (air_time.group(4).lower() == 'pm' and int(air_time.group(1)) == 12):
t = datetime.time(12, int(air_time.group(2)), 0, tzinfo=network_zone)
elif (air_time.group(4).lower() == 'pm'):
t = datetime.time((int(air_time.group(1)) + 12), int(air_time.group(2)), 0, tzinfo=network_zone)
elif (air_time.group(4).lower() == 'am' and int(air_time.group(1)) == 12):
t = datetime.time(0, int(air_time.group(2)), 0, tzinfo=network_zone)
else:
t = datetime.time(int(air_time.group(1)), int(air_time.group(2)), 0, tzinfo=network_zone)
except:
# Dummy time for exceptions
t = datetime.time(22, 0, 0, tzinfo=network_zone)
# Combine air time and air date into one datetime object
air_date_time = datetime.datetime.combine(air_date, t).astimezone(local_zone)
# Create event for episode
ical = ical + 'BEGIN:VEVENT\n'
ical = ical + 'DTSTART:' + str(air_date_time.date()).replace("-", "") + '\n'
ical = ical + 'SUMMARY:' + show['show_name'] + ': ' + episode['name'] + '\n'
ical = ical + 'UID:' + str(datetime.date.today().isoformat()) + '-' + str(random.randint(10000,99999)) + '@Sick-Beard\n'
if (episode['description'] != ''):
ical = ical + 'DESCRIPTION:' + show['airs'] + ' on ' + show['network'] + '\\n\\n' + episode['description'] + '\n'
else:
ical = ical + 'DESCRIPTION:' + show['airs'] + ' on ' + show['network'] + '\n'
ical = ical + 'LOCATION:' + 'Episode ' + str(episode['episode']) + ' - Season ' + str(episode['season']) + '\n'
ical = ical + 'END:VEVENT\n'
# Ending the iCal
ical += 'END:VCALENDAR\n'
return ical
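# A single generated event looks roughly like this (all values illustrative):
#   BEGIN:VEVENT
#   DTSTART:20140102
#   SUMMARY:Some Show: Some Episode
#   UID:2014-01-01-54321@Sick-Beard
#   DESCRIPTION:8:00 PM on Some Network
#   LOCATION:Episode 3 - Season 1
#   END:VEVENT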
manage = Manage()
history = History()
config = Config()
home = Home()
api = Api()
browser = browser.WebFileBrowser()
errorlogs = ErrorLogs()
ui = UI()
|
nomaro/SickBeard_Backup
|
sickbeard/webserve.py
|
Python
|
gpl-3.0
| 155,575
|
import codecs
from ConfigParser import ConfigParser
import os
import subprocess
import sys
import six
import twiggy
from twiggy import log
from twiggy.levels import name2level
from xdg import BaseDirectory
def asbool(some_value):
""" Cast config values to boolean. """
return six.text_type(some_value).lower() in [
'y', 'yes', 't', 'true', '1', 'on'
]
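# Doctest-style sketch of the accepted truthy spellings:
#   >>> asbool('Yes')
#   True
#   >>> asbool(1)
#   True
#   >>> asbool('off')
#   False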
def get_service_password(service, username, oracle=None, interactive=False):
"""
Retrieve the sensitive password for a service by:
* retrieving password from a secure store (@oracle:use_keyring, default)
* asking the password from the user (@oracle:ask_password, interactive)
* executing a command and use the output as password
(@oracle:eval:<command>)
Note that the keyring may be locked; unlocking it requires
the user to provide a password (interactive mode).
:param service: Service name, may be key into secure store (as string).
:param username: Username for the service (as string).
:param oracle: Hint which password oracle strategy to use.
:return: Retrieved password (as string)
.. seealso::
https://bitbucket.org/kang/python-keyring-lib
"""
import getpass
import keyring
password = None
if not oracle or oracle == "@oracle:use_keyring":
password = keyring.get_password(service, username)
if interactive and password is None:
# -- LEARNING MODE: Password is not stored in keyring yet.
oracle = "@oracle:ask_password"
password = get_service_password(service, username,
oracle, interactive=True)
if password:
keyring.set_password(service, username, password)
elif interactive and oracle == "@oracle:ask_password":
prompt = "%s password: " % service
password = getpass.getpass(prompt)
elif oracle.startswith('@oracle:eval:'):
command = oracle[13:]
p = subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
#stderr=subprocess.STDOUT
)
password = p.stdout.read()[:-1]  # strip the trailing newline from the command output
if password is None:
die("MISSING PASSWORD: oracle='%s', interactive=%s for service=%s" %
(oracle, interactive, service))
return password
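# Minimal usage sketch (service and username are made-up examples):
#   get_service_password('github', 'alice')  # default: look up in the keyring
#   get_service_password('github', 'alice', oracle='@oracle:ask_password', interactive=True)
#   get_service_password('github', 'alice', oracle='@oracle:eval:pass github/alice')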
def load_example_rc():
fname = os.path.join(
os.path.dirname(__file__),
'docs/configuration.rst'
)
with open(fname, 'r') as f:
readme = f.read()
example = readme.split('.. example')[1][4:]
return example
error_template = """
*************************************************
* There was a problem with your bugwarriorrc *
* {msg}
* Here's an example template to help: *
*************************************************
{example}"""
def die(msg):
log.options(suppress_newlines=False).critical(
error_template,
msg=msg,
example=load_example_rc(),
)
sys.exit(1)
def validate_config(config, main_section):
if not config.has_section(main_section):
die("No [%s] section found." % main_section)
twiggy.quickSetup(
name2level(config.get(main_section, 'log.level')),
config.get(main_section, 'log.file')
)
if not config.has_option(main_section, 'targets'):
die("No targets= item in [%s] found." % main_section)
targets = config.get(main_section, 'targets')
targets = filter(lambda t: len(t), [t.strip() for t in targets.split(",")])
if not targets:
die("Empty targets= item in [%s]." % main_section)
for target in targets:
if target not in config.sections():
die("No [%s] section found." % target)
# Validate each target one by one.
for target in targets:
service = config.get(target, 'service')
if not service:
die("No 'service' in [%s]" % target)
if service not in SERVICES:
die("'%s' in [%s] is not a valid service." % (service, target))
# Call the service-specific validator
SERVICES[service].validate_config(config, target)
def load_config(main_section):
config = ConfigParser({'log.level': "DEBUG", 'log.file': None})
path = None
first_path = BaseDirectory.load_first_config('bugwarrior')
if first_path is not None:
path = os.path.join(first_path, 'bugwarriorrc')
old_path = os.path.expanduser("~/.bugwarriorrc")
if path is None or not os.path.exists(path):
if os.path.exists(old_path):
path = old_path
else:
path = os.path.join(BaseDirectory.save_config_path('bugwarrior'), 'bugwarriorrc')
config.readfp(
codecs.open(
path,
"r",
"utf-8",
)
)
config.interactive = False # TODO: make this a command-line option
validate_config(config, main_section)
return config
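# A minimal rc file that would pass validate_config, assuming main_section is
# 'general' (section/option names mirror the checks above; the service value
# must be one of the registered SERVICES):
#
#   [general]
#   targets = my_bugs
#   log.level = DEBUG
#
#   [my_bugs]
#   service = github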
def get_taskrc_path(conf, main_section):
path = '~/.taskrc'
if conf.has_option(main_section, 'taskrc'):
path = conf.get(main_section, 'taskrc')
return os.path.normpath(
os.path.expanduser(path)
)
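# e.g. "taskrc = ~/dotfiles/taskrc" in the main section expands to
# /home/<user>/dotfiles/taskrc; without the option it falls back to ~/.taskrc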
# This needs to be imported here and not above to avoid a circular-import.
from bugwarrior.services import SERVICES
|
zackp30/bugwarrior
|
bugwarrior/config.py
|
Python
|
gpl-3.0
| 5,298
|