code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
############################################################################
##
## Copyright (C) 2006-2007 University of Utah. All rights reserved.
##
## This file is part of VisTrails.
##
## This file may be used under the terms of the GNU General Public
## License version 2.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following to ensure GNU General Public
## Licensing requirements will be met:
## http://www.opensource.org/licenses/gpl-license.php
##
## If you are unsure which license is appropriate for your use (for
## instance, you are interested in developing a commercial derivative
## of VisTrails), please contact us at contact@vistrails.org.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
############################################################################
# This file was created automatically by SWIG 1.3.27.
# Don't modify this file, modify the SWIG interface instead.
import _TetGen
# This file is compatible with both classic and new-style classes.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    """SWIG helper: assign *name* on *self*, routing through the C setters.

    Assigning ``"this"`` from another proxy of the same class adopts its
    C++ pointer (and its ownership flag).  Otherwise the generated setter
    registered in ``class_type.__swig_setmethods__`` is preferred; a plain
    Python attribute is only accepted when *static* is false, the attribute
    already exists on the instance, or it is the special ``"thisown"`` flag.
    """
    if name == "this" and isinstance(value, class_type):
        # Adopt the C++ pointer (and ownership flag) held by the other proxy.
        self.__dict__[name] = value.this
        if hasattr(value, "thisown"):
            self.__dict__["thisown"] = value.thisown
        del value.thisown
        return
    setter = class_type.__swig_setmethods__.get(name, None)
    if setter:
        return setter(self, value)
    if static and not hasattr(self, name) and name != "thisown":
        raise AttributeError("You cannot add attributes to %s" % self)
    self.__dict__[name] = value
def _swig_setattr(self, class_type, name, value):
    """Non-static flavour of :func:`_swig_setattr_nondynamic`.

    Used as the ``__setattr__`` of the generated proxy classes: new
    attributes are always allowed to be added dynamically.
    """
    return _swig_setattr_nondynamic(self, class_type, name, value, static=0)
def _swig_getattr(self, class_type, name):
    """SWIG helper: read *name* via the generated C getter for *class_type*.

    Looks the attribute up in ``class_type.__swig_getmethods__`` and calls
    the registered getter with the instance.  Raises :class:`AttributeError`
    when no getter is registered — the attribute protocol requires this so
    that ``hasattr`` works correctly on proxy objects.
    """
    method = class_type.__swig_getmethods__.get(name, None)
    if method:
        return method(self)
    # Bug fix: `raise AttributeError, name` is Python-2-only syntax and is a
    # SyntaxError on Python 3; the call form below behaves identically on
    # both Python 2 and Python 3.
    raise AttributeError(name)
import types
# Compatibility shim emitted by SWIG 1.3: on interpreters that expose
# types.ObjectType (new-style classes available), proxies derive from
# `object` and properties can be used (_newclass == 1); otherwise a minimal
# classic class serves as the common base (_newclass == 0).
try:
    _object = types.ObjectType
    _newclass = 1
except AttributeError:
    class _object : pass
    _newclass = 0
del types
class tetgenio(_object):
    """Proxy of the C++ ``tetgenio`` I/O container used by TetGen.

    Generated by SWIG 1.3.27: attribute reads and writes are routed through
    the ``__swig_getmethods__`` / ``__swig_setmethods__`` tables into the
    ``_TetGen`` extension module.  The object holds the complete description
    of a mesh (points, tetrahedra, facets, holes, regions, constraints, ...)
    for input to and output from ``tetrahedralize``.
    """
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, tetgenio, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, tetgenio, name)
    def __repr__(self):
        return "<%s.%s; proxy of C++ tetgenio instance at %s>" % (self.__class__.__module__, self.__class__.__name__, self.this,)
    FILENAMESIZE = _TetGen.tetgenio_FILENAMESIZE
    INPUTLINESIZE = _TetGen.tetgenio_INPUTLINESIZE
    __swig_getmethods__["init"] = lambda x: _TetGen.tetgenio_init
    if _newclass:init = staticmethod(_TetGen.tetgenio_init)
    # --- point data ---
    __swig_setmethods__["firstnumber"] = _TetGen.tetgenio_firstnumber_set
    __swig_getmethods__["firstnumber"] = _TetGen.tetgenio_firstnumber_get
    if _newclass:firstnumber = property(_TetGen.tetgenio_firstnumber_get, _TetGen.tetgenio_firstnumber_set)
    __swig_setmethods__["mesh_dim"] = _TetGen.tetgenio_mesh_dim_set
    __swig_getmethods__["mesh_dim"] = _TetGen.tetgenio_mesh_dim_get
    if _newclass:mesh_dim = property(_TetGen.tetgenio_mesh_dim_get, _TetGen.tetgenio_mesh_dim_set)
    __swig_setmethods__["pointlist"] = _TetGen.tetgenio_pointlist_set
    __swig_getmethods__["pointlist"] = _TetGen.tetgenio_pointlist_get
    if _newclass:pointlist = property(_TetGen.tetgenio_pointlist_get, _TetGen.tetgenio_pointlist_set)
    __swig_setmethods__["pointattributelist"] = _TetGen.tetgenio_pointattributelist_set
    __swig_getmethods__["pointattributelist"] = _TetGen.tetgenio_pointattributelist_get
    if _newclass:pointattributelist = property(_TetGen.tetgenio_pointattributelist_get, _TetGen.tetgenio_pointattributelist_set)
    __swig_setmethods__["addpointlist"] = _TetGen.tetgenio_addpointlist_set
    __swig_getmethods__["addpointlist"] = _TetGen.tetgenio_addpointlist_get
    if _newclass:addpointlist = property(_TetGen.tetgenio_addpointlist_get, _TetGen.tetgenio_addpointlist_set)
    __swig_setmethods__["addpointattributelist"] = _TetGen.tetgenio_addpointattributelist_set
    __swig_getmethods__["addpointattributelist"] = _TetGen.tetgenio_addpointattributelist_get
    if _newclass:addpointattributelist = property(_TetGen.tetgenio_addpointattributelist_get, _TetGen.tetgenio_addpointattributelist_set)
    __swig_setmethods__["pointmarkerlist"] = _TetGen.tetgenio_pointmarkerlist_set
    __swig_getmethods__["pointmarkerlist"] = _TetGen.tetgenio_pointmarkerlist_get
    if _newclass:pointmarkerlist = property(_TetGen.tetgenio_pointmarkerlist_get, _TetGen.tetgenio_pointmarkerlist_set)
    __swig_setmethods__["numberofpoints"] = _TetGen.tetgenio_numberofpoints_set
    __swig_getmethods__["numberofpoints"] = _TetGen.tetgenio_numberofpoints_get
    if _newclass:numberofpoints = property(_TetGen.tetgenio_numberofpoints_get, _TetGen.tetgenio_numberofpoints_set)
    __swig_setmethods__["numberofpointattributes"] = _TetGen.tetgenio_numberofpointattributes_set
    __swig_getmethods__["numberofpointattributes"] = _TetGen.tetgenio_numberofpointattributes_get
    if _newclass:numberofpointattributes = property(_TetGen.tetgenio_numberofpointattributes_get, _TetGen.tetgenio_numberofpointattributes_set)
    __swig_setmethods__["numberofaddpoints"] = _TetGen.tetgenio_numberofaddpoints_set
    __swig_getmethods__["numberofaddpoints"] = _TetGen.tetgenio_numberofaddpoints_get
    if _newclass:numberofaddpoints = property(_TetGen.tetgenio_numberofaddpoints_get, _TetGen.tetgenio_numberofaddpoints_set)
    # --- tetrahedron data ---
    __swig_setmethods__["tetrahedronlist"] = _TetGen.tetgenio_tetrahedronlist_set
    __swig_getmethods__["tetrahedronlist"] = _TetGen.tetgenio_tetrahedronlist_get
    if _newclass:tetrahedronlist = property(_TetGen.tetgenio_tetrahedronlist_get, _TetGen.tetgenio_tetrahedronlist_set)
    __swig_setmethods__["tetrahedronattributelist"] = _TetGen.tetgenio_tetrahedronattributelist_set
    __swig_getmethods__["tetrahedronattributelist"] = _TetGen.tetgenio_tetrahedronattributelist_get
    if _newclass:tetrahedronattributelist = property(_TetGen.tetgenio_tetrahedronattributelist_get, _TetGen.tetgenio_tetrahedronattributelist_set)
    __swig_setmethods__["tetrahedronvolumelist"] = _TetGen.tetgenio_tetrahedronvolumelist_set
    __swig_getmethods__["tetrahedronvolumelist"] = _TetGen.tetgenio_tetrahedronvolumelist_get
    if _newclass:tetrahedronvolumelist = property(_TetGen.tetgenio_tetrahedronvolumelist_get, _TetGen.tetgenio_tetrahedronvolumelist_set)
    __swig_setmethods__["neighborlist"] = _TetGen.tetgenio_neighborlist_set
    __swig_getmethods__["neighborlist"] = _TetGen.tetgenio_neighborlist_get
    if _newclass:neighborlist = property(_TetGen.tetgenio_neighborlist_get, _TetGen.tetgenio_neighborlist_set)
    __swig_setmethods__["numberoftetrahedra"] = _TetGen.tetgenio_numberoftetrahedra_set
    __swig_getmethods__["numberoftetrahedra"] = _TetGen.tetgenio_numberoftetrahedra_get
    if _newclass:numberoftetrahedra = property(_TetGen.tetgenio_numberoftetrahedra_get, _TetGen.tetgenio_numberoftetrahedra_set)
    __swig_setmethods__["numberofcorners"] = _TetGen.tetgenio_numberofcorners_set
    __swig_getmethods__["numberofcorners"] = _TetGen.tetgenio_numberofcorners_get
    if _newclass:numberofcorners = property(_TetGen.tetgenio_numberofcorners_get, _TetGen.tetgenio_numberofcorners_set)
    __swig_setmethods__["numberoftetrahedronattributes"] = _TetGen.tetgenio_numberoftetrahedronattributes_set
    __swig_getmethods__["numberoftetrahedronattributes"] = _TetGen.tetgenio_numberoftetrahedronattributes_get
    if _newclass:numberoftetrahedronattributes = property(_TetGen.tetgenio_numberoftetrahedronattributes_get, _TetGen.tetgenio_numberoftetrahedronattributes_set)
    # --- facet / hole / region data ---
    __swig_setmethods__["facetlist"] = _TetGen.tetgenio_facetlist_set
    __swig_getmethods__["facetlist"] = _TetGen.tetgenio_facetlist_get
    if _newclass:facetlist = property(_TetGen.tetgenio_facetlist_get, _TetGen.tetgenio_facetlist_set)
    __swig_setmethods__["facetmarkerlist"] = _TetGen.tetgenio_facetmarkerlist_set
    __swig_getmethods__["facetmarkerlist"] = _TetGen.tetgenio_facetmarkerlist_get
    if _newclass:facetmarkerlist = property(_TetGen.tetgenio_facetmarkerlist_get, _TetGen.tetgenio_facetmarkerlist_set)
    __swig_setmethods__["numberoffacets"] = _TetGen.tetgenio_numberoffacets_set
    __swig_getmethods__["numberoffacets"] = _TetGen.tetgenio_numberoffacets_get
    if _newclass:numberoffacets = property(_TetGen.tetgenio_numberoffacets_get, _TetGen.tetgenio_numberoffacets_set)
    __swig_setmethods__["holelist"] = _TetGen.tetgenio_holelist_set
    __swig_getmethods__["holelist"] = _TetGen.tetgenio_holelist_get
    if _newclass:holelist = property(_TetGen.tetgenio_holelist_get, _TetGen.tetgenio_holelist_set)
    __swig_setmethods__["numberofholes"] = _TetGen.tetgenio_numberofholes_set
    __swig_getmethods__["numberofholes"] = _TetGen.tetgenio_numberofholes_get
    if _newclass:numberofholes = property(_TetGen.tetgenio_numberofholes_get, _TetGen.tetgenio_numberofholes_set)
    __swig_setmethods__["regionlist"] = _TetGen.tetgenio_regionlist_set
    __swig_getmethods__["regionlist"] = _TetGen.tetgenio_regionlist_get
    if _newclass:regionlist = property(_TetGen.tetgenio_regionlist_get, _TetGen.tetgenio_regionlist_set)
    __swig_setmethods__["numberofregions"] = _TetGen.tetgenio_numberofregions_set
    __swig_getmethods__["numberofregions"] = _TetGen.tetgenio_numberofregions_get
    if _newclass:numberofregions = property(_TetGen.tetgenio_numberofregions_get, _TetGen.tetgenio_numberofregions_set)
    # --- constraint data ---
    __swig_setmethods__["facetconstraintlist"] = _TetGen.tetgenio_facetconstraintlist_set
    __swig_getmethods__["facetconstraintlist"] = _TetGen.tetgenio_facetconstraintlist_get
    if _newclass:facetconstraintlist = property(_TetGen.tetgenio_facetconstraintlist_get, _TetGen.tetgenio_facetconstraintlist_set)
    __swig_setmethods__["numberoffacetconstraints"] = _TetGen.tetgenio_numberoffacetconstraints_set
    __swig_getmethods__["numberoffacetconstraints"] = _TetGen.tetgenio_numberoffacetconstraints_get
    if _newclass:numberoffacetconstraints = property(_TetGen.tetgenio_numberoffacetconstraints_get, _TetGen.tetgenio_numberoffacetconstraints_set)
    __swig_setmethods__["segmentconstraintlist"] = _TetGen.tetgenio_segmentconstraintlist_set
    __swig_getmethods__["segmentconstraintlist"] = _TetGen.tetgenio_segmentconstraintlist_get
    if _newclass:segmentconstraintlist = property(_TetGen.tetgenio_segmentconstraintlist_get, _TetGen.tetgenio_segmentconstraintlist_set)
    __swig_setmethods__["numberofsegmentconstraints"] = _TetGen.tetgenio_numberofsegmentconstraints_set
    __swig_getmethods__["numberofsegmentconstraints"] = _TetGen.tetgenio_numberofsegmentconstraints_get
    if _newclass:numberofsegmentconstraints = property(_TetGen.tetgenio_numberofsegmentconstraints_get, _TetGen.tetgenio_numberofsegmentconstraints_set)
    __swig_setmethods__["nodeconstraintlist"] = _TetGen.tetgenio_nodeconstraintlist_set
    __swig_getmethods__["nodeconstraintlist"] = _TetGen.tetgenio_nodeconstraintlist_get
    if _newclass:nodeconstraintlist = property(_TetGen.tetgenio_nodeconstraintlist_get, _TetGen.tetgenio_nodeconstraintlist_set)
    __swig_setmethods__["numberofnodeconstraints"] = _TetGen.tetgenio_numberofnodeconstraints_set
    __swig_getmethods__["numberofnodeconstraints"] = _TetGen.tetgenio_numberofnodeconstraints_get
    if _newclass:numberofnodeconstraints = property(_TetGen.tetgenio_numberofnodeconstraints_get, _TetGen.tetgenio_numberofnodeconstraints_set)
    __swig_setmethods__["pbcgrouplist"] = _TetGen.tetgenio_pbcgrouplist_set
    __swig_getmethods__["pbcgrouplist"] = _TetGen.tetgenio_pbcgrouplist_get
    if _newclass:pbcgrouplist = property(_TetGen.tetgenio_pbcgrouplist_get, _TetGen.tetgenio_pbcgrouplist_set)
    __swig_setmethods__["numberofpbcgroups"] = _TetGen.tetgenio_numberofpbcgroups_set
    __swig_getmethods__["numberofpbcgroups"] = _TetGen.tetgenio_numberofpbcgroups_get
    if _newclass:numberofpbcgroups = property(_TetGen.tetgenio_numberofpbcgroups_get, _TetGen.tetgenio_numberofpbcgroups_set)
    # --- face / edge data ---
    __swig_setmethods__["trifacelist"] = _TetGen.tetgenio_trifacelist_set
    __swig_getmethods__["trifacelist"] = _TetGen.tetgenio_trifacelist_get
    if _newclass:trifacelist = property(_TetGen.tetgenio_trifacelist_get, _TetGen.tetgenio_trifacelist_set)
    __swig_setmethods__["adjtetlist"] = _TetGen.tetgenio_adjtetlist_set
    __swig_getmethods__["adjtetlist"] = _TetGen.tetgenio_adjtetlist_get
    if _newclass:adjtetlist = property(_TetGen.tetgenio_adjtetlist_get, _TetGen.tetgenio_adjtetlist_set)
    __swig_setmethods__["trifacemarkerlist"] = _TetGen.tetgenio_trifacemarkerlist_set
    __swig_getmethods__["trifacemarkerlist"] = _TetGen.tetgenio_trifacemarkerlist_get
    if _newclass:trifacemarkerlist = property(_TetGen.tetgenio_trifacemarkerlist_get, _TetGen.tetgenio_trifacemarkerlist_set)
    __swig_setmethods__["numberoftrifaces"] = _TetGen.tetgenio_numberoftrifaces_set
    __swig_getmethods__["numberoftrifaces"] = _TetGen.tetgenio_numberoftrifaces_get
    if _newclass:numberoftrifaces = property(_TetGen.tetgenio_numberoftrifaces_get, _TetGen.tetgenio_numberoftrifaces_set)
    __swig_setmethods__["edgelist"] = _TetGen.tetgenio_edgelist_set
    __swig_getmethods__["edgelist"] = _TetGen.tetgenio_edgelist_get
    if _newclass:edgelist = property(_TetGen.tetgenio_edgelist_get, _TetGen.tetgenio_edgelist_set)
    __swig_setmethods__["edgemarkerlist"] = _TetGen.tetgenio_edgemarkerlist_set
    __swig_getmethods__["edgemarkerlist"] = _TetGen.tetgenio_edgemarkerlist_get
    if _newclass:edgemarkerlist = property(_TetGen.tetgenio_edgemarkerlist_get, _TetGen.tetgenio_edgemarkerlist_set)
    __swig_setmethods__["numberofedges"] = _TetGen.tetgenio_numberofedges_set
    __swig_getmethods__["numberofedges"] = _TetGen.tetgenio_numberofedges_get
    if _newclass:numberofedges = property(_TetGen.tetgenio_numberofedges_get, _TetGen.tetgenio_numberofedges_set)
    # --- file I/O and parsing helpers, delegated to the C++ implementation ---
    def initialize(*args): return _TetGen.tetgenio_initialize(*args)
    def deinitialize(*args): return _TetGen.tetgenio_deinitialize(*args)
    def load_node_call(*args): return _TetGen.tetgenio_load_node_call(*args)
    def load_node(*args): return _TetGen.tetgenio_load_node(*args)
    def load_addnodes(*args): return _TetGen.tetgenio_load_addnodes(*args)
    def load_pbc(*args): return _TetGen.tetgenio_load_pbc(*args)
    def load_var(*args): return _TetGen.tetgenio_load_var(*args)
    def load_mtr(*args): return _TetGen.tetgenio_load_mtr(*args)
    def load_poly(*args): return _TetGen.tetgenio_load_poly(*args)
    def load_off(*args): return _TetGen.tetgenio_load_off(*args)
    def load_ply(*args): return _TetGen.tetgenio_load_ply(*args)
    def load_stl(*args): return _TetGen.tetgenio_load_stl(*args)
    def load_medit(*args): return _TetGen.tetgenio_load_medit(*args)
    def load_plc(*args): return _TetGen.tetgenio_load_plc(*args)
    def load_tetmesh(*args): return _TetGen.tetgenio_load_tetmesh(*args)
    def save_nodes(*args): return _TetGen.tetgenio_save_nodes(*args)
    def save_elements(*args): return _TetGen.tetgenio_save_elements(*args)
    def save_faces(*args): return _TetGen.tetgenio_save_faces(*args)
    def save_edges(*args): return _TetGen.tetgenio_save_edges(*args)
    def save_neighbors(*args): return _TetGen.tetgenio_save_neighbors(*args)
    def save_poly(*args): return _TetGen.tetgenio_save_poly(*args)
    def readline(*args): return _TetGen.tetgenio_readline(*args)
    def findnextfield(*args): return _TetGen.tetgenio_findnextfield(*args)
    def readnumberline(*args): return _TetGen.tetgenio_readnumberline(*args)
    def findnextnumber(*args): return _TetGen.tetgenio_findnextnumber(*args)
    def __init__(self, *args):
        # Create a fresh C++ tetgenio and take ownership of it.
        _swig_setattr(self, tetgenio, 'this', _TetGen.new_tetgenio(*args))
        _swig_setattr(self, tetgenio, 'thisown', 1)
    def __del__(self, destroy=_TetGen.delete_tetgenio):
        # Only destroy the C++ object if this proxy owns it; swallow errors
        # during interpreter shutdown (standard SWIG destructor pattern).
        try:
            if self.thisown: destroy(self)
        except: pass
class tetgenioPtr(tetgenio):
    # Pointer proxy: adopts an existing C++ pointer, then reclassifies
    # itself as a plain tetgenio proxy.  NOTE: the hasattr("thisown") check
    # must run after the 'this' assignment — assigning 'this' from another
    # proxy can itself set thisown (see _swig_setattr_nondynamic).
    def __init__(self, this):
        _swig_setattr(self, tetgenio, 'this', this)
        if not hasattr(self,"thisown"): _swig_setattr(self, tetgenio, 'thisown', 0)
        self.__class__ = tetgenio
_TetGen.tetgenio_swigregister(tetgenioPtr)

# Module-level alias for the static tetgenio.init() entry point.
tetgenio_init = _TetGen.tetgenio_init
class polygon(_object):
    """Proxy of the C++ ``tetgenio::polygon`` struct (SWIG-generated)."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, polygon, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, polygon, name)
    def __repr__(self):
        return "<%s.%s; proxy of C++ tetgenio::polygon instance at %s>" % (self.__class__.__module__, self.__class__.__name__, self.this,)
    def __init__(self, *args):
        # Create a fresh C++ polygon and take ownership of it.
        _swig_setattr(self, polygon, 'this', _TetGen.new_polygon(*args))
        _swig_setattr(self, polygon, 'thisown', 1)
    def __del__(self, destroy=_TetGen.delete_polygon):
        # Destroy the C++ object only when owned; standard SWIG pattern.
        try:
            if self.thisown: destroy(self)
        except: pass
class polygonPtr(polygon):
    # Pointer proxy: wraps an existing C++ pointer without taking ownership
    # (unless 'thisown' was propagated by the 'this' assignment).
    def __init__(self, this):
        _swig_setattr(self, polygon, 'this', this)
        if not hasattr(self,"thisown"): _swig_setattr(self, polygon, 'thisown', 0)
        self.__class__ = polygon
_TetGen.polygon_swigregister(polygonPtr)
class facet(_object):
    """Proxy of the C++ ``tetgenio::facet`` struct (SWIG-generated)."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, facet, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, facet, name)
    def __repr__(self):
        return "<%s.%s; proxy of C++ tetgenio::facet instance at %s>" % (self.__class__.__module__, self.__class__.__name__, self.this,)
    def __init__(self, *args):
        # Create a fresh C++ facet and take ownership of it.
        _swig_setattr(self, facet, 'this', _TetGen.new_facet(*args))
        _swig_setattr(self, facet, 'thisown', 1)
    def __del__(self, destroy=_TetGen.delete_facet):
        # Destroy the C++ object only when owned; standard SWIG pattern.
        try:
            if self.thisown: destroy(self)
        except: pass
class facetPtr(facet):
    # Pointer proxy: wraps an existing C++ pointer without taking ownership
    # (unless 'thisown' was propagated by the 'this' assignment).
    def __init__(self, this):
        _swig_setattr(self, facet, 'this', this)
        if not hasattr(self,"thisown"): _swig_setattr(self, facet, 'thisown', 0)
        self.__class__ = facet
_TetGen.facet_swigregister(facetPtr)
class pbcgroup(_object):
    """Proxy of the C++ ``tetgenio::pbcgroup`` struct (SWIG-generated)."""
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, pbcgroup, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, pbcgroup, name)
    def __repr__(self):
        return "<%s.%s; proxy of C++ tetgenio::pbcgroup instance at %s>" % (self.__class__.__module__, self.__class__.__name__, self.this,)
    def __init__(self, *args):
        # Create a fresh C++ pbcgroup and take ownership of it.
        _swig_setattr(self, pbcgroup, 'this', _TetGen.new_pbcgroup(*args))
        _swig_setattr(self, pbcgroup, 'thisown', 1)
    def __del__(self, destroy=_TetGen.delete_pbcgroup):
        # Destroy the C++ object only when owned; standard SWIG pattern.
        try:
            if self.thisown: destroy(self)
        except: pass
class pbcgroupPtr(pbcgroup):
    # Pointer proxy: wraps an existing C++ pointer without taking ownership
    # (unless 'thisown' was propagated by the 'this' assignment).
    def __init__(self, this):
        _swig_setattr(self, pbcgroup, 'this', this)
        if not hasattr(self,"thisown"): _swig_setattr(self, pbcgroup, 'thisown', 0)
        self.__class__ = pbcgroup
_TetGen.pbcgroup_swigregister(pbcgroupPtr)
# Free functions exported directly from the _TetGen extension module.
tetrahedralize = _TetGen.tetrahedralize
allocate_array = _TetGen.allocate_array
set_val = _TetGen.set_val
get_val = _TetGen.get_val
allocate_facet_array = _TetGen.allocate_facet_array
add_tri = _TetGen.add_tri
| Nikea/VisTrails | contrib/TetGenBridge/TetGen.py | Python | bsd-3-clause | 19,551 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Scatter Plots.
For two algorithms, this generates the scatter plot of log(ERT1(df)) vs.
log(ERT0(df)), where ERT0(df) is the ERT of the reference algorithm,
ERT1(df) is the ERT of the algorithm of concern, both for target
precision df.
Different symbols are used for different dimension (see
:py:data:`markers` for the order of the markers, :py:data:`colors` for
the corresponding colors).
The target precisions considered are in :py:data:`targets`: by
default 46 targets are uniformly spread on the log-scale in
10**[-8:2].
Boxes correspond to the maximum numbers of function evaluations for
each algorithm in each dimension.
"""
from __future__ import absolute_import
"""For two algorithms, ERTs(given target function value) can also be
plotted in a scatter plot (log(ERT0) vs. log(ERT1)), which results in a
very attractive presentation, see the slides of Frank Hutter at
http://www.msr-inria.inria.fr/events-news/first-search-biology-day. The
advantage is that the absolute values do not get lost. The disadvantage
(in our case minor) is that there is an upper limit of data that can be
displayed.
"""
import os
import numpy
import numpy as np
from pdb import set_trace
from matplotlib import pyplot as plt
try:
from matplotlib.transforms import blended_transform_factory as blend
except ImportError:
# compatibility matplotlib 0.8
from matplotlib.transforms import blend_xy_sep_transform as blend
from .. import genericsettings, htmldesc, ppfigparam
from ..ppfig import saveFigure, save_single_functions_html, AlgorithmCount
from .. import toolsdivers
from .. import pproc
# Dimensions plotted; each gets its own marker/color (see `markers`, `colors`).
dimensions = (2, 3, 5, 10, 20, 40)
# 46 target precisions, uniformly log-spaced in 10**[-8, 2].
fixed_targets = pproc.TargetValues(np.logspace(-8, 2, 46))
#runlength_based_targets = pproc.RunlengthBasedTargetValues(np.logspace(numpy.log10(0.5), numpy.log10(50), 8))
# runlength_based_targets = pproc.RunlengthBasedTargetValues([0.5, 1, 3, 10, 50])
targets = fixed_targets # default

# formattings
colors = ('c', 'g', 'b', 'k', 'r', 'm', 'k', 'y', 'k', 'c', 'r', 'm')
markers = ('+', 'v', '*', 'o', 's', 'D', 'x')
markersize = 14  # modified in config.py
markersize_addon_beyond_maxevals = -6
linewidth_default = 0 # lines look ugly and are not necessary (anymore), because smaller symbols are used beyond maxevals
linewidth_rld_based = 2 # show lines because only 8 symbols are used
max_evals_line_length = 9 # length away from the diagonal as a factor, line indicates maximal evaluations for each data
offset = 0. #0.02 offset provides a way to move away the box boundaries to display the outer markers fully, clip_on=False is more effective

# LaTeX caption templates; NBTARGETS/NBLOW/NBUP/REFERENCE_ALGORITHM are
# placeholder tokens substituted by figure_caption().
caption_start_fixed = r"""Expected running time (\ERT\ in $\log_{10}$ of number of function evaluations)
of \algorithmB\ ($x$-axis) versus \algorithmA\ ($y$-axis) for $NBTARGETS$ target values
$\Df \in [NBLOW, NBUP]$ in each dimension on functions #1. """
caption_start_rlbased = r"""Expected running time (\ERT\ in $\log_{10}$ of number of function evaluations)
of \algorithmA\ ($y$-axis) versus \algorithmB\ ($x$-axis) for $NBTARGETS$ runlength-based target
function values for budgets between $NBLOW$ and $NBUP$ evaluations.
Each runlength-based target $f$-value is chosen such that the \ERT{}s of the
REFERENCE_ALGORITHM artificial algorithm for the given and a slightly easier
target bracket the reference budget. """
caption_finish = r"""Markers on the upper or right edge indicate that the respective target
value was never reached. Markers represent dimension:
2:{\color{cyan}+},
3:{\color{green!45!black}$\triangledown$},
5:{\color{blue}$\star$},
10:$\circ$,
20:{\color{red}$\Box$},
40:{\color{magenta}$\Diamond$}. """
def figure_caption():
    """Return the LaTeX figure caption, instantiated for the current
    module-level `targets` setting.

    Substitutes the NBTARGETS/NBLOW/NBUP (and, for runlength-based
    targets, REFERENCE_ALGORITHM) placeholders in the caption templates.
    """
    if isinstance(targets, pproc.RunlengthBasedTargetValues):
        s = caption_start_rlbased
        s = s.replace('NBTARGETS', str(len(targets)))
        # Bug fix: a conditional expression binds looser than `+`, so the
        # original `a + b if cond else ''` evaluated as `(a + b) if cond
        # else ''` and dropped the target label entirely whenever
        # `targets.times_dimension` was False.  Parenthesizing makes only
        # the `\times\DIM` suffix conditional, as intended.
        s = s.replace('NBLOW', toolsdivers.number_to_latex(targets.label(0)) +
                      (r'\times\DIM' if targets.times_dimension else ''))
        s = s.replace('NBUP', toolsdivers.number_to_latex(targets.label(-1)) +
                      (r'\times\DIM' if targets.times_dimension else ''))
        s = s.replace('REFERENCE_ALGORITHM', targets.reference_algorithm)
    else:
        s = caption_start_fixed
        s = s.replace('NBTARGETS', str(len(targets)))
        s = s.replace('NBLOW', toolsdivers.number_to_latex(targets.label(0)))
        s = s.replace('NBUP', toolsdivers.number_to_latex(targets.label(-1)))
    s += caption_finish
    return s
def figure_caption_html():
    """Return the HTML figure caption, instantiated for the current
    module-level `targets` setting.

    HTML counterpart of :func:`figure_caption`, pulling its templates from
    `htmldesc` and substituting the same placeholder tokens.
    """
    if isinstance(targets, pproc.RunlengthBasedTargetValues):
        s = htmldesc.getValue('##bbobppscatterlegendrlbased##')
        s = s.replace('NBTARGETS', str(len(targets)))
        # Bug fix: parenthesize the conditional expression — it binds looser
        # than `+`, so the un-parenthesized original replaced NBLOW/NBUP with
        # the empty string (losing the target label) whenever
        # `targets.times_dimension` was False.
        s = s.replace('NBLOW', toolsdivers.number_to_html(targets.label(0)) +
                      (r'\times\DIM' if targets.times_dimension else ''))
        s = s.replace('NBUP', toolsdivers.number_to_html(targets.label(-1)) +
                      (r'\times\DIM' if targets.times_dimension else ''))
        s = s.replace('REFERENCEALGORITHM', targets.reference_algorithm)
    else:
        s = htmldesc.getValue('##bbobppscatterlegendfixed##')
        s = s.replace('NBTARGETS', str(len(targets)))
        s = s.replace('NBLOW', toolsdivers.number_to_html(targets.label(0)))
        s = s.replace('NBUP', toolsdivers.number_to_html(targets.label(-1)))
    s += htmldesc.getValue('##bbobppscatterlegendend##')
    return s
def beautify():
    """Finalize the current scatter-plot axis: log-log scales, square
    aspect, equal x/y limits, diagonal reference lines and tick labels
    rendered as base-10 exponents."""
    a = plt.gca()
    a.set_xscale('log')
    a.set_yscale('log')
    #a.set_xlabel('ERT0')
    #a.set_ylabel('ERT1')
    xmin, xmax = plt.xlim()
    ymin, ymax = plt.ylim()
    minbnd = min(xmin, ymin)
    maxbnd = max(xmax, ymax)
    # Stretch the upper bound slightly (driven by `offset`) so that markers
    # sitting on the boundary are not clipped.
    maxbnd = maxbnd ** (1 + 11.*offset/(numpy.log10(float(maxbnd)/minbnd)))
    # Main diagonal plus dotted guide lines at factors 10 and 100 off it.
    plt.plot([minbnd, maxbnd], [minbnd, maxbnd], ls='-', color='k')
    plt.plot([10*minbnd, 10*maxbnd], [minbnd, maxbnd], ls=':', color='k')
    plt.plot([100*minbnd, 100*maxbnd], [minbnd, maxbnd], ls=':', color='k')
    plt.plot([minbnd, maxbnd], [10*minbnd, 10*maxbnd], ls=':', color='k')
    plt.plot([minbnd, maxbnd], [100*minbnd, 100*maxbnd], ls=':', color='k')
    plt.xlim(minbnd, maxbnd)
    plt.ylim(minbnd, maxbnd)
    #a.set_aspect(1./a.get_data_ratio())
    a.set_aspect('equal')
    plt.grid(True)
    # Relabel ticks with their base-10 exponents (axes are log-scaled).
    # NOTE(review): the x labels are computed from the *y* ticks — valid
    # only because both axes share identical limits after the xlim/ylim
    # calls above; confirm if the limits ever diverge.
    tmp = a.get_yticks()
    tmp2 = []
    for i in tmp:
        tmp2.append('%d' % round(numpy.log10(i)))
    a.set_yticklabels(tmp2)
    a.set_xticklabels(tmp2)
    #for line in a.get_xticklines():# + a.get_yticklines():
    #    plt.setp(line, color='b', marker='o', markersize=10)
    #set_trace()
def main(dsList0, dsList1, outputdir, verbose=True):
    """Generate one ERT-vs-ERT scatter-plot figure per common function.

    For every function present in both data set lists, the ERT values of
    `dsList0` (x-axis) are plotted against those of `dsList1` (y-axis) for
    all targets in the module-level `targets`, one marker/color per
    dimension.  Figures are saved as ``ppscatter_fXXX`` in `outputdir`;
    for function 1 the per-function HTML index is also (re)written.

    :param dsList0: DataSetList of the first algorithm (x-axis data)
    :param dsList1: DataSetList of the second algorithm (y-axis data)
    :param str outputdir: output directory for the figures
    :param bool verbose: forwarded to `saveFigure`
    """
    #plt.rc("axes", labelsize=24, titlesize=24)
    #plt.rc("xtick", labelsize=20)
    #plt.rc("ytick", labelsize=20)
    #plt.rc("font", size=20)
    #plt.rc("legend", fontsize=20)
    dictFunc0 = dsList0.dictByFunc()
    dictFunc1 = dsList1.dictByFunc()
    # only functions for which both algorithms have data
    funcs = set(dictFunc0.keys()) & set(dictFunc1.keys())
    # connecting lines are only drawn for runlength-based targets
    if isinstance(targets, pproc.RunlengthBasedTargetValues):
        linewidth = linewidth_rld_based
    else:
        linewidth = linewidth_default
    funInfos = ppfigparam.read_fun_infos(dsList0.isBiobjective())
    for f in funcs:
        dictDim0 = dictFunc0[f].dictByDim()
        dictDim1 = dictFunc1[f].dictByDim()
        dims = set(dictDim0.keys()) & set(dictDim1.keys())
        #set_trace()
        for i, d in enumerate(dimensions):
            try:
                entry0 = dictDim0[d][0] # should be only one element
                entry1 = dictDim1[d][0] # should be only one element
            except (IndexError, KeyError):
                continue
            if linewidth: # plot all reliable ERT values as a line
                all_targets = np.array(sorted(set(entry0.target).union(entry1.target), reverse=True))
                assert entry0.detSuccessRates([all_targets[0]]) == 1.0
                assert entry1.detSuccessRates([all_targets[0]]) == 1.0
                all_targets = all_targets[np.where(all_targets <= targets((f, d))[0])[0]] #
                xdata_all = np.array(entry0.detERT(all_targets))
                ydata_all = np.array(entry1.detERT(all_targets))
                # idx of reliable targets: last index where success rate >= 1/2 and ERT <= maxevals
                idx = []
                for ari in (np.where(entry0.detSuccessRates(all_targets) >= 0.5)[0],
                            np.where(entry1.detSuccessRates(all_targets) >= 0.5)[0],
                            np.where(xdata_all <= max(entry0.maxevals))[0],
                            np.where(ydata_all <= max(entry1.maxevals))[0]
                            ):
                    if len(ari):
                        idx.append(ari[-1])
                # all four reliability conditions must yield an index
                if len(idx) == 4:
                    max_idx = min(idx)
                    ## at least up to the most difficult given target
                    ## idx = max((idx, np.where(all_targets >= targets((f, d))[-1])[0][-1]))
                    xdata_all = xdata_all[:max_idx + 1]
                    ydata_all = ydata_all[:max_idx + 1]
                    idx = (numpy.isfinite(xdata_all)) * (numpy.isfinite(ydata_all))
                    assert idx.all()
                    if idx.any():
                        plt.plot(xdata_all[idx], ydata_all[idx], colors[i], ls='solid', lw=linewidth,
                                 # TODO: ls has changed, check whether this works out
                                 clip_on=False)
            # ERT pairs for the configured targets in this dimension
            xdata = numpy.array(entry0.detERT(targets((f, d))))
            ydata = numpy.array(entry1.detERT(targets((f, d))))
            # plot "valid" data, those within maxevals
            idx = np.logical_and(xdata < entry0.mMaxEvals(),
                                 ydata < entry1.mMaxEvals())
            # was:
            #     (numpy.isinf(xdata) == False) *
            #     (numpy.isinf(ydata) == False) *
            #     (xdata < entry0.mMaxEvals()) *
            #     (ydata < entry1.mMaxEvals()))
            if idx.any():
                # the except KeyError fallback draws an 'x' when markers[i]
                # is rejected by matplotlib
                try:
                    plt.plot(xdata[idx], ydata[idx], ls='',
                             markersize=markersize,
                             marker=markers[i], markerfacecolor='None',
                             markeredgecolor=colors[i], markeredgewidth=3,
                             clip_on=False)
                except KeyError:
                    plt.plot(xdata[idx], ydata[idx], ls='', markersize=markersize,
                             marker='x', markerfacecolor='None',
                             markeredgecolor=colors[i], markeredgewidth=3,
                             clip_on=False)
                #try:
                #    plt.scatter(xdata[idx], ydata[idx], s=10, marker=markers[i],
                #            facecolor='None', edgecolor=colors[i], linewidth=3)
                #except ValueError:
                #    set_trace()
            # plot beyond maxevals but finite data (smaller markers)
            idx = ((numpy.isinf(xdata) == False) *
                   (numpy.isinf(ydata) == False) *
                   np.logical_or(xdata >= entry0.mMaxEvals(),
                                 ydata >= entry1.mMaxEvals()))
            if idx.any():
                try:
                    plt.plot(xdata[idx], ydata[idx], ls='',
                             markersize=markersize + markersize_addon_beyond_maxevals,
                             marker=markers[i], markerfacecolor='None',
                             markeredgecolor=colors[i], markeredgewidth=1,
                             clip_on=False)
                except KeyError:
                    plt.plot(xdata[idx], ydata[idx], ls='', markersize=markersize,
                             marker='x', markerfacecolor='None',
                             markeredgecolor=colors[i], markeredgewidth=2,
                             clip_on=False)
            #ax = plt.gca()
            ax = plt.axes()
            # plot data on the right edge (x infinite, y finite)
            idx = numpy.isinf(xdata) * (numpy.isinf(ydata) == False)
            if idx.any():
                # This (seems to) transform inf to the figure limits!?
                # blended transform: x in axes coordinates (1. = edge),
                # y in data coordinates
                trans = blend(ax.transAxes, ax.transData)
                #plt.scatter([1.]*numpy.sum(idx), ydata[idx], s=10, marker=markers[i],
                #            facecolor='None', edgecolor=colors[i], linewidth=3,
                #            transform=trans)
                try:
                    plt.plot([1.]*numpy.sum(idx), ydata[idx],
                             markersize=markersize + markersize_addon_beyond_maxevals, ls='',
                             marker=markers[i], markerfacecolor='None',
                             markeredgecolor=colors[i], markeredgewidth=1,
                             transform=trans, clip_on=False)
                except KeyError:
                    plt.plot([1.]*numpy.sum(idx), ydata[idx],
                             markersize=markersize, ls='',
                             marker='x', markerfacecolor='None',
                             markeredgecolor=colors[i], markeredgewidth=2,
                             transform=trans, clip_on=False)
            #set_trace()
            # plot data on the left edge (y infinite, x finite; drawn on
            # the top edge via the blended transform below)
            idx = (numpy.isinf(xdata)==False) * numpy.isinf(ydata)
            if idx.any():
                # This (seems to) transform inf to the figure limits!?
                trans = blend(ax.transData, ax.transAxes)
                # plt.scatter(xdata[idx], [1.-offset]*numpy.sum(idx), s=10, marker=markers[i],
                #             facecolor='None', edgecolor=colors[i], linewidth=3,
                #             transform=trans)
                try:
                    plt.plot(xdata[idx], [1.-offset]*numpy.sum(idx),
                             markersize=markersize + markersize_addon_beyond_maxevals, ls='',
                             marker=markers[i], markerfacecolor='None',
                             markeredgecolor=colors[i], markeredgewidth=1,
                             transform=trans, clip_on=False)
                except KeyError:
                    plt.plot(xdata[idx], [1.-offset]*numpy.sum(idx),
                             markersize=markersize, ls='',
                             marker='x', markerfacecolor='None',
                             markeredgecolor=colors[i], markeredgewidth=2,
                             transform=trans, clip_on=False)
            # plot data in the top corner (both x and y infinite)
            idx = numpy.isinf(xdata) * numpy.isinf(ydata)
            if idx.any():
                # plt.scatter(xdata[idx], [1.-offset]*numpy.sum(idx), s=10, marker=markers[i],
                #             facecolor='None', edgecolor=colors[i], linewidth=3,
                #             transform=trans)
                try:
                    plt.plot([1.-offset]*numpy.sum(idx), [1.-offset]*numpy.sum(idx),
                             markersize=markersize + markersize_addon_beyond_maxevals, ls='',
                             marker=markers[i], markerfacecolor='None',
                             markeredgecolor=colors[i], markeredgewidth=1,
                             transform=ax.transAxes, clip_on=False)
                except KeyError:
                    plt.plot([1.-offset]*numpy.sum(idx), [1.-offset]*numpy.sum(idx),
                             markersize=markersize, ls='',
                             marker='x', markerfacecolor='None',
                             markeredgecolor=colors[i], markeredgewidth=2,
                             transform=ax.transAxes, clip_on=False)
            #set_trace()
        beautify()
        # second pass: draw the maxevals indicator lines per dimension
        for i, d in enumerate(dimensions):
            try:
                entry0 = dictDim0[d][0] # should be only one element
                entry1 = dictDim1[d][0] # should be only one element
            except (IndexError, KeyError):
                continue
            minbnd, maxbnd = plt.xlim()
            plt.plot((entry0.mMaxEvals(), entry0.mMaxEvals()),
                     # (minbnd, entry1.mMaxEvals()), ls='-', color=colors[i],
                     (max([minbnd, entry1.mMaxEvals()/max_evals_line_length]), entry1.mMaxEvals()), ls='-', color=colors[i],
                     zorder=-1)
            plt.plot(# (minbnd, entry0.mMaxEvals()),
                     (max([minbnd, entry0.mMaxEvals()/max_evals_line_length]), entry0.mMaxEvals()),
                     (entry1.mMaxEvals(), entry1.mMaxEvals()), ls='-',
                     color=colors[i], zorder=-1)
            plt.xlim(minbnd, maxbnd)
            plt.ylim(minbnd, maxbnd)
            #Set the boundaries again: they changed due to new plots.
            #plt.axvline(entry0.mMaxEvals(), ls='--', color=colors[i])
            #plt.axhline(entry1.mMaxEvals(), ls='--', color=colors[i])
        if f in funInfos.keys():
            plt.ylabel(funInfos[f])
        filename = os.path.join(outputdir, 'ppscatter_f%03d' % f)
        saveFigure(filename, verbose=verbose)
        if f == 1:
            # (re)write the HTML index once, using the algorithm names of
            # the entries seen last in the dimension loop
            algName1 = toolsdivers.str_to_latex(toolsdivers.strip_pathname1(entry1.algId))
            algName0 = toolsdivers.str_to_latex(toolsdivers.strip_pathname1(entry0.algId))
            save_single_functions_html(
                os.path.join(outputdir, genericsettings.two_algorithm_file_name),
                "%s vs %s" % (algName1, algName0),
                algorithmCount = AlgorithmCount.TWO,
                isBiobjective = dsList0.isBiobjective(),
                functionGroups = dsList0.getFuncGroups())
        plt.close()
    #plt.rcdefaults()
| oaelhara/numbbo | code-postprocessing/bbob_pproc/comp2/ppscatter.py | Python | bsd-3-clause | 17,703 |
"""
Authentication token detection tool for Python
==============================================
"""
__all__ = ['crawler']
| bluesurfer/AuthTokens | authtokens/__init__.py | Python | mit | 127 |
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# pylint: disable=relative-import
"""Generate template values for attributes.
Extends IdlType with property |constructor_type_name|.
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler
"""
import idl_types
from idl_types import inherits_interface
from v8_globals import includes
import v8_types
import v8_utilities
from v8_utilities import (cpp_name_or_partial, capitalize, cpp_name, has_extended_attribute,
has_extended_attribute_value, scoped_name, strip_suffix,
uncapitalize, extended_attribute_value_as_list, is_unforgeable,
is_legacy_interface_type_checking)
def attribute_context(interface, attribute, interfaces):
    """Creates a Jinja template context for an attribute of an interface.

    Inspects the attribute's IDL extended attributes, records the C++
    #includes they require (via the module-global |includes| set), and
    assembles the dict of template variables consumed by the code generator.

    Args:
        interface: An interface which |attribute| belongs to
        attribute: An attribute to create the context for
        interfaces: A dict which maps an interface name to the definition
            which can be referred if needed
    Returns:
        A Jinja template context for |attribute|
    """
    idl_type = attribute.idl_type
    base_idl_type = idl_type.base_type
    extended_attributes = attribute.extended_attributes
    idl_type.add_includes_for_type(extended_attributes)
    if idl_type.enum_values:
        includes.add('core/inspector/ConsoleMessage.h')
    # [CheckSecurity]
    is_do_not_check_security = 'DoNotCheckSecurity' in extended_attributes
    is_check_security_for_receiver = (
        has_extended_attribute_value(interface, 'CheckSecurity', 'Receiver') and
        not is_do_not_check_security)
    is_check_security_for_return_value = (
        has_extended_attribute_value(attribute, 'CheckSecurity', 'ReturnValue'))
    if is_check_security_for_receiver or is_check_security_for_return_value:
        includes.add('bindings/core/v8/BindingSecurity.h')
    # [Constructor]
    # TODO(yukishiino): Constructors are much like methods although constructors
    # are not methods.  Constructors must be data-type properties, and we can
    # support them as a kind of methods.
    constructor_type = idl_type.constructor_type_name if is_constructor_attribute(attribute) else None
    # [CEReactions]
    is_ce_reactions = 'CEReactions' in extended_attributes
    if is_ce_reactions:
        includes.add('core/dom/custom/CEReactionsScope.h')
    # [CustomElementCallbacks], [Reflect]
    is_custom_element_callbacks = 'CustomElementCallbacks' in extended_attributes
    is_reflect = 'Reflect' in extended_attributes
    if is_custom_element_callbacks or is_reflect:
        includes.add('core/dom/custom/V0CustomElementProcessingStack.h')
    # [ImplementedInPrivateScript]
    is_implemented_in_private_script = 'ImplementedInPrivateScript' in extended_attributes
    if is_implemented_in_private_script:
        includes.add('bindings/core/v8/PrivateScriptRunner.h')
        includes.add('core/frame/LocalFrame.h')
        includes.add('platform/ScriptForbiddenScope.h')
    # [OnlyExposedToPrivateScript]
    is_only_exposed_to_private_script = 'OnlyExposedToPrivateScript' in extended_attributes
    # [PerWorldBindings]
    if 'PerWorldBindings' in extended_attributes:
        assert idl_type.is_wrapper_type or 'LogActivity' in extended_attributes, '[PerWorldBindings] should only be used with wrapper types: %s.%s' % (interface.name, attribute.name)
    # [SaveSameObject]
    is_save_same_object = (
        'SameObject' in attribute.extended_attributes and
        'SaveSameObject' in attribute.extended_attributes)
    if is_save_same_object:
        includes.add('bindings/core/v8/V8PrivateProperty.h')
    # Window.onerror and WorkerGlobalScope.onerror need the special
    # V8ErrorHandler wrapper for their EventHandler.
    if (base_idl_type == 'EventHandler' and
            interface.name in ['Window', 'WorkerGlobalScope'] and
            attribute.name == 'onerror'):
        includes.add('bindings/core/v8/V8ErrorHandler.h')
    cached_attribute_validation_method = extended_attributes.get('CachedAttribute')
    keep_alive_for_gc = is_keep_alive_for_gc(interface, attribute)
    if cached_attribute_validation_method or keep_alive_for_gc:
        includes.add('bindings/core/v8/V8HiddenValue.h')
    # [CachedAccessor]
    is_cached_accessor = 'CachedAccessor' in extended_attributes
    if is_cached_accessor:
        includes.add('bindings/core/v8/V8PrivateProperty.h')
    # Assemble the template context; each key corresponds to a variable
    # consumed by the Jinja templates.
    context = {
        'access_control_list': access_control_list(interface, attribute),
        'activity_logging_world_list_for_getter': v8_utilities.activity_logging_world_list(attribute, 'Getter'),  # [ActivityLogging]
        'activity_logging_world_list_for_setter': v8_utilities.activity_logging_world_list(attribute, 'Setter'),  # [ActivityLogging]
        'activity_logging_world_check': v8_utilities.activity_logging_world_check(attribute),  # [ActivityLogging]
        'argument_cpp_type': idl_type.cpp_type_args(used_as_rvalue_type=True),
        'cached_attribute_validation_method': cached_attribute_validation_method,
        'constructor_type': constructor_type,
        'cpp_name': cpp_name(attribute),
        'cpp_type': idl_type.cpp_type,
        'cpp_type_initializer': idl_type.cpp_type_initializer,
        'deprecate_as': v8_utilities.deprecate_as(attribute),  # [DeprecateAs]
        'enum_type': idl_type.enum_type,
        'enum_values': idl_type.enum_values,
        'exposed_test': v8_utilities.exposed(attribute, interface),  # [Exposed]
        'has_custom_getter': has_custom_getter(attribute),
        'has_custom_setter': has_custom_setter(attribute),
        'has_setter': has_setter(interface, attribute),
        'idl_type': str(idl_type),  # need trailing [] on array for Dictionary::ConversionContext::setConversionType
        'is_cached_accessor': is_cached_accessor,
        'is_call_with_execution_context': has_extended_attribute_value(attribute, 'CallWith', 'ExecutionContext'),
        'is_call_with_script_state': has_extended_attribute_value(attribute, 'CallWith', 'ScriptState'),
        'is_ce_reactions': is_ce_reactions,
        'is_check_security_for_receiver': is_check_security_for_receiver,
        'is_check_security_for_return_value': is_check_security_for_return_value,
        'is_custom_element_callbacks': is_custom_element_callbacks,
        # TODO(yukishiino): Make all DOM attributes accessor-type properties.
        'is_data_type_property': not ('CachedAccessor' in extended_attributes) and is_data_type_property(interface, attribute),
        'is_getter_raises_exception':  # [RaisesException]
            'RaisesException' in extended_attributes and
            extended_attributes['RaisesException'] in (None, 'Getter'),
        'is_implemented_in_private_script': is_implemented_in_private_script,
        'is_keep_alive_for_gc': keep_alive_for_gc,
        'is_lenient_this': 'LenientThis' in extended_attributes,
        'is_nullable': idl_type.is_nullable,
        'is_explicit_nullable': idl_type.is_explicit_nullable,
        'is_partial_interface_member':
            'PartialInterfaceImplementedAs' in extended_attributes,
        'is_per_world_bindings': 'PerWorldBindings' in extended_attributes,
        'is_put_forwards': 'PutForwards' in extended_attributes,
        'is_read_only': attribute.is_read_only,
        'is_reflect': is_reflect,
        'is_replaceable': 'Replaceable' in attribute.extended_attributes,
        'is_save_same_object': is_save_same_object,
        'is_static': attribute.is_static,
        'is_url': 'URL' in extended_attributes,
        'is_unforgeable': is_unforgeable(interface, attribute),
        'on_instance': v8_utilities.on_instance(interface, attribute),
        'on_interface': v8_utilities.on_interface(interface, attribute),
        'on_prototype': v8_utilities.on_prototype(interface, attribute),
        'origin_trial_enabled_function': v8_utilities.origin_trial_enabled_function_name(attribute),  # [OriginTrialEnabled]
        'origin_trial_feature_name': v8_utilities.origin_trial_feature_name(attribute),  # [OriginTrialEnabled]
        'use_output_parameter_for_result': idl_type.use_output_parameter_for_result,
        'measure_as': v8_utilities.measure_as(attribute, interface),  # [MeasureAs]
        'name': attribute.name,
        'only_exposed_to_private_script': is_only_exposed_to_private_script,
        'private_script_v8_value_to_local_cpp_value': idl_type.v8_value_to_local_cpp_value(
            extended_attributes, 'v8Value', 'cppValue', bailout_return_value='false', isolate='scriptState->isolate()'),
        'property_attributes': property_attributes(interface, attribute),
        'reflect_empty': extended_attributes.get('ReflectEmpty'),
        'reflect_invalid': extended_attributes.get('ReflectInvalid', ''),
        'reflect_missing': extended_attributes.get('ReflectMissing'),
        'reflect_only': extended_attribute_value_as_list(attribute, 'ReflectOnly'),
        'runtime_enabled_function': v8_utilities.runtime_enabled_function_name(attribute),  # [RuntimeEnabled]
        'runtime_feature_name': v8_utilities.runtime_feature_name(attribute),  # [RuntimeEnabled]
        'secure_context_test': v8_utilities.secure_context(attribute, interface),  # [SecureContext]
        'should_be_exposed_to_script': not (is_implemented_in_private_script and is_only_exposed_to_private_script),
        'cached_accessor_name': '%s%sCachedAccessor' % (interface.name, attribute.name.capitalize()),
        'world_suffixes': (
            ['', 'ForMainWorld']
            if 'PerWorldBindings' in extended_attributes
            else ['']),  # [PerWorldBindings]
    }
    # Fill in getter/setter sub-contexts unless the binding is hand-written
    # ([Custom]); setter context is only needed when a setter is generated.
    if is_constructor_attribute(attribute):
        update_constructor_attribute_context(interface, attribute, context)
    if not has_custom_getter(attribute):
        getter_context(interface, attribute, context)
    if not has_custom_setter(attribute) and has_setter(interface, attribute):
        setter_context(interface, attribute, interfaces, context)
    return context
def filter_has_accessor_configuration(attributes):
    """Returns attributes installed unconditionally as accessor properties."""
    def qualifies(attribute):
        # Reject anything installed conditionally or as a data property.
        conditionally_installed = (attribute['exposed_test'] or
                                   attribute['secure_context_test'] or
                                   attribute['origin_trial_enabled_function'] or
                                   attribute['runtime_enabled_function'])
        return (not conditionally_installed and
                not attribute['is_data_type_property'] and
                attribute['should_be_exposed_to_script'])
    return [attribute for attribute in attributes if qualifies(attribute)]
def filter_has_data_attribute_configuration(attributes):
    """Returns attributes installed unconditionally as data-type properties."""
    def qualifies(attribute):
        # Reject anything installed conditionally or as an accessor property.
        conditionally_installed = (attribute['exposed_test'] or
                                   attribute['secure_context_test'] or
                                   attribute['origin_trial_enabled_function'] or
                                   attribute['runtime_enabled_function'])
        return (not conditionally_installed and
                attribute['is_data_type_property'] and
                attribute['should_be_exposed_to_script'])
    return [attribute for attribute in attributes if qualifies(attribute)]
def is_lazy_data_attribute(attribute):
    """Returns truthy for constructor attributes that need no getter callback.

    Mirrors the ``a and not b`` idiom exactly: a falsy 'constructor_type'
    is returned as-is so callers relying on truthiness see the same value.
    """
    constructor = attribute['constructor_type']
    if not constructor:
        return constructor
    return not attribute['needs_constructor_getter_callback']
def filter_has_attribute_configuration(attributes):
    """Unconditional data attributes that need an eager configuration."""
    eligible = filter_has_data_attribute_configuration(attributes)
    return [attribute for attribute in eligible
            if not is_lazy_data_attribute(attribute)]
def filter_has_lazy_data_attribute_configuration(attributes):
    """Unconditional data attributes that can be configured lazily."""
    eligible = filter_has_data_attribute_configuration(attributes)
    return [attribute for attribute in eligible
            if is_lazy_data_attribute(attribute)]
def filter_origin_trial_enabled(attributes):
    """Attributes gated on an origin trial but not on an [Exposed] test."""
    selected = []
    for attribute in attributes:
        if attribute['origin_trial_feature_name'] and not attribute['exposed_test']:
            selected.append(attribute)
    return selected
def filter_purely_runtime_enabled(attributes):
    """Attributes gated only on a runtime-enabled feature."""
    selected = []
    for attribute in attributes:
        # [Exposed] / [SecureContext] gating disqualifies "purely" runtime.
        if attribute['exposed_test'] or attribute['secure_context_test']:
            continue
        if attribute['runtime_feature_name']:
            selected.append(attribute)
    return selected
def attribute_filters():
    # Returns the mapping from filter name to filter function; the names are
    # the identifiers the templates use to select attribute subsets.
    return {'has_accessor_configuration': filter_has_accessor_configuration,
            'has_attribute_configuration': filter_has_attribute_configuration,
            'has_lazy_data_attribute_configuration': filter_has_lazy_data_attribute_configuration,
            'origin_trial_enabled_attributes': filter_origin_trial_enabled,
            'purely_runtime_enabled_attributes': filter_purely_runtime_enabled}
################################################################################
# Getter
################################################################################
def getter_context(interface, attribute, context):
    """Updates |context| in place with getter-related template values.

    Sets 'cpp_value', 'cpp_value_to_v8_value' and the two
    'v8_set_return_value*' statements; when the getter result must be stored
    in a local C++ variable first, the original call expression is preserved
    under 'cpp_value_original'.
    """
    idl_type = attribute.idl_type
    base_idl_type = idl_type.base_type
    extended_attributes = attribute.extended_attributes
    cpp_value = getter_expression(interface, attribute, context)
    # Normally we can inline the function call into the return statement to
    # avoid the overhead of using a Ref<> temporary, but for some cases
    # (nullable types, EventHandler, [CachedAttribute], or if there are
    # exceptions), we need to use a local variable.
    # FIXME: check if compilers are smart enough to inline this, and if so,
    # always use a local variable (for readability and CG simplicity).
    if 'ImplementedInPrivateScript' in extended_attributes:
        if (not idl_type.is_wrapper_type and
                not idl_type.is_basic_type and
                not idl_type.is_enum):
            raise Exception('Private scripts supports only primitive types and DOM wrappers.')
        context['cpp_value_original'] = cpp_value
        cpp_value = 'result'
    elif (idl_type.is_explicit_nullable or
          base_idl_type == 'EventHandler' or
          'CachedAttribute' in extended_attributes or
          'ReflectOnly' in extended_attributes or
          context['is_keep_alive_for_gc'] or
          context['is_getter_raises_exception']):
        context['cpp_value_original'] = cpp_value
        cpp_value = 'cppValue'
    def v8_set_return_value_statement(for_main_world=False):
        # Cached/keep-alive getters return the stored v8::Value directly.
        if context['is_keep_alive_for_gc'] or 'CachedAttribute' in extended_attributes:
            return 'v8SetReturnValue(info, v8Value)'
        return idl_type.v8_set_return_value(
            cpp_value, extended_attributes=extended_attributes, script_wrappable='impl',
            for_main_world=for_main_world, is_static=attribute.is_static)
    context.update({
        'cpp_value': cpp_value,
        'cpp_value_to_v8_value': idl_type.cpp_value_to_v8_value(
            cpp_value=cpp_value, creation_context='holder',
            extended_attributes=extended_attributes),
        'v8_set_return_value_for_main_world': v8_set_return_value_statement(for_main_world=True),
        'v8_set_return_value': v8_set_return_value_statement(),
    })
def getter_expression(interface, attribute, context):
    """Returns the C++ expression that invokes the attribute getter.

    Builds the argument list in the exact order the generated C++ expects
    (private-script arguments, [CallWith] arguments, *impl for partial
    interfaces, then output/exception parameters).
    """
    arguments = []
    this_getter_base_name = getter_base_name(interface, attribute, arguments)
    getter_name = scoped_name(interface, attribute, this_getter_base_name)
    if 'ImplementedInPrivateScript' in attribute.extended_attributes:
        arguments.append('toLocalFrame(toFrameIfNotDetached(info.GetIsolate()->GetCurrentContext()))')
        arguments.append('impl')
        arguments.append('&result')
    arguments.extend(v8_utilities.call_with_arguments(
        attribute.extended_attributes.get('CallWith')))
    # Members of IDL partial interface definitions are implemented in C++ as
    # static member functions, which for instance members (non-static members)
    # take *impl as their first argument
    if ('PartialInterfaceImplementedAs' in attribute.extended_attributes and
            'ImplementedInPrivateScript' not in attribute.extended_attributes and
            not attribute.is_static):
        arguments.append('*impl')
    if attribute.idl_type.is_explicit_nullable:
        arguments.append('isNull')
    if context['is_getter_raises_exception']:
        arguments.append('exceptionState')
    if attribute.idl_type.use_output_parameter_for_result:
        arguments.append('result')
    expression = '%s(%s)' % (getter_name, ', '.join(arguments))
    # Needed to handle getter expressions returning Type& as the
    # use site for |expression| expects Type*.
    if attribute.idl_type.is_interface_type and len(arguments) == 0:
        return 'WTF::getPtr(%s)' % expression
    return expression
# Specialized Element getters used for [Reflect]ed attributes of these IDL
# types; all other reflected types fall back to fastGetAttribute.
CONTENT_ATTRIBUTE_GETTER_NAMES = {
    'boolean': 'fastHasAttribute',
    'long': 'getIntegralAttribute',
    'unsigned long': 'getUnsignedIntegralAttribute',
}
def getter_base_name(interface, attribute, arguments):
    """Returns the unscoped C++ getter name for |attribute|.

    For [Reflect]ed attributes (other than the special-cased class/id/name)
    the scoped content attribute name is appended to |arguments|.
    """
    ext_attrs = attribute.extended_attributes
    if 'ImplementedInPrivateScript' in ext_attrs:
        return uncapitalize(cpp_name(attribute)) + 'AttributeGetter'
    if 'Reflect' not in ext_attrs:
        return uncapitalize(cpp_name(attribute))
    reflected_name = ext_attrs['Reflect'] or attribute.name.lower()
    if reflected_name in ('class', 'id', 'name'):
        # Special-case for performance optimization.
        return 'get' + reflected_name.capitalize() + 'Attribute'
    arguments.append(scoped_content_attribute_name(interface, attribute))
    specialized_getter = CONTENT_ATTRIBUTE_GETTER_NAMES.get(attribute.idl_type.base_type)
    if specialized_getter:
        return specialized_getter
    return 'getURLAttribute' if 'URL' in ext_attrs else 'fastGetAttribute'
def is_keep_alive_for_gc(interface, attribute):
    """Returns True if the attribute's wrapper must be kept alive with the
    owner's wrapper.

    Applies to readonly wrapper-type attributes (the attribute never
    changes), minus several exceptions handled below.  Fix: removed the
    unused local |extended_attributes|, which was assigned but never read.
    """
    idl_type = attribute.idl_type
    base_idl_type = idl_type.base_type
    return (
        # For readonly attributes, for performance reasons we keep the attribute
        # wrapper alive while the owner wrapper is alive, because the attribute
        # never changes.
        (attribute.is_read_only and
         idl_type.is_wrapper_type and
         # There are some exceptions, however:
         not(
             # Node lifetime is managed by object grouping.
             inherits_interface(interface.name, 'Node') or
             inherits_interface(base_idl_type, 'Node') or
             # A self-reference is unnecessary.
             attribute.name == 'self' or
             # FIXME: Remove these hard-coded hacks.
             base_idl_type in ['EventTarget', 'Window'] or
             base_idl_type.startswith(('HTML', 'SVG')))))
################################################################################
# Setter
################################################################################
def setter_context(interface, attribute, interfaces, context):
    """Updates |context| in place with setter-related template values.

    [PutForwards] attributes are resolved to their target interface/attribute
    first; [Replaceable] attributes only get a generic data-property setter.

    Raises:
        Exception: if a [PutForwards] target attribute does not exist on the
            target interface.
    """
    if 'PutForwards' in attribute.extended_attributes:
        # Use target interface and attribute in place of original interface and
        # attribute from this point onwards.
        target_interface_name = attribute.idl_type.base_type
        target_attribute_name = attribute.extended_attributes['PutForwards']
        interface = interfaces[target_interface_name]
        try:
            attribute = next(candidate
                             for candidate in interface.attributes
                             if candidate.name == target_attribute_name)
        except StopIteration:
            # Fixed: the extended attribute is spelled [PutForwards], not
            # [PutForward]; the diagnostic now matches the IDL syntax.
            raise Exception('[PutForwards] target not found:\n'
                            'Attribute "%s" is not present in interface "%s"' %
                            (target_attribute_name, target_interface_name))
    if 'Replaceable' in attribute.extended_attributes:
        context['cpp_setter'] = 'v8CallBoolean(info.Holder()->CreateDataProperty(info.GetIsolate()->GetCurrentContext(), propertyName, v8Value))'
        return
    extended_attributes = attribute.extended_attributes
    idl_type = attribute.idl_type
    # [RaisesException], [RaisesException=Setter]
    is_setter_raises_exception = (
        'RaisesException' in extended_attributes and
        extended_attributes['RaisesException'] in [None, 'Setter'])
    # [LegacyInterfaceTypeChecking]
    has_type_checking_interface = (
        not is_legacy_interface_type_checking(interface, attribute) and
        idl_type.is_wrapper_type)
    context.update({
        'has_setter_exception_state':
            is_setter_raises_exception or has_type_checking_interface or
            idl_type.v8_conversion_needs_exception_state,
        'has_type_checking_interface': has_type_checking_interface,
        'is_setter_call_with_execution_context': has_extended_attribute_value(
            attribute, 'SetterCallWith', 'ExecutionContext'),
        'is_setter_raises_exception': is_setter_raises_exception,
        'private_script_cpp_value_to_v8_value': idl_type.cpp_value_to_v8_value(
            'cppValue', isolate='scriptState->isolate()',
            creation_context='scriptState->context()->Global()'),
        'v8_value_to_local_cpp_value': idl_type.v8_value_to_local_cpp_value(
            extended_attributes, 'v8Value', 'cppValue'),
    })
    # setter_expression() depends on context values we set above.
    context['cpp_setter'] = setter_expression(interface, attribute, context)
def setter_expression(interface, attribute, context):
    """Returns the C++ expression that invokes the attribute setter.

    EventHandler attributes are wired through V8EventListenerHelper (with
    the V8ErrorHandler wrapper for Window/WorkerGlobalScope onerror); all
    other attributes pass the converted 'cppValue'.
    """
    extended_attributes = attribute.extended_attributes
    arguments = v8_utilities.call_with_arguments(
        extended_attributes.get('SetterCallWith') or
        extended_attributes.get('CallWith'))
    this_setter_base_name = setter_base_name(interface, attribute, arguments)
    setter_name = scoped_name(interface, attribute, this_setter_base_name)
    # Members of IDL partial interface definitions are implemented in C++ as
    # static member functions, which for instance members (non-static members)
    # take *impl as their first argument
    if ('PartialInterfaceImplementedAs' in extended_attributes and
            'ImplementedInPrivateScript' not in extended_attributes and
            not attribute.is_static):
        arguments.append('*impl')
    idl_type = attribute.idl_type
    if 'ImplementedInPrivateScript' in extended_attributes:
        arguments.append('toLocalFrame(toFrameIfNotDetached(info.GetIsolate()->GetCurrentContext()))')
        arguments.append('impl')
        arguments.append('cppValue')
    elif idl_type.base_type == 'EventHandler':
        getter_name = scoped_name(interface, attribute, cpp_name(attribute))
        context['event_handler_getter_expression'] = '%s(%s)' % (
            getter_name, ', '.join(arguments))
        if (interface.name in ['Window', 'WorkerGlobalScope'] and
                attribute.name == 'onerror'):
            includes.add('bindings/core/v8/V8ErrorHandler.h')
            arguments.append(
                'V8EventListenerHelper::ensureEventListener<V8ErrorHandler>(' +
                'v8Value, true, ScriptState::forReceiverObject(info))')
        else:
            arguments.append(
                'V8EventListenerHelper::getEventListener(' +
                'ScriptState::forReceiverObject(info), v8Value, true, ' +
                'ListenerFindOrCreate)')
    else:
        arguments.append('cppValue')
    if context['is_setter_raises_exception']:
        arguments.append('exceptionState')
    return '%s(%s)' % (setter_name, ', '.join(arguments))
# Specialized Element setters used for [Reflect]ed attributes of these IDL
# types; all other reflected types fall back to setAttribute.
CONTENT_ATTRIBUTE_SETTER_NAMES = {
    'boolean': 'setBooleanAttribute',
    'long': 'setIntegralAttribute',
    'unsigned long': 'setUnsignedIntegralAttribute',
}
def setter_base_name(interface, attribute, arguments):
    """Returns the unscoped C++ setter name for |attribute|.

    For [Reflect]ed attributes the scoped content attribute name is appended
    to |arguments|.
    """
    ext_attrs = attribute.extended_attributes
    if 'ImplementedInPrivateScript' in ext_attrs:
        return uncapitalize(cpp_name(attribute)) + 'AttributeSetter'
    if 'Reflect' not in ext_attrs:
        return 'set' + capitalize(cpp_name(attribute))
    arguments.append(scoped_content_attribute_name(interface, attribute))
    return CONTENT_ATTRIBUTE_SETTER_NAMES.get(attribute.idl_type.base_type,
                                              'setAttribute')
def scoped_content_attribute_name(interface, attribute):
    """Returns the namespaced QualifiedName constant for a [Reflect]ed
    attribute, recording the matching #include as a side effect.
    """
    reflected_name = attribute.extended_attributes['Reflect'] or attribute.name.lower()
    namespace = 'SVGNames' if interface.name.startswith('SVG') else 'HTMLNames'
    includes.add('core/%s.h' % namespace)
    return '%s::%sAttr' % (namespace, reflected_name)
################################################################################
# Attribute configuration
################################################################################
# Property descriptor's {writable: boolean}
def is_writable(attribute):
    """Returns True if the property descriptor should have {writable: true}."""
    if not attribute.is_read_only:
        return True
    # Readonly attributes are still writable under these extended attributes.
    ext_attrs = attribute.extended_attributes
    return 'PutForwards' in ext_attrs or 'Replaceable' in ext_attrs
def is_data_type_property(interface, attribute):
    """Returns True if the attribute is installed as a data-type property."""
    if is_constructor_attribute(attribute):
        return True
    # Window and Location attributes are data properties.
    return interface.name in ('Window', 'Location')
# [PutForwards], [Replaceable]
def has_setter(interface, attribute):
if (is_data_type_property(interface, attribute) and
(is_constructor_attribute(attribute) or
'Replaceable' in attribute.extended_attributes)):
return False
return is_writable(attribute)
# [DoNotCheckSecurity], [Unforgeable]
def access_control_list(interface, attribute):
extended_attributes = attribute.extended_attributes
access_control = []
if 'DoNotCheckSecurity' in extended_attributes:
do_not_check_security = extended_attributes['DoNotCheckSecurity']
if do_not_check_security == 'Setter':
access_control.append('v8::ALL_CAN_WRITE')
else:
access_control.append('v8::ALL_CAN_READ')
if has_setter(interface, attribute):
access_control.append('v8::ALL_CAN_WRITE')
if is_unforgeable(interface, attribute):
access_control.append('v8::PROHIBITS_OVERWRITING')
return access_control or ['v8::DEFAULT']
# [NotEnumerable], [Unforgeable]
def property_attributes(interface, attribute):
extended_attributes = attribute.extended_attributes
property_attributes_list = []
if ('NotEnumerable' in extended_attributes or
is_constructor_attribute(attribute)):
property_attributes_list.append('v8::DontEnum')
if is_unforgeable(interface, attribute):
property_attributes_list.append('v8::DontDelete')
if not is_writable(attribute):
property_attributes_list.append('v8::ReadOnly')
return property_attributes_list or ['v8::None']
# [Custom], [Custom=Getter]
def has_custom_getter(attribute):
extended_attributes = attribute.extended_attributes
return ('Custom' in extended_attributes and
extended_attributes['Custom'] in [None, 'Getter'])
# [Custom], [Custom=Setter]
def has_custom_setter(attribute):
extended_attributes = attribute.extended_attributes
return (not attribute.is_read_only and
'Custom' in extended_attributes and
extended_attributes['Custom'] in [None, 'Setter'])
################################################################################
# Constructors
################################################################################
# Extend IdlType with |constructor_type_name|: the base type name with the
# 'Constructor' suffix stripped (used for constructor attributes).
idl_types.IdlType.constructor_type_name = property(
    # FIXME: replace this with a [ConstructorAttribute] extended attribute
    lambda self: strip_suffix(self.base_type, 'Constructor'))
def is_constructor_attribute(attribute):
    """Returns True if |attribute| exposes an interface constructor."""
    # FIXME: replace this with [ConstructorAttribute] extended attribute
    type_name = attribute.idl_type.name
    return type_name.endswith('Constructor')
def update_constructor_attribute_context(interface, attribute, context):
    """Marks whether the constructor attribute needs a getter callback
    (required when the attribute is measured or deprecated).
    """
    measure_as = context['measure_as']
    context['needs_constructor_getter_callback'] = (
        measure_as if measure_as else context['deprecate_as'])
| geminy/aidear | oss/qt/qt-everywhere-opensource-src-5.9.0/qtwebengine/src/3rdparty/chromium/third_party/WebKit/Source/bindings/scripts/v8_attributes.py | Python | gpl-3.0 | 29,128 |
__author__ = "Wren J. R. (uberfastman)"
__email__ = "wrenjr@yahoo.com"
# code snippets taken from: http://stackoverflow.com/questions/24419188/automating-pydrive-verification-process
import datetime
import logging
from pathlib import Path
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from report.logger import get_logger
from utils.app_config_parser import AppConfigParser
# Module logger; propagation disabled so messages are not duplicated by the
# root logger.
logger = get_logger(__name__, propagate=False)
# Suppress verbose googleapiclient info/warning logging
logging.getLogger("googleapiclient").setLevel(level=logging.ERROR)
logging.getLogger("googleapiclient.discovery").setLevel(level=logging.ERROR)
logging.getLogger("googleapiclient.discovery_cache").setLevel(level=logging.ERROR)
logging.getLogger("googleapiclient.discovery_cache.file_cache").setLevel(level=logging.ERROR)
class GoogleDriveUploader(object):
    """Authenticates with Google Drive via pydrive and uploads report PDFs
    into a configured folder hierarchy, returning a shareable link message.
    """
    def __init__(self, filename, config):
        """Authenticates with Google Drive, reusing a saved credential token
        so the browser OAuth flow only runs when no valid token exists.
        Args:
            filename: report path relative to the project root.
            config: AppConfigParser-style object with a [Drive] section.
        """
        logger.debug("Initializing Google Drive uploader.")
        project_dir = Path(__file__).parents[1]
        logger.debug("Authenticating with Google Drive.")
        self.filename = Path(project_dir) / filename
        self.config = config
        self.gauth = GoogleAuth()
        auth_token = Path(project_dir) / Path(self.config.get("Drive", "google_drive_auth_token"))
        # Try to load saved client credentials
        self.gauth.LoadCredentialsFile(auth_token)
        if self.gauth.credentials is None:
            # Authenticate if they're not there
            self.gauth.LocalWebserverAuth()
        elif self.gauth.access_token_expired:
            # Refresh them if expired
            self.gauth.Refresh()
        else:
            # Initialize the saved creds
            self.gauth.Authorize()
        # Save the current credentials to a file
        self.gauth.SaveCredentialsFile(auth_token)
    def upload_file(self, test=False):
        """Uploads self.filename to Google Drive and returns a markdown-style
        message containing the shareable link.
        When |test| is True the file goes to the Drive root; otherwise the
        configured folder path plus <season>/<league> subfolders (derived
        from the report path components) is created as needed.
        """
        logger.debug("Uploading file to Google Drive.")
        # Create GoogleDrive instance with authenticated GoogleAuth instance.
        drive = GoogleDrive(self.gauth)
        # Get lists of folders
        root_folders = drive.ListFile(
            {"q": "'root' in parents and mimeType='application/vnd.google-apps.folder' and trashed=false"}).GetList()
        google_drive_folder_path_default = self.config.get("Drive", "google_drive_folder_path_default")
        google_drive_folder_path = Path(self.config.get(
            "Drive", "google_drive_folder_path", fallback=google_drive_folder_path_default)).parts
        google_drive_root_folder_id = self.make_root_folder(
            drive,
            self.check_file_existence(google_drive_folder_path[0], root_folders, "root"),
            google_drive_folder_path[0]
        )
        if not test:
            parent_folder_id = google_drive_root_folder_id
            parent_folder_content_folders = drive.ListFile({
                "q": (
                    f"'{parent_folder_id}' in parents and "
                    f"mimeType='application/vnd.google-apps.folder' and "
                    f"trashed=false"
                )
            }).GetList()
            for folder in google_drive_folder_path[1:]:
                # create folder chain in Google Drive
                parent_folder_id = self.make_parent_folder(
                    drive,
                    self.check_file_existence(folder, parent_folder_content_folders, parent_folder_id),
                    folder,
                    parent_folder_id
                )
                parent_folder_content_folders = drive.ListFile({
                    "q": (
                        f"'{parent_folder_id}' in parents and "
                        f"mimeType='application/vnd.google-apps.folder' and "
                        f"trashed=false"
                    )
                }).GetList()
            # Check for season folder and create it if it does not exist
            season_folder_name = Path(self.filename).parts[-3]
            season_folder_id = self.make_parent_folder(
                drive,
                self.check_file_existence(season_folder_name, parent_folder_content_folders, parent_folder_id),
                season_folder_name,
                parent_folder_id
            )
            season_folder_content_folders = drive.ListFile({
                "q": (
                    f"'{season_folder_id}' in parents and "
                    f"mimeType='application/vnd.google-apps.folder' and "
                    f"trashed=false"
                )
            }).GetList()
            # Check for league folder and create it if it does not exist
            league_folder_name = Path(self.filename).parts[-2].replace("-", "_")
            league_folder_id = self.make_parent_folder(
                drive,
                self.check_file_existence(league_folder_name, season_folder_content_folders, season_folder_id),
                league_folder_name, season_folder_id
            )
            league_folder_content_pdfs = drive.ListFile({
                "q": (
                    f"'{league_folder_id}' in parents and "
                    f"mimeType='application/pdf' and "
                    f"trashed=false"
                )
            }).GetList()
            # Check for league report and replace it if it already exists
            report_file_name = Path(self.filename).parts[-1]
            report_file = self.check_file_existence(report_file_name, league_folder_content_pdfs, league_folder_id)
        else:
            all_pdfs = drive.ListFile({"q": "mimeType='application/pdf' and trashed=false"}).GetList()
            report_file_name = self.filename
            report_file = self.check_file_existence(report_file_name, all_pdfs, "root")
            league_folder_id = "root"
        # Replace any previously uploaded copy of this report.
        if report_file:
            report_file.Delete()
        upload_file = drive.CreateFile(
            {
                "title": report_file_name,
                "mimeType": "application/pdf",
                "parents": [
                    {
                        "kind": "drive#fileLink",
                        "id": league_folder_id
                    }
                ]
            }
        )
        upload_file.SetContentFile(self.filename)
        # Upload the file.
        upload_file.Upload()
        # Make the uploaded report readable by anyone with the link.
        upload_file.InsertPermission(
            {
                "type": "anyone",
                "role": "reader",
                "withLink": True
            }
        )
        return "\nFantasy Football Report\nGenerated %s\n*%s*\n\n_Google Drive Link:_\n%s" % (
            "{:%Y-%b-%d %H:%M:%S}".format(datetime.datetime.now()), upload_file['title'], upload_file["alternateLink"])
    @staticmethod
    def check_file_existence(file_name, file_list, parent_id):
        """Returns the Drive file in |file_list| titled |file_name| whose
        parents include |parent_id| (or the Drive root); None if absent.
        """
        drive_file_name = file_name
        google_drive_file = None
        for drive_file in file_list:
            if drive_file["title"] == drive_file_name:
                for parent_folder in drive_file["parents"]:
                    if parent_folder["id"] == parent_id or parent_folder["isRoot"]:
                        google_drive_file = drive_file
        return google_drive_file
    @staticmethod
    def make_root_folder(drive, folder, folder_name):
        """Returns the id of |folder|, creating |folder_name| under the Drive
        root first when |folder| is None.
        """
        if not folder:
            new_root_folder = drive.CreateFile(
                {
                    "title": folder_name,
                    "parents": [
                        {
                            "kind": "drive#fileLink",
                            "isRoot": True,
                            "id": "root"
                        }
                    ],
                    "mimeType": "application/vnd.google-apps.folder"
                }
            )
            new_root_folder.Upload()
            root_folder_id = new_root_folder["id"]
        else:
            root_folder_id = folder["id"]
        return root_folder_id
    @staticmethod
    def make_parent_folder(drive, folder, folder_name, parent_folder_id):
        """Returns the id of |folder|, creating |folder_name| under
        |parent_folder_id| first when |folder| is None.
        """
        if not folder:
            new_parent_folder = drive.CreateFile(
                {
                    "title": folder_name,
                    "parents": [
                        {
                            "kind": "drive#fileLink",
                            "id": parent_folder_id
                        }
                    ],
                    "mimeType": "application/vnd.google-apps.folder"
                }
            )
            new_parent_folder.Upload()
            parent_folder_id = new_parent_folder["id"]
        else:
            parent_folder_id = folder["id"]
        return parent_folder_id
# Manual re-upload entry point: reads the configured report path from
# config.ini and pushes it to Google Drive, printing the resulting message.
if __name__ == "__main__":
    local_config = AppConfigParser()
    # config.ini lives one directory above this module.
    local_config.read(Path(__file__).parents[1] / "config.ini")
    reupload_file = local_config.get("Drive", "google_drive_reupload_file")
    google_drive_uploader = GoogleDriveUploader(reupload_file, local_config)
    upload_message = google_drive_uploader.upload_file()
    print(upload_message)
| uberfastman/yahoo-fantasy-football-metrics | integrations/drive_integration.py | Python | gpl-3.0 | 8,962 |
#!/usr/bin/env python
"""
process_file(filename)
takes templated file .xxx.src and produces .xxx file where .xxx
is .pyf .f90 or .f using the following template rules:
'<..>' denotes a template.
All function and subroutine blocks in a source file with names that
contain '<..>' will be replicated according to the rules in '<..>'.
The number of comma-separated words in '<..>' will determine the number of
replicates.
'<..>' may have two different forms, named and short. For example,
named:
<p=d,s,z,c> where anywhere inside a block '<p>' will be replaced with
'd', 's', 'z', and 'c' for each replicate of the block.
<_c> is already defined: <_c=s,d,c,z>
<_t> is already defined: <_t=real,double precision,complex,double complex>
short:
<s,d,c,z>, a short form of the named, useful when no <p> appears inside
a block.
In general, '<..>' contains a comma separated list of arbitrary
expressions. If these expressions must contain a comma|leftarrow|rightarrow,
then prepend the comma|leftarrow|rightarrow with a backslash.
If an expression matches '\\<index>' then it will be replaced
by <index>-th expression.
Note that all '<..>' forms in a block must have the same number of
comma-separated entries.
Predefined named template rules:
<prefix=s,d,c,z>
<ftype=real,double precision,complex,double complex>
<ftypereal=real,double precision,\\0,\\1>
<ctype=float,double,complex_float,complex_double>
<ctypereal=float,double,\\0,\\1>
"""
from __future__ import division, absolute_import, print_function
__all__ = ['process_str', 'process_file']
import os
import sys
import re
# Start of a 'subroutine'/'function' statement, optionally preceded by a
# Fortran continuation marker ('$' or '*') at the start of the line.
routine_start_re = re.compile(r'(\n|\A)(( (\$|\*))|)\s*(subroutine|function)\b', re.I)
# 'end subroutine' / 'end function' terminator line.
routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)', re.I)
# A 'function' keyword on a continuation line; used to back up to the start
# of the full (possibly typed, multi-line) declaration.
function_start_re = re.compile(r'\n (\$|\*)\s*function\b', re.I)
def parse_structure(astr):
    """ Return a list of tuples for each function or subroutine each
    tuple is the start and end of a subroutine or function to be
    expanded.
    """
    spanlist = []
    ind = 0
    while True:
        m = routine_start_re.search(astr, ind)
        if m is None:
            # No more routines in the remaining text.
            break
        start = m.start()
        if function_start_re.match(astr, start, m.end()):
            # The 'function' keyword sits on a continuation line; walk back
            # over preceding continuation lines to the real statement start.
            while True:
                i = astr.rfind('\n', ind, start)
                if i==-1:
                    break
                start = i
                if astr[i:i+7]!='\n $':
                    break
            start += 1
        m = routine_end_re.search(astr, m.end())
        # If no matching 'end' is found, the routine runs to end-of-string.
        ind = end = m and m.end()-1 or len(astr)
        spanlist.append((start, end))
    return spanlist
# '<name>' — a reference to a named template inside a routine block.
template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>")
# '<name=a,b,c>' — a named template definition.
named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>")
# '<a,b,c>' — a short-form (anonymous) template list.
list_re = re.compile(r"<\s*((.*?))\s*>")
def find_repl_patterns(astr):
    """Collect every named template definition '<name=list>' found in *astr*.

    Returns a dict mapping template names to their normalized,
    comma-separated replacement lists; definitions with an empty name get a
    generated '__l<n>' key.
    """
    definitions = {}
    for raw_name, raw_list in named_re.findall(astr):
        key = raw_name.strip() or unique_key(definitions)
        # Protect escaped commas before normalizing the list.
        definitions[key] = conv(raw_list.replace(r'\,', '@comma@'))
    return definitions
def find_and_remove_repl_patterns(astr):
    """Return *astr* with all named template definitions stripped out,
    together with a dict of those definitions."""
    definitions = find_repl_patterns(astr)
    stripped = named_re.sub('', astr)
    return stripped, definitions
# Matches a whole-entry backreference such as '\2'.
item_re = re.compile(r"\A\\(?P<index>\d+)\Z")
def conv(astr):
    """Normalize a comma-separated template list: strip whitespace from each
    entry and resolve '\\<n>' backreferences to the n-th entry."""
    entries = [piece.strip() for piece in astr.split(',')]
    for position, entry in enumerate(entries):
        backref = item_re.match(entry)
        if backref:
            entries[position] = entries[int(backref.group('index'))]
    return ','.join(entries)
def unique_key(adict):
    """Return a key of the form '__l<n>' that is not already in *adict*."""
    counter = 1
    while '__l%s' % (counter) in adict:
        counter += 1
    return '__l%s' % (counter)
# A bare template name (optionally whitespace-padded), e.g. 'prefix'.
template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z')
def expand_sub(substr, names):
    """Replicate the routine text *substr* once per entry of its templates.

    *names* maps template names to comma-separated replacement lists; rules
    defined inside *substr* take precedence and (unless '_'-prefixed) are
    exported back into *names* for later blocks.
    """
    # Protect escaped angle brackets so they survive substitution.
    substr = substr.replace(r'\>', '@rightarrow@')
    substr = substr.replace(r'\<', '@leftarrow@')
    lnames = find_repl_patterns(substr)
    substr = named_re.sub(r"<\1>", substr) # get rid of definition templates
    def listrepl(mobj):
        # Turn a short-form list '<a,b,c>' into a named template reference,
        # reusing an existing local name when the same list was seen before.
        thelist = conv(mobj.group(1).replace(r'\,', '@comma@'))
        if template_name_re.match(thelist):
            return "<%s>" % (thelist)
        name = None
        for key in lnames.keys():    # see if list is already in dictionary
            if lnames[key] == thelist:
                name = key
        if name is None:      # this list is not in the dictionary yet
            name = unique_key(lnames)
            lnames[name] = thelist
        return "<%s>" % name
    substr = list_re.sub(listrepl, substr) # convert all lists to named templates
    # newnames are constructed as needed
    numsubs = None
    base_rule = None
    rules = {}
    for r in template_re.findall(substr):
        if r not in rules:
            thelist = lnames.get(r, names.get(r, None))
            if thelist is None:
                raise ValueError('No replicates found for <%s>' % (r))
            if r not in names and not thelist.startswith('_'):
                names[r] = thelist
            rule = [i.replace('@comma@', ',') for i in thelist.split(',')]
            num = len(rule)
            # The first rule fixes the replicate count; later rules must agree.
            if numsubs is None:
                numsubs = num
                rules[r] = rule
                base_rule = r
            elif num == numsubs:
                rules[r] = rule
            else:
                print("Mismatch in number of replacements (base <%s=%s>)"
                      " for <%s=%s>. Ignoring." %
                      (base_rule, ','.join(rules[base_rule]), r, thelist))
    if not rules:
        return substr
    def namerepl(mobj):
        # 'k' is the current replicate index from the enclosing loop below.
        name = mobj.group(1)
        return rules.get(name, (k+1)*[name])[k]
    newstr = ''
    for k in range(numsubs):
        newstr += template_re.sub(namerepl, substr) + '\n\n'
    # Restore the protected escaped angle brackets.
    newstr = newstr.replace('@rightarrow@', '>')
    newstr = newstr.replace('@leftarrow@', '<')
    return newstr
def process_str(allstr):
    """Expand every template block in *allstr* and return the resulting text.

    Template definitions found between routines are harvested (and removed)
    on top of the predefined ``_special_names`` rules, then applied to each
    subroutine/function span located by parse_structure().
    """
    newstr = allstr
    writestr = ''
    struct = parse_structure(newstr)
    oldend = 0
    names = {}
    names.update(_special_names)
    for sub in struct:
        # Text before this routine: collect (and strip) template definitions.
        cleanedstr, defs = find_and_remove_repl_patterns(newstr[oldend:sub[0]])
        writestr += cleanedstr
        names.update(defs)
        # Replicate the routine according to the accumulated rules.
        writestr += expand_sub(newstr[sub[0]:sub[1]], names)
        oldend = sub[1]
    # Trailing text after the last routine is passed through unchanged.
    writestr += newstr[oldend:]
    return writestr
# An "include '<file>.src'" directive at the start of a line.
include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P<name>[\w\d./\\]+[.]src)['\"]", re.I)
def resolve_includes(source):
    """Return the lines of *source* with ``include '<file>.src'`` directives
    recursively replaced by the included file's (resolved) lines.

    Relative include paths are resolved against *source*'s directory;
    includes naming a non-existent file are kept verbatim.
    """
    d = os.path.dirname(source)
    lines = []
    # Use a context manager so the handle is closed even when a nested
    # resolve_includes() call raises (the original leaked it on error).
    with open(source) as fid:
        for line in fid:
            m = include_src_re.match(line)
            if m:
                fn = m.group('name')
                if not os.path.isabs(fn):
                    fn = os.path.join(d, fn)
                if os.path.isfile(fn):
                    print('Including file', fn)
                    lines.extend(resolve_includes(fn))
                else:
                    lines.append(line)
            else:
                lines.append(line)
    return lines
def process_file(source):
    """Resolve includes in *source* and expand all of its template blocks."""
    resolved_text = ''.join(resolve_includes(source))
    return process_str(resolved_text)
# Predefined template rules shared by every processed file (documented in
# the module docstring); computed once at import time.
_special_names = find_repl_patterns('''
<_c=s,d,c,z>
<_t=real,double precision,complex,double complex>
<prefix=s,d,c,z>
<ftype=real,double precision,complex,double complex>
<ctype=float,double,complex_float,complex_double>
<ftypereal=real,double precision,\\0,\\1>
<ctypereal=float,double,\\0,\\1>
''')
def main():
    """Command-line entry point.

    With a filename argument, expand templates from ``file.xxx.src`` into
    ``file.xxx``; with no argument, filter stdin to stdout.
    """
    try:
        file = sys.argv[1]
    except IndexError:
        fid = sys.stdin
        outfile = sys.stdout
    else:
        fid = open(file, 'r')
        (base, ext) = os.path.splitext(file)
        # Output name is the input name with its trailing extension dropped.
        newname = base
        outfile = open(newname, 'w')
    try:
        allstr = fid.read()
        writestr = process_str(allstr)
        outfile.write(writestr)
    finally:
        # Close only the handles we opened; leave stdin/stdout alone.
        if fid is not sys.stdin:
            fid.close()
        if outfile is not sys.stdout:
            outfile.close()
if __name__ == "__main__":
    # Allow direct invocation: python from_template.py [infile]
    main()
| ryfeus/lambda-packs | pytorch/source/numpy/distutils/from_template.py | Python | mit | 7,979 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Replace the raw integer id columns on datapoint_abstracted with
        real foreign keys to Region and Campaign.

        The hard-coded defaults (12907, 111) backfill the new non-null FK
        columns for rows that already exist.
        """
        # Removing unique constraint on 'DataPointAbstracted', fields ['region_id', 'campaign_id']
        db.delete_unique('datapoint_abstracted', ['region_id', 'campaign_id'])
        # Deleting field 'DataPointAbstracted.region_id'
        db.delete_column('datapoint_abstracted', 'region_id')
        # Deleting field 'DataPointAbstracted.campaign_id'
        db.delete_column('datapoint_abstracted', 'campaign_id')
        # Adding field 'DataPointAbstracted.region'
        db.add_column('datapoint_abstracted', 'region',
                      self.gf('django.db.models.fields.related.ForeignKey')(default=12907, to=orm['datapoints.Region']),
                      keep_default=False)
        # Adding field 'DataPointAbstracted.campaign'
        db.add_column('datapoint_abstracted', 'campaign',
                      self.gf('django.db.models.fields.related.ForeignKey')(default=111, to=orm['datapoints.Campaign']),
                      keep_default=False)
        # Adding unique constraint on 'DataPointAbstracted', fields ['region', 'campaign']
        # (Django FKs store to '<name>_id' columns, hence the column names.)
        db.create_unique('datapoint_abstracted', ['region_id', 'campaign_id'])
    def backwards(self, orm):
        """Irreversible: the original integer id values cannot be restored.

        The code after each ``raise`` is South's auto-generated guidance and
        is intentionally unreachable; it is kept as a starting point should
        someone decide to write a real reverse migration.
        """
        # Removing unique constraint on 'DataPointAbstracted', fields ['region', 'campaign']
        db.delete_unique('datapoint_abstracted', ['region_id', 'campaign_id'])
        # User chose to not deal with backwards NULL issues for 'DataPointAbstracted.region_id'
        raise RuntimeError("Cannot reverse this migration. 'DataPointAbstracted.region_id' and its values cannot be restored.")
        # The following code is provided here to aid in writing a correct migration # Adding field 'DataPointAbstracted.region_id'
        db.add_column('datapoint_abstracted', 'region_id',
                      self.gf('django.db.models.fields.IntegerField')(),
                      keep_default=False)
        # User chose to not deal with backwards NULL issues for 'DataPointAbstracted.campaign_id'
        raise RuntimeError("Cannot reverse this migration. 'DataPointAbstracted.campaign_id' and its values cannot be restored.")
        # The following code is provided here to aid in writing a correct migration # Adding field 'DataPointAbstracted.campaign_id'
        db.add_column('datapoint_abstracted', 'campaign_id',
                      self.gf('django.db.models.fields.IntegerField')(),
                      keep_default=False)
        # Deleting field 'DataPointAbstracted.region'
        db.delete_column('datapoint_abstracted', 'region_id')
        # Deleting field 'DataPointAbstracted.campaign'
        db.delete_column('datapoint_abstracted', 'campaign_id')
        # Adding unique constraint on 'DataPointAbstracted', fields ['region_id', 'campaign_id']
        db.create_unique('datapoint_abstracted', ['region_id', 'campaign_id'])
    # Frozen ORM definitions auto-generated by South ('schemamigration');
    # they back the orm[...] lookups above. Do not edit by hand.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'datapoints.aggdatapoint': {
            'Meta': {'object_name': 'AggDataPoint', 'db_table': "'agg_datapoint'", 'managed': 'False'},
            'campaign_id': ('django.db.models.fields.IntegerField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicator_id': ('django.db.models.fields.IntegerField', [], {}),
            'region_id': ('django.db.models.fields.IntegerField', [], {}),
            'value': ('django.db.models.fields.FloatField', [], {})
        },
        u'datapoints.calculatedindicatorcomponent': {
            'Meta': {'object_name': 'CalculatedIndicatorComponent', 'db_table': "'calculated_indicator_component'"},
            'calculation': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'indicator_master'", 'to': u"orm['datapoints.Indicator']"}),
            'indicator_component': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'indicator_component'", 'to': u"orm['datapoints.Indicator']"})
        },
        u'datapoints.campaign': {
            'Meta': {'ordering': "('-start_date',)", 'unique_together': "(('office', 'start_date'),)", 'object_name': 'Campaign', 'db_table': "'campaign'"},
            'campaign_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.CampaignType']"}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'end_date': ('django.db.models.fields.DateField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'office': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Office']"}),
            'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '50', 'populate_from': "'get_full_name'", 'unique_with': '()'}),
            'start_date': ('django.db.models.fields.DateField', [], {})
        },
        u'datapoints.campaigntype': {
            'Meta': {'object_name': 'CampaignType', 'db_table': "'campaign_type'"},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '55'})
        },
        u'datapoints.datapoint': {
            'Meta': {'ordering': "['region', 'campaign']", 'unique_together': "(('indicator', 'region', 'campaign'),)", 'object_name': 'DataPoint', 'db_table': "'datapoint'"},
            'campaign': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Campaign']"}),
            'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Indicator']"}),
            'note': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Region']"}),
            'source_datapoint': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.SourceDataPoint']"}),
            'value': ('django.db.models.fields.FloatField', [], {})
        },
        u'datapoints.datapointabstracted': {
            'Meta': {'unique_together': "(('region', 'campaign'),)", 'object_name': 'DataPointAbstracted', 'db_table': "'datapoint_abstracted'"},
            'campaign': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Campaign']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicator_json': ('jsonfield.fields.JSONField', [], {}),
            'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Region']"})
        },
        u'datapoints.datapointcomputed': {
            'Meta': {'object_name': 'DataPointComputed', 'db_table': "'datapoint_with_computed'", 'managed': 'False'},
            'campaign_id': ('django.db.models.fields.IntegerField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicator_id': ('django.db.models.fields.IntegerField', [], {}),
            'region_id': ('django.db.models.fields.IntegerField', [], {}),
            'value': ('django.db.models.fields.FloatField', [], {})
        },
        u'datapoints.expecteddata': {
            'Meta': {'unique_together': "(('region', 'campaign'),)", 'object_name': 'ExpectedData', 'db_table': "'expected_data'"},
            'campaign': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Campaign']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parent_region': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ex_parent_region'", 'to': u"orm['datapoints.Region']"}),
            'region': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ex_child_region'", 'to': u"orm['datapoints.Region']"})
        },
        u'datapoints.indicator': {
            'Meta': {'ordering': "('name',)", 'object_name': 'Indicator', 'db_table': "'indicator'"},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_reported': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'short_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '255', 'populate_from': "'name'", 'unique_with': '()'}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Source']"})
        },
        u'datapoints.missingmapping': {
            'Meta': {'object_name': 'MissingMapping', 'db_table': "'vw_missing_mappings'", 'managed': 'False'},
            'datapoint': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.DataPoint']"}),
            'document': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.SourceDataPoint']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'what_is_missing': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'datapoints.office': {
            'Meta': {'object_name': 'Office', 'db_table': "'office'"},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '55'})
        },
        u'datapoints.recondata': {
            'Meta': {'unique_together': "(('region', 'campaign', 'indicator'),)", 'object_name': 'ReconData', 'db_table': "'recon_data'"},
            'campaign': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Campaign']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Indicator']"}),
            'is_raw': ('django.db.models.fields.BooleanField', [], {}),
            'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Region']"}),
            'success_flag': ('django.db.models.fields.BooleanField', [], {}),
            'target_value': ('django.db.models.fields.FloatField', [], {})
        },
        u'datapoints.region': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('name', 'region_type', 'office'),)", 'object_name': 'Region', 'db_table': "'region'"},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_high_risk': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'office': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Office']"}),
            'parent_region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Region']", 'null': 'True'}),
            'region_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'region_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.RegionType']"}),
            'shape_file_path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'slug': ('autoslug.fields.AutoSlugField', [], {'unique': 'True', 'max_length': '255', 'populate_from': "'name'", 'unique_with': '()'}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Source']"})
        },
        u'datapoints.regionheirarchy': {
            'Meta': {'object_name': 'RegionHeirarchy', 'db_table': "'region_heirarchy_cache'", 'managed': 'False'},
            'contained_by_region_id': ('django.db.models.fields.IntegerField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'region_id': ('django.db.models.fields.IntegerField', [], {}),
            'region_type_id': ('django.db.models.fields.IntegerField', [], {})
        },
        u'datapoints.regionpolygon': {
            'Meta': {'object_name': 'RegionPolygon', 'db_table': "'region_polygon'"},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'polygon': ('jsonfield.fields.JSONField', [], {}),
            'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Region']", 'unique': 'True'}),
            'shape_area': ('django.db.models.fields.FloatField', [], {}),
            'shape_len': ('django.db.models.fields.FloatField', [], {})
        },
        u'datapoints.regiontype': {
            'Meta': {'object_name': 'RegionType', 'db_table': "'region_type'"},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '55'})
        },
        u'datapoints.responsibility': {
            'Meta': {'ordering': "('indicator',)", 'unique_together': "(('user', 'indicator', 'region'),)", 'object_name': 'Responsibility', 'db_table': "'responsibility'"},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Indicator']"}),
            'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Region']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        },
        u'datapoints.simpleregion': {
            'Meta': {'object_name': 'SimpleRegion', 'db_table': "'vw_simple_region'", 'managed': 'False'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '55'}),
            'parent_region_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        u'datapoints.source': {
            'Meta': {'object_name': 'Source', 'db_table': "'source'"},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'source_description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'source_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '55'})
        },
        u'source_data.document': {
            'Meta': {'ordering': "('-id',)", 'unique_together': "(('docfile', 'doc_text'),)", 'object_name': 'Document'},
            'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
            'doc_text': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'docfile': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
            'guid': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'master_datapoint_count': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'source_datapoint_count': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
        },
        'source_data.processstatus': {
            'Meta': {'object_name': 'ProcessStatus'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'status_description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'status_text': ('django.db.models.fields.CharField', [], {'max_length': '25'})
        },
        'source_data.sourcedatapoint': {
            'Meta': {'unique_together': "(('source', 'source_guid', 'indicator_string'),)", 'object_name': 'SourceDataPoint', 'db_table': "'source_datapoint'"},
            'campaign_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'cell_value': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 3, 4, 0, 0)'}),
            'document': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['source_data.Document']"}),
            'guid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'indicator_string': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'region_code': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'row_number': ('django.db.models.fields.IntegerField', [], {}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datapoints.Source']"}),
            'source_guid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['source_data.ProcessStatus']"})
        }
    }
    complete_apps = ['datapoints']
"""SCons.Tool.Packaging.tarbz2
The tarbz2 SRC packager.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/packaging/tarbz2.py 2014/08/24 12:12:31 garyo"
from SCons.Tool.packaging import stripinstallbuilder, putintopackageroot
def package(env, target, source, PACKAGEROOT, **kw):
    """Build a bzip2-compressed tar source package under PACKAGEROOT.

    Strips any Install builders from the source list and rewrites paths
    into the package root before invoking the Tar builder.
    """
    bld = env['BUILDERS']['Tar']
    # This is the tar.bz2 packager: label the archive accordingly (it was
    # '.tar.gz', which mislabeled the bzip2-compressed output).
    bld.set_suffix('.tar.bz2')
    target, source = putintopackageroot(target, source, env, PACKAGEROOT)
    target, source = stripinstallbuilder(target, source, env)
    # -j selects bzip2 compression, -c creates the archive.
    return bld(env, target, source, TARFLAGS='-jc')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| engineer0x47/SCONS | engine/SCons/Tool/packaging/tarbz2.py | Python | mit | 1,750 |
import time
import inspect
# If the stack becomes too complex to figure out a caller we go through and assume the first valid module is the caller.
# This works reasonably well but isn't 100% accurate and will only happen if the caller is a thread.
def print_out(message, color):
    """Print *message* prefixed with a timestamp and the calling module's
    name, wrapped in the ANSI escape sequence *color* (reset afterwards).
    """
    stack = inspect.stack()
    # stack[2] is the frame of the code that called one of the level helpers
    # (info/warning/...), i.e. print_out's caller's caller.
    # Interestingly the if statement below is not executed when excepting KeyboardInterrupts. Weird.
    # To prevent a crash we assume the module's name is 'Unknown'
    module = "Unknown"
    if inspect.getmodule(stack[2][0]) is None:
        # The direct caller has no resolvable module (e.g. a thread); walk
        # outward and take the FIRST frame that does resolve, as intended —
        # the loop previously lacked a break and kept the last one instead.
        for frame_info in stack[2:]:
            if inspect.getmodule(frame_info[0]) is not None:
                module = inspect.getmodule(frame_info[0]).__name__
                break
    else:
        module = inspect.getmodule(stack[2][0]).__name__
    print("[%s] %s: %s%s\033[0m" % (time.strftime("%x %H:%M:%S"), module, color, message))
def info(message):
    # Plain output, no ANSI color.
    print_out(message, '')
def header(message):
    # \033[95m = bright magenta.
    print_out(message, '\033[95m')
def warning(message):
    # \033[93m = bright yellow.
    print_out(message, '\033[93m')
def error(message):
    # \033[91m = bright red.
    print_out(message, '\033[91m')
def success(message, color="green"):
    # Green by default; pass color="blue" for bright blue. Any other value
    # prints nothing (silently ignored).
    if color == "green":
        print_out(message, '\033[92m')
    elif color == "blue":
        print_out(message, '\033[94m')
def bold(message):
    # \033[1m = bold/bright text.
    print_out(message, '\033[1m')
def underline(message):
    # \033[4m is the ANSI SGR code for underlined text; the previous value
    # ('\033[1m', bold) made underline() behave identically to bold().
    print_out(message, '\033[4m')
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.docgen.targets.doc import Page, Wiki, WikiArtifact
from pants.backend.docgen.tasks.generate_pants_reference import GeneratePantsReference
from pants.backend.docgen.tasks.markdown_to_html import MarkdownToHtml
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.goal.task_registrar import TaskRegistrar as task
def build_file_aliases():
    """Expose the docgen BUILD-file symbols: the 'page' target type plus the
    wiki helper objects."""
    target_aliases = {
        'page': Page,
    }
    object_aliases = {
        'wiki_artifact': WikiArtifact,
        # TODO: Why is this capitalized?
        'Wiki': Wiki,
    }
    return BuildFileAliases(targets=target_aliases, objects=object_aliases)
def register_goals():
    """Register the docgen tasks: markdown-to-HTML rendering and the Pants
    reference generator."""
    # The first statement previously ended with a stray trailing comma,
    # turning it into a one-element tuple expression — harmless but
    # misleading, so it has been removed.
    task(name='markdown', action=MarkdownToHtml).install()
    task(name='reference', action=GeneratePantsReference).install()
| UnrememberMe/pants | src/python/pants/backend/docgen/register.py | Python | apache-2.0 | 1,019 |
"""
Course Goals Models
"""
import uuid
import logging
from datetime import datetime, timedelta
from django.contrib.auth import get_user_model
from django.db import models
from django.utils.translation import gettext_lazy as _
from edx_django_utils.cache import TieredCache
from model_utils import Choices
from model_utils.models import TimeStampedModel
from opaque_keys.edx.django.models import CourseKeyField
from simple_history.models import HistoricalRecords
from lms.djangoapps.courseware.masquerade import is_masquerading
from lms.djangoapps.courseware.context_processor import get_user_timezone_or_last_seen_timezone_or_utc
from openedx.core.lib.mobile_utils import is_request_from_mobile_app
from openedx.features.course_experience import ENABLE_COURSE_GOALS
# Each goal is represented by a goal key and a string description.
# The descriptions are lazily translated for display in the UI.
GOAL_KEY_CHOICES = Choices(
    ('certify', _('Earn a certificate')),
    ('complete', _('Complete the course')),
    ('explore', _('Explore the course')),
    ('unsure', _('Not sure yet')),
)
# Resolved via get_user_model() so a swapped AUTH_USER_MODEL is honored.
User = get_user_model()
# Module-level logger, following the standard logging convention.
log = logging.getLogger(__name__)
class CourseGoal(models.Model):
    """
    Represents a course goal set by a user on the course home page.

    .. no_pii:
    """
    class Meta:
        app_label = 'course_goals'
        # At most one goal per (user, course) pair.
        unique_together = ('user', 'course_key')

    user = models.ForeignKey(User, on_delete=models.CASCADE)
    course_key = CourseKeyField(max_length=255, db_index=True)
    # The goal a user has set for the number of days they want to learn per week
    days_per_week = models.PositiveIntegerField(default=0)
    # Controls whether a user will receive emails reminding them to stay on track with their learning goal
    subscribed_to_reminders = models.BooleanField(default=False)
    # With this token, anyone can unsubscribe this user from reminders. That's a mild enough action that we don't stress
    # about the risk of keeping this key around long term in the database or bother using a higher-security generator
    # than uuid4. The worst someone can do with this is unsubscribe us. And we want old tokens sitting in folks' email
    # inboxes to still be valid as long as possible.
    unsubscribe_token = models.UUIDField(null=True, blank=True, unique=True, editable=False, default=uuid.uuid4,
                                         help_text='Used to validate unsubscribe requests without requiring a login')
    # Qualitative goal key; one of GOAL_KEY_CHOICES (defaults to 'unsure').
    goal_key = models.CharField(max_length=100, choices=GOAL_KEY_CHOICES, default=GOAL_KEY_CHOICES.unsure)
    # django-simple-history change tracking for goal edits.
    history = HistoricalRecords()

    def __str__(self):
        return 'CourseGoal: {user} set goal to {goal} days per week for course {course}'.format(
            user=self.user.username,
            goal=self.days_per_week,
            course=self.course_key,
        )

    def save(self, **kwargs):  # pylint: disable=arguments-differ
        """Save the goal, backfilling ``unsubscribe_token`` when missing."""
        # Ensure we have an unsubscribe token (lazy migration from old goals, before this field was added)
        if self.unsubscribe_token is None:
            self.unsubscribe_token = uuid.uuid4()
        super().save(**kwargs)
class CourseGoalReminderStatus(TimeStampedModel):
    """
    Tracks whether we've sent a reminder about a particular goal this week.

    See the management command goal_reminder_email for more detail about how this is used.
    """
    class Meta:
        verbose_name_plural = "Course goal reminder statuses"

    # One status row per goal; deleting the goal cascades to this row.
    goal = models.OneToOneField(CourseGoal, on_delete=models.CASCADE, related_name='reminder_status')
    # Whether this week's reminder email has already been sent (see help_text).
    email_reminder_sent = models.BooleanField(
        default=False, help_text='Tracks if the email reminder to complete the Course Goal has been sent this week.'
    )
class UserActivity(models.Model):
    """
    Tracks the date a user performs an activity in a course for goal purposes.

    To be used in conjunction with the CourseGoal model to establish if a learner is hitting
    their desired days_per_week.

    To start, this model will only be tracking page views that count towards a learner's goal,
    but could grow to tracking other types of goal achieving activities in the future.

    .. no_pii:
    """
    class Meta:
        constraints = [models.UniqueConstraint(fields=['user', 'course_key', 'date'], name='unique_user_course_date')]
        indexes = [models.Index(fields=['user', 'course_key'], name='user_course_index')]
        verbose_name_plural = 'User activities'

    id = models.BigAutoField(primary_key=True)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    course_key = CourseKeyField(max_length=255)
    date = models.DateField()

    @classmethod
    def record_user_activity(cls, user, course_key, request=None, only_if_mobile_app=False):
        '''
        Update the user activity table with a record for this activity.

        Since we store one activity per date, we don't need to query the database
        for every activity on a given date.
        To avoid unnecessary queries, we store a record in a cache once we have an activity for the date,
        which times out at the end of that date (in the user's timezone).

        The request argument is only used to check if the request is coming from a mobile app.
        Once the only_if_mobile_app argument is removed the request argument can be removed as well.

        The return value is the id of the object that was created, or retrieved.
        A return value of None signifies that a user activity record was not stored or retrieved
        '''
        # Guard clauses: feature disabled, anonymous/missing user, non-mobile
        # request (when mobile-only was requested), or a masquerading user.
        if not ENABLE_COURSE_GOALS.is_enabled(course_key):
            return None
        if not (user and user.id) or not course_key:
            return None
        if only_if_mobile_app and request and not is_request_from_mobile_app(request):
            return None
        if is_masquerading(user, course_key):
            return None

        timezone = get_user_timezone_or_last_seen_timezone_or_utc(user)
        now = datetime.now(timezone)
        date = now.date()
        cache_key = 'goals_user_activity_{}_{}_{}'.format(str(user.id), str(course_key), str(date))
        cached_value = TieredCache.get_cached_response(cache_key)
        if cached_value.is_found:
            # BUGFIX: this branch previously returned a `(value, False)` tuple,
            # while the docstring and every other path promise a bare id.
            return cached_value.value

        activity_object, __ = cls.objects.get_or_create(user=user, course_key=course_key, date=date)

        # Cache result until the end of the day (in the user's timezone) to
        # avoid unnecessary database requests
        tomorrow = now + timedelta(days=1)
        midnight = datetime(year=tomorrow.year, month=tomorrow.month,
                            day=tomorrow.day, hour=0, minute=0, second=0, tzinfo=timezone)
        seconds_until_midnight = (midnight - now).seconds
        TieredCache.set_all_tiers(cache_key, activity_object.id, seconds_until_midnight)

        # Temporary debugging log for testing mobile app connection
        if request:
            log.info(
                'Set cached value with request {} for user and course combination {} {}'.format(
                    str(request.build_absolute_uri()), str(user.id), str(course_key)
                )
            )
        return activity_object.id
# Source: edx/edx-platform -- lms/djangoapps/course_goals/models.py (Python, AGPL-3.0, 7,106 bytes)
# -*- coding: utf-8 -*-
"""
celery.task.http
~~~~~~~~~~~~~~~~
Task webhooks implementation.
:copyright: (c) 2009 - 2012 by Ask Solem.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import sys
import urllib2
from urllib import urlencode
from urlparse import urlparse
try:
from urlparse import parse_qsl
except ImportError: # pragma: no cover
from cgi import parse_qsl # noqa
from anyjson import deserialize
from .. import __version__ as celery_version
from .base import Task as BaseTask
# HTTP methods whose arguments travel in the URL query string rather than
# in the request body.
GET_METHODS = frozenset(["GET", "HEAD"])
class InvalidResponseError(Exception):
    """The remote server gave an invalid response."""
class RemoteExecuteError(Exception):
    """The remote task gave a custom error."""
class UnknownStatusError(InvalidResponseError):
    """The remote server gave an unknown status."""
def maybe_utf8(value):
    """Encode unicode strings as utf-8; pass all other values through."""
    return value.encode("utf-8") if isinstance(value, unicode) else value
# utf8dict is version-dependent: on Python 3 strings are already unicode,
# so the function just coerces an items() sequence into a dict; on
# Python 2 it additionally encodes keys/values to utf-8 byte strings.
if sys.version_info >= (3, 0):
    def utf8dict(tup):
        # Already a dict: return unchanged; otherwise build one from the pairs.
        if not isinstance(tup, dict):
            return dict(tup)
        return tup
else:
    def utf8dict(tup):  # noqa
        """With a dict's items() tuple return a new dict with any utf-8
        keys/values encoded."""
        return dict((key.encode("utf-8"), maybe_utf8(value))
                        for key, value in tup)
def extract_response(raw_response):
    """Extract the return value from a raw JSON webhook response.

    :param raw_response: JSON document containing a ``status`` key and
        either ``retval`` (on success) or ``reason`` (on failure).
    :raises InvalidResponseError: if the body is empty or not valid JSON.
    :raises RemoteExecuteError: if the remote task reported a failure.
    :raises UnknownStatusError: if ``status`` is neither success nor failure.
    """
    if not raw_response:
        raise InvalidResponseError("Empty response")
    try:
        payload = deserialize(raw_response)
    except ValueError, exc:
        # Python 2 three-argument raise: re-raise as InvalidResponseError
        # while preserving the original traceback.
        raise InvalidResponseError, InvalidResponseError(
            str(exc)), sys.exc_info()[2]
    status = payload["status"]
    if status == "success":
        return payload["retval"]
    elif status == "failure":
        raise RemoteExecuteError(payload.get("reason"))
    else:
        raise UnknownStatusError(str(status))
class MutableURL(object):
    """A Uniform Resource Locator with an editable query parameter list.

    The query string is exposed as a plain dict; converting the object
    back to a string re-encodes the (possibly modified) query properly.

    Examples

        >>> url = URL("http://www.google.com:6580/foo/bar?x=3&y=4#foo")
        >>> url.query
        {'x': '3', 'y': '4'}
        >>> str(url)
        'http://www.google.com:6580/foo/bar?y=4&x=3#foo'

        >>> url.query["x"] = 10
        >>> url.query.update({"George": "Costanza"})
        >>> str(url)
        'http://www.google.com:6580/foo/bar?y=4&x=10&George=Costanza#foo'

    """
    def __init__(self, url):
        # parts is the 6-tuple from urlparse(); index 4 holds the raw query.
        self.parts = urlparse(url)
        self.query = dict(parse_qsl(self.parts[4]))

    def __str__(self):
        scheme, netloc, path, params, _, fragment = self.parts
        encoded_query = urlencode(utf8dict(self.query.items()))
        pieces = [scheme + "://", netloc, path or "/"]
        if params:
            pieces.append(";%s" % params)
        if encoded_query:
            pieces.append("?%s" % encoded_query)
        if fragment:
            pieces.append("#%s" % fragment)
        return "".join(pieces)

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, str(self))
class HttpDispatch(object):
    """Make task HTTP request and collect the task result.

    :param url: The URL to request.
    :param method: HTTP method used. Currently supported methods are `GET`
        and `POST`.
    :param task_kwargs: Task keyword arguments.
    :param logger: Logger used for user/system feedback.

    """
    user_agent = "celery/%s" % celery_version
    timeout = 5

    def __init__(self, url, method, task_kwargs, logger):
        self.url = url
        self.method = method
        self.task_kwargs = task_kwargs
        self.logger = logger

    def make_request(self, url, method, params):
        """Perform the HTTP request and return the raw response body."""
        request = urllib2.Request(url, params)
        for header, value in self.http_headers.items():
            request.add_header(header, value)
        # Errors raised by urlopen are deliberately left for the caller.
        return urllib2.urlopen(request).read()

    def dispatch(self):
        """Dispatch callback and return result."""
        url = MutableURL(self.url)
        if self.method in GET_METHODS:
            # GET/HEAD: arguments are appended to the query string.
            url.query.update(self.task_kwargs)
            post_data = None
        else:
            # Other methods: arguments travel in the request body.
            post_data = urlencode(utf8dict(self.task_kwargs.items()))
        raw = self.make_request(str(url), self.method, post_data)
        return extract_response(raw)

    @property
    def http_headers(self):
        return {"User-Agent": self.user_agent}
class HttpDispatchTask(BaseTask):
    """Task dispatching to an URL.

    :keyword url: The URL location of the HTTP callback task.
    :keyword method: Method to use when dispatching the callback. Usually
        `GET` or `POST`.
    :keyword \*\*kwargs: Keyword arguments to pass on to the HTTP callback.

    .. attribute:: url

        If this is set, this is used as the default URL for requests.
        Default is to require the user of the task to supply the url as an
        argument, as this attribute is intended for subclasses.

    .. attribute:: method

        If this is set, this is the default method used for requests.
        Default is to require the user of the task to supply the method as an
        argument, as this attribute is intended for subclasses.

    """
    url = None
    method = None

    def run(self, url=None, method=None, **kwargs):
        # BUGFIX: `method` previously defaulted to "GET", so the expression
        # `method or self.method` could never fall through to the `method`
        # class attribute documented above -- subclasses setting it were
        # silently ignored.  Defaulting to None restores the documented
        # precedence: explicit argument > class attribute > "GET".
        url = url or self.url
        method = method or self.method or "GET"
        logger = self.get_logger(**kwargs)
        return HttpDispatch(url, method, kwargs, logger).dispatch()
class URL(MutableURL):
    """HTTP Callback URL

    Supports requesting an URL asynchronously.

    :param url: URL to request.
    :keyword dispatcher: Class used to dispatch the request.
        By default this is :class:`HttpDispatchTask`.

    """
    dispatcher = HttpDispatchTask

    def __init__(self, url, dispatcher=None):
        super(URL, self).__init__(url)
        self.dispatcher = dispatcher or self.dispatcher

    def _request_async(self, http_method, kwargs):
        # Shared implementation for the per-method convenience wrappers.
        return self.dispatcher.delay(str(self), http_method, **kwargs)

    def get_async(self, **kwargs):
        return self._request_async("GET", kwargs)

    def post_async(self, **kwargs):
        return self._request_async("POST", kwargs)
# Source: mozilla/make.mozilla.org -- vendor-local/lib/python/celery/task/http.py (Python, BSD-3-Clause, 6,477 bytes)
# Module-level debug switches; UNIT_TEST_DEBUG additionally forces DEBUG on
# when the real `sublime` module is unavailable (see the fallback below).
DEBUG = False
UNIT_TEST_DEBUG = False
import os
try:
    import sublime
except ImportError:
    # Running outside Sublime Text (e.g. under unit tests): fall back to
    # the bundled mock module.
    from mocking.sublime import sublime
    if UNIT_TEST_DEBUG:
        DEBUG = True
        print("SublimeWindowFunctions: sublime and sublime_plugin not imported in " + __file__)
    else:
        DEBUG = False
# NOTE(review): commented-out import kept from the original author.
"""try:
    from FileCreator import FileCreator
except ImportError:
    from .FileCreator import FileCreator
"""
class SublimeWindowFunctions():
    """Convenience wrappers around a Sublime Text window instance."""

    def __init__(self, windowInstance, settings):
        # The sublime.Window this helper operates on.
        self._windowInstance = windowInstance
        # Plugin settings object (used by the commented-out helper below).
        self._settings = settings

    # This function is horrible!!!!
    """def getCurrentDirectory(self):
        view = self._windowInstance.active_view()
        fileFolder = view.file_name()
        result = None
        if fileFolder is not None:
            fileFolder = os.path.dirname(fileFolder)
            fc = FileCreator(self._settings.get('base_path'), "")
            fc.kind = FileCreator.KIND_IS_TEST
            fc2 = FileCreator("", fileFolder)
            fc2.kind = FileCreator.KIND_IS_TEST
            basePath = fc.getBasePath()
            pathName = fc2.getBasePath()
            basePathLen = len(basePath)
            fileBeginning = pathName[0:basePathLen]
            if fileBeginning == basePath:
                pathName = pathName[basePathLen:]
                result = pathName
                if len(result) > 0:
                    result += "/"
        if result is None:
            result = FileCreator.getStandardizedPath(self._settings.get('current_path'), False, True)
        if result is None:
            result = ""
        return result.replace('//', '/')  # TODO: lazy fix for // appearing when no current_path was provided
    """

    # reexamine if still in use
    def getCurrentFileName(self):
        """Return the basename of the file shown in the window's active view."""
        view = self._windowInstance.active_view()
        fileName = view.file_name()
        # os.path.basename honours the platform's path separator(s); the
        # previous rfind("/") approach returned the full path for
        # Windows-style backslash paths.
        return os.path.basename(fileName)
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION, ANYXML_CLASS
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'EntrystatusEnum' : _MetaInfoEnum('EntrystatusEnum', 'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB',
{
'valid':'valid',
'createRequest':'createRequest',
'underCreation':'underCreation',
'invalid':'invalid',
}, 'TOKEN-RING-RMON-MIB', _yang_ns._namespaces['TOKEN-RING-RMON-MIB']),
'TokenRingRmonMib.Tokenringmlstatstable.Tokenringmlstatsentry' : {
'meta_info' : _MetaInfoClass('TokenRingRmonMib.Tokenringmlstatstable.Tokenringmlstatsentry',
False,
[
_MetaInfoClassMember('tokenRingMLStatsIndex', ATTRIBUTE, 'int' , None, None,
[('1', '65535')], [],
''' The value of this object uniquely identifies this
tokenRingMLStats entry.
''',
'tokenringmlstatsindex',
'TOKEN-RING-RMON-MIB', True),
_MetaInfoClassMember('tokenRingMLStatsAbortErrors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of abort delimiters reported in
error reporting packets detected by the probe.
''',
'tokenringmlstatsaborterrors',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLStatsACErrors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of AC (Address Copied) errors
reported in error reporting packets detected by
the probe.
''',
'tokenringmlstatsacerrors',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLStatsBeaconEvents', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of times that the ring enters a
beaconing state (beaconFrameStreamingState,
beaconBitStreamingState,
beaconSetRecoveryModeState, or
beaconRingSignalLossState) from a non-beaconing
state. Note that a change of the source address
of the beacon packet does not constitute a new
beacon event.
''',
'tokenringmlstatsbeaconevents',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLStatsBeaconPkts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of beacon MAC packets detected
by the probe.
''',
'tokenringmlstatsbeaconpkts',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLStatsBeaconTime', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' The total amount of time that the ring has been
in the beaconing state.
''',
'tokenringmlstatsbeacontime',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLStatsBurstErrors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of burst errors reported in
error reporting packets detected by the probe.
''',
'tokenringmlstatsbursterrors',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLStatsClaimTokenEvents', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of times that the ring enters
the claim token state from normal ring state or
ring purge state. The claim token state that
comes in response to a beacon state is not
counted.
''',
'tokenringmlstatsclaimtokenevents',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLStatsClaimTokenPkts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of claim token MAC packets
detected by the probe.
''',
'tokenringmlstatsclaimtokenpkts',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLStatsCongestionErrors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of receive congestion errors
reported in error reporting packets detected by
the probe.
''',
'tokenringmlstatscongestionerrors',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLStatsDataSource', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-1](\\.[1-3]?[0-9]))|(2\\.(0|([1-9]\\d*))))(\\.(0|([1-9]\\d*)))*'],
''' This object identifies the source of the data
that this tokenRingMLStats entry is configured to
analyze. This source can be any tokenRing
interface on this device. In order to identify a
particular interface, this object shall identify
the instance of the ifIndex object, defined in
MIB-II [3], for the desired interface. For
example, if an entry were to receive data from
interface #1, this object would be set to
ifIndex.1.
The statistics in this group reflect all error
reports on the local network segment attached to
the identified interface.
This object may not be modified if the associated
tokenRingMLStatsStatus object is equal to
valid(1).
''',
'tokenringmlstatsdatasource',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLStatsDropEvents', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of events in which packets were
dropped by the probe due to lack of resources.
Note that this number is not necessarily the
number of packets dropped; it is just the number
of times this condition has been detected. This
value is the same as the corresponding
tokenRingPStatsDropEvents.
''',
'tokenringmlstatsdropevents',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLStatsFrameCopiedErrors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of frame copied errors reported
in error reporting packets detected by the probe.
''',
'tokenringmlstatsframecopiederrors',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLStatsFrequencyErrors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of frequency errors reported in
error reporting packets detected by the probe.
''',
'tokenringmlstatsfrequencyerrors',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLStatsInternalErrors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of adapter internal errors
reported in error reporting packets detected by
the probe.
''',
'tokenringmlstatsinternalerrors',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLStatsLineErrors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of line errors reported in error
reporting packets detected by the probe.
''',
'tokenringmlstatslineerrors',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLStatsLostFrameErrors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of lost frame errors reported in
error reporting packets detected by the probe.
''',
'tokenringmlstatslostframeerrors',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLStatsMacOctets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of octets of data in MAC packets
(excluding those that were not good frames)
received on the network (excluding framing bits
but including FCS octets).
''',
'tokenringmlstatsmacoctets',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLStatsMacPkts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of MAC packets (excluding
packets that were not good frames) received.
''',
'tokenringmlstatsmacpkts',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLStatsNAUNChanges', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of NAUN changes detected by the
probe.
''',
'tokenringmlstatsnaunchanges',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLStatsOwner', ATTRIBUTE, 'str' , None, None,
[], [],
''' The entity that configured this entry and is
therefore using the resources assigned to it.
''',
'tokenringmlstatsowner',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLStatsRingPollEvents', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of ring poll events detected by
the probe (i.e. the number of ring polls initiated
by the active monitor that were detected).
''',
'tokenringmlstatsringpollevents',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLStatsRingPurgeEvents', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of times that the ring enters
the ring purge state from normal ring state. The
ring purge state that comes in response to the
claim token or beacon state is not counted.
''',
'tokenringmlstatsringpurgeevents',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLStatsRingPurgePkts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of ring purge MAC packets
detected by probe.
''',
'tokenringmlstatsringpurgepkts',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLStatsSoftErrorReports', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of soft error report frames
detected by the probe.
''',
'tokenringmlstatssofterrorreports',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLStatsStatus', REFERENCE_ENUM_CLASS, 'EntrystatusEnum' , 'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB', 'EntrystatusEnum',
[], [],
''' The status of this tokenRingMLStats entry.
''',
'tokenringmlstatsstatus',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLStatsTokenErrors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of token errors reported in
error reporting packets detected by the probe.
''',
'tokenringmlstatstokenerrors',
'TOKEN-RING-RMON-MIB', False),
],
'TOKEN-RING-RMON-MIB',
'tokenRingMLStatsEntry',
_yang_ns._namespaces['TOKEN-RING-RMON-MIB'],
'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB'
),
},
'TokenRingRmonMib.Tokenringmlstatstable' : {
'meta_info' : _MetaInfoClass('TokenRingRmonMib.Tokenringmlstatstable',
False,
[
_MetaInfoClassMember('tokenRingMLStatsEntry', REFERENCE_LIST, 'Tokenringmlstatsentry' , 'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB', 'TokenRingRmonMib.Tokenringmlstatstable.Tokenringmlstatsentry',
[], [],
''' A collection of Mac-Layer statistics kept for a
particular Token Ring interface.
''',
'tokenringmlstatsentry',
'TOKEN-RING-RMON-MIB', False),
],
'TOKEN-RING-RMON-MIB',
'tokenRingMLStatsTable',
_yang_ns._namespaces['TOKEN-RING-RMON-MIB'],
'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB'
),
},
'TokenRingRmonMib.Tokenringpstatstable.Tokenringpstatsentry' : {
'meta_info' : _MetaInfoClass('TokenRingRmonMib.Tokenringpstatstable.Tokenringpstatsentry',
False,
[
_MetaInfoClassMember('tokenRingPStatsIndex', ATTRIBUTE, 'int' , None, None,
[('1', '65535')], [],
''' The value of this object uniquely identifies this
tokenRingPStats entry.
''',
'tokenringpstatsindex',
'TOKEN-RING-RMON-MIB', True),
_MetaInfoClassMember('tokenRingPStatsDataBroadcastPkts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of good non-MAC frames received
that were directed to an LLC broadcast address
(0xFFFFFFFFFFFF or 0xC000FFFFFFFF).
''',
'tokenringpstatsdatabroadcastpkts',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingPStatsDataMulticastPkts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of good non-MAC frames received
that were directed to a local or global multicast
or functional address. Note that this number does
not include packets directed to the broadcast
address.
''',
'tokenringpstatsdatamulticastpkts',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingPStatsDataOctets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of octets of data in good frames
received on the network (excluding framing bits
but including FCS octets) in non-MAC packets.
''',
'tokenringpstatsdataoctets',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingPStatsDataPkts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of non-MAC packets in good
frames. received.
''',
'tokenringpstatsdatapkts',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingPStatsDataPkts1024to2047Octets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of good non-MAC frames received
that were between 1024 and 2047 octets in length
inclusive, excluding framing bits but including
FCS octets.
''',
'tokenringpstatsdatapkts1024to2047octets',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingPStatsDataPkts128to255Octets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of good non-MAC frames received
that were between 128 and 255 octets in length
inclusive, excluding framing bits but including
FCS octets.
''',
'tokenringpstatsdatapkts128to255octets',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingPStatsDataPkts18to63Octets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of good non-MAC frames received
that were between 18 and 63 octets in length
inclusive, excluding framing bits but including
FCS octets.
''',
'tokenringpstatsdatapkts18to63octets',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingPStatsDataPkts2048to4095Octets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of good non-MAC frames received
that were between 2048 and 4095 octets in length
inclusive, excluding framing bits but including
FCS octets.
''',
'tokenringpstatsdatapkts2048to4095octets',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingPStatsDataPkts256to511Octets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of good non-MAC frames received
that were between 256 and 511 octets in length
inclusive, excluding framing bits but including
FCS octets.
''',
'tokenringpstatsdatapkts256to511octets',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingPStatsDataPkts4096to8191Octets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of good non-MAC frames received
that were between 4096 and 8191 octets in length
inclusive, excluding framing bits but including
FCS octets.
''',
'tokenringpstatsdatapkts4096to8191octets',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingPStatsDataPkts512to1023Octets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of good non-MAC frames received
that were between 512 and 1023 octets in length
inclusive, excluding framing bits but including
FCS octets.
''',
'tokenringpstatsdatapkts512to1023octets',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingPStatsDataPkts64to127Octets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of good non-MAC frames received
that were between 64 and 127 octets in length
inclusive, excluding framing bits but including
FCS octets.
''',
'tokenringpstatsdatapkts64to127octets',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingPStatsDataPkts8192to18000Octets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of good non-MAC frames received
that were between 8192 and 18000 octets in length
inclusive, excluding framing bits but including
FCS octets.
''',
'tokenringpstatsdatapkts8192to18000octets',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingPStatsDataPktsGreaterThan18000Octets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of good non-MAC frames received
that were greater than 18000 octets in length,
excluding framing bits but including FCS octets.
''',
'tokenringpstatsdatapktsgreaterthan18000octets',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingPStatsDataSource', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-1](\\.[1-3]?[0-9]))|(2\\.(0|([1-9]\\d*))))(\\.(0|([1-9]\\d*)))*'],
''' This object identifies the source of the data
that this tokenRingPStats entry is configured to
analyze. This source can be any tokenRing
interface on this device. In order to identify a
particular interface, this object shall identify
the instance of the ifIndex object, defined in
MIB-II [3], for the desired interface. For
example, if an entry were to receive data from
interface #1, this object would be set to
ifIndex.1.
The statistics in this group reflect all non-MAC
packets on the local network segment attached to
the identified interface.
This object may not be modified if the associated
tokenRingPStatsStatus object is equal to
valid(1).
''',
'tokenringpstatsdatasource',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingPStatsDropEvents', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of events in which packets were
dropped by the probe due to lack of resources.
Note that this number is not necessarily the
number of packets dropped; it is just the number
of times this condition has been detected. This
value is the same as the corresponding
tokenRingMLStatsDropEvents
''',
'tokenringpstatsdropevents',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingPStatsOwner', ATTRIBUTE, 'str' , None, None,
[], [],
''' The entity that configured this entry and is
therefore using the resources assigned to it.
''',
'tokenringpstatsowner',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingPStatsStatus', REFERENCE_ENUM_CLASS, 'EntrystatusEnum' , 'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB', 'EntrystatusEnum',
[], [],
''' The status of this tokenRingPStats entry.
''',
'tokenringpstatsstatus',
'TOKEN-RING-RMON-MIB', False),
],
'TOKEN-RING-RMON-MIB',
'tokenRingPStatsEntry',
_yang_ns._namespaces['TOKEN-RING-RMON-MIB'],
'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB'
),
},
'TokenRingRmonMib.Tokenringpstatstable' : {
'meta_info' : _MetaInfoClass('TokenRingRmonMib.Tokenringpstatstable',
False,
[
_MetaInfoClassMember('tokenRingPStatsEntry', REFERENCE_LIST, 'Tokenringpstatsentry' , 'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB', 'TokenRingRmonMib.Tokenringpstatstable.Tokenringpstatsentry',
[], [],
''' A collection of promiscuous statistics kept for
non-MAC packets on a particular Token Ring
interface.
''',
'tokenringpstatsentry',
'TOKEN-RING-RMON-MIB', False),
],
'TOKEN-RING-RMON-MIB',
'tokenRingPStatsTable',
_yang_ns._namespaces['TOKEN-RING-RMON-MIB'],
'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB'
),
},
'TokenRingRmonMib.Tokenringmlhistorytable.Tokenringmlhistoryentry' : {
'meta_info' : _MetaInfoClass('TokenRingRmonMib.Tokenringmlhistorytable.Tokenringmlhistoryentry',
False,
[
_MetaInfoClassMember('tokenRingMLHistoryIndex', ATTRIBUTE, 'int' , None, None,
[('1', '65535')], [],
''' The history of which this entry is a part. The
history identified by a particular value of this
index is the same history as identified by the
same value of historyControlIndex.
''',
'tokenringmlhistoryindex',
'TOKEN-RING-RMON-MIB', True),
_MetaInfoClassMember('tokenRingMLHistorySampleIndex', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' An index that uniquely identifies the particular
Mac-Layer sample this entry represents among all
Mac-Layer samples associated with the same
historyControlEntry. This index starts at 1 and
increases by one as each new sample is taken.
''',
'tokenringmlhistorysampleindex',
'TOKEN-RING-RMON-MIB', True),
_MetaInfoClassMember('tokenRingMLHistoryAbortErrors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of abort delimiters reported in
error reporting packets detected by the probe
during this sampling interval.
''',
'tokenringmlhistoryaborterrors',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLHistoryACErrors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of AC (Address Copied) errors
reported in error reporting packets detected by
the probe during this sampling interval.
''',
'tokenringmlhistoryacerrors',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLHistoryActiveStations', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' The maximum number of active stations on the ring
detected by the probe during this sampling
interval.
''',
'tokenringmlhistoryactivestations',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLHistoryBeaconEvents', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of times that the ring enters a
beaconing state (beaconFrameStreamingState,
beaconBitStreamingState,
beaconSetRecoveryModeState, or
beaconRingSignalLossState) during this sampling
interval. Note that a change of the source
address of the beacon packet does not constitute a
new beacon event.
''',
'tokenringmlhistorybeaconevents',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLHistoryBeaconPkts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of beacon MAC packets detected
by the probe during this sampling interval.
''',
'tokenringmlhistorybeaconpkts',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLHistoryBeaconTime', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' The amount of time that the ring has been in the
beaconing state during this sampling interval.
''',
'tokenringmlhistorybeacontime',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLHistoryBurstErrors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of burst errors reported in
error reporting packets detected by the probe
during this sampling interval.
''',
'tokenringmlhistorybursterrors',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLHistoryClaimTokenEvents', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of times that the ring enters
the claim token state from normal ring state or
ring purge state during this sampling interval.
The claim token state that comes from the beacon
state is not counted.
''',
'tokenringmlhistoryclaimtokenevents',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLHistoryClaimTokenPkts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of claim token MAC packets
detected by the probe during this sampling
interval.
''',
'tokenringmlhistoryclaimtokenpkts',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLHistoryCongestionErrors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of receive congestion errors
reported in error reporting packets detected by
the probe during this sampling interval.
''',
'tokenringmlhistorycongestionerrors',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLHistoryDropEvents', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of events in which packets were
dropped by the probe due to lack of resources
during this sampling interval. Note that this
number is not necessarily the number of packets
dropped, it is just the number of times this
condition has been detected.
''',
'tokenringmlhistorydropevents',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLHistoryFrameCopiedErrors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of frame copied errors reported
in error reporting packets detected by the probe
during this sampling interval.
''',
'tokenringmlhistoryframecopiederrors',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLHistoryFrequencyErrors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of frequency errors reported in
error reporting packets detected by the probe
during this sampling interval.
''',
'tokenringmlhistoryfrequencyerrors',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLHistoryInternalErrors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of adapter internal errors
reported in error reporting packets detected by
the probe during this sampling interval.
''',
'tokenringmlhistoryinternalerrors',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLHistoryIntervalStart', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The value of sysUpTime at the start of the
interval over which this sample was measured. If
the probe keeps track of the time of day, it
should start the first sample of the history at a
time such that when the next hour of the day
begins, a sample is started at that instant. Note
that following this rule may require the probe to
delay collecting the first sample of the history,
as each sample must be of the same interval. Also
note that the sample which is currently being
collected is not accessible in this table until
the end of its interval.
''',
'tokenringmlhistoryintervalstart',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLHistoryLineErrors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of line errors reported in error
reporting packets detected by the probe during
this sampling interval.
''',
'tokenringmlhistorylineerrors',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLHistoryLostFrameErrors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of lost frame errors reported in
error reporting packets detected by the probe
during this sampling interval.
''',
'tokenringmlhistorylostframeerrors',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLHistoryMacOctets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of octets of data in MAC packets
(excluding those that were not good frames)
received on the network during this sampling
interval (excluding framing bits but including FCS
octets).
''',
'tokenringmlhistorymacoctets',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLHistoryMacPkts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of MAC packets (excluding those
that were not good frames) received during this
sampling interval.
''',
'tokenringmlhistorymacpkts',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLHistoryNAUNChanges', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of NAUN changes detected by the
probe during this sampling interval.
''',
'tokenringmlhistorynaunchanges',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLHistoryRingPollEvents', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of ring poll events detected by
the probe during this sampling interval.
''',
'tokenringmlhistoryringpollevents',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLHistoryRingPurgeEvents', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of times that the ring entered
the ring purge state from normal ring state during
this sampling interval. The ring purge state that
comes from the claim token or beacon state is not
counted.
''',
'tokenringmlhistoryringpurgeevents',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLHistoryRingPurgePkts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of Ring Purge MAC packets
detected by the probe during this sampling
interval.
''',
'tokenringmlhistoryringpurgepkts',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLHistorySoftErrorReports', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of soft error report frames
detected by the probe during this sampling
interval.
''',
'tokenringmlhistorysofterrorreports',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLHistoryTokenErrors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of token errors reported in
error reporting packets detected by the probe
during this sampling interval.
''',
'tokenringmlhistorytokenerrors',
'TOKEN-RING-RMON-MIB', False),
],
'TOKEN-RING-RMON-MIB',
'tokenRingMLHistoryEntry',
_yang_ns._namespaces['TOKEN-RING-RMON-MIB'],
'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB'
),
},
'TokenRingRmonMib.Tokenringmlhistorytable' : {
'meta_info' : _MetaInfoClass('TokenRingRmonMib.Tokenringmlhistorytable',
False,
[
_MetaInfoClassMember('tokenRingMLHistoryEntry', REFERENCE_LIST, 'Tokenringmlhistoryentry' , 'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB', 'TokenRingRmonMib.Tokenringmlhistorytable.Tokenringmlhistoryentry',
[], [],
''' A collection of Mac-Layer statistics kept for a
particular Token Ring interface.
''',
'tokenringmlhistoryentry',
'TOKEN-RING-RMON-MIB', False),
],
'TOKEN-RING-RMON-MIB',
'tokenRingMLHistoryTable',
_yang_ns._namespaces['TOKEN-RING-RMON-MIB'],
'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB'
),
},
'TokenRingRmonMib.Tokenringphistorytable.Tokenringphistoryentry' : {
'meta_info' : _MetaInfoClass('TokenRingRmonMib.Tokenringphistorytable.Tokenringphistoryentry',
False,
[
_MetaInfoClassMember('tokenRingPHistoryIndex', ATTRIBUTE, 'int' , None, None,
[('1', '65535')], [],
''' The history of which this entry is a part. The
history identified by a particular value of this
index is the same history as identified by the
same value of historyControlIndex.
''',
'tokenringphistoryindex',
'TOKEN-RING-RMON-MIB', True),
_MetaInfoClassMember('tokenRingPHistorySampleIndex', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' An index that uniquely identifies the particular
sample this entry represents among all samples
associated with the same historyControlEntry.
This index starts at 1 and increases by one as
each new sample is taken.
''',
'tokenringphistorysampleindex',
'TOKEN-RING-RMON-MIB', True),
_MetaInfoClassMember('tokenRingPHistoryDataBroadcastPkts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of good non-MAC frames received
during this sampling interval that were directed
to an LLC broadcast address (0xFFFFFFFFFFFF or
0xC000FFFFFFFF).
''',
'tokenringphistorydatabroadcastpkts',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingPHistoryDataMulticastPkts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of good non-MAC frames received
during this sampling interval that were directed
to a local or global multicast or functional
address. Note that this number does not include
packets directed to the broadcast address.
''',
'tokenringphistorydatamulticastpkts',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingPHistoryDataOctets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of octets of data in good frames
received on the network (excluding framing bits
but including FCS octets) in non-MAC packets
during this sampling interval.
''',
'tokenringphistorydataoctets',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingPHistoryDataPkts', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of good non-MAC frames received
during this sampling interval.
''',
'tokenringphistorydatapkts',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingPHistoryDataPkts1024to2047Octets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of good non-MAC frames received
during this sampling interval that were between
1024 and 2047 octets in length inclusive,
excluding framing bits but including FCS octets.
''',
'tokenringphistorydatapkts1024to2047octets',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingPHistoryDataPkts128to255Octets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of good non-MAC frames received
during this sampling interval that were between
128 and 255 octets in length inclusive, excluding
framing bits but including FCS octets.
''',
'tokenringphistorydatapkts128to255octets',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingPHistoryDataPkts18to63Octets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of good non-MAC frames received
during this sampling interval that were between 18
and 63 octets in length inclusive, excluding
framing bits but including FCS octets.
''',
'tokenringphistorydatapkts18to63octets',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingPHistoryDataPkts2048to4095Octets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of good non-MAC frames received
during this sampling interval that were between
2048 and 4095 octets in length inclusive,
excluding framing bits but including FCS octets.
''',
'tokenringphistorydatapkts2048to4095octets',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingPHistoryDataPkts256to511Octets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of good non-MAC frames received
during this sampling interval that were between
256 and 511 octets in length inclusive, excluding
framing bits but including FCS octets.
''',
'tokenringphistorydatapkts256to511octets',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingPHistoryDataPkts4096to8191Octets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of good non-MAC frames received
during this sampling interval that were between
4096 and 8191 octets in length inclusive,
excluding framing bits but including FCS octets.
''',
'tokenringphistorydatapkts4096to8191octets',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingPHistoryDataPkts512to1023Octets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of good non-MAC frames received
during this sampling interval that were between
512 and 1023 octets in length inclusive, excluding
framing bits but including FCS octets.
''',
'tokenringphistorydatapkts512to1023octets',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingPHistoryDataPkts64to127Octets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of good non-MAC frames received
during this sampling interval that were between 64
and 127 octets in length inclusive, excluding
framing bits but including FCS octets.
''',
'tokenringphistorydatapkts64to127octets',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingPHistoryDataPkts8192to18000Octets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of good non-MAC frames received
during this sampling interval that were between
8192 and 18000 octets in length inclusive,
excluding framing bits but including FCS octets.
''',
'tokenringphistorydatapkts8192to18000octets',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingPHistoryDataPktsGreaterThan18000Octets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of good non-MAC frames received
during this sampling interval that were greater
than 18000 octets in length, excluding framing
bits but including FCS octets.
''',
'tokenringphistorydatapktsgreaterthan18000octets',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingPHistoryDropEvents', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of events in which packets were
dropped by the probe due to lack of resources
during this sampling interval. Note that this
number is not necessarily the number of packets
dropped, it is just the number of times this
condition has been detected.
''',
'tokenringphistorydropevents',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingPHistoryIntervalStart', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The value of sysUpTime at the start of the
interval over which this sample was measured. If
the probe keeps track of the time of day, it
should start the first sample of the history at a
time such that when the next hour of the day
begins, a sample is started at that instant. Note
that following this rule may require the probe to
delay collecting the first sample of the history,
as each sample must be of the same interval. Also
note that the sample which is currently being
collected is not accessible in this table until
the end of its interval.
''',
'tokenringphistoryintervalstart',
'TOKEN-RING-RMON-MIB', False),
],
'TOKEN-RING-RMON-MIB',
'tokenRingPHistoryEntry',
_yang_ns._namespaces['TOKEN-RING-RMON-MIB'],
'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB'
),
},
'TokenRingRmonMib.Tokenringphistorytable' : {
'meta_info' : _MetaInfoClass('TokenRingRmonMib.Tokenringphistorytable',
False,
[
_MetaInfoClassMember('tokenRingPHistoryEntry', REFERENCE_LIST, 'Tokenringphistoryentry' , 'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB', 'TokenRingRmonMib.Tokenringphistorytable.Tokenringphistoryentry',
[], [],
''' A collection of promiscuous statistics kept for a
particular Token Ring interface.
''',
'tokenringphistoryentry',
'TOKEN-RING-RMON-MIB', False),
],
'TOKEN-RING-RMON-MIB',
'tokenRingPHistoryTable',
_yang_ns._namespaces['TOKEN-RING-RMON-MIB'],
'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB'
),
},
'TokenRingRmonMib.Ringstationcontroltable.Ringstationcontrolentry.RingstationcontrolringstateEnum' : _MetaInfoEnum('RingstationcontrolringstateEnum', 'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB',
{
'normalOperation':'normalOperation',
'ringPurgeState':'ringPurgeState',
'claimTokenState':'claimTokenState',
'beaconFrameStreamingState':'beaconFrameStreamingState',
'beaconBitStreamingState':'beaconBitStreamingState',
'beaconRingSignalLossState':'beaconRingSignalLossState',
'beaconSetRecoveryModeState':'beaconSetRecoveryModeState',
}, 'TOKEN-RING-RMON-MIB', _yang_ns._namespaces['TOKEN-RING-RMON-MIB']),
'TokenRingRmonMib.Ringstationcontroltable.Ringstationcontrolentry' : {
'meta_info' : _MetaInfoClass('TokenRingRmonMib.Ringstationcontroltable.Ringstationcontrolentry',
False,
[
_MetaInfoClassMember('ringStationControlIfIndex', ATTRIBUTE, 'int' , None, None,
[('1', '65535')], [],
''' The value of this object uniquely identifies the
interface on this remote network monitoring device
from which ringStation data is collected. The
interface identified by a particular value of this
object is the same interface as identified by the
same value of the ifIndex object, defined in MIB-
II [3].
''',
'ringstationcontrolifindex',
'TOKEN-RING-RMON-MIB', True),
_MetaInfoClassMember('ringStationControlActiveMonitor', ATTRIBUTE, 'str' , None, None,
[(6, None)], [],
''' The address of the Active Monitor on this
segment. If this address is unknown, this object
shall be equal to six octets of zero.
''',
'ringstationcontrolactivemonitor',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationControlActiveStations', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' The number of active ringStationEntries in the
ringStationTable associated with this
ringStationControlEntry.
''',
'ringstationcontrolactivestations',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationControlBeaconNAUN', ATTRIBUTE, 'str' , None, None,
[(6, None)], [],
''' The address of the NAUN in the last beacon frame
received by the probe on this ring. If no beacon
frames have been received, this object shall be
equal to six octets of zero.
''',
'ringstationcontrolbeaconnaun',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationControlBeaconSender', ATTRIBUTE, 'str' , None, None,
[(6, None)], [],
''' The address of the sender of the last beacon
frame received by the probe on this ring. If no
beacon frames have been received, this object
shall be equal to six octets of zero.
''',
'ringstationcontrolbeaconsender',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationControlOrderChanges', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of add and delete events in the
ringStationOrderTable optionally associated with
this ringStationControlEntry.
''',
'ringstationcontrolorderchanges',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationControlOwner', ATTRIBUTE, 'str' , None, None,
[], [],
''' The entity that configured this entry and is
therefore using the resources assigned to it.
''',
'ringstationcontrolowner',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationControlRingState', REFERENCE_ENUM_CLASS, 'RingstationcontrolringstateEnum' , 'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB', 'TokenRingRmonMib.Ringstationcontroltable.Ringstationcontrolentry.RingstationcontrolringstateEnum',
[], [],
''' The current status of this ring.
''',
'ringstationcontrolringstate',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationControlStatus', REFERENCE_ENUM_CLASS, 'EntrystatusEnum' , 'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB', 'EntrystatusEnum',
[], [],
''' The status of this ringStationControl entry.
If this object is not equal to valid(1), all
associated entries in the ringStationTable shall
be deleted by the agent.
''',
'ringstationcontrolstatus',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationControlTableSize', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' The number of ringStationEntries in the
ringStationTable associated with this
ringStationControlEntry.
''',
'ringstationcontroltablesize',
'TOKEN-RING-RMON-MIB', False),
],
'TOKEN-RING-RMON-MIB',
'ringStationControlEntry',
_yang_ns._namespaces['TOKEN-RING-RMON-MIB'],
'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB'
),
},
'TokenRingRmonMib.Ringstationcontroltable' : {
'meta_info' : _MetaInfoClass('TokenRingRmonMib.Ringstationcontroltable',
False,
[
_MetaInfoClassMember('ringStationControlEntry', REFERENCE_LIST, 'Ringstationcontrolentry' , 'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB', 'TokenRingRmonMib.Ringstationcontroltable.Ringstationcontrolentry',
[], [],
''' A list of parameters that set up the discovery of
stations on a particular interface and the
collection of statistics about these stations.
''',
'ringstationcontrolentry',
'TOKEN-RING-RMON-MIB', False),
],
'TOKEN-RING-RMON-MIB',
'ringStationControlTable',
_yang_ns._namespaces['TOKEN-RING-RMON-MIB'],
'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB'
),
},
'TokenRingRmonMib.Ringstationtable.Ringstationentry.RingstationstationstatusEnum' : _MetaInfoEnum('RingstationstationstatusEnum', 'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB',
{
'active':'active',
'inactive':'inactive',
'forcedRemoval':'forcedRemoval',
}, 'TOKEN-RING-RMON-MIB', _yang_ns._namespaces['TOKEN-RING-RMON-MIB']),
'TokenRingRmonMib.Ringstationtable.Ringstationentry' : {
'meta_info' : _MetaInfoClass('TokenRingRmonMib.Ringstationtable.Ringstationentry',
False,
[
_MetaInfoClassMember('ringStationIfIndex', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' The value of this object uniquely identifies the
interface on this remote network monitoring device
on which this station was detected. The interface
identified by a particular value of this object is
the same interface as identified by the same value
of the ifIndex object, defined in MIB-II [3].
''',
'ringstationifindex',
'TOKEN-RING-RMON-MIB', True),
_MetaInfoClassMember('ringStationMacAddress', ATTRIBUTE, 'str' , None, None,
[(6, None)], [],
''' The physical address of this station.
''',
'ringstationmacaddress',
'TOKEN-RING-RMON-MIB', True),
_MetaInfoClassMember('ringStationAbortErrors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of abort delimiters reported by
this station in error reporting packets detected
by the probe.
''',
'ringstationaborterrors',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationACErrors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of AC (Address Copied) errors
reported in error reporting packets sent by the
nearest active downstream neighbor of this station
and detected by the probe.
''',
'ringstationacerrors',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationCongestionErrors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of receive congestion errors
reported by this station in error reporting
packets detected by the probe.
''',
'ringstationcongestionerrors',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationDuplicateAddresses', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of times this station experienced a
duplicate address error.
''',
'ringstationduplicateaddresses',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationFrameCopiedErrors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of frame copied errors reported
by this station in error reporting packets
detected by the probe.
''',
'ringstationframecopiederrors',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationFrequencyErrors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of frequency errors reported by
this station in error reporting packets detected
by the probe.
''',
'ringstationfrequencyerrors',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationInBeaconErrors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of beacon frames sent by this
station and detected by the probe.
''',
'ringstationinbeaconerrors',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationInBurstErrors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of burst errors reported by this
station in error reporting packets detected by the
probe.
''',
'ringstationinbursterrors',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationInLineErrors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of line errors reported by this
station in error reporting packets detected by the
probe.
''',
'ringstationinlineerrors',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationInsertions', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The number of times the probe detected this
station inserting onto the ring.
''',
'ringstationinsertions',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationInternalErrors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of adapter internal errors
reported by this station in error reporting
packets detected by the probe.
''',
'ringstationinternalerrors',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationLastEnterTime', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The value of sysUpTime at the time this station
last entered the ring. If the time is unknown,
this value shall be zero.
''',
'ringstationlastentertime',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationLastExitTime', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The value of sysUpTime at the time the probe
detected that this station last exited the ring.
If the time is unknown, this value shall be zero.
''',
'ringstationlastexittime',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationLastNAUN', ATTRIBUTE, 'str' , None, None,
[(6, None)], [],
''' The physical address of last known NAUN of this
station.
''',
'ringstationlastnaun',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationLostFrameErrors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of lost frame errors reported by
this station in error reporting packets detected
by the probe.
''',
'ringstationlostframeerrors',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationOutBeaconErrors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of beacon frames detected by the
probe that name this station as the NAUN.
''',
'ringstationoutbeaconerrors',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationOutBurstErrors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of burst errors reported in
error reporting packets sent by the nearest active
downstream neighbor of this station and detected
by the probe.
''',
'ringstationoutbursterrors',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationOutLineErrors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of line errors reported in error
reporting packets sent by the nearest active
downstream neighbor of this station and detected
by the probe.
''',
'ringstationoutlineerrors',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationStationStatus', REFERENCE_ENUM_CLASS, 'RingstationstationstatusEnum' , 'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB', 'TokenRingRmonMib.Ringstationtable.Ringstationentry.RingstationstationstatusEnum',
[], [],
''' The status of this station on the ring.
''',
'ringstationstationstatus',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationTokenErrors', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of token errors reported by this
station in error reporting frames detected by the
probe.
''',
'ringstationtokenerrors',
'TOKEN-RING-RMON-MIB', False),
],
'TOKEN-RING-RMON-MIB',
'ringStationEntry',
_yang_ns._namespaces['TOKEN-RING-RMON-MIB'],
'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB'
),
},
'TokenRingRmonMib.Ringstationtable' : {
'meta_info' : _MetaInfoClass('TokenRingRmonMib.Ringstationtable',
False,
[
_MetaInfoClassMember('ringStationEntry', REFERENCE_LIST, 'Ringstationentry' , 'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB', 'TokenRingRmonMib.Ringstationtable.Ringstationentry',
[], [],
''' A collection of statistics for a particular
station that has been discovered on a ring
monitored by this device.
''',
'ringstationentry',
'TOKEN-RING-RMON-MIB', False),
],
'TOKEN-RING-RMON-MIB',
'ringStationTable',
_yang_ns._namespaces['TOKEN-RING-RMON-MIB'],
'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB'
),
},
'TokenRingRmonMib.Ringstationordertable.Ringstationorderentry' : {
'meta_info' : _MetaInfoClass('TokenRingRmonMib.Ringstationordertable.Ringstationorderentry',
False,
[
_MetaInfoClassMember('ringStationOrderIfIndex', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' The value of this object uniquely identifies the
interface on this remote network monitoring device
on which this station was detected. The interface
identified by a particular value of this object is
the same interface as identified by the same value
of the ifIndex object, defined in MIB-II [3].
''',
'ringstationorderifindex',
'TOKEN-RING-RMON-MIB', True),
_MetaInfoClassMember('ringStationOrderOrderIndex', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' This index denotes the location of this station
with respect to other stations on the ring. This
index is one more than the number of hops
downstream that this station is from the rmon
probe. The rmon probe itself gets the value one.
''',
'ringstationorderorderindex',
'TOKEN-RING-RMON-MIB', True),
_MetaInfoClassMember('ringStationOrderMacAddress', ATTRIBUTE, 'str' , None, None,
[(6, None)], [],
''' The physical address of this station.
''',
'ringstationordermacaddress',
'TOKEN-RING-RMON-MIB', False),
],
'TOKEN-RING-RMON-MIB',
'ringStationOrderEntry',
_yang_ns._namespaces['TOKEN-RING-RMON-MIB'],
'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB'
),
},
'TokenRingRmonMib.Ringstationordertable' : {
'meta_info' : _MetaInfoClass('TokenRingRmonMib.Ringstationordertable',
False,
[
_MetaInfoClassMember('ringStationOrderEntry', REFERENCE_LIST, 'Ringstationorderentry' , 'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB', 'TokenRingRmonMib.Ringstationordertable.Ringstationorderentry',
[], [],
''' A collection of statistics for a particular
station that is active on a ring monitored by this
device. This table will contain information for
every interface that has a
ringStationControlStatus equal to valid.
''',
'ringstationorderentry',
'TOKEN-RING-RMON-MIB', False),
],
'TOKEN-RING-RMON-MIB',
'ringStationOrderTable',
_yang_ns._namespaces['TOKEN-RING-RMON-MIB'],
'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB'
),
},
'TokenRingRmonMib.Ringstationconfigcontroltable.Ringstationconfigcontrolentry.RingstationconfigcontrolremoveEnum' : _MetaInfoEnum('RingstationconfigcontrolremoveEnum', 'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB',
{
'stable':'stable',
'removing':'removing',
}, 'TOKEN-RING-RMON-MIB', _yang_ns._namespaces['TOKEN-RING-RMON-MIB']),
'TokenRingRmonMib.Ringstationconfigcontroltable.Ringstationconfigcontrolentry.RingstationconfigcontrolupdatestatsEnum' : _MetaInfoEnum('RingstationconfigcontrolupdatestatsEnum', 'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB',
{
'stable':'stable',
'updating':'updating',
}, 'TOKEN-RING-RMON-MIB', _yang_ns._namespaces['TOKEN-RING-RMON-MIB']),
'TokenRingRmonMib.Ringstationconfigcontroltable.Ringstationconfigcontrolentry' : {
'meta_info' : _MetaInfoClass('TokenRingRmonMib.Ringstationconfigcontroltable.Ringstationconfigcontrolentry',
False,
[
_MetaInfoClassMember('ringStationConfigControlIfIndex', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' The value of this object uniquely identifies the
interface on this remote network monitoring device
on which this station was detected. The interface
identified by a particular value of this object is
the same interface as identified by the same value
of the ifIndex object, defined in MIB-II [3].
''',
'ringstationconfigcontrolifindex',
'TOKEN-RING-RMON-MIB', True),
_MetaInfoClassMember('ringStationConfigControlMacAddress', ATTRIBUTE, 'str' , None, None,
[(6, None)], [],
''' The physical address of this station.
''',
'ringstationconfigcontrolmacaddress',
'TOKEN-RING-RMON-MIB', True),
_MetaInfoClassMember('ringStationConfigControlRemove', REFERENCE_ENUM_CLASS, 'RingstationconfigcontrolremoveEnum' , 'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB', 'TokenRingRmonMib.Ringstationconfigcontroltable.Ringstationconfigcontrolentry.RingstationconfigcontrolremoveEnum',
[], [],
''' Setting this object to `removing(2)' causes a
Remove Station MAC frame to be sent. The agent
will set this object to `stable(1)' after
processing the request.
''',
'ringstationconfigcontrolremove',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationConfigControlUpdateStats', REFERENCE_ENUM_CLASS, 'RingstationconfigcontrolupdatestatsEnum' , 'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB', 'TokenRingRmonMib.Ringstationconfigcontroltable.Ringstationconfigcontrolentry.RingstationconfigcontrolupdatestatsEnum',
[], [],
''' Setting this object to `updating(2)' causes the
configuration information associate with this
entry to be updated. The agent will set this
object to `stable(1)' after processing the
request.
''',
'ringstationconfigcontrolupdatestats',
'TOKEN-RING-RMON-MIB', False),
],
'TOKEN-RING-RMON-MIB',
'ringStationConfigControlEntry',
_yang_ns._namespaces['TOKEN-RING-RMON-MIB'],
'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB'
),
},
'TokenRingRmonMib.Ringstationconfigcontroltable' : {
'meta_info' : _MetaInfoClass('TokenRingRmonMib.Ringstationconfigcontroltable',
False,
[
_MetaInfoClassMember('ringStationConfigControlEntry', REFERENCE_LIST, 'Ringstationconfigcontrolentry' , 'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB', 'TokenRingRmonMib.Ringstationconfigcontroltable.Ringstationconfigcontrolentry',
[], [],
''' This entry controls active management of stations
by the probe. One entry exists in this table for
each active station in the ringStationTable.
''',
'ringstationconfigcontrolentry',
'TOKEN-RING-RMON-MIB', False),
],
'TOKEN-RING-RMON-MIB',
'ringStationConfigControlTable',
_yang_ns._namespaces['TOKEN-RING-RMON-MIB'],
'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB'
),
},
'TokenRingRmonMib.Ringstationconfigtable.Ringstationconfigentry' : {
'meta_info' : _MetaInfoClass('TokenRingRmonMib.Ringstationconfigtable.Ringstationconfigentry',
False,
[
_MetaInfoClassMember('ringStationConfigIfIndex', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' The value of this object uniquely identifies the
interface on this remote network monitoring device
on which this station was detected. The interface
identified by a particular value of this object is
the same interface as identified by the same value
of the ifIndex object, defined in MIB-II [3].
''',
'ringstationconfigifindex',
'TOKEN-RING-RMON-MIB', True),
_MetaInfoClassMember('ringStationConfigMacAddress', ATTRIBUTE, 'str' , None, None,
[(6, None)], [],
''' The physical address of this station.
''',
'ringstationconfigmacaddress',
'TOKEN-RING-RMON-MIB', True),
_MetaInfoClassMember('ringStationConfigFunctionalAddress', ATTRIBUTE, 'str' , None, None,
[(4, None)], [],
''' the functional addresses recognized by this
station.
''',
'ringstationconfigfunctionaladdress',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationConfigGroupAddress', ATTRIBUTE, 'str' , None, None,
[(4, None)], [],
''' The low-order 4 octets of the group address
recognized by this station.
''',
'ringstationconfiggroupaddress',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationConfigLocation', ATTRIBUTE, 'str' , None, None,
[(4, None)], [],
''' The assigned physical location of this station.
''',
'ringstationconfiglocation',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationConfigMicrocode', ATTRIBUTE, 'str' , None, None,
[(10, None)], [],
''' The microcode EC level of this station.
''',
'ringstationconfigmicrocode',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationConfigUpdateTime', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The value of sysUpTime at the time this
configuration information was last updated
(completely).
''',
'ringstationconfigupdatetime',
'TOKEN-RING-RMON-MIB', False),
],
'TOKEN-RING-RMON-MIB',
'ringStationConfigEntry',
_yang_ns._namespaces['TOKEN-RING-RMON-MIB'],
'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB'
),
},
'TokenRingRmonMib.Ringstationconfigtable' : {
'meta_info' : _MetaInfoClass('TokenRingRmonMib.Ringstationconfigtable',
False,
[
_MetaInfoClassMember('ringStationConfigEntry', REFERENCE_LIST, 'Ringstationconfigentry' , 'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB', 'TokenRingRmonMib.Ringstationconfigtable.Ringstationconfigentry',
[], [],
''' A collection of statistics for a particular
station that has been discovered on a ring
monitored by this probe.
''',
'ringstationconfigentry',
'TOKEN-RING-RMON-MIB', False),
],
'TOKEN-RING-RMON-MIB',
'ringStationConfigTable',
_yang_ns._namespaces['TOKEN-RING-RMON-MIB'],
'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB'
),
},
'TokenRingRmonMib.Sourceroutingstatstable.Sourceroutingstatsentry' : {
'meta_info' : _MetaInfoClass('TokenRingRmonMib.Sourceroutingstatstable.Sourceroutingstatsentry',
False,
[
_MetaInfoClassMember('sourceRoutingStatsIfIndex', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' The value of this object uniquely identifies the
interface on this remote network monitoring device
on which source routing statistics will be
detected. The interface identified by a
particular value of this object is the same
interface as identified by the same value of the
ifIndex object, defined in MIB-II [3].
''',
'sourceroutingstatsifindex',
'TOKEN-RING-RMON-MIB', True),
_MetaInfoClassMember('sourceRoutingStats1HopFrames', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of frames received whose route
had 1 hop, were not All Route Broadcast Frames,
and whose source or destination were on this ring
(i.e. frames that had a RIF field and had this
ring number in the first or last entry of the RIF
field).
''',
'sourceroutingstats1hopframes',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('sourceRoutingStats2HopsFrames', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of frames received whose route
had 2 hops, were not All Route Broadcast Frames,
and whose source or destination were on this ring
(i.e. frames that had a RIF field and had this
ring number in the first or last entry of the RIF
field).
''',
'sourceroutingstats2hopsframes',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('sourceRoutingStats3HopsFrames', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of frames received whose route
had 3 hops, were not All Route Broadcast Frames,
and whose source or destination were on this ring
(i.e. frames that had a RIF field and had this
ring number in the first or last entry of the RIF
field).
''',
'sourceroutingstats3hopsframes',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('sourceRoutingStats4HopsFrames', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of frames received whose route
had 4 hops, were not All Route Broadcast Frames,
and whose source or destination were on this ring
(i.e. frames that had a RIF field and had this
ring number in the first or last entry of the RIF
field).
''',
'sourceroutingstats4hopsframes',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('sourceRoutingStats5HopsFrames', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of frames received whose route
had 5 hops, were not All Route Broadcast Frames,
and whose source or destination were on this ring
(i.e. frames that had a RIF field and had this
ring number in the first or last entry of the RIF
field).
''',
'sourceroutingstats5hopsframes',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('sourceRoutingStats6HopsFrames', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of frames received whose route
had 6 hops, were not All Route Broadcast Frames,
and whose source or destination were on this ring
(i.e. frames that had a RIF field and had this
ring number in the first or last entry of the RIF
field).
''',
'sourceroutingstats6hopsframes',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('sourceRoutingStats7HopsFrames', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of frames received whose route
had 7 hops, were not All Route Broadcast Frames,
and whose source or destination were on this ring
(i.e. frames that had a RIF field and had this
ring number in the first or last entry of the RIF
field).
''',
'sourceroutingstats7hopsframes',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('sourceRoutingStats8HopsFrames', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of frames received whose route
had 8 hops, were not All Route Broadcast Frames,
and whose source or destination were on this ring
(i.e. frames that had a RIF field and had this
ring number in the first or last entry of the RIF
field).
''',
'sourceroutingstats8hopsframes',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('sourceRoutingStatsAllRoutesBroadcastFrames', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of good frames received that
were All Routes Broadcast.
''',
'sourceroutingstatsallroutesbroadcastframes',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('sourceRoutingStatsAllRoutesBroadcastOctets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of octets in good frames
received that were All Routes Broadcast.
''',
'sourceroutingstatsallroutesbroadcastoctets',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('sourceRoutingStatsInFrames', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The count of frames sent into this ring from
another ring.
''',
'sourceroutingstatsinframes',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('sourceRoutingStatsInOctets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The count of octets in good frames sent into this
ring from another ring.
''',
'sourceroutingstatsinoctets',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('sourceRoutingStatsLocalLLCFrames', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of frames received who had no
RIF field (or had a RIF field that only included
the local ring's number) and were not All Route
Broadcast Frames.
''',
'sourceroutingstatslocalllcframes',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('sourceRoutingStatsMoreThan8HopsFrames', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of frames received whose route
had more than 8 hops, were not All Route Broadcast
Frames, and whose source or destination were on
this ring (i.e. frames that had a RIF field and
had this ring number in the first or last entry of
the RIF field).
''',
'sourceroutingstatsmorethan8hopsframes',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('sourceRoutingStatsOutFrames', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The count of frames sent from this ring to
another ring.
''',
'sourceroutingstatsoutframes',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('sourceRoutingStatsOutOctets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The count of octets in good frames sent from this
ring to another ring.
''',
'sourceroutingstatsoutoctets',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('sourceRoutingStatsOwner', ATTRIBUTE, 'str' , None, None,
[], [],
''' The entity that configured this entry and is
therefore using the resources assigned to it.
''',
'sourceroutingstatsowner',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('sourceRoutingStatsRingNumber', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' The ring number of the ring monitored by this
entry. When any object in this entry is created,
the probe will attempt to discover the ring
number. Only after the ring number is discovered
will this object be created. After creating an
object in this entry, the management station
should poll this object to detect when it is
created. Only after this object is created can
the management station set the
sourceRoutingStatsStatus entry to valid(1).
''',
'sourceroutingstatsringnumber',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('sourceRoutingStatsSingleRouteBroadcastFrames', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of good frames received that
were Single Route Broadcast.
''',
'sourceroutingstatssingleroutebroadcastframes',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('sourceRoutingStatsSingleRoutesBroadcastOctets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The total number of octets in good frames
received that were Single Route Broadcast.
''',
'sourceroutingstatssingleroutesbroadcastoctets',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('sourceRoutingStatsStatus', REFERENCE_ENUM_CLASS, 'EntrystatusEnum' , 'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB', 'EntrystatusEnum',
[], [],
''' The status of this sourceRoutingStats entry.
''',
'sourceroutingstatsstatus',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('sourceRoutingStatsThroughFrames', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The count of frames sent from another ring,
through this ring, to another ring.
''',
'sourceroutingstatsthroughframes',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('sourceRoutingStatsThroughOctets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' The count of octets in good frames sent another
ring, through this ring, to another ring.
''',
'sourceroutingstatsthroughoctets',
'TOKEN-RING-RMON-MIB', False),
],
'TOKEN-RING-RMON-MIB',
'sourceRoutingStatsEntry',
_yang_ns._namespaces['TOKEN-RING-RMON-MIB'],
'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB'
),
},
'TokenRingRmonMib.Sourceroutingstatstable' : {
'meta_info' : _MetaInfoClass('TokenRingRmonMib.Sourceroutingstatstable',
False,
[
_MetaInfoClassMember('sourceRoutingStatsEntry', REFERENCE_LIST, 'Sourceroutingstatsentry' , 'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB', 'TokenRingRmonMib.Sourceroutingstatstable.Sourceroutingstatsentry',
[], [],
''' A collection of source routing statistics kept
for a particular Token Ring interface.
''',
'sourceroutingstatsentry',
'TOKEN-RING-RMON-MIB', False),
],
'TOKEN-RING-RMON-MIB',
'sourceRoutingStatsTable',
_yang_ns._namespaces['TOKEN-RING-RMON-MIB'],
'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB'
),
},
'TokenRingRmonMib' : {
'meta_info' : _MetaInfoClass('TokenRingRmonMib',
False,
[
_MetaInfoClassMember('ringStationConfigControlTable', REFERENCE_CLASS, 'Ringstationconfigcontroltable' , 'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB', 'TokenRingRmonMib.Ringstationconfigcontroltable',
[], [],
''' A list of ring station configuration control
entries.
''',
'ringstationconfigcontroltable',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationConfigTable', REFERENCE_CLASS, 'Ringstationconfigtable' , 'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB', 'TokenRingRmonMib.Ringstationconfigtable',
[], [],
''' A list of configuration entries for stations on a
ring monitored by this probe.
''',
'ringstationconfigtable',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationControlTable', REFERENCE_CLASS, 'Ringstationcontroltable' , 'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB', 'TokenRingRmonMib.Ringstationcontroltable',
[], [],
''' A list of ringStation table control entries.
''',
'ringstationcontroltable',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationOrderTable', REFERENCE_CLASS, 'Ringstationordertable' , 'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB', 'TokenRingRmonMib.Ringstationordertable',
[], [],
''' A list of ring station entries for stations in
the ring poll, ordered by their ring-order.
''',
'ringstationordertable',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('ringStationTable', REFERENCE_CLASS, 'Ringstationtable' , 'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB', 'TokenRingRmonMib.Ringstationtable',
[], [],
''' A list of ring station entries. An entry will
exist for each station that is now or has
previously been detected as physically present on
this ring.
''',
'ringstationtable',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('sourceRoutingStatsTable', REFERENCE_CLASS, 'Sourceroutingstatstable' , 'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB', 'TokenRingRmonMib.Sourceroutingstatstable',
[], [],
''' A list of source routing statistics entries.
''',
'sourceroutingstatstable',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLHistoryTable', REFERENCE_CLASS, 'Tokenringmlhistorytable' , 'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB', 'TokenRingRmonMib.Tokenringmlhistorytable',
[], [],
''' A list of Mac-Layer Token Ring statistics
entries.
''',
'tokenringmlhistorytable',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingMLStatsTable', REFERENCE_CLASS, 'Tokenringmlstatstable' , 'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB', 'TokenRingRmonMib.Tokenringmlstatstable',
[], [],
''' A list of Mac-Layer Token Ring statistics
entries.
''',
'tokenringmlstatstable',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingPHistoryTable', REFERENCE_CLASS, 'Tokenringphistorytable' , 'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB', 'TokenRingRmonMib.Tokenringphistorytable',
[], [],
''' A list of promiscuous Token Ring statistics
entries.
''',
'tokenringphistorytable',
'TOKEN-RING-RMON-MIB', False),
_MetaInfoClassMember('tokenRingPStatsTable', REFERENCE_CLASS, 'Tokenringpstatstable' , 'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB', 'TokenRingRmonMib.Tokenringpstatstable',
[], [],
''' A list of promiscuous Token Ring statistics
entries.
''',
'tokenringpstatstable',
'TOKEN-RING-RMON-MIB', False),
],
'TOKEN-RING-RMON-MIB',
'TOKEN-RING-RMON-MIB',
_yang_ns._namespaces['TOKEN-RING-RMON-MIB'],
'ydk.models.cisco_ios_xe.TOKEN_RING_RMON_MIB'
),
},
}
# Wire up the parent/child relationships between the generated meta-info
# classes: each list-entry class points at its containing table class, and
# each table class points at the top-level TokenRingRmonMib module class.
# (Auto-generated by ydkgen; do not edit by hand.)
_meta_table['TokenRingRmonMib.Tokenringmlstatstable.Tokenringmlstatsentry']['meta_info'].parent =_meta_table['TokenRingRmonMib.Tokenringmlstatstable']['meta_info']
_meta_table['TokenRingRmonMib.Tokenringpstatstable.Tokenringpstatsentry']['meta_info'].parent =_meta_table['TokenRingRmonMib.Tokenringpstatstable']['meta_info']
_meta_table['TokenRingRmonMib.Tokenringmlhistorytable.Tokenringmlhistoryentry']['meta_info'].parent =_meta_table['TokenRingRmonMib.Tokenringmlhistorytable']['meta_info']
_meta_table['TokenRingRmonMib.Tokenringphistorytable.Tokenringphistoryentry']['meta_info'].parent =_meta_table['TokenRingRmonMib.Tokenringphistorytable']['meta_info']
_meta_table['TokenRingRmonMib.Ringstationcontroltable.Ringstationcontrolentry']['meta_info'].parent =_meta_table['TokenRingRmonMib.Ringstationcontroltable']['meta_info']
_meta_table['TokenRingRmonMib.Ringstationtable.Ringstationentry']['meta_info'].parent =_meta_table['TokenRingRmonMib.Ringstationtable']['meta_info']
_meta_table['TokenRingRmonMib.Ringstationordertable.Ringstationorderentry']['meta_info'].parent =_meta_table['TokenRingRmonMib.Ringstationordertable']['meta_info']
_meta_table['TokenRingRmonMib.Ringstationconfigcontroltable.Ringstationconfigcontrolentry']['meta_info'].parent =_meta_table['TokenRingRmonMib.Ringstationconfigcontroltable']['meta_info']
_meta_table['TokenRingRmonMib.Ringstationconfigtable.Ringstationconfigentry']['meta_info'].parent =_meta_table['TokenRingRmonMib.Ringstationconfigtable']['meta_info']
_meta_table['TokenRingRmonMib.Sourceroutingstatstable.Sourceroutingstatsentry']['meta_info'].parent =_meta_table['TokenRingRmonMib.Sourceroutingstatstable']['meta_info']
_meta_table['TokenRingRmonMib.Tokenringmlstatstable']['meta_info'].parent =_meta_table['TokenRingRmonMib']['meta_info']
_meta_table['TokenRingRmonMib.Tokenringpstatstable']['meta_info'].parent =_meta_table['TokenRingRmonMib']['meta_info']
_meta_table['TokenRingRmonMib.Tokenringmlhistorytable']['meta_info'].parent =_meta_table['TokenRingRmonMib']['meta_info']
_meta_table['TokenRingRmonMib.Tokenringphistorytable']['meta_info'].parent =_meta_table['TokenRingRmonMib']['meta_info']
_meta_table['TokenRingRmonMib.Ringstationcontroltable']['meta_info'].parent =_meta_table['TokenRingRmonMib']['meta_info']
_meta_table['TokenRingRmonMib.Ringstationtable']['meta_info'].parent =_meta_table['TokenRingRmonMib']['meta_info']
_meta_table['TokenRingRmonMib.Ringstationordertable']['meta_info'].parent =_meta_table['TokenRingRmonMib']['meta_info']
_meta_table['TokenRingRmonMib.Ringstationconfigcontroltable']['meta_info'].parent =_meta_table['TokenRingRmonMib']['meta_info']
_meta_table['TokenRingRmonMib.Ringstationconfigtable']['meta_info'].parent =_meta_table['TokenRingRmonMib']['meta_info']
_meta_table['TokenRingRmonMib.Sourceroutingstatstable']['meta_info'].parent =_meta_table['TokenRingRmonMib']['meta_info']
| 111pontes/ydk-py | cisco-ios-xe/ydk/models/cisco_ios_xe/_meta/_TOKEN_RING_RMON_MIB.py | Python | apache-2.0 | 104,936 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Plotting functions for magnetic resonance data."""
import numpy as np
from pygimli.viewer.mpl import drawModel1D
# def drawModel1D(ax, thickness, values, plotfunction='plot',
# xlabel='', *args, **kwargs):
# """Draw 1d block model into axis ax defined by values and thickness vectors
# using plotfunction."""
#
# nLayers = len(thickness) + 1
# px = np.zeros(nLayers * 2)
# pz = np.zeros(nLayers * 2)
# z1 = np.cumsum(thickness)
#
# for i in range(nLayers):
# px[2 * i] = values[i]
# px[2 * i + 1] = values[i]
#
# if i == nLayers - 1:
# pz[2 * i + 1] = z1[i - 1] * 1.2
# else:
# pz[2 * i + 1] = z1[i]
# pz[2 * i + 2] = z1[i]
#
# if plotfunction == 'loglog' or plotfunction == 'semilogy':
# pz[0] = thickness[0] * 0.8
#
# try:
# plot = getattr(ax, plotfunction)
# plot(px, pz, *args, **kwargs)
# except Exception as e:
# print(e)
#
# ax.set_ylabel('Depth (m)')
# ax.set_xlabel(xlabel)
# ax.set_ylim(pz[-1], pz[0])
# ax.grid(True)
def showErrorBars(ax, thk, val, thkL, thkU, valL, valU, *args, **kwargs):
    """Draw value and thickness uncertainties of a 1D block model.

    Horizontal (red) error bars show the value uncertainty at the layer
    mid-depths; vertical (green) error bars show the thickness uncertainty
    at the layer bottoms.  All arrays are assumed to be numpy arrays of
    matching length (TODO confirm against callers).
    """
    depth_bottom = np.cumsum(thk)
    # mid-depth of each layer; the half-space is placed at 1.2x last bottom
    depth_mid = np.hstack((depth_bottom - thk / 2, depth_bottom[-1] * 1.2))
    val_mid = (val[:-1] + val[1:]) / 2
    ax.errorbar(val, depth_mid, fmt='.',
                xerr=[val - valL, valU - val], ecolor='r', **kwargs)
    ax.errorbar(val_mid, depth_bottom, fmt='.',
                yerr=[thk - thkL, thkU - thk], ecolor='g', **kwargs)
    # depth axis grows downwards, slightly padded below the deepest marker
    ax.set_ylim(bottom=depth_mid[-1] * 1.02, top=0)
def showWC(ax, thk, wc, wmin=0., wmax=0.45, maxdep=0., dw=0.05, **kwargs):
    """Show a water-content block model nicely.

    Parameters
    ----------
    ax : matplotlib axes
        Axes to draw into.
    thk : array-like
        Layer thicknesses.
    wc : array-like
        Water content per layer (volumetric fraction).
    wmin, wmax : float [0.0, 0.45]
        Lower/upper limit of the water-content axis.
    maxdep : float [0.0]
        Maximum displayed depth (only applied if > 0).
    dw : float [0.05]
        Tick spacing on the water-content axis.
    """
    drawModel1D(ax, thk, wc, xlabel=r'$\theta$')
    # Bug fix: the axis limits were hard-coded to (0., 0.45), silently
    # ignoring the wmin/wmax arguments; honour them instead.
    ax.set_xlim(wmin, wmax)
    if maxdep > 0.:
        ax.set_ylim(maxdep, 0.)
    wt = np.arange(wmin, wmax, dw)
    ax.set_xticks(wt)
    ax.set_xticklabels([str(wi) for wi in wt])
def showT2(ax, thk, t2, maxdep=0., **kwargs):
    """Show a relaxation-time (T2*) block model on a logarithmic axis."""
    # t2 is scaled by 1e3 for the ms axis label — presumably given in
    # seconds; verify against callers.
    drawModel1D(ax, thk, t2 * 1e3, xlabel=r'$T_2^*$ [ms]', plot='semilogx')
    # ensure at least a 20..500 ms window, otherwise pad the data by 10 %
    lower = min(20, min(t2) * 0.9e3)
    upper = max(500, max(t2) * 1.1e3)
    ax.set_xlim(lower, upper)
    if maxdep > 0.:
        ax.set_ylim(maxdep, 0.)
    ticks = [20, 50, 100, 200, 500]
    ax.set_xticks(ticks)
    ax.set_xticklabels([str(t) for t in ticks])
# The module is a plotting library; running it directly does nothing.
if __name__ == "__main__":
    pass
| gimli-org/gimli | pygimli/physics/sNMR/plotting.py | Python | apache-2.0 | 2,475 |
#------------------------------------------------
# Monkeyrunner Test Report
# 10/08/2012
#------------------------------------------------
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email.MIMEMultipart import MIMEMultipart
from email.utils import COMMASPACE,formatdate
from email import Encoders
from email.header import Header
import smtplib,email,os,sys
# Python 2 script: builds and sends the monkeyrunner test report by mail,
# but only when an error log was produced by the test run.  The two
# os.path.exists checks cover Windows ('\\') and POSIX ('/') separators.
if os.path.exists(os.getcwd()+'\\monkeyrunner_Error.log') or os.path.exists(os.getcwd()+'/monkeyrunner_Error.log'):
    print "Sending Monkeyrunner Test Report..."
    mail_from = 'redmine@cocos2d-x.org' #where the mail from
    mail_to = ['739657621@qq.com','yangguangzaidongji@hotmail.com','yangguangzaidongji@gmail.com']
    # Build the To: header value as a comma-separated string
    # (NOTE(review): this leaves a trailing comma — most MTAs tolerate it,
    # but confirm it is intentional).
    to_string =''
    for item in mail_to:
        to_string += item +','
    mail_subject = "Monkeyrunner Test Report"
    msg = MIMEMultipart()
    #msg = MIMEText('body')
    mail_attachment = 'monkeyrunner_Error.log'
    #msg = "\nhell"
    print mail_to
    # HACK: SMTP credentials are hard-coded in the source — a security
    # risk; they should come from the environment or a config file.
    username = 'redmine@cocos2d-x.org'
    password = 'cocos2d-x.org'
    msg["From"] = mail_from
    msg["To"] = to_string
    msg["Subject"] = mail_subject
    msg["Date"] = formatdate(localtime=True)
    mail_body = "Monkeyrunner Test Finish! See attachment for logs."
    msg.attach(MIMEText(mail_body))
    #Add attachment: read the error log and attach it base64-encoded.
    fp = open(mail_attachment,"rb")
    part = MIMEBase("application", "octet-stream")
    part.set_payload(fp.read())
    fp.close()
    Encoders.encode_base64(part)
    part.add_header("Content-Disposition", "attachment; filename=%s" % mail_attachment)
    msg.attach(part)
    #Send email via Gmail's submission port (587, STARTTLS).
    server = smtplib.SMTP('smtp.gmail.com:587')
    server.starttls()
    server.login(username,password)
    server.sendmail(mail_from, mail_to, msg.as_string())
    print 'Eamil success!'
    server.quit()
| sdkbox/sdkbox-facebook-sample-v2 | tools/jenkins_scripts/ReportManager.py | Python | mit | 1,901 |
# -*- coding: utf-8 -*-
#
# Cherokee-admin
#
# Authors:
# Alvaro Lopez Ortega <alvaro@alobbs.com>
#
# Copyright (C) 2001-2010 Alvaro Lopez Ortega
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
# Option tables for the Cherokee-admin UI.  Each constant is a list of
# (config_value, display_label) pairs used to populate selection widgets.
# Labels are wrapped in N_() so they are extracted for translation but
# only actually translated on display via trans() below.

AVAILABLE_LANGUAGES = [
    ('en', N_('English')),
    ('es', N_('Spanish')),
    ('de', N_('German')),
    ('fr', N_('French')),
    ('nl', N_('Dutch')),
    ('pl', N_('Polish')),
    ('sv_SE', N_('Swedish')),
    ('po_BR', N_('Brazilian Portuguese')),
    ('zh_CN', N_('Chinese Simplified')),
    ('ca', N_('Catalan'))
]

# How much of the server identification string is revealed to clients.
PRODUCT_TOKENS = [
    ('', N_('Default')),
    ('product', N_('Product only')),
    ('minor', N_('Product + Minor version')),
    ('minimal', N_('Product + Minimal version')),
    ('os', N_('Product + Platform')),
    ('full', N_('Full Server string'))
]

# Request handlers selectable per rule.
HANDLERS = [
    ('', N_('None')),
    ('common', N_('List & Send')),
    ('file', N_('Static Content')),
    ('dirlist', N_('Only Listing')),
    ('redir', N_('Redirection')),
    ('fcgi', N_('FastCGI')),
    ('scgi', N_('SCGI')),
    ('uwsgi', N_('uWSGI')),
    ('proxy', N_('HTTP Reverse Proxy')),
    ('post_report', N_('Upload Reporting')),
    ('streaming', N_('Audio/Video Streaming')),
    ('cgi', N_('CGI')),
    ('ssi', N_('Server Side Includes')),
    ('secdownload', N_('Hidden Downloads')),
    ('server_info', N_('Server Info')),
    ('dbslayer', N_('MySQL Bridge')),
    ('custom_error', N_('HTTP Error')),
    ('admin', N_('Remote Administration')),
    ('empty_gif', N_('1x1 Transparent GIF'))
]

ERROR_HANDLERS = [
    ('', N_('Default errors')),
    ('error_redir', N_('Custom redirections')),
    ('error_nn', N_('Closest match'))
]

# Authentication back-ends.
VALIDATORS = [
    ('', N_('None')),
    ('plain', N_('Plain text file')),
    ('htpasswd', N_('Htpasswd file')),
    ('htdigest', N_('Htdigest file')),
    ('ldap', N_('LDAP server')),
    ('mysql', N_('MySQL server')),
    ('pam', N_('PAM')),
    ('authlist', N_('Fixed list'))
]

VALIDATOR_METHODS = [
    ('basic', N_('Basic')),
    ('digest', N_('Digest')),
    ('basic,digest', N_('Basic or Digest'))
]

# Access-log formats and destinations.
LOGGERS = [
    ('', N_('None')),
    ('combined', N_('Apache compatible')),
    ('ncsa', N_('NCSA')),
    ('custom', N_('Custom'))
]

LOGGER_WRITERS = [
    ('file', N_('File')),
    ('syslog', N_('System logger')),
    ('stderr', N_('Standard Error')),
    ('exec', N_('Execute program'))
]

# Load-balancing strategies for information sources.
BALANCERS = [
    ('', N_('None')),
    ('round_robin', N_("Round Robin")),
    ('ip_hash', N_("IP Hash"))
]

SOURCE_TYPES = [
    ('interpreter', N_('Local interpreter')),
    ('host', N_('Remote host'))
]

ENCODERS = [
    ('gzip', N_('GZip')),
    ('deflate', N_('Deflate'))
]

THREAD_POLICY = [
    ('', N_('Default')),
    ('fifo', N_('FIFO')),
    ('rr', N_('Round-robin')),
    ('other', N_('Dynamic'))
]

# Event-notification mechanisms; availability is platform-dependent.
POLL_METHODS = [
    ('', N_('Automatic')),
    ('epoll', 'epoll() - Linux >= 2.6'),
    ('kqueue', 'kqueue() - BSD, OS X'),
    ('ports', 'Solaris ports - >= 10'),
    ('poll', 'poll()'),
    ('select', 'select()'),
    ('win32', 'Win32')
]

REDIR_SHOW = [
    ('1', N_('External')),
    ('0', N_('Internal'))
]

# HTTP status codes selectable for the custom-error handler.
ERROR_CODES = [
    ('400', '400 Bad Request'),
    ('401', '401 Unauthorized'),
    ('402', '402 Payment Required'),
    ('403', '403 Forbidden'),
    ('404', '404 Not Found'),
    ('405', '405 Method Not Allowed'),
    ('406', '406 Not Acceptable'),
    ('407', '407 Proxy Auth Required'),
    ('408', '408 Request Timeout'),
    ('409', '409 Conflict'),
    ('410', '410 Gone'),
    ('411', '411 Length Required'),
    ('412', '412 Precondition Failed'),
    ('413', '413 Request Entity too large'),
    ('414', '414 Request-URI too long'),
    ('415', '415 Unsupported Media Type'),
    ('416', '416 Requested range not satisfiable'),
    ('417', '417 Expectation Failed'),
    ('422', '422 Unprocessable Entity'),
    ('423', '423 Locked'),
    ('424', '424 Failed Dependency'),
    ('425', '425 Unordered Collection'),
    ('426', '426 Upgrade Required'),
    ('449', '449 Retry With'),
    ('500', '500 Internal Server Error'),
    ('501', '501 Not Implemented'),
    ('502', '502 Bad gateway'),
    ('503', '503 Service Unavailable'),
    ('504', '504 Gateway Timeout'),
    ('505', '505 HTTP Version Not Supported'),
    ('506', '506 Variant Also Negotiates'),
    ('507', '507 Insufficient Storage'),
    ('509', '509 Bandwidth Limit Exceeded'),
    ('510', '510 Not Extended')
]

# Rule-matching criteria for virtual-server behaviour rules.
RULES = [
    ('directory', N_('Directory')),
    ('extensions', N_('Extensions')),
    ('request', N_('Regular Expression')),
    ('header', N_('Header')),
    ('exists', N_('File Exists')),
    ('method', N_('HTTP Method')),
    ('bind', N_('Incoming IP/Port')),
    ('fullpath', N_('Full Path')),
    ('from', N_('Connected from')),
    ('url_arg', N_('URL Argument')),
    ('geoip', N_('GeoIP'))
]

# Virtual-server matching methods.
VRULES = [
    ('', N_('Match Nickname')),
    ('wildcard', N_('Wildcards')),
    ('rehost', N_('Regular Expressions')),
    ('target_ip', N_('Server IP'))
]

EXPIRATION_TYPE = [
    ('', N_('Not set')),
    ('epoch', N_('Already expired on 1970')),
    ('max', N_('Do not expire until 2038')),
    ('time', N_('Custom value'))
]

CRYPTORS = [
    ('', N_('No TLS/SSL')),
    ('libssl', N_('OpenSSL / libssl'))
]

EVHOSTS = [
    ('', N_('Off')),
    ('evhost', N_('Enhanced Virtual Hosting'))
]

CLIENT_CERTS = [
    ('', N_('Skip')),
    ('accept', N_('Accept')),
    ('required', N_('Require'))
]

COLLECTORS = [
    ('', N_('Disabled')),
    ('rrd', N_('RRDtool graphs'))
]

UTC_TIME = [
    ('', N_('Local time')),
    ('1', N_('UTC: Coordinated Universal Time'))
]

DWRITER_LANGS = [
    ('json', N_('JSON')),
    ('python', N_('Python')),
    ('php', N_('PHP')),
    ('ruby', N_('Ruby'))
]

POST_TRACKERS = [
    ('', N_('Disabled')),
    ('post_track', N_('POST tracker'))
]
def trans(options):
    """Return *options* with every label run through gettext.

    ``options`` is a sequence of ``(value, label)`` pairs; values are kept
    untouched while each label is translated with the ``_`` function.
    """
    translated = []
    for value, label in options:
        translated.append((value, _(label)))
    return translated
| mdavid/cherokee-webserver-svnclone | admin/consts.py | Python | gpl-2.0 | 6,961 |
""" A toplevel window to display licensing-related widgets
"""
from Tkinter import *
from ttk import *
from widgets.licensing import Licensing
class LicensingWindow(Toplevel):
    """Toplevel window hosting the Licensing widget."""

    def __init__(self, master):
        """Create the window as a transient child of *master*."""
        Toplevel.__init__(self, master)
        self.geometry("640x200-10-40")
        self.transient(master)
        self.title("Licensing and Attribution Information")
        # Keep a reference so the widget is reachable after construction.
        self.licensing_window = Licensing(self)
        self.licensing_window.pack(fill=BOTH, expand=True, padx=5, pady=5)
| xaroth8088/sprite-magic | windows/licensing_window.py | Python | gpl-3.0 | 529 |
#!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import glob
import os
import sys
import ah_bootstrap
from setuptools import setup
# A dirty hack to get around some early import/configurations ambiguities
if sys.version_info[0] >= 3:
    import builtins
else:
    import __builtin__ as builtins
# Flag read by astropy_helpers to know it runs inside setup.py.
builtins._ASTROPY_SETUP_ = True
from astropy_helpers.setup_helpers import (register_commands, get_debug_option,
                                           get_package_info)
from astropy_helpers.git_helpers import get_git_devstr
from astropy_helpers.version_helpers import generate_version_py
# Get some values from the setup.cfg
try:
    from ConfigParser import ConfigParser
except ImportError:
    from configparser import ConfigParser
conf = ConfigParser()
conf.read(['setup.cfg'])
metadata = dict(conf.items('metadata'))
PACKAGENAME = metadata.get('package_name', 'packagename')
DESCRIPTION = metadata.get('description', 'Astropy-dependent package')
AUTHOR = metadata.get('author', '')
AUTHOR_EMAIL = metadata.get('author_email', '')
LICENSE = metadata.get('license', 'unknown')
URL = metadata.get('url', 'http://astropy.org')
# Get the long description from the package's docstring
__import__(PACKAGENAME)
package = sys.modules[PACKAGENAME]
LONG_DESCRIPTION = package.__doc__
# Store the package name in a built-in variable so it's easy
# to get from other parts of the setup infrastructure
builtins._ASTROPY_PACKAGE_NAME_ = PACKAGENAME
# VERSION should be PEP386 compatible (http://www.python.org/dev/peps/pep-0386)
VERSION = '0.4.dev'
# Indicates if this version is a release version
RELEASE = 'dev' not in VERSION
if not RELEASE:
    # Append the git revision string for development builds.
    VERSION += get_git_devstr(False)
# Populate the dict of setup command overrides; this should be done before
# invoking any other functionality from distutils since it can potentially
# modify distutils' behavior.
cmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE)
# Freeze build information in version.py
generate_version_py(PACKAGENAME, VERSION, RELEASE,
                    get_debug_option(PACKAGENAME))
# Treat everything in scripts except README.rst as a script to be installed
scripts = [fname for fname in glob.glob(os.path.join('scripts', '*'))
           if os.path.basename(fname) != 'README.rst']
# Get configuration information from all of the various subpackages.
# See the docstring for setup_helpers.update_package_files for more
# details.
package_info = get_package_info()
# Add the project-global data
package_info['package_data'].setdefault(PACKAGENAME, [])
package_info['package_data'][PACKAGENAME].append('data/*')
# Define entry points for command-line scripts
#entry_points = {'console_scripts': []}
#entry_point_list = conf.items('entry_points')
#for entry_point in entry_point_list:
#    entry_points['console_scripts'].append('{0} = {1}'.format(entry_point[0],
#                                                              entry_point[1]))
# Include all .c files, recursively, including those generated by
# Cython, since we can not do this in MANIFEST.in with a "dynamic"
# directory name.
c_files = []
for root, dirs, files in os.walk(PACKAGENAME):
    for filename in files:
        if filename.endswith('.c'):
            c_files.append(
                os.path.join(
                    os.path.relpath(root, PACKAGENAME), filename))
package_info['package_data'][PACKAGENAME].extend(c_files)
# Note that requires and provides should not be included in the call to
# ``setup``, since these are now deprecated. See this link for more details:
# https://groups.google.com/forum/#!topic/astropy-dev/urYO8ckB2uM
setup(name=PACKAGENAME,
      version=VERSION,
      description=DESCRIPTION,
      scripts=scripts,
      install_requires=['astropy', 'sunpy'],
      author=AUTHOR,
      author_email=AUTHOR_EMAIL,
      license=LICENSE,
      url=URL,
      long_description=LONG_DESCRIPTION,
      cmdclass=cmdclassd,
      zip_safe=False,
      use_2to3=True,
      # entry_points=entry_points,
      **package_info
      )
| bwgref/nustar_moving_target | setup.py | Python | mit | 4,050 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Brian Coca <bcoca@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
module: systemd
author:
- Ansible Core Team
version_added: "2.2"
short_description: Manage services
description:
- Controls systemd services on remote hosts.
options:
name:
description:
- Name of the service. This parameter takes the name of exactly one service to work with.
- When using in a chroot environment you always need to specify the full name i.e. (crond.service).
type: str
aliases: [ service, unit ]
state:
description:
- C(started)/C(stopped) are idempotent actions that will not run commands unless necessary.
C(restarted) will always bounce the service. C(reloaded) will always reload.
type: str
choices: [ reloaded, restarted, started, stopped ]
enabled:
description:
- Whether the service should start on boot. B(At least one of state and enabled are required.)
type: bool
force:
description:
- Whether to override existing symlinks.
type: bool
version_added: 2.6
masked:
description:
- Whether the unit should be masked or not, a masked unit is impossible to start.
type: bool
daemon_reload:
description:
- Run daemon-reload before doing any other operations, to make sure systemd has read any changes.
- When set to C(yes), runs daemon-reload even if the module does not start or stop anything.
type: bool
default: no
aliases: [ daemon-reload ]
daemon_reexec:
description:
- Run daemon_reexec command before doing any other operations, the systemd manager will serialize the manager state.
type: bool
default: no
aliases: [ daemon-reexec ]
version_added: "2.8"
scope:
description:
- run systemctl within a given service manager scope, either as the default system scope (system),
the current user's scope (user), or the scope of all users (global).
- "For systemd to work with 'user', the executing user must have its own instance of dbus started and accessible (systemd requirement)."
- "The user dbus process is normally started during normal login, but not during the run of Ansible tasks.
Otherwise you will probably get a 'Failed to connect to bus: no such file or directory' error."
- The user must have access, normally given via setting the ``XDG_RUNTIME_DIR`` variable, see example below.
type: str
choices: [ system, user, global ]
default: system
version_added: "2.7"
no_block:
description:
- Do not synchronously wait for the requested operation to finish.
Enqueued job will continue without Ansible blocking on its completion.
type: bool
default: no
version_added: "2.3"
notes:
- Since 2.4, one of the following options is required 'state', 'enabled', 'masked', 'daemon_reload', ('daemon_reexec' since 2.8),
and all except 'daemon_reload' (and 'daemon_reexec' since 2.8) also require 'name'.
- Before 2.4 you always required 'name'.
- Globs are not supported in name, i.e ``postgres*.service``.
requirements:
- A system managed by systemd.
'''
EXAMPLES = '''
- name: Make sure a service is running
systemd:
state: started
name: httpd
- name: Stop service cron on debian, if running
systemd:
name: cron
state: stopped
- name: Restart service cron on centos, in all cases, also issue daemon-reload to pick up config changes
systemd:
state: restarted
daemon_reload: yes
name: crond
- name: Reload service httpd, in all cases
systemd:
name: httpd
state: reloaded
- name: Enable service httpd and ensure it is not masked
systemd:
name: httpd
enabled: yes
masked: no
- name: Enable a timer for dnf-automatic
systemd:
name: dnf-automatic.timer
state: started
enabled: yes
- name: Just force systemd to reread configs (2.4 and above)
systemd:
daemon_reload: yes
- name: Just force systemd to re-execute itself (2.8 and above)
systemd:
daemon_reexec: yes
- name: run a user service when XDG_RUNTIME_DIR is not set on remote login.
systemd:
name: myservice
state: started
scope: user
environment:
XDG_RUNTIME_DIR: "/run/user/{{ myuid }}"
'''
RETURN = '''
status:
description: A dictionary with the key=value pairs returned from `systemctl show`
returned: success
type: complex
sample: {
"ActiveEnterTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"ActiveEnterTimestampMonotonic": "8135942",
"ActiveExitTimestampMonotonic": "0",
"ActiveState": "active",
"After": "auditd.service systemd-user-sessions.service time-sync.target systemd-journald.socket basic.target system.slice",
"AllowIsolate": "no",
"Before": "shutdown.target multi-user.target",
"BlockIOAccounting": "no",
"BlockIOWeight": "1000",
"CPUAccounting": "no",
"CPUSchedulingPolicy": "0",
"CPUSchedulingPriority": "0",
"CPUSchedulingResetOnFork": "no",
"CPUShares": "1024",
"CanIsolate": "no",
"CanReload": "yes",
"CanStart": "yes",
"CanStop": "yes",
"CapabilityBoundingSet": "18446744073709551615",
"ConditionResult": "yes",
"ConditionTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"ConditionTimestampMonotonic": "7902742",
"Conflicts": "shutdown.target",
"ControlGroup": "/system.slice/crond.service",
"ControlPID": "0",
"DefaultDependencies": "yes",
"Delegate": "no",
"Description": "Command Scheduler",
"DevicePolicy": "auto",
"EnvironmentFile": "/etc/sysconfig/crond (ignore_errors=no)",
"ExecMainCode": "0",
"ExecMainExitTimestampMonotonic": "0",
"ExecMainPID": "595",
"ExecMainStartTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"ExecMainStartTimestampMonotonic": "8134990",
"ExecMainStatus": "0",
"ExecReload": "{ path=/bin/kill ; argv[]=/bin/kill -HUP $MAINPID ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }",
"ExecStart": "{ path=/usr/sbin/crond ; argv[]=/usr/sbin/crond -n $CRONDARGS ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }",
"FragmentPath": "/usr/lib/systemd/system/crond.service",
"GuessMainPID": "yes",
"IOScheduling": "0",
"Id": "crond.service",
"IgnoreOnIsolate": "no",
"IgnoreOnSnapshot": "no",
"IgnoreSIGPIPE": "yes",
"InactiveEnterTimestampMonotonic": "0",
"InactiveExitTimestamp": "Sun 2016-05-15 18:28:49 EDT",
"InactiveExitTimestampMonotonic": "8135942",
"JobTimeoutUSec": "0",
"KillMode": "process",
"KillSignal": "15",
"LimitAS": "18446744073709551615",
"LimitCORE": "18446744073709551615",
"LimitCPU": "18446744073709551615",
"LimitDATA": "18446744073709551615",
"LimitFSIZE": "18446744073709551615",
"LimitLOCKS": "18446744073709551615",
"LimitMEMLOCK": "65536",
"LimitMSGQUEUE": "819200",
"LimitNICE": "0",
"LimitNOFILE": "4096",
"LimitNPROC": "3902",
"LimitRSS": "18446744073709551615",
"LimitRTPRIO": "0",
"LimitRTTIME": "18446744073709551615",
"LimitSIGPENDING": "3902",
"LimitSTACK": "18446744073709551615",
"LoadState": "loaded",
"MainPID": "595",
"MemoryAccounting": "no",
"MemoryLimit": "18446744073709551615",
"MountFlags": "0",
"Names": "crond.service",
"NeedDaemonReload": "no",
"Nice": "0",
"NoNewPrivileges": "no",
"NonBlocking": "no",
"NotifyAccess": "none",
"OOMScoreAdjust": "0",
"OnFailureIsolate": "no",
"PermissionsStartOnly": "no",
"PrivateNetwork": "no",
"PrivateTmp": "no",
"RefuseManualStart": "no",
"RefuseManualStop": "no",
"RemainAfterExit": "no",
"Requires": "basic.target",
"Restart": "no",
"RestartUSec": "100ms",
"Result": "success",
"RootDirectoryStartOnly": "no",
"SameProcessGroup": "no",
"SecureBits": "0",
"SendSIGHUP": "no",
"SendSIGKILL": "yes",
"Slice": "system.slice",
"StandardError": "inherit",
"StandardInput": "null",
"StandardOutput": "journal",
"StartLimitAction": "none",
"StartLimitBurst": "5",
"StartLimitInterval": "10000000",
"StatusErrno": "0",
"StopWhenUnneeded": "no",
"SubState": "running",
"SyslogLevelPrefix": "yes",
"SyslogPriority": "30",
"TTYReset": "no",
"TTYVHangup": "no",
"TTYVTDisallocate": "no",
"TimeoutStartUSec": "1min 30s",
"TimeoutStopUSec": "1min 30s",
"TimerSlackNSec": "50000",
"Transient": "no",
"Type": "simple",
"UMask": "0022",
"UnitFileState": "enabled",
"WantedBy": "multi-user.target",
"Wants": "system.slice",
"WatchdogTimestampMonotonic": "0",
"WatchdogUSec": "0",
}
''' # NOQA
import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.facts.system.chroot import is_chroot
from ansible.module_utils.service import sysv_exists, sysv_is_enabled, fail_if_missing
from ansible.module_utils._text import to_native
def is_running_service(service_status):
    """Return True if the unit's ActiveState counts as running."""
    state = service_status['ActiveState']
    return state in ('active', 'activating')
def is_deactivating_service(service_status):
    """Return True if the unit is currently shutting down."""
    state = service_status['ActiveState']
    return state == 'deactivating'
def request_was_ignored(out):
    """Return True if systemctl ignored the request instead of answering.

    Real ``systemctl show`` output contains ``KEY=VALUE`` lines, so any
    output with a ``=`` is treated as a genuine answer.
    """
    if '=' in out:
        return False
    return 'ignoring request' in out or 'ignoring command' in out
def parse_systemctl_show(lines):
    """Parse ``systemctl show`` output lines into a dict.

    Values are normally single ``KEY=VALUE`` lines, but keys starting with
    ``Exec`` (e.g. ``ExecStart=``) may have values wrapped in ``{ ... }``
    that span several lines.  Only ``Exec*`` keys are treated as
    potentially multi-line: other values may legitimately begin with ``{``
    without ever closing it (e.g. ``Description=``), and greedily
    consuming lines for them would swallow the rest of the output and make
    the service file appear missing.
    """
    parsed = {}
    current_key = None
    pending = []
    for line in lines:
        if current_key is not None:
            # Inside a multi-line Exec value: accumulate until the line
            # that closes the brace.
            pending.append(line)
            if line.rstrip().endswith('}'):
                parsed[current_key] = '\n'.join(pending).strip()
                current_key = None
                pending = []
            continue
        if '=' not in line:
            # Stray line outside any value; ignore it.
            continue
        key, value = line.split('=', 1)
        if (key.startswith('Exec') and value.lstrip().startswith('{')
                and not value.rstrip().endswith('}')):
            # Opening line of a multi-line value.
            current_key = key
            pending = [value]
        else:
            parsed[key] = value.strip()
    return parsed
# ===========================================
# Main control flow
def main():
    """Entry point: query and manage a systemd unit.

    Handles daemon-reload/daemon-reexec, mask/unmask, enable/disable and
    state changes (start/stop/restart/reload), always reporting the unit
    status from ``systemctl show`` in the result.
    """
    # initialize
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', aliases=['service', 'unit']),
            state=dict(type='str', choices=['reloaded', 'restarted', 'started', 'stopped']),
            enabled=dict(type='bool'),
            force=dict(type='bool'),
            masked=dict(type='bool'),
            daemon_reload=dict(type='bool', default=False, aliases=['daemon-reload']),
            daemon_reexec=dict(type='bool', default=False, aliases=['daemon-reexec']),
            scope=dict(type='str', default='system', choices=['system', 'user', 'global']),
            no_block=dict(type='bool', default=False),
        ),
        supports_check_mode=True,
        required_one_of=[['state', 'enabled', 'masked', 'daemon_reload', 'daemon_reexec']],
        required_by=dict(
            state=('name', ),
            enabled=('name', ),
            masked=('name', ),
        ),
    )
    unit = module.params['name']
    if unit is not None:
        # Glob patterns are explicitly unsupported; fail early.
        for globpattern in (r"*", r"?", r"["):
            if globpattern in unit:
                module.fail_json(msg="This module does not currently support using glob patterns, found '%s' in service name: %s" % (globpattern, unit))
    systemctl = module.get_bin_path('systemctl', True)
    # systemd requires XDG_RUNTIME_DIR for user-scope operation; provide a
    # default when the login session did not set it.
    if os.getenv('XDG_RUNTIME_DIR') is None:
        os.environ['XDG_RUNTIME_DIR'] = '/run/user/%s' % os.geteuid()
    ''' Set CLI options depending on params '''
    # if scope is 'system' or None, we can ignore as there is no extra switch.
    # The other choices match the corresponding switch
    if module.params['scope'] != 'system':
        systemctl += " --%s" % module.params['scope']
    if module.params['no_block']:
        systemctl += " --no-block"
    if module.params['force']:
        systemctl += " --force"
    rc = 0
    out = err = ''
    result = dict(
        name=unit,
        changed=False,
        status=dict(),
    )
    # Run daemon-reload first, if requested
    if module.params['daemon_reload'] and not module.check_mode:
        (rc, out, err) = module.run_command("%s daemon-reload" % (systemctl))
        if rc != 0:
            module.fail_json(msg='failure %d during daemon-reload: %s' % (rc, err))
    # Run daemon-reexec
    if module.params['daemon_reexec'] and not module.check_mode:
        (rc, out, err) = module.run_command("%s daemon-reexec" % (systemctl))
        if rc != 0:
            module.fail_json(msg='failure %d during daemon-reexec: %s' % (rc, err))
    if unit:
        found = False
        is_initd = sysv_exists(unit)
        is_systemd = False
        # check service data, cannot error out on rc as it changes across versions, assume not found
        (rc, out, err) = module.run_command("%s show '%s'" % (systemctl, unit))
        if rc == 0 and not (request_was_ignored(out) or request_was_ignored(err)):
            # load return of systemctl show into dictionary for easy access and return
            if out:
                result['status'] = parse_systemctl_show(to_native(out).split('\n'))
                is_systemd = 'LoadState' in result['status'] and result['status']['LoadState'] != 'not-found'
                is_masked = 'LoadState' in result['status'] and result['status']['LoadState'] == 'masked'
                # Check for loading error
                if is_systemd and not is_masked and 'LoadError' in result['status']:
                    module.fail_json(msg="Error loading unit file '%s': %s" % (unit, result['status']['LoadError']))
        # Workaround for https://github.com/ansible/ansible/issues/71528
        elif err and rc == 1 and 'Failed to parse bus message' in err:
            result['status'] = parse_systemctl_show(to_native(out).split('\n'))
            unit, sep, suffix = unit.partition('@')
            unit_search = '{unit}{sep}*'.format(unit=unit, sep=sep)
            (rc, out, err) = module.run_command("{systemctl} list-unit-files '{unit_search}'".format(systemctl=systemctl, unit_search=unit_search))
            is_systemd = unit in out
            (rc, out, err) = module.run_command("{systemctl} is-active '{unit}'".format(systemctl=systemctl, unit=unit))
            result['status']['ActiveState'] = out.rstrip('\n')
        else:
            # list taken from man systemctl(1) for systemd 244
            valid_enabled_states = [
                "enabled",
                "enabled-runtime",
                "linked",
                "linked-runtime",
                "masked",
                "masked-runtime",
                "static",
                "indirect",
                "disabled",
                "generated",
                "transient"]
            (rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
            if out.strip() in valid_enabled_states:
                is_systemd = True
            else:
                # fallback list-unit-files as show does not work on some systems (chroot)
                # not used as primary as it skips some services (like those using init.d) and requires .service/etc notation
                (rc, out, err) = module.run_command("%s list-unit-files '%s'" % (systemctl, unit))
                if rc == 0:
                    is_systemd = True
                else:
                    # Check for systemctl command
                    module.run_command(systemctl, check_rc=True)
        # Does service exist?
        found = is_systemd or is_initd
        if is_initd and not is_systemd:
            module.warn('The service (%s) is actually an init script but the system is managed by systemd' % unit)
        # mask/unmask the service, if requested, can operate on services before they are installed
        if module.params['masked'] is not None:
            # state is not masked unless systemd affirms otherwise
            (rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
            masked = out.strip() == "masked"
            if masked != module.params['masked']:
                result['changed'] = True
                if module.params['masked']:
                    action = 'mask'
                else:
                    action = 'unmask'
                if not module.check_mode:
                    (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
                    if rc != 0:
                        # some versions of system CAN mask/unmask non existing services, we only fail on missing if they don't
                        fail_if_missing(module, found, unit, msg='host')
        # Enable/disable service startup at boot if requested
        if module.params['enabled'] is not None:
            if module.params['enabled']:
                action = 'enable'
            else:
                action = 'disable'
            fail_if_missing(module, found, unit, msg='host')
            # do we need to enable the service?
            enabled = False
            (rc, out, err) = module.run_command("%s is-enabled '%s'" % (systemctl, unit))
            # check systemctl result or if it is a init script
            if rc == 0:
                enabled = True
            elif rc == 1:
                # if not a user or global user service and both init script and unit file exist stdout should have enabled/disabled, otherwise use rc entries
                if module.params['scope'] == 'system' and \
                        is_initd and \
                        not out.strip().endswith('disabled') and \
                        sysv_is_enabled(unit):
                    enabled = True
            # default to current state
            result['enabled'] = enabled
            # Change enable/disable if needed
            if enabled != module.params['enabled']:
                result['changed'] = True
                if not module.check_mode:
                    (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
                    if rc != 0:
                        module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, out + err))
                result['enabled'] = not enabled
        # set service state if requested
        if module.params['state'] is not None:
            fail_if_missing(module, found, unit, msg="host")
            # default to desired state
            result['state'] = module.params['state']
            # What is current service state?
            if 'ActiveState' in result['status']:
                action = None
                if module.params['state'] == 'started':
                    if not is_running_service(result['status']):
                        action = 'start'
                elif module.params['state'] == 'stopped':
                    if is_running_service(result['status']) or is_deactivating_service(result['status']):
                        action = 'stop'
                else:
                    if not is_running_service(result['status']):
                        action = 'start'
                    else:
                        action = module.params['state'][:-2]  # remove 'ed' from restarted/reloaded
                    result['state'] = 'started'
                if action:
                    result['changed'] = True
                    if not module.check_mode:
                        (rc, out, err) = module.run_command("%s %s '%s'" % (systemctl, action, unit))
                        if rc != 0:
                            module.fail_json(msg="Unable to %s service %s: %s" % (action, unit, err))
            # check for chroot
            elif is_chroot(module) or os.environ.get('SYSTEMD_OFFLINE') == '1':
                module.warn("Target is a chroot or systemd is offline. This can lead to false positives or prevent the init system tools from working.")
            else:
                # this should not happen?
                module.fail_json(msg="Service is in unknown state", status=result['status'])
    module.exit_json(**result)
# Standard module-as-script entry point.
if __name__ == '__main__':
    main()
| Fale/ansible | lib/ansible/modules/systemd.py | Python | gpl-3.0 | 22,523 |
# vim:ts=4:sts=4:sw=4:expandtab
import os
import satori.web.setup
def manage():
    """Run Django's management command dispatcher with satori.web settings."""
    from django.core.management import execute_manager
    import satori.web.settings as settings
    # HACK: Django cannot locate the management module for the dotted app
    # name 'satori.web', so patch its lookup to point at our local
    # 'management' directory and defer every other app to the original.
    import django.core.management
    original_finder = django.core.management.find_management_module
    def find_management_module(app_name):
        if app_name != 'satori.web':
            return original_finder(app_name)
        return os.path.join(os.path.dirname(__file__), 'management')
    django.core.management.find_management_module = find_management_module
    # END OF HACK
    execute_manager(settings)
| zielmicha/satori | satori.web/satori/web/__init__.py | Python | mit | 616 |
from django.contrib import admin
from . import models as mdpage
# The following classes define the admin interface for your models.
# See http://docs.djangoproject.com/en/dev/ref/contrib/admin/ for
# a full list of the options you can use in these classes.
@admin.register(mdpage.MarkdownPage)
class MarkdownPageAdmin(admin.ModelAdmin):
    # Admin list view for Markdown pages: searchable by title/source,
    # filterable by type and status.
    list_display = (
        'title',
        'type',
        'status',
        'created',
        'updated',
    )
    search_fields = ('title', 'source')
    list_filter = ('type', 'status')
@admin.register(mdpage.MarkdownPageArchive)
class MarkdownPageArchiveAdmin(admin.ModelAdmin):
    # Archives have no own title/type; the columns below surface the
    # related page's fields via the accessor methods.
    list_display = (
        'page_title',
        'page_type',
        'created',
    )
    def page_title(self, obj):
        # Title of the archived entry's parent page.
        return obj.page.title
    def page_type(self, obj):
        # Type of the archived entry's parent page.
        return obj.page.type
@admin.register(mdpage.StaticContent)
class StaticContentAdmin(admin.ModelAdmin):
    # Plain list view for static content entries.
    list_display = (
        'label',
        'description',
        'type',
        'subtype',
        'created',
        'updated',
    )
@admin.register(mdpage.MarkdownPageType)
class MarkdownPageTypeAdmin(admin.ModelAdmin):
    # Plain list view for page types, including publication window.
    list_display = (
        'prefix',
        'description',
        'created',
        'updated',
        'pub_date',
        'end_date',
        'status',
    )
| dakrauth/django-markdown-page | mdpage/admin.py | Python | mit | 1,348 |
# -*- coding: utf-8 -*-
from http import client
import json
from openfisca_country_template import entities
# /entities
def test_return_code(test_client):
    """The /entities endpoint must answer with HTTP 200 OK."""
    response = test_client.get('/entities')
    assert response.status_code == client.OK
def test_response_data(test_client):
    """The household entity must be fully described by /entities."""
    response = test_client.get('/entities')
    payload = json.loads(response.data.decode('utf-8'))
    expected_documentation = entities.Household.doc.strip()
    expected_household = {
        'description': 'All the people in a family or group who live together in the same place.',
        'documentation': expected_documentation,
        'plural': 'households',
        'roles': {
            'child': {
                'description': 'Other individuals living in the household.',
                'plural': 'children',
            },
            'parent': {
                'description': 'The one or two adults in charge of the household.',
                'plural': 'parents',
                'max': 2,
            }
        }
    }
    assert payload['household'] == expected_household
| openfisca/openfisca-core | tests/web_api/test_entities.py | Python | agpl-3.0 | 1,096 |
# Copyright 2014-2017 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""Sparse implementations of n-dimensional sampling grids.
Sampling grids are collections of points in an n-dimensional coordinate
space with a certain structure which is exploited to minimize storage.
"""
from __future__ import print_function, division, absolute_import
import numpy as np
from odl.set import Set, IntervalProd
from odl.util import (
normalized_index_expression, normalized_scalar_param_list, safe_int_conv,
array_str, signature_string, indent, npy_printoptions)
__all__ = ('RectGrid', 'uniform_grid', 'uniform_grid_fromintv')
def sparse_meshgrid(*x):
    """Make a sparse `meshgrid` by adding empty dimensions.

    Parameters
    ----------
    x1,...,xN : `array-like`
        Input arrays to turn into sparse meshgrid vectors.

    Returns
    -------
    meshgrid : tuple of `numpy.ndarray`'s
        Sparse coordinate vectors representing an N-dimensional grid.

    See Also
    --------
    numpy.meshgrid : dense or sparse meshgrids

    Examples
    --------
    >>> x, y = [0, 1], [2, 3, 4]
    >>> mesh = sparse_meshgrid(x, y)
    >>> sum(xi for xi in mesh).ravel()  # first axis slowest
    array([2, 3, 4, 3, 4, 5])
    """
    n = len(x)
    mesh = []
    for ax, xi in enumerate(x):
        xi = np.asarray(xi)
        # Insert a length-1 axis everywhere except in axis `ax`.
        slc = [None] * n
        slc[ax] = slice(None)
        # Index with a *tuple*: indexing an ndarray with a list that
        # contains slices is deprecated and removed in modern NumPy.
        mesh.append(np.ascontiguousarray(xi[tuple(slc)]))
    return tuple(mesh)
class RectGrid(Set):
"""An n-dimensional rectilinear grid.
A rectilinear grid is the set of points defined by all possible
combination of coordinates taken from fixed coordinate vectors.
The storage need for a rectilinear grid is only the sum of the lengths
of the coordinate vectors, while the total number of points is
the product of these lengths. This class makes use of that
sparse storage scheme.
See ``Notes`` for details.
"""
    def __init__(self, *coord_vectors):
        """Initialize a new instance.

        Parameters
        ----------
        vec1,...,vecN : `array-like`
            The coordinate vectors defining the grid points. They must
            be sorted in ascending order and may not contain
            duplicates. Empty vectors are not allowed.

        Examples
        --------
        >>> g = RectGrid([1, 2, 5], [-2, 1.5, 2])
        >>> g
        RectGrid(
            [ 1.,  2.,  5.],
            [-2. ,  1.5,  2. ]
        )
        >>> g.ndim  # number of axes
        2
        >>> g.shape  # points per axis
        (3, 3)
        >>> g.size  # total number of points
        9

        Grid points can be extracted with index notation (NOTE: This is
        slow, do not loop over the grid using indices!):

        >>> g = RectGrid([-1, 0, 3], [2, 4, 5], [5], [2, 4, 7])
        >>> g[0, 0, 0, 0]
        array([-1.,  2.,  5.,  2.])

        Slices and ellipsis are also supported:

        >>> g[:, 0, 0, 0]
        RectGrid(
            [-1.,  0.,  3.],
            [ 2.],
            [ 5.],
            [ 2.]
        )
        >>> g[0, ..., 1:]
        RectGrid(
            [-1.],
            [ 2.,  4.,  5.],
            [ 5.],
            [ 4.,  7.]
        )

        Notes
        -----
        In 2 dimensions, for example, given two coordinate vectors

        .. math::
            v_1 = (-1, 0, 2),\ v_2 = (0, 1)

        the corresponding rectilinear grid :math:`G` is the set of all
        2d points whose first component is from :math:`v_1` and the
        second component from :math:`v_2`:

        .. math::
            G = \{(-1, 0), (-1, 1), (0, 0), (0, 1), (2, 0), (2, 1)\}

        Here is a graphical representation::

            :    :        :
            :    :        :
            1 -x----x--------x-...
               |    |        |
            0 -x----x--------x-...
              -1    0        2

        Apparently, this structure can represent grids with arbitrary step
        sizes in each axis.

        Note that the above ordering of points is the standard ``'C'``
        ordering where the first axis (:math:`v_1`) varies slowest.
        Ordering is only relevant when the point array is actually created;
        the grid itself is independent of this ordering.
        """
        super(RectGrid, self).__init__()
        # Normalize each vector to a 1d float64 array.
        vecs = tuple(np.atleast_1d(vec).astype('float64')
                     for vec in coord_vectors)
        # Validate: non-empty, finite, 1-dimensional, strictly increasing.
        for i, vec in enumerate(vecs):
            if len(vec) == 0:
                raise ValueError('vector {} has zero length'
                                 ''.format(i + 1))
            if not np.all(np.isfinite(vec)):
                raise ValueError('vector {} contains invalid entries'
                                 ''.format(i + 1))
            if vec.ndim != 1:
                raise ValueError('vector {} has {} dimensions instead of 1'
                                 ''.format(i + 1, vec.ndim))
            sorted_vec = np.sort(vec)
            if np.any(vec != sorted_vec):
                raise ValueError('vector {} not sorted'
                                 ''.format(i + 1))
            if np.any(np.diff(vec) == 0):
                raise ValueError('vector {} contains duplicates'
                                 ''.format(i + 1))
        # Lazily evaluates strides when needed but stores the result
        self.__stride = None
        self.__coord_vectors = vecs
        # Non-degenerate axes
        self.__nondegen_byaxis = tuple(len(v) > 1 for v in self.coord_vectors)
        # Uniformity, setting True in degenerate axes
        diffs = [np.diff(v) for v in self.coord_vectors]
        self.__is_uniform_byaxis = tuple(
            (diff.size == 0) or np.allclose(diff, diff[0])
            for diff in diffs)
# Attributes
    @property
    def coord_vectors(self):
        """Coordinate vectors of the grid.

        Returns
        -------
        coord_vectors : tuple of `numpy.ndarray`'s

        Examples
        --------
        >>> g = RectGrid([0, 1], [-1, 0, 2])
        >>> x, y = g.coord_vectors
        >>> x
        array([ 0.,  1.])
        >>> y
        array([-1.,  0.,  2.])

        See Also
        --------
        meshgrid : Same result but with nd arrays
        """
        # Tuple of 1d float64 arrays, validated and frozen in __init__.
        return self.__coord_vectors
    @property
    def ndim(self):
        """Number of dimensions of the grid."""
        # Computed lazily on first access, then cached in ``self.__ndim``.
        try:
            return self.__ndim
        except AttributeError:
            ndim = len(self.coord_vectors)
            self.__ndim = ndim
            return ndim
    @property
    def shape(self):
        """Number of grid points per axis."""
        # Computed lazily on first access, then cached in ``self.__shape``.
        try:
            return self.__shape
        except AttributeError:
            shape = tuple(len(vec) for vec in self.coord_vectors)
            self.__shape = shape
            return shape
@property
def size(self):
"""Total number of grid points."""
# Since np.prod(()) == 1.0 we need to handle that by ourselves
return 0 if self.shape == () else np.prod(self.shape)
def __len__(self):
"""Return ``len(self)``.
The length along the first dimension.
Examples
--------
>>> g = RectGrid([0, 1], [-1, 0, 2], [4, 5, 6])
>>> len(g)
2
See Also
--------
size : The total number of elements.
"""
return 0 if self.shape == () else self.shape[0]
@property
def min_pt(self):
"""Vector containing the minimal grid coordinates per axis.
Examples
--------
>>> g = RectGrid([1, 2, 5], [-2, 1.5, 2])
>>> g.min_pt
array([ 1., -2.])
"""
return np.array([vec[0] for vec in self.coord_vectors])
@property
def max_pt(self):
"""Vector containing the maximal grid coordinates per axis.
Examples
--------
>>> g = RectGrid([1, 2, 5], [-2, 1.5, 2])
>>> g.max_pt
array([ 5., 2.])
"""
return np.array([vec[-1] for vec in self.coord_vectors])
    @property
    def nondegen_byaxis(self):
        """Boolean tuple with ``True`` entries for non-degenerate axes.

        An axis is non-degenerate if it holds more than one coordinate.

        Examples
        --------
        >>> g = uniform_grid([0, 0], [1, 1], (5, 1))
        >>> g.nondegen_byaxis
        (True, False)
        """
        # Precomputed in __init__ as ``len(v) > 1`` per coordinate vector.
        return self.__nondegen_byaxis
    @property
    def is_uniform_byaxis(self):
        """Boolean tuple showing uniformity of this grid per axis.

        Degenerate (single-point) axes count as uniform.
        """
        # Precomputed in __init__ from the pairwise coordinate differences.
        return self.__is_uniform_byaxis
    @property
    def is_uniform(self):
        """``True`` if this grid is uniform in all axes, else ``False``."""
        # The grid is uniform overall iff every axis is uniform.
        return all(self.is_uniform_byaxis)
# min, max and extent are for set duck-typing
def min(self, **kwargs):
    """Return `min_pt`.

    Parameters
    ----------
    kwargs
        For duck-typing with `numpy.amin`; only ``out`` is used.

    See Also
    --------
    max
    odl.set.domain.IntervalProd.min

    Examples
    --------
    >>> g = RectGrid([1, 2, 5], [-2, 1.5, 2])
    >>> g.min()
    array([ 1., -2.])

    Also works with Numpy:

    >>> np.min(g)
    array([ 1., -2.])
    """
    out = kwargs.get('out', None)
    if out is None:
        return self.min_pt
    # Fill the caller-provided output in place, as ``np.amin`` expects.
    out[:] = self.min_pt
    return out
def max(self, **kwargs):
    """Return `max_pt`.

    Parameters
    ----------
    kwargs
        For duck-typing with `numpy.amax`; only ``out`` is used.

    See Also
    --------
    min
    odl.set.domain.IntervalProd.max

    Examples
    --------
    >>> g = RectGrid([1, 2, 5], [-2, 1.5, 2])
    >>> g.max()
    array([ 5.,  2.])

    Also works with Numpy:

    >>> np.max(g)
    array([ 5.,  2.])
    """
    out = kwargs.get('out', None)
    if out is None:
        return self.max_pt
    # Fill the caller-provided output in place, as ``np.amax`` expects.
    out[:] = self.max_pt
    return out
@property
def mid_pt(self):
    """Midpoint of the grid, not necessarily a grid point.

    Examples
    --------
    >>> rg = uniform_grid([-1.5, -1], [-0.5, 3], (2, 3))
    >>> rg.mid_pt
    array([-1.,  1.])
    """
    # Multiplying by 0.5 is exact for floats, identical to dividing by 2.
    return 0.5 * (self.min_pt + self.max_pt)
@property
def stride(self):
    """Step per axis between neighboring points of a uniform grid.

    If the grid contains axes that are not uniform, ``stride`` has
    a ``NaN`` entry.
    For degenerate (length 1) axes, ``stride`` has value ``0.0``.

    Returns
    -------
    stride : numpy.array
        Array of dtype ``float`` and length `ndim`.

    Examples
    --------
    >>> rg = uniform_grid([-1.5, -1], [-0.5, 3], (2, 3))
    >>> rg.stride
    array([ 1.,  2.])

    NaN returned for non-uniform dimension:

    >>> g = RectGrid([0, 1, 2], [0, 1, 4])
    >>> g.stride
    array([  1.,  nan])

    0.0 returned for degenerate dimension:

    >>> g = RectGrid([0, 1, 2], [0])
    >>> g.stride
    array([ 1.,  0.])
    """
    # Cache for efficiency instead of re-computing
    # NOTE(review): assumes ``self.__stride`` was initialized to None in
    # the constructor (not visible in this chunk) — confirm.
    if self.__stride is None:
        strd = []
        for i in range(self.ndim):
            if not self.is_uniform_byaxis[i]:
                # No single step size exists on a non-uniform axis.
                strd.append(float('nan'))
            elif self.nondegen_byaxis[i]:
                # Uniform axis with >= 2 nodes: constant spacing
                # extent / (n - 1).
                strd.append(self.extent[i] / (self.shape[i] - 1.0))
            else:
                # Degenerate (single-node) axis.
                strd.append(0.0)
        self.__stride = np.array(strd)
    # Return a copy so callers cannot mutate the cached array.
    return self.__stride.copy()
@property
def extent(self):
    """Return the edge lengths of this grid's minimal bounding box.

    Examples
    --------
    >>> g = RectGrid([1, 2, 5], [-2, 1.5, 2])
    >>> g.extent
    array([ 4.,  4.])
    """
    lo = self.min_pt
    hi = self.max_pt
    return hi - lo
def convex_hull(self):
    """Return the smallest `IntervalProd` containing this grid.

    The convex hull of a set is the union of all line segments
    between points in the set. For a rectilinear grid, it is the
    interval product given by the extremal coordinates.

    Returns
    -------
    convex_hull : `IntervalProd`
        Interval product defined by the minimum and maximum points
        of the grid.

    Examples
    --------
    >>> g = RectGrid([-1, 0, 3], [2, 4], [5], [2, 4, 7])
    >>> g.convex_hull()
    IntervalProd([-1.,  2.,  5.,  2.], [ 3.,  4.,  5.,  7.])
    """
    # ``IntervalProd`` is imported at module level (not visible here).
    return IntervalProd(self.min(), self.max())
def element(self):
    """An arbitrary element, the minimum coordinates.

    Provided for set duck-typing; any grid point would do.
    """
    return self.min_pt
def approx_equals(self, other, atol):
    """Test if this grid is equal to another grid.

    Parameters
    ----------
    other :
        Object to be tested
    atol : float
        Allow deviations up to this number in absolute value
        per vector entry.

    Returns
    -------
    equals : bool
        ``True`` if ``other`` is a `RectGrid` instance with all
        coordinate vectors equal (up to the given tolerance) to
        the ones of this grid, ``False`` otherwise.

    Examples
    --------
    >>> g1 = RectGrid([0, 1], [-1, 0, 2])
    >>> g2 = RectGrid([-0.1, 1.1], [-1, 0.1, 2])
    >>> g1.approx_equals(g2, atol=0)
    False
    >>> g1.approx_equals(g2, atol=0.15)
    True
    """
    if other is self:
        return True
    if type(other) is not type(self):
        return False
    if self.ndim != other.ndim or self.shape != other.shape:
        return False
    # Compare axis by axis with absolute tolerance only.
    vec_pairs = zip(self.coord_vectors, other.coord_vectors)
    return all(np.allclose(vec_s, vec_o, atol=atol, rtol=0.0)
               for vec_s, vec_o in vec_pairs)
def __eq__(self, other):
    """Return ``self == other``.

    Exact (element-wise) comparison of all coordinate vectors; use
    `approx_equals` for comparison with tolerance.
    """
    # Implemented separately for performance reasons
    if other is self:
        return True
    return (type(other) is type(self) and
            self.shape == other.shape and
            all(np.array_equal(vec_s, vec_o)
                for (vec_s, vec_o) in zip(self.coord_vectors,
                                          other.coord_vectors)))
def __hash__(self):
    """Return ``hash(self)``."""
    # TODO: update with #841
    # Hash the raw bytes of each coordinate vector so that grids with
    # equal arrays hash equally, consistent with ``__eq__``.
    coord_vec_str = tuple(cv.tobytes() for cv in self.coord_vectors)
    return hash((type(self), coord_vec_str))
def approx_contains(self, other, atol):
    """Test if ``other`` belongs to this grid up to a tolerance.

    Parameters
    ----------
    other : `array-like` or float
        The object to test for membership in this grid
    atol : float
        Allow deviations up to this number in absolute value
        per vector entry.

    Examples
    --------
    >>> g = RectGrid([0, 1], [-1, 0, 2])
    >>> g.approx_contains([0, 0], atol=0.0)
    True
    >>> [0, 0] in g  # equivalent
    True
    >>> g.approx_contains([0.1, -0.1], atol=0.0)
    False
    >>> g.approx_contains([0.1, -0.1], atol=0.15)
    True
    """
    point = np.atleast_1d(other)
    if point.shape != (self.ndim,):
        return False
    # Each coordinate must be close to at least one node on its axis.
    for axis_vec, coord in zip(self.coord_vectors, point):
        if not np.any(np.isclose(axis_vec, coord, atol=atol, rtol=0.0)):
            return False
    return True
def __contains__(self, other):
    """Return ``other in self``."""
    other = np.atleast_1d(other)
    # Object-dtype arrays (e.g. from ragged input) can never match.
    if other.dtype == np.dtype(object):
        return False
    # Exact membership: every coordinate must be an existing node on
    # its axis.
    return (other.shape == (self.ndim,) and
            all(coord in vector
                for vector, coord in zip(self.coord_vectors, other)))
def is_subgrid(self, other, atol=0.0):
    """Return ``True`` if this grid is a subgrid of ``other``.

    Parameters
    ----------
    other : `TensorGrid`
        The other grid which is supposed to contain this grid
    atol : float, optional
        Allow deviations up to this number in absolute value
        per coordinate vector entry.

    Returns
    -------
    is_subgrid : bool
        ``True`` if all coordinate vectors of ``self`` are within
        absolute distance ``atol`` of the other grid, else ``False``.

    Examples
    --------
    >>> rg = uniform_grid([-2, -2], [0, 4], (3, 4))
    >>> rg.coord_vectors
    (array([-2., -1.,  0.]), array([-2.,  0.,  2.,  4.]))
    >>> rg_sub = uniform_grid([-1, 2], [0, 4], (2, 2))
    >>> rg_sub.coord_vectors
    (array([-1.,  0.]), array([ 2.,  4.]))
    >>> rg_sub.is_subgrid(rg)
    True

    Fuzzy check is also possible. Note that the tolerance still
    applies to the coordinate vectors.

    >>> rg_sub = uniform_grid([-1.015, 2], [0, 3.99], (2, 2))
    >>> rg_sub.is_subgrid(rg, atol=0.01)
    False
    >>> rg_sub.is_subgrid(rg, atol=0.02)
    True
    """
    # Optimization for some common cases
    if other is self:
        return True
    if not isinstance(other, RectGrid):
        return False
    # Cheap necessary conditions: no axis may have more nodes than in
    # ``other``, and the bounding box must be contained (up to atol).
    if not all(self.shape[i] <= other.shape[i] and
               self.min_pt[i] >= other.min_pt[i] - atol and
               self.max_pt[i] <= other.max_pt[i] + atol
               for i in range(self.ndim)):
        return False
    # An empty grid is trivially a subgrid.
    if self.size == 0:
        return True
    if self.is_uniform and other.is_uniform:
        # For uniform grids, it suffices to show that min_pt, max_pt
        # and g[1,...,1] are contained in the other grid. For axes
        # with less than 2 points, this reduces to min_pt and max_pt,
        # and the corresponding indices in the other check point are
        # set to 0.
        minmax_contained = (
            other.approx_contains(self.min_pt, atol=atol) and
            other.approx_contains(self.max_pt, atol=atol))
        check_idx = np.zeros(self.ndim, dtype=int)
        check_idx[np.array(self.shape) >= 3] = 1
        checkpt_contained = other.approx_contains(self[tuple(check_idx)],
                                                  atol=atol)
        return minmax_contained and checkpt_contained
    else:
        # Array version of the fuzzy subgrid test, about 3 times faster
        # than the loop version.
        for vec_o, vec_s in zip(other.coord_vectors, self.coord_vectors):
            # Create array of differences of all entries in vec_o and
            # vec_s. If there is no almost zero entry in each row,
            # return False.
            vec_o_mg, vec_s_mg = sparse_meshgrid(vec_o, vec_s)
            if not np.all(np.any(np.isclose(vec_s_mg, vec_o_mg, atol=atol),
                                 axis=0)):
                return False
        return True
def insert(self, index, *grids):
    """Return a copy with ``grids`` inserted before ``index``.

    The given grids are inserted (as a block) into ``self``, yielding
    a new grid whose number of dimensions is the sum of the numbers of
    dimensions of all involved grids.
    Note that no changes are made in-place.

    Parameters
    ----------
    index : int
        The index of the dimension before which ``grids`` are to
        be inserted. Negative indices count backwards from
        ``self.ndim``.
    grid1, ..., gridN : `RectGrid`
        The grids to be inserted into ``self``.

    Returns
    -------
    newgrid : `RectGrid`
        The enlarged grid.

    Examples
    --------
    >>> g1 = RectGrid([0, 1], [-1, 0, 2])
    >>> g2 = RectGrid([1], [-6, 15])
    >>> g1.insert(1, g2)
    RectGrid(
        [ 0.,  1.],
        [ 1.],
        [ -6.,  15.],
        [-1.,  0.,  2.]
    )

    See Also
    --------
    append
    """
    # Keep the original value for the error message before conversion.
    index, index_in = safe_int_conv(index), index
    if not -self.ndim <= index <= self.ndim:
        raise IndexError('index {0} outside the valid range -{1} ... {1}'
                         ''.format(index_in, self.ndim))
    if index < 0:
        index += self.ndim
    if len(grids) == 0:
        # Copy of `self`
        return RectGrid(*self.coord_vectors)
    elif len(grids) == 1:
        # Insert single grid
        grid = grids[0]
        if not isinstance(grid, RectGrid):
            raise TypeError('{!r} is not a `RectGrid` instance'
                            ''.format(grid))
        new_vecs = (self.coord_vectors[:index] + grid.coord_vectors +
                    self.coord_vectors[index:])
        return RectGrid(*new_vecs)
    else:
        # Recursively insert first grid and the remaining into the result
        return self.insert(index, grids[0]).insert(
            index + grids[0].ndim, *(grids[1:]))
def append(self, *grids):
    """Insert ``grids`` at the end as a block.

    Parameters
    ----------
    grid1, ..., gridN : `RectGrid`
        The grids to be appended to ``self``.

    Returns
    -------
    newgrid : `RectGrid`
        The enlarged grid.

    Examples
    --------
    >>> g1 = RectGrid([0, 1], [-1, 0, 2])
    >>> g2 = RectGrid([1], [-6, 15])
    >>> g1.append(g2)
    RectGrid(
        [ 0.,  1.],
        [-1.,  0.,  2.],
        [ 1.],
        [ -6.,  15.]
    )

    See Also
    --------
    insert
    """
    # Appending is inserting before the (one-past-last) axis index.
    return self.insert(self.ndim, *grids)
def squeeze(self, axis=None):
    """Return the grid with removed degenerate (length 1) dimensions.

    Parameters
    ----------
    axis : None or index expression, optional
        Subset of the axes to squeeze. Default: All axes.

    Returns
    -------
    squeezed : `RectGrid`
        Squeezed grid.

    Examples
    --------
    >>> g = RectGrid([0, 1], [-1], [-1, 0, 2])
    >>> g.squeeze()
    RectGrid(
        [ 0.,  1.],
        [-1.,  0.,  2.]
    )
    """
    if axis is None:
        rng = range(self.ndim)
    else:
        # Resolve arbitrary index expressions into a list of axis indices.
        rng = list(np.atleast_1d(np.arange(self.ndim)[axis]))
    # Keep axes that are either outside the selection or non-degenerate.
    new_indcs = [i for i in range(self.ndim)
                 if i not in rng or self.nondegen_byaxis[i]]
    coord_vecs = [self.coord_vectors[axis] for axis in new_indcs]
    return RectGrid(*coord_vecs)
def points(self, order='C'):
    """All grid points in a single array.

    Parameters
    ----------
    order : {'C', 'F'}, optional
        Axis ordering in the resulting point array.

    Returns
    -------
    points : `numpy.ndarray`
        The shape of the array is ``size x ndim``, i.e. the points
        are stored as rows.

    Examples
    --------
    >>> g = RectGrid([0, 1], [-1, 0, 2])
    >>> g.points()
    array([[ 0., -1.],
           [ 0.,  0.],
           [ 0.,  2.],
           [ 1., -1.],
           [ 1.,  0.],
           [ 1.,  2.]])
    >>> g.points(order='F')
    array([[ 0., -1.],
           [ 1., -1.],
           [ 0.,  0.],
           [ 1.,  0.],
           [ 0.,  2.],
           [ 1.,  2.]])
    """
    if str(order).upper() not in ('C', 'F'):
        raise ValueError('order {!r} not recognized'.format(order))
    else:
        order = str(order).upper()
    # For 'F' ordering, fill the axes in reversed sequence so that the
    # first axis varies fastest in the resulting point list.
    axes = range(self.ndim) if order == 'C' else reversed(range(self.ndim))
    shape = self.shape if order == 'C' else tuple(reversed(self.shape))
    point_arr = np.empty((self.size, self.ndim))
    for i, axis in enumerate(axes):
        # Reshape the column view to the grid shape; broadcasting the
        # 1d coordinate vector along the other axes fills all
        # repetitions in one assignment.
        view = point_arr[:, axis].reshape(shape)
        coord_shape = (1,) * i + (-1,) + (1,) * (self.ndim - i - 1)
        view[:] = self.coord_vectors[axis].reshape(coord_shape)
    return point_arr
def corner_grid(self):
    """Return a grid with only the corner points.

    Returns
    -------
    cgrid : `RectGrid`
        Grid with size 2 in non-degenerate dimensions and 1
        in degenerate ones

    Examples
    --------
    >>> g = RectGrid([0, 1], [-1, 0, 2])
    >>> g.corner_grid()
    uniform_grid([ 0., -1.], [ 1.,  2.], (2, 2))
    """
    minmax_vecs = []
    for axis in range(self.ndim):
        if self.shape[axis] == 1:
            # Degenerate axis: keep its single coordinate.
            minmax_vecs.append(self.coord_vectors[axis][0])
        else:
            # Otherwise only the two extremal coordinates remain.
            minmax_vecs.append((self.coord_vectors[axis][0],
                                self.coord_vectors[axis][-1]))
    return RectGrid(*minmax_vecs)
def corners(self, order='C'):
    """Corner points of the grid in a single array.

    Parameters
    ----------
    order : {'C', 'F'}, optional
        Axis ordering in the resulting point array

    Returns
    -------
    corners : `numpy.ndarray`
        The size of the array is 2^m x ndim, where m is the number
        of non-degenerate axes, i.e. the corners are stored as rows.

    Examples
    --------
    >>> g = RectGrid([0, 1], [-1, 0, 2])
    >>> g.corners()
    array([[ 0., -1.],
           [ 0.,  2.],
           [ 1., -1.],
           [ 1.,  2.]])
    >>> g.corners(order='F')
    array([[ 0., -1.],
           [ 1., -1.],
           [ 0.,  2.],
           [ 1.,  2.]])
    """
    # Delegate: the corners are simply all points of the corner grid.
    return self.corner_grid().points(order=order)
@property
def meshgrid(self):
    """A grid suitable for function evaluation.

    Returns
    -------
    meshgrid : tuple of `numpy.ndarray`'s
        Function evaluation grid with ``ndim`` axes

    See Also
    --------
    numpy.meshgrid
        Coordinate matrices from coordinate vectors.
        We use ``indexing='ij'`` and ``copy=True``

    Examples
    --------
    >>> g = RectGrid([0, 1], [-1, 0, 2])
    >>> x, y = g.meshgrid
    >>> x
    array([[ 0.],
           [ 1.]])
    >>> y
    array([[-1.,  0.,  2.]])

    Easy function evaluation via broadcasting:

    >>> x ** 2 - y ** 2
    array([[-1.,  0., -4.],
           [ 0.,  1., -3.]])
    """
    # ``sparse_meshgrid`` is a module-level helper (not in this chunk);
    # it produces broadcastable arrays, one per axis.
    return sparse_meshgrid(*self.coord_vectors)
def __getitem__(self, indices):
    """Return ``self[indices]``.

    Parameters
    ----------
    indices : index expression
        Object determining which parts of the grid to extract.
        ``None`` (new axis) and empty axes are not supported.

    Examples
    --------
    Indexing with integers along all axes produces an array (a point):

    >>> g = RectGrid([-1, 0, 3], [2, 4, 5], [5], [2, 4, 7])
    >>> g[0, 0, 0, 0]
    array([-1.,  2.,  5.,  2.])

    Otherwise, a new RectGrid is returned. Too few indices are filled
    up with an ellipsis from the right:

    >>> g[0] == g[0, :, :, :] == g[0, ...]
    True
    """
    if isinstance(indices, list):
        # A list is treated as "fancy" indexing of the FIRST axis only.
        if indices == []:
            new_coord_vecs = []
        else:
            new_coord_vecs = [self.coord_vectors[0][indices]]
            new_coord_vecs += self.coord_vectors[1:]
        return RectGrid(*new_coord_vecs)

    indices = normalized_index_expression(indices, self.shape,
                                          int_to_slice=False)
    # If all indices are integers, return an array (a point). Otherwise,
    # create a new grid.
    if all(np.isscalar(idx) for idx in indices):
        return np.fromiter(
            (v[int(idx)] for idx, v in zip(indices, self.coord_vectors)),
            dtype=float)
    else:
        new_coord_vecs = [vec[idx]
                          for idx, vec in zip(indices, self.coord_vectors)]
        return RectGrid(*new_coord_vecs)
def __array__(self, dtype=None):
    """Used with ``numpy``. Returns `points`.

    This allows usage of RectGrid with some numpy functions.

    Parameters
    ----------
    dtype : `numpy.dtype`
        The Numpy data type of the result array. ``None`` means `float`.

    Examples
    --------
    >>> g = RectGrid([0, 1], [-2, 0, 2])

    Convert to an array:

    >>> np.asarray(g)
    array([[ 0., -2.],
           [ 0.,  0.],
           [ 0.,  2.],
           [ 1., -2.],
           [ 1.,  0.],
           [ 1.,  2.]])

    Calculate the midpoint:

    >>> np.mean(g, axis=0)
    array([ 0.5,  0. ])
    """
    # ``astype(None)`` yields the default float dtype, matching the
    # documented behavior.
    return self.points().astype(dtype)
def __repr__(self):
    """Return ``repr(self)``."""
    if self.is_uniform:
        # Uniform grids get a compact, constructor-style representation.
        ctor = 'uniform_grid'
        posargs = [self.min_pt, self.max_pt, self.shape]
        posmod = [array_str, array_str, '']
        with npy_printoptions(precision=4):
            inner_str = signature_string(posargs, [], mod=[posmod, ''])
        return '{}({})'.format(ctor, inner_str)
    else:
        # General grids print all coordinate vectors, one per line.
        ctor = self.__class__.__name__
        posargs = self.coord_vectors
        posmod = array_str
        inner_str = signature_string(posargs, [], sep=[',\n', ', ', ', '],
                                     mod=[posmod, ''])
        return '{}(\n{}\n)'.format(ctor, indent(inner_str))

# Grids read the same as strings and reprs.
__str__ = __repr__
def uniform_grid_fromintv(intv_prod, shape, nodes_on_bdry=True):
    """Return a grid from sampling an interval product uniformly.

    The resulting grid will by default include ``intv_prod.min_pt`` and
    ``intv_prod.max_pt`` as grid points. If you want a subdivision into
    equally sized cells with grid points in the middle, use
    `uniform_partition` instead.

    Parameters
    ----------
    intv_prod : `IntervalProd`
        Set to be sampled.
    shape : int or sequence of ints
        Number of nodes per axis. Entries corresponding to degenerate axes
        must be equal to 1.
    nodes_on_bdry : bool or sequence, optional
        If a sequence is provided, it determines per axis whether to
        place the last grid point on the boundary (``True``) or shift it
        by half a cell size into the interior (``False``). In each axis,
        an entry may consist in a single bool or a 2-tuple of
        bool. In the latter case, the first tuple entry decides for
        the left, the second for the right boundary. The length of the
        sequence must be ``intv_prod.ndim``.
        A single boolean is interpreted as a global choice for all
        boundaries.

    Returns
    -------
    sampling : `RectGrid`
        Uniform sampling grid for the interval product.

    Raises
    ------
    TypeError
        If ``intv_prod`` is not an `IntervalProd`.
    ValueError
        If ``intv_prod`` is unbounded or ``nodes_on_bdry`` has the
        wrong length.

    Examples
    --------
    >>> rbox = odl.IntervalProd([-1.5, 2], [-0.5, 3])
    >>> grid = uniform_grid_fromintv(rbox, (3, 3))
    >>> grid.coord_vectors
    (array([-1.5, -1. , -0.5]), array([ 2. ,  2.5,  3. ]))

    To have the nodes in the "middle", use ``nodes_on_bdry=False``:

    >>> grid = uniform_grid_fromintv(rbox, (2, 2), nodes_on_bdry=False)
    >>> grid.coord_vectors
    (array([-1.25, -0.75]), array([ 2.25,  2.75]))

    See Also
    --------
    uniform_grid : Create a uniform grid directly.
    odl.discr.partition.uniform_partition_fromintv :
        divide interval product into equally sized subsets
    """
    if not isinstance(intv_prod, IntervalProd):
        raise TypeError('{!r} is not an `IntervalProd` instance'
                        ''.format(intv_prod))

    if (np.any(np.isinf(intv_prod.min_pt)) or
            np.any(np.isinf(intv_prod.max_pt))):
        # BUG FIX: format the object, not the literal string
        # 'intv_prod', so the error message shows the offending set.
        raise ValueError('`intv_prod` must be finite, got {!r}'
                         ''.format(intv_prod))

    shape = normalized_scalar_param_list(shape, intv_prod.ndim, safe_int_conv)
    # Normalize to a plain tuple of ints unconditionally (previously this
    # conversion sat in an unreachable-in-practice `else` branch of the
    # validation chain below and was skipped in the common cases).
    shape = tuple(int(n) for n in shape)

    if np.shape(nodes_on_bdry) == ():
        # Single bool: replicate as (left, right) pair for every axis.
        nodes_on_bdry = ([(bool(nodes_on_bdry), bool(nodes_on_bdry))] *
                         intv_prod.ndim)
    elif intv_prod.ndim == 1 and len(nodes_on_bdry) == 2:
        # Ambiguous 1d case: interpret a 2-sequence as one (left, right)
        # pair, not as two axes.
        nodes_on_bdry = [nodes_on_bdry]
    elif len(nodes_on_bdry) != intv_prod.ndim:
        raise ValueError('`nodes_on_bdry` has length {}, expected {}'
                         ''.format(len(nodes_on_bdry), intv_prod.ndim))

    # We need to determine the placement of the grid minimum and maximum
    # points based on the choices in nodes_on_bdry. If in a given axis,
    # and for a given side (left or right), the entry is True, the node
    # lies on the boundary, so this coordinate can simply be taken as-is.
    #
    # Otherwise, the following conditions must be met:
    #
    # 1. The node should be half a stride s away from the boundary
    # 2. Adding or subtracting (n-1)*s should give the other extremal
    #    node.
    #
    # If both nodes are to be shifted half a stride inside, the second
    # condition yields
    #     a + s/2 + (n-1)*s = b - s/2 => s = (b - a) / n,
    # hence the extremal grid points are
    #     gmin = a + s/2 = a + (b - a) / (2 * n),
    #     gmax = b - s/2 = b - (b - a) / (2 * n).
    #
    # In the case where one node, say the rightmost, lies on the
    # boundary, condition 2. reads as
    #     a + s/2 + (n-1)*s = b => s = (b - a) / (n - 1/2),
    # thus
    #     gmin = a + (b - a) / (2 * n - 1).
    gmin, gmax = [], []
    for n, xmin, xmax, on_bdry in zip(shape, intv_prod.min_pt,
                                      intv_prod.max_pt, nodes_on_bdry):
        # Unpack the tuple if possible, else use bool globally for this
        # axis.
        try:
            bdry_l, bdry_r = on_bdry
        except TypeError:
            bdry_l = bdry_r = on_bdry

        if bdry_l and bdry_r:
            gmin.append(xmin)
            gmax.append(xmax)
        elif bdry_l and not bdry_r:
            gmin.append(xmin)
            gmax.append(xmax - (xmax - xmin) / (2 * n - 1))
        elif not bdry_l and bdry_r:
            gmin.append(xmin + (xmax - xmin) / (2 * n - 1))
            gmax.append(xmax)
        else:
            gmin.append(xmin + (xmax - xmin) / (2 * n))
            gmax.append(xmax - (xmax - xmin) / (2 * n))

    # Create the grid
    coord_vecs = [np.linspace(mi, ma, num, endpoint=True, dtype=np.float64)
                  for mi, ma, num in zip(gmin, gmax, shape)]
    return RectGrid(*coord_vecs)
def uniform_grid(min_pt, max_pt, shape, nodes_on_bdry=True):
    """Return a grid from sampling an implicit interval product uniformly.

    Parameters
    ----------
    min_pt : float or sequence of float
        Vectors of lower ends of the intervals in the product.
    max_pt : float or sequence of float
        Vectors of upper ends of the intervals in the product.
    shape : int or sequence of ints
        Number of nodes per axis. Entries corresponding to degenerate axes
        must be equal to 1.
    nodes_on_bdry : bool or sequence, optional
        If a sequence is provided, it determines per axis whether to
        place the last grid point on the boundary (``True``) or shift it
        by half a cell size into the interior (``False``). In each axis,
        an entry may consist in a single bool or a 2-tuple of
        bool. In the latter case, the first tuple entry decides for
        the left, the second for the right boundary. The length of the
        sequence must be the number of axes.
        A single boolean is interpreted as a global choice for all
        boundaries.

    Returns
    -------
    uniform_grid : `RectGrid`
        The resulting uniform grid.

    See Also
    --------
    uniform_grid_fromintv :
        sample a given interval product
    odl.discr.partition.uniform_partition :
        divide implicitly defined interval product into equally
        sized subsets

    Examples
    --------
    By default, the min/max points are included in the grid:

    >>> grid = odl.uniform_grid([-1.5, 2], [-0.5, 3], (3, 3))
    >>> grid.coord_vectors
    (array([-1.5, -1. , -0.5]), array([ 2. ,  2.5,  3. ]))

    If ``shape`` is supposed to refer to small subvolumes, and the grid
    should be their centers, use the option ``nodes_on_bdry=False``:

    >>> grid = odl.uniform_grid([-1.5, 2], [-0.5, 3], (2, 2),
    ...                         nodes_on_bdry=False)
    >>> grid.coord_vectors
    (array([-1.25, -0.75]), array([ 2.25,  2.75]))

    In 1D, we don't need sequences:

    >>> grid = odl.uniform_grid(0, 1, 3)
    >>> grid.coord_vectors
    (array([ 0. ,  0.5,  1. ]),)
    """
    # Thin convenience wrapper: build the interval product, then sample.
    return uniform_grid_fromintv(IntervalProd(min_pt, max_pt), shape,
                                 nodes_on_bdry=nodes_on_bdry)
if __name__ == '__main__':
    # Run this module's doctests when executed as a script.
    from odl.util.testutils import run_doctests
    run_doctests()
| aringh/odl | odl/discr/grid.py | Python | mpl-2.0 | 38,354 |
# coding=utf-8
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
def pardo_dofn(test=None):
    """Documentation snippet: split lines into words with a ``DoFn``.

    The region between the START/END markers is extracted verbatim into
    the Beam docs, so its content must not change.

    :param test: optional callback receiving the output PCollection
        (used by the snippet tests).
    """
    # [START pardo_dofn]
    import apache_beam as beam

    class SplitWords(beam.DoFn):
        def __init__(self, delimiter=','):
            self.delimiter = delimiter

        def process(self, text):
            for word in text.split(self.delimiter):
                yield word

    with beam.Pipeline() as pipeline:
        plants = (
            pipeline
            | 'Gardening plants' >> beam.Create([
                '🍓Strawberry,🥕Carrot,🍆Eggplant',
                '🍅Tomato,🥔Potato',
            ])
            | 'Split words' >> beam.ParDo(SplitWords(','))
            | beam.Map(print))
    # [END pardo_dofn]
    if test:
        test(plants)
def pardo_dofn_params(test=None):
    """Documentation snippet: accessing timestamp and window ``DoFn`` params.

    The region between the START/END markers is extracted verbatim into
    the Beam docs, so its content must not change.

    :param test: optional callback receiving the output PCollection
        (used by the snippet tests).
    """
    # pylint: disable=line-too-long
    # [START pardo_dofn_params]
    import apache_beam as beam

    class AnalyzeElement(beam.DoFn):
        def process(
                self,
                elem,
                timestamp=beam.DoFn.TimestampParam,
                window=beam.DoFn.WindowParam):
            yield '\n'.join([
                '# timestamp',
                'type(timestamp) -> ' + repr(type(timestamp)),
                'timestamp.micros -> ' + repr(timestamp.micros),
                'timestamp.to_rfc3339() -> ' + repr(timestamp.to_rfc3339()),
                'timestamp.to_utc_datetime() -> ' + repr(timestamp.to_utc_datetime()),
                '',
                '# window',
                'type(window) -> ' + repr(type(window)),
                'window.start -> {} ({})'.format(
                    window.start, window.start.to_utc_datetime()),
                'window.end -> {} ({})'.format(
                    window.end, window.end.to_utc_datetime()),
                'window.max_timestamp() -> {} ({})'.format(
                    window.max_timestamp(), window.max_timestamp().to_utc_datetime()),
            ])

    with beam.Pipeline() as pipeline:
        dofn_params = (
            pipeline
            | 'Create a single test element' >> beam.Create([':)'])
            | 'Add timestamp (Spring equinox 2020)' >>
            beam.Map(lambda elem: beam.window.TimestampedValue(elem, 1584675660))
            |
            'Fixed 30sec windows' >> beam.WindowInto(beam.window.FixedWindows(30))
            | 'Analyze element' >> beam.ParDo(AnalyzeElement())
            | beam.Map(print))
    # [END pardo_dofn_params]
    # pylint: enable=line-too-long
    if test:
        test(dofn_params)
def pardo_dofn_methods(test=None):
    """Documentation snippet: the ``DoFn`` lifecycle methods.

    The region between the START/END markers is extracted verbatim into
    the Beam docs, so its content must not change.

    :param test: optional callback receiving the output PCollection
        (used by the snippet tests).
    """
    # [START pardo_dofn_methods]
    import apache_beam as beam

    class DoFnMethods(beam.DoFn):
        def __init__(self):
            print('__init__')
            self.window = beam.window.GlobalWindow()

        def setup(self):
            print('setup')

        def start_bundle(self):
            print('start_bundle')

        def process(self, element, window=beam.DoFn.WindowParam):
            self.window = window
            yield '* process: ' + element

        def finish_bundle(self):
            yield beam.utils.windowed_value.WindowedValue(
                value='* finish_bundle: 🌱🌳🌍',
                timestamp=0,
                windows=[self.window],
            )

        def teardown(self):
            print('teardown')

    with beam.Pipeline() as pipeline:
        results = (
            pipeline
            | 'Create inputs' >> beam.Create(['🍓', '🥕', '🍆', '🍅', '🥔'])
            | 'DoFn methods' >> beam.ParDo(DoFnMethods())
            | beam.Map(print))
    # [END pardo_dofn_methods]
    if test:
        return test(results)
| lukecwik/incubator-beam | sdks/python/apache_beam/examples/snippets/transforms/elementwise/pardo.py | Python | apache-2.0 | 4,045 |
# Copyright 2014 Open Data Science Initiative and other authors. See AUTHORS.txt
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import IPython
if int(IPython.__version__[0])>3:
from ipywidgets import interact, fixed
else:
from IPython.html.widgets.interaction import interact, fixed
from IPython.display import display, HTML
def display_url(target):
    """Displaying URL in an IPython notebook to allow the user to click and
    check on information. With thanks to Fernando Perez for putting
    together the implementation!

    :param target: the url to display.
    :type target: string."""
    # Prepend a scheme if the caller passed a bare host/path.
    prefix = u"http://" if not target.startswith("http") else u""
    target = prefix + target
    display(HTML(u'<a href="{t}" target=_blank>{t}</a>'.format(t=target)))
def iframe_url(target, width=500, height=400, scrolling=True, border=0, frameborder=0):
    """Produce an iframe for displaying an item in an HTML window.

    :param target: the target url.
    :type target: string
    :param width: the width of the iframe (default 500).
    :type width: int
    :param height: the height of the iframe (default 400).
    :type height: int
    :param scrolling: whether or not to allow scrolling (default True).
    :type scrolling: bool
    :param border: width of the border.
    :type border: int
    :param frameborder: width of the frameborder.
    :type frameborder: int"""
    # Prepend a scheme if the caller passed a bare host/path.
    if not target.startswith("http"):
        target = u"http://" + target
    scroll_val = 'yes' if scrolling else 'no'
    template = (u'<iframe frameborder="{frameborder}" scrolling="{scrolling}" '
                u'style="border:{border}px" src="{url}", width={width} '
                u'height={height}></iframe>')
    return template.format(frameborder=frameborder, scrolling=scroll_val,
                           border=border, url=target, width=width,
                           height=height)
def display_iframe_url(target, **kwargs):
    """Display the contents of a URL in an IPython notebook.

    :param target: the target url.
    :type target: string

    .. seealso:: `iframe_url()` for additional arguments."""
    # Build the iframe markup, then hand it to IPython for rendering.
    txt = iframe_url(target, **kwargs)
    display(HTML(txt))
def display_google_book(id, page, width=700, height=500, **kwargs):
    """Display an embedded version of a Google book.

    :param id: the id of the google book to display.
    :type id: string
    :param page: the start page for the book.
    :type page: string or int."""
    # Google Books "embed" output mode renders the book viewer inline.
    url = 'http://books.google.co.uk/books?id={id}&pg=PA{page}&output=embed'.format(id=id, page=page)
    display_iframe_url(url, width=width, height=height, **kwargs)
def code_toggle(start_show=False, message=None):
    """Toggling on and off code in a notebook.

    :param start_show: Whether to display the code or not on first load
        (default is False).
    :type start_show: bool
    :param message: the message used to toggle display of the code.
    :type message: string

    The tip that this idea is
    based on is from Damian Kao (http://blog.nextgenetics.net/?e=102)."""
    html = '<script>\n'
    if message is None:
        message = u'The raw code for this jupyter notebook can be hidden for easier reading.'
    # The JS flag `code_show` holds the *current* state; the toggle
    # function flips it and shows/hides all input cells accordingly.
    if start_show:
        html += u'code_show=true;\n'
    else:
        html += u'code_show=false;\n'
    html += '''function code_toggle() {
if (code_show){
$('div.input').show();
} else {
$('div.input').hide();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
'''
    html += message + ' To toggle on/off the raw code, click <a href="javascript:code_toggle()">here</a>.'
    display(HTML(html))
def display_prediction(basis, num_basis=4, wlim=(-1., 1.), fig=None, ax=None, xlim=None, ylim=None, num_points=1000, offset=0.0, **kwargs):
    """Interactive widget for displaying a prediction function based on
    summing separate basis functions.

    :param basis: a function handle that calls the basis functions (or a
        dict of such handles selectable in the widget).
    :type basis: function handle.
    :param xlim: limits of the x axis to use.
    :param ylim: limits of the y axis to use.
    :param wlim: limits for the basis function weights."""
    import numpy as np
    import pylab as plt
    if fig is not None:
        if ax is None:
            ax = fig.gca()
    # Fall back to the axis limits of a provided axis, else defaults.
    if xlim is None:
        if ax is not None:
            xlim = ax.get_xlim()
        else:
            xlim = (-2., 2.)
    if ylim is None:
        if ax is not None:
            ylim = ax.get_ylim()
        else:
            ylim = (-1., 1.)

    # initialise X and set up W arguments.
    x = np.zeros((num_points, 1))
    x[:, 0] = np.linspace(xlim[0], xlim[1], num_points)

    # One slider range per weight; only the first weight is offset.
    param_args = {}
    for i in range(num_basis):
        lim = list(wlim)
        if i == 0:
            lim[0] += offset
            lim[1] += offset
        param_args['w_' + str(i)] = lim

    # helper function for making basis prediction.
    def predict_basis(w, basis, x, num_basis, **kwargs):
        Phi = basis(x, num_basis, **kwargs)
        f = np.dot(Phi, w)
        return f, Phi

    # A dict of bases becomes a widget dropdown; pick any one to set up
    # the initial plot.
    if type(basis) is dict:
        use_basis = basis[list(basis.keys())[0]]
    else:
        use_basis = basis
    f, Phi = predict_basis(np.zeros((num_basis, 1)),
                           use_basis, x, num_basis,
                           **kwargs)
    if fig is None:
        fig, ax = plt.subplots(figsize=(12, 4))

    ax.set_ylim(ylim)
    ax.set_xlim(xlim)
    predline = ax.plot(x, f, linewidth=2)[0]
    basislines = []
    for i in range(num_basis):
        basislines.append(ax.plot(x, Phi[:, i], 'r')[0])
    ax.set_ylim(ylim)
    ax.set_xlim(xlim)

    def generate_function(basis, num_basis, predline, basislines, basis_args, display_basis, offset, **kwargs):
        # Collect slider values into the weight vector.
        w = np.zeros((num_basis, 1))
        for i in range(num_basis):
            w[i] = kwargs['w_' + str(i)]
        f, Phi = predict_basis(w, basis, x, num_basis, **basis_args)
        predline.set_xdata(x[:, 0])
        predline.set_ydata(f)
        for i in range(num_basis):
            basislines[i].set_xdata(x[:, 0])
            basislines[i].set_ydata(Phi[:, i])
        if display_basis:
            for i in range(num_basis):
                basislines[i].set_alpha(1)  # make visible
        else:
            for i in range(num_basis):
                basislines[i].set_alpha(0)
        display(fig)

    if type(basis) is not dict:
        basis = fixed(basis)

    # Close the static figure; `interact` re-displays it on each update.
    plt.close(fig)
    interact(generate_function,
             basis=basis,
             num_basis=fixed(num_basis),
             predline=fixed(predline),
             basislines=fixed(basislines),
             basis_args=fixed(kwargs),
             offset=fixed(offset),
             display_basis=False,
             **param_args)
def display_plots(filebase, directory=None, width=700, height=500, **kwargs):
    """Display a series of plots controlled by sliders. The function relies
    on Python string format functionality to index through a series of
    plots.

    :param filebase: filename template formatted with the slider values,
        e.g. ``'plot_{sample:0>3}.svg'``.
    :param directory: optional directory prepended to the filename.
    :param width: accepted for API compatibility (currently unused).
    :param height: accepted for API compatibility (currently unused).
    """
    def show_figure(filebase, directory, **kwargs):
        """Helper function to load in the relevant plot for display."""
        filename = filebase.format(**kwargs)
        if directory is not None:
            filename = directory + '/' + filename
        # Bug fix: interpolate the computed filename into the img tag;
        # previously the src attribute was a literal placeholder and the
        # image could never load.
        display(HTML("<img src='{filename}'>".format(filename=filename)))
    interact(show_figure, filebase=fixed(filebase), directory=fixed(directory), **kwargs)
| alansaul/ods | pods/notebook.py | Python | bsd-3-clause | 7,352 |
"""
@author: CarolinaFernandez
"""
from django.conf.urls.defaults import patterns, url
# URL routes for the clearinghouse help app.
# NOTE(review): r'^' matches ANY path handed to this include, not just the
# bare index -- if only the root should match, the pattern would be r'^$';
# confirm the intended behavior before changing.
urlpatterns = patterns('expedient.clearinghouse.help.views',
    url(r'^', 'home', name="help_home"),
)
| dana-i2cat/felix | expedient/src/python/expedient/clearinghouse/help/urls.py | Python | apache-2.0 | 193 |
# -*- coding: ISO-8859-1 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__reversion__ = "$Revision: 20 $"
__author__ = "$Author: holtwick $"
__date__ = "$Date: 2007-10-09 12:58:24 +0200 (Di, 09 Okt 2007) $"
from pisa_tags import pisaTag
from pisa_util import *
from pisa_reportlab import PmlTable, TableStyle, PmlKeepInFrame
import copy
import sys
import logging
log = logging.getLogger("ho.pisa")
def _width(value=None):
if value is None:
return None
value = str(value)
if value.endswith("%"):
return value
return getSize(value)
class TableData:
    """
    Accumulator for one <table> element: collects cell flowables row by
    row, the ReportLab TableStyle commands, and the grid positions
    blocked out by row/column spans.
    """

    def __init__(self):
        self.data = []      # list of rows; each row is a list of cell content
        self.styles = []    # ReportLab TableStyle command tuples
        self.span = []      # (x, y) grid positions covered by a span
        self.mode = ""      # upper-cased tag that last added styles (TABLE/TR/TD)
        self.padding = 0    # default cell padding (table 'cellpadding' attribute)
        self.col = 0        # column cursor within the currently open row

    def add_cell(self, data=None):
        """Append one cell to the current (last) row and advance the cursor."""
        self.col += 1
        self.data[-1].append(data)

    def add_style(self, data):
        """Record a ReportLab style command (copied so callers may reuse tuples)."""
        self.styles.append(copy.copy(data))

    def add_empty(self, x, y):
        """Mark grid position (x, y) as occupied by a row/col span."""
        self.span.append((x, y))

    def get_data(self):
        """Return the table data with placeholder cells inserted for spans."""
        data = self.data
        for x, y in self.span:
            try:
                data[y].insert(x, '')
            except IndexError:
                # A span may reference a row that was never opened (e.g. a
                # rowspan pointing past the last <tr>); ignore such holes.
                pass
        return data

    def add_cell_styles(self, c, begin, end, mode="td"):
        """Translate the current frag's background/border/padding settings
        into TableStyle commands applied to the cell range begin..end."""
        self.mode = mode.upper()
        # Row backgrounds are handled elsewhere; skip them for "tr".
        if c.frag.backColor and mode != "tr":
            self.add_style(('BACKGROUND', begin, end, c.frag.backColor))
        # One LINE* command per side, only when style, width and color are
        # all present.
        if getBorderStyle(c.frag.borderTopStyle) and c.frag.borderTopWidth and c.frag.borderTopColor is not None:
            self.add_style(('LINEABOVE', begin, (end[0], begin[1]),
                c.frag.borderTopWidth,
                c.frag.borderTopColor,
                "squared"))
        if getBorderStyle(c.frag.borderLeftStyle) and c.frag.borderLeftWidth and c.frag.borderLeftColor is not None:
            self.add_style(('LINEBEFORE', begin, (begin[0], end[1]),
                c.frag.borderLeftWidth,
                c.frag.borderLeftColor,
                "squared"))
        if getBorderStyle(c.frag.borderRightStyle) and c.frag.borderRightWidth and c.frag.borderRightColor is not None:
            self.add_style(('LINEAFTER', (end[0], begin[1]), end,
                c.frag.borderRightWidth,
                c.frag.borderRightColor,
                "squared"))
        if getBorderStyle(c.frag.borderBottomStyle) and c.frag.borderBottomWidth and c.frag.borderBottomColor is not None:
            self.add_style(('LINEBELOW', (begin[0], end[1]), end,
                c.frag.borderBottomWidth,
                c.frag.borderBottomColor,
                "squared"))
        # Per-side padding falls back to the table-wide default.
        self.add_style(('LEFTPADDING', begin, end, c.frag.paddingLeft or self.padding))
        self.add_style(('RIGHTPADDING', begin, end, c.frag.paddingRight or self.padding))
        self.add_style(('TOPPADDING', begin, end, c.frag.paddingTop or self.padding))
        self.add_style(('BOTTOMPADDING', begin, end, c.frag.paddingBottom or self.padding))
class pisaTagTABLE(pisaTag):
    """
    Handle <table>: swaps in a fresh TableData on start() (saving the
    outer table's accumulator for nested tables) and, on end(), turns
    the collected rows and styles into a PmlTable flowable.
    """

    def start(self, c):
        c.addPara()
        attrs = self.attr
        # Swap table data so nested tables restore the outer one in end().
        c.tableData, self.tableData = TableData(), c.tableData
        tdata = c.tableData
        if attrs.border and attrs.bordercolor:
            # Express the table border via frag borders so add_cell_styles()
            # below converts them into LINE* style commands.
            frag = c.frag
            frag.borderLeftWidth = attrs.border
            frag.borderLeftColor = attrs.bordercolor
            frag.borderLeftStyle = "solid"
            frag.borderRightWidth = attrs.border
            frag.borderRightColor = attrs.bordercolor
            frag.borderRightStyle = "solid"
            frag.borderTopWidth = attrs.border
            frag.borderTopColor = attrs.bordercolor
            frag.borderTopStyle = "solid"
            frag.borderBottomWidth = attrs.border
            frag.borderBottomColor = attrs.bordercolor
            frag.borderBottomStyle = "solid"
        tdata.padding = attrs.cellpadding
        # Set border and padding styles for the whole grid.
        tdata.add_cell_styles(c, (0, 0), (-1, - 1), "table")
        tdata.align = attrs.align.upper()
        tdata.col = 0
        tdata.row = 0
        tdata.colw = []
        tdata.rowh = []
        tdata.repeat = attrs.repeat
        tdata.width = _width(attrs.width)

    def end(self, c):
        tdata = c.tableData
        data = tdata.get_data()
        # Add missing columns so that each row has the same count of columns.
        # This prevents errors in the ReportLab table.
        try:
            maxcols = max([len(row) for row in data] or [0])
        except ValueError:
            log.warn(c.warning("<table> rows seem to be inconsistent"))
            # BUGFIX: was 'maxcols = [0]' (a list), which would crash in the
            # padding arithmetic below; use the integer 0 instead.
            maxcols = 0
        for i, row in enumerate(data):
            data[i] += [''] * (maxcols - len(row))
        try:
            if tdata.data:
                t = PmlTable(
                    data,
                    colWidths=tdata.colw,
                    rowHeights=tdata.rowh,
                    splitByRow=1,
                    repeatRows=tdata.repeat,
                    hAlign=tdata.align,
                    vAlign='TOP',
                    style=TableStyle(tdata.styles))
                t.totalWidth = _width(tdata.width)
                t.spaceBefore = c.frag.spaceBefore
                t.spaceAfter = c.frag.spaceAfter
                # XXX Maybe we need to copy some more properties?
                t.keepWithNext = c.frag.keepWithNext
                c.addStory(t)
            else:
                log.warn(c.warning("<table> is empty"))
        # Best-effort: a malformed table must not abort the whole document,
        # but do not swallow KeyboardInterrupt/SystemExit (was a bare except).
        except Exception:
            log.warn(c.warning("<table>"), exc_info=1)
        # Cleanup and re-swap table data
        c.clearFrag()
        c.tableData, self.tableData = self.tableData, None
class pisaTagTR(pisaTag):
    """
    Handle <tr>: opens a new row in the current TableData on start()
    and advances the row counter on end().
    """

    def start(self, c):
        table = c.tableData
        current_row = table.row
        # Apply row-level border/padding styles across every column of
        # this row ((0, row) .. (-1, row)).
        table.add_cell_styles(c, (0, current_row), (-1, current_row), "tr")
        # Row-level valign becomes the default for the cells within.
        c.frag.vAlign = self.attr.valign or c.frag.vAlign
        table.col = 0
        table.data.append([])

    def end(self, c):
        c.tableData.row += 1
class pisaTagTD(pisaTag):
    """
    Handle <td>: on start() records the cell position (skipping grid
    slots claimed by earlier row/col spans), registers span/width/height
    and per-cell styles; on end() stores the collected story as the
    cell content.
    """

    def start(self, c):
        if self.attr.align is not None:
            c.frag.alignment = getAlign(self.attr.align)
        c.clearFrag()
        self.story = c.swapStory()
        attrs = self.attr
        tdata = c.tableData
        cspan = attrs.colspan
        rspan = attrs.rowspan
        row = tdata.row
        col = tdata.col
        # Skip over grid positions already claimed by a rowspan/colspan of
        # an earlier cell; repeat until (col, row) is free.
        # BUGFIX: the for-else 'break' was missing, which made this
        # 'while 1' loop spin forever once the span list was scanned.
        while 1:
            for x, y in tdata.span:
                if x == col and y == row:
                    col += 1
                    tdata.col += 1
                    break
            else:
                # No span occupies (col, row): the position is free.
                break
        begin = (col, row)
        end = (col, row)
        if cspan:
            end = (end[0] + cspan - 1, end[1])
        if rspan:
            end = (end[0], end[1] + rspan - 1)
        if begin != end:
            tdata.add_style(('SPAN', begin, end))
            # Reserve every covered grid position except the anchor cell.
            for x in range(begin[0], end[0] + 1):
                for y in range(begin[1], end[1] + 1):
                    if x != begin[0] or y != begin[1]:
                        tdata.add_empty(x, y)
        # Set border and padding styles for this cell range.
        tdata.add_cell_styles(c, begin, end, "td")
        # Add empty placeholders for newly discovered columns.
        if (col + 1) > len(tdata.colw):
            tdata.colw = tdata.colw + ((col + 1 - len(tdata.colw)) * [_width()])
        # Record an explicit width only for non-spanning cells.
        if not cspan:
            width = c.frag.width or self.attr.width
            if width is not None:
                tdata.colw[col] = _width(width)
        # Same bookkeeping for row heights.
        if row + 1 > len(tdata.rowh):
            tdata.rowh = tdata.rowh + ((row + 1 - len(tdata.rowh)) * [_width()])
        if not rspan:
            # Placeholder: explicit cell heights were never wired up here.
            height = None
            if height is not None:
                tdata.rowh[row] = _width(height)
        tdata.add_style(('FONTSIZE', begin, end, 1.0))
        tdata.add_style(('LEADING', begin, end, 1.0))
        # Vertical alignment: cell attribute wins over the row default.
        valign = self.attr.valign or c.frag.vAlign
        if valign is not None:
            tdata.add_style(('VALIGN', begin, end, valign.upper()))
        # Reset borders, otherwise the paragraph block inside the cell
        # would be drawn with borders too.
        frag = c.frag
        frag.borderLeftWidth = 0
        frag.borderLeftColor = None
        frag.borderLeftStyle = None
        frag.borderRightWidth = 0
        frag.borderRightColor = None
        frag.borderRightStyle = None
        frag.borderTopWidth = 0
        frag.borderTopColor = None
        frag.borderTopStyle = None
        frag.borderBottomWidth = 0
        frag.borderBottomColor = None
        frag.borderBottomStyle = None

    def end(self, c):
        tdata = c.tableData
        c.addPara()
        cell = c.story
        # Keep in frame if needed since ReportLab does not split inside cells.
        if (not c.frag.insideStaticFrame) and (c.frag.keepInFrameMode is not None):
            cell = PmlKeepInFrame(
                maxWidth=0,
                maxHeight=0,
                mode=c.frag.keepInFrameMode,
                content=cell)
        c.swapStory(self.story)
        tdata.add_cell(cell)
class pisaTagTH(pisaTagTD):
    # <th> header cells are processed exactly like <td> cells; any bold /
    # centering styling comes from the default CSS, not from this handler.
    pass
'''
end_th = end_td
def start_keeptogether(self, attrs):
self.story.append([])
self.next_para()
def end_keeptogether(self):
if not self.story[-1]:
self.add_noop()
self.next_para()
s = self.story.pop()
self.add_story(KeepTogether(s))
def start_keepinframe(self, attrs):
self.story.append([])
self.keepinframe = {
"maxWidth": attrs["maxwidth"],
"maxHeight": attrs["maxheight"],
"mode": attrs["mode"],
"name": attrs["name"],
"mergeSpace": attrs["mergespace"]
}
# print self.keepinframe
self.next_para()
def end_keepinframe(self):
if not self.story[-1]:
self.add_noop()
self.next_para()
self.keepinframe["content"] = self.story.pop()
self.add_story(KeepInFrame(**self.keepinframe))
''' | bbondy/brianbondy.gae | libs/sx/pisa3/pisa_tables.py | Python | mit | 13,877 |
from hachoir_py3.field import Field, FieldSet, ParserError
class GenericVector(FieldSet):
    """Fixed-size vector of nb_items fields, each an instance of item_class.

    item_class must be a hachoir Field subclass with a known static_size,
    so the vector's total size can be computed up front.
    """

    def __init__(self, parent, name, nb_items, item_class, item_name="item", description=None):
        # Sanity checks: only fixed-size field classes can form a vector.
        assert issubclass(item_class, Field)
        assert isinstance(item_class.static_size, int)
        if nb_items <= 0:
            raise ParserError('Unable to create empty vector "%s" in %s'
                % (name, parent.path))
        size = nb_items * item_class.static_size
        self.__nb_items = nb_items
        self._item_class = item_class
        self._item_name = item_name
        FieldSet.__init__(self, parent, name, description, size=size)

    def __len__(self):
        """Number of items in the vector."""
        return self.__nb_items

    def createFields(self):
        # "name[]" lets hachoir auto-number the generated fields.
        name = self._item_name + "[]"
        parser = self._item_class
        for index in range(len(self)):
            yield parser(self, name)
class UserVector(GenericVector):
    """
    Convenience base class: subclasses only declare class attributes
    instead of passing them to GenericVector.__init__.

    To implement:
    - item_name: name of a field without [] (eg. "color" becomes "color[0]"),
      default value is "item"
    - item_class: class of an item (a fixed-size Field subclass)
    """
    item_class = None
    item_name = "item"

    def __init__(self, parent, name, nb_items, description=None):
        # Forward the class-level configuration to GenericVector.
        GenericVector.__init__(self, parent, name, nb_items,
            self.item_class, self.item_name, description)
| SickGear/SickGear | lib/hachoir_py3/field/vector.py | Python | gpl-3.0 | 1,391 |
import sys, os, json, time
from shapely.geometry import Polygon
# http://toblerity.org/shapely/manual.html
# Module-level state shared by inDistrict() and the __main__ block.
contains = {}    # NOTE(review): never referenced in this file's visible code
intersects = {}  # tract number -> {district: fraction of tract area intersected}
dPoly = {}       # district number -> list of shapely Polygons (filled in __main__)
unmatched = []   # NOTE(review): never referenced in this file's visible code
TRACTCOL = 'BoroCT2010' # rename this for 2000 census
def addPoly(coords):
    """Recursively flatten a GeoJSON coordinate array into shapely Polygons.

    GeoJSON nests Polygon/MultiPolygon coordinates to varying depths;
    recurse until a ring (a list of [x, y] number pairs) is reached, then
    build one Polygon per ring. Returns a flat list of Polygons.
    """
    polys = []
    # A ring is detected when the first vertex's first ordinate is a bare
    # number. BUGFIX: accept int as well as float -- GeoJSON coordinates
    # may be serialized without a decimal point, and the old float-only
    # check sent such rings into the recursive branch, crashing later.
    if isinstance(coords[0][0], (int, float)):
        polys.append(Polygon(coords))
    else:
        for c in coords:
            polys.extend(addPoly(c))
    return polys
def inDistrict(tract):
    """For one census-tract GeoJSON feature, compute the fraction of its
    area that falls in each council district.

    Returns (tract_number, {district: fraction}). A district polygon that
    fully contains a tract polygon gets fraction 1, but that value is
    overwritten at the end by the intersection-area ratio whenever any
    partial intersection with that district was recorded.
    Depends on module globals dPoly (district polygons) and TRACTCOL.
    """
    tPoly = addPoly(tract['geometry']['coordinates'])
    tractNum = tract['properties'][TRACTCOL]
    intersects = set()  # NOTE: local; shadows the module-level dict of the same name
    area = 0            # total area across all polygons of this tract
    intersection = {}   # district -> accumulated intersection area
    iap = {}            # district -> intersection-area fraction (the result)
    for (i) in range (0, len(tPoly)):
        tractPolygon = tPoly[i]
        area += tractPolygon.area
        for (dn, dp) in dPoly.items():
            for (p) in dp:
                if (p.contains(tractPolygon)):
                    # Fully contained: credit the whole polygon to dn and
                    # stop checking dn's remaining polygons.
                    iap[dn] = 1
                    break;
                elif (p.intersects(tractPolygon)):
                    intersects.add(dn)
                    if dn not in intersection:
                        intersection[dn] = p.intersection(tractPolygon).area
                    else:
                        intersection[dn] += p.intersection(tractPolygon).area
    # Convert accumulated intersection areas into fractions of the tract.
    if (len(intersection) > 0):
        for (dn, inter) in intersection.items():
            iap[dn] = inter / area
    return (tractNum, iap)
if __name__ == '__main__':
    # BUGFIX: two file arguments are required (tract + council), so argc
    # must be at least 3; the original checked '< 2' and crashed with
    # IndexError on sys.argv[2] when only one argument was supplied.
    if (len(sys.argv) < 3):
        print ("Usage: tract2council.py tract.json council.json")
        exit()
    tractfile = sys.argv[1]
    councilfile = sys.argv[2]
    # Both inputs must be readable files.
    for (f) in (tractfile, councilfile):
        if (not os.path.isfile(f)):
            print ("File " + f + " is not readable")
            exit()
    try:
        with open(tractfile) as tractfo:
            tractData = json.load(tractfo)
    except Exception:
        print ("Unable to read tract file " + tractfile)
        exit()
    try:
        with open(councilfile) as councilfo:
            councilData = json.load(councilfo)
    except Exception as e:
        print ("Unable to read council file " + councilfile+": {0}".format(e))
        exit()
    # Build the district-number -> polygon-list lookup used by inDistrict().
    for (district) in councilData['features']:
        dn = district['properties']['CounDist']
        c = district['geometry']['coordinates']
        dPoly[dn] = addPoly(c)
    print ("there are " + str(len(tractData['features'])) + " census tracts")
    # Compute the per-district area fractions for every tract.
    for (tract) in tractData['features']:
        (tn, i) = inDistrict(tract)
        intersects[tn] = i
    # Persist results, timestamped to avoid clobbering earlier runs.
    intersectsFile = 'tracts_' + str(round(time.time())) + '.json'
    with open(intersectsFile, 'w') as intersectsfo:
        json.dump(intersects, intersectsfo)
| capntransit/tract2council | tract2council.py | Python | gpl-3.0 | 2,643 |
from django.shortcuts import render
from django.core import serializers
from django.http import HttpResponse
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.conf import settings
from registrar.models import Course
from registrar.models import Student
from registrar.models import Assignment
from registrar.models import AssignmentSubmission
from registrar.models import EssayQuestion
from registrar.models import EssaySubmission
from registrar.models import MultipleChoiceQuestion
from registrar.models import MultipleChoiceSubmission
from registrar.models import ResponseQuestion
from registrar.models import ResponseSubmission
from registrar.models import TrueFalseQuestion
from registrar.models import TrueFalseSubmission
import json
import datetime
# Forms
from student.forms import EssaySubmissionForm
from student.forms import AssignmentSubmissionForm
# Private Functions
def get_submitted_assignments(course, student):
    """Return the AssignmentSubmission rows for every assignment of *course*,
    creating any missing submission rows so that each assignment has exactly
    one submission entry for *student*.
    """
    # Fetch all the assignments for this course.
    # NOTE: filter() returns an empty queryset rather than raising, but we
    # keep a defensive fallback. BUGFIX: the original except branch assigned
    # to 'assignment' (singular), leaving 'assignments' undefined, and the
    # other branch set None, which len() below would reject.
    try:
        assignments = Assignment.objects.filter(course=course).order_by('assignment_num')
    except Assignment.DoesNotExist:
        assignments = []
    # Fetch all submitted assignments.
    try:
        submitted_assignments = AssignmentSubmission.objects.filter(
            assignment__course=course,
            student=student
        )
    except AssignmentSubmission.DoesNotExist:
        submitted_assignments = []
    # If the submission & assignment counts differ, iterate through all the
    # assignments and create the missing 'submission' entries.
    if len(assignments) != len(submitted_assignments):
        for assignment in assignments:
            found_assignment = False
            for submitted_assignment in submitted_assignments:
                # NOTE(review): assumes Assignment.assignment_id is the key
                # the submission FK resolves to -- confirm against the model.
                if assignment.assignment_id == submitted_assignment.assignment_id:
                    found_assignment = True
            if not found_assignment:
                submission = AssignmentSubmission.objects.create(
                    student=student,
                    assignment=assignment,
                )
                submission.save()
        # Once we saved the data, fetch the results again.
        submitted_assignments = AssignmentSubmission.objects.filter(
            assignment__course=course,
            student=student
        )
    return submitted_assignments
# Public Functions
@login_required(login_url='/landpage')
def assignments_page(request, course_id):
    """Render the assignments overview page for one course."""
    course = Course.objects.get(id=course_id)
    student = Student.objects.get(user=request.user)
    context = {
        'course' : course,
        'submitted_assignments': get_submitted_assignments(course, student),
        'user' : request.user,
        'tab' : 'assignments',
        'subtab' : '',
        'HAS_ADVERTISMENT': settings.APPLICATION_HAS_ADVERTISMENT,
        'local_css_urls' : settings.SB_ADMIN_2_CSS_LIBRARY_URLS,
        'local_js_urls' : settings.SB_ADMIN_2_JS_LIBRARY_URLS,
    }
    return render(request, 'course/assignment/assignments_view.html', context)
@login_required(login_url='/landpage')
def assignments_table(request, course_id):
    """Render the (AJAX-refreshable) table listing this course's assignments."""
    course = Course.objects.get(id=course_id)
    student = Student.objects.get(user=request.user)
    context = {
        'course' : course,
        'user' : request.user,
        'submitted_assignments': get_submitted_assignments(course, student),
        'ESSAY_QUESTION_TYPE' : settings.ESSAY_QUESTION_TYPE,
        'MULTIPLECHOICE_QUESTION_TYPE' : settings.MULTIPLECHOICE_QUESTION_TYPE,
        'TRUEFALSE_QUESTION_TYPE' : settings.TRUEFALSE_QUESTION_TYPE,
        'RESPONSE_QUESTION_TYPE' : settings.RESPONSE_QUESTION_TYPE,
    }
    return render(request, 'course/assignment/assignments_table.html', context)
def _questions_for(question_model, submission_model, assignment, student):
    """Return (questions, submissions) for one question type of *assignment*.

    filter() yields empty querysets instead of raising, so no exception
    handling is needed. (The original try/except blocks caught mismatched
    exception classes -- e.g. 'tf_submissions.DoesNotExist' -- which would
    themselves have raised had the guarded code ever failed.)
    """
    questions = question_model.objects.filter(
        assignment=assignment
    ).order_by('question_num')
    submissions = submission_model.objects.filter(
        question__assignment=assignment,
        student=student,
    )
    return questions, submissions


@login_required(login_url='/landpage')
def assignment_page(request, course_id, assignment_id):
    """Render one assignment with all question types and this student's answers."""
    course = Course.objects.get(id=course_id)
    student = Student.objects.get(user=request.user)
    assignment = Assignment.objects.get(assignment_id=assignment_id)
    # Load questions/submissions for each of the four question types.
    e_questions, e_submissions = _questions_for(
        EssayQuestion, EssaySubmission, assignment, student)
    mc_questions, mc_submissions = _questions_for(
        MultipleChoiceQuestion, MultipleChoiceSubmission, assignment, student)
    tf_questions, tf_submissions = _questions_for(
        TrueFalseQuestion, TrueFalseSubmission, assignment, student)
    r_questions, r_submissions = _questions_for(
        ResponseQuestion, ResponseSubmission, assignment, student)
    return render(request, 'course/assignment/question_view.html',{
        'student': student,
        'course': course,
        'assignment': assignment,
        'e_questions': e_questions,
        'e_submissions': e_submissions,
        'mc_questions': mc_questions,
        'mc_submissions': mc_submissions,
        'tf_questions': tf_questions,
        'tf_submissions': tf_submissions,
        'r_questions': r_questions,
        'r_submissions': r_submissions,
        'ESSAY_QUESTION_TYPE': settings.ESSAY_QUESTION_TYPE,
        'MULTIPLECHOICE_QUESTION_TYPE': settings.MULTIPLECHOICE_QUESTION_TYPE,
        'TRUEFALSE_QUESTION_TYPE': settings.TRUEFALSE_QUESTION_TYPE,
        'RESPONSE_QUESTION_TYPE': settings.RESPONSE_QUESTION_TYPE,
        'user': request.user,
        'tab': 'assignment',
        'HAS_ADVERTISMENT': settings.APPLICATION_HAS_ADVERTISMENT,
        'local_css_urls': settings.SB_ADMIN_2_CSS_LIBRARY_URLS,
        'local_js_urls': settings.SB_ADMIN_2_JS_LIBRARY_URLS,
    })
@login_required()
def delete_assignment(request, course_id):
    """AJAX endpoint: reset a student's assignment so it can be retaken.

    Marks the AssignmentSubmission unfinished and deletes every
    per-question answer the student had stored for that assignment.
    """
    response_data = {'status' : 'failed', 'message' : 'unknown error with deletion'}
    if request.is_ajax():
        if request.method == 'POST':
            assignment_id = int(request.POST['assignment_id'])
            course = Course.objects.get(id=course_id)
            student = Student.objects.get(user=request.user)
            assignment = Assignment.objects.get(assignment_id=assignment_id)
            # Set 'is_finished' to false to indicate we need to take the
            # assignment all over.
            try:
                submission = AssignmentSubmission.objects.get(
                    student=student,
                    assignment=assignment,
                )
                submission.is_finished = False
                submission.save()
            except AssignmentSubmission.DoesNotExist:
                return HttpResponse(json.dumps({
                    'status' : 'failed',
                    'message' : 'record does not exist'
                }), content_type="application/json")
            # Delete all previous per-question answers.
            # BUGFIX: each guard now names the correct exception class; the
            # original caught e.g. EssayQuestion.DoesNotExist for an
            # EssaySubmission queryset and even 'tf_submissions.DoesNotExist'
            # (an attribute of a possibly-undefined queryset variable).
            try:
                e_submissions = EssaySubmission.objects.filter(question__assignment=assignment, student=student)
                e_submissions.delete()
            except EssaySubmission.DoesNotExist:
                pass
            try:
                mc_submissions = MultipleChoiceSubmission.objects.filter(question__assignment=assignment, student=student)
                mc_submissions.delete()
            except MultipleChoiceSubmission.DoesNotExist:
                pass
            try:
                tf_submissions = TrueFalseSubmission.objects.filter(question__assignment=assignment, student=student)
                tf_submissions.delete()
            except TrueFalseSubmission.DoesNotExist:
                pass
            try:
                r_submissions = ResponseSubmission.objects.filter(question__assignment=assignment, student=student)
                r_submissions.delete()
            except ResponseSubmission.DoesNotExist:
                pass
            response_data = {'status' : 'success', 'message' : 'assignment was deleted'}
    return HttpResponse(json.dumps(response_data), content_type="application/json")
@login_required()
def submit_e_assignment_answer(request, course_id, assignment_id):
    """AJAX endpoint: store the uploaded file answering one essay question."""
    response_data = {'status' : 'failed', 'message' : 'error submitting'}
    if request.is_ajax():
        if request.method == 'POST':
            # Extract parameters from post.
            question_id = int(request.POST['question_id'])
            # Django raises MultiValueDictKeyError (a KeyError subclass)
            # for a missing upload; narrowed from the original bare except.
            try:
                file = request.FILES["file"]
            except KeyError:
                response_data = {'status' : 'failed', 'message' : 'missing file'}
                return HttpResponse(json.dumps(response_data), content_type="application/json")
            # Fetch from database.
            course = Course.objects.get(id=course_id)
            student = Student.objects.get(user=request.user)
            assignment = Assignment.objects.get(assignment_id=assignment_id)
            # Fetch question and error if not found.
            # BUGFIX: was 'except MultipleChoiceQuestion.DoesNotExist',
            # which would never match the EssayQuestion lookup failure.
            try:
                question = EssayQuestion.objects.get(
                    assignment=assignment,
                    question_id=question_id,
                )
            except EssayQuestion.DoesNotExist:
                response_data = {'status' : 'failed', 'message' : 'cannot find question'}
                return HttpResponse(json.dumps(response_data), content_type="application/json")
            # Fetch (or lazily create) the student's submission row.
            try:
                submission = EssaySubmission.objects.get(
                    student=student,
                    question=question,
                )
            except EssaySubmission.DoesNotExist:
                submission = EssaySubmission.objects.create(
                    student=student,
                    question=question,
                )
            submission.file = file
            submission.save()
            response_data = {'status' : 'success', 'message' : 'submitted'}
    return HttpResponse(json.dumps(response_data), content_type="application/json")
@login_required()
def submit_mc_assignment_answer(request, course_id, assignment_id):
    """AJAX endpoint: toggle one choice of a multiple-choice question and re-score it."""
    if request.is_ajax():
        if request.method == 'POST':
            # Extract parameters from the POST body.
            question_id = int(request.POST['question_id'])
            answer = request.POST['answer']
            # Fetch from database.
            course = Course.objects.get(id=course_id)
            assignment = Assignment.objects.get(assignment_id=assignment_id)
            student = Student.objects.get(user=request.user)
            # Fetch question and error if not found.
            try:
                question = MultipleChoiceQuestion.objects.get(
                    assignment=assignment,
                    question_id=question_id,
                )
            except MultipleChoiceQuestion.DoesNotExist:
                payload = {'status' : 'failed', 'message' : 'cannot find question'}
                return HttpResponse(json.dumps(payload), content_type="application/json")
            # Fetch (or lazily create) the student's submission row.
            try:
                submission = MultipleChoiceSubmission.objects.get(
                    student=student,
                    question=question,
                )
            except MultipleChoiceSubmission.DoesNotExist:
                submission = MultipleChoiceSubmission.objects.create(
                    student=student,
                    question=question,
                )
            # Flip the selected choice; answers arrive as 'A'..'F'.
            choice_fields = {'A': 'a', 'B': 'b', 'C': 'c',
                             'D': 'd', 'E': 'e', 'F': 'f'}
            field = choice_fields.get(answer)
            if field is not None:
                setattr(submission, field, not getattr(submission, field))
            submission.save()
            # Score: full credit only when every choice flag matches its
            # corresponding *_is_correct flag on the question.
            letters = ('a', 'b', 'c', 'd', 'e', 'f')
            matches = sum(
                1 for letter in letters
                if getattr(submission, letter) ==
                getattr(submission.question, letter + '_is_correct')
            )
            if matches == len(letters):
                submission.marks = submission.question.marks
            else:
                submission.marks = 0
            submission.save()
            # Return success results.
            payload = {'status' : 'success', 'message' : 'submitted'}
            return HttpResponse(json.dumps(payload), content_type="application/json")
    payload = {'status' : 'failed', 'message' : 'error submitting'}
    return HttpResponse(json.dumps(payload), content_type="application/json")
@login_required()
def submit_tf_assignment_answer(request, course_id, assignment_id):
    """AJAX endpoint: record a true/false answer and score it."""
    if request.is_ajax():
        if request.method == 'POST':
            # Extract parameters from the POST body.
            question_id = int(request.POST['question_id'])
            answer = request.POST['answer']
            # Fetch from database.
            course = Course.objects.get(id=course_id)
            assignment = Assignment.objects.get(assignment_id=assignment_id)
            student = Student.objects.get(user=request.user)
            # Fetch question and error if not found.
            # BUGFIX: was 'except MultipleChoiceQuestion.DoesNotExist',
            # which cannot match a TrueFalseQuestion lookup failure.
            try:
                question = TrueFalseQuestion.objects.get(
                    assignment=assignment,
                    question_id=question_id,
                )
            except TrueFalseQuestion.DoesNotExist:
                response_data = {'status' : 'failed', 'message' : 'cannot find question'}
                return HttpResponse(json.dumps(response_data), content_type="application/json")
            # Fetch (or lazily create) the student's submission row.
            try:
                submission = TrueFalseSubmission.objects.get(
                    student=student,
                    question_id=question_id,
                )
            except TrueFalseSubmission.DoesNotExist:
                submission = TrueFalseSubmission.objects.create(
                    student=student,
                    question_id=question_id,
                )
            # Store the answer (the client sends the string "true"/"false").
            submission.answer = answer == "true"
            submission.save()
            # Score it.
            if submission.answer == submission.question.answer:
                submission.marks = submission.question.marks
            else:
                submission.marks = 0
            submission.save()
            response_data = {'status' : 'success', 'message' : 'submitted'}
            return HttpResponse(json.dumps(response_data), content_type="application/json")
    # Fallback for non-AJAX / non-POST requests; the original fell off the
    # end and returned None, which Django rejects with a server error.
    response_data = {'status' : 'failed', 'message' : 'error submitting'}
    return HttpResponse(json.dumps(response_data), content_type="application/json")
@login_required()
def submit_r_assignment_answer(request, course_id, assignment_id):
    """AJAX endpoint: store a free-text response answer."""
    if request.is_ajax():
        if request.method == 'POST':
            # Extract parameters from the POST body.
            question_id = int(request.POST['question_id'])
            answer = request.POST['answer']
            # Fetch from database.
            course = Course.objects.get(id=course_id)
            assignment = Assignment.objects.get(assignment_id=assignment_id)
            student = Student.objects.get(user=request.user)
            # Fetch question and error if not found.
            try:
                question = ResponseQuestion.objects.get(
                    assignment=assignment,
                    question_id=question_id,
                )
            except ResponseQuestion.DoesNotExist:
                response_data = {'status' : 'failed', 'message' : 'cannot find question'}
                return HttpResponse(json.dumps(response_data), content_type="application/json")
            # Fetch (or lazily create) the student's submission row.
            try:
                submission = ResponseSubmission.objects.get(
                    student=student,
                    question_id=question_id,
                )
            except ResponseSubmission.DoesNotExist:
                submission = ResponseSubmission.objects.create(
                    student=student,
                    question_id=question_id,
                )
            # Store the answer; responses are graded later by a teacher.
            submission.answer = answer
            submission.save()
            response_data = {'status' : 'success', 'message' : 'submitted'}
            return HttpResponse(json.dumps(response_data), content_type="application/json")
    # Fallback for non-AJAX / non-POST requests; the original fell off the
    # end and returned None, which Django rejects with a server error.
    response_data = {'status' : 'failed', 'message' : 'error submitting'}
    return HttpResponse(json.dumps(response_data), content_type="application/json")
@login_required()
def submit_assignment(request, course_id, assignment_id):
    """AJAX endpoint: finalize an assignment and compute its score."""
    if request.is_ajax():
        if request.method == 'POST':
            # Fetch from database.
            course = Course.objects.get(id=course_id)
            assignment = Assignment.objects.get(assignment_id=assignment_id)
            student = Student.objects.get(user=request.user)
            # Defensive Code: Prevent submitting assignment with missing questions
            #TODO: IMPL
            # Fetch (or lazily create) the student's submission row.
            try:
                submission = AssignmentSubmission.objects.get(
                    student=student,
                    assignment=assignment,
                )
            except AssignmentSubmission.DoesNotExist:
                submission = AssignmentSubmission.objects.create(
                    student=student,
                    assignment=assignment,
                )
            submission.is_finished = True
            submission.save()
            # Compute the final score of the assignment.
            compute_score(student, assignment, submission)
            response_data = {'status' : 'success', 'message' : 'submitted'}
            return HttpResponse(json.dumps(response_data), content_type="application/json")
    # Fallback for non-AJAX / non-POST requests; the original fell off the
    # end and returned None, which Django rejects with a server error.
    response_data = {'status' : 'failed', 'message' : 'error submitting'}
    return HttpResponse(json.dumps(response_data), content_type="application/json")
#-------------------#
# Private Functions #
#-------------------#
def compute_score(student, assignment, submission):
    """Tally marks across every question-type submission of *assignment*
    for *student*, then store total/earned marks and the percent on the
    given AssignmentSubmission (saved at the end)."""
    submission.total_marks = 0
    submission.earned_marks = 0
    # All four per-question submission models share the same fields
    # (question.marks = attainable, marks = awarded), so one loop covers
    # essay, multiple-choice, true/false and response answers.
    answer_models = (
        EssaySubmission,
        MultipleChoiceSubmission,
        TrueFalseSubmission,
        ResponseSubmission,
    )
    for model in answer_models:
        entries = model.objects.filter(
            student=student,
            question__assignment=assignment,
        )
        for entry in entries:
            submission.total_marks += entry.question.marks
            submission.earned_marks += entry.marks
    # Compute percent; an assignment with no marks at all scores 0.
    try:
        submission.percent = round((submission.earned_marks / submission.total_marks) * 100)
    except ZeroDivisionError:
        submission.percent = 0
    # Save calculation.
    submission.save()
| AcademicsToday/py-academicstoday | academicstoday_project/student/views/assignment.py | Python | apache-2.0 | 21,683 |
#
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2013-2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
AIO cpu check plugin.
"""
import sys
import gettext
_ = lambda m: gettext.dgettext(message=m, domain='ovirt-engine-setup')
from otopi import util
from otopi import plugin
from ovirt_engine_setup.engine import constants as oenginecons
@util.export
class Plugin(plugin.PluginBase):
    """
    AIO cpu check plugin.

    Checks that the host supports hardware virtualization and picks the
    best vdsm-compatible CPU family name for the all-in-one cluster.
    """
    # CPU list from: git grep ServerCPUList | grep 3.5
    # Ordered best-first; the first model also supported by the host wins.
    CPU_FAMILIES = (
        {'model': 'model_Haswell', 'name': 'Intel Haswell Family'},
        {'model': 'model_SandyBridge', 'name': 'Intel SandyBridge Family'},
        {'model': 'model_Westmere', 'name': 'Intel Westmere Family'},
        {'model': 'model_Nehalem', 'name': 'Intel Nehalem Family'},
        {'model': 'model_Penryn', 'name': 'Intel Penryn Family'},
        {'model': 'model_Conroe', 'name': 'Intel Conroe Family'},
        {'model': 'model_Opteron_G5', 'name': 'AMD Opteron G5'},
        {'model': 'model_Opteron_G4', 'name': 'AMD Opteron G4'},
        {'model': 'model_Opteron_G3', 'name': 'AMD Opteron G3'},
        {'model': 'model_Opteron_G2', 'name': 'AMD Opteron G2'},
        {'model': 'model_Opteron_G1', 'name': 'AMD Opteron G1'},
    )
    # Name of the system libvirt service needed for the vdsm caps query.
    LIBVIRTD_SERVICE_NAME = 'libvirtd'
    def __init__(self, context):
        super(Plugin, self).__init__(context=context)
    def _startLibvirt(self):
        """
        Starts libvirt service

        Returns True when the service was started by this call, so the
        caller knows it should stop the service again when finished.
        """
        ret = False
        if not self.services.status(
            name=self.LIBVIRTD_SERVICE_NAME
        ):
            self.services.state(
                name=self.LIBVIRTD_SERVICE_NAME,
                state=True,
            )
            # TODO: wait for libvirt to start
            ret = True
        return ret
    def _getCompatibleCpuModels(self):
        """
        Load vdsm's 'caps' module and return a tuple of
        (host cpu model, list of compatible cpu models), or None when the
        module cannot provide the information.
        """
        self.logger.debug('Attempting to load the caps vdsm module')
        savedPath = sys.path
        ret = None
        try:
            sys.path.append(oenginecons.FileLocations.AIO_VDSM_PATH)
            caps = util.loadModule(
                path=oenginecons.FileLocations.AIO_VDSM_PATH,
                name='caps',
            )
            ret = (
                caps.CpuInfo().model(),
                caps._getCompatibleCpuModels(),
            )
        finally:
            # Always restore the import path, even if loading failed.
            sys.path = savedPath
        return ret
    @plugin.event(
        stage=plugin.Stages.STAGE_INIT,
    )
    def _init(self):
        # Defaults: no CPU chosen yet, virtualization assumed unsupported.
        self.environment[oenginecons.AIOEnv.VDSM_CPU] = None
        self.environment[oenginecons.AIOEnv.SUPPORTED] = False
    @plugin.event(
        stage=plugin.Stages.STAGE_SETUP,
        condition=lambda self: self.environment[
            oenginecons.AIOEnv.ENABLE
        ],
        priority=plugin.Stages.PRIORITY_HIGH,
    )
    def _setup(self):
        # Imported lazily: ovirt_host_deploy is only needed when AIO is enabled.
        from ovirt_host_deploy import hardware
        virtualization = hardware.Virtualization()
        result = virtualization.detect()
        if result == virtualization.DETECT_RESULT_SUPPORTED:
            self.logger.info(_('Hardware supports virtualization'))
            self.environment[oenginecons.AIOEnv.SUPPORTED] = True
    @plugin.event(
        stage=plugin.Stages.STAGE_VALIDATION,
        condition=lambda self: self.environment[
            oenginecons.AIOEnv.CONFIGURE
        ],
    )
    def _validation(self):
        # libvirt must be running for the vdsm caps module to query the CPU.
        shouldStopLibvirt = self._startLibvirt()
        try:
            cpu, compatible = self._getCompatibleCpuModels()
            self.logger.debug(
                'Compatible CPU models are: %s',
                compatible,
            )
            supported = (
                set([entry['model'] for entry in self.CPU_FAMILIES]) &
                set(compatible)
            )
            # All-in-one want the best cpu between compatible.
            # The preference is defined by the order of
            # CPU_FAMILIES
            # We need to save the corresponding CPU name for cluster
            # creation.
            for entry in self.CPU_FAMILIES:
                if entry['model'] in supported:
                    self.environment[
                        oenginecons.AIOEnv.VDSM_CPU
                    ] = entry['name']
                    break
        finally:
            # Stop libvirt again only if we were the ones who started it.
            if shouldStopLibvirt:
                self.services.state(
                    name=self.LIBVIRTD_SERVICE_NAME,
                    state=False,
                )
# vim: expandtab tabstop=4 shiftwidth=4
| phoenixsbk/kvmmgr | packaging/setup/plugins/ovirt-engine-setup/ovirt-engine/all-in-one/cpu.py | Python | apache-2.0 | 4,987 |
#!/usr/bin/python
#
# Dicziunari-Hunspell -- Rhaeto-Romance hunspell dictionary generation
#
# Copyright (C) 2012-2013 Uli Franke (cls) et al.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# IMPORTANT NOTICE: All software, content, intellectual property coming
# with this program (usually contained in files) can not be used in any
# way by the Lia Rumantscha (www.liarumantscha.ch/) without explicit
# permission, as they actively block software innovation targeting the
# Rhaeto-Romance language.
#
# Word list methods
def load(path):
    """Read a UTF-8 word-list file and return its entries as a set.

    Lines starting with '#' are treated as comments and skipped.
    """
    # BUG FIX: the original called ``f.close`` without parentheses, so the
    # file handle was never closed; ``with`` guarantees it is released.
    # Binary mode + explicit decode keeps behavior identical on
    # Python 2 and Python 3.
    with open(path, "rb") as f:
        lines = f.read().decode("utf-8").splitlines()
    words = set()
    for line in lines:
        if not line.startswith(u"#"):
            words.add(line)
    return words
def store(path, l):
    """Write the words in iterable *l* to *path*, one per line, UTF-8 encoded."""
    # Binary mode writes the encoded bytes verbatim (no platform newline
    # translation) and behaves the same on Python 2 and Python 3; the
    # original wrote encoded bytes to a text-mode handle.
    with open(path, "wb") as f:
        for w in l:
            f.write((u"%s\n" % w).encode("utf-8"))
| cls-nebadje/dicziunari-hunspell | tools/dzhs/wordlist.py | Python | gpl-3.0 | 1,455 |
# CTK: Cherokee Toolkit
#
# Authors:
# Alvaro Lopez Ortega <alvaro@alobbs.com>
#
# Copyright (C) 2009-2010 Alvaro Lopez Ortega
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
import re
import sys
import types
import traceback
try:
import json
except ImportError:
import json_embedded as json
#
# Strings
#
def formatter (string, props):
    """Perform a 'str %(dict)' substitution that survives repetition.

    Escaped percent signs ('%%') are re-escaped before the substitution,
    so the same template can be formatted an arbitrary number of times
    without losing its literal percent characters."""
    # Doubling each escaped pair left-to-right is exactly what the
    # character-by-character scan did.
    return string.replace('%%', '%%%%') % (props)
#
# HTML Tag properties
#
def props_to_str (props):
    """Render a dict of HTML tag properties as a 'key="value"' list.

    Keys whose value is falsy are rendered bare (just the key name),
    matching boolean HTML attributes."""
    assert type(props) == dict
    rendered = []
    for key, val in props.items():
        rendered.append('%s="%s"' % (key, val) if val else key)
    return ' '.join(rendered)
#
# Copying and Cloning
#
def find_copy_name (orig, names):
    """Return a unique "<orig> Copy [N]" name not clashing with *names*.

    Any existing ' Copy'/' Copy N' suffix on *orig* is stripped first so
    copies of copies do not accumulate suffixes.
    """
    # Clean up name
    cropped = re.search(r' Copy( \d+|)$', orig) != None
    if cropped:
        orig = orig[:orig.rindex(' Copy')]
    # Find higher copy
    # BUG FIX: materialize as a list -- under Python 3, filter() returns a
    # one-shot iterator that the membership test below would exhaust,
    # leaving the numbered-copy scan with nothing to iterate.
    similar = [x for x in names if x.startswith(orig)]
    if '%s Copy' % (orig) in similar:
        higher = 1
    else:
        higher = 0
    # BUG FIX: the original pattern ' Copy (\d)+' captured only the last
    # digit (so 'Copy 12' parsed as 2); (\d+) captures the whole number.
    for tmp in [re.findall(r' Copy (\d+)', x) for x in similar]:
        if not tmp: continue
        higher = max(int(tmp[0]), higher)
    # Copy X
    if higher == 0:
        return '%s Copy' % (orig)
    return '%s Copy %d' % (orig, higher + 1)
#
# JSon
#
def json_dump (obj):
    """Serialize *obj* to a JSON string.

    Works with both the modern json API (dumps) and the legacy
    Python 2.5 json module (write)."""
    if not hasattr (json, 'dumps'):
        # Legacy Python 2.5 json module
        return json.write(obj)
    # Python 2.6+ and json_embedded
    return json.dumps (obj)
#
# Unicode, UTF-8
#
def to_utf8(s):
    """Recursively encode unicode values inside *s* as UTF-8 byte strings.

    Strings are returned as-is, unicode is encoded, lists/tuples are
    rebuilt element-wise, dicts are converted *in place*; any other type
    is converted with str().

    NOTE(review): relies on types.StringType/UnicodeType, so this
    function is Python 2 only.
    """
    if type(s) == types.StringType:
        return s
    elif type(s) == types.UnicodeType:
        return s.encode('utf-8')
    elif type(s) == types.ListType:
        return [to_utf8(x) for x in s]
    elif type(s) == types.TupleType:
        return tuple([to_utf8(x) for x in s])
    elif type(s) == types.DictType:
        # Mutates the caller's dict; values are converted in place.
        for k in s.keys():
            s[k] = to_utf8(s[k])
        return s
    return str(s)
#
# Debug
#
def print_exception (output = sys.stderr):
    # Dump the current exception's formatted traceback to *output*
    # (stderr by default). Python 2 print-chevron syntax.
    print >> output, traceback.format_exc()
| chetan/cherokee | admin/CTK/CTK/util.py | Python | gpl-2.0 | 3,017 |
class Application:
    """Container for a client application's credentials and redirect URI."""

    def __init__(self, application_id, application_key, application_secret_key, redirect_uri):
        # Store the constructor arguments under the short attribute names
        # used throughout the package.
        for attr, value in (('id', application_id),
                            ('key', application_key),
                            ('secret_key', application_secret_key),
                            ('redirect_uri', redirect_uri)):
            setattr(self, attr, value)
| wi34rd/pyokapi | pyokapi/application.py | Python | mit | 272 |
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
import os
from waflib import Options,Utils,Task
from waflib.TaskGen import before_method,feature,extension
from waflib.Configure import conf
@feature('rubyext')
@before_method('apply_incpaths','process_source','apply_bundle','apply_link')
def init_rubyext(self):
	"""
	Initialize a task generator that builds a ruby extension: set the
	default install path and make sure the RUBY and RUBYEXT uselib
	variables are applied.
	"""
	self.install_path='${ARCHDIR_RUBY}'
	self.uselib=self.to_list(getattr(self,'uselib',''))
	if not'RUBY'in self.uselib:
		self.uselib.append('RUBY')
	if not'RUBYEXT'in self.uselib:
		self.uselib.append('RUBYEXT')
@feature('rubyext')
@before_method('apply_link','propagate_uselib_vars')
def apply_ruby_so_name(self):
	"""
	Make C/C++ shared libraries follow the ruby extension naming pattern
	(e.g. '%s.so' or '%s.bundle') detected at configure time.
	"""
	self.env.cshlib_PATTERN=self.env.cxxshlib_PATTERN=self.env.rubyext_PATTERN
@conf
def check_ruby_version(self,minver=()):
	"""
	Find the ruby interpreter and check its version against *minver*,
	a tuple such as (1,9,3). Fails the configuration when ruby is
	missing, its version cannot be parsed, or it is too old.
	"""
	ruby=self.find_program('ruby',var='RUBY',value=Options.options.rubybinary)
	try:
		version=self.cmd_and_log(ruby+['-e','puts defined?(VERSION) ? VERSION : RUBY_VERSION']).strip()
	except Exception:
		self.fatal('could not determine ruby version')
	self.env.RUBY_VERSION=version
	try:
		ver=tuple(map(int,version.split(".")))
	except Exception:
		self.fatal('unsupported ruby version %r'%version)
	cver=''
	if minver:
		# Displayed constraint string; the actual test below is >= minver.
		cver='> '+'.'.join(str(x)for x in minver)
		if ver<minver:
			self.fatal('ruby is too old %r'%ver)
	self.msg('Checking for ruby version %s'%cver,version)
@conf
def check_ruby_ext_devel(self):
	"""
	Check for the headers, libraries and flags required to build native
	ruby extensions, populating the RUBYEXT uselib variables and the
	ARCHDIR_RUBY/LIBDIR_RUBY install locations.
	"""
	if not self.env.RUBY:
		self.fatal('ruby detection is required first')
	if not self.env.CC_NAME and not self.env.CXX_NAME:
		self.fatal('load a c/c++ compiler first')
	version=tuple(map(int,self.env.RUBY_VERSION.split(".")))
	def read_out(cmd):
		# Run a one-line ruby script (with rbconfig loaded) and split its output.
		return Utils.to_list(self.cmd_and_log(self.env.RUBY+['-rrbconfig','-e',cmd]))
	def read_config(key):
		# Fetch a single RbConfig::CONFIG entry as a list of words.
		return read_out('puts RbConfig::CONFIG[%r]'%key)
	cpppath=archdir=read_config('archdir')
	if version>=(1,9,0):
		# ruby >= 1.9 moved the headers to rubyhdrdir plus arch subdirectories.
		ruby_hdrdir=read_config('rubyhdrdir')
		cpppath+=ruby_hdrdir
		if version>=(2,0,0):
			cpppath+=read_config('rubyarchhdrdir')
		cpppath+=[os.path.join(ruby_hdrdir[0],read_config('arch')[0])]
	self.check(header_name='ruby.h',includes=cpppath,errmsg='could not find ruby header file',link_header_test=False)
	self.env.LIBPATH_RUBYEXT=read_config('libdir')
	self.env.LIBPATH_RUBYEXT+=archdir
	self.env.INCLUDES_RUBYEXT=cpppath
	self.env.CFLAGS_RUBYEXT=read_config('CCDLFLAGS')
	self.env.rubyext_PATTERN='%s.'+read_config('DLEXT')[0]
	# Strip the leading compiler name (and a possible 'ppc' arch word)
	# from LDSHARED, keeping only the linker flags.
	flags=read_config('LDSHARED')
	while flags and flags[0][0]!='-':
		flags=flags[1:]
	if len(flags)>1 and flags[1]=="ppc":
		flags=flags[2:]
	self.env.LINKFLAGS_RUBYEXT=flags
	self.env.LINKFLAGS_RUBYEXT+=read_config('LIBS')
	self.env.LINKFLAGS_RUBYEXT+=read_config('LIBRUBYARG_SHARED')
	# Install locations: honor --with-ruby-archdir / --with-ruby-libdir.
	if Options.options.rubyarchdir:
		self.env.ARCHDIR_RUBY=Options.options.rubyarchdir
	else:
		self.env.ARCHDIR_RUBY=read_config('sitearchdir')[0]
	if Options.options.rubylibdir:
		self.env.LIBDIR_RUBY=Options.options.rubylibdir
	else:
		self.env.LIBDIR_RUBY=read_config('sitelibdir')[0]
@conf
def check_ruby_module(self,module_name):
	"""
	Check that *module_name* can be require'd by the detected ruby
	interpreter; abort the configuration with a fatal error otherwise.
	"""
	self.start_msg('Ruby module %s'%module_name)
	try:
		self.cmd_and_log(self.env.RUBY+['-e','require \'%s\';puts 1'%module_name])
	except Exception:
		self.end_msg(False)
		self.fatal('Could not find the ruby module %r'%module_name)
	else:
		# Only reached when the require succeeded.
		self.end_msg(True)
@extension('.rb')
def process(self,node):
	# Each .rb source file is executed by the ruby interpreter via a
	# dedicated run_ruby task.
	return self.create_task('run_ruby',node)
class run_ruby(Task.Task):
	"""
	Execute a ruby script, adding the script's own directory to the
	load path: ruby ${RBFLAGS} -I <srcdir> <src>
	"""
	run_str='${RUBY} ${RBFLAGS} -I ${SRC[0].parent.abspath()} ${SRC}'
def options(opt):
	"""Register the ruby-related command-line options on *opt*."""
	for flag, dest, help_text in (
		('--with-ruby-archdir', 'rubyarchdir', 'Specify directory where to install arch specific files'),
		('--with-ruby-libdir', 'rubylibdir', 'Specify alternate ruby library path'),
		('--with-ruby-binary', 'rubybinary', 'Specify alternate ruby binary'),
	):
		opt.add_option(flag, type='string', dest=dest, help=help_text)
| Gnurou/glmark2 | waflib/Tools/ruby.py | Python | gpl-3.0 | 3,848 |
#!/usr/bin/env python
class RoundFloatManual(object):
    """A float wrapper that keeps (and prints) exactly two decimal places."""

    def __init__(self, val):
        assert isinstance(val, float), \
            "Value must be a float!"
        # Keep only two decimals internally.
        self.value = round(val, 2)

    def __str__(self):
        # Always render with exactly two decimal places.
        return format(self.value, '.2f')

    # repr intentionally identical to str for this simple value type.
    __repr__ = __str__
| opensvn/test | src/study/python/cpp/ch13/roundFloat2.py | Python | gpl-2.0 | 280 |
import datetime
from imap_tools import EmailAddress
# Expected parse results for the corresponding .eml fixture: an email
# carrying multiple invalid Content-Disposition headers. Used by the
# imap_tools test suite to verify tolerant parsing of malformed messages.
DATA = dict(
    subject='Redacted',
    from_='redacted@flashmail.net',
    to=('redacted@Enron.com',),
    cc=(),
    bcc=(),
    reply_to=(),
    # Placeholder date: the fixture message has no parseable Date header.
    date=datetime.datetime(1900, 1, 1, 0, 0),
    date_str='',
    text='',
    html='<p>foo</p>\n',
    headers={'from': ('<redacted@flashmail.net>',), 'subject': ('Redacted',), 'to': ('<redacted@Enron.com>',), 'message-id': ('<105647271315.NCV17523@x263.net>',), 'mime-version': ('1.0',), 'content-type': ('text/html;\n charset=utf-8',), 'content-disposition': ('=?utf-8?Q?invalid?=', '=?utf-8?Q?invalid?='), 'content-transfer-encoding': ('quoted-printable',)},
    attachments=[],
    from_values=EmailAddress('', 'redacted@flashmail.net', 'redacted@flashmail.net'),
    to_values=(EmailAddress('', 'redacted@Enron.com', 'redacted@Enron.com'),),
    cc_values=(),
    bcc_values=(),
    reply_to_values=(),
    )
import numpy as np
import scipy.interpolate as interp
import warnings
from astropy.io import fits
def concentration(radii, phot, eta_radius=0.2, eta_radius_factor=1.5, interp_kind='linear', add_zero=False):
    """
    Calculates the concentration parameter

    C = 5 * log10(r_80 / r_20)

    where r_20 / r_80 are the radii enclosing 20% / 80% of the total
    flux, and the total flux is measured at eta_radius_factor times the
    radius where the eta profile drops to eta_radius.

    Inputs:
    radii -- 1d array of aperture photometry radii
    phot -- 1d array of aperture photometry fluxes
    eta_radius -- eta level defining the total-flux radius
    eta_radius_factor -- multiple of the eta radius used for total flux
    interp_kind -- kind of interpolation; passed to scipy.interpolate.interp1d.
    Some options are linear, quadratic, and cubic.
    add_zero -- add a 0 radius and zero flux point to their respective arrays
    to help with interpolation at small radii; should only matter for quadratic or
    cubic interpolation
    """
    assert len(radii) == len(phot)
    assert np.all(radii > 0)
    assert np.all(phot > 0)
    if add_zero:
        radii = np.insert(radii, 0, 0)
        phot = np.insert(phot, 0, 0)
    eta_vals = eta(radii, phot)
    # BUG FIX: this threshold was the hard-coded literal 0.2, silently
    # ignoring a caller-supplied eta_radius; the warning message below
    # already referred to eta_radius, confirming the intended behavior.
    if np.any(eta_vals < eta_radius):
        eta_interp = interp.interp1d(eta_vals, radii, kind=interp_kind)
        eta_r = eta_radius_factor * eta_interp(eta_radius)
    else:
        warnings.warn("eta is never less than " + str(eta_radius) + ". Using lowest eta value as proxy")
        eta_r = eta_radius_factor * radii[np.argmin(eta_vals)]
    phot_interp = interp.interp1d(radii, phot, kind=interp_kind)
    # Total flux: interpolate at the eta radius when it lies inside the
    # measured profile, otherwise fall back to the outermost flux.
    if eta_r < np.max(radii):
        maxphot = phot_interp(eta_r)
    else:
        maxphot = np.max(phot)
    norm_phot = phot / maxphot
    radius_interp = interp.interp1d(norm_phot, radii, kind=interp_kind)
    r20 = radius_interp(0.2)
    r80 = radius_interp(0.8)
    # Sanity check: both percentile radii must be ordered and in range.
    assert r20 < r80 < np.max(radii)
    c = 5 * np.log10(r80 / r20)
    return c
def eta(radii, phot):
    """
    Compute the eta profile: local intensity over the mean intensity
    enclosed within each radius, eta(r) = I(r) / Ibar(<r).

    radii -- 1d array of aperture photometry radii
    phot -- 1d array of enclosed aperture fluxes

    The local intensity I(r) is estimated from finite differences of the
    cumulative photometry, so the result is approximate.
    """
    areas = np.pi * radii ** 2
    ring_areas = np.diff(areas, prepend=0.0)
    mean_intensity = phot / areas
    ring_intensity = np.diff(phot, prepend=0.0) / ring_areas
    # Average adjacent ring intensities to estimate I at each radius,
    # reusing the last ring's value for the outermost point.
    local_intensity = np.append(
        0.5 * (ring_intensity[:-1] + ring_intensity[1:]),
        ring_intensity[-1],
    )
    return local_intensity / mean_intensity
def find_eta(eta_val, radii, phot):
    """Return the radius at which the eta profile equals *eta_val*,
    found by linearly interpolating eta(r) against r."""
    profile = eta(radii, phot)
    return interp.interp1d(profile, radii)(eta_val)
def snr(name):
    """
    Estimate the signal-to-noise ratio of the object in <name>_bs.fits
    using the aperture photometry in <name>.apphot.

    name before fits and apphot files

    The per-pixel uncertainty is the median absolute deviation of the
    background-subtracted image; the total flux is interpolated at 1.5
    times the radius where eta falls to 0.2.
    """
    #first calculate the image uncertainty using the MAD
    hdulist = fits.open(name + '_bs.fits')
    im_med = np.median(hdulist[0].data)
    im_err = np.median(np.abs(hdulist[0].data - im_med))
    #now get the total flux
    apphot = np.loadtxt(name + ".apphot", usecols=[0,1])
    radii = apphot[:,0]
    phot = apphot[:,1]
    try:
        eta_rad = find_eta(0.2, radii, phot)
        # Clamp so the 1.5x aperture below stays inside the measured radii.
        if eta_rad > np.max(radii)/1.5:
            eta_rad = np.max(radii)/1.5
    except ValueError:
        # eta never reaches 0.2 inside the profile (interpolation out of
        # range); fall back to a fixed radius of 1.0.
        eta_rad = 1.0
    phot_interp = interp.interp1d(radii, phot)
    total_phot = phot_interp(1.5*eta_rad)
    # Noise term scales the per-pixel error with the aperture area,
    # i.e. sqrt(area) * im_err -- assumes uncorrelated pixel noise.
    return total_phot / np.sqrt(np.pi*(1.5*eta_rad)**2 * im_err**2)
| astronomeralex/morphology-software | morphology.py | Python | mit | 3,255 |
# file: runme.py
# Demo client for the wolfSSL SWIG wrapper: connect to the example
# server over TLS 1.2, send a greeting and print the server's reply.
# Python 2 script; run the example server first (./examples/server/server -d).
import wolfssl
print ""
print "Trying to connect to the example server -d..."
wolfssl.wolfSSL_Init()
#wolfssl.wolfSSL_Debugging_ON()
ctx = wolfssl.wolfSSL_CTX_new(wolfssl.wolfTLSv1_2_client_method())
if ctx == None:
    print "Couldn't get SSL CTX for TLSv1.2"
    exit(-1)
# Trust the example CA certificate shipped with wolfSSL.
ret = wolfssl.wolfSSL_CTX_load_verify_locations(ctx, "../certs/ca-cert.pem", None)
if ret != wolfssl.SSL_SUCCESS:
    print "Couldn't do SSL_CTX_load_verify_locations "
    print "error string = ", ret
    exit(-1)
ssl = wolfssl.wolfSSL_new(ctx)
# TCP connect + TLS handshake with the example server on localhost:11111.
ret = wolfssl.wolfSSL_swig_connect(ssl, "localhost", 11111)
if ret != wolfssl.SSL_SUCCESS:
    print "Couldn't do SSL connect"
    err = wolfssl.wolfSSL_get_error(ssl, 0)
    if ret == -2:
        print "tcp error, is example server running?"
    else:
        print "error string = ", wolfssl.wolfSSL_error_string(err)
    exit(-1)
print "...Connected"
written = wolfssl.wolfSSL_write(ssl, "hello from python\r\n", 19)
if written > 0:
    print "Wrote ", written, " bytes"
# Read up to 100 bytes of the server's response into a SWIG byte array.
byteArray = wolfssl.byteArray(100)
readBytes = wolfssl.wolfSSL_read(ssl, byteArray, 100)
print "server reply: ", wolfssl.cdata(byteArray, readBytes)
| yashdsaraf/bb-bot | wolfssl/swig/runme.py | Python | gpl-3.0 | 1,174 |
#!/usr/bin/env python
#
# Created by Samvel Khalatyan on Mar 23, 2014
# Copyright (c) 2014 Samvel Khalatyan. All rights reserved
#
# Use of this source code is governed by a license that can be found in
# the LICENSE file.
import random
import unittest
from lib import unigraph
class UnigraphExtra(unigraph.Unigraph):
    """Unigraph subclass that adds an edge-membership query."""

    def has_edge(self, left_vertex, right_vertex):
        """Return True when the two vertices are connected.

        Every vertex is considered connected to itself (implicit
        self-loop).
        """
        if left_vertex == right_vertex:
            return True
        return right_vertex in self._vertices[left_vertex]
class UnigraphEdgeTestCase(unittest.TestCase):
    """Exercise UnigraphExtra.has_edge on a randomly generated graph."""

    def setUp(self):
        # Random graph with 10-14 vertices and twice as many random edges.
        self.graph = UnigraphExtra(random.randrange(10, 15))
        for edge in range(2 * self.graph.vertices()):
            f, t = (random.randrange(self.graph.vertices()) for x in range(2))
            self.graph.add_edge(f, t)

    def test_edge(self):
        for vertex in range(self.graph.vertices()):
            existing_vertices = set(self.graph._vertices[vertex])
            all_vertices = set(range(self.graph.vertices()))
            # BUG FIX: the original computed ``all_vertices - all_vertices``,
            # which is always empty, so the negative case was never tested.
            # The vertex itself is excluded too, because has_edge() treats
            # every vertex as self-connected.
            missing_vertices = all_vertices - existing_vertices - {vertex}
            for adj_vertex in existing_vertices:
                self.assertTrue(self.graph.has_edge(vertex, adj_vertex))
            for adj_vertex in missing_vertices:
                self.assertFalse(self.graph.has_edge(vertex, adj_vertex))

    def test_self_loop(self):
        for vertex in range(self.graph.vertices()):
            self.assertTrue(self.graph.has_edge(vertex, vertex))
if "__main__" == __name__:
unittest.main()
| ksamdev/algorithms_old | ch4/python/ch4_ex4.1.4.py | Python | mit | 1,523 |
# Webhooks for external integrations.
from typing import Any, Dict, Text
from django.utils.translation import ugettext as _
from django.http import HttpRequest, HttpResponse
from zerver.lib.actions import check_send_stream_message
from zerver.lib.response import json_success, json_error
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
from zerver.models import UserProfile
import ujson
# Message templates used by the webhook handler below.
PINGDOM_SUBJECT_TEMPLATE = '{name} status.'
PINGDOM_MESSAGE_TEMPLATE = 'Service {service_url} changed its {type} status from {previous_state} to {current_state}.'
PINGDOM_MESSAGE_DESCRIPTION_TEMPLATE = 'Description: {description}.'
# Pingdom check types this webhook knows how to format messages for.
SUPPORTED_CHECK_TYPES = (
    'HTTP',
    # BUG FIX: a missing trailing comma here used to merge 'HTTP_CUSTOM'
    # and 'HTTPS' into the single bogus entry 'HTTP_CUSTOMHTTPS', so
    # neither type was ever recognized.
    'HTTP_CUSTOM',
    'HTTPS',
    'SMTP',
    'POP3',
    'IMAP',
    'PING',
    'DNS',
    'UDP',
    'PORT_TCP',
)
@api_key_only_webhook_view('Pingdom')
@has_request_variables
def api_pingdom_webhook(request, user_profile, payload=REQ(argument_type='body'),
                        stream=REQ(default='pingdom')):
    # type: (HttpRequest, UserProfile, Dict[str, Any], Text) -> HttpResponse
    """
    Handle an incoming Pingdom webhook: post a message describing the
    state change to *stream*, or return a JSON error for check types the
    integration does not support.
    """
    check_type = get_check_type(payload)
    if check_type in SUPPORTED_CHECK_TYPES:
        subject = get_subject_for_http_request(payload)
        body = get_body_for_http_request(payload)
    else:
        return json_error(_('Unsupported check_type: {check_type}').format(check_type=check_type))
    check_send_stream_message(user_profile, request.client, stream, subject, body)
    return json_success()
def get_subject_for_http_request(payload):
    # type: (Dict[str, Any]) -> Text
    """Build the message subject from the Pingdom check name."""
    check_name = payload['check_name']
    return PINGDOM_SUBJECT_TEMPLATE.format(name=check_name)
def get_body_for_http_request(payload):
    # type: (Dict[str, Any]) -> Text
    """Build the message body describing the state transition.

    When a service goes from UP to DOWN, the long description supplied
    by Pingdom is appended on a second line.
    """
    current_state = payload['current_state']
    previous_state = payload['previous_state']
    body = PINGDOM_MESSAGE_TEMPLATE.format(
        service_url=payload['check_params']['hostname'],
        previous_state=previous_state,
        current_state=current_state,
        type=get_check_type(payload),
    )
    if (previous_state, current_state) == ('UP', 'DOWN'):
        description = PINGDOM_MESSAGE_DESCRIPTION_TEMPLATE.format(description=payload['long_description'])
        body += '\n{description}'.format(description=description)
    return body
def get_check_type(payload):
    # type: (Dict[str, Any]) -> Text
    # Extract the Pingdom check type (e.g. 'HTTP', 'PING') from the payload.
    return payload['check_type']
| amanharitsh123/zulip | zerver/webhooks/pingdom/view.py | Python | apache-2.0 | 2,434 |
import numpy as np
import pandas as pd
# Script: train a random forest on the first 160 hourly throughput samples
# of 3_floor.csv and plot predictions against the 22 held-out test hours.
input_file = "3_floor.csv"
# comma delimited is the default
df = pd.read_csv(input_file, header = 0)
# for space delimited use:
# df = pd.read_csv(input_file, header = 0, delimiter = " ")
# for tab delimited use:
# df = pd.read_csv(input_file, header = 0, delimiter = "\t")
# put the original column names in a python list
original_headers = list(df.columns.values)
# remove the non-numeric columns
df = df._get_numeric_data()
# put the numeric column names in a python list
numeric_headers = list(df.columns.values)
# create a numpy array with the numeric values for input into scikit-learn
# NOTE(review): DataFrame.as_matrix() is deprecated/removed in newer
# pandas; df.values (or df.to_numpy()) is the modern equivalent.
numpy_array = df.as_matrix()
# reverse the order of the columns
#numeric_headers.reverse()
#reverse_df = df[numeric_headers]
# throughput random forest regression
# Column 3 is assumed to be throughput; columns 0-2 the features -- TODO confirm.
t = numpy_array[0:160, 3]
x = np.linspace(0, 159, 160)
xall = np.linspace(0, 181, 182)
xtest = np.linspace(160, 181, 22)
from sklearn.ensemble import RandomForestRegressor
#tfit = RandomForestRegressor(100).fit(x[:, None], t).predict(x[:, None])
# Fit on the first 160 rows, predict over all 182 (train + test) rows.
tfit = RandomForestRegressor(100).fit(numpy_array[0:160, 0:2 ], t).predict(numpy_array[0:182, 0:2])
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.errorbar(x, t, 0.3, fmt='*', label="Training traffic")
ax.plot(xall, tfit, '-r', label="Predicted traffic")
ax.errorbar(xtest, numpy_array[160:182, 3], fmt='og', label="Test traffic")
ax.set_ylabel('Throughput (kbits/second)')
ax.set_xlabel('Time in hours')
# NOTE(review): 'Taffic' in the title string looks like a typo for 'Traffic'.
ax.set_title('Taffic Prediction with Random Forest Regression on 3rd floor')
ax.legend(loc="upper left")
plt.show()
| Silvia333/ASleep | my_importData.py | Python | apache-2.0 | 1,589 |
#! /usr/bin/env python
# -*- coding: utf-8
import sys
import os
import cv2
import numpy as np
import time
import StringIO
from misc import WithTimer
from numpy_cache import FIFOLimitedArrayCache
from app_base import BaseApp
from image_misc import norm01, norm01c, norm0255, tile_images_normalize, ensure_float01, tile_images_make_tiles, ensure_uint255_and_resize_to_fit, get_tiles_height_width, get_tiles_height_width_ratio
from image_misc import FormattedString, cv2_typeset_text, to_255
from caffe_proc_thread import CaffeProcThread
from jpg_vis_loading_thread import JPGVisLoadingThread
from caffevis_app_state import CaffeVisAppState
from caffevis_helper import get_pretty_layer_name, read_label_file, load_sprite_image, load_square_sprite_image, check_force_backward_true
class CaffeVisApp(BaseApp):
'''App to visualize using caffe.'''
    def __init__(self, settings, key_bindings):
        """Load the caffe model, its mean, and the label file per *settings*."""
        super(CaffeVisApp, self).__init__(settings, key_bindings)
        print 'Got settings', settings
        self.settings = settings
        self.bindings = key_bindings
        self._net_channel_swap = settings.caffe_net_channel_swap
        if self._net_channel_swap is None:
            self._net_channel_swap_inv = None
        else:
            # Inverse permutation, used to undo the channel swap later.
            self._net_channel_swap_inv = tuple([self._net_channel_swap.index(ii) for ii in range(len(self._net_channel_swap))])
        self._range_scale = 1.0 # not needed; image already in [0,255]
        # Set the mode to CPU or GPU. Note: in the latest Caffe
        # versions, there is one Caffe object *per thread*, so the
        # mode must be set per thread! Here we set the mode for the
        # main thread; it is also separately set in CaffeProcThread.
        sys.path.insert(0, os.path.join(settings.caffevis_caffe_root, 'python'))
        import caffe
        if settings.caffevis_mode_gpu:
            caffe.set_mode_gpu()
            print 'CaffeVisApp mode (in main thread): GPU'
        else:
            caffe.set_mode_cpu()
            print 'CaffeVisApp mode (in main thread): CPU'
        self.net = caffe.Classifier(
            settings.caffevis_deploy_prototxt,
            settings.caffevis_network_weights,
            mean = None, # Set to None for now, assign later # self._data_mean,
            channel_swap = self._net_channel_swap,
            raw_scale = self._range_scale,
        )
        if isinstance(settings.caffevis_data_mean, basestring):
            # If the mean is given as a filename, load the file
            try:
                filename, file_extension = os.path.splitext(settings.caffevis_data_mean)
                if file_extension == ".npy":
                    # load mean from numpy array
                    self._data_mean = np.load(settings.caffevis_data_mean)
                    print "Loaded mean from numpy file, data_mean.shape: ", self._data_mean.shape
                elif file_extension == ".binaryproto":
                    # load mean from binary protobuf file
                    blob = caffe.proto.caffe_pb2.BlobProto()
                    data = open(settings.caffevis_data_mean, 'rb').read()
                    blob.ParseFromString(data)
                    self._data_mean = np.array(caffe.io.blobproto_to_array(blob))
                    self._data_mean = np.squeeze(self._data_mean)
                    print "Loaded mean from binaryproto file, data_mean.shape: ", self._data_mean.shape
                else:
                    # unknown file extension, trying to load as numpy array
                    self._data_mean = np.load(settings.caffevis_data_mean)
                    print "Loaded mean from numpy file, data_mean.shape: ", self._data_mean.shape
            except IOError:
                print '\n\nCound not load mean file:', settings.caffevis_data_mean
                print 'Ensure that the values in settings.py point to a valid model weights file, network'
                print 'definition prototxt, and mean. To fetch a default model and mean file, use:\n'
                print '$ cd models/caffenet-yos/'
                print '$ ./fetch.sh\n\n'
                raise
            input_shape = self.net.blobs[self.net.inputs[0]].data.shape[-2:] # e.g. 227x227
            # Crop center region (e.g. 227x227) if mean is larger (e.g. 256x256)
            excess_h = self._data_mean.shape[1] - input_shape[0]
            excess_w = self._data_mean.shape[2] - input_shape[1]
            assert excess_h >= 0 and excess_w >= 0, 'mean should be at least as large as %s' % repr(input_shape)
            self._data_mean = self._data_mean[:, (excess_h/2):(excess_h/2+input_shape[0]),
                                              (excess_w/2):(excess_w/2+input_shape[1])]
        elif settings.caffevis_data_mean is None:
            self._data_mean = None
        else:
            # The mean has been given as a value or a tuple of values
            self._data_mean = np.array(settings.caffevis_data_mean)
            # Promote to shape C,1,1
            # NOTE(review): the condition `< 1` only loops for an empty
            # shape; promotion to C,1,1 likely intended `< 3` -- confirm.
            while len(self._data_mean.shape) < 1:
                self._data_mean = np.expand_dims(self._data_mean, -1)
            #if not isinstance(self._data_mean, tuple):
            # # If given as int/float: promote to tuple
            # self._data_mean = tuple(self._data_mean)
        if self._data_mean is not None:
            self.net.transformer.set_mean(self.net.inputs[0], self._data_mean)
        check_force_backward_true(settings.caffevis_deploy_prototxt)
        self.labels = None
        if self.settings.caffevis_labels:
            self.labels = read_label_file(self.settings.caffevis_labels)
        self.proc_thread = None
        self.jpgvis_thread = None
        self.handled_frames = 0
        if settings.caffevis_jpg_cache_size < 10*1024**2:
            raise Exception('caffevis_jpg_cache_size must be at least 10MB for normal operation.')
        self.img_cache = FIFOLimitedArrayCache(settings.caffevis_jpg_cache_size)
        self._populate_net_layer_info()
    def _populate_net_layer_info(self):
        '''For each layer, save the number of filters and precompute
        tile arrangement (needed by CaffeVisAppState to handle
        keyboard navigation).
        '''
        self.net_layer_info = {}
        for key in self.net.blobs.keys():
            self.net_layer_info[key] = {}
            # Conv example: (1, 96, 55, 55)
            # FC example: (1, 1000)
            blob_shape = self.net.blobs[key].data.shape
            assert len(blob_shape) in (2,4), 'Expected either 2 for FC or 4 for conv layer'
            self.net_layer_info[key]['isconv'] = (len(blob_shape) == 4)
            self.net_layer_info[key]['data_shape'] = blob_shape[1:] # Chop off batch size
            # One tile per channel/unit; arrange them in a grid close to
            # the configured aspect ratio.
            self.net_layer_info[key]['n_tiles'] = blob_shape[1]
            self.net_layer_info[key]['tiles_rc'] = get_tiles_height_width_ratio(blob_shape[1], self.settings.caffevis_layers_aspect_ratio)
            self.net_layer_info[key]['tile_rows'] = self.net_layer_info[key]['tiles_rc'][0]
            self.net_layer_info[key]['tile_cols'] = self.net_layer_info[key]['tiles_rc'][1]
    def start(self):
        """Create the shared app state and launch the worker threads."""
        self.state = CaffeVisAppState(self.net, self.settings, self.bindings, self.net_layer_info)
        self.state.drawing_stale = True
        self.layer_print_names = [get_pretty_layer_name(self.settings, nn) for nn in self.state._layers]
        if self.proc_thread is None or not self.proc_thread.is_alive():
            # Start thread if it's not already running
            self.proc_thread = CaffeProcThread(self.settings, self.net, self.state,
                                               self.settings.caffevis_frame_wait_sleep,
                                               self.settings.caffevis_pause_after_keys,
                                               self.settings.caffevis_heartbeat_required,
                                               self.settings.caffevis_mode_gpu)
            self.proc_thread.start()
        if self.jpgvis_thread is None or not self.jpgvis_thread.is_alive():
            # Start thread if it's not already running
            self.jpgvis_thread = JPGVisLoadingThread(self.settings, self.state, self.img_cache,
                                                     self.settings.caffevis_jpg_load_sleep,
                                                     self.settings.caffevis_heartbeat_required)
            self.jpgvis_thread.start()
    def get_heartbeats(self):
        # Heartbeat callables for the watchdog: both worker threads must
        # report in when heartbeats are required.
        return [self.proc_thread.heartbeat, self.jpgvis_thread.heartbeat]
    def quit(self):
        """Signal the worker threads to quit and join the caffe proc thread.

        Raises an Exception when the proc thread fails to terminate
        within about three seconds.
        """
        print 'CaffeVisApp: trying to quit'
        with self.state.lock:
            self.state.quit = True
        if self.proc_thread != None:
            # Give the thread up to three 1-second join attempts.
            for ii in range(3):
                self.proc_thread.join(1)
                if not self.proc_thread.is_alive():
                    break
            if self.proc_thread.is_alive():
                raise Exception('CaffeVisApp: Could not join proc_thread; giving up.')
            self.proc_thread = None
        print 'CaffeVisApp: quitting.'
def _can_skip_all(self, panes):
return ('caffevis_layers' not in panes.keys())
    def handle_input(self, input_image, panes):
        """Push a new input frame to the caffe processing thread (if any panes need it)."""
        if self.debug_level > 1:
            print 'handle_input: frame number', self.handled_frames, 'is', 'None' if input_image is None else 'Available'
        self.handled_frames += 1
        if self._can_skip_all(panes):
            return
        with self.state.lock:
            if self.debug_level > 1:
                print 'CaffeVisApp.handle_input: pushed frame'
            # The proc thread picks up next_frame on its own schedule.
            self.state.next_frame = input_image
            if self.debug_level > 1:
                print 'CaffeVisApp.handle_input: caffe_net_state is:', self.state.caffe_net_state
    def redraw_needed(self):
        # Delegate to the shared state (set when layer/unit/mode changes).
        return self.state.redraw_needed()
    def draw(self, panes):
        """Render all requested panes; returns True when drawing occurred.

        Drawing only happens when the state is stale and the caffe net is
        'free'; the net state is set to 'draw' for the duration.
        """
        if self._can_skip_all(panes):
            if self.debug_level > 1:
                print 'CaffeVisApp.draw: skipping'
            return False
        with self.state.lock:
            # Hold lock throughout drawing
            do_draw = self.state.drawing_stale and self.state.caffe_net_state == 'free'
            #print 'CaffeProcThread.draw: caffe_net_state is:', self.state.caffe_net_state
            if do_draw:
                self.state.caffe_net_state = 'draw'
        if do_draw:
            if self.debug_level > 1:
                print 'CaffeVisApp.draw: drawing'
            if 'caffevis_control' in panes:
                self._draw_control_pane(panes['caffevis_control'])
            if 'caffevis_status' in panes:
                self._draw_status_pane(panes['caffevis_status'])
            layer_data_3D_highres = None
            if 'caffevis_layers' in panes:
                layer_data_3D_highres = self._draw_layer_pane(panes['caffevis_layers'])
            if 'caffevis_aux' in panes:
                self._draw_aux_pane(panes['caffevis_aux'], layer_data_3D_highres)
            if 'caffevis_back' in panes:
                # Draw back pane as normal
                self._draw_back_pane(panes['caffevis_back'])
                if self.state.layers_pane_zoom_mode == 2:
                    # ALSO draw back pane into layers pane
                    self._draw_back_pane(panes['caffevis_layers'])
            if 'caffevis_jpgvis' in panes:
                self._draw_jpgvis_pane(panes['caffevis_jpgvis'])
            with self.state.lock:
                self.state.drawing_stale = False
                self.state.caffe_net_state = 'free'
        return do_draw
    def _draw_prob_labels_pane(self, pane):
        '''Adds text label annotation atop the given pane.

        Shows the top-5 predicted labels (from the configured prob
        layer), each colored by interpolating between clr_0 and clr_1
        according to its probability.
        '''
        if not self.labels or not self.state.show_label_predictions or not self.settings.caffevis_prob_layer:
            return
        #pane.data[:] = to_255(self.settings.window_background)
        defaults = {'face': getattr(cv2, self.settings.caffevis_class_face),
                    'fsize': self.settings.caffevis_class_fsize,
                    'clr': to_255(self.settings.caffevis_class_clr_0),
                    'thick': self.settings.caffevis_class_thick}
        loc = self.settings.caffevis_class_loc[::-1] # Reverse to OpenCV c,r order
        clr_0 = to_255(self.settings.caffevis_class_clr_0)
        clr_1 = to_255(self.settings.caffevis_class_clr_1)
        probs_flat = self.net.blobs[self.settings.caffevis_prob_layer].data.flatten()
        # Indices of the five highest probabilities, descending.
        top_5 = probs_flat.argsort()[-1:-6:-1]
        strings = []
        # NOTE(review): pmax is only used by the commented-out blend below.
        pmax = probs_flat[top_5[0]]
        for idx in top_5:
            prob = probs_flat[idx]
            text = '%.2f %s' % (prob, self.labels[idx])
            fs = FormattedString(text, defaults)
            #fs.clr = tuple([clr_1[ii]*prob/pmax + clr_0[ii]*(1-prob/pmax) for ii in range(3)])
            fs.clr = tuple([max(0,min(255,clr_1[ii]*prob + clr_0[ii]*(1-prob))) for ii in range(3)])
            strings.append([fs]) # Line contains just fs
        cv2_typeset_text(pane.data, strings, loc,
                         line_spacing = self.settings.caffevis_class_line_spacing)
def _draw_control_pane(self, pane):
    '''Render the layer-selection strip, coloring the frozen-backprop, selected, and cursor layers.'''
    pane.data[:] = to_255(self.settings.window_background)
    with self.state.lock:
        layer_idx = self.state.layer_idx  # read under lock (kept for parity with other panes)
    text_loc = self.settings.caffevis_control_loc[::-1]  # settings give r,c; OpenCV wants c,r
    font_defaults = {'face': getattr(cv2, self.settings.caffevis_control_face),
                     'fsize': self.settings.caffevis_control_fsize,
                     'clr': to_255(self.settings.caffevis_control_clr),
                     'thick': self.settings.caffevis_control_thick}
    entries = []
    for idx in range(len(self.layer_print_names)):
        fs = FormattedString(self.layer_print_names[idx], font_defaults)
        layer = self.state._layers[idx]
        frozen_bp_here = (self.state.backprop_selection_frozen and
                          layer == self.state.backprop_layer)
        if frozen_bp_here:
            # Frozen backprop layer gets its own highlight
            fs.clr = to_255(self.settings.caffevis_control_clr_bp)
            fs.thick = self.settings.caffevis_control_thick_bp
        if layer == self.state.layer:
            if self.state.cursor_area == 'top':
                # Cursor highlight wins over everything else
                fs.clr = to_255(self.settings.caffevis_control_clr_cursor)
                fs.thick = self.settings.caffevis_control_thick_cursor
            elif not frozen_bp_here:
                # Selected-but-not-cursor, unless the backprop highlight already applies
                fs.clr = to_255(self.settings.caffevis_control_clr_selected)
                fs.thick = self.settings.caffevis_control_thick_selected
        entries.append(fs)
    cv2_typeset_text(pane.data, entries, text_loc,
                     line_spacing=self.settings.caffevis_control_line_spacing,
                     wrap=True)
def _draw_status_pane(self, pane):
    '''Render the status bar: display mode, layer:unit, backprop state, boost values, FPS, extra messages.'''
    pane.data[:] = to_255(self.settings.window_background)
    defaults = {'face': getattr(cv2, self.settings.caffevis_status_face),
                'fsize': self.settings.caffevis_status_fsize,
                'clr': to_255(self.settings.caffevis_status_clr),
                'thick': self.settings.caffevis_status_thick}
    loc = self.settings.caffevis_status_loc[::-1]    # Reverse to OpenCV c,r order
    # Build the status text in a buffer; Python 2 print-chevron syntax, where a
    # trailing comma continues the same output line.
    status = StringIO.StringIO()
    fps = self.proc_thread.approx_fps()
    with self.state.lock:
        print >>status, 'pattern' if self.state.pattern_mode else ('back' if self.state.layers_show_back else 'fwd'),
        print >>status, '%s:%d |' % (self.state.layer, self.state.selected_unit),
        if not self.state.back_enabled:
            print >>status, 'Back: off',
        else:
            print >>status, 'Back: %s' % ('deconv' if self.state.back_mode == 'deconv' else 'bprop'),
            print >>status, '(from %s_%d, disp %s)' % (self.state.backprop_layer,
                                                       self.state.backprop_unit,
                                                       self.state.back_filt_mode),
        print >>status, '|',
        print >>status, 'Boost: %g/%g' % (self.state.layer_boost_indiv, self.state.layer_boost_gamma)
        if fps > 0:
            print >>status, '| FPS: %.01f' % fps
        if self.state.extra_msg:
            print >>status, '|', self.state.extra_msg
            # one-shot message: cleared after being shown once
            self.state.extra_msg = ''
    strings = [FormattedString(line, defaults) for line in status.getvalue().split('\n')]
    cv2_typeset_text(pane.data, strings, loc,
                     line_spacing = self.settings.caffevis_status_line_spacing)
def _draw_layer_pane(self, pane):
    '''Draw the main layer pane: a tiled grid of unit activations/gradients,
    or precomputed optimized patterns loaded from disk in pattern mode.

    Returns the data shown in highres format, b01c order (or None-equivalent
    fallback), so the caller can forward it to the aux pane.
    '''
    # Choose forward activations or backward gradients for the current layer
    if self.state.layers_show_back:
        layer_dat_3D = self.net.blobs[self.state.layer].diff[0]
    else:
        layer_dat_3D = self.net.blobs[self.state.layer].data[0]
    # Promote FC layers with shape (n) to have shape (n,1,1)
    if len(layer_dat_3D.shape) == 1:
        layer_dat_3D = layer_dat_3D[:,np.newaxis,np.newaxis]
    n_tiles = layer_dat_3D.shape[0]
    tile_rows,tile_cols = self.net_layer_info[self.state.layer]['tiles_rc']
    display_3D_highres = None
    if self.state.pattern_mode:
        # Show desired patterns loaded from disk
        load_layer = self.state.layer
        # Some layers share their jpgvis assets with another layer via a remap table
        if self.settings.caffevis_jpgvis_remap and self.state.layer in self.settings.caffevis_jpgvis_remap:
            load_layer = self.settings.caffevis_jpgvis_remap[self.state.layer]
        if self.settings.caffevis_jpgvis_layers and load_layer in self.settings.caffevis_jpgvis_layers:
            jpg_path = os.path.join(self.settings.caffevis_unit_jpg_dir,
                                    'regularized_opt', load_layer, 'whole_layer.jpg')
            # Get highres version, first consulting the in-memory cache
            display_3D_highres = self.img_cache.get((jpg_path, 'whole'), None)
            if display_3D_highres is None:
                try:
                    with WithTimer('CaffeVisApp:load_sprite_image', quiet = self.debug_level < 1):
                        display_3D_highres = load_square_sprite_image(jpg_path, n_sprites = n_tiles)
                except IOError:
                    # File does not exist, so just display disabled.
                    pass
                else:
                    # Loaded successfully: remember it for next time
                    self.img_cache.set((jpg_path, 'whole'), display_3D_highres)
        if display_3D_highres is not None:
            # Get lowres version, maybe. Assume we want at least one pixel for selection border.
            row_downsamp_factor = int(np.ceil(float(display_3D_highres.shape[1]) / (pane.data.shape[0] / tile_rows - 2)))
            col_downsamp_factor = int(np.ceil(float(display_3D_highres.shape[2]) / (pane.data.shape[1] / tile_cols - 2)))
            ds = max(row_downsamp_factor, col_downsamp_factor)
            if ds > 1:
                # Strided subsampling of the sprite sheet to fit the pane
                display_3D = display_3D_highres[:,::ds,::ds,:]
            else:
                display_3D = display_3D_highres
        else:
            display_3D = layer_dat_3D * 0 # nothing to show
    else:
        # Show data from network (activations or diffs)
        if self.state.layers_show_back:
            back_what_to_disp = self.get_back_what_to_disp()
            if back_what_to_disp == 'disabled':
                layer_dat_3D_normalized = np.tile(self.settings.window_background, layer_dat_3D.shape + (1,))
            elif back_what_to_disp == 'stale':
                layer_dat_3D_normalized = np.tile(self.settings.stale_background, layer_dat_3D.shape + (1,))
            else:
                # Diffs are signed: red for negative, green for positive
                layer_dat_3D_normalized = tile_images_normalize(layer_dat_3D,
                                                                boost_indiv = self.state.layer_boost_indiv,
                                                                boost_gamma = self.state.layer_boost_gamma,
                                                                neg_pos_colors = ((1,0,0), (0,1,0)))
        else:
            layer_dat_3D_normalized = tile_images_normalize(layer_dat_3D,
                                                            boost_indiv = self.state.layer_boost_indiv,
                                                            boost_gamma = self.state.layer_boost_gamma)
        display_3D = layer_dat_3D_normalized
    # Convert to float if necessary:
    display_3D = ensure_float01(display_3D)
    # Upsample gray -> color if necessary
    # e.g. (1000,32,32) -> (1000,32,32,3)
    if len(display_3D.shape) == 3:
        display_3D = display_3D[:,:,:,np.newaxis]
    if display_3D.shape[3] == 1:
        display_3D = np.tile(display_3D, (1, 1, 1, 3))
    # Upsample unit length tiles to give a more sane tile / highlight ratio
    # e.g. (1000,1,1,3) -> (1000,3,3,3)
    if display_3D.shape[1] == 1:
        display_3D = np.tile(display_3D, (1, 3, 3, 1))
    # Background color between tiles differs in backprop display mode
    if self.state.layers_show_back and not self.state.pattern_mode:
        padval = self.settings.caffevis_layer_clr_back_background
    else:
        padval = self.settings.window_background
    # Per-tile border highlights for the cursor and the frozen backprop unit
    highlights = [None] * n_tiles
    with self.state.lock:
        if self.state.cursor_area == 'bottom':
            highlights[self.state.selected_unit] = self.settings.caffevis_layer_clr_cursor          # in [0,1] range
        if self.state.backprop_selection_frozen and self.state.layer == self.state.backprop_layer:
            highlights[self.state.backprop_unit] = self.settings.caffevis_layer_clr_back_sel        # in [0,1] range
    _, display_2D = tile_images_make_tiles(display_3D, hw = (tile_rows,tile_cols), padval = padval, highlights = highlights)
    if display_3D_highres is None:
        display_3D_highres = display_3D
    # Display pane based on layers_pane_zoom_mode
    state_layers_pane_zoom_mode = self.state.layers_pane_zoom_mode
    assert state_layers_pane_zoom_mode in (0,1,2)
    if state_layers_pane_zoom_mode == 0:
        # Mode 0: normal display (activations or patterns)
        display_2D_resize = ensure_uint255_and_resize_to_fit(display_2D, pane.data.shape)
    elif state_layers_pane_zoom_mode == 1:
        # Mode 1: zoomed selection
        unit_data = display_3D_highres[self.state.selected_unit]
        display_2D_resize = ensure_uint255_and_resize_to_fit(unit_data, pane.data.shape)
    else:
        # Mode 2: zoomed backprop pane (blank here; back pane is drawn over it by the caller)
        display_2D_resize = ensure_uint255_and_resize_to_fit(display_2D, pane.data.shape) * 0
    pane.data[:] = to_255(self.settings.window_background)
    pane.data[0:display_2D_resize.shape[0], 0:display_2D_resize.shape[1], :] = display_2D_resize
    if self.settings.caffevis_label_layers and self.state.layer in self.settings.caffevis_label_layers and self.labels and self.state.cursor_area == 'bottom':
        # Display label annotation atop layers pane (e.g. for fc8/prob)
        defaults = {'face': getattr(cv2, self.settings.caffevis_label_face),
                    'fsize': self.settings.caffevis_label_fsize,
                    'clr': to_255(self.settings.caffevis_label_clr),
                    'thick': self.settings.caffevis_label_thick}
        loc_base = self.settings.caffevis_label_loc[::-1]   # Reverse to OpenCV c,r order
        lines = [FormattedString(self.labels[self.state.selected_unit], defaults)]
        cv2_typeset_text(pane.data, lines, loc_base)
    return display_3D_highres
def _draw_aux_pane(self, pane, layer_data_normalized):
    '''Draw the aux pane: the selected unit enlarged when the cursor is in the
    bottom area, otherwise the top predicted class labels.'''
    pane.data[:] = to_255(self.settings.window_background)
    with self.state.lock:
        mode = 'selected' if self.state.cursor_area == 'bottom' else 'prob_labels'
    if mode == 'selected':
        unit_img = layer_data_normalized[self.state.selected_unit]
        unit_img_resized = ensure_uint255_and_resize_to_fit(unit_img, pane.data.shape)
        pane.data[:unit_img_resized.shape[0], :unit_img_resized.shape[1], :] = unit_img_resized
    else:
        self._draw_prob_labels_pane(pane)
def _draw_back_pane(self, pane):
    '''Draw the backprop/deconv pane from the gradient at the data blob,
    applying the selected per-pixel filtering mode.'''
    mode = None
    # Snapshot the relevant state under lock so drawing sees a consistent view
    with self.state.lock:
        back_enabled = self.state.back_enabled
        back_mode = self.state.back_mode
        back_filt_mode = self.state.back_filt_mode
        state_layer = self.state.layer
        selected_unit = self.state.selected_unit
        back_what_to_disp = self.get_back_what_to_disp()
    if back_what_to_disp == 'disabled':
        pane.data[:] = to_255(self.settings.window_background)
    elif back_what_to_disp == 'stale':
        pane.data[:] = to_255(self.settings.stale_background)
    else:
        # One of the backprop modes is enabled and the back computation (gradient or deconv) is up to date
        grad_blob = self.net.blobs['data'].diff
        # Manually deprocess (skip mean subtraction and rescaling)
        grad_blob = grad_blob[0]                    # bc01 -> c01
        grad_blob = grad_blob.transpose((1,2,0))    # c01 -> 01c
        if self._net_channel_swap_inv is None:
            # No channel-swap configured: plain slice (a no-op view copy)
            grad_img = grad_blob[:, :, :]           # e.g. BGR -> RGB
        else:
            grad_img = grad_blob[:, :, self._net_channel_swap_inv]  # e.g. BGR -> RGB
        # Mode-specific processing
        assert back_mode in ('grad', 'deconv')
        assert back_filt_mode in ('raw', 'gray', 'norm', 'normblur')
        if back_filt_mode == 'raw':
            # Signed per-channel gradient, normalized about zero
            grad_img = norm01c(grad_img, 0)
        elif back_filt_mode == 'gray':
            # Channel-mean gradient, normalized about zero
            grad_img = grad_img.mean(axis=2)
            grad_img = norm01c(grad_img, 0)
        elif back_filt_mode == 'norm':
            # Per-pixel gradient magnitude
            grad_img = np.linalg.norm(grad_img, axis=2)
            grad_img = norm01(grad_img)
        else:
            # 'normblur': gradient magnitude smoothed with a Gaussian
            grad_img = np.linalg.norm(grad_img, axis=2)
            cv2.GaussianBlur(grad_img, (0,0), self.settings.caffevis_grad_norm_blur_radius, grad_img)
            grad_img = norm01(grad_img)
        # If necessary, re-promote from grayscale to color
        if len(grad_img.shape) == 2:
            grad_img = np.tile(grad_img[:,:,np.newaxis], 3)
        if (self.settings.static_files_input_mode == "siamese_image_list") and (grad_img.shape[2] == 6):
            # Siamese input: 6 channels are two stacked RGB images; show side by side
            grad_img1 = grad_img[:, :, 0:3]
            grad_img2 = grad_img[:, :, 3:6]
            # NOTE(review): cv2.resize takes dsize as (width, height), but
            # half_pane_shape is built as (rows/2, cols) — confirm the intended
            # orientation of the split here.
            half_pane_shape = (pane.data.shape[0] / 2, pane.data.shape[1])
            grad_img_disp1 = cv2.resize(grad_img1[:], half_pane_shape)
            grad_img_disp2 = cv2.resize(grad_img2[:], half_pane_shape)
            grad_img_disp = np.concatenate((grad_img_disp1, grad_img_disp2), axis=1)
        else:
            grad_img_disp = grad_img
        grad_img_resize = ensure_uint255_and_resize_to_fit(grad_img_disp, pane.data.shape)
        pane.data[0:grad_img_resize.shape[0], 0:grad_img_resize.shape[1], :] = grad_img_resize
def _draw_jpgvis_pane(self, pane):
    '''Draw the jpgvis pane: a cached, precomputed visualization image for the
    selected unit, requesting an async load when it is not cached yet.'''
    pane.data[:] = to_255(self.settings.window_background)
    with self.state.lock:
        state_layer, state_selected_unit, cursor_area, show_unit_jpgs = self.state.layer, self.state.selected_unit, self.state.cursor_area, self.state.show_unit_jpgs
    try:
        # Some may be missing this setting
        # NOTE(review): bare except also traps KeyboardInterrupt/SystemExit;
        # except AttributeError would be more precise here.
        self.settings.caffevis_jpgvis_layers
    except:
        print '\n\nNOTE: you need to upgrade your settings.py and settings_local.py files. See README.md.\n\n'
        raise
    # Some layers share jpgvis assets with another layer via a remap table
    if self.settings.caffevis_jpgvis_remap and state_layer in self.settings.caffevis_jpgvis_remap:
        img_key_layer = self.settings.caffevis_jpgvis_remap[state_layer]
    else:
        img_key_layer = state_layer
    if self.settings.caffevis_jpgvis_layers and img_key_layer in self.settings.caffevis_jpgvis_layers and cursor_area == 'bottom' and show_unit_jpgs:
        img_key = (img_key_layer, state_selected_unit, pane.data.shape)
        img_resize = self.img_cache.get(img_key, None)
        if img_resize is None:
            # If img_resize is None, loading has not yet been attempted, so show stale image and request load by JPGVisLoadingThread
            with self.state.lock:
                self.state.jpgvis_to_load_key = img_key
            pane.data[:] = to_255(self.settings.stale_background)
        elif img_resize.nbytes == 0:
            # This is the sentinal value when the image is not
            # found, i.e. loading was already attempted but no jpg
            # assets were found. Just display disabled.
            pane.data[:] = to_255(self.settings.window_background)
        else:
            # Show image
            pane.data[:img_resize.shape[0], :img_resize.shape[1], :] = img_resize
    else:
        # Will never be available
        pane.data[:] = to_255(self.settings.window_background)
def handle_key(self, key, panes):
    '''Delegate key handling to the state object; `panes` is accepted but unused here.'''
    return self.state.handle_key(key)
def get_back_what_to_disp(self):
    '''Return 'disabled', 'stale', or 'normal' describing whether back-diff
    information can currently be shown.'''
    # Back display is unavailable when it is off, or when the cursor is in the
    # top (layer-selection) area without a frozen backprop selection.
    unavailable = (not self.state.back_enabled or
                   (self.state.cursor_area == 'top' and
                    not self.state.backprop_selection_frozen))
    if unavailable:
        return 'disabled'
    if self.state.back_stale:
        return 'stale'
    return 'normal'
def set_debug(self, level):
    '''Propagate the debug verbosity level to this app and its worker threads.'''
    for target in (self, self.proc_thread, self.jpgvis_thread):
        target.debug_level = level
def draw_help(self, help_pane, locy):
    '''Typeset the CaffeVis key-binding help text into help_pane, starting at
    vertical offset locy. Returns the y position after the last line, so the
    caller can stack further help sections below.'''
    defaults = {'face': getattr(cv2, self.settings.help_face),
                'fsize': self.settings.help_fsize,
                'clr': to_255(self.settings.help_clr),
                'thick': self.settings.help_thick}
    loc_base = self.settings.help_loc[::-1]   # Reverse to OpenCV c,r order
    locx = loc_base[0]
    lines = []
    lines.append([FormattedString('', defaults)])
    lines.append([FormattedString('Caffevis keys', defaults)])
    # Primary (and possibly alternate) key bindings for unit navigation
    kl,_ = self.bindings.get_key_help('sel_left')
    kr,_ = self.bindings.get_key_help('sel_right')
    ku,_ = self.bindings.get_key_help('sel_up')
    kd,_ = self.bindings.get_key_help('sel_down')
    klf,_ = self.bindings.get_key_help('sel_left_fast')
    krf,_ = self.bindings.get_key_help('sel_right_fast')
    kuf,_ = self.bindings.get_key_help('sel_up_fast')
    kdf,_ = self.bindings.get_key_help('sel_down_fast')
    # First key string of each binding, comma-separated
    keys_nav_0 = ','.join([kk[0] for kk in (kl, kr, ku, kd)])
    keys_nav_1 = ''
    # Mention the alternate key set only when every direction has one
    if len(kl)>1 and len(kr)>1 and len(ku)>1 and len(kd)>1:
        keys_nav_1 += ' or '
        keys_nav_1 += ','.join([kk[1] for kk in (kl, kr, ku, kd)])
    keys_nav_f = ','.join([kk[0] for kk in (klf, krf, kuf, kdf)])
    nav_string = 'Navigate with %s%s. Use %s to move faster.' % (keys_nav_0, keys_nav_1, keys_nav_f)
    lines.append([FormattedString('', defaults, width=120, align='right'),
                  FormattedString(nav_string, defaults)])
    # One "keys: description" row per remaining binding
    for tag in ('sel_layer_left', 'sel_layer_right', 'zoom_mode', 'pattern_mode',
                'ez_back_mode_loop', 'freeze_back_unit', 'show_back', 'back_mode', 'back_filt_mode',
                'boost_gamma', 'boost_individual', 'reset_state'):
        key_strings, help_string = self.bindings.get_key_help(tag)
        label = '%10s:' % (','.join(key_strings))
        lines.append([FormattedString(label, defaults, width=120, align='right'),
                      FormattedString(help_string, defaults)])
    locy = cv2_typeset_text(help_pane.data, lines, (locx, locy),
                            line_spacing = self.settings.help_line_spacing)
    return locy
| yosinski/deep-visualization-toolbox | caffevis/app.py | Python | mit | 33,221 |
# metafits format version number
_VERSION=2.0
import logging,datetime,math
import numpy,ephem
try:
import astropy.io.fits as pyfits
except ImportError:
import pyfits
import json,urllib,urllib2
USEWEB = True # If true, use the web services to get metadata, otherwise call functions in obssched.tilestatus
if not USEWEB:
import mwaconfig
from obssched import tilestatus
from mwapy import ephem_utils
import mwapy
try:
from mwapy.eorpy import ionex
_USE_IONEX=True
except ImportError:
_USE_IONEX=False
try:
from mwapy.pb import primarybeammap
_useplotting=True
except ImportError:
_useplotting=False
try:
from astropy.time import Time
from astropy.coordinates.angles import Angle
from astropy import units as u
_useastropy=True
except ImportError:
_useastropy=False
# configure the logging
logging.basicConfig(format='# %(levelname)s:%(name)s: %(message)s')
logger=logging.getLogger('metadata')
logger.setLevel(logging.WARNING)
# 128 antennas * 2 polns
_NINP_128T=256
# 16 inputs (16 antennas * 2 polns) go into each Rx
_INPUTSPERRX=16
# default digital gain
_DEFAULT_GAIN=64
# total number of coarse channels
_NUM_COARSE_CHANNELS=256
# if a dipole is bad, its delay is set to this
_BAD_DIPOLE_DELAY=32
_BASEURL='http://mro.mwa128t.org'
######################################################################
def fetch_metadata(service='obs', gpstime=None, filename=None, URL=_BASEURL):
    """Fetch MWA metadata for one observation and return it as a Python dict.

    service  -- 'obs' (observation info) or 'con' (tile connection info)
    gpstime  -- observation id / start time in GPS seconds
    filename -- alternatively, a data file name identifying the observation
    URL      -- base URL of the metadata web server

    Exactly one of gpstime/filename must be supplied.  Returns None on error.
    When USEWEB is False, the data are read locally via obssched.tilestatus
    instead of the web service (both paths return JSON-compatible types).
    """
    if USEWEB:
        if gpstime is not None:
            data = urllib.urlencode({'obs_id':gpstime})
            if gpstime > ephem_utils.GPSseconds_now():
                logger.warning('Time requested (%d) is in the future; results may not be reliable' % (gpstime))
        elif filename is not None:
            data = urllib.urlencode({'filename':filename})
        else:
            logger.error("Must pass either filename or obs_id")
            return
        if service.strip().lower() in ['obs','con']:
            service = service.strip().lower()
        else:
            logger.error("Invalid service name: %s" % service)
            return
        url=URL + '/metadata/' + service + '?' + data
        try:
            logger.debug('Retrieving %s...' % url)
            result = json.load(urllib2.urlopen(url))
        except urllib2.HTTPError as error:
            logger.error("HTTP error from server: code=%d, response:\n %s" % (error.code, error.read()))
            logger.error('Unable to retrieve %s' % (url))
            return
        except urllib2.URLError as error:
            logger.error("URL or network error: %s" % error.reason)
            logger.error('Unable to retrieve %s' % (url))
            return
        except Exception:
            # Was a bare 'except:', which would also have swallowed
            # KeyboardInterrupt/SystemExit.
            logger.error('Unable to retrieve %s' % (url))
            return
        return result
    else:
        # Local (non-web) path: query the scheduling database directly
        if service == 'obs':
            resdict = tilestatus.getObservationInfo(obsid=gpstime, filename=filename)
        elif service == 'con':
            db = tilestatus.getdb()
            if filename and (not gpstime):
                gpstime = tilestatus.getobsid(filename, db=db)
            tiles = tilestatus.getTiles(reftime=gpstime, db=db)
            resdict = dict([(tileid,dict(value)) for (tileid,value) in tiles.items()])
        else:
            return None
        # Round-trip through JSON so the result matches the web-service types
        return json.loads(json.dumps(resdict))
######################################################################
def fetch_observations(URL=_BASEURL, **kwargs):
    """Search the 'find' web service for observations matching the given
    keyword constraints (e.g. mintime=..., projectid=...).

    Returns a list of result rows.  Returns an empty list when no constraints
    are supplied or when the query fails (previously some error paths
    returned None, which broke callers that iterate the result).
    """
    service='find'
    # Keep only the constraints that were actually specified
    constraints = dict((key, value) for key, value in kwargs.iteritems()
                       if value is not None)
    if not constraints:
        return []
    data = urllib.urlencode(constraints)
    url=URL + '/metadata/' + service + '?' + data
    try:
        logger.debug('Retrieving %s...' % url)
        result = json.load(urllib2.urlopen(url))
    except urllib2.HTTPError as error:
        logger.error("HTTP error from server: code=%d, response:\n %s" % (error.code, error.read()))
        logger.error('Unable to retrieve %s' % (url))
        return []   # was None; callers expect a list
    except urllib2.URLError as error:
        logger.error("URL or network error: %s" % error.reason)
        logger.error('Unable to retrieve %s' % (url))
        return []   # was None; callers expect a list
    except Exception:
        # Was a bare 'except:'; keep the catch-all behavior but do not trap
        # KeyboardInterrupt/SystemExit
        logger.error('Unable to retrieve %s' % (url))
        return []
    return result
######################################################################
def fetch_obsinfo(gpstime, URL=_BASEURL):
    """
    Convenience wrapper: return the observation ('obs') metadata dictionary
    for the given gpstime, or None on error.
    """
    return fetch_metadata(service='obs', gpstime=gpstime, URL=URL)
######################################################################
def fetch_tileinfo(gpstime, URL=_BASEURL):
    """
    Return the MWA tile connection data associated with a given gpstime,
    keyed by integer tile id.  Returns None if the metadata fetch failed.
    """
    tileinfo=fetch_metadata(service='con',
                            gpstime=gpstime,
                            URL=URL)
    if tileinfo is None:
        # fetch failed (network/server error); previously this crashed with
        # AttributeError on tileinfo.keys()
        return None
    # JSON object keys arrive as strings; convert them back to integer tile ids
    newtileinfo={}
    for t in tileinfo.keys():
        newtileinfo[int(t)]=tileinfo[t]
    return newtileinfo
######################################################################
class MWA_Observation_Summary():
    """
    Lightweight summary of a single observation, built from one result row
    of a search query (fetch_observations).

    Row layout (by position): [starttime, obsname, creator, projectid, ra, dec];
    ra/dec (degrees) may be absent from shorter rows.
    """
    def __init__(self, data):
        # obsid == observation start time in GPS seconds
        self.obsid=data[0]
        self.projectid=data[3]
        self.creator=data[2]
        self.obsname=data[1]
        try:
            self.ra=data[4]
            self.dec=data[5]
        except IndexError:
            # was a bare 'except:'; only a short row is expected here
            self.ra=None
            self.dec=None
        if _useastropy:
            self.time=Time(self.obsid,format='gps',scale='utc')
        else:
            self.time = None
    def __str__(self):
        """One tab-separated line matching string_header().

        NOTE(review): assumes self.ra/self.dec are numeric; rows missing
        ra/dec (set to None above) would fail to format — confirm callers
        filter those out.
        """
        if self.dec>=0:
            decsign='+'
        else:
            decsign='-'
        decstring=decsign + '%.1f' % numpy.abs(self.dec)
        # pad to a fixed 5-character column
        while len(decstring)<5:
            decstring=' ' + decstring
        if self.time is not None:
            return '%d\t%s\t%s\t%-15s\t%5.1f\t%s\t%s' % (self.obsid,
                                                         self.time.datetime.strftime('%Y-%m-%dT%H:%M:%S'),
                                                         self.projectid,
                                                         self.creator,
                                                         self.ra,
                                                         decstring,
                                                         self.obsname)
        else:
            return '%d\t%s\t%s\t%-15s\t%5.1f\t%s\t%s' % (self.obsid,
                                                         '--',
                                                         self.projectid,
                                                         self.creator,
                                                         self.ra,
                                                         decstring,
                                                         self.obsname)
    @classmethod
    def string_header(cls):
        """Column header line matching __str__ output.

        (Parameter renamed from 'self' to 'cls': this is a classmethod, so the
        first argument is the class, not an instance.)
        """
        return '# starttime\t\t\t\tproject\tcreator\t\tRA(d)\tDec(d)\tobsname'
######################################################################
class MWA_Observation():
    """
    Holds the fundamental data for a MWA observation.

    The basic information is keyed on observation_number, which is the start
    time in GPS seconds; once that attribute is assigned (see __setattr__),
    all derived times (MJD, UT, LST, epoch, Sun elevation, ...) are computed
    automatically by _settimes_fromgps().

    o=MWA_Observation(input, rfstream=0, ionex=False)
    input is either a gpstime (int) or an obsinfo dictionary as returned by
    fetch_obsinfo().
    """
    ##################################################
    def __init__(self, input,
                 rfstream=0, ionex=False, url=_BASEURL):
        # hours
        self.LST=None
        self.HA=None
        self.MJD=None
        # hours
        self.UT=None
        # seconds
        self.duration=0
        # degrees
        self.sun_elevation=None
        # degrees
        self.azimuth=None
        self.elevation=None
        self.RA=None
        self.Dec=None
        self.ra_phase_center=None
        self.dec_phase_center=None
        self.filename=''
        self.delays=[]
        self.center_channel=-1
        self.channels=[]
        self.year=None
        self.month=None
        self.day=None
        self.hour=None
        self.minute=None
        self.second=None
        self.calibration=None
        self.calibrators=[]
        self.epoch=None
        self.mwatime=None
        self.Tsky=None
        self.inttime=None
        self.fine_channel=None
        self.TEC=None
        self.RM=None
        self.rfstreamnumber=rfstream
        self.ionex=ionex
        obsinfo=None
        # NOTE: rfstreamnumber/ionex must be set before observation_number,
        # because assigning observation_number triggers _settimes_fromgps()
        # via __setattr__
        self.observation_number=None
        if url is None:
            url=_BASEURL
        if isinstance(input, int):
            # it is a gpstime
            gpstime=input
            obsinfo=fetch_obsinfo(gpstime, URL=url)
        elif isinstance(input, dict):
            # assume it is an obsinfo structure
            obsinfo=input
        if obsinfo is not None:
            self.fromobsinfo(obsinfo)
    ##################################################
    def fromobsinfo(self, obsinfo):
        """Populate this object from an obsinfo dictionary (see fetch_obsinfo).

        Derives pointing (RA/Dec, Az/El, HA, delays) from whichever of the
        three RFstream pointing specifications is present.
        """
        # Assigning observation_number computes MJD/UT/LST/etc via __setattr__
        self.observation_number=obsinfo['starttime']
        self.stoptime=obsinfo['stoptime']
        self.duration=self.stoptime-self.observation_number
        self._Schedule_Metadata=obsinfo['metadata']
        self.calibration=self._Schedule_Metadata['calibration']
        self.calibrators=self._Schedule_Metadata['calibrators']
        self.filename=obsinfo['obsname']
        self.inttime=obsinfo['int_time']
        self.fine_channel=obsinfo['freq_res']
        self.creator=obsinfo['creator']
        self.projectid=obsinfo['projectid']
        self.mode=obsinfo['mode']
        self.ra_phase_center=obsinfo['ra_phase_center']
        self.dec_phase_center=obsinfo['dec_phase_center']
        # rfstream keys may be ints or strings depending on the JSON layer
        try:
            RFstream=obsinfo['rfstreams'][self.rfstreamnumber]
        except KeyError:
            try:
                RFstream=obsinfo['rfstreams'][str(self.rfstreamnumber)]
            except KeyError:
                logger.error('RFstream %d not present in observation %d' % (self.rfstreamnumber,
                                                                            self.observation_number))
                return None
        try:
            self.channels=RFstream['frequencies']
            # // keeps integer indexing under both Python 2 and 3
            self.center_channel=RFstream['frequencies'][len(RFstream['frequencies'])//2]
        except IndexError:
            self.center_channel=None
        # Pointing may be given as (RA,Dec), as (Az,El), or as beamformer
        # delays; whichever is present determines the others.
        if 'ra' in RFstream.keys() and RFstream['ra'] is not None:
            logger.info('Found (RA,Dec) in RFstream (%.5f,%.5f)\n' % (
                RFstream['ra'],RFstream['dec']))
            # BUGFIX: RFstream is a dict, so use item access.  The previous
            # attribute access (RFstream.ra / RFstream.dec) raised
            # AttributeError whenever this branch was taken.
            self.RA=RFstream['ra']
            self.Dec=RFstream['dec']
            self.azimuth,self.elevation=ephem_utils.radec2azel(self.RA,self.Dec,
                                                               self.observation_number)
            self.HA=ephem_utils.HA(self.LST,self.RA,self.Dec,self.epoch)/15.0
        elif (RFstream['azimuth'] is not None):
            logger.info('Found (Az,El) in RFstream (%.5f,%.5f)\n' % (
                RFstream['azimuth'],RFstream['elevation']))
            self.azimuth=RFstream['azimuth']
            self.elevation=RFstream['elevation']
            self.RA,self.Dec=ephem_utils.azel2radec(self.azimuth,self.elevation,
                                                    self.observation_number)
            self.HA=ephem_utils.HA(self.LST,self.RA,self.Dec,self.epoch)/15.0
        elif (RFstream['hex'] is not None and len(RFstream['hex'])>0):
            logger.info('Found delays in RFstream (%s)\n' % (
                RFstream['hex']))
            self.delays=[int(x) for x in RFstream['hex'].split(',')]
            self.azimuth,za=delays2azza(self.delays)
            self.elevation=90-za
            self.RA,self.Dec=ephem_utils.azel2radec(self.azimuth,self.elevation,
                                                    self.observation_number)
            self.HA=ephem_utils.HA(self.LST,self.RA,self.Dec,self.epoch)/15.0
        else:
            logger.warning('No coordinate specified in RFstream:\n %s\n' % RFstream)
        if (len(self.delays)==0):
            # still need to get the delays
            self.delays=RFstream['delays']
            if self.delays is None or len(self.delays)==0:
                logger.warning('Unable to find a valid delay setting')
                self.delays=[0]*16
    ##################################################
    def __str__(self):
        """Multi-line human-readable summary of the observation."""
        if (self.observation_number is None):
            return "None"
        s='%s at %d (GPS) [RFstream=%d] [project=%s]\n' % (self.filename,self.observation_number,
                                                           self.rfstreamnumber,
                                                           self.projectid)
        s+='%d (%04d/%02d/%02d) %02d:%02d:%02d (epoch=%.3f), for %d s (Sun at %.1f deg)\n' % (self.MJD,self.year,
                                                                                              self.month,self.day,
                                                                                              self.hour,self.minute,self.second,
                                                                                              self.epoch,
                                                                                              self.duration,
                                                                                              self.sun_elevation)
        if self.center_channel is not None:
            s+='Channels: %s (center=%d)\n' % ((','.join([str(x) for x in self.channels])),self.center_channel)
        if self.inttime is not None and self.fine_channel is not None:
            s+='IntTime: %.1f s; FreqRes: %d kHz\n' % (self.inttime,
                                                       self.fine_channel)
        if (self.LST is not None and self.HA is not None):
            s+='LST=%.3f deg (HA=%s)\n' % (self.LST,
                                           ephem_utils.dec2sexstring(self.HA,digits=0,roundseconds=1))
        if (self.azimuth is not None):
            s+='(Az,El) = (%.3f, %.3f) deg\n' % (self.azimuth,self.elevation)
        if (self.RA is not None):
            s+='(RA,Dec) = (%.3f, %.3f) deg (J2000)\n' % (self.RA,self.Dec)
        if (len(self.delays)>0 and _useplotting):
            # Sky temperature from the primary beam model, X and Y polarizations
            Tx,Ty=primarybeammap.get_skytemp('%04d%02d%02d%02d%02d%02d' % (
                self.year,self.month,self.day,self.hour,self.minute,self.second)
                                             ,self.delays,self.center_channel*1.28,verbose=False)
            s+='Sky Temp (X,Y) = (%.1f, %.1f) K\n' % (Tx,Ty)
        if (len(self.delays)>0):
            s+='delays = %s\n' % (','.join([str(x) for x in self.delays]))
        if (self.calibration is not None):
            if (self.calibration):
                # str() so this works whether calibrators is a string or a
                # list (it is initialized as a list; concatenating a list to
                # str raised TypeError before)
                s+='calibration = True' + ' [' + str(self.calibrators) + ']\n'
            else:
                s+='calibration = False\n'
        if self.TEC is not None:
            s+='Zenith TEC = %.1f TECU\n' % self.TEC
        if self.RM is not None:
            s+='Zenith Rotation Measure = %.2f rad/m^2\n' % self.RM
        return s
    ##################################################
    def __setattr__(self, name, value):
        # Plain attribute assignment, except that setting observation_number
        # (to a non-None value) recomputes all derived times.
        self.__dict__[name]=value
        if (name == 'observation_number' and value is not None):
            # if the observation_number is set, compute everything else
            self._settimes_fromgps()
    ##################################################
    def _settimes_fromgps(self):
        """
        _settimes_fromgps(self)
        if the observation number (starttime) is set, determine the rest of
        the times (MJD, UTC); also figure out LST, Sun altitude, and
        optionally the ionospheric TEC/RM.
        """
        if (self.observation_number is None):
            logger.error('Cannot set times without an observation_number')
        else:
            self.mwatime=ephem_utils.MWATime(gpstime=self.observation_number)
            self.MJD=int(self.mwatime.MJD)
            self.UT=self.mwatime.UT
            self.year=self.mwatime.year
            self.month=self.mwatime.month
            self.day=self.mwatime.day
            self.hour=self.mwatime.hour
            self.minute=self.mwatime.minute
            self.second=self.mwatime.second
            self.LST=float(self.mwatime.LST)
            self.epoch=self.mwatime.epoch
            mwa=ephem_utils.Obs[ephem_utils.obscode['MWA']]
            observer=ephem.Observer()
            # make sure no refraction is included
            observer.pressure=0
            observer.long=mwa.long/ephem_utils.DEG_IN_RADIAN
            observer.lat=mwa.lat/ephem_utils.DEG_IN_RADIAN
            observer.elevation=mwa.elev
            observer.date='%d/%d/%d %s' % (self.year,self.month,self.day,
                                           self.mwatime.strftime("%H:%M:%S"))
            body=ephem.__dict__['Sun']()
            body.compute(observer)
            self.sun_elevation=body.alt*ephem_utils.DEG_IN_RADIAN
            if _USE_IONEX and self.ionex:
                # Zenith total electron content and rotation measure from IONEX maps
                i=ionex.ionexmaps(self.observation_number)
                self.TEC=i(self.observation_number)
                self.RM=i.RM(self.observation_number)
######################################################################
class MWA_tile_config():
    """
    class MWA_tile_config(tileinfo=None, pol='X')

    Configuration of a single tile/polarization input: tile id/name,
    receiver and slot, correlator input number, antenna number,
    polarization, cable flavor and electrical length, flag status,
    per-channel digital gains, beamformer delays and gain, and position.
    Enough information to generate an instr_config file.

    tile is an id like 11, 12; antenna is an ordinal number like 0, 1.
    """
    ##################################################
    def __init__(self, tileinfo=None, pol='X'):
        # default everything to "unknown"
        for attr in ('tile', 'tilename', 'inputnumber', 'antenna',
                     'receiver', 'slot', 'length', 'electrical',
                     'gains', 'delays', 'flavor', 'tile_altitude',
                     'beamformer', 'cable_attenuation', 'beamformer_gain'):
            setattr(self, attr, None)
        self.pol=pol
        self.flag=False
        self.tile_pos_east,self.tile_pos_north=0,0
        if tileinfo is not None:
            self.fromtileinfo(tileinfo)
    ##################################################
    def fromtileinfo(self, tileinfo):
        """Populate this object from a tile-connection dictionary (see fetch_tileinfo)."""
        self.receiver=tileinfo['receiver']
        self.slot=tileinfo['slot']
        if tileinfo['flagged'] is not None:
            self.flag=tileinfo['flagged']
        # X and Y of the same antenna map to adjacent correlator inputs:
        # inputnum n -> 2*(n-1)+1 for X, 2*(n-1)+0 for Y
        if self.pol=='X':
            self.inputnumber=2*(tileinfo['inputnum']-1)+1
            self.beamformer_gain=tileinfo['bfgainx']
        elif self.pol=='Y':
            self.inputnumber=2*(tileinfo['inputnum']-1)+0
            self.beamformer_gain=tileinfo['bfgainy']
        self.flavor=tileinfo['flavor']
        self.gains=tileinfo['dgains']
        self.tile_pos_east,self.tile_pos_north=tileinfo['pos']
        self.tile_altitude=tileinfo['altitude']
        # 'ted' is the total electrical length of the cable
        self.length=tileinfo['ted']
        self.electrical=True
        self.beamformer=tileinfo['bf']
        self.cable_attenuation=tileinfo['catten']
        self.tile=tileinfo['id']
        self.tilename='Tile%03d' % self.tile
    def __str__(self):
        """e.g. 'Tile011X'."""
        return self.tilename + self.pol
######################################################################
class instrument_configuration():
"""
"""
##################################################
def __init__(self, input,
             rfstream=0, min_bad_dipoles=2, coarse_channels=24,
             n_inputs=_NINP_128T, timeoffset=0, lock=False, url=_BASEURL):
    """Set up default state, then populate from `input`, which is either a
    GPS start time (int) or an obsinfo dictionary from fetch_obsinfo()."""
    # tile / receiver bookkeeping
    self.min_bad_dipoles=min_bad_dipoles
    self.tiles={}
    self.inputs={}
    self.receivers=set()
    # observation and pointing
    self.duration=0
    self.obs=None
    self.RA=None
    self.HA=None
    self.Dec=None
    self.rfstreamnumber=rfstream
    # channel / correlator configuration
    self.channel_selection=None
    self.coarse_channels=coarse_channels
    self.coarse_channel=None
    self.ninputs=_NINP_128T
    self.ninputsperrx=_INPUTSPERRX
    self.n_inputs=n_inputs
    self.timeoffset=timeoffset
    self.n_scans=None
    self.corrtype='B'
    self.invert_freq=0
    self.conjugate=1
    self.lock=lock
    self.url=url if url is not None else _BASEURL
    # accept either a gpstime or a ready-made obsinfo dictionary
    obsinfo=None
    if isinstance(input, int):
        obsinfo=fetch_obsinfo(input, URL=self.url)
    elif isinstance(input, dict):
        obsinfo=input
    if obsinfo is not None:
        self.fromobsinfo(obsinfo)
##################################################
    def fromobsinfo(self, obsinfo):
        """
        Populate observation-level fields (times, channels, pointing, RF
        stream) from an obsinfo dictionary, then fetch the tile data.
        NOTE(review): error paths return False or None inconsistently;
        callers should treat any non-None return as failure.
        """
        self.gpstime=obsinfo['starttime']
        self.stoptime=obsinfo['stoptime']
        self.obs=MWA_Observation(obsinfo, rfstream=0, ionex=False)
        self.duration=self.stoptime-self.gpstime
        self.mwatime=ephem_utils.MWATime(gpstime=self.gpstime)
        # default to selecting every coarse channel of the observation
        if self.channel_selection is None or len(self.channel_selection)==0:
            self.channel_selection=numpy.arange(len(self.obs.channels))
        if isinstance(self.channel_selection,list):
            self.channel_selection=numpy.array(self.channel_selection)
        if len(self.channel_selection)<self.coarse_channels:
            logger.warning('Will only select %d coarse channels' % len(self.channel_selection))
            self.coarse_channels=len(self.channel_selection)
        if self.n_scans is None:
            self.n_scans=int((self.obs.duration)/self.obs.inttime)
        # in MHz
        self.bandwidth=self.coarse_channels * 1.28
        # fine channel is in kHz
        self.n_chans=int(self.bandwidth*1e3/(self.obs.fine_channel))
        # determine the center coarse channel
        if (self.coarse_channels==24 and len(self.channel_selection)==self.coarse_channels):
            self.channel=self.obs.center_channel
        else:
            if self.coarse_channels > 1:
                # pick the middle of the selected channels
                # (Python 2 integer division)
                try:
                    self.channel=numpy.array(self.obs.channels)[self.channel_selection][self.coarse_channels/2]
                except IndexError:
                    # channels may be a scalar center channel; reconstruct
                    # the 24-channel band around it
                    if (isinstance(self.obs.channels,int)):
                        self.channel=numpy.arange(self.obs.channels-12,self.obs.channels+12)[self.coarse_channels/2]
                    elif len(self.obs.channels)==1:
                        self.channel=numpy.arange(self.obs.channels[0]-12,self.obs.channels[0]+12)[self.coarse_channels/2]
            else:
                # a single coarse channel: we must be told which one
                if self.coarse_channel is None:
                    logger.error('Need to specify which coarse channel is being processed')
                    return False
                self.channel=self.obs.channels[self.coarse_channel]
                logger.info('Selecting coarse channel number %d: %d' % (self.coarse_channel,self.channel))
        if self.RA is None:
            try:
                self.RA=self.obs.RA
                self.Dec=self.obs.Dec
            except:
                # no explicit pointing: fall back to the phase center
                logger.info('Setting RA,Dec from phase center')
                self.RA=self.obs.ra_phase_center
                self.Dec=self.obs.dec_phase_center
        self.RFstreams=obsinfo['rfstreams']
        # the primary RFstream is just used for pointing info
        try:
            self.RFstream=obsinfo['rfstreams'][self.rfstreamnumber]
        except KeyError:
            # keys may be strings depending on how obsinfo was fetched
            try:
                self.RFstream=obsinfo['rfstreams'][str(self.rfstreamnumber)]
            except KeyError:
                logger.error('RFstream %d not present in observation %d' % (self.rfstreamnumber,
                                                                            self.gpstime))
                return None
        self.gettileinfo()
##################################################
# def __getattr__(self, name):
# """
# can get attributes from MWA_Observation instance if needed
# """
# if name in self.__dict__.keys():
# return self.__dict__[name]
# else:
# if name in self.obs.__dict__.keys():
# return self.obs.__dict__[name]
# # if we get here it's to raise an exception
# return self.__dict__[name]
##################################################
    def gettileinfo(self):
        """
        Fetch the per-tile database information and build self.tiles and
        self.inputs, assign the beamformer delays (marking bad dipoles),
        then number the antennas and flag bad tiles.
        Returns None if the tile information could not be fetched.
        """
        tileinfo=fetch_tileinfo(self.gpstime, URL=self.url)
        if tileinfo is None:
            return None
        # pass 1: create MWA_tile_config objects for every tile and
        # polarisation that appears in any RF stream
        for t in tileinfo.keys():
            for stream in self.RFstreams.keys():
                RFstream=self.RFstreams[stream]
                # figure out which tiles and which polarizations
                # are included in this RFstream
                if t in RFstream['tileset']['xlist'] and t in RFstream['tileset']['ylist']:
                    logger.debug('Tile %d in RFstream %d' % (t,RFstream['number']))
                    self.tiles[t]={'X': MWA_tile_config(tileinfo[t], pol='X'),
                                   'Y': MWA_tile_config(tileinfo[t], pol='Y')}
                    self.inputs[self.tiles[t]['X'].inputnumber]=self.tiles[t]['X']
                    self.inputs[self.tiles[t]['Y'].inputnumber]=self.tiles[t]['Y']
                elif t in RFstream['tileset']['xlist']:
                    self.tiles[t]={'X': MWA_tile_config(tileinfo[t], pol='X')}
                    self.inputs[self.tiles[t]['X'].inputnumber]=self.tiles[t]['X']
                elif t in RFstream['tileset']['ylist']:
                    self.tiles[t]={'Y': MWA_tile_config(tileinfo[t], pol='Y')}
                    self.inputs[self.tiles[t]['Y'].inputnumber]=self.tiles[t]['Y']
        # pass 2: determine the delays
        # NOTE(review): this pass indexes self.tiles[t]['X'] and ['Y']
        # unconditionally; a tile created with only one polarisation above
        # would raise KeyError here (pass 3 guards with try/except) --
        # confirm single-polarisation tiles cannot occur.
        for t in sorted(self.tiles.keys()):
            for stream in self.RFstreams.keys():
                RFstream=self.RFstreams[stream]
                # get the delays for the tiles
                if self.tiles[t]['X'].tile in RFstream['tileset']['xlist']:
                    self.tiles[t]['X'].delays=numpy.array(RFstream['delays'])
                    # and flag individual dipole delays if they are listed as bad
                    if t in RFstream['bad_dipoles'].keys() and len(RFstream['bad_dipoles'][t][0])>0:
                        self.tiles[t]['X'].delays[numpy.array(RFstream['bad_dipoles'][t][0])-1]=_BAD_DIPOLE_DELAY
                if self.tiles[t]['Y'].tile in RFstream['tileset']['ylist']:
                    self.tiles[t]['Y'].delays=numpy.array(RFstream['delays'])
                    # and flag individual dipole delays if they are listed as bad
                    if t in RFstream['bad_dipoles'].keys() and len(RFstream['bad_dipoles'][t][1])>0:
                        self.tiles[t]['Y'].delays[numpy.array(RFstream['bad_dipoles'][t][1])-1]=_BAD_DIPOLE_DELAY
        # delays should be filled now
        # pass 3: assign antenna numbers and flag bad tiles/inputs
        antenna_number=0
        for t in sorted(self.tiles.keys()):
            for stream in self.RFstreams.keys():
                RFstream=self.RFstreams[stream]
                try:
                    tilex=self.tiles[t]['X']
                    self.receivers.add(tilex.receiver)
                    tilex.antenna=antenna_number
                except:
                    tilex=None
                try:
                    tiley=self.tiles[t]['Y']
                    self.receivers.add(tiley.receiver)
                    tiley.antenna=antenna_number
                except:
                    tiley=None
                # flag tiles if too many of their delays are "bad"
                if tilex is not None and ((tilex.delays==_BAD_DIPOLE_DELAY).sum()>= self.min_bad_dipoles) and not tilex.flag:
                    logger.info('Flagging tile %d because %d X dipole(s) are bad' % (tilex.tile,
                                                                                    (tilex.delays==_BAD_DIPOLE_DELAY).sum()))
                    tilex.flag=True
                    # flag the partner polarisation as well
                    if tiley is not None:
                        tiley.flag=True
                if tiley is not None and ((tiley.delays==_BAD_DIPOLE_DELAY).sum()>= self.min_bad_dipoles) and not tiley.flag:
                    logger.info('Flagging tile %d because %d Y dipole(s) are bad' % (tiley.tile,
                                                                                    (tiley.delays==_BAD_DIPOLE_DELAY).sum()))
                    if tilex is not None:
                        tilex.flag=True
                    tiley.flag=True
                # flag if they are listed in the bad_tiles set
                if t in RFstream['bad_tiles']:
                    logger.info('Flagging tile %d' % t)
                    if tilex is not None:
                        tilex.flag=True
                    if tiley is not None:
                        tiley.flag=True
            antenna_number+=1
##################################################
    def __str__(self):
        """Return the contents of an instr_config.txt file for corr2uvfits:
        one line per correlator input with antenna, polarisation, cable
        length delta and flag."""
        preamble="""##################################################
# this file maps inputs into the receiver/correlator to antennas and polarisations.
# in addition, a cable length delta (in meters) can be specified
# the first column is not actually used by the uvfits writer, but is there as
# an aide to human readers. Inputs are ordered from 0 to n_inp-1
# antenna numbering starts at 0 and is an index into the corresponding antenna_locations.txt file
# lines beginning with '\#' and blank lines are ignored. Do not leave spaces in empty lines.
#
# Input flagging: put a 1 in the flag column to flag all data from that input.
# 0 means no flag.
# Cable lengths: if length is prefixed by EL_ then no velocity correction factor is needed
"""
        s=preamble
        #s+='# Written by %s\n' % (__file__.split('/')[-1])
        s+='# for observation at %d\n' % (self.gpstime)
        now=datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
        s+='# %s\n' % now
        s+="""##################################################
# INPUT ANTENNA POL DELTA FLAG
"""
        # one line per correlator input, in input-number order
        for inputnumber in sorted(self.inputs.keys()):
            if (self.inputs[inputnumber].flag):
                f=1
            else:
                f=0
            length='%.2f' % self.inputs[inputnumber].length
            if self.inputs[inputnumber].electrical:
                # EL_ prefix: electrical length, no velocity factor needed
                length='EL_' + length
            s+="%d\t%d\t%s\t%s\t%d" % (inputnumber, self.inputs[inputnumber].antenna,
                                       self.inputs[inputnumber].pol.upper(),
                                       length, f)
            # trailing comment for human readers only
            s+=' # Rx%03d Slot%02d %s\n' % (self.inputs[inputnumber].receiver,
                                            self.inputs[inputnumber].slot,
                                            self.inputs[inputnumber].tilename)
        return s
##################################################
def instr_config(self):
return self.__str__()
##################################################
    def antenna_locations(self):
        """Return the contents of an antenna_locations.txt file for
        corr2uvfits: one east/north/height line per tile."""
        preamble="""# lines beginning with \'#\' and blank lines are ignored. Do not leave spaces in empty lines.
# locations of antennas relative to the centre of the array in local topocentric
# \"east\", \"north\", \"height\". Units are meters.
# Format: Antenna_name east north height
# antenna names must be 8 chars or less
# fields are separated by white space
"""
        s=preamble
        #s+='# Written by %s\n' % (__file__.split('/')[-1])
        s+='# for observation at %d\n' % (self.gpstime)
        now=datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
        s+='# %s\n' % now
        for t in sorted(self.tiles.keys()):
            # either polarisation carries the same position information
            tile=None
            if 'X' in self.tiles[t].keys():
                tile=self.tiles[t]['X']
            elif 'Y' in self.tiles[t].keys():
                tile=self.tiles[t]['Y']
            if tile is not None:
                s+='%s %09.3f %09.3f %08.3f\n' % (tile.tilename,tile.tile_pos_east,
                                                  tile.tile_pos_north,tile.tile_altitude)
        return s
##################################################
    def make_metafits(self, quick=False):
        """
        Build and return a pyfits.HDUList for a metafits file: a primary
        HDU whose header carries the observation metadata plus (unless
        quick=True) a TILEDATA binary table with the per-input mapping.
        """
        h=pyfits.PrimaryHDU()
        head=h.header
        head.set('GPSTIME',self.gpstime,'[s] GPS time of observation start')
        if self.duration>0:
            head.set('EXPOSURE',self.duration,'[s] duration of observation')
        # same channel-selection defaulting as fromobsinfo()
        if self.channel_selection is None or len(self.channel_selection)==0:
            self.channel_selection=numpy.arange(len(self.obs.channels))
        if isinstance(self.channel_selection,list):
            self.channel_selection=numpy.array(self.channel_selection)
        head.set('FILENAME',self.obs.filename,'Name of observation')
        head.set('MJD',self.obs.MJD,'[days] MJD of observation')
        head.set('DATE-OBS','%04d-%02d-%02dT%02d:%02d:%02d' % (
            self.obs.year,
            self.obs.month,self.obs.day,
            self.obs.hour,self.obs.minute,self.obs.second),'[UT] Date and time of observation')
        # optional pointing information, written only when available
        if self.obs.LST is not None:
            head.set('LST',self.obs.LST,'[deg] LST')
        if self.obs.HA is not None:
            head.set('HA',ephem_utils.dec2sexstring(self.obs.HA,digits=0,roundseconds=1),
                     '[hours] hour angle')
        if (self.obs.azimuth is not None):
            head.set('AZIMUTH',self.obs.azimuth,'[deg] Azimuth of pointing center')
            head.set('ALTITUDE',self.obs.elevation,'[deg] Altitude of pointing center')
        if (self.obs.RA is not None):
            head.set('RA',self.obs.RA,'[deg] RA of pointing center')
            head.set('DEC',self.obs.Dec,'[deg] Dec of pointing center')
        if (self.obs.ra_phase_center is not None):
            head.set('RAPHASE',self.obs.ra_phase_center,
                     '[deg] RA of desired phase center')
            head.set('DECPHASE',self.obs.dec_phase_center,
                     '[deg] DEC of desired phase center')
        if self.obs._Schedule_Metadata is not None:
            head.set('SUN-DIST',self.obs._Schedule_Metadata['sun_pointing_distance'],
                     '[deg] Distance from pointing center to Sun')
            head.set('MOONDIST',self.obs._Schedule_Metadata['moon_pointing_distance'],
                     '[deg] Distance from pointing center to Moon')
            head.set('JUP-DIST',self.obs._Schedule_Metadata['jupiter_pointing_distance'],
                     '[deg] Distance from pointing center to Jupiter')
            if len(self.obs._Schedule_Metadata['gridpoint_name'])>0:
                head.set('GRIDNAME',self.obs._Schedule_Metadata['gridpoint_name'],
                         'Pointing grid name')
                head.set('GRIDNUM',self.obs._Schedule_Metadata['gridpoint_number'],
                         'Pointing grid number')
        head.set('CREATOR',self.obs.creator.strip(),'Observation creator')
        head.set('PROJECT',self.obs.projectid.strip(),'Project ID')
        head.set('MODE',self.obs.mode,'Observation mode')
        if (len(self.receivers)>0):
            head.set('RECVRS',','.join([str(x) for x in self.receivers]),'Active receivers')
        if (len(self.obs.delays)>0):
            head.set('DELAYS',','.join([str(x) for x in self.obs.delays]),'Beamformer delays')
        if (self.obs.calibration is not None):
            if (self.obs.calibration):
                head.set('CALIBRAT',True,'Intended for calibration')
                head.set('CALIBSRC',self.obs.calibrators.strip(),'Calibrator source')
            else:
                head.set('CALIBRAT',False,'Intended for calibration')
        gains=None
        if self.obs.center_channel is not None:
            head.set('CENTCHAN',self.obs.center_channel,'Center coarse channel')
        head.set('CHANNELS',','.join([str(x) for x in numpy.array(self.obs.channels)[self.channel_selection]]),'Coarse channels')
        #if gains is not None:
        #    head.set('CHANGAIN',','.join([str(x) for x in gains]),'Coarse channel gains')
        head.set('CHANSEL',','.join(map(str,self.channel_selection)),'Subset of total channels used')
        head.set('SUN-ALT',self.obs.sun_elevation,'[deg] Altitude of Sun')
        try:
            head.set('FIBRFACT',self.fiber_velocity_factor,'Fiber velocity factor')
        except AttributeError:
            # fiber_velocity_factor is optional
            pass
        head.set('FINECHAN',self.obs.fine_channel,'[kHz] Fine channel width')
        head.set('INTTIME',self.obs.inttime,'[s] Individual integration time')
        #head.set('TILEFLAG',','.join([str(x) for x in self.tiles_to_flag]),'Tiles flagged')
        nav_freq=int(self.obs.fine_channel/10)
        head.set('NAV_FREQ',nav_freq,'Assumed frequency averaging')
        head.set('NSCANS',self.n_scans,'Number of scans (time instants) in correlation products')
        head.set('NINPUTS',self.n_inputs,'Number of inputs into the correlation products')
        head.set('NCHANS',self.n_chans,'Number of fine channels in spectrum')
        head.set('BANDWDTH',self.bandwidth,'[MHz] Total bandwidth')
        # correct the center frequency for the frequency-averaging offset
        head.set("FREQCENT",channel2frequency(self.channel)+(nav_freq-1)*0.005,
                 '[MHz] Center frequency of observation')
        # NOTE(review): hard-coded 0 although self.timeoffset exists --
        # confirm this is intended.
        head.set('TIMEOFF',0,
                 '[s] Offset between observation starttime and start of correlations')
        head.set('DATESTRT',self.mwatime.strftime('%Y-%m-%dT%H:%M:%S'),
                 '[UT] Date and time of correlations start')
        head.set('VERSION',_VERSION,'METAFITS version number')
        head.set('MWAVER',mwapy.__version__,'MWAPY version number')
        head.set('MWADATE',mwapy.__date__,'MWAPY version date')
        head.set('TELESCOP','MWA128T')
        # per-input column data for the TILEDATA table
        Input=[]
        Antenna=[]
        Pol=[]
        Delta=[]
        Flag=[]
        Length=[]
        Rx=[]
        Slot=[]
        Tile=[]
        North=[]
        East=[]
        Height=[]
        Gains=[]
        Delays=[]
        for inputnumber in sorted(self.inputs.keys()):
            tile=self.inputs[inputnumber]
            if (tile.flag):
                f=1
            else:
                f=0
            length='%.2f' % tile.length
            if tile.electrical:
                # EL_ prefix: electrical length, no velocity factor needed
                length='EL_' + length
            Input.append(inputnumber)
            Antenna.append(tile.antenna)
            Pol.append(tile.pol.upper())
            Length.append(length)
            Flag.append(f)
            Rx.append(tile.receiver)
            Slot.append(tile.slot)
            Tile.append(tile.tile)
            North.append(tile.tile_pos_north)
            East.append(tile.tile_pos_east)
            Height.append(tile.tile_altitude)
            # select only the appropriate values for the channels used
            Gains.append(numpy.array(tile.gains)[numpy.array(self.obs.channels)][self.channel_selection])
            Delays.append(numpy.array(tile.delays))
        if not quick:
            col1=pyfits.Column(name='Input',format='I',array=Input)
            col2=pyfits.Column(name='Antenna',format='I',array=Antenna)
            col3=pyfits.Column(name='Tile',format='I',array=Tile)
            col4=pyfits.Column(name='Pol',format='A',array=Pol)
            col5=pyfits.Column(name='Rx',format='I',array=Rx)
            col6=pyfits.Column(name='Slot',format='I',array=Slot)
            col7=pyfits.Column(name='Flag',format='I',array=Flag)
            col8=pyfits.Column(name='Length',format='A14',array=Length)
            col9=pyfits.Column(name='North',format='E',unit='m',array=North)
            col10=pyfits.Column(name='East',format='E',unit='m',array=East)
            col11=pyfits.Column(name='Height',format='E',unit='m',array=Height)
            col12=pyfits.Column(name='Gains',format='%dI' % len(Gains[0]),array=Gains)
            col13=pyfits.Column(name='Delays',format='%dI' % len(Delays[0]), array=Delays)
            try:
                # this works in the newer pyfits (and astropy?)
                # but not the older versions
                tbhdu=pyfits.BinTableHDU.from_columns([col1,col2,col3,col4,col5,col6,col7,col8,col9,col10,col11,col12,col13])
                tbhdu.name='TILEDATA'
            except:
                # in the newer pyfits/astropy this produces lots of warnings
                tbhdu=pyfits.new_table([col1,col2,col3,col4,col5,col6,col7,col8,col9,col10,col11,col12,col13])
                tbhdu.update_ext_name('TILEDATA',comment='Data about the tile/slot/Rx mapping')
            #tbhdu=pyfits.BinTableHDU(name='TILEDATA')
            #tbhdu.from_columns([col1,col2,col3,col4,col5,col6,col7,col8,col9,col10,col11,col12,col13])
            hdulist=pyfits.HDUList([h,tbhdu])
        else:
            hdulist=pyfits.HDUList([h])
        now=datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
        head.set('DATE',now,'UT Date of file creation')
        #head.add_comment('Written by %s\n' % (__file__.split('/')[-1]))
        #hdulist=pyfits.HDUList([h,tbhdu])
        return hdulist
##################################################
    def make_header(self):
        """
        make_header(self)

        Build the corr2uvfits header.txt contents and store them in
        self.header.  Returns False when RA/Dec or gpstime are missing.
        """
        if self.RA is None or self.Dec is None:
            logger.error('Cannot construct header.txt without valid RA and Dec')
            return False
        if self.gpstime is None:
            logger.error('Cannot construct header.txt without valid gpstime')
            return False
        # make header.txt
        header="# uvfits header obs id: %d (%s after chopping off first %d sec of data)\n" % (
            self.gpstime,self.mwatime.strftime('%Y/%m/%d %H:%M:%S'),self.timeoffset)
        header+="# blank lines and lines beginning with \'#\' are ignored. Do not leave spaces in empty lines.\n"
        header+="# line format: key value comments\n"
        header+="FIELDNAME %s\n" % self.obs.filename
        header+="N_SCANS %-3d # number of scans (time instants) in correlation products\n" % (self.n_scans)
        header+="N_INPUTS %-3d # number of inputs into the correlation products\n" % (self.n_inputs)
        header+="N_CHANS %-3d # number of channels in spectrum\n" % (self.n_chans)
        header+="CORRTYPE %s # correlation type to use. \'C\'(cross), \'B\'(both), or \'A\'(auto)\n" % (self.corrtype)
        header+="INT_TIME %.1f # integration time of scan in seconds\n" % (self.obs.inttime)
        if self.channel is not None:
            nav_freq=int(self.obs.fine_channel/10)
            # correct for averaging
            header+="FREQCENT %.3f # observing center freq in MHz\n" % (
                channel2frequency(self.channel)+(nav_freq-1)*0.005)
        else:
            logger.warning('No center channel specified; corr2uvfits will not be happy')
        header+="BANDWIDTH %.3f # total bandwidth in MHz\n" % (self.bandwidth)
        header+="RA_HRS %.6f # the RA of the desired phase centre (hours)\n" % (self.obs.ra_phase_center/15.0)
        if self.lock:
            # NOTE(review): self.HA is initialized to None in __init__ and
            # not set in this class; with lock=True this %f would raise --
            # confirm a caller assigns HA first.
            header+="HA_HRS %.6f # the HA at the *start* of the scan. (hours)\n" % (self.HA)
        header+="DEC_DEGS %.4f # the DEC of the desired phase centre (degs)\n" % (self.obs.dec_phase_center)
        header+="DATE %s # YYYYMMDD\n" % (self.mwatime.strftime('%Y%m%d'))
        header+="TIME %s # HHMMSS\n" % (self.mwatime.strftime('%H%M%S'))
        header+="INVERT_FREQ %d # 1 if the freq decreases with channel number\n" % (self.invert_freq)
        self.header=header
##################################################
def delays2azza(xx):
    """
    # From Chris Williams
    # receiverStatusPy/StatusTools.py
    ################################
    # delays2azza(xx)
    #
    # This takes a 16-element integer array of delay settings (each element of the array xx should be an integer from 0 to 31 in
    # units of the delay step on the delay boards). It uses several triangles of elements to determine roughly what the pointing
    # direction is from the delay settings that the beamformer has
    #
    # It returns a tuple containing (average azimuth, average zenith angle) determined by averaging the angles determined by the
    # selected triangles
    """
    dip_sep=1.10        # dipole separation on a tile, meters
    delaystep=435       # delay in picoseconds per delay step
    azs=[]
    zas=[]
    # choose triangles (index triples into the 4x4 dipole grid, row-major)
    # to back out the delays...
    triangles=[(0, 15, 12), (0, 15, 3), (3, 12, 15), (0, 3, 12)]
    for i, j, k in triangles:
        # delay (picoseconds) and x/y offset (meters) of each vertex;
        # column = index % 4, row = index // 4 on the 4x4 grid
        d1=delaystep*xx[i]
        ox1=(-1.5+(i%4)*1.0)*dip_sep
        oy1=(1.5-math.floor(i/4))*dip_sep
        d2=delaystep*xx[j]
        ox2=(-1.5+(j%4)*1.0)*dip_sep
        oy2=(1.5-math.floor(j/4))*dip_sep
        d3=delaystep*xx[k]
        ox3=(-1.5+(k%4)*1.0)*dip_sep
        oy3=(1.5-math.floor(k/4))*dip_sep
        az,za=triangulate(d1,ox1,oy1,d2,ox2,oy2,d3,ox3,oy3)
        if az is not None:
            azs.append(az)
            zas.append(za)
        else:
            # Bad (colinear or inconsistent) triangle; skip it
            #logging.warning("Bad delay triangle: %i %i %i"%(i,j,k))
            pass
    if len(azs)==0 or len(zas)==0:
        logging.warning("Can't triangulate a pointing...")
        return None,None
    # average of the per-triangle solutions
    return sum(azs)/len(azs), sum(zas)/len(zas)
##################################################
def triangulate(d1,ox1,oy1,d2,ox2,oy2,d3,ox3,oy3):
    """
    ################################
    # triangulate(d1,ox1,oy1,d2,ox2,oy2,d3,ox3,oy3)
    #
    # This function triangulates the azimuth and zenith angle from 3 positions/delays of dipoles on a tile
    #
    # d1,d2,d3 are the delays (in picoseconds) between the three elements
    # ox[1,2,3] are the x position offsets between the 3 elements
    # oy[1,2,3] are the y position offsets between the 3 elements
    #
    # It returns a tuple which contains the (azimuth, zenith angle) in degrees
    # that is pointed at by the combination of 3 elements (its the intersection of 3 great circles)
    # It will return (None,None) if the triangle is colinear (i.e. not a triangle!)
    # or if the delays are inconsistent with the geometry (math domain error)
    """
    dtor=0.0174532925      # degrees -> radians
    c=0.000299798          # c in m/picosecond
    try:
        # take the arctan to get the azimuth
        az=math.atan2((d3-d1)*(oy2-oy1)-(d2-d1)*(oy3-oy1),(d2-d1)*(ox3-ox1)-(d3-d1)*(ox2-ox1))
        if d1-d2 == 0 and d1-d3 == 0:
            # all delays equal: pointing at zenith
            return 0.0,0.0
        # solve for the zenith angle using whichever baseline is not
        # (near-)perpendicular to the azimuth direction (i.e. the triangle
        # is not colinear)
        denom23=(ox2-ox3)*math.sin(az)+(oy2-oy3)*math.cos(az)
        if abs(denom23) > 1e-15:
            za=math.asin((d2-d3)*c/denom23)
        else:
            denom13=(ox1-ox3)*math.sin(az)+(oy1-oy3)*math.cos(az)
            if abs(denom13) > 1e-15:
                za=math.asin((d1-d3)*c/denom13)
            else:
                # colinear triangle: no unique solution
                return None,None
        azd=az/dtor
        zad=za/dtor
    except ValueError:
        # math domain error from asin (|argument| > 1): the delays are
        # inconsistent with the geometry.  (Was a bare except:, which also
        # hid genuine programming errors such as TypeError.)
        return None,None
    # normalize: zenith angle non-negative, azimuth in [0, 360)
    if zad < 0:
        zad*=-1
        azd+=180
    while azd <0:
        azd+=360
    while azd >= 360:
        azd-=360
    return azd,zad
######################################################################
def channel2frequency(channel):
    """
    Convert a coarse-channel number into its center frequency in MHz,
    assuming 1.28 MHz coarse channels (and 10 kHz fine channels).
    """
    return channel * 1.28 - 0.64
######################################################################
def from_iterable(iterable):
    """
    from_iterable(['ABC', 'DEF']) --> A B C D E F
    Lazily flatten one level of nesting; a stand-in for
    itertools.chain.from_iterable, which python 2.5 lacks.
    """
    for inner in iterable:
        for item in inner:
            yield item
######################################################################
def ternary(condition, value1, value2):
    """
    Function form of the conditional expression (`a if c else b`), kept
    because python 2.4 has no ternary operator.
    Returns value1 when condition is truthy, value2 otherwise.
    """
    if condition:
        return value1
    return value2
##################################################
def find_observations(GPSstart, GPSstop, limit=1000, url=_BASEURL):
    """
    Return the gpstimes (start times) of all observations between
    GPSstart and GPSstop, up to limit rows; [] when nothing is found.
    """
    rows = fetch_observations(URL=url,
                              mintime=GPSstart,
                              maxtime=GPSstop,
                              limit=limit)
    if rows is None or len(rows) == 0:
        return []
    # the gpstime is the first column of each result row
    return [row[0] for row in rows]
##################################################
def find_closest_observation(GPStime, maxdiff=10, url=_BASEURL):
    """
    Return the gpstime of the observation closest to GPStime, searching
    within +/- maxdiff/2 seconds; None when nothing is found.
    """
    half = maxdiff/2
    candidates = numpy.array(find_observations(GPStime - half, GPStime + half,
                                               url=url))
    if candidates is None or len(candidates) == 0:
        return None
    offsets = numpy.abs(candidates - GPStime)
    # first candidate at the minimum offset
    return (candidates[offsets == offsets.min()])[0]
| ryandougherty/mwa-capstone | MWA_Tools/mwapy/metadata.py | Python | gpl-2.0 | 50,133 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (C) 2005-2008 Francisco José Rodríguez Bogado, #
# Diego Muñoz Escalante. #
# (pacoqueen@users.sourceforge.net, escalant3@users.sourceforge.net) #
# #
# This file is part of GeotexInn. #
# #
# GeotexInn is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 2 of the License, or #
# (at your option) any later version. #
# #
# GeotexInn is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with GeotexInn; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA #
###############################################################################
###################################################################
## cuentas_destino.py -- Cuentas bancarias de la empresa para
## transferencias en pagos.
###################################################################
## NOTAS:
##
###################################################################
## Changelog:
## 21 de febrero de 2007 -> Inicio
##
###################################################################
import sys, os
from ventana import Ventana
import utils
import pygtk
pygtk.require('2.0')
import gtk, gtk.glade, time, datetime
try:
import pclases
from seeker import VentanaGenerica
except ImportError:
sys.path.append(os.path.join('..', 'framework'))
import pclases
from seeker import VentanaGenerica
from utils import _float as float
class CuentasDestino(Ventana, VentanaGenerica):
    """
    Window for maintaining the company's destination bank accounts
    (used for transfer payments).
    """
    def __init__(self, objeto = None, usuario = None):
        """
        Constructor. objeto can be a pclases object with which to start
        the window (instead of the first row of the table, which is the
        one shown by default).
        """
        self.usuario = usuario
        self.clase = pclases.CuentaDestino
        # maps each persistent column name to its widget in the glade file
        self.dic_campos = {"nombre": "e_nombre",
                           "observaciones": "e_observaciones",
                           "banco": "e_banco",
                           "swif": "e_swif",
                           "iban": "e_iban",
                           "cuenta": "e_cuenta",
                           "nombreBanco": "e_nombre_banco",
                           "proveedorID": "cbe_proveedor",
                          }
        Ventana.__init__(self, 'cuentas_destino.glade', objeto)
        connections = {'b_salir/clicked': self.salir,
                       'b_nuevo/clicked': self.nuevo,
                       'b_borrar/clicked': self.borrar,
                       'b_actualizar/clicked': self.actualizar_ventana,
                       'b_guardar/clicked': self.guardar,
                       'b_buscar/clicked': self.buscar
                      }
        self.add_connections(connections)
        self.inicializar_ventana()
        if self.objeto == None:
            self.ir_a_primero()
        else:
            self.ir_a(objeto)
        # blocks here until the window is closed
        gtk.main()
    def es_diferente(self):
        """
        Return True if any value in the window differs from the values
        of the current object.
        NOTE(review): when self.objeto is None the getattr() inside the
        loop would raise AttributeError -- confirm callers guard this.
        """
        if self.objeto == None:
            igual = True
        else:
            igual = self.objeto != None
        for colname in self.dic_campos:
            col = self.clase.sqlmeta.columns[colname]
            try:
                valor_ventana = self.leer_valor(col, self.dic_campos[colname])
            except (ValueError, TypeError):
                # unparsable widget contents count as a difference;
                # the short-circuit below avoids using the unbound value
                igual = False
            valor_objeto = getattr(self.objeto, col.name)
            if isinstance(col, pclases.SODateCol):
                # compare dates in their canonical form
                valor_objeto = utils.abs_fecha(valor_objeto)
            igual = igual and (valor_ventana == valor_objeto)
            if not igual:
                break
        return not igual
    def inicializar_ventana(self):
        """
        Initialize the window controls: set their default values, disable
        the unnecessary ones, fill the combos and format the TreeView
        -- if there is one.
        """
        # Initially NOTHING is shown.  The user is only offered the
        # options of searching or creating a new record.
        self.activar_widgets(False)
        self.wids['b_actualizar'].set_sensitive(False)
        self.wids['b_guardar'].set_sensitive(False)
        self.wids['b_nuevo'].set_sensitive(True)
        self.wids['b_buscar'].set_sensitive(True)
        # Initialization of the rest of the widgets:
        cols = (('Proveedor', 'gobject.TYPE_STRING', False, True, True, None),
                ('Cuenta origen', 'gobject.TYPE_STRING', False, True, False, None),
                ('Importe', 'gobject.TYPE_STRING', False, True, False, None),
                ('Fecha', 'gobject.TYPE_STRING', False, True, False, None),
                ('Observaciones', 'gobject.TYPE_STRING', False, True, False, None),
                ('Concepto', 'gobject.TYPE_STRING', False, True, False, None), # Invoice number or LOGIC account.
                ('ID', 'gobject.TYPE_INT64', False, False, False, None))
        utils.preparar_listview(self.wids['tv_transferencias'], cols)
        utils.rellenar_lista(self.wids['cbe_proveedor'], [(p.id, p.nombre) for p in pclases.Proveedor.select(orderBy = "nombre")])
    def activar_widgets(self, s, chequear_permisos = True):
        """
        Enable or disable (sensitive=True/False) every widget in the
        window that depends on the object being shown.
        Input: s should be True or False.  It is evaluated as a boolean
        in any case.
        """
        if self.objeto == None:
            s = False
        ws = tuple(["e_observaciones", "tv_transferencias", "b_borrar"] + [self.dic_campos[k] for k in self.dic_campos.keys()])
        for w in ws:
            try:
                self.wids[w].set_sensitive(s)
            except Exception, msg:
                # keep going: one problematic widget must not break the rest
                print "Widget problemático:", w, "Excepción:", msg
        if chequear_permisos:
            self.check_permisos(nombre_fichero_ventana = "cuentas_destino.py")
    def refinar_resultados_busqueda(self, resultados):
        """
        Show all the records of `resultados` in a results window.
        Return the id (first column of the results window) of the
        selected row, or None if the dialog was cancelled.
        """
        filas_res = []
        for r in resultados:
            filas_res.append((r.id, r.nombre, r.banco, r.proveedor and r.proveedor.nombre or "-", r.observaciones))
        idcuenta = utils.dialogo_resultado(filas_res,
                                           titulo = 'SELECCIONE CUENTA BANCARIA',
                                           cabeceras = ('ID', 'Nombre', 'Banco', 'Proveedor', 'Observaciones'),
                                           padre = self.wids['ventana'])
        # dialogo_resultado returns a negative value on cancel
        if idcuenta < 0:
            return None
        else:
            return idcuenta
    def rellenar_widgets(self):
        """
        Put the information of the current account into the widgets.
        No != None check is made, so be careful not to call this
        function in that case.
        """
        cuenta = self.objeto
        for nombre_col in self.dic_campos:
            self.escribir_valor(cuenta.sqlmeta.columns[nombre_col], getattr(cuenta, nombre_col), self.dic_campos[nombre_col])
        self.rellenar_tabla_transferencias()
        # presumably snapshots current values for later change detection;
        # confirm against Ventana/VentanaGenerica.
        self.objeto.make_swap()
    def rellenar_tabla_transferencias(self):
        """Fill the transfers TreeView with the payments of the current
        account and show the accumulated total."""
        model = self.wids['tv_transferencias'].get_model()
        model.clear()
        total = 0.0
        for p in self.objeto.pagos:
            total += p.importe
            # NOTE(review): unlike refinar_resultados_busqueda, the first
            # column has no `or "..."` fallback, so a payment without a
            # proveedor appends None to a string column -- confirm this
            # cannot happen.
            model.append((p.proveedor and p.proveedor.nombre,
                          p.cuentaOrigen and p.cuentaOrigen.nombre,
                          utils.float2str(p.importe),
                          utils.str_fecha(p.fecha),
                          p.observaciones,
                          p.concepto,
                          p.id))
        self.wids['e_total'].set_text(utils.float2str(total))
    def nuevo(self, widget):
        """
        Callback of the b_nuevo button.
        Creates a new object with the basic data.  Once inserted in the
        DB it is made active in the window so the rest of the fields not
        asked for here can be edited.
        """
        cuenta_anterior = self.objeto
        if cuenta_anterior != None:
            # stop refresh notifications for the object we are leaving
            cuenta_anterior.notificador.desactivar()
        cuenta = pclases.CuentaDestino()
        utils.dialogo_info('NUEVA CUENTA CREADA',
                           'Se ha creado una cuenta nueva.\nA continuación complete la información de la misma y guarde los cambios.',
                           padre = self.wids['ventana'])
        cuenta.notificador.activar(self.aviso_actualizacion)
        self.objeto = cuenta
        self.activar_widgets(True)
        self.actualizar_ventana(objeto_anterior = cuenta_anterior)
    def buscar(self, widget):
        """
        Show a search window and then the results.  The selected object
        becomes the active one in the window unless Cancel is pressed in
        the results window.
        """
        cuenta = self.objeto
        a_buscar = utils.dialogo_entrada(titulo = "BUSCAR CUENTA",
                                         texto = "Introduzca nombre de la cuenta o banco:",
                                         padre = self.wids['ventana'])
        if a_buscar != None:
            # a numeric entry is also matched against the record id
            try:
                ida_buscar = int(a_buscar)
            except ValueError:
                ida_buscar = -1
            criterio = pclases.OR(pclases.CuentaDestino.q.nombre.contains(a_buscar),
                                  pclases.CuentaDestino.q.banco.contains(a_buscar),
                                  pclases.CuentaDestino.q.id == ida_buscar)
            resultados = pclases.CuentaDestino.select(criterio)
            if resultados.count() > 1:
                ## Refine the results
                idcuenta = self.refinar_resultados_busqueda(resultados)
                if idcuenta == None:
                    return
                resultados = [pclases.CuentaDestino.get(idcuenta)]
                # Keep a one-object result list with the object in the first
                # position.  (The actual switch of the current object to this
                # result happens further below.)
            elif resultados.count() < 1:
                ## Search produced no results
                utils.dialogo_info('SIN RESULTADOS', 'La búsqueda no produjo resultados.\nPruebe a cambiar el texto buscado o déjelo en blanco para ver una lista completa.\n(Atención: Ver la lista completa puede resultar lento si el número de elementos es muy alto)',
                                   padre = self.wids['ventana'])
                return
            ## Exactly one result
            # First cancel the refresh callback of the old object
            if cuenta != None:
                cuenta.notificador.desactivar()
            # Make the found object the current one
            try:
                cuenta = resultados[0]
            except IndexError:
                # NOTE(review): padre points at self.wids['texto']; every
                # other dialog in this window uses 'ventana' -- confirm a
                # 'texto' widget exists in cuentas_destino.glade.
                utils.dialogo_info(titulo = "ERROR",
                                   texto = "Se produjo un error al recuperar la información.\nCierre y vuelva a abrir la ventana antes de volver a intentarlo.",
                                   padre = self.wids['texto'])
                return
            # And reactivate the refresh callback:
            cuenta.notificador.activar(self.aviso_actualizacion)
            self.activar_widgets(True)
            self.objeto = cuenta
            self.actualizar_ventana()
    def guardar(self, widget):
        """
        Save the contents of the entries and the rest of the input
        widgets into the object and synchronize it with the DB.
        """
        # Temporarily disable the notifier
        self.objeto.notificador.activar(lambda: None)
        # Update the object's data
        for colname in self.dic_campos:
            col = self.clase.sqlmeta.columns[colname]
            try:
                valor_ventana = self.leer_valor(col, self.dic_campos[colname])
                setattr(self.objeto, colname, valor_ventana)
            except (ValueError, TypeError):
                pass # TODO: Warn the user or something.  The problem is there is no "clean" way to get the offending value.
        # Force the DB update instead of waiting for SQLObject to do it:
        self.objeto.syncUpdate()
        self.objeto.sync()
        # Re-enable the notifier
        self.objeto.notificador.activar(self.aviso_actualizacion)
        self.actualizar_ventana()
        self.wids['b_guardar'].set_sensitive(False)
    def borrar(self, widget):
        """
        Delete the account row from the table but do NOT try to delete any
        of its relations, so that if a DB constraint is violated the
        deletion is cancelled and the user is warned.
        """
        cuenta = self.objeto
        # Ask for confirmation before destroying anything.
        if not utils.dialogo('¿Eliminar la cuenta?', 'BORRAR', padre = self.wids['ventana']):
            return
        if cuenta.pagos != []:
            # Payments reference this account; refuse to delete it.
            utils.dialogo_info('CUENTA NO ELIMINADA',
                               'La cuenta está implicada en operaciones que impiden su borrado.',
                               padre = self.wids['ventana'])
        else:
            cuenta.notificador.desactivar()
            try:
                cuenta.destroySelf()
            except Exception, e:
                # Deletion failed (most likely a DB constraint): log it and inform the user.
                self.logger.error("cuentas_destino::borrar -> Cuenta ID %d no se pudo eliminar. Excepción: %s." % (cuenta.id, e))
                utils.dialogo_info(titulo = "CUENTA NO BORRADA",
                                   texto = "La cuenta no se pudo eliminar.\n\nSe generó un informe de error en el «log» de la aplicación.",
                                   padre = self.wids['ventana'])
                self.actualizar_ventana()
                return
        # Deletion succeeded: drop the current object and show the first record.
        self.objeto = None
        self.ir_a_primero()
if __name__ == "__main__":
    # Launch the accounts window directly when executed as a script.
    p = CuentasDestino()
| pacoqueen/upy | formularios/cuentas_destino.py | Python | gpl-2.0 | 15,409 |
class Solution:
    def selfDividingNumbers(self, left: int, right: int) -> List[int]:
        """Return every self-dividing number in [left, right] (LeetCode 728).

        A number is self-dividing when it contains no zero digit and is
        divisible by each of its digits.
        """
        def is_self_dividing(x: int) -> bool:
            # Generator (not a list) inside all(), and each digit is
            # converted to int exactly once via map().
            return all(d != 0 and x % d == 0 for d in map(int, str(x)))

        return [x for x in range(left, right + 1) if is_self_dividing(x)]
| qiyuangong/leetcode | python/728_Self_Dividing_Numbers.py | Python | mit | 398 |
# -*- coding: utf-8 -*-
from django.views.generic import TemplateView
from maps.models import ReviewGroup, Map
class IndexView(TemplateView):
    """Home page: every review group plus all maps, newest review first."""

    template_name = 'map_review/home.html'

    def get_context_data(self, **kwargs):
        context = super(IndexView, self).get_context_data(**kwargs)
        context['groups'] = ReviewGroup.objects.all()
        context['maps'] = Map.objects.order_by('-review_created_on')
        return context
| mapaction/map_review | map_review/views.py | Python | gpl-2.0 | 429 |
#!/usr/bin/python
import os
import sys
import gzip
import simplejson as json
from datetime import datetime as dt
from pprint import pprint as pp
SAMPLE_VALUE = 5000 # 1 for no sampling
SAMPLE_END = 1000 # -1 for no end
# NOTE(review): the two assignments below immediately overwrite the limits
# above, disabling sampling entirely — this looks like a debug toggle left
# in place; confirm which pair is intended before shipping.
SAMPLE_VALUE = 1 # 1 for no sampling
SAMPLE_END = -1 # -1 for no end
def openfile(infile):
    """Return a read handle for *infile*; *.gz files are gunzipped on the fly."""
    if not infile.endswith('.gz'):
        return open(infile, 'r')
    return gzip.open(infile, mode='rb')
class stats( object ):
    """Online accumulator: min/max/count/sum plus a running (Welford) mean
    and variance, fed one value at a time through add()."""

    def __init__(self):
        # +/-inf sentinels: portable (sys.maxint no longer exists on
        # Python 3) and guaranteed to be replaced by the first add().
        self.min = float('inf')
        self.max = float('-inf')
        self.count = 0
        self.sum = 0        # plain sum of the values
        self.Asum = 0       # sum of absolute values
        self.Raverage = 0   # running mean (Welford)
        self.variance = 0   # running sum of squared deviations (M2)

    def add(self, v):
        """Fold one value into the accumulator."""
        if self.min > v:
            self.min = v
        if self.max < v:
            self.max = v
        self.count += 1
        self.sum += v
        self.Asum += abs(v)
        # Welford's online mean/variance update.  Bug fix: the previous
        # version computed ((Raverage * count) + v) / (count + 1) with the
        # already-incremented count, biasing the running mean (and hence
        # the variance) low.
        delta = v - self.Raverage
        self.Raverage += delta / float(self.count)
        self.variance += delta * (v - self.Raverage)

    def get(self):
        """Return the summary as a dict; every field is 0 when no value
        was ever added."""
        if self.count == 0:
            return {
                'average' : 0,
                'Aaverage': 0,
                'stddev'  : 0,
                'min'     : 0,
                'max'     : 0,
                'count'   : 0,
                'sum'     : 0,
                'Asum'    : 0,
                'Raverage': 0,
                'variance': 0
            }
        n = float(self.count)   # float() keeps true division under Python 2 too
        return {
            'average' : self.sum / n,
            'Aaverage': self.Asum / n,
            'stddev'  : (self.variance / n) ** 0.5,   # population std dev
            'min'     : self.min,
            'max'     : self.max,
            'count'   : self.count,
            'sum'     : self.sum,
            'Asum'    : self.Asum,
            'Raverage': self.Raverage,
            'variance': self.variance
        }
def main(infiles):
    """For each input VCF file, accumulate per-chromosome statistics
    (inter-variant distance, quality, INFO/FORMAT numeric fields, variant
    type) and dump them as <basename>.json next to the working directory.
    """
    for infile in infiles:
        print "\n\nANALYZING", infile
        # The commented block below is the original Perl one-liner this
        # script re-implements; kept for reference.
        #$v=0;
        #$c=0;
        #$min=10000000;
        #$max=0;
        #$av=0;
        #$var=0;
        #
        #$a=$v/$c;
        #$dev=sqrt($var/$c);
        #$mx=$a+(2*$dev);
        #
        #print "$c\t$v\t$a\t$min\t$max\t$av\t$var\t$dev\t$mx\n"; }
        #
        #$c+=1;
        #$v+=$_;
        #if ($_ < $min) { $min = $_; };
        #if ($_ > $max) { $max = $_; };
        #if ($c==1) {
        #    $av = $_; $var=0; }
        #
        #else {
        #    $av=(($av*$c)+$_)/($c+1);
        #    $var+=($_-$av)**2;
        #    $dev=sqrt($var/$c);
        #    print "$av\t$var\t$dev\n";}' | \
        # Per-file accumulators: 'all' entries aggregate across chromosomes.
        values = {
                    'count'   : {  },
                    'info'    : {  },
                    'format'  : {  },
                    'polytype': {  },
                    'qual'    : { 'all': stats() },
                    'dist'    : { 'all': stats() },
                }
        # Human-readable descriptions for INFO/FORMAT codes, parsed from
        # the ## header lines and used to re-key the stats dicts.
        codes = { 'info': {}, 'format': {} }
        count = 0
        with openfile(infile) as fhd:
            for line in fhd:
                line = line.strip()
                if len(line) == 0:
                    continue
                if line[0] == "#":
                    # Header line: harvest FORMAT/INFO descriptions.
                    #print line
                    if '##FORMAT' in line:
                        lp = line[13:]
                        code = lp[:2]
                        nfo = lp[lp.find('Description="')+13:-2]
                        print 'format\t', code, "\t", nfo
                        codes['format'][code] = nfo
                    elif '##INFO' in line:
                        lp = line[11:]
                        code = lp[:lp.index(',')]
                        nfo = lp[lp.find('Description="')+13:-2]
                        print 'info\t', code, "\t", nfo
                        codes['info'][code] = nfo
                    continue
                #sys.exit(0)
                count += 1
                # Optional subsampling: only every SAMPLE_VALUE-th record.
                if count % SAMPLE_VALUE != 0:
                    continue
                #print line
                # Standard VCF columns: CHROM POS ID REF ALT QUAL FILTER INFO FORMAT sample.
                cols = line.split()
                chrom = cols[0]
                pos = int( cols[1])
                ref = cols[3]
                alt = cols[4]
                qual = float(cols[5])
                info = cols[7].split(";")
                fmtLbl = cols[8].split(":")
                fmtVal = cols[9].split(":")
                if chrom not in values[ 'dist' ]:
                    # First record on this chromosome: create its accumulators.
                    values[ 'dist' ][ chrom ] = stats()
                    values[ 'qual' ][ chrom ] = stats()
                    values[ 'count' ][ chrom ] = 0
                values[ 'count' ][ chrom ] += 1
                # Classify the variant by REF/ALT lengths.
                polytype = None
                if len(ref) == 1:
                    if len(alt) == 1:
                        polytype = 'SNP'
                    else:
                        polytype = 'INS'
                else:
                    if len(alt) == 1:
                        polytype = 'DEL'
                    else:
                        if len(ref) == len(alt):
                            polytype = 'MNP'
                        else:
                            polytype = 'REP'
                if values[ 'count' ][ chrom ] == 1:
                    # First variant on the chromosome: just remember its position.
                    print 'first', chrom
                    values[ 'dist' ][ chrom ].prev = pos
                    #print values[ 'dist' ][ chrom ].prev
                else:
                    #print 'consecutive', values[ 'dist' ][ chrom ].prev, pos,
                    # Distance from the previous variant on the same chromosome.
                    dist = pos - values[ 'dist' ][ chrom ].prev
                    values[ 'dist' ][ 'all' ].add( dist )
                    values[ 'dist' ][ chrom ].add( dist )
                    values[ 'dist' ][ chrom ].prev = pos
                    #print values[ 'dist' ][ chrom ].prev
                # Same distance bookkeeping, but per variant type.
                if polytype not in values[ 'polytype' ]:
                    values[ 'polytype' ][ polytype ] = { 'all': stats() }
                if chrom not in values[ 'polytype' ][ polytype ]:
                    values[ 'polytype' ][ polytype ][ chrom ] = stats()
                    values[ 'polytype' ][ polytype ][ chrom ].prev = pos
                else:
                    dist = pos - values[ 'polytype' ][ polytype ][ chrom ].prev
                    values[ 'polytype' ][ polytype ][ 'all' ].add( dist )
                    values[ 'polytype' ][ polytype ][ chrom ].add( dist )
                    values[ 'polytype' ][ polytype ][ chrom ].prev = pos
                values['qual']['all'].add( qual )
                values['qual'][chrom].add( qual )
                # INFO column: accumulate every numeric key=value pair;
                # the INDEL flag gets distance tracking instead.
                for i in info:
                    if "=" in i:
                        k, v = i.split("=")
                        #print "K %s V %s" % (k ,v)
                        if k in codes['info']:
                            k = codes['info'][k]
                        try:
                            v = float(v)
                        except:
                            pass
                        if type(v) is float:
                            #print "float", v
                            if k not in values['info']:
                                values['info'][k] = { 'all': stats() }
                            if chrom not in values['info'][k]:
                                values['info'][k][chrom] = stats()
                            values['info'][k][chrom].add( v )
                            values['info'][k]['all'].add( v )
                    elif i == "INDEL":
                        if i not in values['info']:
                            values['info'][i] = { 'all': stats() }
                        if chrom not in values['info'][i]:
                            # First INDEL on this chromosome: seed prev, no distance yet.
                            values['info'][i][chrom] = stats()
                            values['info'][i][chrom].prev = pos
                            continue
                        dist = pos - values['info'][i][chrom].prev
                        values['info'][i][chrom].add( dist )
                        values['info'][i]['all'].add( dist )
                        values['info'][i][chrom].prev = pos
                # FORMAT/sample columns: accumulate numeric per-genotype values.
                for fp in range(len(fmtLbl)):
                    f = fmtLbl[fp]
                    v = fmtVal[fp]
                    if f in codes['format']:
                        f = codes['format'][f]
                    #print f
                    if '/' in v:
                        # Genotype-like value (e.g. 0/1): not numeric, skip.
                        continue
                    if ',' in v:
                        # NOTE(review): comma-separated lists are passed to
                        # stats().add() as raw strings — presumably
                        # intentional under Python 2's mixed-type compare;
                        # confirm before porting to Python 3.
                        pass
                    else:
                        try:
                            v = float(v)
                        except:
                            continue
                    #print fp, f, v
                    if f not in values['format']:
                        values['format'][f] = { 'all': stats() }
                    if chrom not in values['format'][f]:
                        values['format'][f][chrom] = stats()
                    values['format'][f][chrom].add( v )
                    values['format'][f]['all'].add( v )
                if count == SAMPLE_END:
                    break
        # Replace every stats() accumulator with its summary dict so the
        # structure is JSON-serializable.
        for k in values:
            #print "k",k
            if k in [ 'count' ]:
                pass
            elif k in [ 'dist', 'qual' ]:
                for chrom in values[k]:
                    #print "chrom", chrom
                    values[k][chrom] = values[k][chrom].get()
            #elif k in []:
                #values[k] = values[k].get()
            else:
                for chrom in values[k]:
                    #print " chrom", chrom
                    for kk in values[k][chrom]:
                        #print "  kk", kk
                        v = values[k][chrom][kk].get()
                        #pp(v)
                        values[k][chrom][kk] = v
        #pp( values )
        json.dump(values, open(os.path.basename(infile)+'.json', 'w'), sort_keys=True, indent=' ')
if __name__ == '__main__':
    # Each command-line argument is a (possibly gzipped) VCF file to analyze.
    main(sys.argv[1:])
| sauloal/vcf_stats | vcf_stats.py | Python | mit | 10,221 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
'''
Created on Jun 27, 2012
'''
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jun 27, 2012"
import unittest
import os
import json
from pymatgen.entries.exp_entries import ExpEntry
from monty.json import MontyDecoder
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class ExpEntryTest(unittest.TestCase):
    """Sanity and round-trip checks for ExpEntry built from Fe2O3 thermodata."""

    def setUp(self):
        # Load the experimental thermochemical data shipped with the test files.
        fe2o3_path = os.path.join(test_dir, "Fe2O3_exp.json")
        with open(fe2o3_path, "r") as handle:
            thermodata = json.load(handle, cls=MontyDecoder)
        self.entry = ExpEntry("Fe2O3", thermodata)

    def test_energy(self):
        self.assertAlmostEqual(self.entry.energy, -825.5)

    def test_to_from_dict(self):
        # Serialize, rebuild, and verify the energy survives the round trip.
        rebuilt = ExpEntry.from_dict(self.entry.as_dict())
        self.assertAlmostEqual(rebuilt.energy, -825.5)

    def test_str(self):
        self.assertIsNotNone(str(self.entry))
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| dongsenfo/pymatgen | pymatgen/entries/tests/test_exp_entries.py | Python | mit | 1,172 |
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.core.exceptions import ObjectDoesNotExist
from django.views.decorators.cache import never_cache
from django.http import HttpResponse, HttpResponseRedirect
from session_csrf import anonymous_csrf
from ..models import ZLB, ZLBVirtualServer, ZLBVirtualServerRule, ZLBVirtualServerProtection
from ..models import ZLBRule, ZLBProtection, Offender, ZLBVirtualServerPref
from ..forms import ZLBForm, VirtualServerConfirm
from BanHammer.blacklist.management import zeus
import BanHammer.blacklist.tasks as tasks
from BanHammer import settings
@anonymous_csrf
@never_cache
def index(request, zlb=None, action=None):
    """List all ZLBs, sorted per the order_by/order query parameters
    (persisted in the session)."""
    request.session['order_by'] = request.GET.get('order_by', 'hostname')
    request.session['order'] = request.GET.get('order', 'asc')
    order_by = request.session.get('order_by', 'address')
    order = request.session.get('order', 'asc')
    zlbs = ZLB.objects.all()
    # Dispatch table instead of the original if/elif chain; unrecognized
    # keys leave the queryset in database order, as before.
    sort_keys = {
        'created_date': lambda z: z.created_date,
        'updated_date': lambda z: z.updated_date,
        'name': lambda z: z.name,
        'hostname': lambda z: z.hostname,
        'datacenter': lambda z: z.datacenter,
    }
    if order_by in sort_keys:
        zlbs = sorted(zlbs, key=sort_keys[order_by])
    if order == 'desc':
        zlbs.reverse()
    data = {'zlbs': zlbs}
    if action == 'update':
        data['zlb'] = zlb
        data['action'] = 'update'
    data['testing_env'] = settings.TESTING_ENV
    return render_to_response(
        'zlb/index.html',
        data,
        context_instance = RequestContext(request)
    )
@anonymous_csrf
def new(request):
    """Show the ZLB creation form, or create a new ZLB from a valid POST."""
    form = ZLBForm(request.POST) if request.method == 'POST' else ZLBForm()
    if request.method == 'POST' and form.is_valid():
        field_names = ('name', 'hostname', 'datacenter', 'doc_url',
                       'login', 'password', 'comment')
        values = dict((f, form.cleaned_data[f]) for f in field_names)
        ZLB(**values).save()
        return HttpResponseRedirect('/zlbs')
    # GET, or POST with validation errors: render the (possibly bound) form.
    return render_to_response(
        'zlb/new.html',
        {'form': form},
        context_instance = RequestContext(request)
    )
@anonymous_csrf
def edit(request, id):
    """Edit a ZLB; the stored password is kept unless a new one is typed."""
    if request.method == 'POST':
        form = ZLBForm(request.POST)
        if form.is_valid():
            zlb = ZLB.objects.get(id=id)
            for field in ('name', 'hostname', 'datacenter', 'doc_url',
                          'comment', 'login'):
                setattr(zlb, field, form.cleaned_data[field])
            # Only overwrite the password when a replacement was supplied.
            if form.cleaned_data['password']:
                zlb.password = form.cleaned_data['password']
            zlb.save()
            return HttpResponseRedirect('/zlbs')
    else:
        # Pre-fill the form from the object, blanking the password field.
        initial = ZLB.objects.get(id=id).__dict__
        id = initial['id']
        initial['password'] = ''
        form = ZLBForm(initial)
    return render_to_response(
        'zlb/edit.html',
        {'form': form, 'id': id},
        context_instance = RequestContext(request)
    )
@anonymous_csrf
def delete(request, id):
    """Delete the ZLB with primary key *id*, then return to the list."""
    ZLB.objects.get(id=id).delete()
    return HttpResponseRedirect('/zlbs')
@anonymous_csrf
@never_cache
def show(request, id):
    """Detail page for one ZLB: its virtual servers plus per-VS preferences.

    While a background refresh is in progress (zlb.updating) a holding page
    is rendered instead.
    """
    zlb = ZLB.objects.get(id=id)
    if zlb.updating:
        return render_to_response(
            'zlb/updating.html',
            {'zlb': zlb,},
            context_instance = RequestContext(request)
        )
    vs = ZLBVirtualServer.objects.filter(zlb_id=zlb.id)
    # Index the preferences by virtual-server name for template lookup.
    prefs = {}
    for p in ZLBVirtualServerPref.objects.filter(zlb=zlb):
        prefs[p.vs_name] = p
    # (Removed: two empty dicts, "pr" and "rul", that were built but never
    # used anywhere in the view or the template context.)
    return render_to_response(
        'zlb/show.html',
        {'zlb': zlb,
         'prefs': prefs,
         'vs': vs,
         'testing_env': settings.TESTING_ENV,},
        context_instance = RequestContext(request)
    )
@anonymous_csrf
@never_cache
def update(request, id):
    """Kick off an asynchronous refresh of this ZLB's data, then redirect."""
    tasks.update_zlb.delay(id)
    # NOTE(review): the fetched object is unused; its only effect is to
    # raise DoesNotExist for an unknown id — confirm whether that
    # validation is intentional before removing this line.
    zlb = ZLB.objects.get(id=id)
    return HttpResponseRedirect('/zlbs')
def _parse_addr(addresses):
    """Turn a ', '-separated address string into a list where each entry is
    the matching Offender object when one exists, or the bare address
    string otherwise.  Entries may carry an optional '/cidr' suffix."""
    parsed = []
    for token in addresses.split(', '):
        parts = token.split('/')
        addr = parts[0]
        cidr = parts[1] if len(parts) == 2 else None
        # Filter on the cidr only when one was actually given.
        filters = {'address': addr}
        if cidr:
            filters['cidr'] = cidr
        matches = Offender.objects.filter(**filters)
        parsed.append(matches[0] if matches.count() != 0 else addr)
    return parsed
@anonymous_csrf
def index_protection(request, zlb_id):
    """List one ZLB's protection classes with their address lists resolved
    to Offender objects and their attached virtual servers."""
    zlb = ZLB.objects.get(id=zlb_id)
    protections = ZLBProtection.objects.filter(zlb_id=zlb_id)
    for protection in protections:
        protection.allowed_addresses = _parse_addr(protection.allowed_addresses)
        protection.banned_addresses = _parse_addr(protection.banned_addresses)
        protection.virtual_servers = ZLBVirtualServerProtection.objects.filter(
            zlb_id=zlb_id, protection_id=protection.id)
    return render_to_response(
        'zlb/protections.html',
        {'zlb': zlb, 'protections': protections,},
        context_instance = RequestContext(request)
    )
@anonymous_csrf
def index_rules(request, zlb_id):
    """List every traffic rule of one ZLB together with the virtual servers
    each rule is attached to."""
    zlb = ZLB.objects.get(id=zlb_id)
    rules = ZLBRule.objects.filter(zlb_id=zlb_id)
    for current in rules:
        current.virtual_servers = ZLBVirtualServerRule.objects.filter(
            zlb_id=zlb_id, rule_id=current.id)
    return render_to_response(
        'zlb/rules.html',
        {'zlb': zlb, 'rules': rules,},
        context_instance = RequestContext(request)
    )
@never_cache
@anonymous_csrf
def virtual_server(request, zlb_id, vs_id):
    """Virtual-server detail page.

    POST with a valid VirtualServerConfirm form stores the confirm text on
    the per-VS preference row (creating it if missing) and redirects back
    here; GET (or an invalid POST, which falls through with the bound form)
    renders rules, protections and preferences for the virtual server.
    """
    if request.method == 'POST':
        form = VirtualServerConfirm(request.POST)
        if form.is_valid():
            confirm = form.cleaned_data['confirm']
            vs = ZLBVirtualServer.objects.get(id=vs_id)
            pref = ZLBVirtualServerPref.objects.filter(zlb_id=zlb_id,vs_name=vs.name)
            if pref.count() == 0:
                # No preference row yet: create one carrying the confirm text.
                p = ZLBVirtualServerPref(
                    zlb_id=zlb_id,
                    vs_name=vs.name,
                    favorite=False,
                    confirm=confirm,
                )
                p.save()
            else:
                pref = pref[0]
                pref.confirm = confirm
                pref.save()
            return HttpResponseRedirect('/zlb/%s/virtual_server/%s' % (zlb_id, vs_id))
    else:
        form = VirtualServerConfirm()
    zlb = ZLB.objects.get(id=zlb_id)
    virtual_server = ZLBVirtualServer.objects.get(id=vs_id)
    prefs = ZLBVirtualServerPref.objects.filter(zlb=zlb,vs_name=virtual_server.name)
    rules = ZLBVirtualServerRule.objects.filter(virtualserver=virtual_server)
    protections = ZLBVirtualServerProtection.objects.filter(virtualserver=virtual_server)
    # Resolve the raw address strings into Offender objects for display.
    for p in protections:
        p.protection.allowed_addresses = _parse_addr(p.protection.allowed_addresses)
        p.protection.banned_addresses = _parse_addr(p.protection.banned_addresses)
    return render_to_response(
        'zlb/virtual_server.html',
        {'zlb': zlb,
         'virtual_server': virtual_server,
         'prefs': prefs,
         'rules': rules,
         'protections': protections,
         'form': form,},
        context_instance = RequestContext(request)
    )
@never_cache
@anonymous_csrf
def virtual_server_name(request, zlb_id, vs_name):
    """Resolve a virtual server by name and delegate to virtual_server()."""
    vs = ZLBVirtualServer.objects.get(zlb_id=zlb_id, name=vs_name)
    return virtual_server(request, zlb_id, vs.id)
@anonymous_csrf
def virtual_server_favorite(request, zlb_id, vs_id):
    """Set the 'favorite' flag on this virtual server's preference row,
    creating the row when it does not exist yet."""
    vs_name = ZLBVirtualServer.objects.get(id=vs_id).name
    existing = ZLBVirtualServerPref.objects.filter(zlb_id=zlb_id, vs_name=vs_name)
    if existing.count() == 0:
        record = ZLBVirtualServerPref(zlb_id=zlb_id, vs_name=vs_name, favorite=True)
    else:
        record = existing[0]
        record.favorite = True
    record.save()
    return HttpResponseRedirect('/zlb/%s/virtual_server/%s' % (zlb_id, vs_id))
@anonymous_csrf
def virtual_server_unfavorite(request, zlb_id, vs_id):
    """Clear the 'favorite' flag on this virtual server's preference row."""
    vs_name = ZLBVirtualServer.objects.get(id=vs_id).name
    record = ZLBVirtualServerPref.objects.get(zlb_id=zlb_id, vs_name=vs_name)
    record.favorite = False
    record.save()
    return HttpResponseRedirect('/zlb/%s/virtual_server/%s' % (zlb_id, vs_id))
@anonymous_csrf
def virtual_server_unconfirm(request, zlb_id, vs_id):
    """Clear the stored confirm text for this virtual server."""
    vs_name = ZLBVirtualServer.objects.get(id=vs_id).name
    record = ZLBVirtualServerPref.objects.get(zlb_id=zlb_id, vs_name=vs_name)
    record.confirm = ''
    record.save()
    return HttpResponseRedirect('/zlb/%s/virtual_server/%s' % (zlb_id, vs_id))
| mozilla/BanHammer | BanHammer/blacklist/views/zlb.py | Python | bsd-3-clause | 9,627 |
"""
Copyright (c) 2015 Tim Waugh <tim@cyberelk.net>
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
import logging
log = logging.getLogger(__name__)
# Registry of output formats keyed by FORMAT_NAME; populated automatically
# by the RegisteredFormatter metaclass below.
FORMATTERS = {}
def list_formatters():
    """Return the names of every registered output format."""
    return list(FORMATTERS)
def get_formatter(name, *args, **kwargs):
    """
    Instantiate the formatter registered under *name*.

    Extra positional and keyword arguments are forwarded to the class
    constructor.
    """
    formatter_cls = FORMATTERS[name]
    return formatter_cls(*args, **kwargs)
class RegisteredFormatter(type):
    """
    Metaclass for EntryFormatter: each concrete subclass registers itself
    in FORMATTERS under its FORMAT_NAME, making it discoverable via
    list_formatters() and get_formatter().
    """
    def __new__(meta, name, bases, class_dict):
        new_cls = super().__new__(meta, name, bases, class_dict)
        FORMATTERS[class_dict['FORMAT_NAME']] = new_cls
        return new_cls
class EntryFormatter(object, metaclass=RegisteredFormatter):
    """
    Base class for output format implementations; this default formatter
    displays only the MESSAGE field.  The class docstring doubles as the
    help text describing the output format.
    """

    # Name used to select the output format; harvested by the metaclass and
    # surfaced through list_formatters(), get_formatter() and the CLI '-o'
    # parameter.
    FORMAT_NAME = 'cat'

    # When FILTER_INCLUSIONS is None the formatter uses the inclusions and
    # exclusions from the config file; otherwise it declares its own rules
    # and only receives the entries it asked for:
    #   FILTER_INCLUSIONS = [{'field': ['values', ...]}, ...]
    # As in the config file, the PRIORITY field may be a single value
    # rather than a list.
    FILTER_INCLUSIONS = None
    FILTER_EXCLUSIONS = None

    def format(self, entry):
        """
        Format a single journal entry.

        :param entry: dict, entry to format
        :return: str, formatted entry including any newline required
        """
        message = entry['MESSAGE']
        return message + '\n'

    def flush(self):
        """
        Return any closing formatting required.

        Called once there are no more entries to format; can be used to
        e.g. display an analysis of the logs.
        """
        return ''
| twaugh/journal-brief | journal_brief/format/__init__.py | Python | gpl-2.0 | 2,779 |
import smtplib
import imaplib
import time
import sys

from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

# End-to-end auto-reply test: send a message to replyuser@mailu.io, then
# verify (a) it reached the target inbox and (b) the automatic reply came
# back to the sender's inbox.  Exit codes: 25 = SMTP failure, 110 = IMAP
# login failure, 99 = message missing/fetch failure.

msg = MIMEMultipart()
msg['From'] = "admin@mailu.io"
# Bug fix: the To: header previously read "replyusea@mailu.io" (typo) and
# did not match the envelope recipient / the inbox checked below.
msg['To'] = "replyuser@mailu.io"
msg['Subject'] = "Reply Test"
msg.attach(MIMEText("Reply Text", 'plain'))

try:
    smtp_server = smtplib.SMTP('localhost')
    smtp_server.set_debuglevel(1)
    smtp_server.connect('localhost', 587)
    smtp_server.ehlo()
    smtp_server.starttls()
    smtp_server.ehlo()
    smtp_server.login("admin@mailu.io", "password")
    smtp_server.sendmail("admin@mailu.io", "replyuser@mailu.io", msg.as_string())
    smtp_server.quit()
except Exception:
    # Narrowed from a bare "except:", which would also have swallowed
    # KeyboardInterrupt/SystemExit.
    sys.exit(25)

# Give the mail pipeline time to deliver and generate the auto-reply.
time.sleep(30)


def _check_inbox(user, password, expected_text, found_msg, missing_msg):
    """Log in over IMAPS, require *expected_text* in the newest message,
    then empty the inbox so repeated runs start clean."""
    try:
        imap_server = imaplib.IMAP4_SSL('localhost')
        imap_server.login(user, password)
    except Exception as exc:
        print("Failed with:", exc)
        sys.exit(110)
    stat, count = imap_server.select('inbox')
    try:
        stat, data = imap_server.fetch(count[0], '(UID BODY[TEXT])')
    except Exception:
        sys.exit(99)
    if expected_text in str(data[0][1]):
        print(found_msg)
    else:
        print(missing_msg)
        sys.exit(99)
    # Flag every message deleted and expunge.
    typ, data = imap_server.search(None, 'ALL')
    for num in data[0].split():
        imap_server.store(num, '+FLAGS', '\\Deleted')
    imap_server.expunge()
    imap_server.close()
    imap_server.logout()


# Original message must be in the target inbox...
_check_inbox('replyuser@mailu.io', 'password', "Reply Text",
             "Success: Mail is in target inbox",
             "Failed receiving email in target inbox")
# ...and the auto-reply must have come back to the original sender.
_check_inbox('admin@mailu.io', 'password', "Cause this is just a test",
             "Success: Reply is in original inbox",
             "Failed receiving reply in original inbox")
'''
____ _ _ _ _ _ ____ ___
| _ \ ___ __| | \ | | ___| |_ / \ | _ \_ _|
| |_) / _ \ / _` | \| |/ _ \ __| / _ \ | |_) | |
| __/ (_) | (_| | |\ | __/ |_ / ___ \| __/| |
|_| \___/ \__,_|_| \_|\___|\__| /_/ \_\_| |___|
File: PodNet.py
Author: Zach Podbielniak
Last Update: 01/05/2018
Overview: This file sets forth forwarding the PodNet C API to Python, as
well as exposing all other Python related utilties.
This file is part of the PodNet API and comes with no warranty,
use with your own discretion.
'''
from PodNet.PodNetLib import *
# --- ctypes signatures -------------------------------------------------
# Declare the result and argument types of every native serial entry point
# so ctypes marshals values correctly instead of defaulting to int.
PodNetLib.CreateSerialInterface.restype = HANDLE
PodNetLib.CreateSerialInterface.argtypes = [LPSTR, ULONG, LPVOID, ULONG]
PodNetLib.GetSerialInterfaceHandle.restype = HANDLE
PodNetLib.GetSerialInterfaceHandle.argtypes = [LPSTR]
PodNetLib.GetSerialInterfaceBaudRate.restype = ULONG
PodNetLib.GetSerialInterfaceBaudRate.argtypes = [HANDLE]
PodNetLib.WriteSerialCharacter.restype = BOOL
PodNetLib.WriteSerialCharacter.argtypes = [HANDLE, CHAR]
PodNetLib.WriteSerialString.restype = BOOL
PodNetLib.WriteSerialString.argtypes = [HANDLE, LPSTR]
PodNetLib.WriteSerial.restype = BOOL
PodNetLib.WriteSerial.argtypes = [HANDLE, LPBYTE, ULONGLONG, ULONG]
PodNetLib.WriteSerialAsync.restype = HANDLE
PodNetLib.WriteSerialAsync.argtypes = [HANDLE, LPBYTE, ULONGLONG, ULONG]
PodNetLib.SerialHasNewData.restype = ULONG
PodNetLib.SerialHasNewData.argtypes = [HANDLE]
PodNetLib.ReadSerialNextByte.restype = USHORT
PodNetLib.ReadSerialNextByte.argtypes = [HANDLE]
PodNetLib.ReadSerial.restype = BOOL
PodNetLib.ReadSerial.argtypes = [HANDLE, LPVOID, ULONGLONG, ULONG]
PodNetLib.ReadSerialAsync.restype = HANDLE
PodNetLib.ReadSerialAsync.argtypes = [HANDLE, ULONGLONG, ULONG]
PodNetLib.FlushSerial.restype = BOOL
PodNetLib.FlushSerial.argtypes = [HANDLE]
def CreateSerialInterface(lpcszDevice, ulBaud, lpReserved, ulFlags):
    """Create a native serial interface for *lpcszDevice* at *ulBaud*.

    The device name is UTF-8 encoded before crossing the ctypes boundary.
    """
    device_bytes = lpcszDevice.encode('utf-8')
    return PodNetLib.CreateSerialInterface(device_bytes, ulBaud, lpReserved, ulFlags)
def GetSerialInterfaceHandle(lpcszDevice):
    """Look up the HANDLE of an existing serial interface by device name.

    Retries until the native call returns a handle.  The original
    implementation retried via unbounded recursion, which overflows the
    stack if the interface never appears; a loop preserves the retry
    semantics without that risk.
    """
    encoded = lpcszDevice.encode('utf-8')
    result = PodNetLib.GetSerialInterfaceHandle(encoded)
    while result is None:
        result = PodNetLib.GetSerialInterfaceHandle(encoded)
    return result
def GetSerialInterfaceBaudRate(hSerial):
    """Return the configured baud rate of the serial interface *hSerial*.

    Bug fix: the previous version forwarded the undefined name ``hPin``
    instead of ``hSerial``, raising NameError on every call.
    """
    return PodNetLib.GetSerialInterfaceBaudRate(hSerial)
def WriteSerialCharacter(hSerial, cChar):
    """Write a single character to serial interface *hSerial*; returns BOOL."""
    return PodNetLib.WriteSerialCharacter(hSerial, cChar)
def WriteSerialString(hSerial, lpcszString):
    """Write a NUL-terminated string to *hSerial*; returns BOOL.

    NOTE(review): unlike CreateSerialInterface, the string is NOT
    .encode('utf-8')-ed here — confirm whether callers pass bytes.
    """
    return PodNetLib.WriteSerialString(hSerial, lpcszString)
def WriteSerial(hSerial, lpbyData, ullSize, ulFlags):
    """Write *ullSize* bytes from *lpbyData* to *hSerial*; returns BOOL."""
    return PodNetLib.WriteSerial(hSerial, lpbyData, ullSize, ulFlags)
def WriteSerialAsync(hSerial, lpbyData, ullSize, ulFlags):
    """Asynchronous WriteSerial; returns a HANDLE to the pending operation."""
    return PodNetLib.WriteSerialAsync(hSerial, lpbyData, ullSize, ulFlags)
def SerialHasNewData(hSerial):
    """Return the number of unread bytes (ULONG) waiting on *hSerial*."""
    return PodNetLib.SerialHasNewData(hSerial)
def ReadSerialNextByte(hSerial):
    """Read and return the next byte from *hSerial* (as USHORT)."""
    return PodNetLib.ReadSerialNextByte(hSerial)
def ReadSerial(hSerial, lpDataOut, ullDataSize, ulFlags):
    """Read *ullDataSize* bytes from *hSerial* into *lpDataOut*; returns BOOL."""
    return PodNetLib.ReadSerial(hSerial, lpDataOut, ullDataSize, ulFlags)
def ReadSerialAsync(hSerial, ullDataSize, ulFlags):
    """Asynchronous ReadSerial; returns a HANDLE to the pending operation."""
    return PodNetLib.ReadSerialAsync(hSerial, ullDataSize, ulFlags)
def FlushSerial(hSerial):
    """Flush any buffered data on *hSerial*; returns BOOL."""
    return PodNetLib.FlushSerial(hSerial)
| zachpodbielniak/PodNet | Linux/Python/CSerial.py | Python | gpl-3.0 | 3,399 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from copy import deepcopy
class TreeNode(dict):
    """Abstract base for all nodes of the query tree.

    Subclasses must state whether they are leaves and how to enumerate
    children/leaves and traverse the subtree rooted at them.
    """

    def __init__(self, *args, **kwargs):
        super(TreeNode, self).__init__(*args, **kwargs)

    @property
    def is_leaf(self):
        raise NotImplementedError()

    @property
    def children(self, *args, **kwargs):
        """Child nodes of this node, as a list of TreeNodes."""
        raise NotImplementedError()

    def leaves(self, *args, **kwargs):
        """All leaves reachable from this node, as a list."""
        raise NotImplementedError()

    def traverse(self, node_callback=lambda node: node, leaf_callback=lambda node: node):
        """Walk the tree, invoking the matching callback on every node/leaf."""
        raise NotImplementedError()
class Operand(TreeNode):
    """A leaf of the query tree; fields live in the dict but are also
    reachable as attributes."""

    def __init__(self, *args, **kwargs):
        super(Operand, self).__init__(*args, **kwargs)

    def __getattr__(self, name):
        # Guard clause: unknown fields surface as AttributeError, known
        # fields fall through to dict lookup.
        if name not in self:
            raise AttributeError("Operand doesn't have an attribute named '%s'" % name)
        return self[name]

    def __setattr__(self, name, val):
        # Attribute writes go straight into the dict as well.
        self[name] = val

    @property
    def is_leaf(self):
        return True

    @property
    def children(self, *args, **kwargs):
        return []

    def leaves(self, *args, **kwargs):
        return [self]
class OperatorFactoryError(Exception):
    # Raised by OperatorFactory.create() for unknown operator type strings.
    pass
class OperatorFactory(object):
    """Builds operator instances from their type strings."""

    @staticmethod
    def create(op_type):
        """Return a new Or/And/Not instance for *op_type* (case-insensitive)."""
        requested = op_type.lower()
        if requested == Or.type:
            return Or()
        if requested == And.type:
            return And()
        if requested == Not.type:
            return Not()
        raise OperatorFactoryError("Cannot create an operator of type '%s'" % op_type)
class Operator(TreeNode):
    """Internal (non-leaf) node of the query tree.

    Operands are held in ``self._operands``; add_input() keeps the node
    binary by nesting overflow inputs into a new operator of the same type.
    """
    type = "base_operator"

    def __init__(self, operands=None, *args, **kwargs):
        super(Operator, self).__init__(*args, **kwargs)
        # Fresh list per instance — avoids the mutable-default-argument trap.
        self._operands = [] if not operands else operands

    def has_left_operand(self):
        # Consistency fix: raise NotImplementedError (as TreeNode does)
        # instead of a generic Exception("Not implemented!").
        raise NotImplementedError()

    def has_right_operand(self):
        raise NotImplementedError()

    def add_input(self, operand):
        # An operator can have only two inputs (binary tree).  Adding a
        # third creates a new operator of the same type holding the last
        # element plus the new one, so the result is a left operand and a
        # right nested operator.
        if len(self._operands) == 2:
            op = self.__class__([self._operands.pop(1), operand])
            self._operands.append(op)
        else:
            self._operands.append(operand)
        return self

    @property
    def is_leaf(self):
        return False

    @property
    def children(self, *args, **kwargs):
        return self.inputs

    @property
    def inputs(self):
        return self._operands

    def leaves(self, ignore_negated=False, *args, **kwargs):
        """Collect every leaf below this node (optionally skipping negated
        subtrees) via traverse()."""
        leaves = []
        self.traverse(ignore_negated=ignore_negated, leaf_callback=lambda leaf: leaves.append(leaf))
        return leaves

    def traverse(self, node_callback=lambda node: node, leaf_callback=lambda leaf: leaf, ignore_negated=False):
        """Depth-first walk calling *leaf_callback* on leaves and
        *node_callback* on operators; subtrees under a Not node are skipped
        entirely when *ignore_negated* is true."""
        def _do_traverse(operand):
            if operand.is_leaf:
                leaf_callback(operand)
            elif isinstance(operand, Not) and ignore_negated:
                pass
            else:
                node_callback(operand)
                _do_traverse(operand.inputs[0])
                if len(operand.inputs) > 1:
                    _do_traverse(operand.inputs[1])
        return _do_traverse(self)

    def __str__(self):
        return "[TreeNode] '{op}' operator with {children} children ".format(op=self.type.upper(), children=len(self.children))

    def __repr__(self):
        return self.__str__()
class And(Operator):
    """Binary conjunction operator: requires operands on both sides."""
    type = "and"
    def has_left_operand(self):
        return True
    def has_right_operand(self):
        return True
class Or(Operator):
    """Binary disjunction operator: requires operands on both sides."""
    type = "or"
    def has_left_operand(self):
        return True
    def has_right_operand(self):
        return True
class NotOperatorError(Exception):
    # Raised when a second input is added to the unary Not operator.
    pass
class Not(Operator):
    """Unary negation operator: accepts exactly one (right-hand) input."""
    type = "not"

    def has_left_operand(self):
        return False

    def has_right_operand(self):
        return True

    def add_input(self, operand):
        # Guard clause instead of if/else: a second input is an error.
        if self._operands:
            raise NotOperatorError("Cannot add more than one input to Not Operator")
        self._operands.append(operand)
        return self
| sebastiandev/plyse | plyse/query_tree.py | Python | mit | 4,740 |
# -*- coding: utf-8 -*-
"""Init and utils."""
from zope.i18nmessageid import MessageFactory
_ = MessageFactory('apm.sitetheme')
def initialize(context):
    """Initializer called when used as a Zope 2 product.

    Intentionally empty: nothing is registered at import time.
    """
| a25kk/apm | src/apm.sitetheme/apm/sitetheme/__init__.py | Python | mit | 217 |
# -*- coding: utf-8 -*-
from flask import render_template, session, redirect, url_for, request, flash
from flask.ext.login import login_required, current_user
from .. import db
from ..models import Step, Project, User, Commit, Record
from . import main
import json
from datetime import datetime
@main.route('/', methods=['GET'])
@login_required
def index():
    '''
    Default home-page view: list the projects the current user takes part
    in (admins see every project), highest priority first, and render the
    first one in detail.
    '''
    # print 'projects', current_user.projects
    if current_user.admin:
        p = Project.query.filter_by(del_flag=False).order_by(Project.priority.desc()).all()
    else:
        p = current_user.projects.filter_by(del_flag=False).order_by(Project.priority.desc()).all()
    # print(p[0])
    if p == []:
        # No visible projects: render the page with empty placeholders.
        progress = 0
        finish_steps_length = 0
        steps_length = 0
        return render_template('project.html', project=None, projects=p,
                               progress=progress, finish_steps_length=finish_steps_length,
                               steps_length=steps_length, remained_days=None, other_users=[])
    else:
        # Badge colors for the step list, indexed by step position.
        color = ['info', 'info', 'info', 'warning', 'warning', 'danger']
        finish_steps_length = float(Step.query.filter_by(project_id=p[0].id, status=1).count())
        # finish_steps_length = float(len(list(finish_steps)))
        steps_length = float(len(list(p[0].steps)))
        progress = finish_steps_length / steps_length * 100
        all_projects = Project.query.all()
        remained_days = p[0].expected_finish_at - datetime.now()
        # Users not yet members of the project (candidates to add).
        other_users = db.session.query(User).filter(User.id.notin_([u.id for u in p[0].users])).all()
        return render_template('project.html', project=p[0], projects=p, all_projects=all_projects,
                               progress=progress, finish_steps_length=finish_steps_length,
                               steps_length=steps_length, remained_days=remained_days,
                               other_users=other_users, commits=p[0].commits,
                               color=color)
@main.route('/project/<project_id>', methods=['GET'])
@login_required
def project(project_id):
    '''
    Project detail view: progress, remaining days, members and commits for
    one project, plus the sidebar list of the user's projects.
    '''
    # print 'projects', current_user.projects
    # Badge colors for the step list, indexed by step position.
    color = ['info', 'info', 'info', 'warning', 'warning', 'danger']
    p = Project.query.get_or_404(project_id)
    finish_steps_length = Step.query.filter_by(project_id=p.id, status=1).count()
    steps_length = float(len(list(p.steps)))
    progress = finish_steps_length / steps_length * 100
    remained_days = p.expected_finish_at - datetime.now()
    # Users not yet members of the project (candidates to add).
    other_users = db.session.query(User).filter(User.id.notin_([u.id for u in p.users])).all()
    # print [u.id for u in p.users], [u.id for u in other_users]
    all_projects = Project.query.all()
    if current_user.admin:
        projects = Project.query.filter_by(del_flag=False).order_by(Project.priority.desc()).all()
    else:
        projects = current_user.projects.filter_by(del_flag=False).order_by(Project.priority.desc()).all()
    return render_template('project.html', project=p, projects=projects, progress=progress,
                           all_projects=all_projects, finish_steps_length=finish_steps_length,
                           steps_length=steps_length, remained_days=remained_days,
                           other_users=other_users, commits=p.commits, color=color)
@main.route('/step/finish/<id>', methods=['POST'])
@login_required
def finish_step(id):
    '''
    Mark step *id* as finished (with an optional remark); when every step
    of its project is done, mark the whole project finished too.  Returns
    a JSON payload with the project status and the finished-step count.
    '''
    remark = request.form.get('remark')
    p_status = 0
    s = Step.query.get_or_404(id)
    s.status = True
    s.finish_at = datetime.now()
    s.finish_remark = remark
    db.session.add(s)
    db.session.commit()
    p = s.project
    record = Record(project=p, content=u'{0}完成了步骤{1}, 备注:{2}'.format(current_user.name, s.content, remark))
    db.session.add(record)
    finish_steps_length = float(Step.query.filter_by(project_id=p.id, status=1).count())
    # for/else: the else branch runs only when no unfinished step was
    # found, i.e. every step of the project is complete.
    for i in p.steps:
        if i.status == False:
            break
    else:
        p.status = True
        p.finish_at = datetime.now()
        db.session.add(p)
        p_status = 1
        record = Record(project=p, content=u'项目已完成')
        db.session.add(record)
    db.session.commit()
    ret = {'p_status': p_status, 'sid': id, 'finish_steps_length': finish_steps_length}
    return json.dumps(ret)
@main.route('/create_project', methods=['POST'])
@login_required
def create_project():
    """Create a project from the submitted form, with optional parent and steps."""
    project_name = request.form.get('name')
    project_content = request.form.get('content')
    # Default to '' so a form without the field cannot crash .strip() on None.
    git_name = request.form.get('git_name', '').strip()
    if git_name:
        git_name += u'.git'
    time = request.form.get('start_time')
    start_time = datetime.strptime(time, '%m/%d/%Y')
    time = request.form.get('finish_time')
    finish_time = datetime.strptime(time, '%m/%d/%Y')
    priority = int(request.form.get('priority'))
    steps = []
    # NOTE(review): assumes the form carries exactly 6 non-step fields,
    # the rest being step1..stepN inputs — confirm against the template.
    for i in range(1, len(request.form) - 6):
        steps.append(request.form.get('step' + str(i)))
    if request.form.get('father') != u'None':
        father = Project.query.get_or_404(request.form.get('father'))
        p = Project(name=project_name, content=project_content, create_id=current_user.id,
                    father=father, start_time=start_time, expected_finish_at=finish_time,
                    git_name=git_name, priority=priority)
    else:
        p = Project(name=project_name, content=project_content, create_id=current_user.id,
                    start_time=start_time, expected_finish_at=finish_time, priority=priority,
                    git_name=git_name)
    db.session.add(p)
    # Commit first so p gets a primary key before being linked to the creator.
    db.session.commit()
    user = current_user._get_current_object()
    user.projects.append(p)
    db.session.add(user)
    for i in steps:
        s = Step(content=i, project=p)
        db.session.add(s)
    record = Record(project=p, content=u'{0}创建了此项目'.format(current_user.name))
    db.session.add(record)
    db.session.commit()
    flash(u'创建成功')
    return redirect(url_for('main.project', project_id=p.id))
@main.route('/project/<project_id>/addUser', methods=['POST'])
@login_required
def add_user(project_id):
    """Invite the users listed in the posted form into the given project."""
    proj = Project.query.get_or_404(project_id)
    member_ids = list(request.form)
    new_members = db.session.query(User).filter(User.id.in_(member_ids)).all()
    for member in new_members:
        note = Record(project=proj, content=u'{0}邀请{1}加入项目'.format(current_user.name, member.name))
        db.session.add(note)
        proj.users.append(member)
    db.session.add(proj)
    db.session.commit()
    flash(u'添加成功')
    return redirect(url_for('main.project', project_id=project_id))
@main.route('/project/step/<sid>/cancel')
@login_required
def step_status_cancel(sid):
    """Revert a finished step to unfinished; reopen the project if it was closed."""
    s = Step.query.get_or_404(sid)
    s.status = 0
    s.finish_remark = ''
    s.finish_at = None
    p = s.project
    db.session.add(s)
    record = Record(project=p, content=u'{0}将步骤{1}转为未完成'.format(current_user.name, s.content))
    db.session.add(record)
    if p.status == 1:
        p.status = 0
        p.finish_at = None
        db.session.add(p)
        # The literal has no placeholders, so the stray .format() args were dropped.
        record = Record(project=p, content=u'项目状态转为未完成')
        db.session.add(record)
    db.session.commit()
    return redirect(url_for('.project', project_id=s.project.id))
@main.route('/project/<project_id>/removeUser', methods=['POST'])
@login_required
def remove_user(project_id):
    """Remove the users listed in the posted form from the given project."""
    proj = Project.query.get_or_404(project_id)
    leaving = db.session.query(User).filter(User.id.in_(list(request.form))).all()
    for member in leaving:
        db.session.add(Record(project=proj,
                              content=u'{0}将{1}从项目中移除'.format(current_user.name, member.name)))
        proj.users.remove(member)
    db.session.add(proj)
    db.session.commit()
    flash(u'移除成功')
    return redirect(url_for('main.project', project_id=project_id))
@main.route('/project/git/commit/<p_id>', methods=['POST'])
def git_commit(p_id):
    """Webhook endpoint: record a git commit pushed for the given project."""
    proj = Project.query.get_or_404(p_id)
    payload = json.loads(unicode(request.data))
    commit = Commit(branch=payload['branch'], ref=payload['hashref'], cname=payload['name'],
                    cemail=payload['email'], message=payload['message'], project=proj)
    db.session.add(commit)
    note = Record(project=proj, content=u"git账户{0}<{1}>提交了一次commit,备注是:\"{2}\"".format(payload['name'],
                  payload['email'], payload['message']))
    db.session.add(note)
    db.session.commit()
    return 'ok', 200
@main.route('/edit/<project_id>', methods=['GET'])
@login_required
def edit(project_id):
    """Render the edit page for one project."""
    proj = Project.query.get_or_404(project_id)
    return render_template('edit.html', project=proj, all_projects=Project.query.all(),
                           length=len(proj.steps.all()))
@main.route('/edit_name/<project_id>', methods=['POST'])
@login_required
def edit_name(project_id):
    """Rename a project, log the change, and echo the new name back."""
    proj = Project.query.get_or_404(project_id)
    previous = proj.name
    new_name = request.form.get('name')
    proj.name = new_name
    db.session.add(proj)
    note = Record(project=proj, content=u"{0}将对项目名称进行了编辑,"
                  u"从\"{1}\"更改为\"{2}\"".format(current_user.name, previous, new_name))
    db.session.add(note)
    db.session.commit()
    return new_name, 200
@main.route('/edit_content/<project_id>', methods=['POST'])
@login_required
def edit_content(project_id):
    """Update a project's description, log the change, and echo it back.

    Bug fix: the NEW content is now stored; the old code wrote
    ``old_content`` back, so the edit never took effect even though the
    change record claimed it did.
    """
    p = Project.query.get_or_404(project_id)
    old_content = p.content
    content = request.form.get('content')
    p.content = content
    db.session.add(p)
    record = Record(project=p, content=u"{0}将对项目内容进行了编辑,"
                    u"从\"{1}\"更改为\"{2}\"".format(current_user.name, old_content, content))
    db.session.add(record)
    db.session.commit()
    return content, 200
@main.route('/edit_start_time/<project_id>', methods=['POST'])
@login_required
def edit_start_time(project_id):
    """Update a project's start date; on a bad date, echo the current value."""
    p = Project.query.get_or_404(project_id)
    old_time = p.start_time
    time = request.form.get('time')
    try:
        start_time = datetime.strptime(time, '%m/%d/%Y')
    except (TypeError, ValueError):
        # Narrowed from a bare except: TypeError when the field is missing
        # (None), ValueError for a malformed date. Keep the stored value.
        return str(p.start_time), 200
    p.start_time = start_time
    db.session.add(p)
    record = Record(project=p, content=u"{0}将对项目开始时间进行了编辑,"
                    u"时间从\"{1}\"更改为\"{2}\"".format(current_user.name, old_time, start_time))
    db.session.add(record)
    db.session.commit()
    return str(start_time), 200
@main.route('/edit_end_time/<project_id>', methods=['POST'])
@login_required
def edit_end_time(project_id):
    """Update a project's expected finish date; on a bad date, echo the current value."""
    p = Project.query.get_or_404(project_id)
    old_time = p.expected_finish_at
    time = request.form.get('time')
    try:
        end_time = datetime.strptime(time, '%m/%d/%Y')
    except (TypeError, ValueError):
        # Narrowed from a bare except: TypeError when the field is missing
        # (None), ValueError for a malformed date. Keep the stored value.
        return str(p.expected_finish_at), 200
    p.expected_finish_at = end_time
    db.session.add(p)
    record = Record(project=p, content=u"{0}将对期望完成时间进行了编辑,"
                    u"时间从\"{1}\"更改为\"{2}\"".format(current_user.name, old_time, end_time))
    db.session.add(record)
    db.session.commit()
    return str(end_time), 200
@main.route('/edit_priority/<project_id>', methods=['POST'])
@login_required
def edit_priority(project_id):
    """Change a project's priority and return the matching star markup."""
    proj = Project.query.get_or_404(project_id)
    previous = proj.priority
    new_priority = int(request.form.get('priority'))
    proj.priority = new_priority
    db.session.add(proj)
    note = Record(project=proj, content=u'{0}将对优先级进行了编辑,'
                  u'优先级从“{1}”更改为“{2}”'.format(current_user.name,
                                           previous, new_priority))
    db.session.add(note)
    db.session.commit()
    # One star glyph per priority level.
    star = '<span class ="glyphicon glyphicon-star" style="color: #f1c40f;" > </span>'
    return star * new_priority, 200
@main.route('/edit_step/<step_id>', methods=['POST'])
@login_required
def edit_step(step_id):
    """Update a step's text, log the change, and echo the new text back."""
    step = Step.query.get_or_404(step_id)
    previous = step.content
    new_content = request.form.get('content')
    step.content = new_content
    db.session.add(step)
    note = Record(project=step.project, content=u"{0}将对步骤进行了编辑,"
                  u"将\"{1}\"更改为\"{2}\"".format(current_user.name, previous, step.content))
    db.session.add(note)
    db.session.commit()
    return new_content, 200
@main.route('/add_step/<project_id>', methods=['POST'])
@login_required
def add_step(project_id):
    """Append a new step to a project and return its form-row HTML snippet."""
    p = Project.query.get_or_404(project_id)
    s = Step(content=request.form.get('content'))
    db.session.add(s)
    p.steps.append(s)
    length = str(len(p.steps.all()))
    db.session.add(p)
    record = Record(project=p, content=u"{0}将添加了步骤\"{1}\"".format(current_user.name, s.content))
    db.session.add(record)
    # Commit first so s.id is populated before being embedded in the markup.
    db.session.commit()
    # Fix: coerce the id to text — an integer primary key would raise
    # TypeError when concatenated into the unicode snippet below.
    sid = str(s.id)
    ret = u' <div class="form-group" id="step' + length + u'"><label for="inputStep1">步骤' + length +\
          u'</label><p><input type="text" style="display: inline;width: 93%" ' \
          u'class="form-control" id="' + sid + u'" ' \
          u'value="' + s.content + '" ><a style="display: inline" class="btn btn-danger"' \
          u'id="/step_remove/' + sid + u'" >—</a></p></div>'
    return ret, 200
@main.route('/step_remove/<step_id>', methods=['GET'])
@login_required
def remove_step(step_id):
    """Soft-delete a step: unlink it from its project and raise its del flag."""
    step = Step.query.get_or_404(step_id)
    proj = step.project
    proj.steps.remove(step)
    step.del_flag = True
    db.session.add(proj)
    db.session.add(step)
    note = Record(project=proj, content=u"{0}将删除了步骤\"{1}\"".format(current_user.name, step.content))
    db.session.add(note)
    db.session.commit()
    return '', 200
@main.route('/change_father_project/<project_id>', methods=['POST'])
@login_required
def edit_father(project_id):
    """Re-parent a project; the literal 'None' clears the parent."""
    proj = Project.query.get_or_404(project_id)
    parent_id = request.form.get('father_id')
    if parent_id == 'None':
        proj.father = None
        label = 'None'
    else:
        parent = Project.query.get_or_404(parent_id)
        proj.father = parent
        label = parent.name
    db.session.add(proj)
    note = Record(project=proj, content=u"{0}将父项目转为\"{1}\"".format(current_user.name, label))
    db.session.add(note)
    db.session.commit()
    return label, 200
@main.route('/edit_git/<project_id>', methods=['POST'])
@login_required
def edit_git(project_id):
    """Update a project's git repository name and log the change."""
    p = Project.query.get_or_404(project_id)
    old_name = p.git_name
    # Default to '' so a request without the field cannot crash .strip() on None.
    git_name = request.form.get('git_name', '').strip()
    if git_name:
        git_name += u'.git'
    p.git_name = git_name
    db.session.add(p)
    record = Record(project=p, content=u"{0}将对git进行了编辑,"
                    u"从\"{1}\"更改为\"{2}\"".format(current_user.name, old_name, git_name))
    db.session.add(record)
    db.session.commit()
    return git_name, 200
@main.route('/bug_locat', methods=['POST'])
def bug_locat():
data = json.loads(unicode(request.data))
time = data['time']
content = data['content']
type_ = data['type']
province = data['province']
print time, content, type_, province
# record = Record(project=p, content=u"git账户{0}<{1}>提交了一次commit,备注是:\"{2}\"".format(data['name'],
# data['email'], data['message']))
# db.session.add(record)
# db.session.commit()
return 'ok', 200
| bigzhao/flask-projects-manage | app/main/views.py | Python | mit | 15,703 |
# -*- coding: utf-8 -*-
#
# SelfTest/Cipher/test_pkcs1_oaep.py: Self-test for PKCS#1 OAEP encryption
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
from __future__ import nested_scopes
__revision__ = "$Id$"
import unittest
from Crypto.SelfTest.st_common import list_test_cases, a2b_hex, b2a_hex
from Crypto.Util.py3compat import *
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP as PKCS
from Crypto.Hash import MD2,MD5,SHA as SHA1,SHA256,RIPEMD
from Crypto import Random
def rws(t):
    """Return *t* with every space, tab and newline removed."""
    return t.replace('\n', '').replace('\t', '').replace(' ', '')
def t2b(t):
    """Convert a text string of hexadecimal bytes (whitespace ignored)
    into the corresponding byte string."""
    stripped = rws(t)
    if len(stripped) % 2:
        raise ValueError("Even number of characters expected")
    return a2b_hex(stripped)
class PKCS1_OAEP_Tests(unittest.TestCase):
    """Known-answer and roundtrip tests for PKCS#1 OAEP (RSAES-OAEP).

    Fixes applied to the original test class:

    * ``randGen.__call__`` sliced ``data[idx:N]`` instead of
      ``data[idx:idx+N]``, returning wrong (or empty) data on any call
      after the first.
    * ``testEncryptDecrypt1`` was defined twice; the second definition
      shadowed the first, which silently never ran (and which called a
      nonexistent module-level ``PKCS.encrypt``). The first now uses the
      cipher-object API and the second is renamed ``testEncryptDecrypt1b``.
    """

    def setUp(self):
        self.rng = Random.new().read
        self.key1024 = RSA.generate(1024, self.rng)

    # List of tuples with test data for PKCS#1 OAEP.
    # Each tuple is made up by:
    #   Item #0: dictionary with RSA key components
    #   Item #1: plaintext
    #   Item #2: ciphertext
    #   Item #3: random data (=seed)
    #   Item #4: hash object
    _testData = (
        #
        # From in oaep-int.txt to be found in
        # ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
        #
        (
            # Private key
            {
                'n': '''bb f8 2f 09 06 82 ce 9c 23 38 ac 2b 9d a8 71 f7
                        36 8d 07 ee d4 10 43 a4 40 d6 b6 f0 74 54 f5 1f
                        b8 df ba af 03 5c 02 ab 61 ea 48 ce eb 6f cd 48
                        76 ed 52 0d 60 e1 ec 46 19 71 9d 8a 5b 8b 80 7f
                        af b8 e0 a3 df c7 37 72 3e e6 b4 b7 d9 3a 25 84
                        ee 6a 64 9d 06 09 53 74 88 34 b2 45 45 98 39 4e
                        e0 aa b1 2d 7b 61 a5 1f 52 7a 9a 41 f6 c1 68 7f
                        e2 53 72 98 ca 2a 8f 59 46 f8 e5 fd 09 1d bd cb''',
                # Public key
                'e': '11',
                # In the test vector, only p and q were given...
                # d is computed offline as e^{-1} mod (p-1)(q-1)
                'd': '''a5dafc5341faf289c4b988db30c1cdf83f31251e0
                        668b42784813801579641b29410b3c7998d6bc465745e5c3
                        92669d6870da2c082a939e37fdcb82ec93edac97ff3ad595
                        0accfbc111c76f1a9529444e56aaf68c56c092cd38dc3bef
                        5d20a939926ed4f74a13eddfbe1a1cecc4894af9428c2b7b
                        8883fe4463a4bc85b1cb3c1'''
            },
            # Plaintext
            '''d4 36 e9 95 69 fd 32 a7 c8 a0 5b bc 90 d3 2c 49''',
            # Ciphertext
            '''12 53 e0 4d c0 a5 39 7b b4 4a 7a b8 7e 9b f2 a0
               39 a3 3d 1e 99 6f c8 2a 94 cc d3 00 74 c9 5d f7
               63 72 20 17 06 9e 52 68 da 5d 1c 0b 4f 87 2c f6
               53 c1 1d f8 23 14 a6 79 68 df ea e2 8d ef 04 bb
               6d 84 b1 c3 1d 65 4a 19 70 e5 78 3b d6 eb 96 a0
               24 c2 ca 2f 4a 90 fe 9f 2e f5 c9 c1 40 e5 bb 48
               da 95 36 ad 87 00 c8 4f c9 13 0a de a7 4e 55 8d
               51 a7 4d df 85 d8 b5 0d e9 68 38 d6 06 3e 09 55''',
            # Random
            '''aa fd 12 f6 59 ca e6 34 89 b4 79 e5 07 6d de c2
               f0 6c b5 8f''',
            # Hash
            SHA1,
        ),

        #
        # From in oaep-vect.txt to be found in Example 1.1
        # ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
        #
        (
            # Private key
            {
                'n': '''a8 b3 b2 84 af 8e b5 0b 38 70 34 a8 60 f1 46 c4
                        91 9f 31 87 63 cd 6c 55 98 c8 ae 48 11 a1 e0 ab
                        c4 c7 e0 b0 82 d6 93 a5 e7 fc ed 67 5c f4 66 85
                        12 77 2c 0c bc 64 a7 42 c6 c6 30 f5 33 c8 cc 72
                        f6 2a e8 33 c4 0b f2 58 42 e9 84 bb 78 bd bf 97
                        c0 10 7d 55 bd b6 62 f5 c4 e0 fa b9 84 5c b5 14
                        8e f7 39 2d d3 aa ff 93 ae 1e 6b 66 7b b3 d4 24
                        76 16 d4 f5 ba 10 d4 cf d2 26 de 88 d3 9f 16 fb''',
                'e': '''01 00 01''',
                'd': '''53 33 9c fd b7 9f c8 46 6a 65 5c 73 16 ac a8 5c
                        55 fd 8f 6d d8 98 fd af 11 95 17 ef 4f 52 e8 fd
                        8e 25 8d f9 3f ee 18 0f a0 e4 ab 29 69 3c d8 3b
                        15 2a 55 3d 4a c4 d1 81 2b 8b 9f a5 af 0e 7f 55
                        fe 73 04 df 41 57 09 26 f3 31 1f 15 c4 d6 5a 73
                        2c 48 31 16 ee 3d 3d 2d 0a f3 54 9a d9 bf 7c bf
                        b7 8a d8 84 f8 4d 5b eb 04 72 4d c7 36 9b 31 de
                        f3 7d 0c f5 39 e9 cf cd d3 de 65 37 29 ea d5 d1 '''
            },
            # Plaintext
            '''66 28 19 4e 12 07 3d b0 3b a9 4c da 9e f9 53 23
               97 d5 0d ba 79 b9 87 00 4a fe fe 34''',
            # Ciphertext
            '''35 4f e6 7b 4a 12 6d 5d 35 fe 36 c7 77 79 1a 3f
               7b a1 3d ef 48 4e 2d 39 08 af f7 22 fa d4 68 fb
               21 69 6d e9 5d 0b e9 11 c2 d3 17 4f 8a fc c2 01
               03 5f 7b 6d 8e 69 40 2d e5 45 16 18 c2 1a 53 5f
               a9 d7 bf c5 b8 dd 9f c2 43 f8 cf 92 7d b3 13 22
               d6 e8 81 ea a9 1a 99 61 70 e6 57 a0 5a 26 64 26
               d9 8c 88 00 3f 84 77 c1 22 70 94 a0 d9 fa 1e 8c
               40 24 30 9c e1 ec cc b5 21 00 35 d4 7a c7 2e 8a''',
            # Random
            '''18 b7 76 ea 21 06 9d 69 77 6a 33 e9 6b ad 48 e1
               dd a0 a5 ef''',
            SHA1
        ),

        #
        # From in oaep-vect.txt to be found in Example 2.1
        # ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
        #
        (
            # Private key
            {
                'n': '''01 94 7c 7f ce 90 42 5f 47 27 9e 70 85 1f 25 d5
                        e6 23 16 fe 8a 1d f1 93 71 e3 e6 28 e2 60 54 3e
                        49 01 ef 60 81 f6 8c 0b 81 41 19 0d 2a e8 da ba
                        7d 12 50 ec 6d b6 36 e9 44 ec 37 22 87 7c 7c 1d
                        0a 67 f1 4b 16 94 c5 f0 37 94 51 a4 3e 49 a3 2d
                        de 83 67 0b 73 da 91 a1 c9 9b c2 3b 43 6a 60 05
                        5c 61 0f 0b af 99 c1 a0 79 56 5b 95 a3 f1 52 66
                        32 d1 d4 da 60 f2 0e da 25 e6 53 c4 f0 02 76 6f
                        45''',
                'e': '''01 00 01''',
                'd': '''08 23 f2 0f ad b5 da 89 08 8a 9d 00 89 3e 21 fa
                        4a 1b 11 fb c9 3c 64 a3 be 0b aa ea 97 fb 3b 93
                        c3 ff 71 37 04 c1 9c 96 3c 1d 10 7a ae 99 05 47
                        39 f7 9e 02 e1 86 de 86 f8 7a 6d de fe a6 d8 cc
                        d1 d3 c8 1a 47 bf a7 25 5b e2 06 01 a4 a4 b2 f0
                        8a 16 7b 5e 27 9d 71 5b 1b 45 5b dd 7e ab 24 59
                        41 d9 76 8b 9a ce fb 3c cd a5 95 2d a3 ce e7 25
                        25 b4 50 16 63 a8 ee 15 c9 e9 92 d9 24 62 fe 39'''
            },
            # Plaintext
            '''8f f0 0c aa 60 5c 70 28 30 63 4d 9a 6c 3d 42 c6
               52 b5 8c f1 d9 2f ec 57 0b ee e7''',
            # Ciphertext
            '''01 81 af 89 22 b9 fc b4 d7 9d 92 eb e1 98 15 99
               2f c0 c1 43 9d 8b cd 49 13 98 a0 f4 ad 3a 32 9a
               5b d9 38 55 60 db 53 26 83 c8 b7 da 04 e4 b1 2a
               ed 6a ac df 47 1c 34 c9 cd a8 91 ad dc c2 df 34
               56 65 3a a6 38 2e 9a e5 9b 54 45 52 57 eb 09 9d
               56 2b be 10 45 3f 2b 6d 13 c5 9c 02 e1 0f 1f 8a
               bb 5d a0 d0 57 09 32 da cf 2d 09 01 db 72 9d 0f
               ef cc 05 4e 70 96 8e a5 40 c8 1b 04 bc ae fe 72
               0e''',
            # Random
            '''8c 40 7b 5e c2 89 9e 50 99 c5 3e 8c e7 93 bf 94
               e7 1b 17 82''',
            SHA1
        ),

        #
        # From in oaep-vect.txt to be found in Example 10.1
        # ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1-vec.zip
        #
        (
            # Private key
            {
                'n': '''ae 45 ed 56 01 ce c6 b8 cc 05 f8 03 93 5c 67 4d
                        db e0 d7 5c 4c 09 fd 79 51 fc 6b 0c ae c3 13 a8
                        df 39 97 0c 51 8b ff ba 5e d6 8f 3f 0d 7f 22 a4
                        02 9d 41 3f 1a e0 7e 4e be 9e 41 77 ce 23 e7 f5
                        40 4b 56 9e 4e e1 bd cf 3c 1f b0 3e f1 13 80 2d
                        4f 85 5e b9 b5 13 4b 5a 7c 80 85 ad ca e6 fa 2f
                        a1 41 7e c3 76 3b e1 71 b0 c6 2b 76 0e de 23 c1
                        2a d9 2b 98 08 84 c6 41 f5 a8 fa c2 6b da d4 a0
                        33 81 a2 2f e1 b7 54 88 50 94 c8 25 06 d4 01 9a
                        53 5a 28 6a fe b2 71 bb 9b a5 92 de 18 dc f6 00
                        c2 ae ea e5 6e 02 f7 cf 79 fc 14 cf 3b dc 7c d8
                        4f eb bb f9 50 ca 90 30 4b 22 19 a7 aa 06 3a ef
                        a2 c3 c1 98 0e 56 0c d6 4a fe 77 95 85 b6 10 76
                        57 b9 57 85 7e fd e6 01 09 88 ab 7d e4 17 fc 88
                        d8 f3 84 c4 e6 e7 2c 3f 94 3e 0c 31 c0 c4 a5 cc
                        36 f8 79 d8 a3 ac 9d 7d 59 86 0e aa da 6b 83 bb''',
                'e': '''01 00 01''',
                'd': '''05 6b 04 21 6f e5 f3 54 ac 77 25 0a 4b 6b 0c 85
                        25 a8 5c 59 b0 bd 80 c5 64 50 a2 2d 5f 43 8e 59
                        6a 33 3a a8 75 e2 91 dd 43 f4 8c b8 8b 9d 5f c0
                        d4 99 f9 fc d1 c3 97 f9 af c0 70 cd 9e 39 8c 8d
                        19 e6 1d b7 c7 41 0a 6b 26 75 df bf 5d 34 5b 80
                        4d 20 1a dd 50 2d 5c e2 df cb 09 1c e9 99 7b be
                        be 57 30 6f 38 3e 4d 58 81 03 f0 36 f7 e8 5d 19
                        34 d1 52 a3 23 e4 a8 db 45 1d 6f 4a 5b 1b 0f 10
                        2c c1 50 e0 2f ee e2 b8 8d ea 4a d4 c1 ba cc b2
                        4d 84 07 2d 14 e1 d2 4a 67 71 f7 40 8e e3 05 64
                        fb 86 d4 39 3a 34 bc f0 b7 88 50 1d 19 33 03 f1
                        3a 22 84 b0 01 f0 f6 49 ea f7 93 28 d4 ac 5c 43
                        0a b4 41 49 20 a9 46 0e d1 b7 bc 40 ec 65 3e 87
                        6d 09 ab c5 09 ae 45 b5 25 19 01 16 a0 c2 61 01
                        84 82 98 50 9c 1c 3b f3 a4 83 e7 27 40 54 e1 5e
                        97 07 50 36 e9 89 f6 09 32 80 7b 52 57 75 1e 79'''
            },
            # Plaintext
            '''8b ba 6b f8 2a 6c 0f 86 d5 f1 75 6e 97 95 68 70
               b0 89 53 b0 6b 4e b2 05 bc 16 94 ee''',
            # Ciphertext
            '''53 ea 5d c0 8c d2 60 fb 3b 85 85 67 28 7f a9 15
               52 c3 0b 2f eb fb a2 13 f0 ae 87 70 2d 06 8d 19
               ba b0 7f e5 74 52 3d fb 42 13 9d 68 c3 c5 af ee
               e0 bf e4 cb 79 69 cb f3 82 b8 04 d6 e6 13 96 14
               4e 2d 0e 60 74 1f 89 93 c3 01 4b 58 b9 b1 95 7a
               8b ab cd 23 af 85 4f 4c 35 6f b1 66 2a a7 2b fc
               c7 e5 86 55 9d c4 28 0d 16 0c 12 67 85 a7 23 eb
               ee be ff 71 f1 15 94 44 0a ae f8 7d 10 79 3a 87
               74 a2 39 d4 a0 4c 87 fe 14 67 b9 da f8 52 08 ec
               6c 72 55 79 4a 96 cc 29 14 2f 9a 8b d4 18 e3 c1
               fd 67 34 4b 0c d0 82 9d f3 b2 be c6 02 53 19 62
               93 c6 b3 4d 3f 75 d3 2f 21 3d d4 5c 62 73 d5 05
               ad f4 cc ed 10 57 cb 75 8f c2 6a ee fa 44 12 55
               ed 4e 64 c1 99 ee 07 5e 7f 16 64 61 82 fd b4 64
               73 9b 68 ab 5d af f0 e6 3e 95 52 01 68 24 f0 54
               bf 4d 3c 8c 90 a9 7b b6 b6 55 32 84 eb 42 9f cc''',
            # Random
            '''47 e1 ab 71 19 fe e5 6c 95 ee 5e aa d8 6f 40 d0
               aa 63 bd 33''',
            SHA1
        ),
    )

    def testEncrypt1(self):
        # Verify encryption using all test vectors
        for test in self._testData:
            # Build the public key
            comps = [ long(rws(test[0][x]),16) for x in ('n','e') ]
            key = RSA.construct(comps)

            # RNG that takes its random numbers from a pool given
            # at initialization
            class randGen:
                def __init__(self, data):
                    self.data = data
                    self.idx = 0
                def __call__(self, N):
                    # Fix: take N bytes from the current position; the old
                    # data[self.idx:N] slice was wrong for any call after
                    # the first one.
                    r = self.data[self.idx:self.idx+N]
                    self.idx += N
                    return r

            # The real test
            key._randfunc = randGen(t2b(test[3]))
            cipher = PKCS.new(key, test[4])
            ct = cipher.encrypt(t2b(test[1]))
            self.assertEqual(ct, t2b(test[2]))

    def testEncrypt2(self):
        # Verify that encryption fails if plaintext is too long
        pt = '\x00'*(128-2*20-2+1)
        cipher = PKCS.new(self.key1024)
        self.assertRaises(ValueError, cipher.encrypt, pt)

    def testDecrypt1(self):
        # Verify decryption using all test vectors
        for test in self._testData:
            # Build the private key
            comps = [ long(rws(test[0][x]),16) for x in ('n','e','d') ]
            key = RSA.construct(comps)
            # The real test
            cipher = PKCS.new(key, test[4])
            pt = cipher.decrypt(t2b(test[2]))
            self.assertEqual(pt, t2b(test[1]))

    def testDecrypt2(self):
        # Simplest possible negative tests
        for ct_size in (127,128,129):
            cipher = PKCS.new(self.key1024)
            self.assertRaises(ValueError, cipher.decrypt, bchr(0x00)*ct_size)

    def testEncryptDecrypt1(self):
        # Encrypt/Decrypt messages of length [0..128-2*20-2].
        # Fixed to use the cipher-object API; the original called a
        # nonexistent module-level PKCS.encrypt and was never executed
        # because a later method with the same name shadowed it.
        for pt_len in xrange(0,128-2*20-2):
            pt = self.rng(pt_len)
            cipher = PKCS.new(self.key1024)
            ct = cipher.encrypt(pt)
            self.assertEqual(cipher.decrypt(ct), pt)

    def testEncryptDecrypt1b(self):
        # Renamed from testEncryptDecrypt1 so it no longer shadows the
        # roundtrip-length test above.
        # Helper function to monitor what's requested from RNG
        global asked
        def localRng(N):
            global asked
            asked += N
            return self.rng(N)
        # Verify that OAEP is friendly to all hashes
        for hashmod in (MD2,MD5,SHA1,SHA256,RIPEMD):
            # Verify that encrypt() asks for as many random bytes
            # as the hash output size
            asked = 0
            pt = self.rng(40)
            self.key1024._randfunc = localRng
            cipher = PKCS.new(self.key1024, hashmod)
            ct = cipher.encrypt(pt)
            self.assertEqual(cipher.decrypt(ct), pt)
            self.failUnless(asked > hashmod.digest_size)

    def testEncryptDecrypt2(self):
        # Verify that OAEP supports labels
        pt = self.rng(35)
        xlabel = self.rng(22)
        cipher = PKCS.new(self.key1024, label=xlabel)
        ct = cipher.encrypt(pt)
        self.assertEqual(cipher.decrypt(ct), pt)

    def testEncryptDecrypt3(self):
        # Verify that encrypt() uses the custom MGF
        global mgfcalls
        # Helper function to monitor what's requested from MGF
        def newMGF(seed,maskLen):
            global mgfcalls
            mgfcalls += 1
            return bchr(0x00)*maskLen
        mgfcalls = 0
        pt = self.rng(32)
        cipher = PKCS.new(self.key1024, mgfunc=newMGF)
        ct = cipher.encrypt(pt)
        self.assertEqual(mgfcalls, 2)
        self.assertEqual(cipher.decrypt(ct), pt)
def get_tests(config={}):
    """Return the list of test cases exposed by this module."""
    return list_test_cases(PKCS1_OAEP_Tests)
if __name__ == '__main__':
    # unittest looks the default test up by name in this module.
    def suite():
        return unittest.TestSuite(get_tests())
    unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
| ktan2020/legacy-automation | win/Lib/site-packages/Crypto/SelfTest/Cipher/test_pkcs1_oaep.py | Python | mit | 17,292 |
# GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
#feedback
import GemRB
from GUIDefines import *
# Module-level handles, populated by OnLoad(): the feedback options window
# and the text area that shows per-control help text.
FeedbackWindow = 0
TextAreaControl = 0
def OnLoad():
	"""Build the Feedback options window and wire up all of its controls."""
	global FeedbackWindow, TextAreaControl
	GemRB.LoadWindowPack("GUIOPT", 800, 600)
	FeedbackWindow = GemRB.LoadWindow(9)
	FeedbackWindow.SetFrame( )
	# Each option is a label button (first control) paired with the actual
	# slider/checkbox button (second control).
	MarkerSlider = FeedbackWindow.GetControl(30)
	MarkerSliderS = FeedbackWindow.GetControl(8)
	LocatorSlider = FeedbackWindow.GetControl(31)
	LocatorSliderS = FeedbackWindow.GetControl(9)
	THac0Rolls = FeedbackWindow.GetControl(32)
	THac0RollsB = FeedbackWindow.GetControl(10)
	THac0RollsB.SetSprites("GBTNOPT4", 0, 0, 1, 2, 3)
	CombatInfo = FeedbackWindow.GetControl(33)
	CombatInfoB = FeedbackWindow.GetControl(11)
	CombatInfoB.SetSprites("GBTNOPT4", 0, 0, 1, 2, 3)
	Actions = FeedbackWindow.GetControl(34)
	ActionsB = FeedbackWindow.GetControl(12)
	ActionsB.SetSprites("GBTNOPT4", 0, 0, 1, 2, 3)
	StateChanges = FeedbackWindow.GetControl(35)
	StateChangesB = FeedbackWindow.GetControl(13)
	StateChangesB.SetSprites("GBTNOPT4", 0, 0, 1, 2, 3)
	SelectionText = FeedbackWindow.GetControl(36)
	SelectionTextB = FeedbackWindow.GetControl(14)
	SelectionTextB.SetSprites("GBTNOPT4", 0, 0, 1, 2, 3)
	Miscellaneous = FeedbackWindow.GetControl(37)
	MiscellaneousB = FeedbackWindow.GetControl(15)
	MiscellaneousB.SetSprites("GBTNOPT4", 0, 0, 1, 2, 3)
	OkButton = FeedbackWindow.GetControl(26)
	CancelButton = FeedbackWindow.GetControl(27)
	# Shared help area; strrefs below are the help texts for each control.
	TextAreaControl = FeedbackWindow.GetControl(28)
	TextAreaControl.SetText(18043)
	OkButton.SetText(11973)
	OkButton.SetFlags (IE_GUI_BUTTON_DEFAULT, OP_OR)
	CancelButton.SetText(13727)
	CancelButton.SetFlags (IE_GUI_BUTTON_CANCEL, OP_OR)
	# Sliders store their value in a game variable via SetVarAssoc.
	MarkerSlider.SetEvent(IE_GUI_BUTTON_ON_PRESS, MarkerSliderPress)
	MarkerSliderS.SetEvent(IE_GUI_SLIDER_ON_CHANGE, MarkerSliderPress)
	MarkerSliderS.SetVarAssoc("GUI Feedback Level",1)
	LocatorSlider.SetEvent(IE_GUI_BUTTON_ON_PRESS, LocatorSliderPress)
	LocatorSliderS.SetEvent(IE_GUI_SLIDER_ON_CHANGE, LocatorSliderPress)
	LocatorSliderS.SetVarAssoc("Locator Feedback Level",1)
	# Checkboxes: pressing either the label or the box shows the help text;
	# the box itself toggles the associated game variable.
	THac0Rolls.SetEvent(IE_GUI_BUTTON_ON_PRESS, THac0RollsPress)
	THac0RollsB.SetEvent(IE_GUI_BUTTON_ON_PRESS, THac0RollsBPress)
	THac0RollsB.SetFlags(IE_GUI_BUTTON_CHECKBOX, OP_OR)
	THac0RollsB.SetVarAssoc("Rolls",1)
	CombatInfo.SetEvent(IE_GUI_BUTTON_ON_PRESS, CombatInfoPress)
	CombatInfoB.SetEvent(IE_GUI_BUTTON_ON_PRESS, CombatInfoBPress)
	CombatInfoB.SetFlags(IE_GUI_BUTTON_CHECKBOX, OP_OR)
	CombatInfoB.SetVarAssoc("Combat Info",1)
	Actions.SetEvent(IE_GUI_BUTTON_ON_PRESS, ActionsPress)
	ActionsB.SetEvent(IE_GUI_BUTTON_ON_PRESS, ActionsBPress)
	ActionsB.SetFlags(IE_GUI_BUTTON_CHECKBOX, OP_OR)
	ActionsB.SetVarAssoc("Actions",1)
	StateChanges.SetEvent(IE_GUI_BUTTON_ON_PRESS, StateChangesPress)
	StateChangesB.SetEvent(IE_GUI_BUTTON_ON_PRESS, StateChangesBPress)
	StateChangesB.SetFlags(IE_GUI_BUTTON_CHECKBOX, OP_OR)
	StateChangesB.SetVarAssoc("State Changes",1)
	SelectionText.SetEvent(IE_GUI_BUTTON_ON_PRESS, SelectionTextPress)
	SelectionTextB.SetEvent(IE_GUI_BUTTON_ON_PRESS, SelectionTextBPress)
	SelectionTextB.SetFlags(IE_GUI_BUTTON_CHECKBOX, OP_OR)
	SelectionTextB.SetVarAssoc("Selection Text",1)
	Miscellaneous.SetEvent(IE_GUI_BUTTON_ON_PRESS, MiscellaneousPress)
	MiscellaneousB.SetEvent(IE_GUI_BUTTON_ON_PRESS, MiscellaneousBPress)
	MiscellaneousB.SetFlags(IE_GUI_BUTTON_CHECKBOX, OP_OR)
	MiscellaneousB.SetVarAssoc("Miscellaneous Text",1)
	OkButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, OkPress)
	CancelButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, CancelPress)
	FeedbackWindow.SetVisible(WINDOW_VISIBLE)
	return
def MarkerSliderPress():
	# Show the help text for the marker feedback slider.
	TextAreaControl.SetText(18024)
def LocatorSliderPress():
	# Show the help text for the locator feedback slider.
	TextAreaControl.SetText(18025)
def THac0RollsPress():
	# Show the help text for the to-hit roll feedback option.
	TextAreaControl.SetText(18026)
def THac0RollsBPress():
	# Checkbox twin of THac0RollsPress: same help text.
	THac0RollsPress()
def CombatInfoPress():
	# Show the help text for the combat information option.
	TextAreaControl.SetText(18027)
def CombatInfoBPress():
	# Checkbox twin of CombatInfoPress: same help text.
	CombatInfoPress()
def ActionsPress():
	# Show the help text for the actions feedback option.
	TextAreaControl.SetText(18028)
def ActionsBPress():
	# Checkbox twin of ActionsPress: same help text.
	ActionsPress()
def StateChangesPress():
	# Show the help text for the state-change feedback option.
	TextAreaControl.SetText(18029)
def StateChangesBPress():
	# Checkbox twin of StateChangesPress: same help text.
	StateChangesPress()
def SelectionTextPress():
	# Show the help text for the selection text option.
	TextAreaControl.SetText(18030)
def SelectionTextBPress():
	# Checkbox twin of SelectionTextPress: same help text.
	SelectionTextPress()
def MiscellaneousPress():
	# Show the help text for the miscellaneous text option.
	TextAreaControl.SetText(18031)
def MiscellaneousBPress():
	# Checkbox twin of MiscellaneousPress: same help text.
	MiscellaneousPress()
def OkPress():
	# Close the options window (if still loaded) and return to gameplay.
	if FeedbackWindow:
		FeedbackWindow.Unload()
	GemRB.SetNextScript("GamePlay")
def CancelPress():
	# Same as OkPress: sliders/checkboxes wrote their variables directly,
	# so cancelling only closes the window and returns to gameplay.
	if FeedbackWindow:
		FeedbackWindow.Unload()
	GemRB.SetNextScript("GamePlay")
| tomprince/gemrb | gemrb/GUIScripts/iwd2/Feedback.py | Python | gpl-2.0 | 5,453 |
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import network
# Name of the attribute this extension adds to the network resource.
MTU = 'mtu'
# The alias of the extension.
ALIAS = 'net-mtu'
# Whether or not this extension is simply signaling behavior to the user
# or it actively modifies the attribute map.
IS_SHIM_EXTENSION = False
# Whether the extension is marking the adoption of standardattr model for
# legacy resources, or introducing new standardattr attributes. False or
# None if the standardattr model is adopted since the introduction of
# resource extension.
# If this is True, the alias for the extension should be prefixed with
# 'standard-attr-'.
IS_STANDARD_ATTR_EXTENSION = False
# The name of the extension.
NAME = 'Network MTU'
# A prefix for API resources. An empty prefix means that the API is going
# to be exposed at the v2/ level as any other core resource.
API_PREFIX = ''
# The description of the extension.
DESCRIPTION = "Provides MTU attribute for a network resource."
# A timestamp of when the extension was introduced.
UPDATED_TIMESTAMP = "2015-03-25T10:00:00-00:00"
# MTU is read-only (computed by the server): neither POST nor PUT may set it,
# but it can be shown, filtered on and sorted by.
RESOURCE_ATTRIBUTE_MAP = {
    network.COLLECTION_NAME: {
        MTU: {'allow_post': False, 'allow_put': False,
              'is_visible': True, 'is_filter': True, 'is_sort_key': True},
    },
}
# The subresource attribute map for the extension. It adds child resources
# to main extension's resource. The subresource map must have a parent and
# a parameters entry. If an extension does not need such a map, None can
# be specified (mandatory).
SUB_RESOURCE_ATTRIBUTE_MAP = {}
# The action map: it associates verbs with methods to be performed on
# the API resource.
ACTION_MAP = {}
# The action status.
ACTION_STATUS = {
}
# The list of required extensions.
REQUIRED_EXTENSIONS = []
# The list of optional extensions.
OPTIONAL_EXTENSIONS = []
| openstack/neutron-lib | neutron_lib/api/definitions/network_mtu.py | Python | apache-2.0 | 2,378 |
"""
Definition of a sample, corresponding to a multilayer.
A multilayer is a series of horizontal layers deposited on a substrate.
A multilayer may also have no layer and simply be considered as a substrate.
The substrate composition, and the composition, thickness and mass thickness
of each layer are defined by the :class:`Sample` class.
"""
__all__ = ['CONC_UNKNOWN', 'CONC_DIFF', 'composition_from_formula',
'Layer', 'Sample']
# Standard library modules.
import string
from collections import defaultdict
# Third party modules.
from pyparsing import Word, Group, Optional, OneOrMore
# Local modules.
import stratagemtools.element_properties as ep
# Globals and constants variables.
CONC_UNKNOWN = None
"""Flag when the composition of an element is unknown."""
CONC_DIFF = '?'
"""Flag when the composition of an element should be calculated by difference."""
# pyparsing grammar for chemical formulas: an element is an uppercase letter
# followed by lowercase letters; its count defaults to "1" when omitted.
_symbol = Word(string.ascii_uppercase, string.ascii_lowercase)
_digit = Word(string.digits + ".")
_elementRef = Group(_symbol + Optional(_digit, default="1"))
CHEMICAL_FORMULA_PARSER = OneOrMore(_elementRef)
def composition_from_formula(formula):
    """
    Calculates the composition (expressed in weight fractions) of a chemical
    formula.

    Example::

        >>> composition_from_formula('Al2O3')
        ... {8: 0.4707492883573059, 13: 0.5292507116426941}

    :arg formula: a valid chemical formula
    :type formula: :class:`str`

    :return: composition (expressed in weight fractions). The keys of the
        :class:`dict` are atomic numbers and the values, weight fractions.
    :rtype: :class:`dict`
    """
    # Parse the formula into (atomic number, atomic fraction) pairs.
    parsed = CHEMICAL_FORMULA_PARSER.parseString(formula)
    pairs = [(ep.atomic_number(symbol=symbol), float(fraction))
             for symbol, fraction in parsed]

    # Total atomic mass of one formula unit.
    totalatomicmass = sum(fraction * ep.atomic_mass_kg_mol(z)
                          for z, fraction in pairs)

    # Accumulate weight fractions; repeated elements (e.g. CH3COOH) add up.
    composition = defaultdict(float)
    for z, fraction in pairs:
        composition[z] += fraction * ep.atomic_mass_kg_mol(z) / totalatomicmass

    return composition
class Layer:
    """
    Object to store a layer definition.

    Once created a layer object is immutable.

    .. note:: Should not be used directly, but via the :class:`Sample`'s methods
       :meth:`add_layer <.Sample.add_layer>` and
       :meth:`get_layer <.Sample.get_layer>`.
    """

    def __init__(self, composition, thickness_m=None,
                 mass_thickness_kg_m2=None, density_kg_m3=None):
        # The composition is only "known" when every weight fraction is an
        # actual float (i.e. no CONC_UNKNOWN / CONC_DIFF flag).
        self._composition = dict(composition)
        self._is_composition_known = \
            all(isinstance(fraction, float)
                for fraction in self._composition.values())

        # Derive the density from the composition when possible, using the
        # rule of mixtures: 1/rho = sum(w_i / rho_i).
        if density_kg_m3 is None and self._is_composition_known:
            inverse_density = 0.0
            for z, fraction in self._composition.items():
                inverse_density += fraction / ep.mass_density_kg_m3(z)
            density_kg_m3 = 1.0 / inverse_density

        self._density_kg_m3 = density_kg_m3
        self._is_density_known = density_kg_m3 is not None

        # With a density available, whichever of thickness / mass thickness
        # is missing can be derived from the other.
        if density_kg_m3 is not None:
            if thickness_m is not None and mass_thickness_kg_m2 is None:
                mass_thickness_kg_m2 = thickness_m * density_kg_m3
            elif thickness_m is None and mass_thickness_kg_m2 is not None:
                thickness_m = mass_thickness_kg_m2 / density_kg_m3

        self._thickness_m = thickness_m
        self._mass_thickness_kg_m2 = mass_thickness_kg_m2
        self._is_thickness_known = (thickness_m is not None and
                                    mass_thickness_kg_m2 is not None)

    def __repr__(self):
        composition_text = \
            ', '.join('%s: %.4f' % (ep.symbol(z), fraction)
                      for z, fraction in self.composition.items())
        if self.is_thickness_known():
            thickness_text = '%s nm' % (self.thickness_m * 1e9,)
        else:
            thickness_text = 'unknown'
        return '<Layer(composition={%s}, thickness=%s)>' % \
            (composition_text, thickness_text)

    def is_composition_known(self):
        """
        Returns whether the composition is known, i.e. contains no
        :data:`CONC_UNKNOWN` or :data:`CONC_DIFF` flag.

        :rtype: :class:`bool`
        """
        return self._is_composition_known

    def is_thickness_known(self):
        """
        Returns whether the thickness is known.

        :rtype: :class:`bool`
        """
        return self._is_thickness_known

    def is_density_known(self):
        """
        Returns whether the density is known.

        :rtype: :class:`bool`
        """
        return self._is_density_known

    @property
    def composition(self):
        """
        Returns a copy of the composition :class:`dict`.
        The composition, and any other parameters, cannot be modified.

        :return: composition (expressed in weight fractions). The keys of the
            :class:`dict` are atomic numbers and the values, weight fractions.
        :rtype: :class:`dict`
        """
        return dict(self._composition)

    @property
    def thickness_m(self):
        """
        Returns thickness in meters.
        It can be ``None`` if not defined.

        :rtype: :class:`float`
        """
        return self._thickness_m

    @property
    def mass_thickness_kg_m2(self):
        """
        Returns mass thickness in kg/m2.
        It can be ``None`` if not defined.

        :rtype: :class:`float`
        """
        return self._mass_thickness_kg_m2

    @property
    def density_kg_m3(self):
        """
        Returns density in kg/m3.
        It can be ``None`` if not defined.

        :rtype: :class:`float`
        """
        return self._density_kg_m3
class Sample:
    """
    Object to store a multilayer sample definition.
    """

    def __init__(self, composition, density_kg_m3=None):
        """
        :arg composition: composition of the substrate as :class:`dict` where
            the keys are atomic numbers and the values, weight fractions.
            If the weight fraction is not known, set it to :data:`CONC_UNKNOWN`,
            if the weight fraction should be calculated by difference, set it
            to :data:`CONC_DIFF`.
        :type composition: :class:`dict`

        :arg density_kg_m3: mass density in kilograms per cubic meter
            (optional). If the composition is known, the density will be
            automatically calculated based on:

            .. math::

               \\frac{1}{\\rho} = \\sum \\frac{w_i}{\\rho_i}

            where :math:`w_i` and :math:`\\rho_i` are respectively the weight
            fraction and mass density of element :math:`i`.
        :type density_kg_m3: :class:`float`
        """
        # The substrate is modelled as a bottom "layer" with no thickness.
        self._substrate = Layer(composition, density_kg_m3=density_kg_m3)
        self._layers = []

    def __repr__(self):
        description = ', '.join('%s: %.4f' % (ep.symbol(z), fraction)
                                for z, fraction in self.composition.items())
        return '<%s(%s, %i layers)>' % \
            (self.__class__.__name__, description, len(self._layers))

    def add_layer(self, composition, thickness_m=None,
                  mass_thickness_kg_m2=None, density_kg_m3=None):
        """
        Adds a layer below the previously added layer, or if no layer was added
        on top of the substrate.

        :arg composition: composition of the layer as :class:`dict` where
            the keys are atomic numbers and the values, weight fractions.
            If the weight fraction is not known, set it to :data:`CONC_UNKNOWN`,
            if the weight fraction should be calculated by difference, set it
            to :data:`CONC_DIFF`.
        :type composition: :class:`dict`

        :arg thickness_m: thickness of the layer in meters (optional). If the
            *mass_thickness_kg_m2* and *density_kg_m3* are known, the thickness
            will be automatically calculated.
        :type thickness_m: :class:`float`

        :arg mass_thickness_kg_m2: mass thickness of the layer in kilograms per
            square meter (optional). The mass thickness is defined as the
            thickness times the density. If *thickness_m* and *density_kg_m3*
            are known the mass thickness will be automatically calculated.
        :type mass_thickness_kg_m2: :class:`float`

        :arg density_kg_m3: mass density in kilograms per cubic meter
            (optional). If the composition is known, the density will be
            automatically calculated based on:

            .. math::

               \\frac{1}{\\rho} = \\sum \\frac{w_i}{\\rho_i}

            where :math:`w_i` and :math:`\\rho_i` are respectively the weight
            fraction and mass density of element :math:`i`.
        :type density_kg_m3: :class:`float`

        :return: a layer
        :rtype: :class:`Layer`
        """
        newlayer = Layer(composition, thickness_m,
                         mass_thickness_kg_m2, density_kg_m3)
        self._layers.append(newlayer)
        return newlayer

    def pop_layer(self, index):
        """
        Removes the layer at *index*.

        :arg index: index of the layer to be removed
        :type index: :class:`int`
        """
        del self._layers[index]

    def get_layer(self, index):
        """
        Returns the layer at *index*.
        Index *0* is the first layer of the multilayer, while index *-1* is the
        last layer, the first one above the substrate.

        :arg index: index of the layer
        :type index: :class:`int`

        :return: a layer
        :rtype: :class:`Layer`
        """
        return self._layers[index]

    @property
    def composition(self):
        """
        Returns a copy of the composition of the substrate.
        The composition, and any other parameters, cannot be modified.

        :return: composition (expressed in weight fractions). The keys of the
            :class:`dict` are atomic numbers and the values, weight fractions.
        :rtype: :class:`dict`
        """
        return self._substrate.composition

    @property
    def substrate(self):
        """
        Returns the "layer" of the substrate, a :class:`Layer` object
        corresponding to the composition and density of the substrate.

        :rtype: :class:`Layer`
        """
        return self._substrate

    @property
    def layers(self):
        """
        Returns a copy of layers of this sample.
        It cannot be modified.
        The layers are ordered from top to bottom.

        :rtype: :class:`tuple`
        """
        return tuple(self._layers)
| ppinard/stratagemtools | stratagemtools/sample.py | Python | mit | 11,132 |
#!/usr/bin/env python
"""
=================================================
sMRI: Regional Tessellation and Surface Smoothing
=================================================

Introduction
============

This script, tessellation_tutorial.py, demonstrates the use of create_tessellation_flow from nipype.workflows.smri.freesurfer, and it can be run with::

    python tessellation_tutorial.py

This example requires that the user has Freesurfer installed, and that the Freesurfer directory for 'fsaverage' is present.

.. seealso::

    ConnectomeViewer
        The Connectome Viewer connects Multi-Modal Multi-Scale Neuroimaging and Network Datasets For Analysis and Visualization in Python.

    http://www.geuz.org/gmsh/
        Gmsh: a three-dimensional finite element mesh generator with built-in pre- and post-processing facilities

    http://www.blender.org/
        Blender is the free open source 3D content creation suite, available for all major operating systems under the GNU General Public License.

.. warning::

    This workflow will take several hours to finish entirely, since smoothing the larger cortical surfaces is very time consuming.

Packages and Data Setup
=======================

Import the necessary modules and workflow from nipype.
"""

import nipype.pipeline.engine as pe  # pypeline engine
import nipype.interfaces.cmtk as cmtk
import nipype.interfaces.io as nio  # Data i/o
import os
import os.path as op
from nipype.workflows.smri.freesurfer import create_tessellation_flow

"""
Directories
===========

Set the default directory and lookup table (LUT) paths
"""

fs_dir = os.environ['FREESURFER_HOME']
lookup_file = op.join(fs_dir, 'FreeSurferColorLUT.txt')
subjects_dir = op.join(fs_dir, 'subjects/')
output_dir = './tessellate_tutorial'

"""
Inputs
======

Create the tessellation workflow and set inputs
Here we will choose Gifti (gii) as the output format, because
we want to be able to view the surface in ConnectomeViewer.

If you intend to view the meshes in gmsh or Blender, you should change
the workflow creation to use stereolithographic (stl) format.
"""

tessflow = create_tessellation_flow(name='tessflow', out_format='gii')
tessflow.inputs.inputspec.subject_id = 'fsaverage'
tessflow.inputs.inputspec.subjects_dir = subjects_dir
tessflow.inputs.inputspec.lookup_file = lookup_file

"""
We also create a conditional node to package the surfaces for ConnectomeViewer.
Simply set cff to "False" to ignore this step.
"""

cff = True
if cff:
    cff = pe.Node(interface=cmtk.CFFConverter(), name='cff')
    cff.inputs.out_file = 'Meshes.cff'

"""
Outputs
=======

Create a datasink to organize the smoothed meshes
Using regular-expression substitutions we can remove the extraneous folders generated by the mapnode.
"""

datasink = pe.Node(interface=nio.DataSink(), name="datasink")
datasink.inputs.base_directory = 'meshes'
# BUGFIX: use a raw string so '\d' reaches the regex engine verbatim instead
# of being an invalid string escape (a DeprecationWarning/SyntaxWarning on
# modern Python).
datasink.inputs.regexp_substitutions = [(r'_smoother[\d]*/', '')]

"""
Execution
=========

Finally, create and run another pipeline that connects the workflow and datasink
"""

tesspipe = pe.Workflow(name='tessellate_tutorial')
tesspipe.base_dir = output_dir
tesspipe.connect([(tessflow, datasink, [('outputspec.meshes', '@meshes.all')])])

"""
If the surfaces are to be packaged, this will connect the CFFConverter
node to the tessellation and smoothing workflow, as well as to the datasink.
"""

if cff:
    tesspipe.connect([(tessflow, cff, [('outputspec.meshes', 'gifti_surfaces')])])
    tesspipe.connect([(cff, datasink, [('connectome_file', '@cff')])])

tesspipe.run()
| BrainIntensive/OnlineBrainIntensive | resources/nipype/nipype/examples/tessellation_tutorial.py | Python | mit | 3,562 |
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
'''
BitBake 'Fetch' implementations
This implementation is for Secure Shell (SSH), and attempts to comply with the
IETF secsh internet draft:
http://tools.ietf.org/wg/secsh/draft-ietf-secsh-scp-sftp-ssh-uri/
Currently does not support the sftp parameters, as this uses scp
Also does not support the 'fingerprint' connection parameter.
'''
# Copyright (C) 2006 OpenedHand Ltd.
#
#
# Based in part on svk.py:
# Copyright (C) 2006 Holger Hans Peter Freyther
# Based on svn.py:
# Copyright (C) 2003, 2004 Chris Larson
# Based on functions from the base bb module:
# Copyright 2003 Holger Schurig
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import re, os
from bb import data
from bb.fetch2 import FetchMethod
from bb.fetch2 import FetchError
from bb.fetch2 import logger
from bb.fetch2 import runfetchcmd
__pattern__ = re.compile(r'''
\s* # Skip leading whitespace
ssh:// # scheme
( # Optional username/password block
(?P<user>\S+) # username
(:(?P<pass>\S+))? # colon followed by the password (optional)
)?
(?P<cparam>(;[^;]+)*)? # connection parameters block (optional)
@
(?P<host>\S+?) # non-greedy match of the host
(:(?P<port>[0-9]+))? # colon followed by the port (optional)
/
(?P<path>[^;]+) # path on the remote system, may be absolute or relative,
# and may include the use of '~' to reference the remote home
# directory
(?P<sparam>(;[^;]+)*)? # parameters block (optional)
$
''', re.VERBOSE)
class SSH(FetchMethod):
    '''Class to fetch a module or modules via Secure Shell'''

    def supports(self, url, urldata, d):
        # Only claim URLs that match the full ssh:// pattern above.
        return __pattern__.match(url) != None

    def localpath(self, url, urldata, d):
        # Local download location is DL_DIR/<host>/<basename of remote path>.
        m = __pattern__.match(urldata.url)
        path = m.group('path')
        host = m.group('host')
        lpath = os.path.join(data.getVar('DL_DIR', d, True), host, os.path.basename(path))
        return lpath

    def download(self, url, urldata, d):
        '''Fetch the remote path with scp into DL_DIR/<host>/.'''
        dldir = data.getVar('DL_DIR', d, True)

        m = __pattern__.match(url)
        path = m.group('path')
        host = m.group('host')
        port = m.group('port')
        user = m.group('user')
        password = m.group('pass')

        ldir = os.path.join(dldir, host)
        lpath = os.path.join(ldir, os.path.basename(path))

        if not os.path.exists(ldir):
            os.makedirs(ldir)

        # Build the scp port option (-P) only when a port was given.
        if port:
            port = '-P %s' % port
        else:
            port = ''

        # Assemble the scp source spec: [user[:password]@]host:path
        if user:
            fr = user
            if password:
                fr += ':%s' % password
            fr += '@%s' % host
        else:
            fr = host
        fr += ':%s' % path

        # NOTE(review): the 'commands' module is Python 2 only; commands.mkarg
        # shell-quotes its argument (and prepends a space).
        import commands
        cmd = 'scp -B -r %s %s %s/' % (
            port,
            commands.mkarg(fr),
            commands.mkarg(ldir)
        )

        # NOTE(review): 'bb' is not imported in this file (only 'from bb
        # import data') -- this presumably relies on 'bb' being available in
        # the bitbake runtime; confirm it is in scope when download() runs.
        bb.fetch2.check_network_access(d, cmd)

        runfetchcmd(cmd, d)
| hulifox008/bitbake | lib/bb/fetch2/ssh.py | Python | gpl-2.0 | 3,701 |
"""
The guts for the i386 envi opcode disassembler.
"""
import struct

import envi
import envi.bits as e_bits

import opcode86
# The opcode decode tables used by i386Disasm.disasm().
all_tables = opcode86.tables86

# Grab our register enums etc...
from envi.archs.i386.regs import *

# Our instruction prefix masks
# NOTE: table 3-4 (section 3.6) of intel 1 shows how REX/OP_SIZE
# interact...
INSTR_PREFIX = 0x0001
PREFIX_LOCK = 0x0002
PREFIX_REPNZ = 0x0004
PREFIX_REPZ = 0x0008
PREFIX_REP = 0x0010
PREFIX_REP_SIMD = 0x0020
PREFIX_OP_SIZE = 0x0040
PREFIX_ADDR_SIZE = 0x0080
PREFIX_SIMD = 0x0100
PREFIX_CS = 0x0200
PREFIX_SS = 0x0400
PREFIX_DS = 0x0800
PREFIX_ES = 0x1000
PREFIX_FS = 0x2000
PREFIX_GS = 0x4000
PREFIX_REG_MASK = 0x8000

# envi.registers meta offsets
RMETA_LOW8 = 0x00080000
RMETA_HIGH8 = 0x08080000
RMETA_LOW16 = 0x00100000

# Use a list here instead of a dict for speed (max 255 anyway)
i386_prefixes = [None for i in range(256)]
i386_prefixes[0xF0] = PREFIX_LOCK
i386_prefixes[0xF2] = PREFIX_REPNZ
i386_prefixes[0xF3] = PREFIX_REP
i386_prefixes[0x2E] = PREFIX_CS
i386_prefixes[0x36] = PREFIX_SS
i386_prefixes[0x3E] = PREFIX_DS
i386_prefixes[0x26] = PREFIX_ES
i386_prefixes[0x64] = PREFIX_FS
i386_prefixes[0x65] = PREFIX_GS
i386_prefixes[0x66] = PREFIX_OP_SIZE
i386_prefixes[0x67] = PREFIX_ADDR_SIZE

# The scale byte index into this for multiplier imm
scale_lookup = (1, 2, 4, 8)

# A set of instructions that are considered privileged (mark with IF_PRIV)
# FIXME this should be part of the opcode tables!
priv_lookup = {
    "int": True,
    "in": True,
    "out": True,
    "insb": True,
    "outsb": True,
    "insd": True,
    "outsd": True,
    "vmcall": True,
    "vmlaunch": True,
    "vmresume": True,
    "vmxoff": True,
    "vmread": True,
    "vmwrite": True,
    "rsm": True,
    "lar": True,
    "lsl": True,
    "clts": True,
    "invd": True,
    "wbinvd": True,
    "wrmsr": True,
    "rdmsr": True,
    "sysexit": True,
    "lgdt": True,
    "lidt": True,
    "lmsw": True,
    "monitor": True,
    "mwait": True,
    "vmclear": True,
    "vmptrld": True,
    "vmptrst": True,
    "vmxon": True,
}

# Map of codes to their respective envi flags
iflag_lookup = {
    opcode86.INS_RET: envi.IF_NOFALL | envi.IF_RET,
    opcode86.INS_CALL: envi.IF_CALL,
    opcode86.INS_HALT: envi.IF_NOFALL,
    opcode86.INS_CALLCC: envi.IF_CALL,
    opcode86.INS_BRANCH: envi.IF_NOFALL | envi.IF_BRANCH,
    opcode86.INS_BRANCHCC: envi.IF_BRANCH,
}

# Human-readable operand size names, indexed by size in bytes.
sizenames = ["" for x in range(17)]
sizenames[1] = "byte"
sizenames[2] = "word"
sizenames[4] = "dword"
sizenames[8] = "qword"
sizenames[16] = "oword"
def addrToName(mcanv, va):
    """
    Return a display name for *va*: the repr of the canvas' symbol when one
    is known for that address, otherwise the address as 8-digit hex.
    """
    sym = mcanv.syms.getSymByAddr(va)
    return repr(sym) if sym != None else "0x%.8x" % va
###########################################################################
#
# Operand objects for the i386 architecture
#
class i386RegOper(envi.RegisterOper):
    """
    A plain register operand (e.g. ``eax``).
    """
    def __init__(self, reg, tsize):
        self.reg = reg
        self.tsize = tsize

    def repr(self, op):
        return self._dis_regctx.getRegisterName(self.reg)

    def getOperValue(self, op, emu=None):
        if emu == None: return None # This operand type requires an emulator
        return emu.getRegister(self.reg)

    def setOperValue(self, op, emu, value):
        emu.setRegister(self.reg, value)

    def render(self, mcanv, op, idx):
        hint = mcanv.syms.getSymHint(op.va, idx)
        if hint != None:
            # BUGFIX: this branch previously referenced ``name`` before it
            # was ever assigned (NameError); render the hint text instead.
            mcanv.addNameText(hint, typename="registers")
        else:
            name = self._dis_regctx.getRegisterName(self.reg)
            mcanv.addNameText(name, typename="registers")

    def __eq__(self, other):
        if not isinstance(other, i386RegOper):
            return False
        if other.reg != self.reg:
            return False
        if other.tsize != self.tsize:
            return False
        return True
class i386ImmOper(envi.ImmedOper):
    """
    An operand representing an immediate.
    """
    def __init__(self, imm, tsize):
        self.imm = imm
        self.tsize = tsize

    def repr(self, op):
        # Small immediates are shown in decimal, larger ones in hex.
        ival = self.imm
        if ival > 4096:
            return "0x%.8x" % ival
        return str(ival)

    def getOperValue(self, op, emu=None):
        return self.imm

    def render(self, mcanv, op, idx):
        value = self.imm
        hint = mcanv.syms.getSymHint(op.va, idx)
        if hint != None:
            if mcanv.mem.isValidPointer(value):
                mcanv.addVaText(hint, value)
            else:
                mcanv.addNameText(hint)
        elif mcanv.mem.isValidPointer(value):
            name = addrToName(mcanv, value)
            mcanv.addVaText(name, value)
        else:
            # CONSISTENCY FIX: use the same threshold/comparison as repr()
            # (this branch previously used ``>= 4096`` vs repr's ``> 4096``,
            # so 4096 itself rendered differently in the two paths).
            if value > 4096:
                mcanv.addNameText('0x%.8x' % value)
            else:
                mcanv.addNameText(str(value))

    def __eq__(self, other):
        if not isinstance(other, i386ImmOper):
            return False
        if other.imm != self.imm:
            return False
        if other.tsize != self.tsize:
            return False
        return True
class i386PcRelOper(envi.Operand):
    """
    This is the operand used for EIP relative offsets
    for operands on instructions like jmp/call
    """
    def __init__(self, imm, tsize):
        self.imm = imm
        self.tsize = tsize

    def repr(self, op):
        return "0x%.8x" % (op.va + op.size + self.imm)

    def isImmed(self):
        return True # FIXME trying this out....

    def getOperValue(self, op, emu=None):
        # Branch target: address of the *next* instruction plus the offset.
        return op.va + op.size + self.imm

    def render(self, mcanv, op, idx):
        # BUGFIX: ``value`` was previously only assigned in the else branch,
        # so rendering with a symbol hint raised NameError; compute the
        # branch target before checking for a hint.
        value = op.va + op.size + self.imm
        hint = mcanv.syms.getSymHint(op.va, idx)
        if hint != None:
            mcanv.addVaText(hint, value)
        else:
            name = addrToName(mcanv, value)
            mcanv.addVaText(name, value)

    def __eq__(self, other):
        if not isinstance(other, i386PcRelOper):
            return False
        if other.imm != self.imm:
            return False
        if other.tsize != self.tsize:
            return False
        return True
class i386RegMemOper(envi.DerefOper):
    """
    An operand which reads/writes memory at ``[reg + disp]``, where ``reg``
    is a register index and ``disp`` a signed displacement.
    """
    def __init__(self, reg, tsize, disp=0):
        self.reg = reg
        self.tsize = tsize
        self.disp = disp
        self._is_deref = True

    def repr(self, op):
        prefix = sizenames[self.tsize]
        regname = self._dis_regctx.getRegisterName(self.reg)
        if self.disp > 0:
            return "%s [%s + %d]" % (prefix, regname, self.disp)
        if self.disp < 0:
            return "%s [%s - %d]" % (prefix, regname, -self.disp)
        return "%s [%s]" % (prefix, regname)

    def getOperValue(self, op, emu=None):
        # Dereferencing memory requires an emulator.
        if emu is None:
            return None
        return emu.readMemValue(self.getOperAddr(op, emu), self.tsize)

    def setOperValue(self, op, emu, val):
        emu.writeMemValue(self.getOperAddr(op, emu), val, self.tsize)

    def getOperAddr(self, op, emu):
        if emu is None:
            return None
        # Account for the x86 segment base of this instruction.
        segbase, segsize = emu.getSegmentInfo(op)
        return segbase + emu.getRegister(self.reg) + self.disp

    def isDeref(self):
        # The disassembler may reach in and clear this (e.g. for lea).
        return self._is_deref

    def render(self, mcanv, op, idx):
        mcanv.addNameText(sizenames[self.tsize])
        mcanv.addText(" [")
        mcanv.addNameText(self._dis_regctx.getRegisterName(self.reg),
                          typename="registers")

        hint = mcanv.syms.getSymHint(op.va, idx)
        if hint is not None:
            mcanv.addText(" + ")
            mcanv.addNameText(hint)
        elif self.disp > 0:
            mcanv.addText(" + ")
            mcanv.addNameText(str(self.disp))
        elif self.disp < 0:
            mcanv.addText(" - ")
            mcanv.addNameText(str(-self.disp))
        mcanv.addText("]")

    def __eq__(self, other):
        if not isinstance(other, i386RegMemOper):
            return False
        return ((self.reg, self.disp, self.tsize) ==
                (other.reg, other.disp, other.tsize))
class i386ImmMemOper(envi.DerefOper):
    """
    An operand which represents the dereference (memory read/write) of
    a memory location given by an immediate address.
    """
    def __init__(self, imm, tsize):
        self.imm = imm
        self.tsize = tsize

    def repr(self, op):
        return "%s [0x%.8x]" % (sizenames[self.tsize], self.imm)

    def getOperValue(self, op, emu=None):
        # Reading memory requires an emulator.
        if emu is None:
            return None
        return emu.readMemValue(self.getOperAddr(op, emu), self.tsize)

    def setOperValue(self, op, emu, val):
        emu.writeMemValue(self.getOperAddr(op, emu), val, self.tsize)

    def getOperAddr(self, op, emu=None):
        # Without an emulator the raw immediate address is still returned;
        # with one, the x86 segment base is added in.
        addr = self.imm
        if emu is not None:
            segbase, segsize = emu.getSegmentInfo(op)
            addr += segbase
        return addr

    def render(self, mcanv, op, idx):
        mcanv.addNameText(sizenames[self.tsize])
        mcanv.addText(" [")
        target = self.imm
        hint = mcanv.syms.getSymHint(op.va, idx)
        if hint is not None:
            mcanv.addVaText(hint, target)
        else:
            mcanv.addVaText(addrToName(mcanv, target), target)
        mcanv.addText("]")

    def __eq__(self, other):
        if not isinstance(other, i386ImmMemOper):
            return False
        return (self.imm, self.tsize) == (other.imm, other.tsize)
class i386SibOper(envi.DerefOper):
    """
    An operand which represents the result of reading/writting memory from the
    dereference (with possible displacement) from a given register.

    Models a SIB-form address: [reg or imm] + index * scale + disp.
    Typically only one of *reg* (base register) or *imm* (absolute base) is
    set by the modrm/sib parser.
    """
    def __init__(self, tsize, reg=None, imm=None, index=None, scale=1, disp=0):
        self.reg = reg
        self.imm = imm
        self.index = index
        self.scale = scale
        self.tsize = tsize
        self.disp = disp
        self._is_deref = True

    def __eq__(self, other):
        if not isinstance(other, i386SibOper):
            return False
        if other.imm != self.imm:
            return False
        if other.reg != self.reg:
            return False
        if other.index != self.index:
            return False
        if other.scale != self.scale:
            return False
        if other.disp != self.disp:
            return False
        if other.tsize != self.tsize:
            return False
        return True

    def isDeref(self):
        return self._is_deref

    def repr(self, op):
        r = "%s [" % sizenames[self.tsize]

        if self.reg != None:
            r += self._dis_regctx.getRegisterName(self.reg)

        if self.imm != None:
            r += "0x%.8x" % self.imm

        if self.index != None:
            r += " + %s" % self._dis_regctx.getRegisterName(self.index)
            if self.scale != 1:
                r += " * %d" % self.scale

        if self.disp > 0:
            r += " + %d" % self.disp
        elif self.disp < 0:
            r += " - %d" % abs(self.disp)

        r += "]"

        return r

    def getOperValue(self, op, emu=None):
        if emu == None: return None # This operand type requires an emulator
        return emu.readMemValue(self.getOperAddr(op, emu), self.tsize)

    def setOperValue(self, op, emu, val):
        emu.writeMemValue(self.getOperAddr(op, emu), val, self.tsize)

    def getOperAddr(self, op, emu=None):
        # Effective address: base (imm or reg) + index*scale + segment + disp.
        if emu == None: return None # This operand type requires an emulator

        ret = 0

        if self.imm != None:
            ret += self.imm

        if self.reg != None:
            ret += emu.getRegister(self.reg)

        if self.index != None:
            ret += (emu.getRegister(self.index) * self.scale)

        # Handle x86 segmentation
        base, size = emu.getSegmentInfo(op)
        ret += base

        return ret + self.disp

    def _getOperBase(self, emu=None):
        # Special SIB only method for getting the SIB base value
        # (used by i386Opcode.getBranches() for jump-table detection).
        if self.imm:
            return self.imm
        if emu:
            return emu.getRegister(self.reg)
        return None

    def render(self, mcanv, op, idx):
        mcanv.addNameText(sizenames[self.tsize])
        mcanv.addText(" [")
        if self.imm != None:
            name = addrToName(mcanv, self.imm)
            mcanv.addVaText(name, self.imm)

        if self.reg != None:
            name = self._dis_regctx.getRegisterName(self.reg)
            mcanv.addNameText(name, typename="registers")

        # Does our SIB have a scale
        if self.index != None:
            mcanv.addText(" + ")
            name = self._dis_regctx.getRegisterName(self.index)
            mcanv.addNameText(name, typename="registers")
            if self.scale != 1:
                mcanv.addText(" * ")
                mcanv.addNameText(str(self.scale))

        hint = mcanv.syms.getSymHint(op.va, idx)
        if hint != None:
            mcanv.addText(" + ")
            mcanv.addNameText(hint)
        else:
            # If we have a displacement, add it.
            if self.disp != 0:
                mcanv.addText(" + ")
                mcanv.addNameText(str(self.disp))

        mcanv.addText("]")
class i386Opcode(envi.Opcode):
    """
    A decoded i386 instruction. (NOTE: this module targets Python 2 --
    see the xrange use in render().)
    """

    # Printable prefix names
    prefix_names = [
        (PREFIX_LOCK, "lock"),
        (PREFIX_REPNZ, "repnz"),
        (PREFIX_REP, "rep"),
        (PREFIX_CS, "cs"),
        (PREFIX_SS, "ss"),
        (PREFIX_DS, "ds"),
        (PREFIX_ES, "es"),
        (PREFIX_FS, "fs"),
        (PREFIX_GS, "gs"),
    ]

    def getBranches(self, emu=None):
        # Returns a list of (va, branch_flags) tuples for every place this
        # instruction may transfer control to, including fall-through.
        ret = []

        # To start with we have no flags.
        flags = 0
        addb = False

        # If we are a conditional branch, even our fallthrough
        # case is conditional...
        if self.opcode == opcode86.INS_BRANCHCC:
            flags |= envi.BR_COND
            addb = True

        # If we can fall through, reflect that...
        if not self.iflags & envi.IF_NOFALL:
            ret.append((self.va + self.size, flags|envi.BR_FALL))

        # In intel, if we have no operands, it has no
        # further branches...
        if len(self.opers) == 0:
            return ret

        # Check for a call...
        if self.opcode == opcode86.INS_CALL:
            flags |= envi.BR_PROC
            addb = True

        # A conditional call? really? what compiler did you use? ;)
        elif self.opcode == opcode86.INS_CALLCC:
            flags |= (envi.BR_PROC | envi.BR_COND)
            addb = True

        elif self.opcode == opcode86.INS_BRANCH:
            # A scale-4 SIB branch operand is treated as a jump table.
            oper0 = self.opers[0]
            if isinstance(oper0, i386SibOper) and oper0.scale == 4:
                # In the case with no emulator, note that our deref is
                # from the base of a table. If we have one, parse out all the
                # valid pointers from our base
                base = oper0._getOperBase(emu)
                if emu == None:
                    ret.append((base, flags | envi.BR_DEREF | envi.BR_TABLE))
                else:
                    # Since we're parsing this out, lets just resolve the derefs
                    # for our caller...
                    dest = emu.readMemValue(base, oper0.tsize)
                    while emu.isValidPointer(dest):
                        ret.append((dest, envi.BR_COND))
                        base += oper0.tsize
                        dest = emu.readMemValue(base, oper0.tsize)
            else:
                addb = True

        if addb:
            # For deref operands report the *address* being dereferenced;
            # otherwise report the operand's (possibly emulated) value.
            oper0 = self.opers[0]
            if oper0.isDeref():
                flags |= envi.BR_DEREF
                tova = oper0.getOperAddr(self, emu=emu)
            else:
                tova = oper0.getOperValue(self, emu=emu)

            ret.append((tova, flags))

        return ret

    def render(self, mcanv):
        """
        Render this opcode to the specified memory canvas
        """
        if self.prefixes:
            pfx = self.getPrefixName()
            if pfx:
                mcanv.addNameText("%s: " % pfx, pfx)

        mcanv.addNameText(self.mnem, typename="mnemonic")
        mcanv.addText(" ")

        # Allow each of our operands to render
        imax = len(self.opers)
        lasti = imax - 1
        for i in xrange(imax):
            oper = self.opers[i]
            oper.render(mcanv, self, i)
            if i != lasti:
                mcanv.addText(",")
# NOTE(review): operand_range is not referenced in this chunk -- presumably
# the set of valid multi-byte operand widths; confirm against callers.
operand_range = (2, 3, 4)

# Disassembler mode selectors (index into opcode86.OPERSIZE size lists).
MODE_16 = 0
MODE_32 = 1
MODE_64 = 2
class i386Disasm:
    def __init__(self, mode=MODE_32):
        # NOTE(review): the ``mode`` argument is ignored here and the mode is
        # hardcoded to MODE_32 -- presumably it should be ``mode``; confirm
        # whether subclasses (e.g. a 64-bit disassembler) rely on this.
        self._dis_mode = MODE_32
        self._dis_prefixes = i386_prefixes
        self._dis_regctx = i386RegisterContext()

        # This will make function lookups nice and quick
        # (indexed by ADDRMETH_* >> 16; M and R share the modrm parser).
        self._dis_amethods = [ None for x in range(22) ]
        self._dis_amethods[opcode86.ADDRMETH_A>>16] = self.ameth_a
        self._dis_amethods[opcode86.ADDRMETH_C>>16] = self.ameth_c
        self._dis_amethods[opcode86.ADDRMETH_D>>16] = self.ameth_d
        self._dis_amethods[opcode86.ADDRMETH_E>>16] = self.ameth_e
        self._dis_amethods[opcode86.ADDRMETH_M>>16] = self.ameth_e
        self._dis_amethods[opcode86.ADDRMETH_N>>16] = self.ameth_n
        self._dis_amethods[opcode86.ADDRMETH_Q>>16] = self.ameth_q
        self._dis_amethods[opcode86.ADDRMETH_R>>16] = self.ameth_e
        self._dis_amethods[opcode86.ADDRMETH_W>>16] = self.ameth_w
        self._dis_amethods[opcode86.ADDRMETH_I>>16] = self.ameth_i
        self._dis_amethods[opcode86.ADDRMETH_J>>16] = self.ameth_j
        self._dis_amethods[opcode86.ADDRMETH_O>>16] = self.ameth_o
        self._dis_amethods[opcode86.ADDRMETH_G>>16] = self.ameth_g
        self._dis_amethods[opcode86.ADDRMETH_P>>16] = self.ameth_p
        self._dis_amethods[opcode86.ADDRMETH_S>>16] = self.ameth_s
        self._dis_amethods[opcode86.ADDRMETH_U>>16] = self.ameth_u
        self._dis_amethods[opcode86.ADDRMETH_V>>16] = self.ameth_v
        self._dis_amethods[opcode86.ADDRMETH_X>>16] = self.ameth_x
        self._dis_amethods[opcode86.ADDRMETH_Y>>16] = self.ameth_y

        # Offsets used to add in addressing method parsers
        self.ROFFSETMMX = getRegOffset(i386regs, "mm0")
        self.ROFFSETSIMD = getRegOffset(i386regs, "xmm0")
        self.ROFFSETDEBUG = getRegOffset(i386regs, "debug0")
        self.ROFFSETCTRL = getRegOffset(i386regs, "ctrl0")
        self.ROFFSETTEST = getRegOffset(i386regs, "test0")
        self.ROFFSETSEG = getRegOffset(i386regs, "es")
        self.ROFFSETFPU = getRegOffset(i386regs, "st0")
def parse_modrm(self, byte):
# Pass in a string with an offset for speed rather than a new string
mod = (byte >> 6) & 0x3
reg = (byte >> 3) & 0x7
rm = byte & 0x7
#print "MOD/RM",hex(byte),mod,reg,rm
return (mod,reg,rm)
def byteRegOffset(self, val):
# NOTE: This is used for high byte metas in 32 bit mode only
if val < 4:
return val + RMETA_LOW8
return (val-4) + RMETA_HIGH8
    # Parse modrm as though addr mode might not be just a reg
    def extended_parse_modrm(self, bytes, offset, opersize, regbase=0):
        """
        Parse a full modrm (and, when present, SIB/displacement) sequence
        starting at *offset* and return a tuple of (size, Operand), where
        *size* is the number of bytes consumed.
        """
        mod,reg,rm = self.parse_modrm(ord(bytes[offset]))

        size = 1

        if mod == 3: # Easy one, just a reg
            # FIXME only use self.byteRegOffset in 32 bit mode, NOT 64 bit...
            if opersize == 1: rm = self.byteRegOffset(rm)
            elif opersize == 2: rm += RMETA_LOW16
            return (size, i386RegOper(rm+regbase, opersize))

        elif mod == 0:
            # means we are [reg] unless rm == 4 (SIB) or rm == 5 ([imm32])
            if rm == 5:
                imm = e_bits.parsebytes(bytes, offset + size, 4)
                size += 4
                # NOTE: in 64 bit mode, *this* is where we differ, (This case is RIP relative)
                return(size, i386ImmMemOper(imm, opersize))

            elif rm == 4:
                sibsize, scale, index, base, imm = self.parse_sib(bytes, offset+size, mod)
                size += sibsize
                if base != None: base += regbase # Adjust for different register addressing modes
                if index != None: index += regbase # Adjust for different register addressing modes
                oper = i386SibOper(opersize, reg=base, imm=imm, index=index, scale=scale_lookup[scale])
                return (size, oper)

            else:
                return(size, i386RegMemOper(regbase+rm, opersize))

        elif mod == 1:
            # mod 1 means we are [ reg + disp8 ] (unless rm == 4 which means sib + disp8)
            if rm == 4:
                sibsize, scale, index, base, imm = self.parse_sib(bytes, offset+size, mod)
                size += sibsize
                disp = e_bits.parsebytes(bytes, offset+size, 1, sign=True)
                size += 1
                if base != None: base += regbase # Adjust for different register addressing modes
                if index != None: index += regbase # Adjust for different register addressing modes
                oper = i386SibOper(opersize, reg=base, index=index, scale=scale_lookup[scale], disp=disp)
                return (size,oper)

            else:
                x = e_bits.signed(ord(bytes[offset+size]), 1)
                size += 1
                return(size, i386RegMemOper(regbase+rm, opersize, disp=x))

        elif mod == 2:
            # Means we are [ reg + disp32 ] (unless rm == 4 which means SIB + disp32)
            if rm == 4:
                sibsize, scale, index, base, imm = self.parse_sib(bytes,offset+size,mod)
                size += sibsize
                disp = e_bits.parsebytes(bytes, offset + size, 4, sign=True)
                size += 4
                if base != None: base += regbase # Adjust for different register addressing modes
                if index != None: index += regbase # Adjust for different register addressing modes
                oper = i386SibOper(opersize, reg=base, imm=imm, index=index, scale=scale_lookup[scale], disp=disp)
                return (size, oper)

            else:
                # NOTE: Immediate displacements in SIB are still 4 bytes in 64 bit mode
                disp = e_bits.parsebytes(bytes, offset+size, 4, sign=True)
                size += 4
                return(size, i386RegMemOper(regbase+rm, opersize, disp=disp))

        else:
            raise Exception("How does mod == %d" % mod)
def parse_sib(self, bytes, offset, mod):
"""
Return a tuple of (size, scale, index, base, imm)
"""
byte = ord(bytes[offset])
scale = (byte >> 6) & 0x3
index = (byte >> 3) & 0x7
base = byte & 0x7
imm = None
size = 1
# Special SIB case with no index reg
if index == 4:
index = None
# Special SIB case with possible immediate
if base == 5:
if mod == 0: # [ imm32 + index * scale ]
base = None
imm = e_bits.parsebytes(bytes, offset+size, 4, sign=False)
size += 4
# FIXME is there special stuff needed here?
elif mod == 1:
pass
#raise "OMG MOD 1"
elif mod == 2:
pass
#raise "OMG MOD 2"
return (size, scale, index, base, imm)
def _dis_calc_tsize(self, opertype, prefixes):
"""
Use the oper type and prefixes to decide on the tsize for
the operand.
"""
mode = MODE_32
#print "OPERTYPE",hex(opertype)
sizelist = opcode86.OPERSIZE.get(opertype, None)
if sizelist == None:
raise "OPERSIZE FAIL: %.8x" % opertype
if prefixes & PREFIX_OP_SIZE:
mode = MODE_16
#print "OPERTYPE",hex(opertype)
#print "SIZELIST",repr(sizelist)
return sizelist[mode]
    def disasm(self, bytes, offset, va):
        """
        Decode a single instruction from `bytes` at `offset` and return an
        i386Opcode whose envi "va" is `va`.

        Raises envi.InvalidInstruction when the bytes cannot be decoded
        (unknown opcode, or not enough bytes for the operands).
        """
        # Stuff for opcode parsing
        tabdesc = all_tables[0] # A tuple (optable, shiftbits, mask byte, sub, max)
        startoff = offset # Use startoff as a size knob if needed
        # Stuff we'll be putting in the opcode object
        optype = None # This gets set if we successfully decode below
        mnem = None
        operands = []
        prefixes = 0
        # First loop: consume prefix bytes, OR-ing their flags into `prefixes`.
        while True:
            obyte = ord(bytes[offset])
            # This line changes in 64 bit mode
            p = self._dis_prefixes[obyte]
            if p == None:
                break
            # 66 0f is a two-byte opcode escape rather than an operand-size
            # prefix, so stop prefix parsing and let the table walk eat it.
            if obyte == 0x66 and ord(bytes[offset+1]) == 0x0f:
                break
            prefixes |= p
            offset += 1
            continue
        #pdone = False
        # Second loop: walk the opcode tables until we land on a final
        # (leaf) opcode descriptor.
        while True:
            obyte = ord(bytes[offset])
            #print "OBYTE",hex(obyte)
            if (obyte > tabdesc[4]):
                #print "Jumping To Overflow Table:", tabdesc[5]
                tabdesc = all_tables[tabdesc[5]]
            tabidx = ((obyte - tabdesc[3]) >> tabdesc[1]) & tabdesc[2]
            #print "TABIDX: %d" % tabidx
            opdesc = tabdesc[0][tabidx]
            #print 'OPDESC: %s' % repr(opdesc)
            # Hunt down multi-byte opcodes
            nexttable = opdesc[0]
            #print "NEXT",nexttable,hex(obyte)
            if nexttable != 0: # If we have a sub-table specified, use it.
                #print "Multi-Byte Next Hop For",hex(obyte),opdesc[0]
                tabdesc = all_tables[nexttable]
                # In the case of 66 0f, the next table is *already* assuming we ate
                # the 66 *and* the 0f... oblige them.
                if obyte == 0x66 and ord(bytes[offset+1]) == 0x0f:
                    offset += 1
                # Account for the table jump we made
                offset += 1
                continue
            # We are now on the final table...
            #print repr(opdesc)
            mnem = opdesc[6]
            optype = opdesc[1]
            if tabdesc[2] == 0xff:
                offset += 1 # For our final opcode byte
            break
        if optype == 0:
            #print tabidx
            #print opdesc
            #print "OPTTYPE 0"
            raise envi.InvalidInstruction(bytes=bytes[startoff:startoff+16])
        operoffset = 0
        # Begin parsing operands based off address method
        for i in operand_range:
            oper = None # Set this if we end up with an operand
            osize = 0
            # Pull out the operand description from the table
            operflags = opdesc[i]
            opertype = operflags & opcode86.OPTYPE_MASK
            addrmeth = operflags & opcode86.ADDRMETH_MASK
            # If there are no more operands, break out of the loop!
            if operflags == 0:
                break
            #print "ADDRTYPE: %.8x OPERTYPE: %.8x" % (addrmeth, opertype)
            tsize = self._dis_calc_tsize(opertype, prefixes)
            #print hex(opertype),hex(addrmeth)
            # If addrmeth is zero, we have operands embedded in the opcode
            if addrmeth == 0:
                osize = 0
                oper = self.ameth_0(operflags, opdesc[5+i], tsize, prefixes)
            else:
                #print "ADDRTYPE",hex(addrmeth)
                # Dispatch to the ameth_* parser for this addressing method.
                ameth = self._dis_amethods[addrmeth >> 16]
                #print "AMETH",ameth
                if ameth == None:
                    raise Exception("Implement Addressing Method 0x%.8x" % addrmeth)
                # NOTE: Depending on your addrmethod you may get beginning of operands, or offset
                try:
                    if addrmeth == opcode86.ADDRMETH_I or addrmeth == opcode86.ADDRMETH_J:
                        osize, oper = ameth(bytes, offset+operoffset, tsize, prefixes)
                        # If we are a sign extended immediate and not the same as the other operand,
                        # do the sign extension during disassembly so nothing else has to worry about it..
                        if operflags & opcode86.OP_SIGNED and len(operands) and tsize != operands[-1].tsize:
                            otsize = operands[-1].tsize
                            oper.imm = e_bits.sign_extend(oper.imm, oper.tsize, otsize)
                            oper.tsize = otsize
                    else:
                        osize, oper = ameth(bytes, offset, tsize, prefixes)
                except struct.error, e:
                    # Catch struct unpack errors due to insufficient data length
                    raise envi.InvalidInstruction(bytes=bytes[startoff:startoff+16])
            if oper != None:
                # This is a filthy hack for now... operands need the register
                # context to render themselves.
                oper._dis_regctx = self._dis_regctx
                operands.append(oper)
            operoffset += osize
        # Pull in the envi generic instruction flags
        iflags = iflag_lookup.get(optype, 0)
        if priv_lookup.get(mnem, False):
            iflags |= envi.IF_PRIV
        # Lea will have a reg-mem/sib operand with _is_deref True, but should be false
        if optype == opcode86.INS_LEA:
            operands[1]._is_deref = False
        ret = i386Opcode(va, optype, mnem, prefixes, (offset-startoff)+operoffset, operands, iflags)
        return ret
# Declare all the address method parsers here!
def ameth_0(self, operflags, operval, tsize, prefixes):
# Special address method for opcodes with embedded operands
if operflags & opcode86.OP_REG:
return i386RegOper(operval, tsize)
elif operflags & opcode86.OP_IMM:
return i386ImmOper(operval, tsize)
raise Exception("Unknown ameth_0! operflags: 0x%.8x" % operflags)
def ameth_a(self, bytes, offset, tsize, prefixes):
imm = e_bits.parsebytes(bytes, offset, tsize)
seg = e_bits.parsebytes(bytes, offset+tsize, 2)
# THIS BEING GHETTORIGGED ONLY EFFECTS callf jmpf
#print "FIXME: envi.intel.ameth_a skipping seg prefix %d" % seg
return (tsize+2, i386ImmOper(imm, tsize))
    def ameth_e(self, bytes, offset, tsize, prefixes):
        # Addressing method E: general register or memory operand selected
        # by the mod/rm byte (possibly with SIB byte and displacement).
        return self.extended_parse_modrm(bytes, offset, tsize)
def ameth_n(self, bytes, offset, tsize, prefixes):
mod,reg,rm = self.parse_modrm(ord(bytes[offset]))
return (1, i386RegOper(rm + self.ROFFSETMMX, tsize))
def ameth_q(self, bytes, offset, tsize, prefixes):
mod,reg,rm = self.parse_modrm(ord(bytes[offset]))
if mod == 3:
return (1, i386RegOper(rm + self.ROFFSETMMX, tsize))
return self.extended_parse_modrm(bytes, offset, tsize)
def ameth_w(self, bytes, offset, tsize, prefixes):
mod,reg,rm = self.parse_modrm(ord(bytes[offset]))
if mod == 3:
return (1, i386RegOper(rm + self.ROFFSETSIMD, tsize))
return self.extended_parse_modrm(bytes, offset, tsize)
def ameth_i(self, bytes, offset, tsize, prefixes):
# FIXME sign extend here if opflags has OP_SIGNED
imm = e_bits.parsebytes(bytes, offset, tsize)
return (tsize, i386ImmOper(imm, tsize))
def ameth_j(self, bytes, offset, tsize, prefixes):
imm = e_bits.parsebytes(bytes, offset, tsize, sign=True)
return (tsize, i386PcRelOper(imm, tsize))
def ameth_o(self, bytes, offset, tsize, prefixes):
# NOTE: displacement *stays* 32 bit even with REX
# (but 16 bit should probably be supported)
imm = e_bits.parsebytes(bytes, offset, 4, sign=False)
return (4, i386ImmMemOper(imm, tsize))
def ameth_g(self, bytes, offset, tsize, prefixes):
mod,reg,rm = self.parse_modrm(ord(bytes[offset]))
if tsize == 1: reg = self.byteRegOffset(reg)
elif tsize == 2: reg += RMETA_LOW16
return (0, i386RegOper(reg, tsize))
def ameth_c(self, bytes, offset, tsize, prefixes):
mod,reg,rm = self.parse_modrm(ord(bytes[offset]))
return (0, i386RegOper(reg+self.ROFFSETCTRL, tsize))
def ameth_d(self, bytes, offset, tsize, prefixes):
mod,reg,rm = self.parse_modrm(ord(bytes[offset]))
return (0, i386RegOper(reg+self.ROFFSETDEBUG, tsize))
def ameth_p(self, bytes, offset, tsize, prefixes):
mod,reg,rm = self.parse_modrm(ord(bytes[offset]))
return (0, i386RegOper(reg+self.ROFFSETMMX, tsize))
def ameth_s(self, bytes, offset, tsize, prefixes):
mod,reg,rm = self.parse_modrm(ord(bytes[offset]))
return (0, i386RegOper(reg+self.ROFFSETSEG, tsize))
def ameth_u(self, bytes, offset, tsize, prefixes):
mod,reg,rm = self.parse_modrm(ord(bytes[offset]))
return (0, i386RegOper(reg+self.ROFFSETTEST, tsize))
def ameth_v(self, bytes, offset, tsize, prefixes):
mod,reg,rm = self.parse_modrm(ord(bytes[offset]))
return (0, i386RegOper(reg+self.ROFFSETSIMD, tsize))
    def ameth_x(self, bytes, offset, tsize, prefixes):
        # Addressing method X: string-op source memory at DS:ESI.
        #FIXME this needs the DS over-ride, but is only for outsb which we don't support
        return (0, i386RegMemOper(REG_ESI, tsize))
def ameth_y(self, bytes, offset, tsize, prefixes):
#FIXME this needs the ES over-ride, but is only for insb which we don't support
return (0, i386RegMemOper(REG_ESI, tsize))
if __name__ == '__main__':
# A little helper to make testing easier
import sys
d = i386Disasm()
b = file(sys.argv[1], 'rb').read()
offset = 0
va = 0x41414141
while offset < len(b):
op = d.disasm(b, offset, va+offset)
print '0x%.8x %s %s' % (va+offset, b[offset:offset+len(op)].encode('hex').ljust(16), repr(op))
offset += len(op)
| foreni-administrator/pyew | envi/archs/i386/disasm.py | Python | gpl-2.0 | 35,078 |
# -*- coding: utf-8 -*-
"""Order Model unit tests."""
import pytest
from store.orders.models import Order, OrderConsistsOf
from store.book.models import Book
from store.customer.models import Customer
# from .factories import ConsistsOfFactory
@pytest.mark.usefixtures('db')
class TestOrderConsistsOf:
    """Tests for the OrderConsistsOf association model."""

    def test_create_order_consists_of_for_order_and_book(self):
        """Linking an order to a book with a quantity can be queried back."""
        book = Book('9780439708180', "Harry Potter and the Philosopher's Stone",
                    "J. K. Rowling", "Scholastic", 1999, 10, 6.79, "paperback", "fantasy", "fantasy")
        book.save()
        customer = Customer('foo', 'foo@bar.com', "3241234",
                            "12341234", "coolstreet")
        customer.save()
        order = Order(customer.id)
        order.save()
        link = OrderConsistsOf(order.id, book.isbn13, 42)
        link.save()
        # Exactly one association row with the quantity we stored.
        matches = OrderConsistsOf.query.filter_by(consists_qty=42).all()
        assert len(matches) == 1
# def test_check_if_book_is_book_in_consists_of(self):
# """Check book is in consists of."""
# book = Book('9780439708180', "Harry Potter and the Philosopher's Stone",
# "J. K. Rowling", "Scholastic", 1999, 10, 6.79, "paperback", "fantasy", "fantasy")
# book.save()
# customer = Customer('foo', 'foo@bar.com', "3241234",
# "12341234", "coolstreet")
# customer.save()
# order = Order(customer.id)
# order.save()
# oco = OrderConsistsOf(order.id, book.isbn13, 42)
# oco.save()
# retrieved = OrderConsistsOf.get_by_id((order.id, book.isbn13))
# assert len(retrieved) == 1
| William93/store | tests/test_order_consists_of_models.py | Python | bsd-3-clause | 1,715 |
# ~*~ coding:utf-8 ~*~
import sys
from PyQt5.QtWidgets import QApplication
from window import Window
if __name__ == '__main__':
    # Create the Qt application, show the main window, and exit the
    # process with the event loop's return code.
    app = QApplication(sys.argv)
    main_window = Window()
    main_window.show()
    sys.exit(app.exec_())
| katya-malyk/sphere_approximation | __init__.py | Python | apache-2.0 | 229 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from tempest_lib import exceptions as lib_exc
from tempest.api.volume import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
CONF = config.CONF
class VolumesV2SnapshotNegativeTestJSON(base.BaseVolumeTest):
    """Negative tests for the cinder v2 volume snapshot API."""

    @classmethod
    def skip_checks(cls):
        super(VolumesV2SnapshotNegativeTestJSON, cls).skip_checks()
        if not CONF.volume_feature_enabled.snapshot:
            raise cls.skipException("Cinder volume snapshots are disabled")

    @test.attr(type=['negative', 'gate'])
    def test_create_snapshot_with_nonexistent_volume_id(self):
        # Creating a snapshot for a volume id that does not exist
        # must raise NotFound.
        snap_name = data_utils.rand_name('snap')
        self.assertRaises(lib_exc.NotFound,
                          self.snapshots_client.create_snapshot,
                          str(uuid.uuid4()), display_name=snap_name)

    @test.attr(type=['negative', 'gate'])
    def test_create_snapshot_without_passing_volume_id(self):
        # Creating a snapshot with no volume id must raise NotFound.
        snap_name = data_utils.rand_name('snap')
        self.assertRaises(lib_exc.NotFound,
                          self.snapshots_client.create_snapshot,
                          None, display_name=snap_name)
class VolumesV1SnapshotNegativeTestJSON(VolumesV2SnapshotNegativeTestJSON):
    # Re-run the same negative snapshot tests against the v1 volume API.
    _api_version = 1
| Vaidyanath/tempest | tempest/api/volume/test_volumes_snapshots_negative.py | Python | apache-2.0 | 1,938 |
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright Holders: Rene Milk, Stephan Rave, Felix Schindler
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
""" This module provides generic time-stepping algorithms for the solution of
instationary problems.
The algorithms are generic in the sense that each algorithm operates exclusively
on |Operators| and |VectorArrays|. In particular, the algorithms
can also be used to turn an arbitrary stationary |Discretization| provided
by an external library into an instationary |Discretization|.
Currently, implementations of :func:`explicit_euler` and :func:`implicit_euler`
time-stepping are provided. The :class:`TimeStepperInterface` defines a
common interface that has to be fulfilled by the time-steppers that are used
by |InstationaryDiscretization|. The classes :class:`ExplicitEulerTimeStepper`
and :class:`ImplicitEulerTimeStepper` encapsulate :func:`explicit_euler` and
:func:`implicit_euler` to provide this interface.
"""
from __future__ import absolute_import, division, print_function
from pymor.core.interfaces import ImmutableInterface, abstractmethod
from pymor.la.interfaces import VectorArrayInterface
from pymor.operators.interfaces import OperatorInterface
class TimeStepperInterface(ImmutableInterface):
    """Interface for time-stepping algorithms.

    Algorithms implementing this interface solve time-dependent problems
    of the form ::

        M * d_t u + A(u, mu, t) = F(mu, t).

    Time-steppers used by |InstationaryDiscretization| have to fulfill
    this interface.
    """

    @abstractmethod
    def solve(self, initial_time, end_time, initial_data, operator, rhs=None, mass=None, mu=None, num_values=None):
        """Apply time-stepper to the equation ::

            M * d_t u + A(u, mu, t) = F(mu, t).

        Parameters
        ----------
        initial_time
            The time at which to begin time-stepping.
        end_time
            The time until which to perform time-stepping.
        initial_data
            The solution vector at `initial_time`.
        operator
            The |Operator| A.
        rhs
            The right hand side F (either |VectorArray| of length 1 or |Operator| with
            `range.dim == 1`). If `None`, zero right hand side is assumed.
        mass
            The |Operator| M. If `None`, the identity operator is assumed.
        mu
            |Parameter| for which `operator` and `rhs` are evaluated. The current time is added
            to `mu` with key `_t`.
        num_values
            The number of returned vectors of the solution trajectory. If `None`, each
            intermediate vector that is calculated is returned.

        Returns
        -------
        |VectorArray| containing the solution trajectory.
        """
        pass
class ImplicitEulerTimeStepper(TimeStepperInterface):
    """Implicit Euler time-stepper.

    Solves equations of the form ::

        M * d_t u + A(u, mu, t) = F(mu, t).

    Parameters
    ----------
    nt
        The number of time-steps the time-stepper will perform.
    invert_options
        The :attr:`~pymor.operators.interfaces.OperatorInterface.invert_options` used
        to invert `M + dt*A`.
    """

    def __init__(self, nt, invert_options=None):
        self.nt = nt
        self.invert_options = invert_options

    def solve(self, initial_time, end_time, initial_data, operator, rhs=None, mass=None, mu=None, num_values=None):
        # Delegate to the generic implicit Euler implementation below.
        return implicit_euler(operator, rhs, mass, initial_data, initial_time, end_time, self.nt, mu,
                              self.invert_options, num_values)
class ExplicitEulerTimeStepper(TimeStepperInterface):
    """Explicit Euler time-stepper.

    Solves equations of the form ::

        d_t u + A(u, mu, t) = F(mu, t).

    Note that no mass operator is supported: `solve` raises
    `NotImplementedError` if `mass` is given.

    Parameters
    ----------
    nt
        The number of time-steps the time-stepper will perform.
    """

    def __init__(self, nt):
        self.nt = nt

    def solve(self, initial_time, end_time, initial_data, operator, rhs=None, mass=None, mu=None, num_values=None):
        # Explicit Euler only handles M == identity.
        if mass is not None:
            raise NotImplementedError
        return explicit_euler(operator, rhs, initial_data, initial_time, end_time, self.nt, mu, num_values)
def implicit_euler(A, F, M, U0, t0, t1, nt, mu=None, invert_options=None, num_values=None):
    """Implicit Euler time-stepping for  M * d_t u + A(u, mu, t) = F(mu, t).

    Parameters
    ----------
    A
        The (possibly time-dependent) |Operator| A.
    F
        The right hand side: an |Operator| with `range.dim == 1` or a
        |VectorArray| of length 1.
    M
        The non-parametric mass |Operator| M.
    U0
        |VectorArray| of length 1 holding the initial data.
    t0, t1
        Initial and final time.
    nt
        Number of time steps.
    mu
        |Parameter| for which A and F are evaluated; the current time is
        written into `mu['_t']` each step.
        NOTE(review): this mutates the caller's `mu` in place and fails if
        `mu` is `None` -- confirm callers always pass a parameter dict.
    invert_options
        Options used when inverting `M + dt*A`.
    num_values
        Number of trajectory vectors to return (defaults to `nt + 1`).

    Returns
    -------
    |VectorArray| containing the solution trajectory.
    """
    assert isinstance(A, OperatorInterface)
    assert isinstance(F, (OperatorInterface, VectorArrayInterface))
    assert isinstance(M, OperatorInterface)
    assert not M.parametric
    assert A.source == A.range == M.source == M.range
    num_values = num_values or nt + 1
    dt = (t1 - t0) / nt
    # DT is the spacing of the *returned* trajectory vectors.
    DT = (t1 - t0) / (num_values - 1)

    if isinstance(F, OperatorInterface):
        assert F.range.dim == 1
        assert F.source == A.range
        F_time_dep = F.parametric and '_t' in F.parameter_type
        if not F_time_dep:
            # Time-independent rhs: assemble dt*F once up front.
            dt_F = F.as_vector(mu) * dt
    else:
        assert len(F) == 1
        assert F in A.range
        F_time_dep = False
        dt_F = F * dt

    assert U0 in A.source
    assert len(U0) == 1

    A_time_dep = A.parametric and '_t' in A.parameter_type

    R = A.source.empty(reserve=nt+1)
    R.append(U0)

    # System operator of each implicit step; assemble once if possible.
    M_dt_A = M + A * dt
    if not A_time_dep:
        M_dt_A = M_dt_A.assemble(mu)

    t = t0
    U = U0.copy()

    for n in xrange(nt):
        t += dt
        mu['_t'] = t
        if F_time_dep:
            dt_F = F.as_vector(mu) * dt
        # Solve (M + dt*A) U_new = M U_old + dt*F.
        U = M_dt_A.apply_inverse(M.apply(U) + dt_F, mu=mu, options=invert_options)
        # Record U for every output time point passed since the last append.
        while t - t0 + (min(dt, DT) * 0.5) >= len(R) * DT:
            R.append(U)

    return R
def explicit_euler(A, F, U0, t0, t1, nt, mu=None, num_values=None):
    """Explicit Euler time-stepping for  d_t u + A(u, mu, t) = F(mu, t).

    Parameters
    ----------
    A
        The (possibly time-dependent) |Operator| A.
    F
        The right hand side: an |Operator| with `range.dim == 1`, a
        |VectorArray| of length 1, or `None` for a zero right hand side.
    U0
        |VectorArray| of length 1 holding the initial data.
    t0, t1
        Initial and final time.
    nt
        Number of time steps.
    mu
        |Parameter| for which A and F are evaluated; the current time is
        written into `mu['_t']` each step.
        NOTE(review): this mutates the caller's `mu` in place and fails if
        `mu` is `None` -- confirm callers always pass a parameter dict.
    num_values
        Number of trajectory vectors to return (defaults to `nt + 1`).

    Returns
    -------
    |VectorArray| containing the solution trajectory.
    """
    assert isinstance(A, OperatorInterface)
    assert F is None or isinstance(F, (OperatorInterface, VectorArrayInterface))
    assert A.source == A.range
    num_values = num_values or nt + 1

    if isinstance(F, OperatorInterface):
        assert F.range.dim == 1
        assert F.source == A.source
        F_time_dep = F.parametric and '_t' in F.parameter_type
        if not F_time_dep:
            # Time-independent rhs: assemble once up front.
            F_ass = F.as_vector(mu)
    elif isinstance(F, VectorArrayInterface):
        assert len(F) == 1
        assert F in A.source
        F_time_dep = False
        F_ass = F

    assert len(U0) == 1
    assert U0 in A.source

    A_time_dep = A.parametric and '_t' in A.parameter_type
    if not A_time_dep:
        A = A.assemble(mu)

    dt = (t1 - t0) / nt
    # DT is the spacing of the *returned* trajectory vectors.
    DT = (t1 - t0) / (num_values - 1)
    R = A.source.empty(reserve=num_values)
    R.append(U0)

    t = t0
    U = U0.copy()

    if F is None:
        # Homogeneous case: U_new = U_old - dt * A(U_old).
        for n in xrange(nt):
            t += dt
            mu['_t'] = t
            U.axpy(-dt, A.apply(U, mu=mu))
            # Record U for every output time point passed since last append.
            while t - t0 + (min(dt, DT) * 0.5) >= len(R) * DT:
                R.append(U)
    else:
        # Inhomogeneous case: U_new = U_old + dt * (F - A(U_old)).
        for n in xrange(nt):
            t += dt
            mu['_t'] = t
            if F_time_dep:
                F_ass = F.as_vector(mu)
            U.axpy(dt, F_ass - A.apply(U, mu=mu))
            # Record U for every output time point passed since last append.
            while t - t0 + (min(dt, DT) * 0.5) >= len(R) * DT:
                R.append(U)

    return R
| michaellaier/pymor | src/pymor/algorithms/timestepping.py | Python | bsd-2-clause | 7,154 |
# /xhr/resources/conditional.py -- to fake a 304 response
def main(request, response):
    """Serve a response honouring If-None-Match / If-Modified-Since.

    The validators are supplied by the client through the "tag" and
    "date" query parameters; a matching conditional request gets a 304
    with the custom reason "SUPERCOOL", anything else a plain text body.
    """
    etag = request.GET.first("tag", None)
    if_none_match = request.headers.get("If-None-Match", None)
    last_modified = request.GET.first("date", "")
    if_modified_since = request.headers.get("If-Modified-Since", None)

    # Echo the requested validator back on the response.
    if etag:
        response.headers.set("ETag", '"%s"' % etag)
    elif last_modified:
        response.headers.set("Last-Modified", last_modified)

    matched = ((if_none_match is not None and if_none_match == etag) or
               (if_modified_since is not None and if_modified_since == last_modified))
    if matched:
        response.status = (304, "SUPERCOOL")
        return ""
    response.headers.set("Content-Type", "text/plain")
    return "MAYBE NOT"
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
import os
import pytest
from django.core.urlresolvers import reverse
from shuup.core.models import Order, OrderStatus
from shuup.testing.browser_utils import (
click_element, wait_until_appeared, wait_until_condition
)
from shuup.testing.factories import create_empty_order, get_default_shop
from shuup.testing.utils import initialize_admin_browser_test
pytestmark = pytest.mark.skipif(os.environ.get("SHUUP_BROWSER_TESTS", "0") != "1", reason="No browser tests run.")
@pytest.mark.browser
@pytest.mark.djangodb
def test_orders_list_view(browser, admin_user, live_server, settings):
shop = get_default_shop()
for i in range(0, 10):
order = create_empty_order(shop=shop)
order.save()
# Set last one canceled
Order.objects.last().set_canceled()
initialize_admin_browser_test(browser, live_server, settings)
_visit_orders_list_view(browser, live_server)
_test_status_filter(browser) # Will set three orders from end canceled
def _visit_orders_list_view(browser, live_server):
url = reverse("shuup_admin:order.list")
browser.visit("%s%s" % (live_server, url))
wait_until_condition(browser, condition=lambda x: x.is_text_present("Orders"))
wait_until_appeared(browser, ".picotable-item-info")
def _test_status_filter(browser):
# Check initial row count where the cancelled order should be excluded
_check_row_count(browser, Order.objects.count() - 1)
# Take three last valid orders and set those cancelled
orders = Order.objects.valid()[:3]
for order in orders:
order.set_canceled()
# Filter with cancelled
cancelled_status = OrderStatus.objects.get_default_canceled()
_change_status_filter(browser, "%s" % cancelled_status.pk)
# Check cancelled row count
_check_row_count(browser, (3 + 1))
# Filter with initial
initial_status = OrderStatus.objects.get_default_initial()
_change_status_filter(browser, "%s" % initial_status.pk)
# Take new count
_check_row_count(browser, (Order.objects.count() - 3 - 1))
# Change status filter to all
_change_status_filter(browser, '"_all"')
# Now all orders should be visible
_check_row_count(browser, Order.objects.count())
def _check_row_count(browser, expected_row_count):
picotable = browser.find_by_id("picotable")
tbody = picotable.find_by_tag("tbody").first
wait_until_condition(browser, lambda x: len(x.find_by_css("#picotable tbody tr")) == expected_row_count)
# technically this is handled above, but do the assertion anyways ;)
assert len(browser.find_by_css("#picotable tbody tr")) == expected_row_count
def _change_status_filter(browser, to_value):
picotable = browser.find_by_id("picotable")
click_element(browser, "#picotable div.choice-filter")
click_element(browser, "#picotable div.choice-filter option[value='%s']" % to_value)
| suutari/shoop | shuup_tests/browser/admin/test_order_list.py | Python | agpl-3.0 | 3,132 |
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import copy
import pickle
from abc import abstractmethod
from builtins import object, str
from future.utils import PY2, PY3, text_type
from pants.util.objects import (Exactly, SubclassesOf, SuperclassesOf, TypeCheckError,
TypedDatatypeInstanceConstructionError, datatype)
from pants_test.base_test import BaseTest
class TypeConstraintTestBase(BaseTest):
class A(object):
pass
class B(A):
pass
class C(B):
pass
class BPrime(A):
pass
class SuperclassesOfTest(TypeConstraintTestBase):
def test_none(self):
with self.assertRaises(ValueError):
SubclassesOf()
def test_single(self):
superclasses_of_b = SuperclassesOf(self.B)
self.assertEqual((self.B,), superclasses_of_b.types)
self.assertTrue(superclasses_of_b.satisfied_by(self.A()))
self.assertTrue(superclasses_of_b.satisfied_by(self.B()))
self.assertFalse(superclasses_of_b.satisfied_by(self.BPrime()))
self.assertFalse(superclasses_of_b.satisfied_by(self.C()))
def test_multiple(self):
superclasses_of_a_or_b = SuperclassesOf(self.A, self.B)
self.assertEqual((self.A, self.B), superclasses_of_a_or_b.types)
self.assertTrue(superclasses_of_a_or_b.satisfied_by(self.A()))
self.assertTrue(superclasses_of_a_or_b.satisfied_by(self.B()))
self.assertFalse(superclasses_of_a_or_b.satisfied_by(self.BPrime()))
self.assertFalse(superclasses_of_a_or_b.satisfied_by(self.C()))
class ExactlyTest(TypeConstraintTestBase):
def test_none(self):
with self.assertRaises(ValueError):
Exactly()
def test_single(self):
exactly_b = Exactly(self.B)
self.assertEqual((self.B,), exactly_b.types)
self.assertFalse(exactly_b.satisfied_by(self.A()))
self.assertTrue(exactly_b.satisfied_by(self.B()))
self.assertFalse(exactly_b.satisfied_by(self.BPrime()))
self.assertFalse(exactly_b.satisfied_by(self.C()))
def test_multiple(self):
exactly_a_or_b = Exactly(self.A, self.B)
self.assertEqual((self.A, self.B), exactly_a_or_b.types)
self.assertTrue(exactly_a_or_b.satisfied_by(self.A()))
self.assertTrue(exactly_a_or_b.satisfied_by(self.B()))
self.assertFalse(exactly_a_or_b.satisfied_by(self.BPrime()))
self.assertFalse(exactly_a_or_b.satisfied_by(self.C()))
def test_disallows_unsplatted_lists(self):
with self.assertRaises(TypeError):
Exactly([1])
def test_str_and_repr(self):
exactly_b_types = Exactly(self.B, description='B types')
self.assertEquals("=(B types)", str(exactly_b_types))
self.assertEquals("Exactly(B types)", repr(exactly_b_types))
exactly_b = Exactly(self.B)
self.assertEquals("=B", str(exactly_b))
self.assertEquals("Exactly(B)", repr(exactly_b))
exactly_multiple = Exactly(self.A, self.B)
self.assertEquals("=(A, B)", str(exactly_multiple))
self.assertEquals("Exactly(A, B)", repr(exactly_multiple))
def test_checking_via_bare_type(self):
self.assertTrue(Exactly(self.B).satisfied_by_type(self.B))
self.assertFalse(Exactly(self.B).satisfied_by_type(self.C))
class SubclassesOfTest(TypeConstraintTestBase):
def test_none(self):
with self.assertRaises(ValueError):
SubclassesOf()
def test_single(self):
subclasses_of_b = SubclassesOf(self.B)
self.assertEqual((self.B,), subclasses_of_b.types)
self.assertFalse(subclasses_of_b.satisfied_by(self.A()))
self.assertTrue(subclasses_of_b.satisfied_by(self.B()))
self.assertFalse(subclasses_of_b.satisfied_by(self.BPrime()))
self.assertTrue(subclasses_of_b.satisfied_by(self.C()))
def test_multiple(self):
subclasses_of_b_or_c = SubclassesOf(self.B, self.C)
self.assertEqual((self.B, self.C), subclasses_of_b_or_c.types)
self.assertTrue(subclasses_of_b_or_c.satisfied_by(self.B()))
self.assertTrue(subclasses_of_b_or_c.satisfied_by(self.C()))
self.assertFalse(subclasses_of_b_or_c.satisfied_by(self.BPrime()))
self.assertFalse(subclasses_of_b_or_c.satisfied_by(self.A()))
class ExportedDatatype(datatype(['val'])):
pass
class AbsClass(object):
pass
class SomeTypedDatatype(datatype([('val', int)])): pass
class SomeMixin(object):
@abstractmethod
def as_str(self): pass
def stripped(self):
return self.as_str().strip()
class TypedWithMixin(datatype([('val', text_type)]), SomeMixin):
"""Example of using `datatype()` with a mixin."""
def as_str(self):
return self.val
class AnotherTypedDatatype(datatype([('string', text_type), ('elements', list)])): pass
class WithExplicitTypeConstraint(datatype([('a_string', text_type), ('an_int', Exactly(int))])): pass
class MixedTyping(datatype(['value', ('name', text_type)])): pass
class SomeBaseClass(object):
@abstractmethod
def something(self): pass
class SomeDatatypeClass(SomeBaseClass):
def something(self):
return 'asdf'
def __repr__(self):
return 'SomeDatatypeClass()'
class WithSubclassTypeConstraint(datatype([('some_value', SubclassesOf(SomeBaseClass))])): pass
class NonNegativeInt(datatype([('an_int', int)])):
"""Example of overriding __new__() to perform deeper argument checking."""
# NB: __new__() in the class returned by datatype() will raise if any kwargs are provided, but
# subclasses are free to pass around kwargs as long as they don't forward them to that particular
# __new__() method.
def __new__(cls, *args, **kwargs):
# Call the superclass ctor first to ensure the type is correct.
this_object = super(NonNegativeInt, cls).__new__(cls, *args, **kwargs)
value = this_object.an_int
if value < 0:
raise cls.make_type_error("value is negative: {!r}.".format(value))
return this_object
class CamelCaseWrapper(datatype([('nonneg_int', NonNegativeInt)])): pass
class ReturnsNotImplemented(object):
def __eq__(self, other):
return NotImplemented
class DatatypeTest(BaseTest):
def test_eq_with_not_implemented_super(self):
class DatatypeSuperNotImpl(datatype(['val']), ReturnsNotImplemented, tuple):
pass
self.assertNotEqual(DatatypeSuperNotImpl(1), DatatypeSuperNotImpl(1))
def test_type_included_in_eq(self):
foo = datatype(['val'])
bar = datatype(['val'])
self.assertFalse(foo(1) == bar(1))
self.assertTrue(foo(1) != bar(1))
def test_subclasses_not_equal(self):
foo = datatype(['val'])
class Bar(foo):
pass
self.assertFalse(foo(1) == Bar(1))
self.assertTrue(foo(1) != Bar(1))
def test_repr(self):
bar = datatype(['val', 'zal'], superclass_name='Bar')
self.assertEqual('Bar(val=1, zal=1)', repr(bar(1, 1)))
class Foo(datatype(['val'], superclass_name='F'), AbsClass):
pass
self.assertEqual('Foo(val=1)', repr(Foo(1)))
def test_not_iterable(self):
bar = datatype(['val'])
with self.assertRaises(TypeError):
for x in bar(1):
pass
def test_deep_copy(self):
# deep copy calls into __getnewargs__, which namedtuple defines as implicitly using __iter__.
bar = datatype(['val'])
self.assertEqual(bar(1), copy.deepcopy(bar(1)))
def test_atrs(self):
bar = datatype(['val'])
self.assertEqual(1, bar(1).val)
def test_as_dict(self):
bar = datatype(['val'])
self.assertEqual({'val': 1}, bar(1)._asdict())
def test_replace_non_iterable(self):
bar = datatype(['val', 'zal'])
self.assertEqual(bar(1, 3), bar(1, 2)._replace(zal=3))
def test_properties_not_assignable(self):
bar = datatype(['val'])
bar_inst = bar(1)
with self.assertRaises(AttributeError):
bar_inst.val = 2
def test_invalid_field_name(self):
with self.assertRaises(ValueError):
datatype(['0isntanallowedfirstchar'])
def test_override_eq_disallowed(self):
class OverridesEq(datatype(['myval'])):
def __eq__(self, other):
return other.myval == self.myval
with self.assertRaises(TypeCheckError) as tce:
OverridesEq(1)
self.assertIn('Should not override __eq__.', str(tce.exception))
def test_subclass_pickleable(self):
before = ExportedDatatype(1)
dumps = pickle.dumps(before, protocol=2)
after = pickle.loads(dumps)
self.assertEqual(before, after)
def test_mixed_argument_types(self):
bar = datatype(['val', 'zal'])
self.assertEqual(bar(1, 2), bar(val=1, zal=2))
self.assertEqual(bar(1, 2), bar(zal=2, val=1))
def test_double_passed_arg(self):
bar = datatype(['val', 'zal'])
with self.assertRaises(TypeError):
bar(1, val=1)
def test_too_many_args(self):
bar = datatype(['val', 'zal'])
with self.assertRaises(TypeError):
bar(1, 1, 1)
def test_unexpect_kwarg(self):
bar = datatype(['val'])
with self.assertRaises(TypeError):
bar(other=1)
class TypedDatatypeTest(BaseTest):
def test_class_construction_errors(self):
# NB: datatype subclasses declared at top level are the success cases
# here by not failing on import.
# If the type_name can't be converted into a suitable identifier, throw a
# ValueError.
with self.assertRaises(ValueError) as cm:
class NonStrType(datatype([int])): pass
expected_msg = (
"Type names and field names must be valid identifiers: \"<class 'int'>\""
if PY3 else
"Type names and field names can only contain alphanumeric characters and underscores: \"<type 'int'>\""
)
self.assertEqual(str(cm.exception), expected_msg)
# This raises a TypeError because it doesn't provide a required argument.
with self.assertRaises(TypeError) as cm:
class NoFields(datatype()): pass
expected_msg = (
"datatype() missing 1 required positional argument: 'field_decls'"
if PY3 else
"datatype() takes at least 1 argument (0 given)"
)
self.assertEqual(str(cm.exception), expected_msg)
with self.assertRaises(ValueError) as cm:
class JustTypeField(datatype([text_type])): pass
expected_msg = (
"Type names and field names must be valid identifiers: \"<class 'str'>\""
if PY3 else
"Type names and field names can only contain alphanumeric characters and underscores: \"<type 'unicode'>\""
)
self.assertEqual(str(cm.exception), expected_msg)
with self.assertRaises(ValueError) as cm:
class NonStringField(datatype([3])): pass
expected_msg = (
"Type names and field names must be valid identifiers: '3'"
if PY3 else
"Type names and field names cannot start with a number: '3'"
)
self.assertEqual(str(cm.exception), expected_msg)
with self.assertRaises(ValueError) as cm:
class NonStringTypeField(datatype([(32, int)])): pass
expected_msg = (
"Type names and field names must be valid identifiers: '32'"
if PY3 else
"Type names and field names cannot start with a number: '32'"
)
self.assertEqual(str(cm.exception), expected_msg)
with self.assertRaises(ValueError) as cm:
class MultipleSameName(datatype([
'field_a',
'field_b',
'field_a',
])):
pass
expected_msg = "Encountered duplicate field name: 'field_a'"
self.assertEqual(str(cm.exception), expected_msg)
with self.assertRaises(ValueError) as cm:
class MultipleSameNameWithType(datatype([
'field_a',
('field_a', int),
])):
pass
expected_msg = "Encountered duplicate field name: 'field_a'"
self.assertEqual(str(cm.exception), expected_msg)
with self.assertRaises(TypeError) as cm:
class InvalidTypeSpec(datatype([('a_field', 2)])): pass
expected_msg = (
"type spec for field 'a_field' was not a type or TypeConstraint: "
"was 2 (type 'int').")
self.assertEqual(str(cm.exception), expected_msg)
def test_instance_construction_by_repr(self):
  """Check repr() and str() rendering of datatype instances.

  repr() shows plain field values; str() additionally shows each field's
  type constraint (``<=`` for Exactly, ``<+`` for SubclassesOf), as the
  expected strings below demonstrate.
  """
  some_val = SomeTypedDatatype(3)
  self.assertEqual(3, some_val.val)
  self.assertEqual(repr(some_val), "SomeTypedDatatype(val=3)")
  self.assertEqual(str(some_val), "SomeTypedDatatype(val<=int>=3)")

  some_object = WithExplicitTypeConstraint(text_type('asdf'), 45)
  self.assertEqual(some_object.a_string, 'asdf')
  self.assertEqual(some_object.an_int, 45)
  # On Python 2, repr() of a unicode value carries a u'' prefix; on Python 3
  # there is no prefix.
  def compare_repr(include_unicode = False):
    expected_message = "WithExplicitTypeConstraint(a_string={unicode_literal}'asdf', an_int=45)"\
      .format(unicode_literal='u' if include_unicode else '')
    self.assertEqual(repr(some_object), expected_message)
  # str() names the text type 'unicode' on Python 2 and 'str' on Python 3.
  def compare_str(unicode_type_name):
    expected_message = "WithExplicitTypeConstraint(a_string<={}>=asdf, an_int<=int>=45)".format(unicode_type_name)
    self.assertEqual(str(some_object), expected_message)
  if PY2:
    compare_str('unicode')
    compare_repr(include_unicode=True)
  else:
    compare_str('str')
    compare_repr()

  some_nonneg_int = NonNegativeInt(an_int=3)
  self.assertEqual(3, some_nonneg_int.an_int)
  self.assertEqual(repr(some_nonneg_int), "NonNegativeInt(an_int=3)")
  self.assertEqual(str(some_nonneg_int), "NonNegativeInt(an_int<=int>=3)")

  wrapped_nonneg_int = CamelCaseWrapper(NonNegativeInt(45))
  # test attribute naming for camel-cased types
  self.assertEqual(45, wrapped_nonneg_int.nonneg_int.an_int)
  # test that repr() is called inside repr(), and str() inside str()
  self.assertEqual(repr(wrapped_nonneg_int),
                   "CamelCaseWrapper(nonneg_int=NonNegativeInt(an_int=45))")
  self.assertEqual(
    str(wrapped_nonneg_int),
    "CamelCaseWrapper(nonneg_int<=NonNegativeInt>=NonNegativeInt(an_int<=int>=45))")

  mixed_type_obj = MixedTyping(value=3, name=text_type('asdf'))
  self.assertEqual(3, mixed_type_obj.value)
  # NOTE: these helpers intentionally shadow the compare_repr/compare_str
  # defined earlier in this method; each pair closes over its own instance.
  def compare_repr(include_unicode = False):
    expected_message = "MixedTyping(value=3, name={unicode_literal}'asdf')" \
      .format(unicode_literal='u' if include_unicode else '')
    self.assertEqual(repr(mixed_type_obj), expected_message)
  def compare_str(unicode_type_name):
    expected_message = "MixedTyping(value=3, name<={}>=asdf)".format(unicode_type_name)
    self.assertEqual(str(mixed_type_obj), expected_message)
  if PY2:
    compare_str('unicode')
    compare_repr(include_unicode=True)
  else:
    compare_str('str')
    compare_repr()

  subclass_constraint_obj = WithSubclassTypeConstraint(SomeDatatypeClass())
  self.assertEqual('asdf', subclass_constraint_obj.some_value.something())
  self.assertEqual(repr(subclass_constraint_obj),
                   "WithSubclassTypeConstraint(some_value=SomeDatatypeClass())")
  self.assertEqual(
    str(subclass_constraint_obj),
    "WithSubclassTypeConstraint(some_value<+SomeBaseClass>=SomeDatatypeClass())")
def test_mixin_type_construction(self):
  """Check repr()/str() formatting and mixin-provided methods on a datatype
  declared with a mixin class."""
  mixed = TypedWithMixin(text_type(' asdf '))
  # Python 2 renders unicode values with a u'' literal prefix and names the
  # type 'unicode'; Python 3 has neither distinction.
  if PY2:
    literal_prefix, type_name = 'u', 'unicode'
  else:
    literal_prefix, type_name = '', 'str'
  self.assertEqual(str(mixed), "TypedWithMixin(val<={}>= asdf )".format(type_name))
  self.assertEqual(
    repr(mixed),
    "TypedWithMixin(val={}' asdf ')".format(literal_prefix))
  # Methods contributed by the mixin operate on the instance's field.
  self.assertEqual(mixed.as_str(), ' asdf ')
  self.assertEqual(mixed.stripped(), 'asdf')
def test_instance_construction_errors(self):
  """Check the error messages raised when a datatype is constructed wrongly."""
  # Unknown keyword argument is rejected with the underlying __new__ error.
  with self.assertRaises(TypeError) as cm:
    SomeTypedDatatype(something=3)
  expected_msg = "error: in constructor of type SomeTypedDatatype: type check error:\n__new__() got an unexpected keyword argument 'something'"
  self.assertEqual(str(cm.exception), expected_msg)

  # not providing all the fields
  with self.assertRaises(TypeError) as cm:
    SomeTypedDatatype()
  # Python 2 and 3 word the missing-argument TypeError differently.
  expected_msg_ending = (
    "__new__() missing 1 required positional argument: 'val'"
    if PY3 else
    "__new__() takes exactly 2 arguments (1 given)"
  )
  expected_msg = "error: in constructor of type SomeTypedDatatype: type check error:\n" + expected_msg_ending
  self.assertEqual(str(cm.exception), expected_msg)

  # unrecognized fields
  with self.assertRaises(TypeError) as cm:
    SomeTypedDatatype(3, 4)
  expected_msg_ending = (
    "__new__() takes 2 positional arguments but 3 were given"
    if PY3 else
    "__new__() takes exactly 2 arguments (3 given)"
  )
  expected_msg = "error: in constructor of type SomeTypedDatatype: type check error:\n" + expected_msg_ending
  self.assertEqual(str(cm.exception), expected_msg)

  # A value failing its field's type constraint raises
  # TypedDatatypeInstanceConstructionError with a per-field message.
  with self.assertRaises(TypedDatatypeInstanceConstructionError) as cm:
    CamelCaseWrapper(nonneg_int=3)
  expected_msg = (
    """error: in constructor of type CamelCaseWrapper: type check error:
field 'nonneg_int' was invalid: value 3 (with type 'int') must satisfy this type constraint: Exactly(NonNegativeInt).""")
  self.assertEqual(str(cm.exception), expected_msg)

  # test that kwargs with keywords that aren't field names fail the same way
  with self.assertRaises(TypeError) as cm:
    CamelCaseWrapper(4, a=3)
  expected_msg = "error: in constructor of type CamelCaseWrapper: type check error:\n__new__() got an unexpected keyword argument 'a'"
  self.assertEqual(str(cm.exception), expected_msg)
def test_type_check_errors(self):
  """Check the messages produced when field values fail type checking."""
  # single type checking failure
  with self.assertRaises(TypeCheckError) as cm:
    SomeTypedDatatype([])
  expected_msg = (
    """error: in constructor of type SomeTypedDatatype: type check error:
field 'val' was invalid: value [] (with type 'list') must satisfy this type constraint: Exactly(int).""")
  self.assertEqual(str(cm.exception), expected_msg)

  # type checking failure with multiple arguments (one is correct)
  with self.assertRaises(TypeCheckError) as cm:
    AnotherTypedDatatype(text_type('correct'), text_type('should be list'))
  # Python 2 shows unicode values with a u'' prefix and the type name
  # 'unicode'; Python 3 uses 'str' and no prefix.
  def compare_str(unicode_type_name, include_unicode=False):
    expected_message = (
      """error: in constructor of type AnotherTypedDatatype: type check error:
field 'elements' was invalid: value {unicode_literal}'should be list' (with type '{type_name}') must satisfy this type constraint: Exactly(list)."""
      .format(type_name=unicode_type_name, unicode_literal='u' if include_unicode else ''))
    self.assertEqual(str(cm.exception), expected_message)
  if PY2:
    compare_str('unicode', include_unicode=True)
  else:
    compare_str('str')

  # type checking failure on both arguments
  with self.assertRaises(TypeCheckError) as cm:
    AnotherTypedDatatype(3, text_type('should be list'))
  # Both failures are reported, one per line, in argument order.
  def compare_str(unicode_type_name, include_unicode=False):
    expected_message = (
      """error: in constructor of type AnotherTypedDatatype: type check error:
field 'string' was invalid: value 3 (with type 'int') must satisfy this type constraint: Exactly({type_name}).
field 'elements' was invalid: value {unicode_literal}'should be list' (with type '{type_name}') must satisfy this type constraint: Exactly(list)."""
      .format(type_name=unicode_type_name, unicode_literal='u' if include_unicode else ''))
    self.assertEqual(str(cm.exception), expected_message)
  if PY2:
    compare_str('unicode', include_unicode=True)
  else:
    compare_str('str')

  with self.assertRaises(TypeCheckError) as cm:
    NonNegativeInt(text_type('asdf'))
  def compare_str(unicode_type_name, include_unicode=False):
    expected_message = (
      """error: in constructor of type NonNegativeInt: type check error:
field 'an_int' was invalid: value {unicode_literal}'asdf' (with type '{type_name}') must satisfy this type constraint: Exactly(int)."""
      .format(type_name=unicode_type_name, unicode_literal='u' if include_unicode else ''))
    self.assertEqual(str(cm.exception), expected_message)
  if PY2:
    compare_str('unicode', include_unicode=True)
  else:
    compare_str('str')

  # The type matches (int) but the value fails the non-negativity check.
  with self.assertRaises(TypeCheckError) as cm:
    NonNegativeInt(-3)
  expected_msg = (
    """error: in constructor of type NonNegativeInt: type check error:
value is negative: -3.""")
  self.assertEqual(str(cm.exception), expected_msg)

  with self.assertRaises(TypeCheckError) as cm:
    WithSubclassTypeConstraint(3)
  expected_msg = (
    """error: in constructor of type WithSubclassTypeConstraint: type check error:
field 'some_value' was invalid: value 3 (with type 'int') must satisfy this type constraint: SubclassesOf(SomeBaseClass).""")
  self.assertEqual(str(cm.exception), expected_msg)
| foursquare/pants | tests/python/pants_test/util/test_objects.py | Python | apache-2.0 | 20,881 |
"""
Application file for the anti-spam app.
"""
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class AntiSpamConfig(AppConfig):
    """
    Django application configuration for the anti-spam app.
    """

    # Dotted Python path to the application package.
    name = 'apps.antispam'

    # Human-readable app name, lazily translated for the admin UI.
    verbose_name = _('Anti-spam')
| TamiaLab/carnetdumaker | apps/antispam/apps.py | Python | agpl-3.0 | 311 |
from main.emails import send_member_email, send_multimember_email
from main.management.helpers import SendNotificationEmailCommand
from main.models import Membership, SentMemberEmail
class Command(SendNotificationEmailCommand):
    """Management command that notifies users newly added to a page."""

    help = 'Send emails to everyone who has been added to a page.'

    # Configuration consumed by SendNotificationEmailCommand.
    model = Membership
    tracking_model = SentMemberEmail
    recipient_field = 'user'
    other_field = 'page'
    send_single_email = staticmethod(send_member_email)
    send_bundled_email = staticmethod(send_multimember_email)

    def final_exclusion_filter(self, recipient, membership):
        """Return True for memberships that should NOT generate an email."""
        assert recipient == membership.user
        # Skip users who opted out of membership emails, and skip the page
        # owner (their own page membership needs no notification).
        if (not recipient.email_on_new_membership or
                recipient.id == membership.page.owner_id):
            return True
| reverie/jotleaf.com | jotleaf/main/management/commands/send_membership_emails.py | Python | mit | 790 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, comma_or
from frappe import msgprint, _, throw
from frappe.model.document import Document
def validate_status(status, options):
    """Raise a frappe validation error unless ``status`` is one of ``options``."""
    if status in options:
        return
    frappe.throw(_("Status must be one of {0}").format(comma_or(options)))
# Per-doctype status rules consumed by StatusUpdater.set_status().
# Each entry is a list of [status, condition] pairs; a condition is one of:
#   * None             -- unconditional fallback status
#   * "eval:<expr>"    -- Python expression evaluated with ``self`` bound to
#                         the document instance
#   * "<method_name>"  -- name of a boolean method on the document
# set_status() copies and reverses each list before scanning, so entries
# declared later here are tested first and take priority.
status_map = {
    "Lead": [
        ["Converted", "has_customer"],
        ["Opportunity", "has_opportunity"],
    ],
    "Opportunity": [
        ["Lost", "eval:self.status=='Lost'"],
        ["Quotation", "has_quotation"],
        ["Converted", "has_ordered_quotation"]
    ],
    "Quotation": [
        ["Draft", None],
        ["Submitted", "eval:self.docstatus==1"],
        ["Lost", "eval:self.status=='Lost'"],
        ["Ordered", "has_sales_order"],
        ["Cancelled", "eval:self.docstatus==2"],
    ],
    "Sales Order": [
        ["Draft", None],
        ["Submitted", "eval:self.docstatus==1"],
        ["Stopped", "eval:self.status=='Stopped'"],
        ["Cancelled", "eval:self.docstatus==2"],
    ],
    "Delivery Note": [
        ["Draft", None],
        ["Submitted", "eval:self.docstatus==1"],
        ["Cancelled", "eval:self.docstatus==2"],
    ],
    "Purchase Receipt": [
        ["Draft", None],
        ["Submitted", "eval:self.docstatus==1"],
        ["Cancelled", "eval:self.docstatus==2"],
    ]
}
class StatusUpdater(Document):
    """
    Updates the status of the calling records
    Delivery Note: Update Delivered Qty, Update Percent and Validate over delivery
    Sales Invoice: Update Billed Amt, Update Percent and Validate over billing
    Installation Note: Update Installed Qty, Update Percent Qty and Validate over installation

    Subclasses are expected to define ``self.status_updater``: a list of
    dicts whose keys ('source_dt', 'target_dt', 'join_field',
    'target_field', 'target_ref_field', ...) drive the SQL below.
    """
    def update_prevdoc_status(self):
        # Push quantities back to referenced documents, then validate the
        # result against the allowed overflow tolerance.
        self.update_qty()
        self.validate_qty()

    def set_status(self, update=False):
        """Derive ``self.status`` from the status_map rules for this doctype.

        :param update: when True, also persist the computed status to the DB.
        """
        if self.is_new():
            return

        if self.doctype in status_map:
            _status = self.status

            # Copy and reverse so entries declared later in status_map are
            # evaluated first.
            sl = status_map[self.doctype][:]
            sl.reverse()
            for s in sl:
                if not s[1]:
                    # No condition: unconditional status.
                    self.status = s[0]
                    break
                elif s[1].startswith("eval:"):
                    # NOTE: eval() runs a hard-coded expression from the
                    # module-level status_map (trusted, references ``self``).
                    if eval(s[1][5:]):
                        self.status = s[0]
                        break
                elif getattr(self, s[1])():
                    # Condition is the name of a boolean method on self.
                    self.status = s[0]
                    break

            # Log a comment only when the status actually changed.
            if self.status != _status and self.status not in ("Submitted", "Cancelled"):
                self.add_comment("Label", _(self.status))

            if update:
                frappe.db.set_value(self.doctype, self.name, "status", self.status)

    def validate_qty(self):
        """Validates qty at row level"""
        # Per-item and global tolerance caches, filled lazily by
        # get_tolerance_for() via check_overflow_with_tolerance().
        self.tolerance = {}
        self.global_tolerance = None

        for args in self.status_updater:
            # get unique transactions to update
            for d in self.get_all_children():
                if d.doctype == args['source_dt'] and d.get(args["join_field"]):
                    args['name'] = d.get(args['join_field'])

                    # get all qty where qty > target_field, i.e. rows already
                    # over-delivered/over-billed relative to their reference.
                    item = frappe.db.sql("""select item_code, `{target_ref_field}`,
                        `{target_field}`, parenttype, parent from `tab{target_dt}`
                        where `{target_ref_field}` < `{target_field}`
                        and name=%s and docstatus=1""".format(**args),
                        args['name'], as_dict=1)
                    if item:
                        item = item[0]
                        item['idx'] = d.idx
                        # Humanize the field name for use in messages below.
                        item['target_ref_field'] = args['target_ref_field'].replace('_', ' ')

                        if not item[args['target_ref_field']]:
                            msgprint(_("Note: System will not check over-delivery and over-booking for Item {0} as quantity or amount is 0").format(item.item_code))
                        elif args.get('no_tolerance'):
                            # Strict mode: any overflow beyond rounding noise fails.
                            item['reduce_by'] = item[args['target_field']] - item[args['target_ref_field']]
                            if item['reduce_by'] > .01:
                                msgprint(_("Allowance for over-{0} crossed for Item {1}")
                                    .format(args["overflow_type"], item.item_code))
                                throw(_("{0} must be reduced by {1} or you should increase overflow tolerance")
                                    .format(_(item.target_ref_field.title()), item["reduce_by"]))
                        else:
                            self.check_overflow_with_tolerance(item, args)

    def check_overflow_with_tolerance(self, item, args):
        """
        Checks if there is overflow considering a relaxation tolerance
        """
        # check if overflow is within tolerance (tolerance is a percentage)
        tolerance, self.tolerance, self.global_tolerance = get_tolerance_for(item['item_code'],
            self.tolerance, self.global_tolerance)
        overflow_percent = ((item[args['target_field']] - item[args['target_ref_field']]) /
            item[args['target_ref_field']]) * 100

        if overflow_percent - tolerance > 0.01:
            item['max_allowed'] = flt(item[args['target_ref_field']] * (100+tolerance)/100)
            item['reduce_by'] = item[args['target_field']] - item['max_allowed']

            msgprint(_("Allowance for over-{0} crossed for Item {1}.")
                .format(args["overflow_type"], item["item_code"]))
            throw(_("{0} must be reduced by {1} or you should increase overflow tolerance")
                .format(_(item["target_ref_field"].title()), item["reduce_by"]))

    def update_qty(self, change_modified=True):
        """
        Updates qty at row level

        :param change_modified: when True, also bump the parent's
            ``modified`` timestamp while updating percentages.
        """
        for args in self.status_updater:
            # condition to include current record (if submit or no if cancel)
            if self.docstatus == 1:
                args['cond'] = ' or parent="%s"' % self.name.replace('"', '\"')
            else:
                args['cond'] = ' and parent!="%s"' % self.name.replace('"', '\"')

            args['modified_cond'] = ''
            if change_modified:
                args['modified_cond'] = ', modified = now()'

            # update quantities in child table
            for d in self.get_all_children():
                if d.doctype == args['source_dt']:
                    # updates qty in the child table
                    args['detail_id'] = d.get(args['join_field'])

                    # Optional second source: its quantities are added on top
                    # of the primary source sum in the SQL below.
                    args['second_source_condition'] = ""
                    if args.get('second_source_dt') and args.get('second_source_field') \
                            and args.get('second_join_field'):
                        if not args.get("second_source_extra_cond"):
                            args["second_source_extra_cond"] = ""
                        args['second_source_condition'] = """ + ifnull((select sum(%(second_source_field)s)
                            from `tab%(second_source_dt)s`
                            where `%(second_join_field)s`="%(detail_id)s"
                            and (`tab%(second_source_dt)s`.docstatus=1) %(second_source_extra_cond)s), 0) """ % args

                    if args['detail_id']:
                        if not args.get("extra_cond"): args["extra_cond"] = ""
                        # NOTE(review): SQL is built with %-interpolation of
                        # internal field/doctype names (not user input).
                        frappe.db.sql("""update `tab%(target_dt)s`
                            set %(target_field)s = (select sum(%(source_field)s)
                                from `tab%(source_dt)s` where `%(join_field)s`="%(detail_id)s"
                                and (docstatus=1 %(cond)s) %(extra_cond)s) %(second_source_condition)s
                            where name='%(detail_id)s'""" % args)

            # get unique transactions to update
            for name in set([d.get(args['percent_join_field']) for d in self.get_all_children(args['source_dt'])]):
                if name:
                    args['name'] = name

                    # update percent complete in the parent table, capping each
                    # row's contribution at its reference quantity.
                    if args.get('target_parent_field'):
                        frappe.db.sql("""update `tab%(target_parent_dt)s`
                            set %(target_parent_field)s = (select sum(if(%(target_ref_field)s >
                                ifnull(%(target_field)s, 0), %(target_field)s,
                                %(target_ref_field)s))/sum(%(target_ref_field)s)*100
                            from `tab%(target_dt)s` where parent="%(name)s") %(modified_cond)s
                            where name='%(name)s'""" % args)

                    # update field: derive Not/Partly/Fully from the percentage.
                    if args.get('status_field'):
                        frappe.db.sql("""update `tab%(target_parent_dt)s`
                            set %(status_field)s = if(ifnull(%(target_parent_field)s,0)<0.001,
                                'Not %(keyword)s', if(%(target_parent_field)s>=99.99,
                                'Fully %(keyword)s', 'Partly %(keyword)s'))
                            where name='%(name)s'""" % args)

    def update_billing_status_for_zero_amount_refdoc(self, ref_dt):
        """Recompute billing status for referenced documents whose net total
        is zero (amount-based percentages would divide by zero for them)."""
        ref_fieldname = ref_dt.lower().replace(" ", "_")
        zero_amount_refdoc = []
        all_zero_amount_refdoc = frappe.db.sql_list("""select name from `tab%s`
            where docstatus=1 and base_net_total = 0""" % ref_dt)

        # Collect the zero-amount reference documents this document points at.
        for item in self.get("items"):
            if item.get(ref_fieldname) \
                and item.get(ref_fieldname) in all_zero_amount_refdoc \
                and item.get(ref_fieldname) not in zero_amount_refdoc:
                    zero_amount_refdoc.append(item.get(ref_fieldname))

        if zero_amount_refdoc:
            self.update_biling_status(zero_amount_refdoc, ref_dt, ref_fieldname)

    def update_biling_status(self, zero_amount_refdoc, ref_dt, ref_fieldname):
        # (sic: method name misspelled, kept for backward compatibility)
        # Compute per_billed from quantities instead of amounts, capped at 100%.
        for ref_dn in zero_amount_refdoc:
            ref_doc_qty = flt(frappe.db.sql("""select sum(ifnull(qty, 0)) from `tab%s Item`
                where parent=%s""" % (ref_dt, '%s'), (ref_dn))[0][0])

            billed_qty = flt(frappe.db.sql("""select sum(ifnull(qty, 0))
                from `tab%s Item` where %s=%s and docstatus=1""" %
                (self.doctype, ref_fieldname, '%s'), (ref_dn))[0][0])

            per_billed = ((ref_doc_qty if billed_qty > ref_doc_qty else billed_qty)\
                / ref_doc_qty)*100
            frappe.db.set_value(ref_dt, ref_dn, "per_billed", per_billed)

            # Only set the textual status when the target doctype has the field.
            if frappe.get_meta(ref_dt).get_field("billing_status"):
                if per_billed < 0.001: billing_status = "Not Billed"
                elif per_billed >= 99.99: billing_status = "Fully Billed"
                else: billing_status = "Partly Billed"

                frappe.db.set_value(ref_dt, ref_dn, "billing_status", billing_status)
def get_tolerance_for(item_code, item_tolerance=None, global_tolerance=None):
    """
    Returns the tolerance for the item, if not set, returns global tolerance

    Falls back to the Stock Settings global tolerance when the item defines
    none. Resolved values are memoized in ``item_tolerance`` so repeated
    lookups for the same item avoid extra DB round-trips.

    :param item_code: Item to look up.
    :param item_tolerance: cache dict of already-resolved item tolerances
        (a fresh dict is used when not provided).
    :param global_tolerance: cached global tolerance, or None if not yet read.
    :return: (tolerance, item_tolerance, global_tolerance)
    """
    # Fix: the original signature used a mutable default (item_tolerance={}),
    # which silently shared one cache dict across every call of the function.
    if item_tolerance is None:
        item_tolerance = {}

    if item_tolerance.get(item_code):
        return item_tolerance[item_code], item_tolerance, global_tolerance

    tolerance = flt(frappe.db.get_value('Item', item_code, 'tolerance') or 0)

    if not tolerance:
        if global_tolerance is None:
            global_tolerance = flt(frappe.db.get_value('Stock Settings', None, 'tolerance'))
        tolerance = global_tolerance

    item_tolerance[item_code] = tolerance
    return tolerance, item_tolerance, global_tolerance
| treejames/erpnext | erpnext/controllers/status_updater.py | Python | agpl-3.0 | 9,429 |
# The Hazard Library
# Copyright (C) 2015 GEM Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Module exports :class:'DostEtAl2004'
"""
from __future__ import division
import numpy as np
# standard acceleration of gravity in m/s**2
from scipy.constants import g
from openquake.hazardlib.gsim.base import GMPE, CoeffsTable
from openquake.hazardlib import const
from openquake.hazardlib.imt import PGA, PGV
class DostEtAl2004(GMPE):
    """
    Implements the GMPE of Dost et al. (2004) for PGA and PGV from
    induced seismicity earthquakes in the Netherlands
    Dost, B., van Eck, T. and Haak, H. (2004) Scaling of peak ground
    acceleration and peak ground velocity recorded in the Netherlands.
    Bollettino di Geofisica Teorica ed Applicata. 45(3), 153 - 168
    """
    #: The GMPE is derived from induced earthquakes
    DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.INDUCED

    #: Supported intensity measure types are peak ground acceleration
    #: and peak ground velocity
    DEFINED_FOR_INTENSITY_MEASURE_TYPES = set([
        PGA,
        PGV
    ])

    #: Supported intensity measure component is the average horizontal
    DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.GMRotD100

    #: Supported standard deviation types is total.
    DEFINED_FOR_STANDARD_DEVIATION_TYPES = set([
        const.StdDev.TOTAL
    ])

    #: No required site parameters
    REQUIRES_SITES_PARAMETERS = set(())

    #: Required rupture parameters are magnitude (ML is used)
    REQUIRES_RUPTURE_PARAMETERS = set(('mag', ))

    #: Required distance measure is hypocentral distance
    REQUIRES_DISTANCES = set(('rhypo',))

    #: GMPE not tested against independent implementation so raise
    #: not verified warning
    non_verified = True

    def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
        """
        See :meth:`superclass method
        <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
        for spec of input and result values.
        """
        C = self.COEFFS[imt]
        # log10 of the ground motion: magnitude term plus distance term.
        imean = (self._compute_magnitude_term(C, rup.mag) +
                 self._compute_distance_term(C, dists.rhypo))
        # Convert mean from cm/s and cm/s/s
        # NOTE(review): the comment above says cm/s/s, but PGA is divided by
        # scipy's g (9.81 m/s^2), which implies the PGA coefficients are in
        # log10(m/s^2) -- confirm against Dost et al. (2004).
        if isinstance(imt, PGA):
            mean = np.log((10.0 ** (imean)) / g)
        else:
            mean = np.log(10.0 ** imean)
        stddevs = self._get_stddevs(C, len(dists.rhypo), stddev_types)
        return mean, stddevs

    def _compute_magnitude_term(self, C, mag):
        """
        Returns the magnitude scaling term (linear in magnitude)
        """
        return C["c0"] + (C["c1"] * mag)

    def _compute_distance_term(self, C, rhypo):
        """
        Returns the distance scaling term (linear plus log10 attenuation)
        """
        return (C["c2"] * rhypo) + (C["c3"] * np.log10(rhypo))

    def _get_stddevs(self, C, num_sites, stddev_types):
        """
        Returns the total standard deviation, converted from log10 units
        to natural-log units as one value per site.
        """
        stddevs = []
        for stddev_type in stddev_types:
            assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
            if stddev_type == const.StdDev.TOTAL:
                stddevs.append(
                    np.log(10.0 ** C["sigma"]) + np.zeros(num_sites))
        return stddevs

    #: Coefficient table from Dost et al. (2004); sigma is in log10 units.
    COEFFS = CoeffsTable(sa_damping=5, table="""
    IMT     c0      c1      c2        c3     sigma
    pgv    -1.53    0.74   -0.00139  -1.33   0.33
    pga    -1.41    0.57   -0.00139  -1.33   0.33
    """)
class DostEtAl2004BommerAdaptation(DostEtAl2004):
    """
    Adaptation of the Dost et al. (2004) GMPE for application to higher
    magnitudes, as proposed by Bommer et al. (2013).
    """
    #: Total, inter-event and intra-event standard deviations are supported
    DEFINED_FOR_STANDARD_DEVIATION_TYPES = set([
        const.StdDev.TOTAL,
        const.StdDev.INTER_EVENT,
        const.StdDev.INTRA_EVENT
    ])

    def _compute_magnitude_term(self, C, mag):
        """
        Returns the magnitude scaling term, quadratic about magnitude 4.5
        """
        d_mag = mag - 4.5
        return C["c0"] + C["c1"] * mag + C["c1e"] * (d_mag ** 2.0)

    def _get_stddevs(self, C, num_sites, stddev_types):
        """
        Returns the total, inter-event and intra-event standard deviations,
        converted from log10 units to natural-log units, one value per site.
        """
        # Map each supported deviation type to its coefficient column.
        coeff_for_type = {
            const.StdDev.TOTAL: "sigma",
            const.StdDev.INTER_EVENT: "tau",
            const.StdDev.INTRA_EVENT: "phi",
        }
        stddevs = []
        for stddev_type in stddev_types:
            assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
            sigma_log10 = C[coeff_for_type[stddev_type]]
            stddevs.append(np.log(10.0 ** sigma_log10) + np.zeros(num_sites))
        return stddevs

    #: Coefficient table of Bommer et al. (2013); deviations in log10 units.
    COEFFS = CoeffsTable(sa_damping=5, table="""
    IMT      c0       c1       c1e      c2        c3     tau      phi     sigma
    pgv   -1.3972   0.7105  -0.0829  -0.00139  -1.33   0.1476   0.2952   0.33
    pga   -1.6090   0.6140  -0.1116  -0.00139  -1.33   0.1476   0.2952   0.33
    """)
| silviacanessa/oq-hazardlib | openquake/hazardlib/gsim/dost_2004.py | Python | agpl-3.0 | 5,676 |
try:
    from setuptools import setup
except ImportError:
    # Fall back to distutils when setuptools is unavailable.
    from distutils.core import setup

# Package metadata kept in one mapping and expanded into setup().
PACKAGE_INFO = {
    'name': 'tornadozencoder',
    'version': '0.6.5',
    'description': 'Integration library for Zencoder',
    'author': 'Alex Schworer',
    'author_email': 'alex.schworer@gmail.com',
    'url': 'http://github.com/schworer/zencoder-py',
    'license': "MIT License",
    'install_requires': ['requests>=1.0'],
    'tests_require': ['mock', 'nose'],
    'packages': ['tornadozencoder'],
    'platforms': 'any',
    'classifiers': [
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
}

setup(**PACKAGE_INFO)
| Didacti/tornadozencoder | setup.py | Python | mit | 943 |
# Copyright 2013 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import collections
from neutron_lib.api.definitions import allowedaddresspairs as addr_apidef
from neutron_lib.api.definitions import port as port_def
from neutron_lib.api import validators
from neutron_lib.db import api as db_api
from neutron_lib.db import resource_extend
from neutron_lib.db import utils as db_utils
from neutron_lib.exceptions import allowedaddresspairs as addr_exc
from neutron_lib.objects import exceptions
from neutron_lib.utils import net as net_utils
from neutron.objects.port.extensions import (allowedaddresspairs
as obj_addr_pair)
@resource_extend.has_resource_extenders
class AllowedAddressPairsMixin(object):
    """Mixin class for allowed address pairs."""

    def _process_create_allowed_address_pairs(self, context, port,
                                              allowed_address_pairs):
        """Persist the given address pairs for ``port``.

        Returns the (possibly empty) list of pairs that were stored.
        Raises DuplicateAddressPairInRequest when the same pair appears twice.
        """
        if not validators.is_attr_set(allowed_address_pairs):
            # Attribute not supplied in the request: nothing to store.
            return []
        try:
            with db_api.CONTEXT_WRITER.using(context):
                for address_pair in allowed_address_pairs:
                    # use port.mac_address if no mac address in address pair
                    if 'mac_address' not in address_pair:
                        address_pair['mac_address'] = port['mac_address']
                    # retain string format as passed through API
                    mac_address = net_utils.AuthenticEUI(
                        address_pair['mac_address'])
                    ip_address = net_utils.AuthenticIPNetwork(
                        address_pair['ip_address'])
                    pair_obj = obj_addr_pair.AllowedAddressPair(
                        context,
                        port_id=port['id'],
                        mac_address=mac_address,
                        ip_address=ip_address)
                    pair_obj.create()
        except exceptions.NeutronDbObjectDuplicateEntry:
            # ``address_pair`` still refers to the pair whose create() failed.
            raise addr_exc.DuplicateAddressPairInRequest(
                mac_address=address_pair['mac_address'],
                ip_address=address_pair['ip_address'])
        return allowed_address_pairs

    def get_allowed_address_pairs(self, context, port_id):
        """Return the address pairs of one port as a list of dicts."""
        pairs = obj_addr_pair.AllowedAddressPair.get_objects(
            context, port_id=port_id)
        return [self._make_allowed_address_pairs_dict(pair.db_obj)
                for pair in pairs]

    def get_allowed_address_pairs_for_ports(self, context, port_ids):
        """Return a mapping of port_id -> list of address-pair dicts for
        every port in ``port_ids`` (ports without pairs are simply absent)."""
        pairs = (
            obj_addr_pair.AllowedAddressPair.
            get_allowed_address_pairs_for_ports(
                context, port_ids=port_ids))
        result = collections.defaultdict(list)
        for pair in pairs:
            result[pair.port_id].append(
                self._make_allowed_address_pairs_dict(pair.db_obj))
        return result

    @staticmethod
    @resource_extend.extends([port_def.COLLECTION_NAME])
    def _extend_port_dict_allowed_address_pairs(port_res, port_db):
        # If port_db is provided, allowed address pairs will be accessed via
        # sqlalchemy models. As they're loaded together with ports this
        # will not cause an extra query.
        allowed_address_pairs = [
            AllowedAddressPairsMixin._make_allowed_address_pairs_dict(
                address_pair) for
            address_pair in port_db.allowed_address_pairs]
        port_res[addr_apidef.ADDRESS_PAIRS] = allowed_address_pairs
        return port_res

    def _delete_allowed_address_pairs(self, context, id):
        """Delete every address pair stored for port ``id``."""
        obj_addr_pair.AllowedAddressPair.delete_objects(
            context, port_id=id)

    @staticmethod
    def _make_allowed_address_pairs_dict(allowed_address_pairs,
                                         fields=None):
        """Convert a DB row/object into the API dict, filtered by ``fields``."""
        res = {'mac_address': allowed_address_pairs['mac_address'],
               'ip_address': allowed_address_pairs['ip_address']}
        return db_utils.resource_fields(res, fields)

    def _has_address_pairs(self, port):
        """Return True when the request carries a non-empty pair list."""
        return (validators.is_attr_set(
            port['port'][addr_apidef.ADDRESS_PAIRS]) and
                port['port'][addr_apidef.ADDRESS_PAIRS] != [])

    def _check_update_has_allowed_address_pairs(self, port):
        """Determine if request has an allowed address pair.

        Return True if the port parameter has a non-empty
        'allowed_address_pairs' attribute. Otherwise returns False.
        """
        return (addr_apidef.ADDRESS_PAIRS in port['port'] and
                self._has_address_pairs(port))

    def _check_update_deletes_allowed_address_pairs(self, port):
        """Determine if request deletes address pair.

        Return True if port has an allowed address pair and its value
        is either [] or not is_attr_set, otherwise return False
        """
        return (addr_apidef.ADDRESS_PAIRS in port['port'] and
                not self._has_address_pairs(port))

    def is_address_pairs_attribute_updated(self, port, update_attrs):
        """Check if the address pairs attribute is being updated.

        Returns True if there is an update. This can be used to decide
        if a port update notification should be sent to agents or third
        party controllers.
        """
        new_pairs = update_attrs.get(addr_apidef.ADDRESS_PAIRS)
        if new_pairs is None:
            return False
        old_pairs = port.get(addr_apidef.ADDRESS_PAIRS)

        # Missing or unchanged address pairs in attributes mean no update
        return new_pairs != old_pairs

    def update_address_pairs_on_port(self, context, port_id, port,
                                     original_port, updated_port):
        """Update allowed address pairs on port.

        Returns True if an update notification is required. Notification
        is not done here because other changes on the port may need
        notification. This method is expected to be called within
        a transaction.
        """
        new_pairs = port['port'].get(addr_apidef.ADDRESS_PAIRS)

        if self.is_address_pairs_attribute_updated(original_port,
                                                   port['port']):
            # Replace the stored pairs wholesale: delete then recreate.
            updated_port[addr_apidef.ADDRESS_PAIRS] = new_pairs
            self._delete_allowed_address_pairs(context, port_id)
            self._process_create_allowed_address_pairs(
                context, updated_port, new_pairs)
            return True

        return False
| mahak/neutron | neutron/db/allowedaddresspairs_db.py | Python | apache-2.0 | 7,023 |
from textwrap import dedent
import json
import re
class NotSpecified(object):
    """Sentinel type used to distinguish "value not provided" from ``None``."""
class AssertionsAssertionsMixin:
    """Extra assertion helpers mixed into test cases."""

    def assertSortedEqual(self, one, two):
        """Assert that the sorted of the two equal"""
        assert sorted(one) == sorted(two)

    def assertJsonDictEqual(self, one, two):
        """Assert the two dictionaries are the same, print out as json if not"""
        try:
            assert one == two
        except AssertionError:
            # Dump both sides as pretty-printed JSON to make diffs readable,
            # then re-raise the original failure.
            print("Got =============>")
            print(json.dumps(one, indent=2, sort_keys=True))
            print("Expected --------------->")
            print(json.dumps(two, indent=2, sort_keys=True))
            raise

    def assertReMatchLines(self, expected, output, remove=None):
        """Assert that all the lines match each other in order

        ``expected`` is a block of regex patterns, one per line, matched
        line-by-line (via re.match) against ``output``. ``remove`` is an
        optional list of compiled regexes; output lines matching any of
        them are dropped before comparison. ANSI colour escapes are
        stripped from output lines before matching.
        """
        expected = dedent(expected).strip()
        expected_lines = expected.split("\n")
        # Keep an encoded copy of the whole block for failure messages.
        expected = expected.encode("utf-8")

        output = dedent(output).strip()
        output_lines = output.split("\n")
        if remove:
            output_lines = [line for line in output_lines if not any(r.match(line) for r in remove)]
        output = output.encode("utf-8")

        if len(expected_lines) != len(output_lines):
            assert (
                False
            ), "Different number of lines! Expected ===>\n{0}\n\nTo match ===>\n{1}".format(
                expected, output
            )

        ansi_escape = re.compile(r"\x1b[^m]*m")
        for a, b in zip(expected_lines, output_lines):
            # Normalize both sides to utf-8 bytes; strip ANSI escapes from the
            # output line (decoding first when it is already bytes).
            if not isinstance(a, bytes):
                a = a.encode("utf-8")
            if not isinstance(b, bytes):
                b = re.sub(ansi_escape, "", b).encode("utf-8")
            else:
                b = re.sub(ansi_escape, "", b.decode("utf-8")).encode("utf-8")
            assert re.match(
                a, b
            ), "Didn't match! Expected ===>\n{0}\n\nTo match ===>\n{1}\n\n===>Failed matching {2} to {3}".format(
                expected, output, a, b
            )
| delfick/harpoon | tests/helpers/mixins/assertions.py | Python | mit | 2,082 |
# -*- coding: utf-8 -*-
# One-off beamline analysis script: creates a new NXdata signal in an
# existing fluo-XAS results file by dividing one dataset by another
# (presumably normalizing the Ni-K signal by the storage-ring current,
# given the "arr_srcurr" name -- confirm with the beamline workflow).

# HDF5 results file produced by a previous spectrocrunch run.
filename = "/data/id16b/inhouse1/comm_17jan/restart/sofc/26jan/6100h_fluoXAS_0/results/6100h_fluoXAS_0/test.h5"
# Numerator and denominator dataset groups (each holds a "data" dataset;
# assumed broadcast-compatible shapes -- TODO confirm).
name = ["/detectorsum/Ni-K_norm", "/counters/arr_srcurr"]
# HDF5 path of the new NXdata group to create for the result.
new = "/detectorsum/Ni-K_norm2"

###### Import libraries ######
import spectrocrunch.io.nexus as nexus
from spectrocrunch.h5stacks.get_hdf5_imagestacks import get_hdf5_imagestacks
import h5py
import numpy as np

###### Normalize one dataset by another ######
# Fetch existing stacks and their axes so the new signal can link to the
# same axes ("stacks" itself is not used further).
stacks, axes = get_hdf5_imagestacks(filename, ["detectorsum"])

f = h5py.File(filename)
# Split the target path into parent group ("detectorsum") and NXdata name.
tmp = [s for s in new.split("/") if len(s) > 0]
nxdatagrp = nexus.newNXdata(f[tmp[0]], "/".join(tmp[1:]), "")
# Create the signal dataset with the numerator's shape and dtype.
dset = nexus.createNXdataSignal(
    nxdatagrp,
    shape=f[name[0]]["data"][:].shape,
    chunks=True,
    dtype=f[name[0]]["data"][:].dtype,
)
nexus.linkaxes(f, axes, [nxdatagrp])
# Element-wise ratio written into the new dataset.
dset[:] = f[name[0]]["data"][:] / f[name[1]]["data"][:]
f.close()
| woutdenolf/spectrocrunch | scraps/id16norm.py | Python | mit | 904 |
# Packaging script for mhef (Monster Hunter Encryption Functions).
# Prefer setuptools (which honours install_requires); fall back to plain
# distutils when setuptools is not installed.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

setup(
    name='mhef',
    version='1.0.0b6',
    description='Monster Hunter Encryption Functions',
    url='https://github.com/svanheulen/mhef',
    author='Seth VanHeulen',
    author_email='svanheulen@gmail.com',
    license='GPLv3',
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Topic :: Security :: Cryptography'
    ],
    packages=['mhef'],
    # NOTE(review): pycrypto is unmaintained; pycryptodome is the usual
    # drop-in replacement — confirm compatibility before switching.
    install_requires=['pycrypto']
)
| didix16/mh4editor | setup.py | Python | mit | 723 |
# BlenderBIM Add-on - OpenBIM Blender Add-on
# Copyright (C) 2020, 2021 Dion Moult <dion@thinkmoult.com>
#
# This file is part of BlenderBIM Add-on.
#
# BlenderBIM Add-on is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BlenderBIM Add-on is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BlenderBIM Add-on. If not, see <http://www.gnu.org/licenses/>.
import bpy
from blenderbim.bim.prop import StrProperty, Attribute
from ifcopenshell.api.classification.data import Data
from bpy.types import PropertyGroup
from bpy.props import (
PointerProperty,
StringProperty,
EnumProperty,
BoolProperty,
IntProperty,
FloatProperty,
FloatVectorProperty,
CollectionProperty,
)
classification_enum = []
def purge():
    """Drop the cached classification enum items so they get rebuilt lazily."""
    global classification_enum
    classification_enum = []
def getClassifications(self, context):
    """EnumProperty ``items`` callback: list classifications of the loaded library.

    Results are cached in the module-level ``classification_enum`` so Blender
    does not rebuild (and garbage-collect) the item tuples on every UI redraw.
    """
    global classification_enum
    if len(classification_enum) < 1:
        classification_enum.clear()
        # EnumProperty items are (identifier, name, description) tuples.
        classification_enum.extend([(str(i), n, "") for i, n in Data.library_classifications.items()])
        if classification_enum:
            # Pre-populate the reference list for the first classification.
            getReferences(self, context, parent_id=int(classification_enum[0][0]))
    return classification_enum
def updateClassification(self, context):
    """Reload the library reference list when the active classification changes."""
    selected = int(self.available_classifications)
    getReferences(self, context, parent_id=selected)
def getReferences(self, context, parent_id=None):
    """Fill the scene's library reference list with the children of *parent_id*.

    Args:
        context: Blender context providing ``scene.BIMClassificationProperties``.
        parent_id: id of the classification (or reference) whose
            ``HasReferences`` children should be listed.
    """
    props = context.scene.BIMClassificationProperties
    props.available_library_references.clear()
    for reference in Data.library_file.by_id(parent_id).HasReferences:
        new = props.available_library_references.add()
        new.identification = reference.Identification or ""
        new.name = reference.Name or ""
        new.ifc_definition_id = reference.id()
        new.has_references = bool(reference.HasReferences)
        # BUG FIX: this line was a bare attribute access (a no-op).
        # NOTE(review): assigning the parent ReferencedSource id matches the
        # IntProperty declared on ClassificationReference — confirm intent.
        new.referenced_source = reference.ReferencedSource.id()
        if reference.ReferencedSource.is_a("IfcClassificationReference"):
            props.active_library_referenced_source = reference.ReferencedSource.ReferencedSource.id()
        else:
            props.active_library_referenced_source = 0
class ClassificationReference(PropertyGroup):
    """UI list item mirroring an IfcClassificationReference from the library file."""

    name: StringProperty(name="Name")
    identification: StringProperty(name="Identification")
    # id() of the reference in the library IFC file
    ifc_definition_id: IntProperty(name="IFC Definition ID")
    # True when the reference has children (drill-down is possible)
    has_references: BoolProperty(name="Has References")
    # id() of the reference's ReferencedSource entity
    referenced_source: IntProperty(name="IFC Definition ID")
class BIMClassificationProperties(PropertyGroup):
    """Scene-level state for browsing and importing classification libraries."""

    # Items are built lazily by getClassifications; switching the active
    # classification reloads the reference list via updateClassification.
    available_classifications: EnumProperty(
        items=getClassifications, name="Available Classifications", update=updateClassification
    )
    classification_attributes: CollectionProperty(name="Classification Attributes", type=Attribute)
    active_classification_id: IntProperty(name="Active Classification Id")
    available_library_references: CollectionProperty(name="Available Library References", type=ClassificationReference)
    active_library_referenced_source: IntProperty(name="Active Library Referenced Source")
    active_library_reference_index: IntProperty(name="Active Library Reference Index")
class BIMClassificationReferenceProperties(PropertyGroup):
    """State for editing the attributes of a single classification reference."""

    reference_attributes: CollectionProperty(name="Reference Attributes", type=Attribute)
    active_reference_id: IntProperty(name="Active Reference Id")
| IfcOpenShell/IfcOpenShell | src/blenderbim/blenderbim/bim/module/classification/prop.py | Python | lgpl-3.0 | 3,717 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
from csv import DictReader
import codecs
import logging
from os import path
import re
import sys
from pyknp import Juman
import utils
import preprocess
# Python 2 hack: force the default codec to UTF-8 so implicit str<->unicode
# conversions of Japanese text do not raise; reload() is needed because
# site.py deletes sys.setdefaultencoding at interpreter startup.
reload(sys)
sys.setdefaultencoding('utf8')

verbose = False  # set from the -v command-line flag in main()
logger = None  # initialised by init_logger()

DIR_SCRIPT = path.dirname(path.abspath(__file__))
DIR_ROOT = DIR_SCRIPT # TODO: move this file to ./features

# CSV lookup tables shipped alongside the script.
PATH_KANA_VOWEL_TABLE = path.join(DIR_ROOT,
                                  'data/kana_vowel_table.csv')
PATH_EN_KANA_TABLE = path.join(DIR_ROOT,
                               'data/en_kana_table.csv')
def init_logger():
    """Create the module-wide 'Rhyme' logger with a timestamped format."""
    global logger
    log_format = '%(asctime)s/%(name)s[%(levelname)s]: %(message)s'
    logging.basicConfig(format=log_format)
    logger = logging.getLogger('Rhyme')
    logger.setLevel(logging.WARNING)
def get_phonetic_transcription(text, table_term_vowels):
    """Map each whitespace-separated chunk of *text* through the vowel table.

    Chunks with no entry in *table_term_vowels* are silently skipped.
    """
    known = [table_term_vowels[chunk]
             for chunk in text.split()
             if chunk in table_term_vowels]
    return ' '.join(known)
def get_phonetic_transcription_juman(text):
    u"""Return space-joined vowel strings for *text* using Juman analysis (slow).

    Re-reads both conversion tables from disk and runs morphological
    analysis per chunk on every call.

    Args:
        text: unicode text to transcribe.

    Returns:
        A string of one vowel sequence per analysed chunk, space-joined.
    """
    word_re = re.compile(u'\w+')
    juman = Juman()

    # (Hira+Kata)Kana -> vowel
    kana_vowel_table = {}
    with codecs.open(PATH_KANA_VOWEL_TABLE,
                     'r', encoding='utf-8') as f:
        for line in f:
            kana, vowel = line.strip().split(',')
            kana_vowel_table[kana] = vowel

    # English -> Japanese Hiragana
    en_kana_table = {}
    with codecs.open(PATH_EN_KANA_TABLE,
                     'r', encoding='utf-8') as f:
        for line in f:
            eng, kana = line.strip().split(',')
            en_kana_table[eng] = kana

    result = []
    text = preprocess.translate_non_alphanumerics(text)
    for chunk in text.split():
        vowels = []
        for mrph in juman.analysis(chunk):
            yomi = mrph.yomi.lower()
            if yomi in en_kana_table:
                yomi = en_kana_table[yomi]
            m = word_re.search(yomi)
            if m is not None:
                # use stderr to print if needed.
                pass
            for kana in yomi:
                if kana == u'ー':
                    # Long-vowel mark: repeat the previous vowel.
                    # BUG FIX: list indexing raises IndexError, not KeyError,
                    # when `vowels` is still empty — the old handlers never
                    # fired and the function crashed instead of logging.
                    try:
                        vowels.append(vowels[-1])
                    except IndexError:
                        logger.info(chunk)
                    continue
                if kana in [u'ゃ', u'ャ']:
                    # Small ya: previous mora's vowel becomes 'a'.
                    try:
                        vowels[-1] = 'a'
                    except IndexError:  # BUG FIX: was KeyError
                        logger.info(chunk)
                    continue
                if kana in [u'ゅ', u'ュ']:
                    # Small yu: previous mora's vowel becomes 'u'.
                    try:
                        vowels[-1] = 'u'
                    except IndexError:  # BUG FIX: was KeyError
                        logger.info(chunk)
                    continue
                if kana in [u'ょ', u'ョ']:
                    # Small yo: previous mora's vowel becomes 'o'.
                    try:
                        vowels[-1] = 'o'
                    except IndexError:  # BUG FIX: was KeyError
                        logger.info(chunk)
                    continue
                try:
                    vowels.append(kana_vowel_table[kana])
                except KeyError:
                    # Not a kana we know a vowel for (punctuation etc.).
                    pass
        if len(vowels) > 0:
            result.append(''.join(vowels))
    return ' '.join(result)
def main(args):
    """Print each input line with two phonetic transcriptions.

    For every line of ``args.filename`` prints: the line itself, the
    table-based vowel string (concatenated), the Juman-based vowel string
    and a blank separator line.

    Returns:
        0 on success.
    """
    global verbose
    verbose = args.verbose
    i = 0
    table_term_vowel = utils.load_csv_to_dict(path.join(DIR_ROOT,
                                              'data/term_vowel_table.csv'))
    with codecs.open(args.filename, encoding='utf-8') as f:
        for line in f:
            i += 1
            if verbose and i % 10 == 0:
                logger.info(i)
            if len(line) == 0:
                continue
            # BUG FIX: codecs.open already yields unicode, so the previous
            # line.decode('utf-8') round-trip was redundant and only avoided
            # crashing because of the sys.setdefaultencoding('utf8') hack.
            result1 = get_phonetic_transcription(line, table_term_vowel)
            result2 = get_phonetic_transcription_juman(line)
            print('{}'.format(line.strip()))
            print(''.join(result1.split()))
            print(result2)
            print('')
    return 0
if __name__ == '__main__':
    # CLI entry point: rhyme.py [-v] FILENAME
    init_logger()
    parser = argparse.ArgumentParser()
    parser.add_argument('filename')
    parser.add_argument('-v', '--verbose', action='store_true', default=False)
    args = parser.parse_args()
    main(args)
| jntkym/rappers | rhyme.py | Python | mit | 4,537 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements the graph generation for computation of gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import warnings
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_grad # pylint: disable=unused-import
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_grad # pylint: disable=unused-import
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import image_grad # pylint: disable=unused-import
from tensorflow.python.ops import linalg_grad # pylint: disable=unused-import
from tensorflow.python.ops import linalg_ops # pylint: disable=unused-import
from tensorflow.python.ops import logging_ops # pylint: disable=unused-import
from tensorflow.python.ops import manip_grad # pylint: disable=unused-import
from tensorflow.python.ops import math_grad # pylint: disable=unused-import
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import spectral_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export
# Warn the user if we convert a sparse representation to dense with at
# least this number of elements.
_LARGE_SPARSE_NUM_ELEMENTS = 100000000
def _IndexedSlicesToTensor(value, dtype=None, name=None, as_ref=False):
  """Convert an `ops.IndexedSlices` `value` into a dense `Tensor`.

  NOTE: potentially expensive — materializes the full dense shape.

  Args:
    value: An `ops.IndexedSlices` object.
    dtype: Optional requested dtype; must be compatible with `value.dtype`.
    name: Optional name for the returned tensor.
    as_ref: Ignored; present for the conversion-function signature.

  Returns:
    A dense `Tensor` holding the row values of `value`.

  Raises:
    ValueError: If `dtype` is incompatible or `value` has no dense_shape.
  """
  del as_ref  # unused by this conversion
  if dtype and not dtype.is_compatible_with(value.dtype):
    raise ValueError(
        "Tensor conversion requested dtype %s for IndexedSlices with dtype %s" %
        (dtype.name, value.dtype.name))
  if value.dense_shape is None:
    raise ValueError(
        "Tensor conversion requested for IndexedSlices without dense_shape: %s"
        % str(value))
  # Warn when the conversion may allocate a very large dense tensor; the
  # element count is only known statically when dense_shape is a constant.
  static_shape = tensor_util.constant_value(value.dense_shape)
  if static_shape is None:
    warnings.warn(
        "Converting sparse IndexedSlices to a dense Tensor of unknown shape. "
        "This may consume a large amount of memory.")
  else:
    num_elements = np.prod(static_shape)
    if num_elements >= _LARGE_SPARSE_NUM_ELEMENTS:
      warnings.warn(
          "Converting sparse IndexedSlices to a dense Tensor with %d elements. "
          "This may consume a large amount of memory." % num_elements)
  return math_ops.unsorted_segment_sum(
      value.values, value.indices, value.dense_shape[0], name=name)
# Allow implicit IndexedSlices -> Tensor conversion anywhere a dense Tensor
# is expected (e.g. when an op consumes sparse gradients).
ops.register_tensor_conversion_function(ops.IndexedSlices,
                                        _IndexedSlicesToTensor)
def _MarkReachedOps(from_ops, reached_ops):
"""Mark all ops reached from "from_ops".
Args:
from_ops: list of Operations.
reached_ops: list of booleans, indexed by operation id.
"""
queue = collections.deque()
queue.extend(from_ops)
while queue:
op = queue.popleft()
if not reached_ops[op._id]:
reached_ops[op._id] = True
for output in op.outputs:
queue.extend(output.consumers())
def _GatherInputs(to_ops, reached_ops):
"""List all inputs of to_ops that are in reached_ops.
Args:
to_ops: list of Operations.
reached_ops: list of booleans, indexed by operation id.
Returns:
The list of all inputs of to_ops that are in reached_ops.
That list includes all elements of to_ops.
"""
inputs = []
queue = collections.deque()
queue.extend(to_ops)
while queue:
op = queue.popleft()
# We are interested in this op.
if reached_ops[op._id]:
inputs.append(op)
# Clear the boolean so we won't add the inputs again.
reached_ops[op._id] = False
for inp in op.inputs:
queue.append(inp.op)
return inputs
def _PendingCount(graph, to_ops, from_ops, colocate_gradients_with_ops):
  """Initialize the pending count for ops between two lists of Operations.

  'pending_count[op._id]' indicates the number of backprop inputs
  to this operation.

  Args:
    graph: a Graph.
    to_ops: list of Operations.
    from_ops: list of Operations.
    colocate_gradients_with_ops: Python bool.  See docstring of gradients().

  Returns:
    A tuple containing: (1) a list of integers indexed by operation id,
    indicating the number of backprop inputs to this operation, and (2)
    a ControlFlowState object which is not None if the ops between from_ops
    and to_ops contain control flow loops.
  """
  # Mark reachable ops from from_ops.  Pre-marking to_ops makes the forward
  # walk stop at (and include) them.
  reached_ops = [False] * (graph._last_id + 1)
  for op in to_ops:
    reached_ops[op._id] = True
  _MarkReachedOps(from_ops, reached_ops)

  # Mark between ops: walk backwards from to_ops, keeping only ops that the
  # forward walk above also reached.
  between_ops = [False] * (graph._last_id + 1)
  between_op_list = []
  queue = collections.deque()
  queue.extend(to_ops)
  while queue:
    op = queue.popleft()
    # We are interested in this op.
    if reached_ops[op._id]:
      between_ops[op._id] = True
      between_op_list.append(op)
      # Clear the boolean so we won't add the inputs again.
      reached_ops[op._id] = False
      for inp in op.inputs:
        queue.append(inp.op)

  # 'loop_state' is None if there are no while loops.
  loop_state = control_flow_ops.MaybeCreateControlFlowState(
      between_op_list, between_ops, colocate_gradients_with_ops)

  # Initialize pending count for between ops: one unit per consumer edge
  # that lies inside the between subgraph.
  pending_count = [0] * (graph._last_id + 1)
  for op in between_op_list:
    for x in op.inputs:
      if between_ops[x.op._id]:
        pending_count[x.op._id] += 1

  return pending_count, loop_state
def _AsList(x):
return x if isinstance(x, (list, tuple)) else [x]
def _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops):
  """Fill in default values for grad_ys.

  Args:
    grad_ys: List of gradients, can contain None.
    ys: List of tensors.
    colocate_gradients_with_ops: If True, try colocating gradients with
      the corresponding op.

  Returns:
    A list of gradients to use, without None.

  Raises:
    ValueError: If sizes of gradients and inputs don't match
    TypeError: If type of any gradient is not valid for its input.
  """
  if len(grad_ys) != len(ys):
    raise ValueError("Passed %d grad_ys for %d ys" % (len(grad_ys), len(ys)))
  grad_ys = ops.convert_n_to_tensor_or_indexed_slices(grad_ys, name="grad_y")
  new_grad_ys = []
  for i in xrange(len(grad_ys)):
    grad_y = grad_ys[i]
    y = ys[i]
    with _maybe_colocate_with(y.op, colocate_gradients_with_ops):
      if grad_y is None:
        if y.dtype.is_complex:
          raise TypeError(
              "Gradients of complex tensors must set grad_ys (y.dtype = %r)" %
              y.dtype)
        # Default initial gradient: a tensor of ones with y's shape/dtype.
        new_grad_ys.append(
            array_ops.fill(
                array_ops.shape(y),
                constant_op.constant(1, dtype=y.dtype, name="grad_ys_%d" % i)))
        continue
      # Validate that the supplied grad_y dtype is usable for y's dtype.
      if y.dtype.is_floating or y.dtype.is_integer:
        if not grad_y.dtype.is_floating and not grad_y.dtype.is_integer:
          raise TypeError("Gradient type %s generated for real or "
                          "integer-valued tensor %s with type %s must be "
                          "real or integer" %
                          (dtypes.as_dtype(grad_y.dtype).name, y,
                           dtypes.as_dtype(y.dtype).name))
      elif y.dtype.is_complex:
        if not grad_y.dtype.is_complex:
          raise TypeError("Gradient type %s generated for complex-valued "
                          "tensor %s with type %s must be real" %
                          (dtypes.as_dtype(grad_y.dtype).name, y,
                           dtypes.as_dtype(y.dtype).name))
      else:
        raise TypeError("Tensor %s with type %s must be numeric "
                        "to obtain a default gradient" %
                        (y, dtypes.as_dtype(y.dtype).name))
      # Create a grad_y tensor in the name scope of the gradient.
      # Required for TensorArrays to identify which gradient call a
      # grad_y value is coming from.
      if isinstance(grad_y, ops.IndexedSlices):
        new_grad_ys.append(
            ops.IndexedSlices(
                indices=(array_ops.identity(
                    grad_y.indices, name="grad_ys_%d_indices" % i)
                         if isinstance(grad_y.indices, ops.Tensor) else
                         grad_y.indices),
                values=(array_ops.identity(
                    grad_y.values, name="grad_ys_%d_values" % i) if isinstance(
                        grad_y.values, ops.Tensor) else grad_y.values),
                dense_shape=(array_ops.identity(
                    grad_y.dense_shape, name="grad_ys_%d_shape" % i)
                             if isinstance(grad_y.dense_shape, ops.Tensor) else
                             grad_y.dense_shape)))
      else:
        new_grad_ys.append(array_ops.identity(grad_y, name="grad_ys_%d" % i))

  return new_grad_ys
def _IsTrainable(tensor):
  """Return True iff `tensor` has a floating-point or complex base dtype."""
  base = dtypes.as_dtype(tensor.dtype).base_dtype
  return base in (dtypes.float16, dtypes.float32, dtypes.float64,
                  dtypes.complex64, dtypes.complex128)
def _VerifyGeneratedGradients(grads, op):
"""Verify that gradients are valid in number and type.
Args:
grads: List of generated gradients.
op: Operation for which the gradients where generated.
Raises:
ValueError: if sizes of gradients and inputs don't match.
TypeError: if type of any gradient is not valid for its input.
"""
if len(grads) != len(op.inputs):
raise ValueError("Num gradients %d generated for op %s do not match num "
"inputs %d" % (len(grads), op.node_def, len(op.inputs)))
def _StopOps(from_ops, stop_gradient_ops, pending_count):
"""The set of ops that terminate the gradient computation.
This computes the frontier of the forward graph *before* which backprop
should stop. Operations in the returned set will not be differentiated.
This set is defined as the subset of `from_ops` containing ops that have
no predecessor in `from_ops`. `pending_count` is the result of
`_PendingCount(g, xs, from_ops)`. An 'op' has predecessors in `from_ops`
iff pending_count[op._id] > 0.
In addition, none of `stop_gradient_ops` will be differentiated.
Args:
from_ops: list of Operations.
stop_gradient_ops: list of Operations never to backprop through.
pending_count: List of integers, indexed by operation id.
Returns:
The set of operations.
"""
stop_ops = set()
for op in from_ops:
is_stop_op = True
for inp in op.inputs:
if pending_count[inp.op._id] > 0:
is_stop_op = False
break
if is_stop_op:
stop_ops.add(op._id)
stop_ops.update(op._id for op in stop_gradient_ops) # pylint: disable=protected-access
return stop_ops
@contextlib.contextmanager
def _maybe_colocate_with(op, colocate_gradients_with_ops):
  """Context to colocate with `op` if `colocate_gradients_with_ops`."""
  # When colocation is off this degenerates to a no-op context so callers
  # can use a single `with` code path.
  if colocate_gradients_with_ops:
    with ops.colocate_with(op):
      yield
  else:
    yield
def _SymGrad(op, out_grads):
  """Backprop through a function call node op given its outputs' gradients."""
  # The SymbolicGradient node consumes the forward op's inputs followed by
  # the output gradients, and emits one gradient per forward input, with the
  # forward inputs' dtypes.
  f_in = [x for x in op.inputs] + out_grads
  f_types = [x.dtype for x in op.inputs]
  f = attr_value_pb2.NameAttrList()
  f.name = op.type
  # Forward all of the call node's attrs so the gradient function is
  # instantiated with the same attributes as the forward call.
  for k in op.node_def.attr:
    f.attr[k].CopyFrom(op.node_def.attr[k])
  # pylint: disable=protected-access
  in_grads = functional_ops._symbolic_gradient(input=f_in, Tout=f_types, f=f)
  # pylint: enable=protected-access
  return in_grads
def _MaybeCompile(scope, op, func, grad_fn):
  """Compile the calculation in grad_fn if op was marked as compiled.

  Args:
    scope: name scope of the gradient computation (used when building a
      separate _XlaScope for the gradient).
    op: the forward Operation whose gradient is being built.
    func: the defined function for a function-call op, or None for plain ops.
    grad_fn: zero-argument callable that actually builds the gradient graph.

  Returns:
    The result of grad_fn(), possibly built under an _XlaCompile attr scope.
  """
  scope = scope.rstrip("/").replace("/", "_")
  if func is not None:
    xla_compile = func.definition.attr["_XlaCompile"].b
    xla_separate_compiled_gradients = func.definition.attr[
        "_XlaSeparateCompiledGradients"].b
    xla_scope = func.definition.attr["_XlaScope"].s.decode()
  else:
    try:
      xla_compile = op.get_attr("_XlaCompile")
      xla_separate_compiled_gradients = op.get_attr(
          "_XlaSeparateCompiledGradients")
      xla_scope = op.get_attr("_XlaScope").decode()
    except ValueError:
      # Op was not marked for compilation.
      return grad_fn()  # Exit early

  if not xla_compile:
    return grad_fn()  # Exit early

  # If the gradients are supposed to be compiled separately, we give them a
  # _XlaScope name that is based on the name_scope of the gradients. Otherwise
  # they just inherit the existing _XlaScope name, which lets them be merged
  # together with the non-gradient computation.
  if xla_separate_compiled_gradients:
    xla_grad_scope = "%s_grad_%s" % (xla_scope, scope)
  else:
    xla_grad_scope = xla_scope

  attrs = {
      "_XlaCompile": attr_value_pb2.AttrValue(b=xla_compile),
      "_XlaScope": attr_value_pb2.AttrValue(s=xla_grad_scope.encode())
  }
  with ops.get_default_graph()._attr_scope(attrs):  # pylint: disable=protected-access
    return grad_fn()
@tf_export("gradients")
def gradients(ys,
              xs,
              grad_ys=None,
              name="gradients",
              colocate_gradients_with_ops=False,
              gate_gradients=False,
              aggregation_method=None,
              stop_gradients=None):
  """Constructs symbolic derivatives of sum of `ys` w.r.t. x in `xs`.

  `ys` and `xs` are each a `Tensor` or a list of tensors.  `grad_ys`
  is a list of `Tensor`, holding the gradients received by the
  `ys`. The list must be the same length as `ys`.

  `gradients()` adds ops to the graph to output the derivatives of `ys` with
  respect to `xs`.  It returns a list of `Tensor` of length `len(xs)` where
  each tensor is the `sum(dy/dx)` for y in `ys`.

  `grad_ys` is a list of tensors of the same length as `ys` that holds
  the initial gradients for each y in `ys`.  When `grad_ys` is None,
  we fill in a tensor of '1's of the shape of y for each y in `ys`.  A
  user can provide their own initial `grad_ys` to compute the
  derivatives using a different initial gradient for each y (e.g., if
  one wanted to weight the gradient differently for each value in
  each y).

  `stop_gradients` is a `Tensor` or a list of tensors to be considered constant
  with respect to all `xs`. These tensors will not be backpropagated through,
  as though they had been explicitly disconnected using `stop_gradient`.  Among
  other things, this allows computation of partial derivatives as opposed to
  total derivatives. For example:

  ```python
  a = tf.constant(0.)
  b = 2 * a
  g = tf.gradients(a + b, [a, b], stop_gradients=[a, b])
  ```

  Here the partial derivatives `g` evaluate to `[1.0, 1.0]`, compared to the
  total derivatives `tf.gradients(a + b, [a, b])`, which take into account the
  influence of `a` on `b` and evaluate to `[3.0, 1.0]`.  Note that the above is
  equivalent to:

  ```python
  a = tf.stop_gradient(tf.constant(0.))
  b = tf.stop_gradient(2 * a)
  g = tf.gradients(a + b, [a, b])
  ```

  `stop_gradients` provides a way of stopping gradient after the graph has
  already been constructed, as compared to `tf.stop_gradient` which is used
  during graph construction.  When the two approaches are combined,
  backpropagation stops at both `tf.stop_gradient` nodes and nodes in
  `stop_gradients`, whichever is encountered first.

  Args:
    ys: A `Tensor` or list of tensors to be differentiated.
    xs: A `Tensor` or list of tensors to be used for differentiation.
    grad_ys: Optional. A `Tensor` or list of tensors the same size as
      `ys` and holding the gradients computed for each y in `ys`.
    name: Optional name to use for grouping all the gradient ops together.
      defaults to 'gradients'.
    colocate_gradients_with_ops: If True, try colocating gradients with
      the corresponding op.
    gate_gradients: If True, add a tuple around the gradients returned
      for an operations.  This avoids some race conditions.
    aggregation_method: Specifies the method used to combine gradient terms.
      Accepted values are constants defined in the class `AggregationMethod`.
    stop_gradients: Optional. A `Tensor` or list of tensors not to differentiate
      through.

  Returns:
    A list of `sum(dy/dx)` for each x in `xs`.

  Raises:
    LookupError: if one of the operations between `x` and `y` does not
      have a registered gradient function.
    ValueError: if the arguments are invalid.
    RuntimeError: if called in Eager mode.

  """
  if context.in_eager_mode():
    raise RuntimeError("tf.gradients not supported in EAGER mode. Use "
                       "functions in tf.contrib.eager.backprop instead.")
  ys = _AsList(ys)
  xs = _AsList(xs)
  stop_gradients = [] if stop_gradients is None else _AsList(stop_gradients)
  if grad_ys is None:
    grad_ys = [None] * len(ys)
  else:
    grad_ys = _AsList(grad_ys)

  with ops.name_scope(
      name, "gradients",
      list(ys) + list(xs) + list(stop_gradients) + list(grad_ys)) as grad_scope:
    ys = ops.convert_n_to_tensor_or_indexed_slices(ys, name="y")
    # Differentiate w.r.t. the underlying handle tensor of ResourceVariables.
    xs = [
        x.handle if isinstance(x, resource_variable_ops.ResourceVariable) else x
        for x in xs
    ]
    xs = ops.internal_convert_n_to_tensor_or_indexed_slices(
        xs, name="x", as_ref=True)
    grad_ys = _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops)

    # The approach we take here is as follows: Create a list of all ops in the
    # subgraph between the ys and xs.  Visit these ops in reverse order of ids
    # to ensure that when we visit an op the gradients w.r.t its outputs have
    # been collected.  Then aggregate these gradients if needed, call the op's
    # gradient function, and add the generated gradients to the gradients for
    # its input.

    # Initialize the pending count for ops in the connected subgraph from ys
    # to the xs.
    # NOTE(review): the identity below presumably gives each y its own
    # accumulation endpoint when it is also consumed inside the graph —
    # confirm against the pending-count bookkeeping.
    if len(ys) > 1:
      ys = [array_ops.identity(y) if y.consumers() else y for y in ys]
    to_ops = [t.op for t in ys]
    from_ops = [t.op for t in xs]
    stop_gradient_ops = [t.op for t in stop_gradients]
    pending_count, loop_state = _PendingCount(
        ops.get_default_graph(), to_ops, from_ops, colocate_gradients_with_ops)

    # Iterate over the collected ops.
    #
    # grads: op => list of gradients received on each output endpoint of the
    # op.  The gradients for each endpoint are initially collected as a list.
    # When it is time to call the op's gradient function, for each endpoint we
    # aggregate the list of received gradients into a Add() Operation if there
    # is more than one.
    grads = {}

    # Add the initial gradients for the ys.
    for y, grad_y in zip(ys, grad_ys):
      _SetGrad(grads, y, grad_y)

    # Initialize queue with to_ops.
    queue = collections.deque()
    # Add the ops in 'to_ops' into the queue.
    to_ops_set = set()
    for op in to_ops:
      # 'ready' handles the case where one output gradient relies on
      # another output's gradient.
      # pylint: disable=protected-access
      ready = (pending_count[op._id] == 0)
      if ready and op._id not in to_ops_set:
        to_ops_set.add(op._id)
        queue.append(op)
      # pylint: enable=protected-access

    if loop_state:
      loop_exits = loop_state.ProcessUnusedLoopExits(pending_count, to_ops_set)
      for y in loop_exits:
        if _IsTrainable(y):
          _SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
          queue.append(y.op)

    stop_ops = _StopOps(from_ops, stop_gradient_ops, pending_count)
    while queue:
      # generate gradient subgraph for op.
      op = queue.popleft()
      with _maybe_colocate_with(op, colocate_gradients_with_ops):
        if loop_state:
          loop_state.EnterGradWhileContext(op, before=True)
        out_grads = _AggregatedGrads(grads, op, loop_state, aggregation_method)
        if loop_state:
          loop_state.ExitGradWhileContext(op, before=True)

        grad_fn = None
        # pylint: disable=protected-access
        func_call = None
        is_func_call = ops.get_default_graph()._is_function(op.type)
        has_out_grads = any(isinstance(g, ops.Tensor) or g for g in out_grads)
        if has_out_grads and (op._id not in stop_ops):
          if is_func_call:
            # Function-call ops use the Python gradient attached to the
            # function definition, if any.
            func_call = ops.get_default_graph()._get_function(op.type)
            grad_fn = func_call.python_grad_func
            # pylint: enable=protected-access
          else:
            # A grad_fn must be defined, either as a function or as None
            # for ops that do not have gradients.
            try:
              grad_fn = ops.get_gradient_function(op)
            except LookupError:
              raise LookupError(
                  "No gradient defined for operation '%s' (op type: %s)" %
                  (op.name, op.type))
        if loop_state:
          loop_state.EnterGradWhileContext(op, before=False)
        if (grad_fn or is_func_call) and has_out_grads:
          # NOTE: If _AggregatedGrads didn't compute a value for the i'th
          # output, it means that the cost does not depend on output[i],
          # therefore dC/doutput[i] is 0.
          for i, out_grad in enumerate(out_grads):
            if (not isinstance(out_grad, ops.Tensor) and not out_grad) and (
                (not grad_fn and is_func_call) or _IsTrainable(op.outputs[i])):
              # Only trainable outputs or outputs for a function call that
              # will use SymbolicGradient get a zero gradient. Gradient
              # functions should ignore the gradient for other outputs.
              # TODO(apassos) gradients of resource handles might be an
              # issue here because of zeros.
              if loop_state:
                out_grads[i] = loop_state.ZerosLike(op, i)
              else:
                out_grads[i] = control_flow_ops.ZerosLikeOutsideLoop(op, i)
          with ops.name_scope(op.name + "_grad"):
            # pylint: disable=protected-access
            with ops.get_default_graph()._original_op(op):
              # pylint: enable=protected-access
              if grad_fn:
                # If grad_fn was found, do not use SymbolicGradient even for
                # functions.
                in_grads = _MaybeCompile(grad_scope, op, func_call,
                                         lambda: grad_fn(op, *out_grads))
              else:
                # For function call ops, we add a 'SymbolicGradient'
                # node to the graph to compute gradients.
                in_grads = _MaybeCompile(grad_scope, op, func_call,
                                         lambda: _SymGrad(op, out_grads))
              in_grads = _AsList(in_grads)
              _VerifyGeneratedGradients(in_grads, op)
              if gate_gradients and len([x for x in in_grads
                                         if x is not None]) > 1:
                with ops.device(None):
                  with ops.colocate_with(None, ignore_existing=True):
                    in_grads = control_flow_ops.tuple(in_grads)
          _LogOpGradients(op, out_grads, in_grads)
        else:
          # If no grad_fn is defined or none of out_grads is available,
          # just propagate a list of None backwards.
          in_grads = [None] * len(op.inputs)
        for i, (t_in, in_grad) in enumerate(zip(op.inputs, in_grads)):
          if in_grad is not None:
            if (isinstance(in_grad, ops.Tensor) and
                t_in.dtype != dtypes.resource):
              try:
                in_grad.set_shape(t_in.get_shape())
              except ValueError:
                raise ValueError(
                    "Incompatible shapes between op input and calculated "
                    "input gradient.  Forward operation: %s.  Input index: %d. "
                    "Original input shape: %s.  "
                    "Calculated input gradient shape: %s" %
                    (op.name, i, t_in.shape, in_grad.shape))
            _SetGrad(grads, t_in, in_grad)
        if loop_state:
          loop_state.ExitGradWhileContext(op, before=False)

      # Update pending count for the inputs of op and enqueue ready ops.
      _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state)

  if loop_state:
    loop_state.PostProcessing()
  return [_GetGrad(grads, x) for x in xs]
def _HasAnyNotNoneGrads(grads, op):
  """Return true iff op has real gradient."""
  # A "real" gradient is either a Tensor/IndexedSlices or a non-empty
  # accumulation sequence holding at least one non-None entry.
  for candidate in _GetGrads(grads, op):
    if isinstance(candidate, (ops.Tensor, ops.IndexedSlices)):
      return True
    if candidate and isinstance(candidate, collections.Sequence):
      if any(g is not None for g in candidate):
        return True
  return False
def _UpdatePendingAndEnqueueReady(grads, op, queue, pending_count, loop_state):
  """Update pending count for the inputs of op and enqueue ready ops.

  Args:
    grads: dict mapping op -> per-output received gradients.
    op: the Operation whose gradient was just generated.
    queue: deque of ops ready for gradient processing; appended in place.
    pending_count: list of ints indexed by op id; decremented in place.
    loop_state: ControlFlowState for while-loops, or None.
  """
  for x in op.inputs:
    # pylint: disable=protected-access
    pending_count[x.op._id] -= 1
    ready = (pending_count[x.op._id] == 0)
    if loop_state and not ready:
      # Loop Switch ops may be processed before all their backprop inputs
      # have arrived.
      ready = (
          pending_count[x.op._id] > 0 and control_flow_util.IsLoopSwitch(x.op))
    # pylint: enable=protected-access
    if ready:
      if control_flow_util.IsLoopExit(x.op):
        # if x is an exit without real gradient, defer processing them.
        grad_state = loop_state.GetGradState(x.op, before=False)
        grad_state.deferred_exits.append(x)
        grad_state.pending_exits_count -= 1
        if grad_state.pending_exits_count == 0:
          # We now have all the exits so process them.
          has_not_none_grad = False
          for y in grad_state.deferred_exits:
            if _HasAnyNotNoneGrads(grads, y.op):
              has_not_none_grad = True
              queue.append(y.op)
            else:
              grad_state.unused_exits.append(y)
          if has_not_none_grad:
            # For an unused exit, if it has trainable outputs, backprop
            # a zero gradient. Otherwise, just ignore it.
            for y in grad_state.unused_exits:
              if _IsTrainable(y):
                _SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
              queue.append(y.op)
          else:
            # All exits are "unused" so use None as gradient.
            for y in grad_state.unused_exits:
              queue.append(y.op)
      else:
        queue.append(x.op)
def _SetGrad(grads, t, grad):
  """Record gradient `grad` for tensor `t` in the memo map `grads`."""
  producer = t.op
  slots = grads.get(producer)
  if not slots:
    # First gradient seen for this op: one pending list per output.
    slots = [[] for _ in xrange(len(producer.outputs))]
    grads[producer] = slots
  existing = slots[t.value_index]
  if not isinstance(existing, list):
    # Only a loop Switch may have its single gradient overwritten in place.
    assert control_flow_util.IsLoopSwitch(producer)
    slots[t.value_index] = grad
  else:
    # Normal case: accumulate into the pending list for later aggregation.
    existing.append(grad)
def _GetGrad(grads, t):
  """Return the (already aggregated) gradient recorded for tensor `t`."""
  entry = grads.get(t.op)
  if not entry:
    # No gradient was ever recorded for the producing op.
    return None
  result = entry[t.value_index]
  # Pending (unaggregated) lists must never escape to callers.
  assert not isinstance(result, list), (
      "gradients list should have been aggregated by now.")
  return result
def _GetGrads(grads, op):
  """Return the per-output gradient lists recorded for `op`."""
  try:
    return grads[op]
  except KeyError:
    # Nothing recorded yet: one empty pending list per output of `op`.
    return [[] for _ in xrange(len(op.outputs))]
def _HandleNestedIndexedSlices(grad):
  """Flatten an IndexedSlices whose values may themselves be IndexedSlices."""
  assert isinstance(grad, ops.IndexedSlices)
  if isinstance(grad.values, ops.Tensor):
    # Base case: the values are a plain Tensor, nothing to flatten.
    return grad
  assert isinstance(grad.values, ops.IndexedSlices)
  # Recursively flatten the nested values, then compose the two index maps
  # so the inner indices select from the outer index tensor.
  flattened = _HandleNestedIndexedSlices(grad.values)
  composed_indices = array_ops.gather(grad.indices, flattened.indices)
  return ops.IndexedSlices(flattened.values, composed_indices,
                           flattened.dense_shape)
def _AccumulatorShape(inputs):
  """Return the most specific shape consistent with every Tensor in `inputs`."""
  merged = tensor_shape.unknown_shape()
  for candidate in inputs:
    # Non-Tensor entries (e.g. IndexedSlices) contribute no shape information.
    if isinstance(candidate, ops.Tensor):
      merged = merged.merge_with(candidate.get_shape())
  return merged
def _LogOpGradients(op, out_grads, in_grads):
  """Log the in and out grads of an op."""
  logging.vlog(1, "Gradient for '" + op.name + "'")

  def _FilterGrad(x):
    # None is never logged; empty lists/tuples are skipped as well.
    if x is None:
      return False
    return bool(x) if isinstance(x, (list, tuple)) else True

  logging.vlog(1, " in --> %s",
               ", ".join(x.name for x in out_grads if _FilterGrad(x)))
  logging.vlog(1, " out --> %s",
               ", ".join(x.name for x in in_grads if _FilterGrad(x)))
def _MultiDeviceAddN(tensor_list):
  """Adds tensors from potentially multiple devices.

  Args:
    tensor_list: The tensors to sum; they may live on different devices.

  Returns:
    A single tensor holding the sum of all entries of `tensor_list`.
  """
  # Basic function structure comes from control_flow_ops.group().
  # Sort tensors according to their devices.
  # Idiom fix: defaultdict(list) instead of defaultdict(lambda: []).
  tensors_on_device = collections.defaultdict(list)
  for tensor in tensor_list:
    tensors_on_device[tensor.device].append(tensor)

  # For each device, add the tensors on that device first.
  # Then gather the partial sums from multiple devices.
  # TODO(sjhwang): Create hierarchical aggregation tree as pbar's suggestion.
  # E.g., aggregate per GPU, then per task, and so on.
  summands = []

  def DeviceKey(dev):
    # Sort None devices first by mapping them to the empty string.
    return "" if dev is None else dev

  for dev in sorted(six.iterkeys(tensors_on_device), key=DeviceKey):
    tensors = tensors_on_device[dev]
    # Colocate each partial sum with the tensors it aggregates.
    with ops.colocate_with(tensors[0].op, ignore_existing=True):
      summands.append(math_ops.add_n(tensors))

  return math_ops.add_n(summands)
@tf_export("AggregationMethod")
class AggregationMethod(object):
  """A class listing aggregation methods used to combine gradients.

  Computing partial derivatives can require aggregating gradient
  contributions. This class lists the various methods that can
  be used to combine gradients in the graph:

  * `ADD_N`: All of the gradient terms are summed as part of one
    operation using the "AddN" op. It has the property that all
    gradients must be ready before any aggregation is performed.
  * `DEFAULT`: The system-chosen default aggregation method.
  """
  # Sum every contribution in a single AddN op.
  ADD_N = 0
  DEFAULT = ADD_N
  # The following are experimental and may not be supported in future releases.
  # Pairwise-summation tree (see _AggregatedGrads in this file).
  EXPERIMENTAL_TREE = 1
  # Uses math_ops.accumulate_n when the accumulator shape is fully defined.
  EXPERIMENTAL_ACCUMULATE_N = 2
def _AggregatedGrads(grads, op, loop_state, aggregation_method=None):
  """Get the aggregated gradients for op.

  Args:
    grads: The map of memoized gradients.
    op: The op to get gradients for.
    loop_state: An object for maintaining the state of the while loops in the
                graph. It is of type ControlFlowState. None if the graph
                contains no while loops.
    aggregation_method: Specifies the method used to combine gradient terms.
      Accepted values are constants defined in the class `AggregationMethod`.

  Returns:
    A list of gradients, one per each output of `op`. If the gradients
    for a particular output is a list, this function aggregates it
    before returning.

  Raises:
    TypeError: if the incoming grads are not Tensors or IndexedSlices.
    ValueError: if the arguments are invalid.
  """
  if aggregation_method is None:
    aggregation_method = AggregationMethod.DEFAULT
  if aggregation_method not in [
      AggregationMethod.ADD_N, AggregationMethod.EXPERIMENTAL_TREE,
      AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
  ]:
    raise ValueError(
        "Invalid aggregation_method specified %s." % aggregation_method)
  out_grads = _GetGrads(grads, op)
  for i, out_grad in enumerate(out_grads):
    if loop_state:
      # Inside a while loop only a Switch may already carry an aggregated
      # gradient at this point; leave it untouched.
      if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
        assert control_flow_util.IsLoopSwitch(op)
        continue
    # Grads have to be Tensors or IndexedSlices
    if (isinstance(out_grad, collections.Sequence) and not all([
        isinstance(g, (ops.Tensor, ops.IndexedSlices))
        for g in out_grad
        if g is not None
    ])):
      raise TypeError("gradients have to be either all Tensors "
                      "or all IndexedSlices")
    # Aggregate multiple gradients, and convert [] to None.
    if out_grad:
      if len(out_grad) < 2:
        # Single contribution: no aggregation needed.
        used = "nop"
        out_grads[i] = out_grad[0]
      elif all([isinstance(g, ops.Tensor) for g in out_grad if g is not None]):
        # NOTE(review): this local shadows the `tensor_shape` module imported
        # at file level; safe here only because the module is not used again
        # inside this branch.
        tensor_shape = _AccumulatorShape(out_grad)
        if (aggregation_method == AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
            and len(out_grad) > 2 and tensor_shape.is_fully_defined()):
          # The benefit of using AccumulateN is that its inputs can be combined
          # in any order and this can allow the expression to be evaluated with
          # a smaller memory footprint. When used with gpu_allocator_retry,
          # it is possible to compute a sum of terms which are much larger than
          # total GPU memory.
          # AccumulateN can currently only be used if we know the shape for
          # an accumulator variable. If this is not known, or if we only have
          # 2 grads then we fall through to the "tree" case below.
          used = "accumulate_n"
          out_grads[i] = math_ops.accumulate_n(out_grad)
        elif aggregation_method in [
            AggregationMethod.EXPERIMENTAL_TREE,
            AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
        ]:
          # Aggregate all gradients by doing pairwise sums: this may
          # reduce performance, but it can improve memory because the
          # gradients can be released earlier.
          #
          # TODO(vrv): Consider replacing this with a version of
          # tf.AddN() that eagerly frees its inputs as soon as they are
          # ready, so the order of this tree does not become a problem.
          used = "tree"
          with ops.name_scope(op.name + "_gradient_sum"):
            running_sum = out_grad[0]
            for grad in out_grad[1:]:
              running_sum = math_ops.add_n([running_sum, grad])
            out_grads[i] = running_sum
        else:
          used = "add_n"
          out_grads[i] = _MultiDeviceAddN(out_grad)
        logging.vlog(2, " _AggregatedGrads %d x %s using %s", len(out_grad),
                     tensor_shape, used)
      else:
        # IndexedSlices path: flatten any nesting, then concatenate all
        # values and indices into a single IndexedSlices.
        # NOTE(review): relies on the private math_ops._as_indexed_slices_list.
        out_grad = math_ops._as_indexed_slices_list(
            [g for g in out_grad if g is not None])
        out_grad = [_HandleNestedIndexedSlices(x) for x in out_grad]
        # Form IndexedSlices out of the concatenated values and
        # indices.
        out_grads[i] = ops.IndexedSlices(
            array_ops.concat([x.values for x in out_grad], 0),
            array_ops.concat([x.indices for x in out_grad], 0),
            out_grad[0].dense_shape)
    else:  # not out_grad
      # out_grads[i] is [], thus its aggregation is simply None.
      out_grads[i] = None
  return out_grads
# TODO(vrv): Make this available when we want to make it public.
def _hessian_vector_product(ys, xs, v):
  """Multiply the Hessian of `ys` wrt `xs` by `v`.

  This is an efficient construction that uses a backprop-like approach
  to compute the product between the Hessian and another vector. The
  Hessian is usually too large to be explicitly computed or even
  represented, but this method allows us to at least multiply by it
  for the same big-O cost as backprop.

  Implicit Hessian-vector products are the main practical, scalable way
  of using second derivatives with neural networks. They allow us to
  do things like construct Krylov subspaces and approximate conjugate
  gradient descent.

  Example: if `y` = 1/2 `x`^T A `x`, then `hessian_vector_product(y,
  x, v)` will return an expression that evaluates to the same values
  as (A + A.T) `v`.

  Args:
    ys: A scalar value, or a tensor or list of tensors to be summed to
        yield a scalar.
    xs: A list of tensors that we should construct the Hessian over.
    v: A list of tensors, with the same shapes as xs, that we want to
       multiply by the Hessian.

  Returns:
    A list of tensors (or if the list would be length 1, a single tensor)
    containing the product between the Hessian and `v`.

  Raises:
    ValueError: `xs` and `v` have different length.
  """
  # v must match xs element-for-element.
  if len(v) != len(xs):
    raise ValueError("xs and v must have the same length.")

  # First backprop: d(ys)/d(xs).
  first_grads = gradients(ys, xs)
  assert len(first_grads) == len(xs)

  # Form sum_i grad_i * stop_gradient(v_i), skipping gradients that are None.
  elemwise_products = []
  for grad_elem, v_elem in zip(first_grads, v):
    if grad_elem is not None:
      elemwise_products.append(
          math_ops.multiply(grad_elem, array_ops.stop_gradient(v_elem)))

  # Second backprop yields the Hessian-vector product.
  return gradients(elemwise_products, xs)
@tf_export("hessians")
def hessians(ys,
             xs,
             name="hessians",
             colocate_gradients_with_ops=False,
             gate_gradients=False,
             aggregation_method=None):
  """Constructs the Hessian of sum of `ys` with respect to `x` in `xs`.

  `hessians()` adds ops to the graph to output the Hessian matrix of `ys`
  with respect to `xs`. It returns a list of `Tensor` of length `len(xs)`
  where each tensor is the Hessian of `sum(ys)`.

  The Hessian is a matrix of second-order partial derivatives of a scalar
  tensor (see https://en.wikipedia.org/wiki/Hessian_matrix for more details).

  Args:
    ys: A `Tensor` or list of tensors to be differentiated.
    xs: A `Tensor` or list of tensors to be used for differentiation.
    name: Optional name to use for grouping all the gradient ops together.
      defaults to 'hessians'.
    colocate_gradients_with_ops: See `gradients()` documentation for details.
    gate_gradients: See `gradients()` documentation for details.
    aggregation_method: See `gradients()` documentation for details.

  Returns:
    A list of Hessian matrices of `sum(ys)` for each `x` in `xs`.

  Raises:
    LookupError: if one of the operations between `xs` and `ys` does not
      have a registered gradient function.
  """
  # NOTE(review): the `name` argument is currently unused in this body.
  xs = _AsList(xs)
  kwargs = {
      "colocate_gradients_with_ops": colocate_gradients_with_ops,
      "gate_gradients": gate_gradients,
      "aggregation_method": aggregation_method
  }
  # Compute first-order derivatives and iterate for each x in xs.
  hessians = []
  _gradients = gradients(ys, xs, **kwargs)
  for gradient, x in zip(_gradients, xs):
    # change shape to one-dimension without graph branching
    gradient = array_ops.reshape(gradient, [-1])
    # Declare an iterator and tensor array loop variables for the gradients.
    n = array_ops.size(x)
    loop_vars = [
        array_ops.constant(0, dtypes.int32),
        tensor_array_ops.TensorArray(x.dtype, n)
    ]
    # Iterate over all elements of the gradient and compute second order
    # derivatives, one row of the Hessian per loop iteration.
    _, hessian = control_flow_ops.while_loop(
        lambda j, _: j < n,
        lambda j, result: (j + 1,
                           result.write(j, gradients(gradient[j], x)[0])),
        loop_vars
    )
    # Reshape the stacked (n, n) result back to shape(x) + shape(x).
    _shape = array_ops.shape(x)
    _reshaped_hessian = array_ops.reshape(hessian.stack(),
                                          array_ops.concat((_shape, _shape), 0))
    hessians.append(_reshaped_hessian)
  return hessians
| rabipanda/tensorflow | tensorflow/python/ops/gradients_impl.py | Python | apache-2.0 | 40,448 |
from django.conf.urls import patterns, include, url
from django.conf import settings
from django.views.generic import RedirectView
from api.urls import router
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# URLconf for the formhub web application.
# NOTE: `patterns()` and string-based view references are Django <= 1.9
# idioms (both were removed in Django 1.10).
urlpatterns = patterns(
    '',
    # change Language
    (r'^i18n/', include('django.conf.urls.i18n')),
    # REST API v1, plus redirects that normalize legacy /api paths
    url('^api/v1/', include(router.urls)),
    url(r'^api-docs/', RedirectView.as_view(url='/api/v1/')),
    url(r'^api/', RedirectView.as_view(url='/api/v1/')),
    url(r'^api/v1', RedirectView.as_view(url='/api/v1/')),
    # django default stuff
    url(r'^accounts/', include('main.registration_urls')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # oauth2_provider
    url(r'^o/', include('oauth2_provider.urls', namespace='oauth2_provider')),
    # google urls
    url(r'^gauthtest/$',
        'main.google_export.google_oauth2_request',
        name='google-auth'),
    url(r'^gwelcome/$',
        'main.google_export.google_auth_return',
        name='google-auth-welcome'),
    # main website views
    url(r'^$', 'main.views.home'),
    url(r'^tutorial/$', 'main.views.tutorial', name='tutorial'),
    url(r'^about-us/$', 'main.views.about_us', name='about-us'),
    url(r'^getting_started/$', 'main.views.getting_started', name='getting_started'),
    url(r'^faq/$', 'main.views.faq', name='faq'),
    url(r'^syntax/$', 'main.views.syntax', name='syntax'),
    url(r'^resources/$', 'main.views.resources', name='resources'),
    url(r'^forms/$', 'main.views.form_gallery', name='forms_list'),
    url(r'^forms/(?P<uuid>[^/]+)$', 'main.views.show'),
    url(r'^people/$', 'main.views.members_list'),
    url(r'^xls2xform/$', 'main.views.xls2xform'),
    url(r'^support/$', 'main.views.support'),
    url(r'^stats/$', 'staff.views.stats'),
    url(r'^login_redirect/$', 'main.views.login_redirect'),
    url(r"^attachment/$", 'odk_viewer.views.attachment_url'),
    url(r"^attachment/(?P<size>[^/]+)$", 'odk_viewer.views.attachment_url'),
    url(r'^jsi18n/$', 'django.views.i18n.javascript_catalog',
        {'packages': ('main', 'odk_viewer',)}),
    url(r'^typeahead_usernames', 'main.views.username_list', name='username_list'),
    # user profile routes; these broad `username` patterns rely on being
    # declared after the more specific top-level routes above
    url(r'^(?P<username>[^/]+)/$', 'main.views.profile', name='user_profile'),
    url(r'^(?P<username>[^/]+)/profile$', 'main.views.public_profile', name='public_profile'),
    url(r'^(?P<username>[^/]+)/settings', 'main.views.profile_settings'),
    url(r'^(?P<username>[^/]+)/cloneform$', 'main.views.clone_xlsform'),
    url(r'^(?P<username>[^/]+)/activity$', 'main.views.activity'),
    url(r'^(?P<username>[^/]+)/activity/api$', 'main.views.activity_api'),
    url(r'^activity/fields$', 'main.views.activity_fields'),
    url(r'^(?P<username>[^/]+)/api-token$', 'main.views.api_token'),
    # form specific
    url(r'^(?P<username>[^/]+)/forms/(?P<id_string>[^/]+)$', 'main.views.show'),
    url(r'^(?P<username>[^/]+)/forms/(?P<id_string>[^/]+)/qrcode$', 'main.views.qrcode', name='get_qrcode'),
    url(r'^(?P<username>[^/]+)/forms/(?P<id_string>[^/]+)/api$', 'main.views.api', name='mongo_view_api'),
    url(r'^(?P<username>[^/]+)/forms/(?P<id_string>[^/]+)/public_api$', 'main.views.public_api', name='public_api'),
    url(r'^(?P<username>[^/]+)/forms/(?P<id_string>[^/]+)/delete_data$', 'main.views.delete_data', name='delete_data'),
    url(r'^(?P<username>[^/]+)/forms/(?P<id_string>[^/]+)/edit$', 'main.views.edit'),
    url(r'^(?P<username>[^/]+)/forms/(?P<id_string>[^/]+)/perms$', 'main.views.set_perm'),
    url(r'^(?P<username>[^/]+)/forms/(?P<id_string>[^/]+)/bamboo$', 'main.views.link_to_bamboo'),
    url(r'^(?P<username>[^/]+)/forms/(?P<id_string>[^/]+)/photos', 'main.views.form_photos'),
    url(r'^(?P<username>[^/]+)/forms/(?P<id_string>[^/]+)/doc/(?P<data_id>\d+)', 'main.views.download_metadata'),
    url(r'^(?P<username>[^/]+)/forms/(?P<id_string>[^/]+)/delete-doc/(?P<data_id>\d+)', 'main.views.delete_metadata'),
    url(r'^(?P<username>[^/]+)/forms/(?P<id_string>[^/]+)/formid-media/(?P<data_id>\d+)', 'main.views.download_media_data'),
    url(r'^(?P<username>[^/]+)/forms/(?P<id_string>[^/]+)/submission/(?P<uuid>[^/]+)$', 'main.views.show_submission'),
    url(r'^(?P<username>[^/]+)/forms/(?P<id_string>[^/]+)/addservice$', 'restservice.views.add_service', name="add_restservice"),
    url(r'^(?P<username>[^/]+)/forms/(?P<id_string>[^/]+)/delservice$',
        'restservice.views.delete_service', name="delete_restservice"),
    url(r'^(?P<username>[^/]+)/forms/(?P<id_string>[^/]+)/update$', 'main.views.update_xform'),
    url(r'^(?P<username>[^/]+)/forms/(?P<id_string>[^/]+)/preview$', 'main.views.enketo_preview'),
    # briefcase api urls
    url(r"^(?P<username>\w+)/view/submissionList$",
        'odk_logger.views.view_submission_list'),
    url(r"^(?P<username>\w+)/view/downloadSubmission$",
        'odk_logger.views.view_download_submission'),
    url(r"^(?P<username>\w+)/formUpload$",
        'odk_logger.views.form_upload'),
    url(r"^(?P<username>\w+)/upload$",
        'odk_logger.views.form_upload'),
    # stats
    url(r"^stats/submissions/$", 'staff.views.submissions'),
    # exporting stuff
    url(r"^(?P<username>\w+)/forms/(?P<id_string>[^/]+)/data\.csv$", 'odk_viewer.views.data_export', name='csv_export', kwargs={'export_type': 'csv'}),
    url(r"^(?P<username>\w+)/forms/(?P<id_string>[^/]+)/data\.xls", 'odk_viewer.views.data_export', name='xls_export', kwargs={'export_type': 'xls'}),
    url(r"^(?P<username>\w+)/forms/(?P<id_string>[^/]+)/data\.csv.zip", 'odk_viewer.views.data_export', name='csv_zip_export', kwargs={'export_type': 'csv_zip'}),
    url(r"^(?P<username>\w+)/forms/(?P<id_string>[^/]+)/data\.kml$", 'odk_viewer.views.kml_export'),
    url(r"^(?P<username>\w+)/forms/(?P<id_string>[^/]+)/data\.zip", 'odk_viewer.views.zip_export'),
    url(r"^(?P<username>\w+)/forms/(?P<id_string>[^/]+)/gdocs$", 'odk_viewer.views.google_xls_export'),
    url(r"^odk_viewer/survey/(?P<instance_id>\d+)/$", 'odk_viewer.views.survey_responses'),
    url(r"^(?P<username>\w+)/forms/(?P<id_string>[^/]+)/map_embed", 'odk_viewer.views.map_embed_view'),
    url(r"^(?P<username>\w+)/forms/(?P<id_string>[^/]+)/map", 'odk_viewer.views.map_view'),
    url(r"^(?P<username>\w+)/forms/(?P<id_string>[^/]+)/instance", 'odk_viewer.views.instance'),
    url(r"^(?P<username>\w+)/forms/(?P<id_string>[^/]+)/enter-data", 'odk_logger.views.enter_data', name='enter_data'),
    url(r"^(?P<username>\w+)/forms/(?P<id_string>[^/]+)/add-submission-with", 'odk_viewer.views.add_submission_with', name='add_submission_with'),
    url(r"^(?P<username>\w+)/forms/(?P<id_string>[^/]+)/thank_you_submission", 'odk_viewer.views.thank_you_submission', name='thank_you_submission'),
    url(r"^(?P<username>\w+)/forms/(?P<id_string>[^/]+)/edit-data/(?P<data_id>\d+)$", 'odk_logger.views.edit_data', name='edit_data'),
    url(r"^(?P<username>\w+)/forms/(?P<id_string>[^/]+)/view-data", 'odk_viewer.views.data_view'),
    url(r"^(?P<username>\w+)/exports/(?P<id_string>[^/]+)/(?P<export_type>\w+)/new$", 'odk_viewer.views.create_export'),
    url(r"^(?P<username>\w+)/exports/(?P<id_string>[^/]+)/(?P<export_type>\w+)/delete$", 'odk_viewer.views.delete_export'),
    url(r"^(?P<username>\w+)/exports/(?P<id_string>[^/]+)/(?P<export_type>\w+)/progress$", 'odk_viewer.views.export_progress'),
    url(r"^(?P<username>\w+)/exports/(?P<id_string>[^/]+)/(?P<export_type>\w+)/$", 'odk_viewer.views.export_list'),
    url(r"^(?P<username>\w+)/exports/(?P<id_string>[^/]+)/(?P<export_type>\w+)/(?P<filename>[^/]+)$", 'odk_viewer.views.export_download'),
    # odk data urls
    url(r"^submission$", 'odk_logger.views.submission'),
    url(r"^(?P<username>\w+)/formList$", 'odk_logger.views.formList'),
    url(r"^(?P<username>\w+)/xformsManifest/(?P<id_string>[^/]+)$",
        'odk_logger.views.xformsManifest'),
    url(r"^(?P<username>\w+)/submission$", 'odk_logger.views.submission'),
    url(r"^(?P<username>\w+)/bulk-submission$", 'odk_logger.views.bulksubmission'),
    url(r"^(?P<username>\w+)/bulk-submission-form$", 'odk_logger.views.bulksubmission_form'),
    url(r"^(?P<username>\w+)/forms/(?P<id_string>[^/]+)/form\.xml$", 'odk_logger.views.download_xform', name="download_xform"),
    url(r"^(?P<username>\w+)/forms/(?P<id_string>[^/]+)/form\.xls$", 'odk_logger.views.download_xlsform', name="download_xlsform"),
    url(r"^(?P<username>\w+)/forms/(?P<id_string>[^/]+)/form\.json", 'odk_logger.views.download_jsonform', name="download_jsonform"),
    url(r"^(?P<username>\w+)/delete/(?P<id_string>[^/]+)/$", 'odk_logger.views.delete_xform'),
    url(r"^(?P<username>\w+)/(?P<id_string>[^/]+)/toggle_downloadable/$", 'odk_logger.views.toggle_downloadable'),
    # SMS support
    url(r'^(?P<username>[^/]+)/forms/(?P<id_string>[^/]+)/sms_submission/(?P<service>[a-z]+)/?$', 'sms_support.providers.import_submission_for_form', name='sms_submission_form_api'),
    url(r'^(?P<username>[^/]+)/forms/(?P<id_string>[^/]+)/sms_submission$', 'sms_support.views.import_submission_for_form', name='sms_submission_form'),
    url(r"^(?P<username>[^/]+)/sms_submission/(?P<service>[a-z]+)/?$", 'sms_support.providers.import_submission', name='sms_submission_api'),
    url(r'^(?P<username>[^/]+)/forms/(?P<id_string>[^/]+)/sms_multiple_submissions$', 'sms_support.views.import_multiple_submissions_for_form', name='sms_submissions_form'),
    url(r"^(?P<username>[^/]+)/sms_multiple_submissions$", 'sms_support.views.import_multiple_submissions', name='sms_submissions'),
    url(r"^(?P<username>[^/]+)/sms_submission$", 'sms_support.views.import_submission', name='sms_submission'),
    # static media
    # NOTE(review): ^static/ is served from MEDIA_ROOT (not STATIC_ROOT);
    # looks intentional for this deployment, but worth confirming.
    url(r'^static/(?P<path>.*)$', 'django.views.static.serve',
        {'document_root': settings.MEDIA_ROOT}),
    url(r'^media/(?P<path>.*)$', 'django.views.static.serve',
        {'document_root': settings.MEDIA_ROOT}),
    url(r'^favicon\.ico', RedirectView.as_view(url='/static/images/favicon.ico'))
)
| GeoODK/formhub | formhub/urls.py | Python | bsd-2-clause | 10,051 |
import pygame
"""
Monitors the users mouse and their selections
"""
class PickingHandler(object):
    """Tracks the user's mouse and the rectangular tile selection they drag.

    A selection starts with start_user_selection(), is refreshed by update()
    while the mouse moves, and ends with stop_user_selection(), which posts a
    pygame USEREVENT carrying the selected bounds when they are valid.
    """

    # Overlay colors for a valid / invalid selection rectangle.
    SUCCESS_COLOR = (0, 0, 255)
    ERROR_COLOR = (255, 0, 0)

    def __init__(self, viewport, transmutation_manager, physics_manager):
        self.viewport = viewport
        self.transmutation_manager = transmutation_manager
        self.physics_manager = physics_manager
        # Tile coordinates of the two drag corners; None while not selecting.
        self._user_selection_bound1 = None
        self._user_selection_bound2 = None
        # Pixel-space rect of the current *valid* selection, or None.
        # BUG FIX: previously this was only assigned inside update(), so
        # calling stop_user_selection() before the first update() raised
        # AttributeError.
        self._user_selection_bounds = None

    def is_picked(self, actor, position):
        """Return whether the screen-space position falls on the actor."""
        position = self.viewport.convert_position(position)
        return actor.get_rect().collidepoint(position)

    def tile_at_point(self, position, tile_size):
        """Convert a screen-space position to (column, row) tile coordinates."""
        position = self.viewport.convert_position(position)
        return (int(position[0] / tile_size), int(position[1] / tile_size))

    def start_user_selection(self, position, tile_size):
        """Begin a drag-selection anchored at the tile under `position`."""
        self._user_selection_bound1 = self.tile_at_point(position, tile_size)

    def stop_user_selection(self):
        """End the drag-selection, posting the bounds if they were valid."""
        if self._user_selection_bounds is not None:
            pygame.event.post(pygame.event.Event(pygame.USEREVENT, {"bounds": self._user_selection_bounds}))
        self._user_selection_bound1 = None
        self._user_selection_bound2 = None
        # Clear the cached bounds so a stale rect is never re-posted.
        self._user_selection_bounds = None

    @property
    def surface(self):
        """Surface to blit for the selection overlay (empty when idle)."""
        if self._user_selection_bound1 is not None:
            return self.selection_surface
        return pygame.Surface((0, 0))

    @property
    def position(self):
        """Pixel position of the selection overlay (origin when idle)."""
        if self._user_selection_bound1 is not None:
            return self._user_selection_position
        return (0, 0)

    def _is_selection_space_filled(self, tile_size):
        # Translate the pixel-space selection rect into tile units and ask
        # the physics manager whether that region is already occupied.
        px_selection_box = self.selection_surface.get_rect().move(self._user_selection_position)
        tile_selection_box = pygame.Rect(px_selection_box.left / tile_size, px_selection_box.top / tile_size, px_selection_box.width / tile_size, px_selection_box.height / tile_size)
        return self.physics_manager.is_space_filled(tile_selection_box)

    def get_points_used(self, tile_size):
        """Transmutation points the current selection would cost."""
        return self.transmutation_manager.get_points_required((self.selection_surface.get_width() / tile_size), (self.selection_surface.get_height() / tile_size))

    def update(self, delta, tile_size):
        """Refresh the selection rectangle from the current mouse position."""
        if self._user_selection_bound1 is None:
            # Not currently selecting; nothing to do.
            return
        new_bound2 = self.tile_at_point(pygame.mouse.get_pos(), tile_size)
        if new_bound2 == self._user_selection_bound2:
            # Mouse still over the same tile; selection unchanged.
            return
        self._user_selection_bound2 = new_bound2
        self._user_selection_position = (
            min(self._user_selection_bound1[0], self._user_selection_bound2[0]) * tile_size,
            min(self._user_selection_bound1[1], self._user_selection_bound2[1]) * tile_size
        )
        self.selection_surface = pygame.Surface((
            abs(self._user_selection_bound2[0] - self._user_selection_bound1[0]) * tile_size,
            abs(self._user_selection_bound2[1] - self._user_selection_bound1[1]) * tile_size
        ))
        self.selection_surface.set_alpha(150)
        # A selection is invalid when it is degenerate, costs more points than
        # available, or overlaps space that is already filled.
        if (self.selection_surface.get_width() == 0
                or self.selection_surface.get_height() == 0
                or self.get_points_used(tile_size) > self.transmutation_manager.current_points
                or self._is_selection_space_filled(tile_size)):
            self.selection_surface.fill(PickingHandler.ERROR_COLOR)
            self._user_selection_bounds = None
        else:
            self.selection_surface.fill(PickingHandler.SUCCESS_COLOR)
            self._user_selection_bounds = self.selection_surface.get_rect().move(self._user_selection_position)
| Desolace/LudumDare32 | picking_handler.py | Python | mit | 3,675 |
##
# Copyright 2009-2020 Ghent University, Forschungszentrum Juelich
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing the MPICH MPI library and derivatives, implemented as an easyblock
@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Damian Alvarez (Forschungszentrum Juelich)
@author: Xavier Besseron (University of Luxembourg)
"""
import os
from distutils.version import LooseVersion
import easybuild.tools.environment as env
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.systemtools import get_shared_lib_ext
class EB_MPICH(ConfigureMake):
    """
    Support for building the MPICH MPI library and derivatives.
    - basically redefinition of environment variables
    """

    @staticmethod
    def extra_options(extra_vars=None):
        """Define custom easyconfig parameters specific to MPICH."""
        extra_vars = ConfigureMake.extra_options(extra_vars)
        extra_vars.update({
            # 'debug' switches to a slower build with error checking/debug info
            'debug': [False, "Enable debug build (which is slower)", CUSTOM],
        })
        return extra_vars

    # MPICH configure script complains when F90 or F90FLAGS are set,
    # they should be replaced with FC/FCFLAGS instead.
    # Additionally, there are a set of variables (FCFLAGS among them) that should not be set at configure time,
    # or they will leak in the mpix wrappers.
    # Specific variables to be included in the wrapper exists, but they changed between MPICH 3.1.4 and MPICH 3.2
    # and in a typical scenario we probably don't want them.
    def correct_mpich_build_env(self):
        """
        Method to correctly set the environment for MPICH and derivatives

        Moves compiler flag variables to their $MPICHLIB_* equivalents (used
        for the build but kept out of the mpi* wrapper scripts), and unsets
        F90/F90FLAGS which MPICH's configure rejects.
        """
        env_vars = ['CFLAGS', 'CPPFLAGS', 'CXXFLAGS', 'FCFLAGS', 'FFLAGS', 'LDFLAGS', 'LIBS']
        vars_to_unset = ['F90', 'F90FLAGS']
        for envvar in env_vars:
            envvar_val = os.getenv(envvar)
            if envvar_val:
                new_envvar = 'MPICHLIB_%s' % envvar
                new_envvar_val = os.getenv(new_envvar)
                vars_to_unset.append(envvar)
                if envvar_val == new_envvar_val:
                    # Already mirrored; just drop the original variable.
                    self.log.debug("$%s == $%s, just defined $%s as empty", envvar, new_envvar, envvar)
                elif new_envvar_val is None:
                    env.setvar(new_envvar, envvar_val)
                else:
                    # Refuse to silently clobber a conflicting MPICHLIB_* value.
                    raise EasyBuildError("Both $%s and $%s set, can I overwrite $%s with $%s (%s) ?",
                                         envvar, new_envvar, new_envvar, envvar, envvar_val)
        env.unset_env_vars(vars_to_unset)

    def add_mpich_configopts(self):
        """
        Method to add common configure options for MPICH-based MPI libraries
        """
        # additional configuration options
        add_configopts = []
        # use POSIX threads
        add_configopts.append('--with-thread-package=pthreads')
        if self.cfg['debug']:
            # debug build, with error checking, timing and debug info
            # note: this will affect performance
            add_configopts.append('--enable-fast=none')
        else:
            # optimized build, no error checking, timing or debug info
            add_configopts.append('--enable-fast')
        # enable shared libraries, using GCC and GNU ld options
        add_configopts.extend(['--enable-shared', '--enable-sharedlibs=gcc'])
        # enable static libraries
        add_configopts.extend(['--enable-static'])
        # enable Fortran 77/90 and C++ bindings
        add_configopts.extend(['--enable-f77', '--enable-fc', '--enable-cxx'])
        self.cfg.update('configopts', ' '.join(add_configopts))

    def configure_step(self, add_mpich_configopts=True):
        """
        Custom configuration procedure for MPICH
        * add common configure options for MPICH-based MPI libraries
        * unset environment variables that leak into mpi* wrappers, and define $MPICHLIB_* equivalents instead
        """
        # things might go wrong if a previous install dir is present, so let's get rid of it
        if not self.cfg['keeppreviousinstall']:
            self.log.info("Making sure any old installation is removed before we start the build...")
            # NOTE(review): make_dir is inherited from the parent easyblock;
            # assumes dontcreateinstalldir=True means "clean up only" — confirm.
            super(EB_MPICH, self).make_dir(self.installdir, True, dontcreateinstalldir=True)
        if add_mpich_configopts:
            self.add_mpich_configopts()
        self.correct_mpich_build_env()
        super(EB_MPICH, self).configure_step()

    # make and make install are default

    def sanity_check_step(self, custom_paths=None, use_new_libnames=None, check_launchers=True):
        """
        Custom sanity check for MPICH

        Verifies that the expected binaries, headers and libraries were
        installed; library names depend on the MPICH version.
        """
        shlib_ext = get_shared_lib_ext()
        if custom_paths is None:
            custom_paths = {}
        if use_new_libnames is None:
            # cfr. http://git.mpich.org/mpich.git/blob_plain/v3.1.1:/CHANGES
            # MPICH changed its library names since version 3.1.1
            use_new_libnames = LooseVersion(self.version) >= LooseVersion('3.1.1')
        # Starting MPICH 3.1.1, libraries have been renamed
        # cf http://git.mpich.org/mpich.git/blob_plain/v3.1.1:/CHANGES
        if use_new_libnames:
            libnames = ['mpi', 'mpicxx', 'mpifort']
        else:
            libnames = ['fmpich', 'mpichcxx', 'mpichf90', 'mpich', 'mpl', 'opa']
        binaries = ['mpicc', 'mpicxx', 'mpif77', 'mpif90']
        if check_launchers:
            binaries.extend(['mpiexec', 'mpiexec.hydra', 'mpirun'])
        bins = [os.path.join('bin', x) for x in binaries]
        headers = [os.path.join('include', x) for x in ['mpi.h', 'mpicxx.h', 'mpif.h']]
        # libraries may land in either lib or lib64, so accept both locations
        libs_fn = ['lib%s.%s' % (l, e) for l in libnames for e in ['a', shlib_ext]]
        libs = [(os.path.join('lib', l), os.path.join('lib64', l)) for l in libs_fn]
        custom_paths.setdefault('dirs', []).extend(['bin', 'include', ('lib', 'lib64')])
        custom_paths.setdefault('files', []).extend(bins + headers + libs)
        super(EB_MPICH, self).sanity_check_step(custom_paths=custom_paths)
| pescobar/easybuild-easyblocks | easybuild/easyblocks/m/mpich.py | Python | gpl-2.0 | 7,319 |
import os
import sys
import re
import json
from distutils.cmd import Command
from setuptools.command.test import test
def read_requirements(file_name):
    """Read a requirements file from ../requirements/ relative to this file.

    Args:
        file_name: basename of the requirements file to read.

    Returns:
        A list of the file's non-empty lines, stripped of whitespace.
    """
    path = os.path.realpath(
        os.path.join(os.path.dirname(__file__), "..", "requirements", "%s" % file_name))
    # Context manager fixes the original's leaked file handle
    # (`open(...).readlines()` relied on the GC to close the file).
    with open(path) as req_file:
        return [line.strip() for line in req_file if line.strip()]
def patch_requirements(requirements, file_name):
    """Overlay the requirements from `file_name` on top of `requirements`.

    Packages pinned in the modifier file replace the matching entries of
    `requirements`; entries not mentioned there are kept unchanged. The
    modifier entries come first in the returned list.
    """
    modified_requirements = read_requirements(file_name)
    # Set membership makes the filter O(1) per entry instead of the
    # original list scan; also drops the redundant alias variable.
    modified_packages = {re.split('[=><]+', x)[0] for x in modified_requirements}
    kept = [x for x in requirements
            if re.split('[=><]+', x)[0] not in modified_packages]
    return modified_requirements + kept
class ApiDocTest(test):
    """setuptools `test` command that delegates test running to pytest."""

    def finalize_options(self):
        # Standard bookkeeping first, then force pytest-friendly defaults.
        test.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # Imported lazily so setup.py itself does not require pytest
        # unless the test command is actually invoked.
        import pytest
        sys.exit(pytest.main(self.test_args))
class Resource(Command):
user_options = []
    def initialize_options(self):
        # Required distutils Command hook; this command takes no options.
        pass
    def finalize_options(self):
        # Required distutils Command hook; nothing to validate.
        pass
def run(self):
resource_dir = os.path.realpath(
os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'apidoc',
'template',
'resource'
)
)
resource_src_dir = os.path.realpath(os.path.join(resource_dir, 'src'))
resource_src_js_dir = os.path.realpath(os.path.join(resource_src_dir, 'js'))
resource_src_css_dir = os.path.realpath(os.path.join(resource_src_dir, 'css'))
resource_src_less_dir = os.path.realpath(os.path.join(resource_src_dir, 'less'))
resource_js_dir = os.path.realpath(os.path.join(resource_dir, 'js'))
resource_css_dir = os.path.realpath(os.path.join(resource_dir, 'css'))
try:
os.system('wget -O "%s" "%s"' % ('/tmp/bootstrap-raw', 'http://getbootstrap.com/assets/js/raw-files.js'))
assert os.path.exists('/tmp/bootstrap-raw'), 'Downloaded bootstrap zip not found'
with open('/tmp/bootstrap-raw') as f:
js_raw = json.loads(f.readline()[11:])
less_raw = json.loads(f.readline()[13:])
js_full = ''
for js_file in ["transition.js", "modal.js", "scrollspy.js", "tooltip.js", "popover.js", "affix.js"]:
js_full += js_raw[js_file]
with open('%s/bootstrap.js' % resource_src_js_dir, 'w') as f:
f.write(js_full)
if not os.path.exists('%s/bootstrap' % resource_src_less_dir):
os.mkdir('%s/bootstrap' % resource_src_less_dir)
less_full = ''
for less_file in less_raw.keys():
with open('%s/bootstrap/%s' % (resource_src_less_dir, less_file), 'w') as f:
f.write(less_raw[less_file])
for less_file in ['variables.less', 'mixins.less', 'type.less', 'buttons.less', 'button-groups.less', 'theme.less', 'scaffolding.less', 'code.less', 'grid.less', 'utilities.less', 'normalize.less', 'component-animations.less', 'popovers.less', 'navbar.less', 'responsive-utilities.less', 'jumbotron.less', 'tooltip.less', 'tables.less', 'wells.less', 'forms.less', 'print.less', 'navs.less', 'modals.less', 'close.less']:
less_full += '@import "bootstrap/%s";\n' % less_file
less_full += '@import "variables.less";\n'
with open('%s/bootstrap.less' % resource_src_less_dir, 'w') as f:
f.write(less_full)
os.system('lessc %s %s' % ('%s/bootstrap.less' % resource_src_less_dir, '%s/bootstrap.css' % resource_src_css_dir))
finally:
os.remove('/tmp/bootstrap-raw')
pass
os.system('wget -O "%s" "%s"' % ('%s/jquery.min.js' % resource_src_js_dir, 'http://code.jquery.com/jquery-2.0.3.min.js'))
assert os.path.exists('%s/jquery.min.js' % resource_src_js_dir), 'Downloaded jquery file not found'
os.system('wget -O "%s" "%s"' % ('%s/mousetrap.min.js' % resource_src_js_dir, 'http://cdn.craig.is/js/mousetrap/mousetrap.min.js'))
assert os.path.exists('%s/mousetrap.min.js' % resource_src_js_dir), 'Downloaded mousetrap file not found'
os.system('lessc -x "%s/apidoc.less" "%s/apidoc.css"' % (resource_src_less_dir, resource_src_css_dir))
for folder in [resource_css_dir, resource_js_dir]:
if not os.path.exists(folder):
os.makedirs(folder)
self._compress("css", ["%s/bootstrap.css" % resource_src_css_dir, "%s/apidoc.css" % resource_src_css_dir, "%s/font.css" % resource_css_dir], "%s/combined.css" % resource_css_dir)
assert os.path.exists('%s/combined.css' % resource_css_dir), 'Combined css file not found'
self._compress("css", ["%s/bootstrap.css" % resource_src_css_dir, "%s/apidoc.css" % resource_src_css_dir, "%s/font-embedded.css" % resource_css_dir], "%s/combined-embedded.css" % resource_css_dir)
assert os.path.exists('%s/combined-embedded.css' % resource_css_dir), 'Combined embedded css file not found'
self._compress("js", ["%s/jquery.min.js" % resource_src_js_dir, "%s/bootstrap.js" % resource_src_js_dir, "%s/mousetrap.min.js" % resource_src_js_dir, "%s/apidoc.js" % resource_src_js_dir], "%s/combined.js" % resource_js_dir)
assert os.path.exists('%s/combined.js' % resource_js_dir), 'Combined js file not found'
def _merge_files(self, input_files, output_file):
"""Combine the input files to a big output file"""
# we assume that all the input files have the same charset
with open(output_file, mode='wb') as out:
for input_file in input_files:
out.write(open(input_file, mode='rb').read())
def _compress(self, format, input_files, output_file):
import yuicompressor
import tempfile
handle, merged_filename = tempfile.mkstemp(prefix='minify')
os.close(handle)
try:
self._merge_files(input_files, merged_filename)
os.system('java -jar %s --type %s -o %s --charset utf-8 %s' % (yuicompressor.get_jar_filename(), format, output_file, merged_filename))
finally:
os.remove(merged_filename)
| SolutionsCloud/apidoc | setup_cmd/__init__.py | Python | gpl-3.0 | 6,276 |
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Logit'] , ['ConstantTrend'] , ['Seasonal_Hour'] , ['MLP'] ); | antoinecarme/pyaf | tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_ConstantTrend_Seasonal_Hour_MLP.py | Python | bsd-3-clause | 156 |
import unittest, mock, tempfile, types, os, datetime
from django.db.models.signals import post_save
from rdflib import Graph, Literal, BNode, Namespace, RDF, URIRef
from rdflib.namespace import DC, FOAF, DCTERMS
BIB = Namespace('http://purl.org/net/biblio#')
RSS = Namespace('http://purl.org/rss/1.0/modules/link/')
ZOTERO = Namespace('http://www.zotero.org/namespaces/export#')
from cookies.accession import IngesterFactory, IngestManager
from cookies.tests.mocks import MockFile, MockIngester
from cookies.models import *
from cookies.accession.zotero import ZoteroIngest
from cookies import tasks
from cookies.signals import send_all_files_to_giles
os.environ.setdefault('LOGLEVEL', 'ERROR')
def disconnect_signal(signal, receiver, sender):
    """Detach *receiver* from *signal* for the given *sender*."""
    signal.disconnect(receiver, sender)
def reconnect_signal(signal, receiver, sender):
    """Re-attach *receiver* to *signal* for the given *sender*."""
    signal.connect(receiver, sender=sender)
class TestImport(unittest.TestCase):
    """Tests for the :class:`.IngesterFactory` plumbing."""

    def setUp(self):
        self.factory = IngesterFactory()
        # Giles upload happens via a post_save signal; keep the external
        # service out of these unit tests.
        disconnect_signal(post_save, send_all_files_to_giles, ContentRelation)

    def test_import_factory(self):
        """
        The IngesterFactory should return a wrapped object that supports
        iteration. Each iteration should yield a :class:`.Resource` instance.
        """
        ingest_class = self.factory.get('cookies.tests.mocks.MockIngester')
        mock_file = MockFile()
        mock_file.read = mock.MagicMock(name='read')
        mock_file.read.return_value = [
            {'name': 'Test',},
            {'name': 'Test2',},
        ]
        ingester = ingest_class(mock_file)
        self.assertIsInstance(ingester, IngestManager)
        self.assertIsInstance(ingester.next(), Resource)
        self.assertEqual(mock_file.read.call_count, 1)

    def tearDown(self):
        reconnect_signal(post_save, send_all_files_to_giles, ContentRelation)
class TestHandleBulkWithZotero(unittest.TestCase):
    """Integration test for the bulk-upload task on a Zotero RDF export."""

    def setUp(self):
        # Keep the Giles upload signal out of the test run.
        disconnect_signal(post_save, send_all_files_to_giles, ContentRelation)
        User.objects.get_or_create(username='AnonymousUser')
        self.user = User.objects.create(username='Test User')
        self.form_data = {
            'name': 'A New Collection',
            'created_by': self.user,
        }
        self.file_path = "test_data/TestRDF.rdf"
        self.file_name = "TestRDF.rdf"
        self.job = UserJob.objects.create(**{
            'created_by': self.user,
        })

    def test_handle_bulk(self):
        # handle_bulk should create a collection and report it as a dict.
        collection = tasks.handle_bulk(self.file_path, self.form_data, self.file_name, self.job)
        self.assertIsInstance(collection, dict)
        instance = Collection.objects.get(pk=collection['id'])
        self.assertEqual(instance.resourcecontainer_set.count(), 20)

    def tearDown(self):
        # Wipe everything the ingest created so tests stay independent.
        User.objects.all().delete()
        ConceptEntity.objects.all().delete()
        Resource.objects.all().delete()
        Relation.objects.all().delete()
        Collection.objects.all().delete()
        ContentRelation.objects.all().delete()
        reconnect_signal(post_save, send_all_files_to_giles, ContentRelation)
class TestZoteroIngesterWithManager(unittest.TestCase):
    """Run the Zotero ingester through the factory on a plain RDF file."""

    def setUp(self):
        disconnect_signal(post_save, send_all_files_to_giles, ContentRelation)
        self.resource_data = {
            'created_by': User.objects.create(username='TestUser')
        }
        User.objects.get_or_create(username='AnonymousUser')

    def test_ingest(self):
        factory = IngesterFactory()
        ingest_class = factory.get('cookies.accession.zotero.ZoteroIngest')
        ingester = ingest_class("test_data/TestRDF.rdf")
        ingester.set_resource_defaults(**self.resource_data)
        N = 0
        for resource in ingester:
            self.assertIsInstance(resource, Resource)
            N += 1
        self.assertEqual(N, 20, "Should create 20 resources from this RDF.")

    def tearDown(self):
        User.objects.all().delete()
        ConceptEntity.objects.all().delete()
        Resource.objects.all().delete()
        Relation.objects.all().delete()
        Collection.objects.all().delete()
        ContentRelation.objects.all().delete()
        reconnect_signal(post_save, send_all_files_to_giles, ContentRelation)
class TestZoteroIngesterWithManagerZIP(unittest.TestCase):
    """Same as the RDF variant, but from a zipped export with attachments."""

    def setUp(self):
        disconnect_signal(post_save, send_all_files_to_giles, ContentRelation)
        self.resource_data = {
            'created_by': User.objects.create(username='TestUser')
        }
        User.objects.get_or_create(username='AnonymousUser')

    def test_ingest(self):
        factory = IngesterFactory()
        ingest_class = factory.get('cookies.accession.zotero.ZoteroIngest')
        ingester = ingest_class("test_data/TestRDF.zip")
        ingester.set_resource_defaults(**self.resource_data)
        N = 0
        for resource in ingester:
            self.assertIsInstance(resource, Resource)
            # The zip bundles content files, so every resource gets content.
            self.assertGreater(resource.content.count(), 0,
                "Each resource in this RDF should have some form of content.")
            N += 1
        self.assertEqual(N, 20, "Should create 20 resources from this RDF.")

    def tearDown(self):
        User.objects.all().delete()
        ConceptEntity.objects.all().delete()
        Resource.objects.all().delete()
        Relation.objects.all().delete()
        reconnect_signal(post_save, send_all_files_to_giles, ContentRelation)
class TestZoteroIngesterRDFOnly(unittest.TestCase):
    """Smoke test: the ingester parses an RDF file without the manager."""

    def test_parse_zotero_rdf(self):
        ingester = ZoteroIngest("test_data/TestRDF.rdf")
        data = ingester.next()
        # Each parsed entry should carry at least a name and an entity type.
        self.assertIn('name', data)
        self.assertIn('entity_type', data)
        # import pprint
        #
        # pprint.pprint(data)
class TestZoteroIngesterWithLinks(unittest.TestCase):
    """Tests for :meth:`ZoteroIngest.handle_link` on URL and file targets."""

    def setUp(self):
        disconnect_signal(post_save, send_all_files_to_giles, ContentRelation)
        self.location = "http://asdf.com/2/"
        self.link = "file:///some/path.pdf"
        # Build a minimal in-memory graph: one attachment pointing at a URL
        # (via a DCTERMS.URI identifier) and one pointing at a local file.
        self.g = Graph()
        self.doc = BNode()
        self.doc2 = BNode()
        self.ident = BNode()
        self.g.add((self.doc, DCTERMS.dateSubmitted, Literal("2014-10-30 18:04:59")))
        self.g.add((self.doc, ZOTERO.itemType, Literal("attachment")))
        self.g.add((self.doc, RDF.type, ZOTERO.Attachment))
        self.g.add((self.doc, DC.identifier, self.ident))
        self.g.add((self.doc, DC.title, Literal("PubMed Central Link")))
        self.g.add((self.doc, RSS.type, Literal("text/html")))
        self.g.add((self.ident, RDF.type, DCTERMS.URI))
        self.g.add((self.ident, RDF.value, URIRef(self.location)))
        self.g.add((self.doc2, RSS.link, Literal(self.link)))
        _, self.rdf_path = tempfile.mkstemp(suffix='.rdf')
        self.g.serialize(self.rdf_path, encoding='utf-8')

    def test_handle_link(self):
        ingester = ZoteroIngest(self.rdf_path)
        # Swap in the original graph so BNode identities match.
        ingester.graph = self.g
        predicate, values = ingester.handle_link(RSS.link, self.doc)
        values = dict(values)
        self.assertIn('url', values)
        self.assertEqual(values['url'], self.location,
            "The URI of the link target should be interpreted as an URL.")
        self.assertIsInstance(values[DCTERMS.dateSubmitted.toPython()],
                              datetime.datetime,
                              "dateSubmitted should be recast as a datetime object.")

    def test_handle_file(self):
        ingester = ZoteroIngest(self.rdf_path)
        ingester.graph = self.g
        predicate, values = ingester.handle_link(RSS.link, self.doc2)
        values = dict(values)
        self.assertIn('link', values)
        # The file:// scheme prefix should be stripped to a bare path.
        self.assertEqual(values['link'], self.link.replace('file://', ''))

    def tearDown(self):
        os.remove(self.rdf_path)
        reconnect_signal(post_save, send_all_files_to_giles, ContentRelation)
class TestZoteroIngester(unittest.TestCase):
    """Unit tests for the individual :class:`.ZoteroIngest` handlers."""

    def setUp(self):
        disconnect_signal(post_save, send_all_files_to_giles, ContentRelation)
        self.test_uri = 'http://the.cool.uri/1'
        self.test_doi = '10.123/45678'
        self.date = Literal("1991")
        _, self.rdf_path = tempfile.mkstemp(suffix='.rdf')
        # One Article with a date, title, link, a URI identifier and a DOI
        # identifier — 10 triples in total.
        self.g = Graph()
        self.doc = BNode()
        self.ident = BNode()
        self.ident2 = BNode()
        self.g.add((self.doc, RDF.type, BIB.Article))
        self.g.add((self.doc, DC.date, self.date))
        self.g.add((self.doc, DC.title, Literal("A T\xc3\xa9st Title".decode('utf-8'))))
        self.g.add((self.doc, RSS.link, Literal(u"http://asdf.com")))
        self.g.add((self.doc, DC.identifier, self.ident))
        self.g.add((self.ident, RDF.type, DCTERMS.URI))
        self.g.add((self.ident, RDF.value, URIRef(self.test_uri)))
        self.g.add((self.doc, DC.identifier, self.ident2))
        self.g.add((self.ident2, RDF.type, BIB.doi))
        self.g.add((self.ident2, RDF.value, URIRef(self.test_doi)))
        self.g.serialize(self.rdf_path, encoding='utf-8')

    def test_load_graph(self):
        """
        Unit test for :meth:`ZoteroIngest.__init__` with RDF document only.
        """
        ingester = ZoteroIngest(self.rdf_path)
        self.assertIsInstance(ingester.graph, Graph,
            "When a path to an RDF document is passed to the constructor, an"
            " rdflib.Graph should be instantiated and populated.")
        self.assertEqual(len(ingester.graph), 10,
            "The Graph should be populated with 10 nodes.")

    def test_get_resources_nodes(self):
        """
        Unit test for :meth:`ZoteroIngest._get_resources_nodes`\.
        """
        ingester = ZoteroIngest(self.rdf_path)
        nodes = ingester._get_resources_nodes(BIB.Article)
        self.assertIsInstance(nodes, types.GeneratorType,
            "_get_resources_nodes Should return a generator object that yields"
            " rdflib.BNodes.")
        nodes = [n for n in nodes]
        self.assertIsInstance(nodes[0], BNode)
        self.assertEqual(len(nodes), 1, "There should be one Article node.")

    def test_new_entry(self):
        """
        Unit test for :meth:`ZoteroIngest._new_entry`\.
        """
        ingester = ZoteroIngest(self.rdf_path)
        before = len(ingester.data)
        ingester._new_entry()
        after = len(ingester.data)
        self.assertEqual(after, before + 1,
            "A new entry should be added to ingester.data")

    def test_set_value(self):
        """
        Unit test for :meth:`ZoteroIngest._set_value`\.
        """
        ingester = ZoteroIngest(self.rdf_path)
        ingester._new_entry()
        ingester._set_value("key", "value")
        self.assertIn("key", ingester.data[-1],
            "_set_value should add the key to the current entry.")
        self.assertEqual(ingester.data[-1]["key"], ["value"],
            "_set_value should add the value to a list")

    def test_get_handler(self):
        """
        Unit test for :meth:`ZoteroIngest._get_handler`\.
        """
        ingester = ZoteroIngest(self.rdf_path)
        handler = ingester._get_handler(DC.identifier)
        self.assertIsInstance(handler, types.MethodType,
            "_get_handler should return an instance method if the predicate"
            " has an explicit handler.")
        try:
            handler('one', 'two')
        except TypeError:
            self.fail("The returned handler should accept two arguments.")
        handler = ingester._get_handler("nonsense")
        self.assertIsInstance(handler, types.LambdaType,
            "_get_handler should return a lambda function if the predicate"
            " does not have an explicit handler.")
        try:
            handler('one', 'two')
        except TypeError:
            self.fail("The returned handler should accept two arguments.")

    def test_handle_identifier(self):
        """
        Unit test for :meth:`ZoteroIngest.handle_identifier`\.
        """
        ingester = ZoteroIngest(self.rdf_path)
        # We want to intervene on our original graph here.
        ingester.graph = self.g
        result = ingester.handle_identifier(DC.identifier, self.ident)
        self.assertIsInstance(result, tuple,
            "Handlers should return tuples.")
        self.assertEqual(result[0], 'uri',
            "DCTERMS.URI identifiers should be used as first-class URIs.")
        self.assertEqual(result[1].toPython(), self.test_uri)
        result = ingester.handle_identifier(DC.identifier, self.ident2)
        self.assertIsInstance(result, tuple,
            "Handlers should return tuples.")
        self.assertEqual(result[0], BIB.doi)
        self.assertEqual(result[1].toPython(), self.test_doi)

    def test_handle_date(self):
        """
        Unit test for :meth:`ZoteroIngest.handle_date`\.
        """
        ingester = ZoteroIngest(self.rdf_path)
        ingester.graph = self.g
        predicate, value = ingester.handle_date(DC.date, self.date)
        self.assertIsInstance(value, datetime.datetime,
            "ISO-8601 compliant dates should be recast to datetime instances.")

    def test_handle_type(self):
        """
        Unit test for :meth:`ZoteroIngest.handle_documentType`\.
        """
        ingester = ZoteroIngest(self.rdf_path)
        ingester.graph = self.g
        predicate, value = ingester.handle_documentType(ZOTERO.itemType, "!")
        self.assertEqual(predicate, "entity_type",
            "ZOTERO.itemType should be flagged as the Resource.entity_type")

    def test_handle_title(self):
        """
        Unit test for :meth:`ZoteroIngest.handle_title`\.
        """
        ingester = ZoteroIngest(self.rdf_path)
        ingester.graph = self.g
        predicate, value = ingester.handle_title(DC.title, "!")
        self.assertEqual(predicate, "name",
            "DC.title should be flagged as the Resource.name")

    def test_handle(self):
        """
        Unit test for :meth:`ZoteroIngest.handle`\.
        """
        ingester = ZoteroIngest(self.rdf_path)
        ingester.graph = self.g
        ingester._new_entry()    # Need somewhere to put the value.
        predicate, value = ingester.handle(DC.identifier, self.ident)
        self.assertEqual(value, self.test_uri,
            "handle() should pass along the predicate and value to"
            " handle_identifier(), and return native Python types.")
        predicate, value = ingester.handle(DC.nonsense, "value")
        self.assertEqual(predicate, DC.nonsense.toPython(),
            "If there are no special handlers for the predicate, it should be"
            " returned as a native Python type.")
        self.assertEqual(value, "value",
            "So too with the corresponding value.")

    def tearDown(self):
        os.remove(self.rdf_path)
        reconnect_signal(post_save, send_all_files_to_giles, ContentRelation)
| diging/jars | cookies/tests/test_import.py | Python | gpl-3.0 | 14,911 |
#!/bin/env python
#function:
# demo to show how to use converted model using caffe2fluid
#
import sys
import os
import numpy as np
import paddle.fluid as fluid
import paddle.v2 as paddle
def test_model(exe, test_program, fetch_list, test_reader, feeder):
acc_set = []
for data in test_reader():
acc_np, pred = exe.run(program=test_program,
feed=feeder.feed(data),
fetch_list=fetch_list)
acc_set.append(float(acc_np))
acc_val = np.array(acc_set).mean()
return float(acc_val)
def evaluate(net_file, model_file):
    """Build the converted LeNet, load its weights and measure MNIST accuracy.

    :param net_file: path to the converted network definition (lenet.py)
    :param model_file: path to the weights (.npy dump or fluid model dir)
    """
    #1, build model
    net_path = os.path.dirname(net_file)
    if net_path not in sys.path:
        sys.path.insert(0, net_path)
    # Import the converted model class from the directory just added to sys.path.
    from lenet import LeNet as MyNet
    #1, define network topology
    images = fluid.layers.data(name='image', shape=[1, 28, 28], dtype='float32')
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    net = MyNet({'data': images})
    prediction = net.layers['prob']
    acc = fluid.layers.accuracy(input=prediction, label=label)
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())
    #2, load weights
    # A .npy file is the caffe2fluid numpy dump and needs the place to
    # materialize tensors; otherwise assume a saved fluid model.
    if model_file.find('.npy') > 0:
        net.load(data_path=model_file, exe=exe, place=place)
    else:
        net.load(data_path=model_file, exe=exe)
    #3, test this model
    test_program = fluid.default_main_program().clone()
    test_reader = paddle.batch(paddle.dataset.mnist.test(), batch_size=128)
    feeder = fluid.DataFeeder(feed_list=[images, label], place=place)
    fetch_list = [acc, prediction]
    print('go to test model using test set')
    acc_val = test_model(exe, test_program, \
            fetch_list, test_reader, feeder)
    print('test accuracy is [%.4f], expected value[0.919]' % (acc_val))
if __name__ == "__main__":
    # Default to the bundled LeNet example; allow overriding via argv.
    net_file = 'models/lenet/lenet.py'
    weight_file = 'models/lenet/lenet.npy'
    argc = len(sys.argv)
    if argc == 3:
        net_file = sys.argv[1]
        weight_file = sys.argv[2]
    elif argc > 1:
        # Wrong number of arguments: print usage and exit non-zero.
        print('usage:')
        print('\tpython %s [net_file] [weight_file]' % (sys.argv[0]))
        # Fixed: the original format string had four '%s' placeholders for
        # only three arguments, which raised TypeError instead of printing.
        print('\teg:python %s %s %s' % (sys.argv[0], net_file, weight_file))
        sys.exit(1)
    evaluate(net_file, weight_file)
| lcy-seso/models | fluid/image_classification/caffe2fluid/examples/mnist/evaluate.py | Python | apache-2.0 | 2,349 |
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
"""
String manipulation functions
"""
import binascii
import re
import string
import unicodedata
from enum import Enum
from itertools import chain
from operator import attrgetter
from uuid import uuid4
import bleach
import email_validator
import markdown
import translitcodec
from html2text import HTML2Text
from jinja2.filters import do_striptags
from lxml import etree, html
from markupsafe import Markup, escape
from sqlalchemy import ForeignKeyConstraint, inspect
# basic list of tags, used for markdown content
BLEACH_ALLOWED_TAGS = bleach.ALLOWED_TAGS + [
'sup', 'sub', 'small', 'br', 'p', 'table', 'thead', 'tbody', 'th', 'tr', 'td', 'img', 'hr', 'h1', 'h2', 'h3', 'h4',
'h5', 'h6', 'pre', 'dl', 'dd', 'dt'
]
BLEACH_ALLOWED_ATTRIBUTES = dict(bleach.ALLOWED_ATTRIBUTES, img=['src', 'alt', 'style'])
# extended list of tags, used for HTML content
BLEACH_ALLOWED_TAGS_HTML = BLEACH_ALLOWED_TAGS + [
'address', 'area', 'bdo', 'big', 'caption', 'center', 'cite', 'col', 'colgroup', 'del', 'dfn', 'dir', 'div',
'fieldset', 'font', 'ins', 'kbd', 'legend', 'map', 'menu', 'q', 's', 'samp', 'span', 'strike', 'tfoot', 'tt', 'u',
'var'
]
# yuck, this is ugly, but all these attributes were allowed in legacy...
BLEACH_ALLOWED_ATTRIBUTES_HTML = dict(BLEACH_ALLOWED_ATTRIBUTES, **{'*': [
'align', 'abbr', 'alt', 'border', 'bgcolor', 'class', 'cellpadding', 'cellspacing', 'color', 'char', 'charoff',
'cite', 'clear', 'colspan', 'compact', 'dir', 'disabled', 'face', 'href', 'height', 'headers', 'hreflang', 'hspace',
'id', 'ismap', 'lang', 'name', 'noshade', 'nowrap', 'rel', 'rev', 'rowspan', 'rules', 'size', 'scope', 'shape',
'span', 'src', 'start', 'style', 'summary', 'tabindex', 'target', 'title', 'type', 'valign', 'value', 'vspace',
'width', 'wrap'
]})
BLEACH_ALLOWED_STYLES_HTML = [
'background-color', 'border-top-color', 'border-top-style', 'border-top-width', 'border-top', 'border-right-color',
'border-right-style', 'border-right-width', 'border-right', 'border-bottom-color', 'border-bottom-style',
'border-bottom-width', 'border-bottom', 'border-left-color', 'border-left-style', 'border-left-width',
'border-left', 'border-color', 'border-style', 'border-width', 'border', 'bottom', 'border-collapse',
'border-spacing', 'color', 'clear', 'clip', 'caption-side', 'display', 'direction', 'empty-cells', 'float',
'font-size', 'font-family', 'font-style', 'font', 'font-variant', 'font-weight', 'font-size-adjust', 'font-stretch',
'height', 'left', 'list-style-type', 'list-style-position', 'line-height', 'letter-spacing', 'marker-offset',
'margin', 'margin-left', 'margin-right', 'margin-top', 'margin-bottom', 'max-height', 'min-height', 'max-width',
'min-width', 'marks', 'overflow', 'outline-color', 'outline-style', 'outline-width', 'outline', 'orphans',
'position', 'padding-top', 'padding-right', 'padding-bottom', 'padding-left', 'padding', 'page', 'page-break-after',
'page-break-before', 'page-break-inside', 'quotes', 'right', 'size', 'text-align', 'top', 'table-layout',
'text-decoration', 'text-indent', 'text-shadow', 'text-transform', 'unicode-bidi', 'visibility', 'vertical-align',
'width', 'widows', 'white-space', 'word-spacing', 'word-wrap', 'z-index'
]
LATEX_MATH_PLACEHOLDER = "\uE000"
def remove_accents(text):
    """Return *text* with all combining accent marks stripped."""
    decomposed = unicodedata.normalize('NFD', text)
    # 'Mn' (nonspacing mark) is the category of the combining accents.
    return ''.join(ch for ch in decomposed if unicodedata.category(ch) != 'Mn')
def remove_non_alpha(text):
    """Drop every character of *text* that is not alphanumeric."""
    return ''.join(filter(str.isalnum, text))
def str_to_ascii(text):
    """Transliterate *text* to plain ASCII and strip surrounding whitespace.

    Uses translitcodec's long transliteration table; characters that still
    cannot be mapped to ASCII afterwards are dropped (``errors='ignore'``).
    """
    return translitcodec.long_encode(text)[0].encode('ascii', 'ignore').decode().strip()
def strict_str(value):
    """Convert a value to unicode or fails if it is None.

    Useful when converting e.g. IDs to path segments. Usually they
    should not be ``None`` so we do not want to fail silently (and end
    up with a literal ``None`` in the path).
    """
    if value is not None:
        return str(value)
    raise TypeError('strict_str does not accept `None`')
def slugify(*args, **kwargs):
    """Join a series of strings into a URL slug.

    - normalizes strings to proper ascii repesentations
    - removes non-alphanumeric characters
    - replaces whitespace with dashes

    :param lower: Whether the slug should be all-lowercase
    :param maxlen: Maximum slug length
    :param fallback: Fallback in case of an empty slug
    """
    lower = kwargs.get('lower', True)
    maxlen = kwargs.get('maxlen')
    fallback = kwargs.get('fallback', '')
    value = '-'.join(str(val) for val in args)
    # Transliterate to ASCII first so accented characters survive as letters.
    value = translitcodec.long_encode(value)[0]
    # Drop anything that is not an ASCII word character, whitespace or dash.
    value = re.sub(r'[^\w\s-]', '', value, flags=re.ASCII).strip()
    if lower:
        value = value.lower()
    # Collapse runs of whitespace/dashes into a single dash.
    value = re.sub(r'[-\s]+', '-', value)
    if maxlen:
        # Truncate, avoiding a trailing dash created by the cut.
        value = value[0:maxlen].rstrip('-')
    return value or fallback
def truncate(text, max_size, ellipsis='...'):
    """Truncate text if it's too long."""
    if len(text) <= max_size:
        return text
    return text[:max_size] + ellipsis
def strip_tags(text):
    """Strip HTML tags and replace adjacent whitespace by one space."""
    # Delegates to Jinja2's striptags filter.
    return do_striptags(text)
def render_markdown(text, escape_latex_math=True, md=None, **kwargs):
    """Mako markdown to HTML filter.

    :param text: Markdown source to convert to HTML
    :param escape_latex_math: Whether math expression should be left untouched or a function that will be called
                              to replace math-mode segments.
    :param md: An alternative markdown processor (can be used
               to generate e.g. a different format)
    :param kwargs: Extra arguments to pass on to the markdown
                   processor
    """
    if escape_latex_math:
        # Stash math-mode segments and swap in a private-use placeholder so
        # the markdown processor never sees (and mangles) LaTeX syntax.
        math_segments = []
        def _math_replace(m):
            segment = m.group(0)
            if callable(escape_latex_math):
                segment = escape_latex_math(segment)
            math_segments.append(segment)
            return LATEX_MATH_PLACEHOLDER
        # NOTE(review): the second alternative r'\$\$(^\$)\$\$' looks
        # suspicious (presumably r'\$\$[^\$]+\$\$' was intended) — confirm
        # the intent before changing it.
        text = re.sub(r'\$[^\$]+\$|\$\$(^\$)\$\$', _math_replace, text)
    if md is None:
        # Sanitize the processor's HTML output with the basic tag whitelist.
        result = bleach.clean(markdown.markdown(text, **kwargs), tags=BLEACH_ALLOWED_TAGS,
                              attributes=BLEACH_ALLOWED_ATTRIBUTES)
    else:
        result = md(text, **kwargs)
    if escape_latex_math:
        # Restore the stashed math segments in their original order.
        return re.sub(LATEX_MATH_PLACEHOLDER, lambda _: math_segments.pop(0), result)
    else:
        return result
def sanitize_for_platypus(text):
    """Sanitize HTML to be used in platypus."""
    tags = ['b', 'br', 'em', 'font', 'i', 'img', 'strike', 'strong', 'sub', 'sup', 'u', 'span', 'div', 'p']
    attrs = {
        'font': ['size', 'face', 'color'],
        'img': ['src', 'width', 'height', 'valign']
    }
    # strip=True removes disallowed tags entirely instead of escaping them.
    res = bleach.clean(text, tags=tags, attributes=attrs, strip=True).strip()
    if not res:
        return ''
    # Convert to XHTML
    doc = html.fromstring(res)
    return etree.tostring(doc)
def is_valid_mail(emails_string, multi=True):
    """Deprecated wrapper around the email validators.

    :param emails_string: one address, or (with ``multi``) a
                          space/semicolon/comma-separated list
    :param multi: whether to accept a list of addresses
    """
    # XXX: This is deprecated, use `validate_email` or `validate_emails` instead!
    # Remove this in 2.2 when the 'multi' mode is not needed anymore (only used in RB)
    # and don't forget to update the paypal plugin as well!
    if not emails_string:
        return False
    return validate_emails(emails_string) if multi else validate_email(emails_string)
def validate_email(email):
    """Validate the given email address.

    This checks both if it looks valid and if it has valid
    MX (or A/AAAA) records.

    :return: ``True`` if the address validates, ``False`` otherwise
    """
    try:
        email_validator.validate_email(email)
    except email_validator.EmailNotValidError:
        return False
    else:
        return True
def validate_email_verbose(email):
    """Validate the given email address.

    This checks both if it looks valid and if it has valid
    MX (or A/AAAA) records.

    :return: ``None`` for a valid email address, otherwise ``'invalid'`` or
             ``'undeliverable'`` depending on whether the email address has
             syntax errors or dns validation failed.
    """
    try:
        email_validator.validate_email(email)
    # EmailUndeliverableError is caught before the more general
    # EmailNotValidError (presumably a subclass of it) — the order matters.
    except email_validator.EmailUndeliverableError:
        return 'undeliverable'
    except email_validator.EmailNotValidError:
        return 'invalid'
    else:
        return None
def validate_emails(emails):
    """Validate a space/semicolon/comma-separated list of email addresses."""
    for address in re.split(r'[\s;,]+', emails):
        # Empty fragments (e.g. from trailing separators) are ignored.
        if address and not validate_email(address):
            return False
    return True
def natural_sort_key(s, _nsre=re.compile('([0-9]+)')):
    """Sort key that orders embedded numbers numerically ('a2' < 'a10')."""
    return [int(part) if part.isdigit() else part.lower()
            for part in _nsre.split(s)]
def seems_html(text):
    """Guess whether *text* contains HTML (any lowercase ``<tag>``)."""
    return bool(re.search(r'<[a-z]+?>', text))
def strip_control_chars(text):
    """Remove ASCII control characters in the range 0x0B-0x1F.

    Tab (0x09) and newline (0x0A) are deliberately preserved.
    """
    return ''.join(ch for ch in text if not '\x0b' <= ch <= '\x1f')
def html_color_to_rgb(hexcolor):
    """Convert #RRGGBB to an (R, G, B) tuple.

    :param hexcolor: a ``#RGB`` or ``#RRGGBB`` color string
    :return: a tuple of three floats in the range [0, 1]
    :raises ValueError: if the string is not a valid HTML color
    """
    if not hexcolor.startswith('#'):
        raise ValueError(f"Invalid color string '{hexcolor}' (should start with '#')")
    hexcolor = hexcolor[1:]
    if len(hexcolor) not in {3, 6}:
        # Fixed: the original message contained a stray doubled quote
        # ("'#...''" instead of "'#...'").
        raise ValueError(f"'#{hexcolor}' is not in #RRGGBB or #RGB format")
    if len(hexcolor) == 3:
        # Expand shorthand #RGB to #RRGGBB by doubling each digit.
        hexcolor = ''.join(c * 2 for c in hexcolor)
    return tuple(float(int(hexcolor[i:i + 2], 16)) / 255 for i in range(0, 6, 2))
def strip_whitespace(s):
    """Remove trailing/leading whitespace if a string was passed.

    Non-string values (e.g. ``None`` coming through WTForms filters) are
    returned untouched.
    """
    return s.strip() if isinstance(s, str) else s
def make_unique_token(is_unique):
    """Create a unique UUID4-based token.

    :param is_unique: a callable invoked with the token which should
                      return a boolean indicating if the token is
                      actually unique
    """
    # Keep generating until the predicate accepts a token.
    while True:
        token = str(uuid4())
        if is_unique(token):
            return token
def is_legacy_id(id_):
    """Check if an ID is a broken legacy ID.

    These IDs are not compatible with new code since they are not
    numeric or have a leading zero, resulting in different objects
    with the same numeric id.
    """
    if isinstance(id_, int):
        return False
    # Non-numeric strings and strings with leading zeros are legacy.
    return not id_.isdigit() or str(int(id_)) != id_
def text_to_repr(text, html=False, max_length=50):
    """Convert text to a suitable string for a repr.

    :param text: A string which might contain html and/or linebreaks
    :param html: If True, HTML tags are stripped.
    :param max_length: The maximum length before the string is
                       truncated. Use ``None`` to disable.
    :return: A string that contains no linebreaks or HTML tags.
    """
    cleaned = '' if text is None else text
    if html:
        cleaned = bleach.clean(cleaned, tags=[], strip=True)
    # Collapse all runs of whitespace (incl. linebreaks) to single spaces.
    cleaned = re.sub(r'\s+', ' ', cleaned)
    if max_length is not None and len(cleaned) > max_length:
        cleaned = cleaned[:max_length] + '...'
    return cleaned.strip()
def alpha_enum(value):
    """Convert integer to ordinal letter code (a, b, c, ... z, aa, bb, ...)."""
    alphabet_size = len(string.ascii_lowercase)
    letter = string.ascii_lowercase[value % alphabet_size]
    # Repeat the letter once more for each time we wrapped around.
    return str(letter * (value // alphabet_size + 1))
def format_repr(obj, *args, **kwargs):
    """Create a pretty repr string from object attributes.

    :param obj: The object to show the repr for.
    :param args: The names of arguments to include in the repr.
                 The arguments are shown in order using their unicode
                 representation.
    :param kwargs: Each kwarg is included as a ``name=value`` string
                   if it doesn't match the provided value.  This is
                   mainly intended for boolean attributes such as
                   ``is_deleted`` where you don't want them to
                   clutter the repr unless they are set.
    :param _text: When the keyword argument `_text` is provided and
                  not ``None``, it will include its value as extra
                  text in the repr inside quotes.  This is useful
                  for objects which have one longer title or text
                  that doesn't look well in the unquoted
                  comma-separated argument list.
    :param _rawtext: Like `_text` but without surrounding quotes.
    :param _repr: Similar as `_text`, but uses the `repr()` of the
                  passed object instead of quoting it.  Cannot be
                  used together with `_text`.
    """
    def _format_value(value):
        # Enums render by name instead of their full repr.
        if isinstance(value, Enum):
            return value.name
        else:
            return value
    text_arg = kwargs.pop('_text', None)
    raw_text_arg = kwargs.pop('_rawtext', None)
    repr_arg = kwargs.pop('_repr', None)
    cls = type(obj)
    obj_name = cls.__name__
    # Collect the foreign-key column names of a SQLAlchemy model (if any)
    # so those args render as `name=value` instead of just the value.
    fkeys = set(chain.from_iterable(c.column_keys
                                    for t in inspect(cls).tables
                                    for c in t.constraints
                                    if isinstance(c, ForeignKeyConstraint))) if hasattr(cls, '__table__') else set()
    formatted_args = [str(_format_value(getattr(obj, arg)))
                      if arg not in fkeys
                      else f'{arg}={_format_value(getattr(obj, arg))}'
                      for arg in args]
    for name, default_value in sorted(kwargs.items()):
        value = getattr(obj, name)
        # Only include kwargs whose value differs from the given default.
        if value != default_value:
            formatted_args.append(f'{name}={_format_value(value)}')
    if text_arg is not None:
        return '<{}({}): "{}">'.format(obj_name, ', '.join(formatted_args), text_arg)
    elif raw_text_arg is not None:
        return '<{}({}): {}>'.format(obj_name, ', '.join(formatted_args), raw_text_arg)
    elif repr_arg is not None:
        return '<{}({}): {!r}>'.format(obj_name, ', '.join(formatted_args), repr_arg)
    else:
        return '<{}({})>'.format(obj_name, ', '.join(formatted_args))
def snakify(name):
    """Convert a camelCased name to snake_case."""
    # from http://stackoverflow.com/a/1176023/298479
    # First pass: break before each capitalized word ("camelCase" -> "camel_Case"),
    # second pass: break any remaining lower/digit->upper boundary, then lowercase.
    partial = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', name)
    return re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', partial).lower()
def camelize(name):
    """Turn a snake_cased identifier into its camelCase equivalent.

    A single leading underscore is preserved as-is.
    """
    prefix = ''
    if name.startswith('_'):
        prefix = '_'
        name = name[1:]
    head, *tail = name.split('_')
    return prefix + head + ''.join(piece.title() for piece in tail)
def _convert_keys(value, convert_func):
if isinstance(value, (list, tuple)):
return type(value)(_convert_keys(x, convert_func) for x in value)
elif not isinstance(value, dict):
return value
return {convert_func(k): _convert_keys(v, convert_func) for k, v in value.items()}
def camelize_keys(dict_):
    """Return a copy of *dict_* with every (nested) key converted to camelCase."""
    return _convert_keys(dict_, convert_func=camelize)
def snakify_keys(dict_):
    """Return a copy of *dict_* with every (nested) key converted to snake_case."""
    return _convert_keys(dict_, convert_func=snakify)
def crc32(data):
    """Calculate a CRC32 checksum.

    ``str`` input is encoded as UTF-8 first; the result is always an
    unsigned 32-bit integer.
    """
    payload = data.encode('utf-8') if isinstance(data, str) else data
    return binascii.crc32(payload) & 0xFFFFFFFF
def normalize_phone_number(value):
    """Normalize a phone number so it doesn't contain invalid characters.

    Keeps digits, ``x`` and a single leading ``+``; everything else is
    removed (see http://stackoverflow.com/a/123681/298479).
    """
    cleaned = value.strip()
    invalid_chars = r'((?!^)\+)|[^0-9x+]'
    return re.sub(invalid_chars, '', cleaned)
def format_full_name(first_name, last_name, title=None, last_name_first=True, last_name_upper=True,
                     abbrev_first_name=True, show_title=False):
    """Return the user's name in the specified notation.

    Note: Do not use positional arguments (except for the names/title)
    when calling this method. Always use keyword arguments!

    :param first_name: The first name (may be empty)
    :param last_name: The last name
    :param title: The title (may be empty/None)
    :param last_name_first: whether to use "lastname, firstname" instead
                            of "firstname lastname"
    :param last_name_upper: whether the last name should be all-uppercase
    :param abbrev_first_name: whether the first name should be abbreviated
                              to only its first character
    :param show_title: whether the title should be included
    """
    if last_name_upper:
        last_name = last_name.upper()
    if first_name:
        if abbrev_first_name:
            first_name = f'{first_name[0].upper()}.'
        if last_name_first:
            name = f'{last_name}, {first_name}'
        else:
            name = f'{first_name} {last_name}'
    else:
        name = last_name
    if show_title and title:
        return f'{title} {name}'
    return name
def sanitize_email(email, require_valid=False):
    """Extract a bare address from *email* and optionally validate it.

    If the address is wrapped in angle brackets (``Name <addr>``) only the
    part inside the brackets is kept.  With ``require_valid=True``, ``None``
    is returned unless the address passes ``validate_email``.
    """
    if '<' in email:
        match = re.search(r'<([^>]+)>', email)
        if match is not None:
            email = match.group(1)
    if require_valid and not validate_email(email):
        return None
    return email
def sanitize_html(string):
    """Strip disallowed tags/attributes/styles from an HTML string using bleach."""
    return bleach.clean(string, tags=BLEACH_ALLOWED_TAGS_HTML, attributes=BLEACH_ALLOWED_ATTRIBUTES_HTML,
                        styles=BLEACH_ALLOWED_STYLES_HTML)
def html_to_plaintext(string):
    """Return the plain-text content of an HTML string (parsed via lxml html5parser)."""
    return html.html5parser.fromstring(string).xpath('string()')
class RichMarkup(Markup):
    """Unicode/Markup subclass that detects preformatted text.

    Note that HTML in this string will NOT be escaped when displaying
    it in a jinja template.
    """

    # The only extra state beyond the str payload; requires the manual
    # pickling support below since slotted classes have no __dict__.
    __slots__ = ('_preformatted',)

    def __new__(cls, content='', preformatted=None):
        obj = Markup.__new__(cls, content)
        if preformatted is None:
            # Heuristic: content without common block-level/line-break tags
            # is treated as preformatted plain text.
            tmp = content.lower()
            obj._preformatted = not any(tag in tmp for tag in ('<p>', '<p ', '<br', '<li>'))
        else:
            obj._preformatted = preformatted
        return obj

    def __html__(self):
        # XXX: ensure we have no harmful HTML - there are certain malicious values that
        # are not caught by the legacy sanitizer that runs at submission time
        string = RichMarkup(sanitize_html(str(self)), preformatted=self._preformatted)
        if string._preformatted:
            return f'<div class="preformatted">{string}</div>'
        else:
            return string

    def __getstate__(self):
        # Manual pickling: collect the slot values that are actually set.
        return {slot: getattr(self, slot) for slot in self.__slots__ if hasattr(self, slot)}

    def __setstate__(self, state):
        for slot, value in state.items():
            setattr(self, slot, value)
class MarkdownText(Markup):
    """Unicode/Markup class that renders markdown."""

    def __html__(self):
        # Render with hard line breaks (nl2br) and table support.
        return render_markdown(str(self), extensions=('nl2br', 'tables'))
class PlainText(Markup):
    """Unicode/Markup class that renders plain text."""

    def __html__(self):
        # Escape everything and wrap it in a "preformatted" div.
        return f'<div class="preformatted">{escape(str(self))}</div>'
def handle_legacy_description(field, obj, get_render_mode=attrgetter('render_mode'),
                              get_value=attrgetter('_description')):
    """Check if the object in question is using an HTML description and convert it.

    The description will be automatically converted to Markdown and a warning will
    be shown next to the field.

    :param field: the WTForms field to be checked
    :param obj: the object whose render mode/description will be checked
    :param get_render_mode: callable extracting the render mode from *obj*
    :param get_value: callable extracting the raw description from *obj*
    """
    from indico.core.db.sqlalchemy.descriptions import RenderMode
    from indico.util.i18n import _
    if get_render_mode(obj) == RenderMode.html:
        field.warning = _("This text has been automatically converted from HTML to Markdown. "
                          "Please double-check that it's properly displayed.")
        ht = HTML2Text(bodywidth=0)
        desc = get_value(obj)
        if RichMarkup(desc)._preformatted:
            # Preformatted text relies on literal newlines; make them explicit
            # <br> so html2text keeps the line breaks.
            desc = desc.replace('\n', '<br>\n')
        field.data = ht.handle(desc)
| DirkHoffmann/indico | indico/util/string.py | Python | gpl-3.0 | 20,141 |
from builtins import property
import collections
import json
import re
from django import template
from django.conf import settings
from django.conf.locale import ru
from django.contrib import messages
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
from django.forms.utils import ErrorList
from django.http.response import Http404, HttpResponse
from django.shortcuts import redirect, render
from django.template.loader import render_to_string
from django.urls.base import reverse
from django.utils import formats, timezone
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from django.views.generic.base import RedirectView, TemplateView
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from django_filters.views import FilterView
from rest_framework import viewsets, mixins
from rest_framework.authentication import SessionAuthentication,\
BasicAuthentication
from rest_framework.permissions import IsAuthenticated
from weasyprint import HTML
from cmj.core.forms import OperadorAreaTrabalhoForm, ImpressoEnderecamentoForm,\
ListWithSearchForm
from cmj.core.models import Cep, TipoLogradouro, Logradouro, RegiaoMunicipal,\
Distrito, Bairro, Trecho, AreaTrabalho, OperadorAreaTrabalho,\
ImpressoEnderecamento, groups_remove_user, groups_add_user, Notificacao,\
CertidaoPublicacao, Bi
from cmj.core.serializers import TrechoSearchSerializer, TrechoSerializer
from cmj.utils import normalize
from sapl.crud.base import Crud, CrudAux, MasterDetailCrud, RP_DETAIL, RP_LIST
from sapl.parlamentares.models import Partido, Filiacao
# Auto-generated CRUD view sets for the simple address-book auxiliary models.
CepCrud = CrudAux.build(Cep, None, 'cep')
RegiaoMunicipalCrud = CrudAux.build(
    RegiaoMunicipal, None, 'regiao_municipal')
DistritoCrud = CrudAux.build(Distrito, None, 'distrito')
# NOTE(review): unlike its siblings, BairroCrud passes no third (slug)
# argument -- confirm whether 'bairro' was intended here.
BairroCrud = CrudAux.build(Bairro, None)
TipoLogradouroCrud = CrudAux.build(
    TipoLogradouro, None, 'tipo_logradouro')
LogradouroCrud = CrudAux.build(Logradouro, None, 'logradouro')
def template_render(request, template_name):
    """Render an arbitrary template name with an empty context."""
    context = {}
    return render(request, template_name, context)
def chanel_index(request):
    """Render the channels index page."""
    context = {}
    return render(request, 'core/channel_index.html', context)
def chanel_room(request, room_name):
    """Render a channel room page, exposing the room name as safe JSON."""
    context = {
        'room_name_json': mark_safe(json.dumps(room_name)),
    }
    return render(request, 'core/channel_room.html', context)
def time_refresh_log_test(request):
    """Render the log auto-refresh test page."""
    context = {}
    return render(request, 'core/time_refresh_log_test.html', context)
def app_vue_view(request):
    """Serve the base template that bootstraps the Vue application."""
    return render(request, 'base.html')
class TrechoCrud(CrudAux):
    """CRUD for street segments (Trecho) of the CEP/address database."""

    help_text = 'trecho'
    model = Trecho

    class BaseMixin(CrudAux.BaseMixin):
        # Columns of the list view; tipo and logradouro share one column.
        list_field_names = [
            ('tipo', 'logradouro'), 'bairro', 'municipio', 'cep']

    class ListView(CrudAux.ListView):
        form_search_class = ListWithSearchForm

        def get(self, request, *args, **kwargs):
            """trechos = Trecho.objects.all()
            for t in trechos:
                t.search = str(t)
                t.save(auto_update_search=False)"""
            # The docstring above is disabled one-off code kept for reference
            # (it rebuilds the denormalized ``search`` field).
            return CrudAux.ListView.get(
                self, request, *args, **kwargs)

        def get_context_data(self, **kwargs):
            context = CrudAux.ListView.get_context_data(
                self, **kwargs)
            context['title'] = _("Base de Cep's e Endereços")
            return context

    class CreateView(CrudAux.CreateView):

        def post(self, request, *args, **kwargs):
            # NOTE(review): ``super(CrudAux.CreateView, self).post(self, ...)``
            # passes ``self`` twice and targets the parent class in super();
            # the usual form is ``super().post(request, *args, **kwargs)``.
            # Confirm before changing -- kept verbatim here.
            response = super(CrudAux.CreateView, self).post(
                self, request, *args, **kwargs)
            # FIXME: needed while save() does not handle m2m fields --
            # refresh the denormalized search text after saving.
            self.object.search = str(self.object)
            self.object.save(auto_update_search=False)
            return response

    class UpdateView(CrudAux.UpdateView):

        def post(self, request, *args, **kwargs):
            # NOTE(review): same double-``self`` super() pattern as CreateView.
            response = super(CrudAux.UpdateView, self).post(
                self, request, *args, **kwargs)
            # FIXME: needed while save() does not handle m2m fields --
            # refresh the denormalized search text after saving.
            self.object.search = str(self.object)
            self.object.save(auto_update_search=False)
            return response
"""
class TrechoSearchView(PermissionRequiredMixin, FilterView):
template_name = 'search/search.html'
filterset_class = TrechoFilterSet
permission_required = 'core.search_trecho'
paginate_by = 20
def get(self, request, *args, **kwargs):
return SearchView.get(self, request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(TrechoSearchView,
self).get_context_data(**kwargs)
context['title'] = _('Pesquisa de Endereços')
paginator = context['paginator']
page_obj = context['page_obj']
context['page_range'] = make_pagination(
page_obj.number, paginator.num_pages)
qr = self.request.GET.copy()
if 'page' in qr:
del qr['page']
context['filter_url'] = ('&' + qr.urlencode()) if len(qr) > 0 else ''
return context"""
class TrechoJsonSearchView(mixins.ListModelMixin, viewsets.GenericViewSet):
    """REST endpoint: search Trecho rows via the ``q`` query parameter."""

    serializer_class = TrechoSearchSerializer
    permission_classes = (IsAuthenticated,)
    authentication_classes = (SessionAuthentication, BasicAuthentication)
    page_size = 0

    def get_queryset(self, *args, **kwargs):
        """Filter rows whose denormalized ``search`` field contains every
        normalized word of ``q`` (AND semantics); no filter without ``q``."""
        request = self.request
        queryset = Trecho.objects.all()

        if request.GET.get('q') is not None:
            query = normalize(str(request.GET.get('q')))

            query = query.split(' ')
            if query:
                q = Q()
                for item in query:
                    if not item:
                        continue
                    q = q & Q(search__icontains=item)
                if q:
                    queryset = queryset.filter(q)

        return queryset
class TrechoJsonView(mixins.RetrieveModelMixin, viewsets.GenericViewSet):
    """REST endpoint: retrieve a single Trecho by primary key."""

    serializer_class = TrechoSerializer
    permission_classes = (IsAuthenticated,)
    authentication_classes = (SessionAuthentication, BasicAuthentication)
    queryset = Trecho.objects.all()
class AreaTrabalhoCrud(Crud):
    """CRUD for workspaces (áreas de trabalho) including an operator listing."""

    model = AreaTrabalho
    model_set = 'operadorareatrabalho_set'

    class BaseMixin(Crud.BaseMixin):
        list_field_names = ['nome', 'tipo', 'parlamentar',
                            'ativo', 'operadores']

        def get_context_data(self, **kwargs):
            context = super().get_context_data(**kwargs)
            # Default sub-navigation unless a view already set one.
            if 'subnav_template_name' not in context:
                context['subnav_template_name'] = 'core/subnav_areatrabalho.yaml'
            return context

    class DetailView(Crud.DetailView):
        list_field_names_set = ['user_name', ]

    class ListView(Crud.ListView):
        paginate_by = 100

        def get_context_data(self, **kwargs):
            context = super().get_context_data(**kwargs)
            # The list view shows no sub-navigation.
            context['subnav_template_name'] = ''
            return context

        def hook_header_ativo(self):
            # Column header label for the 'ativo' field.
            return "At. Ativa"

        def hook_operadores(self, *args, **kwargs):
            """Render the workspace's operators as an HTML list (name + email)."""
            lista_html = ''
            for u in args[0].operadores.all():
                lista_html += '<li>{}<br><small>{}</small></li>'.format(
                    u.get_full_name(), u.email)
            return '<ul>{}</ul>'.format(lista_html), ''
class OperadorAreaTrabalhoCrud(MasterDetailCrud):
    """CRUD for workspace operators; keeps Django auth groups in sync
    with each operator's ``grupos_associados``."""

    parent_field = 'areatrabalho'
    model = OperadorAreaTrabalho
    help_path = 'operadorareatrabalho'

    class BaseMixin(MasterDetailCrud.BaseMixin):

        def get_context_data(self, **kwargs):
            context = super().get_context_data(**kwargs)
            context[
                'subnav_template_name'] = 'core/subnav_areatrabalho.yaml'
            return context

    class ListView(MasterDetailCrud.ListView):

        def hook_user(self, *args, **kwargs):
            # Render "Full Name <br> email" for the user column.
            u = args[0].user
            lista_html = '{}<br><small>{}</small></li>'.format(
                u.get_full_name(), u.email)
            return lista_html, args[2]

    class UpdateView(MasterDetailCrud.UpdateView):
        form_class = OperadorAreaTrabalhoForm
        # TODO: make the operator field read-only when editing

        def form_valid(self, form):
            """Revoke the groups granted by the old record, save, then grant
            the groups associated with the updated record."""
            old = OperadorAreaTrabalho.objects.get(pk=self.object.pk)
            groups = list(old.grupos_associados.values_list('name', flat=True))
            groups_remove_user(old.user, groups)

            response = super().form_valid(form)

            groups = list(self.object.grupos_associados.values_list(
                'name', flat=True))
            groups_add_user(self.object.user, groups)

            return response

    class CreateView(MasterDetailCrud.CreateView):
        form_class = OperadorAreaTrabalhoForm
        # TODO: only offer users without a group or with social-login access

        def form_valid(self, form):
            """Reject duplicate (user, workspace) pairs, then save and grant
            the associated groups."""
            self.object = form.save(commit=False)

            oper = OperadorAreaTrabalho.objects.filter(
                user_id=self.object.user_id,
                areatrabalho_id=self.object.areatrabalho_id
            ).first()

            if oper:
                form._errors['user'] = ErrorList([_(
                    'Este Operador já está registrado '
                    'nesta Área de Trabalho.')])
                return self.form_invalid(form)

            response = super().form_valid(form)

            groups = list(self.object.grupos_associados.values_list(
                'name', flat=True))
            groups_add_user(self.object.user, groups)

            return response

    class DeleteView(MasterDetailCrud.DeleteView):

        def post(self, request, *args, **kwargs):
            """Revoke groups and purge the operator's unread notifications
            for this workspace before deleting the record."""
            self.object = self.get_object()
            groups = list(
                self.object.grupos_associados.values_list('name', flat=True))
            groups_remove_user(self.object.user, groups)

            self.object.user.notificacao_set.filter(
                areatrabalho=self.object.areatrabalho,
                read=False).delete()

            return MasterDetailCrud.DeleteView.post(
                self, request, *args, **kwargs)
class PartidoCrud(Crud):
    """CRUD for political parties, scoped to the operator's workspace."""

    help_text = 'partidos'
    model_set = 'filiacaopartidaria_set'
    model = Partido
    container_field_set = 'contato__workspace__operadores'
    # container_field = 'filiacoes_partidarias_set__contato__workspace__operadores'

    class DetailView(Crud.DetailView):
        list_field_names_set = ['contato_nome', ]

    class ListView(Crud.ListView):
        paginate_by = 100

        def get(self, request, *args, **kwargs):
            # Parliamentarian workspaces jump straight to their own party's
            # detail page instead of the full list.
            ws = AreaTrabalho.objects.filter(operadores=request.user).first()
            if ws and ws.parlamentar:
                filiacao_parlamentar = Filiacao.objects.filter(
                    parlamentar=ws.parlamentar)
                if filiacao_parlamentar.exists():
                    partido = filiacao_parlamentar.first().partido
                    return redirect(
                        reverse(
                            'sapl.parlamentares:partido_detail',
                            args=(partido.pk,)))
            # Disabled branch kept for reference (string literal, not executed):
            """else:
                self.kwargs['queryset_liberar_sem_container'] = True"""
            return Crud.ListView.get(
                self, request, *args, **kwargs)

        # Disabled override kept for reference (string literal, not executed):
        """def get_queryset(self):
            queryset = CrudListView.get_queryset(self)

            if not self.request.user.is_authenticated:
                return queryset

            if 'queryset_liberar_sem_container' in self.kwargs and\
                    self.kwargs['queryset_liberar_sem_container']:
                return queryset

            if self.container_field:
                params = {}
                params[self.container_field] = self.request.user.pk

                return queryset.filter(**params)

            return queryset"""
class ImpressoEnderecamentoCrud(Crud):
    """CRUD for address-label print templates, using a custom form."""

    model = ImpressoEnderecamento

    class UpdateView(Crud.UpdateView):
        form_class = ImpressoEnderecamentoForm

    class CreateView(Crud.CreateView):
        form_class = ImpressoEnderecamentoForm
class NotificacaoRedirectView(RedirectView):
    """Mark a notification as read and redirect to its target object's page.

    Only the notification's owner may follow the link; a missing/unknown
    notification or a foreign user yields a 404.
    """

    permanent = False

    def get_redirect_url(self, *args, **kwargs):
        try:
            obj = Notificacao.objects.get(pk=kwargs['pk'])
        except (Notificacao.DoesNotExist, KeyError, ValueError):
            # Narrowed from a bare ``except`` so unrelated failures (DB
            # outages, programming errors) are no longer masked as 404s.
            raise Http404()
        if self.request.user != obj.user:
            raise Http404()
        obj.read = True
        obj.not_send_mail = True  # don't send an email in post_save
        obj.save()
        # Resolve '<app>:<model>_detail' of the object the notification targets.
        self.pattern_name = '%s:%s_detail' % (
            obj.content_object._meta.app_config.name,
            obj.content_object._meta.model_name
        )
        kwargs['pk'] = obj.content_object.pk
        url = RedirectView.get_redirect_url(self, *args, **kwargs)
        url += '#item-%s' % obj.content_object.pk
        return url
class CertidaoPublicacaoCrud(Crud):
    """CRUD for publication certificates: public list, PDF detail view and a
    GET-triggered creation endpoint.  Certificates cannot be deleted."""

    model = CertidaoPublicacao
    public = [RP_DETAIL, RP_LIST]
    DeleteView = None

    class BaseMixin(Crud.BaseMixin):
        list_field_names = ['id', 'created',
                            'content_type', 'content_object', 'signs']

        @property
        def create_url(self):
            # Creation happens through the custom CreateView URL, so the
            # generic "create" button is suppressed.
            return ''

    class ListView(Crud.ListView):

        def has_permission(self):
            # The certificate list is public.
            return True
        paginate_by = 100

        def split_bylen(self, item, maxlen):
            """Split *item* into chunks of at most *maxlen* characters."""
            return [item[ind:ind + maxlen] for ind in range(0, len(item), maxlen)]

        def hook_header_signs(self, **kwargs):
            return 'Assinaturas Digitais'

        def hook_signs(self, *args, **kwargs):
            """Render the digital signatures of the certified object; falls
            back to the default cell on any error (e.g. missing metadata)."""
            obj = args[0].content_object
            sig_tuples = []
            try:
                signs = obj.metadata['signs']
                for fn, sigs in signs.items():
                    for sig in sigs['signs']:
                        sig_tuples.append(sig)
                sign_template = template.loader.get_template(
                    'core/sign_widget.html')
                context = {}
                context['signs'] = sig_tuples
                rendered = sign_template.render(context, self.request)
                return rendered, ''
            except Exception as e:
                return args[1], args[2]

        def hook_content_object(self, *args, **kwargs):
            """Render the certified document cell (link text, description and
            optional parent-document link)."""
            hash = args[0].hash_code  # self.split_bylen(args[0].hash_code, 64)
            if hasattr(args[0].content_object, 'anexo_de') and\
                    args[0].content_object.anexo_de.exists():
                vinculo = f'Vínculo com: {args[0].content_object.anexo_de.first()}'
            else:
                vinculo = ''
            return """%s<br>
            <small>%s</small><br>
            <small><i>%s</i></small>""" % (
                args[1],
                args[0].content_object.__descr__,
                vinculo
            ), ''
            # NOTE(review): everything below is unreachable -- the method
            # always returns above.  Dead code kept verbatim.
            return """
            %s<br><small>%s</small><br>
            <button
                class="hash_code btn btn-info"
                data-trigger="focus"
                data-container="body"
                data-toggle="popover"
                data-placement="top"
                title="Hash 512"
                data-content="%s">Hash 512</button>""" % (
                args[1],
                args[0].content_object.__descr__,
                ''.join(hash)), ''

        def hook_header_content_object(self, **kwargs):
            return 'Documentos Certificados'

        def hook_header_content_type(self, **kwargs):
            return 'Tipo do Documento'

        def hook_header_id(self, **kwargs):
            return 'Certidão'

        def hook_id(self, *args, **kwargs):
            # Zero-pad the certificate number to six digits.
            return '%06d' % int(args[1]), args[2]

        def hook_header_created(self, **kwargs):
            return 'Data/Hora'

        def hook_created(self, *args, **kwargs):
            # Localized "dd/mm/yyyy às HH:MM" timestamp.
            return '{}'.format(
                formats.date_format(
                    timezone.template_localtime(args[0].created), 'd/m/Y \à\s H:i')
            ), args[2]

    class DetailView(DetailView):
        # Subclasses Django's generic DetailView (not Crud.DetailView): the
        # "detail page" is a generated PDF, not an HTML template.
        slug_field = 'hash_code'

        @classmethod
        def get_url_regex(cls):
            return r'^/(?P<pk>\d+)$'

        def get(self, request, *args, **kwargs):
            self.object = self.get_object()
            context = self.get_context_data(object=self.object)
            return self.certidao_publicacao(request, context)

        def get_context_data(self, **kwargs):
            context = DetailView.get_context_data(self, **kwargs)
            context['print'] = 'print' in self.request.GET
            context['content_object_url'] = self.content_object_url()
            return context

        def certidao_publicacao(self, request, context):
            """Render the certificate template to PDF (WeasyPrint) and return
            it inline as 'relatorio.pdf'."""
            base_url = request.build_absolute_uri()
            html_template = render_to_string(
                'core/certidao_publicacao.html', context)
            html = HTML(base_url=base_url, string=html_template)
            main_doc = html.render(stylesheets=[])
            pdf_file = main_doc.write_pdf()

            response = HttpResponse(content_type='application/pdf;')
            response['Content-Disposition'] = 'inline; filename=relatorio.pdf'
            response['Content-Transfer-Encoding'] = 'binary'
            response.write(pdf_file)
            return response

        def content_object_url(self):
            """Build the API download URLs (original and OCR variants) for the
            certified file field."""
            cert = self.object
            co = cert.content_object
            link = reverse(
                'sapl.api:%s-%s' % (
                    co._meta.model_name,
                    cert.field_name.replace('_', '-')
                ),
                kwargs={'pk': co.id}
            )
            urls = {
                'original': '%s%s?original' % (settings.SITE_URL, link),
                'ocr': '%s%s?ocr' % (settings.SITE_URL, link),
            }
            return urls

    class CreateView(Crud.CreateView):

        @classmethod
        def get_url_regex(cls):
            return r'^/(?P<content_type>\d+)/create/(?P<pk>\d+)/(?P<field_name>\w+)$'

        def get(self, request, *args, **kwargs):
            """Generate (or reuse) the certificate and redirect to its detail
            page; on failure, flash an error and return to the document."""
            if self.certidao_generate():
                return redirect(
                    reverse('cmj.core:certidaopublicacao_detail',
                            kwargs={'pk': self.content_object.certidao.pk})
                )
            else:
                messages.add_message(
                    self.request,
                    messages.ERROR,
                    _('Não foi possível gerar certidão!'))
                return redirect(
                    reverse('%s:%s_detail' % (
                        self.content_object._meta.app_config.name,
                        self.content_object._meta.model_name),
                        kwargs={'pk': self.content_object.pk})
                )

        def certidao_generate(self):
            """Create a certificate for the object/field from the URL kwargs.

            Returns True when a certificate already exists or was generated,
            False when the field has no file or generation failed.
            """
            model = ContentType.objects.get_for_id(
                self.kwargs['content_type']).model_class()
            object = self.content_object = model.objects.get(
                pk=self.kwargs['pk'])

            if object.certidao:
                return True

            if not getattr(object, self.kwargs['field_name']):
                messages.add_message(
                    self.request,
                    messages.ERROR,
                    _('Documento sem Arquivo.'))
                return False

            u = self.request.user
            try:
                CertidaoPublicacao.gerar_certidao(
                    u, object, self.kwargs['field_name'])
            except Exception as e:
                # Best-effort: any failure is reported as "could not generate".
                return False
            return True
class BiView(ListView):
    """Business-intelligence dashboard fed by pre-aggregated ``Bi`` rows.

    Each ``Bi`` row carries a nested ``results`` mapping:
    user -> model label -> year -> counters (``total``/``count``,
    ``paginas``, ``tramitacao``).
    """

    model = Bi
    paginate_by = None

    @property
    def title(self):
        return 'B'

    def get_context_data(self, **kwargs):
        context = ListView.get_context_data(self, **kwargs)
        context['global'] = self.get_global()
        context['producao_anual'] = self.get_producao_anual()
        return context

    def get_global(self):
        """Aggregate per-label totals across all users and years.

        :return: iterable of ``(label, {'count': int, 'color': str})``
                 pairs, keeping only labels with more than 500 occurrences.
        """
        qs = self.get_queryset()
        g = {'Páginas Digitalizadas': {
            'count': 0,
            'color': ''
        }}
        for i in qs:
            results = i.results
            for user, ru in results.items():  # ru -> per-user results
                for model, rm in ru.items():  # rm -> per-model results
                    if model not in g:
                        g[model] = {
                            'count': 0,
                            # CSS class name derived from the label.
                            'color': re.sub(r'\s', '', normalize(model.lower()))
                        }
                    for ano, ra in rm.items():  # ra -> per-year counters
                        try:
                            g[model]['count'] += ra.get('total',
                                                        ra.get('count', 0))
                            g['Páginas Digitalizadas']['count'] += ra.get(
                                'paginas', 0)
                        except Exception as e:
                            # Malformed counter payload: report and skip.
                            print(ra)
        g = filter(lambda x: x[1]['count'] > 500, g.items())
        return g

    def get_producao_anual(self):
        """Build the per-year production table (documents/pages/procedures).

        Bar widths are scaled so that each series' maximum becomes 100.

        :return: list of ``(year, counters)`` pairs, most recent year first.
        """
        qs = self.get_queryset()
        pa = {}
        for i in qs:
            if i.ano not in pa:
                pa[i.ano] = {
                    'documentos': 0,
                    'paginas': 0,
                    'tramitacao': 0
                }
            results = i.results
            for user, ru in results.items():  # ru -> per-user results
                for model, rm in ru.items():  # rm -> per-model results
                    for ano, ra in rm.items():  # ra -> per-year counters
                        pa[i.ano]['documentos'] += ra.get('total', 0)
                        pa[i.ano]['paginas'] += ra.get('paginas', 0)
                        pa[i.ano]['tramitacao'] += ra.get('tramitacao', 0)

        # Find each series' maximum for normalization.  (A first
        # normalization pass that divided by the grand totals was dead code:
        # it was immediately overwritten below, could divide by zero on an
        # empty dataset, and used the wrong denominator for 'tramitacao'.
        # It has been removed together with the now-unused totals.)
        per_d_max = 0
        per_p_max = 0
        per_t_max = 0
        for k, v in pa.items():
            if v['documentos'] > per_d_max:
                per_d_max = v['documentos']
            if v['paginas'] > per_p_max:
                per_p_max = v['paginas']
            if v['tramitacao'] > per_t_max:
                per_t_max = v['tramitacao']

        for k, v in pa.items():
            v['largura'] = {
                'documentos': (v['documentos'] * (100 / per_d_max)) if per_d_max else 0,
                'paginas': (v['paginas'] * (100 / per_p_max)) if per_p_max else 0,
                'tramitacao': (v['tramitacao'] * (100 / per_t_max)) if per_t_max else 0
            }

        pa = list(pa.items())
        pa.sort(key=lambda row: row[0])
        pa.reverse()
        return pa
| cmjatai/cmj | cmj/core/views.py | Python | gpl-3.0 | 23,280 |
#!/usr/local/bin/python3.4
# dbh.ph debounce hardware
"""
Pyboard:
Switch pins: Y1 or X19
usage:
>>> init()
>>> loop()
"""
from pyb import ExtInt,Pin
# declare the pin id
pinId = 'X19'  # interrupt 0; 'Y1' -> interrupt 6

# interrupt mechanics and debounce globals
flag = 0  # edge counter incremented by the ISR (was ``False``: wrong type for a counter)
interCount = 0  # number of debounced events reported so far
eObj = None  # ExtInt handle, set by init()
# define ISR
def callback(line):
    """Interrupt handler: count one (possibly bouncing) edge.

    Runs in interrupt context, so it only bumps the counter; the heavy
    work (printing, resetting) happens in the main loop.
    """
    global flag
    flag += 1
def init():
    """Attach the ISR to *pinId*: falling edge with internal pull-up."""
    global eObj
    eObj = ExtInt(pinId, ExtInt.IRQ_FALLING, Pin.PULL_UP, callback)
def doFlag():
    """Report and reset the raw edge counter, counting one debounced event."""
    global flag, interCount
    print('Flag:', flag, '\tInterCount: ', interCount)
    flag = 0
    interCount += 1
def loop():
    """Busy-poll the edge counter, reporting whenever edges arrived.

    Runs until interrupted with Ctrl-C.
    """
    try:
        while True:
            if flag > 0:
                doFlag()
    except KeyboardInterrupt:
        print('Test ended!\nBye ...')
| gratefulfrog/ArduGuitar | Ardu2/design/POC-3_MAX395/pyboard/DraftDevt/dbh.py | Python | gpl-2.0 | 761 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# mandelbrot_bsm.py
#
# Copyright 2015 Gabriel Hondet <gabrielhondet@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import pylab as pl
def main(args):
    """Render the Mandelbrot set boundary and display it with matplotlib.

    *args* (sys.argv) is accepted for the script entry point but unused.
    """
    dim = (600, 600)
    xydim = pl.array([[-2, -2], [2, 2]])
    iterate = 20  # number of points of each orbit to compute
    im = bsm_mandelbrot(dim, xydim, F, iterate)
    pl.imshow(im, cmap="Greys")
    pl.show()
    return 0
def F(z, c):
    """Quadratic map ``z -> z**2 + c`` used for the Mandelbrot iteration."""
    return z * z + c
def orbit_boundary(F, c, iterate):
    """Iterate *F* from z=0 and count the steps for which |z| stays below 2.

    Returns *iterate* when the orbit never leaves the radius-2 disc.
    """
    z = 0j
    count = 0
    while count < iterate and abs(z) < 2:
        z = F(z, c)
        count += 1
    return count
def pix2cplx(dim, xint, yint, pix):
    """Map a pixel ``(row, col)`` to the complex-plane point it represents.

    ``xint``/``yint`` are (min, max) bounds and ``dim`` the image size.
    NOTE(review): under the Python 2 shebang ``/`` is floor division when
    all operands are ints -- confirm this is intended before porting to
    Python 3, where the result would be a true-division float.
    """
    a = xint[0] + (xint[1] - xint[0])*pix[1]/(dim[1] - 1)
    b = yint[0] + (yint[1] - yint[0])*pix[0]/(dim[0] - 1)
    return complex(a, b)
def bsm_mandelbrot(dim, xydim, F, iterate):
    """Render the Mandelbrot set boundary with the boundary scanning method.

    A pixel is marked (set to 1) when its own orbit stays bounded for
    *iterate* steps but at least one 4-neighbour escapes: that makes it a
    boundary pixel of the set.

    :param dim: image dimensions (rows, cols)
    :param xydim: 2x2 array ``[[xmin, ymin], [xmax, ymax]]`` of plane bounds
    :param F: iteration map, called as ``F(z, c)``
    :param iterate: maximum number of orbit steps
    :return: a *dim*-shaped array with 1 on boundary pixels, 0 elsewhere
    """
    im = pl.zeros(dim)
    for i0 in range(dim[0]):
        for i1 in range(dim[1]):
            # Skip the image border: its pixels lack a full 4-neighbourhood.
            # (The original code tested this but ended in ``pass``, which
            # skipped nothing, and compared against dim instead of dim - 1,
            # which could never match.)
            if i0 * i1 == 0 or i1 == dim[1] - 1 or i0 == dim[0] - 1:
                continue
            c = pix2cplx(dim, xydim[:, 0], xydim[:, 1], (i0, i1))
            i = orbit_boundary(F, c, iterate)
            if i < iterate:  # orbit escaped: pixel not in the set
                im[i0, i1] = 0
            else:  # orbit stayed bounded: inspect the 4-neighbourhood
                escaped = False
                pixs = ((i0 + 1, i1), (i0 - 1, i1), (i0, i1 - 1), (i0, i1 + 1))
                p = 0
                while not escaped and p < 4:
                    c = pix2cplx(dim, xydim[:, 0], xydim[:, 1], pixs[p])
                    i = orbit_boundary(F, c, iterate)
                    if i < iterate:  # a neighbour's orbit diverged: stop
                        escaped = True
                    else:
                        p += 1
                if p < 4:  # some neighbour diverged -> boundary pixel
                    im[i0, i1] = 1
    return im
# Script entry point: the process exit status comes from main().
if __name__ == '__main__':
    import sys
    sys.exit(main(sys.argv))
| gabrielhdt/dynamics_experiments | mandelbrot_bsm.py | Python | gpl-2.0 | 2,643 |
from tests.integration.create_token import create_token
from tests.integration.integration_test_case import IntegrationTestCase
from tests.integration.mci import mci_test_urls
class TestEmptyQuestionnaire(IntegrationTestCase):
    """Accessing or submitting the summary without any answers must redirect
    the respondent back to the first question page."""

    def test_empty_questionnaire(self):
        # Get a token
        token = create_token('0205', '1')
        resp = self.client.get('/session?token=' + token.decode(), follow_redirects=False)
        self.assertEqual(resp.status_code, 302)

        intro_page_url = resp.location

        # Navigate to the Introduction Page
        resp = self.client.get(intro_page_url, follow_redirects=False)
        self.assertEqual(resp.status_code, 200)

        # We are on the landing page
        content = resp.get_data(True)
        self.assertRegex(content, '>Start survey<')

        post_data = {
            'action[start_questionnaire]': "Submit Answers"
        }

        # Submit the Introduction page to get the first question page
        resp = self.client.post(intro_page_url, data=post_data, follow_redirects=False)
        self.assertEqual(resp.status_code, 302)

        first_question_page = resp.location

        # We try to access the submission page without entering anything
        resp = self.client.get(mci_test_urls.MCI_0205_SUMMARY, follow_redirects=False)
        self.assertEqual(resp.status_code, 302)

        # Check we are redirected back to the questionnaire
        self.assertEqual(resp.location, first_question_page)

        # We try posting to the submission page without our answers
        post_data = {
            'action[submit_answers]': "Submit Answers"
        }
        resp = self.client.post(mci_test_urls.MCI_0205_SUBMIT, data=post_data, follow_redirects=False)
        self.assertEqual(resp.status_code, 302)

        # Check we are redirected back to the questionnaire
        self.assertEqual(resp.location, first_question_page)
| qateam123/eq | tests/integration/mci/test_empty_questionnaire.py | Python | mit | 1,909 |
# -*- coding: utf-8 -*-
import urllib2, urllib, json
class SaltAPI(object):
    """Thin wrapper around the salt-api HTTP interface (Python 2 / urllib2).

    Authenticates once on construction via PAM eauth and reuses the token
    for all subsequent requests.
    """

    def __init__(self, url, username, password):
        self.__url = url.rstrip('/')
        self.__user = username
        self.__password = password
        self.__token_id = self.saltLogin()

    def saltLogin(self):
        """Log in to salt-api with PAM eauth and return the auth token string."""
        params = {'eauth': 'pam', 'username': self.__user, 'password': self.__password}
        encode = urllib.urlencode(params)
        obj = urllib.unquote(encode)
        headers = {'X-Auth-Token': ''}
        url = self.__url + '/login'
        req = urllib2.Request(url, obj, headers)
        opener = urllib2.urlopen(req)
        content = json.loads(opener.read())
        try:
            token = content['return'][0]['token']
            return token
        except KeyError:
            # Re-raises a fresh KeyError (the original context is lost).
            raise KeyError

    def postRequest(self, obj, prefix='/'):
        """POST an urlencoded payload to the API and return the parsed JSON."""
        url = self.__url + prefix
        headers = {'X-Auth-Token': self.__token_id}
        req = urllib2.Request(url, obj, headers)
        opener = urllib2.urlopen(req)
        content = json.loads(opener.read())
        return content

    def asyncMasterToMinion(self, tgt, fun, arg):
        '''
        Asynchronous execution: the master runs *fun* on the targeted minions.

        :param tgt: '*' or a comma-separated string of minion IDs
        :param fun: salt module function to use, e.g. state.sls, cmd.run
        :param arg: the command or sls file passed to *fun*
        :return: the job id (jid) string
        '''
        if tgt == '*':
            params = {'client': 'local_async', 'tgt': tgt, 'fun': fun, 'arg': arg}
        else:
            params = {'client': 'local_async', 'tgt': tgt, 'fun': fun, 'arg': arg, 'expr_form': 'list'}
        obj = urllib.urlencode(params)
        content = self.postRequest(obj)
        jid = content['return'][0]['jid']
        return jid

    def masterToMinionContent(self, tgt, fun, arg):
        '''
        Synchronous execution: returns the result content instead of a jid.

        *tgt* is either '*' or a string such as 'zhaogb-201, zhaogb-202, ...'.
        '''
        if tgt == '*':
            params = {'client': 'local', 'tgt': tgt, 'fun': fun, 'arg': arg}
        else:
            params = {'client': 'local', 'tgt': tgt, 'fun': fun, 'arg': arg, 'expr_form': 'list'}
        obj = urllib.urlencode(params)
        content = self.postRequest(obj)
        result = content['return'][0]
        return result

    def allMinionKeys(self):
        '''
        Return all minion keys, grouped as accepted, pending and rejected.

        The wheel response also contains 'local' and 'minions_denied' groups:
        [u'local', u'minions_rejected', u'minions_denied', u'minions_pre', u'minions']

        :return: (minions, minions_pre, minions_rejected)
        '''
        params = {'client': 'wheel', 'fun': 'key.list_all'}
        obj = urllib.urlencode(params)
        content = self.postRequest(obj)
        minions = content['return'][0]['data']['return']['minions']
        minions_pre = content['return'][0]['data']['return']['minions_pre']
        minions_rej = content['return'][0]['data']['return']['minions_rejected']
        return minions, minions_pre, minions_rej

    def actionKyes(self, keystrings, action):
        '''
        Apply *action* to the given minion keys.

        NOTE(review): the method name keeps its historical typo ("Kyes")
        because external callers may depend on it.

        :param keystrings: minion id string(s) to act on
        :param action: key action to perform, e.g. 'accept', 'reject', 'delete'
        :return: the wheel call's success flag
        '''
        func = 'key.' + action
        params = {'client': 'wheel', 'fun': func, 'match': keystrings}
        obj = urllib.urlencode(params)
        content = self.postRequest(obj)
        ret = content['return'][0]['data']['success']
        return ret

    def acceptKeys(self, keystrings):
        '''
        Accept a minion's pending key.

        Equivalent to ``actionKyes(keystrings, 'accept')``.
        '''
        params = {'client': 'wheel', 'fun': 'key.accept', 'match': keystrings}
        obj = urllib.urlencode(params)
        content = self.postRequest(obj)
        ret = content['return'][0]['data']['success']
        return ret

    def deleteKeys(self, keystrings):
        '''
        Delete minion keys.

        Equivalent to ``actionKyes(keystrings, 'delete')``.
        '''
        params = {'client': 'wheel', 'fun': 'key.delete', 'match': keystrings}
        obj = urllib.urlencode(params)
        content = self.postRequest(obj)
        ret = content['return'][0]['data']['success']
        return ret
#!/usr/bin/python
import time
import os
import sqlite3
import subprocess
import sys
# Module-level connection/cursor.  NOTE(review): every function below opens
# its own connection to "remote.db", so these globals appear unused --
# confirm and consider removing.
conn = sqlite3.connect("remote.db")
cursor = conn.cursor()
def playlist_add(key):
    """Append a library file key to the stored playlist key string.

    NOTE(review): the SELECT reads the row WHERE key = 1 but the UPDATE
    writes WHERE key = 0 -- confirm which playlist row is intended.
    """
    conn = sqlite3.connect("remote.db")
    cursor = conn.cursor()
    cursor.execute("SELECT file_keys FROM playlists WHERE key = 1")
    row = cursor.fetchone()
    new_keys = str(row[0]) + str(key)
    cursor.execute("UPDATE playlists SET file_keys = ? WHERE key = 0", [new_keys])
    conn.commit()
def start(executable, cmd_args, file_key, p):
    """Stop any running player and launch *executable* on a library file.

    A named pipe ('fifo') is (re)created and handed to the child process as
    stdin so later commands can be written via send_cmd().

    :param executable: player binary to run
    :param cmd_args: space-separated extra command-line arguments
    :param file_key: primary key of the file in the library table
    :param p: previous subprocess.Popen handle (may be invalid)
    :return: the new subprocess.Popen handle
    """
    conn = sqlite3.connect("remote.db")
    cursor = conn.cursor()
    try:
        # Ask the old player to quit and busy-wait until it exits.
        while p.poll() == None:
            send_cmd(p, "q")
            update_status("stopped")
    except:
        # NOTE(review): bare except -- also triggers when *p* is not a valid
        # Popen object; real errors are silently hidden.
        pass
    cursor.execute("SELECT path, name FROM library WHERE key=?", [file_key])
    path = cursor.fetchone()
    # Recreate the control pipe from scratch.
    try:
        os.remove('fifo')
    except:
        pass
    try:
        os.mkfifo('fifo')
    except:
        sys.exit("Could not create pipe 'fifo'")
    cmd_tup = [executable]
    path_list = [path[0]]
    cmd_tup = cmd_tup + cmd_args.split(' ')
    cmd_tup = cmd_tup + path_list
    # Python 2 print statement (debug output of the assembled command line).
    print cmd_tup
    pipe = os.open('fifo', os.O_NONBLOCK)
    p = subprocess.Popen(cmd_tup, stdin=pipe)
    # Busy-wait until the status table reflects the new track.
    while get_playing() != path[1]:
        update_status('playing', path[1])
    # NOTE(review): this loops while poll() is NOT None, i.e. while the
    # process has already exited -- the condition looks inverted; confirm.
    while p.poll() != None:
        time.sleep(1)
    return p
def send_cmd(p, cmd):
    """Write *cmd* to the 'fifo' pipe feeding the player; failures are
    silently ignored.  Returns *p* unchanged for caller convenience."""
    try:
        fp = open("fifo", "w")
        try:
            fp.write(cmd)
        finally:
            fp.close()
    except:
        # Pipe missing or not readable by anyone -- drop the command.
        pass
    return p
def update_status(status, name='None'):
    """Write the player *status* (and current item *name*) into the status
    table, retrying until the stored values reflect the request.

    The UPDATE touches every row and commits inside the loop, so this
    normally runs a single iteration.
    NOTE(review): the loop condition uses ``and``, so it exits as soon as
    EITHER field already matches -- confirm ``or`` was not intended.
    """
    conn = sqlite3.connect("remote.db")
    cursor = conn.cursor()
    while (get_status() != status and get_playing() != name):
        cursor.execute("UPDATE status SET status=?, name=?", [status, name])
        conn.commit()
def get_status():
    """Return the current playback status string from the status table."""
    db = sqlite3.connect("remote.db")
    row = db.cursor().execute("SELECT status FROM status").fetchone()
    return row[0]
def get_playing():
    """Return the name of the currently loaded media item from the status table."""
    db = sqlite3.connect("remote.db")
    row = db.cursor().execute("SELECT name FROM status").fetchone()
    return row[0]
def process_status(p):
    """Return the exit status of player process *p*.

    Mirrors ``Popen.poll()``: ``None`` while the process is still running,
    its exit code once finished.  Returns ``True`` when *p* is not a usable
    process object (e.g. ``None`` because nothing was started yet).
    """
    try:
        return p.poll()
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed here.
        return True
def add_path_to_library(path, recurse = 1):
    """Scan *path* for media files and insert them into the 'library' table.

    :param path: directory to scan
    :param recurse: when 1 (the default) walk subdirectories; otherwise only
        the top level of *path* is scanned.
        BUG FIX: the parameter was previously overwritten by a hard-coded
        ``recurse = 1``, making the non-recursive branch unreachable.
    """
    file_exts = ['.mp3', '.avi', '.mp4', '.mkv', '.flac']
    conn = sqlite3.connect("remote.db")
    cursor = conn.cursor()
    if recurse == 1:
        for root, dirs, files in os.walk(path):
            # Debug output; print(...) with a single argument is valid in
            # both Python 2 and 3.
            print("Root:")
            print(root)
            print("Dirs:")
            print(dirs)
            print("Files:")
            print(files)
            for filename in files:
                if os.path.splitext(filename)[1] in file_exts:
                    name = os.path.splitext(filename)[0]
                    full_path = root + "/" + filename.strip()
                    ext = os.path.splitext(filename)[1]
                    print(full_path)
                    size = os.path.getsize(full_path)
                    try:
                        cursor.execute("INSERT INTO library (name, path, type, size) VALUES ( ?, ?, ?, ?)", [name, full_path, ext, size])
                    except sqlite3.IntegrityError:
                        # File already present in the library -- skip it.
                        continue
    else:
        for filename in os.listdir(path):
            if os.path.splitext(filename)[1] in file_exts:
                # BUG FIX: was ``path.stip()`` (AttributeError) and joined
                # without a separator; use os.path.join instead.
                full_path = os.path.join(path.strip(), filename.strip())
                name = os.path.splitext(filename)[0]
                ext = os.path.splitext(filename)[1]
                size = os.path.getsize(full_path)
                try:
                    cursor.execute("INSERT INTO library (name, path, type, size) VALUES (?, ?, ?, ?)", [name, full_path, ext, size])
                except sqlite3.IntegrityError:
                    # Narrowed from a bare ``except:``; duplicates are skipped.
                    continue
    conn.commit()
| bmbove/omxremote | controls.py | Python | bsd-3-clause | 3,762 |
import os
import pickle
import numpy as np
import pandas as pd
from os import path
import seaborn as sns
from operator import add
from scipy import sparse, io
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from dotenv import load_dotenv, find_dotenv
from sklearn.preprocessing import LabelEncoder
%matplotlib inline
# Locate the project's .env file and read the raw-data directory from it.
dotenv_path = find_dotenv()
load_dotenv(dotenv_path)
RAW_DATA_DIR = os.environ.get("RAW_DATA_DIR")
# Join phone brand/model info onto the labelled training devices.
train = pd.read_csv(path.join(RAW_DATA_DIR, 'gender_age_train.csv'))
phone = pd.read_csv(path.join(RAW_DATA_DIR, 'phone_brand_device_model.csv'))
phone_t = train.merge(phone, how='left', on='device_id')
# Most/least popular phone brands over all users.
most_pop_brand = phone_t['phone_brand'].value_counts().head(20)
print most_pop_brand
least_pop_brand = phone_t['phone_brand'].value_counts(ascending=True).head(30)
print least_pop_brand
# Per top-20 brand, count female and male owners.
female_brands = []
male_brands = []
for brand in most_pop_brand.index:
    app_rows = phone_t[ phone_t['phone_brand'] == brand]
    female_brands.append(len(app_rows.query('gender == "F"')))
    male_brands.append(len(app_rows.query('gender == "M"')))
# Fractions of owners per gender, formatted to three decimals.
f_users = list(female_brands/most_pop_brand)
m_users = list(male_brands/most_pop_brand)
f_users = [ '%.3f' % elem for elem in f_users]
m_users = [ '%.3f' % elem for elem in m_users]
print "For the first 5 most installed categories the percentage of users are: "
print "- females: {}\n- males: {}".format(f_users[:5], m_users[:5])
# Stacked bar chart: male counts at the bottom, female counts on top.
ind = np.arange(len(most_pop_brand)) # the x locations for the groups
width = 0.35
p1 = plt.bar(ind, male_brands, width, color="#1292db")
p2 = plt.bar(ind, female_brands, width, color="#ff69b4", bottom=male_brands)
plt.ylabel('Number of events')
plt.title('Difference in use of the ten most frequent labels between M and F')
plt.legend((p2[0], p1[0]), ('Women', 'Men'))
plt.show()
# FEMALE --------------------------------------------------------------------
# Same brand analysis, but the top-20 list is computed from female users only.
brand_fem = phone_t.query("gender == 'F'")
most_pop_brand = brand_fem['phone_brand'].value_counts().head(20)
print most_pop_brand
least_pop_brand = brand_fem['phone_brand'].value_counts(ascending=True).head(30)
print least_pop_brand
# For each brand popular among women, count all female and male owners.
female_brands = []
male_brands = []
for brand in most_pop_brand.index:
    app_rows = phone_t[ phone_t['phone_brand'] == brand]
    female_brands.append(len(app_rows.query('gender == "F"')))
    male_brands.append(len(app_rows.query('gender == "M"')))
# Normalise by the combined (F + M) owner count per brand.
tot = map(add, female_brands, male_brands)
total = pd.Series(tot, index=most_pop_brand.index)
f_users = list(female_brands/total)
m_users = list(male_brands/total)
f_users = [ '%.3f' % elem for elem in f_users]
m_users = [ '%.3f' % elem for elem in m_users]
print "For the first 5 most installed categories the percentage of users are: "
print "- females: {}\n- males: {}".format(f_users[:5], m_users[:5])
# Stacked bar chart: male counts at the bottom, female counts on top.
ind = np.arange(len(most_pop_brand)) # the x locations for the groups
width = 0.35
p1 = plt.bar(ind, male_brands, width, color="#1292db")
p2 = plt.bar(ind, female_brands, width, color="#ff69b4", bottom=male_brands)
plt.ylabel('Number of events')
plt.title('Difference in use of the ten most frequent labels between M and F')
plt.legend((p2[0], p1[0]), ('Women', 'Men'))
plt.show()
# MALE -----------------------------------------------------------------------
brand_male = phone_t.query("gender == 'F'")
most_pop_brand = brand_male['phone_brand'].value_counts().head(20)
print most_pop_brand
least_pop_brand = brand_male['phone_brand'].value_counts(ascending=True).head(30)
print least_pop_brand
female_brands = []
male_brands = []
for brand in most_pop_brand.index:
app_rows = phone_t[ phone_t['phone_brand'] == brand]
female_brands.append(len(app_rows.query('gender == "F"')))
male_brands.append(len(app_rows.query('gender == "M"')))
tot = map(add, female_brands, male_brands)
total = pd.Series(tot, index=most_pop_brand.index)
f_users = list(female_brands/total)
m_users = list(male_brands/total)
f_users = [ '%.3f' % elem for elem in f_users]
m_users = [ '%.3f' % elem for elem in m_users]
print "For the first 5 most installed categories the percentage of users are: "
print "- females: {}\n- males: {}".format(f_users[:5], m_users[:5])
ind = np.arange(len(most_pop_brand)) # the x locations for the groups
width = 0.35
p1 = plt.bar(ind, male_brands, width, color="#1292db")
p2 = plt.bar(ind, female_brands, width, color="#ff69b4", bottom=male_brands)
plt.ylabel('Number of events')
plt.title('Difference in use of the ten most frequent labels between M and F')
plt.legend((p2[0], p1[0]), ('Women', 'Men'))
plt.show()
# DEVICE --------------------------------------------------------------------
# Repeat the analysis on brand+model pairs (brand name concatenated with the
# device model), over all users.
phone_t['brand_model'] = phone_t['phone_brand'].str.cat(phone_t['device_model'])
most_pop_brand = phone_t['brand_model'].value_counts().head(20)
print most_pop_brand
least_pop_brand = phone_t['brand_model'].value_counts(ascending=True).head(30)
print least_pop_brand
# Per top-20 device model, count female and male owners.
female_brands = []
male_brands = []
for brand in most_pop_brand.index:
    app_rows = phone_t[ phone_t['brand_model'] == brand]
    female_brands.append(len(app_rows.query('gender == "F"')))
    male_brands.append(len(app_rows.query('gender == "M"')))
# Fractions of owners per gender, formatted to three decimals.
f_users = list(female_brands/most_pop_brand)
m_users = list(male_brands/most_pop_brand)
f_users = [ '%.3f' % elem for elem in f_users]
m_users = [ '%.3f' % elem for elem in m_users]
print "For the first 5 most installed categories the percentage of users are: "
print "- females: {}\n- males: {}".format(f_users[:5], m_users[:5])
# Stacked bar chart: male counts at the bottom, female counts on top.
ind = np.arange(len(most_pop_brand)) # the x locations for the groups
width = 0.35
p1 = plt.bar(ind, male_brands, width, color="#1292db")
p2 = plt.bar(ind, female_brands, width, color="#ff69b4", bottom=male_brands)
plt.ylabel('Number of events')
plt.title('Difference in use of the ten most frequent device models between M and F')
plt.legend((p2[0], p1[0]), ('Women', 'Men'))
plt.show()
# FEMALE --------------------------------------------------------------------
# Brand+model analysis with the top-20 list computed from female users only.
brand_fem = phone_t.query("gender == 'F'")
most_pop_brand = brand_fem['brand_model'].value_counts().head(20)
print most_pop_brand
least_pop_brand = brand_fem['brand_model'].value_counts(ascending=True).head(30)
print least_pop_brand
# For each model popular among women, count all female and male owners.
female_brands = []
male_brands = []
for brand in most_pop_brand.index:
    app_rows = phone_t[ phone_t['brand_model'] == brand]
    female_brands.append(len(app_rows.query('gender == "F"')))
    male_brands.append(len(app_rows.query('gender == "M"')))
# Normalise by the combined (F + M) owner count per model.
tot = map(add, female_brands, male_brands)
total = pd.Series(tot, index=most_pop_brand.index)
f_users = list(female_brands/total)
m_users = list(male_brands/total)
f_users = [ '%.3f' % elem for elem in f_users]
m_users = [ '%.3f' % elem for elem in m_users]
print "For the first 5 most installed categories the percentage of users are: "
print "- females: {}\n- males: {}".format(f_users[:5], m_users[:5])
# Stacked bar chart: male counts at the bottom, female counts on top.
ind = np.arange(len(most_pop_brand)) # the x locations for the groups
width = 0.35
p1 = plt.bar(ind, male_brands, width, color="#1292db")
p2 = plt.bar(ind, female_brands, width, color="#ff69b4", bottom=male_brands)
plt.ylabel('Number of events')
plt.title('Difference in use of the ten most frequent labels between M and F')
plt.legend((p2[0], p1[0]), ('Women', 'Men'))
plt.show()
# MALE -----------------------------------------------------------------------
brand_male = phone_t.query("gender == 'F'")
most_pop_brand = brand_male['brand_model'].value_counts().head(20)
print most_pop_brand
least_pop_brand = brand_male['brand_model'].value_counts(ascending=True).head(30)
print least_pop_brand
female_brands = []
male_brands = []
for brand in most_pop_brand.index:
app_rows = phone_t[ phone_t['brand_model'] == brand]
female_brands.append(len(app_rows.query('gender == "F"')))
male_brands.append(len(app_rows.query('gender == "M"')))
tot = map(add, female_brands, male_brands)
total = pd.Series(tot, index=most_pop_brand.index)
f_users = list(female_brands/total)
m_users = list(male_brands/total)
f_users = [ '%.3f' % elem for elem in f_users]
m_users = [ '%.3f' % elem for elem in m_users]
print "For the first 5 most installed categories the percentage of users are: "
print "- females: {}\n- males: {}".format(f_users[:5], m_users[:5])
ind = np.arange(len(most_pop_brand)) # the x locations for the groups
width = 0.35
p1 = plt.bar(ind, male_brands, width, color="#1292db")
p2 = plt.bar(ind, female_brands, width, color="#ff69b4", bottom=male_brands)
plt.ylabel('Number of events')
plt.title('Difference in use of the ten most frequent labels between M and F')
plt.legend((p2[0], p1[0]), ('Women', 'Men'))
plt.show()
# ADD NEW FEATURES -----------------------------------------------------------
# Enrich the phone data with hardware specs (price, screen size, RAM, ...).
# BUG FIX: FEATURES_DATA_DIR was referenced without ever being defined (only
# RAW_DATA_DIR is read above), which raises a NameError; read it from the
# environment the same way RAW_DATA_DIR is obtained.
FEATURES_DATA_DIR = os.environ.get("FEATURES_DATA_DIR")
specs_table = pd.read_csv(path.join(FEATURES_DATA_DIR, 'specs_table.csv'))
model_mapping = pd.read_csv(path.join(FEATURES_DATA_DIR, 'model_mapping.csv'))
brand_mapping = pd.read_csv(path.join(FEATURES_DATA_DIR, 'brand_mapping.csv'))
# Map the Chinese brand/model names to their latin transliterations, then
# drop the originals and rename the latin columns into their place.
phone_t = phone_t.drop_duplicates('device_id')
phone_t = phone_t.merge(brand_mapping, how='left', left_on='phone_brand',
                        right_on='phone_brand_chinese')
phone_t = phone_t.merge(model_mapping, how='left', left_on='device_model',
                        right_on='device_model_chinese')
phone_t = phone_t.drop(['phone_brand', 'device_model',
                        'phone_brand_chinese', 'device_model_chinese'], axis=1)
phone_t = phone_t.drop_duplicates('device_id')
phone_t = phone_t.rename( columns = {'phone_brand_latin': 'phone_brand',
                                     'device_model_latin': 'device_model'})
# Attach the spec columns; devices without specs become -1 after fillna and
# are removed when they lack a price.
phone_specs = phone_t.merge(specs_table,
                            on=['phone_brand', 'device_model'],
                            how='left',
                            suffixes=['', '_R'])
phone_specs = phone_specs.fillna(-1)
phone_specs = phone_specs[phone_specs['price_eur'] != -1]
# Quick skew check: is the single most common price more frequent than half
# of all the remaining prices combined?
v = phone_specs['price_eur'].value_counts()
v.iloc[0] > sum(v.iloc[1:])/2
# Density comparisons of each spec feature split by gender.
sns.distplot(phone_specs['price_eur'])
sns.kdeplot(phone_specs.price_eur[phone_specs['gender'] == 'M'], label='Male')
sns.kdeplot(phone_specs.price_eur[phone_specs['gender'] == 'F'], label='Female')
plt.legend()
sns.kdeplot(phone_specs.screen_size[phone_specs['gender'] == 'M'], label='Male')
sns.kdeplot(phone_specs.screen_size[phone_specs['gender'] == 'F'], label='Female')
plt.legend()
sns.kdeplot(phone_specs.ram_gb[phone_specs['gender'] == 'M'], label='Male')
sns.kdeplot(phone_specs.ram_gb[phone_specs['gender'] == 'F'], label='Female')
plt.legend()
sns.kdeplot(phone_specs.release_year[phone_specs['gender'] == 'M'], label='Male')
sns.kdeplot(phone_specs.release_year[phone_specs['gender'] == 'F'], label='Female')
plt.legend()
sns.kdeplot(phone_specs.camera[phone_specs['gender'] == 'M'], label='Male')
sns.kdeplot(phone_specs.camera[phone_specs['gender'] == 'F'], label='Female')
plt.legend()
| Kebniss/TalkingData-Mobile-User-Demographics | src/data/phone_exploration.py | Python | mit | 10,784 |
##############################################################################
# Copyright (c) 2017-2018, The VOTCA Development Team (http://www.votca.org)
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class VotcaXtp(CMakePackage):
    """Versatile Object-oriented Toolkit for Coarse-graining
    Applications (VOTCA) is a package intended to reduce the amount of
    routine work when doing systematic coarse-graining of various
    systems. The core is written in C++.
    This package contains the VOTCA exciton transport engine.
    """
    # Locations Spack fetches from: release tarballs (url) and the git repo.
    homepage = "http://www.votca.org"
    url = "https://github.com/votca/xtp/tarball/v1.4.1"
    git = "https://github.com/votca/xtp.git"
    # 'develop' tracks upstream master; 1.4.1 is pinned by its md5 checksum.
    version('develop', branch='master')
    version('1.4.1', '31a2dbd8bd48bf337bc88b20ab312050')
    depends_on("cmake@2.8:", type='build')
    # The votca-tools/csg/ctp components must match the xtp release series.
    depends_on("votca-tools@develop", when='@develop')
    depends_on("votca-tools@1.4:1.4.999", when='@1.4:1.4.999')
    depends_on("votca-csg@develop", when='@develop')
    depends_on("votca-csg@1.4:1.4.999", when='@1.4:1.4.999')
    depends_on("votca-ctp@develop", when='@develop')
    # Extra numerical backends required starting with the 1.5 series.
    depends_on("libxc", when='@1.5:')
    depends_on("ceres-solver", when='@1.5:')
| mfherbst/spack | var/spack/repos/builtin/packages/votca-xtp/package.py | Python | lgpl-2.1 | 2,206 |
import unittest
from conans.test.utils.tools import TestClient, TestServer
from collections import OrderedDict
from conans.util.files import load
class RemoteTest(unittest.TestCase):
    """Functional tests for the ``conan remote`` command family.

    Each test starts from three pre-registered servers (remote0..remote2),
    inserted in order, with a logged-in user on each -- several tests rely
    on that positional ordering of the remotes list.
    """
    def setUp(self):
        # OrderedDict so remote0/1/2 keep their registration order.
        self.servers = OrderedDict()
        self.users = {}
        for i in range(3):
            test_server = TestServer()
            self.servers["remote%d" % i] = test_server
            self.users["remote%d" % i] = [("lasote", "mypass")]
        self.client = TestClient(servers=self.servers, users=self.users)
    def basic_test(self):
        """list / add / update / remove round-trip on the remotes registry."""
        self.client.run("remote list")
        self.assertIn("remote0: http://", self.client.user_io.out)
        self.assertIn("remote1: http://", self.client.user_io.out)
        self.assertIn("remote2: http://", self.client.user_io.out)
        # A newly added remote is appended after the three defaults.
        self.client.run("remote add origin https://myurl")
        self.client.run("remote list")
        lines = str(self.client.user_io.out).splitlines()
        self.assertIn("origin: https://myurl", lines[3])
        self.client.run("remote update origin https://2myurl")
        self.client.run("remote list")
        self.assertIn("origin: https://2myurl", self.client.user_io.out)
        # Updating a URL keeps the remote's position.
        self.client.run("remote update remote0 https://remote0url")
        self.client.run("remote list")
        output = str(self.client.user_io.out)
        self.assertIn("remote0: https://remote0url", output.splitlines()[0])
        # After removal the next remote moves up to the first slot.
        self.client.run("remote remove remote0")
        self.client.run("remote list")
        output = str(self.client.user_io.out)
        self.assertIn("remote1: http://", output.splitlines()[0])
    def insert_test(self):
        """--insert places a new remote at the given position (default 0)."""
        self.client.run("remote add origin https://myurl --insert")
        self.client.run("remote list")
        first_line = str(self.client.user_io.out).splitlines()[0]
        self.assertIn("origin: https://myurl", first_line)
        self.client.run("remote add origin2 https://myurl2 --insert=0")
        self.client.run("remote list")
        lines = str(self.client.user_io.out).splitlines()
        self.assertIn("origin2: https://myurl2", lines[0])
        self.assertIn("origin: https://myurl", lines[1])
        self.client.run("remote add origin3 https://myurl3 --insert=1")
        self.client.run("remote list")
        lines = str(self.client.user_io.out).splitlines()
        self.assertIn("origin2: https://myurl2", lines[0])
        self.assertIn("origin3: https://myurl3", lines[1])
        self.assertIn("origin: https://myurl", lines[2])
    def update_test_insert(self):
        """``remote update --insert`` both updates the URL and repositions."""
        client = TestClient()
        client.run("remote add r1 https://r1")
        client.run("remote add r2 https://r2")
        client.run("remote add r3 https://r3")
        # --insert with no value moves the remote to the front.
        client.run("remote update r2 https://r2new --insert")
        client.run("remote list")
        lines = str(client.user_io.out).splitlines()
        self.assertIn("r2: https://r2new", lines[0])
        self.assertIn("r1: https://r1", lines[1])
        self.assertIn("r3: https://r3", lines[2])
        # --insert N moves it to position N.
        client.run("remote update r2 https://r2new2 --insert 2")
        client.run("remote list")
        lines = str(client.user_io.out).splitlines()
        self.assertIn("r1: https://r1", lines[0])
        self.assertIn("r3: https://r3", lines[1])
        self.assertIn("r2: https://r2new2", lines[2])
    def verify_ssl_test(self):
        """The verify-ssl argument accepts several boolean spellings."""
        client = TestClient()
        client.run("remote add my-remote http://someurl TRUE")
        client.run("remote add my-remote2 http://someurl2 yes")
        client.run("remote add my-remote3 http://someurl3 FALse")
        client.run("remote add my-remote4 http://someurl4 No")
        registry = load(client.client_cache.registry)
        self.assertIn("my-remote http://someurl True", registry)
        self.assertIn("my-remote2 http://someurl2 True", registry)
        self.assertIn("my-remote3 http://someurl3 False", registry)
        self.assertIn("my-remote4 http://someurl4 False", registry)
    def verify_ssl_error_test(self):
        """A non-boolean verify-ssl value errors out and leaves no registry entry."""
        client = TestClient()
        error = client.run("remote add my-remote http://someurl some_invalid_option=foo",
                           ignore_error=True)
        self.assertTrue(error)
        self.assertIn("ERROR: Unrecognized boolean value 'some_invalid_option=foo'",
                      client.user_io.out)
        self.assertEqual("", load(client.client_cache.registry))
    def errors_test(self):
        """update/remove on an unknown remote reports a clear error."""
        self.client.run("remote update origin url", ignore_error=True)
        self.assertIn("ERROR: Remote 'origin' not found in remotes", self.client.user_io.out)
        self.client.run("remote remove origin", ignore_error=True)
        self.assertIn("ERROR: Remote 'origin' not found in remotes", self.client.user_io.out)
    def duplicated_error_tests(self):
        """ check remote name and URL are not duplicated
        """
        error = self.client.run("remote add remote1 http://otherurl", ignore_error=True)
        self.assertTrue(error)
        self.assertIn("ERROR: Remote 'remote1' already exists in remotes (use update to modify)",
                      self.client.user_io.out)
        # Grab remote0's URL from the list output and try to reuse it.
        self.client.run("remote list")
        url = str(self.client.user_io.out).split()[1]
        error = self.client.run("remote add newname %s" % url, ignore_error=True)
        self.assertTrue(error)
        self.assertIn("Remote 'remote0' already exists with same URL",
                      self.client.user_io.out)
        error = self.client.run("remote update remote1 %s" % url, ignore_error=True)
        self.assertTrue(error)
        self.assertIn("Remote 'remote0' already exists with same URL",
                      self.client.user_io.out)
    def basic_refs_test(self):
        """add_ref / list_ref / remove_ref / update_ref manage the recipe->remote map."""
        self.client.run("remote add_ref Hello/0.1@user/testing remote0")
        self.client.run("remote list_ref")
        self.assertIn("Hello/0.1@user/testing: remote0", self.client.user_io.out)
        self.client.run("remote add_ref Hello1/0.1@user/testing remote1")
        self.client.run("remote list_ref")
        self.assertIn("Hello/0.1@user/testing: remote0", self.client.user_io.out)
        self.assertIn("Hello1/0.1@user/testing: remote1", self.client.user_io.out)
        self.client.run("remote remove_ref Hello1/0.1@user/testing")
        self.client.run("remote list_ref")
        self.assertIn("Hello/0.1@user/testing: remote0", self.client.user_io.out)
        self.assertNotIn("Hello1/0.1@user/testing", self.client.user_io.out)
        self.client.run("remote add_ref Hello1/0.1@user/testing remote1")
        self.client.run("remote list_ref")
        self.assertIn("Hello/0.1@user/testing: remote0", self.client.user_io.out)
        self.assertIn("Hello1/0.1@user/testing: remote1", self.client.user_io.out)
        self.client.run("remote update_ref Hello1/0.1@user/testing remote2")
        self.client.run("remote list_ref")
        self.assertIn("Hello/0.1@user/testing: remote0", self.client.user_io.out)
        self.assertIn("Hello1/0.1@user/testing: remote2", self.client.user_io.out)
| tivek/conan | conans/test/command/remote_test.py | Python | mit | 7,032 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.