| code (stringlengths 2–1.05M) | repo_name (stringlengths 5–104) | path (stringlengths 4–251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int32 2–1.05M) |
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
r"""
Monkey patches to make the MacLane code work in standalone mode, i.e., without
modifying the sage source code at build time.
"""
#*****************************************************************************
# Copyright (C) 2016 Julian Rüth <julian.rueth@fsfe.org>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
# Fix doctests so they work in standalone mode (when invoked with sage -t, they run within the mac_lane/ directory)
import sys, os
if hasattr(sys.modules['__main__'], 'DC') and 'standalone' in sys.modules['__main__'].DC.options.optional:
sys.path.append(os.getcwd())
sys.path.append(os.path.dirname(os.getcwd()))
import valuation_space
from valuation_space import DiscretePseudoValuationSpace
import trivial_valuation
from trivial_valuation import TrivialValuation, TrivialPseudoValuation
import padic_valuation
from padic_valuation import pAdicValuation
import gauss_valuation
from gauss_valuation import GaussValuation
import value_group
from value_group import DiscreteValuationCodomain, DiscreteValueGroup, DiscreteValueSemigroup
import function_field_valuation
from function_field_valuation import FunctionFieldValuation
import augmented_valuation
from augmented_valuation import AugmentedValuation
import scaled_valuation
from scaled_valuation import ScaledValuation
# fix unpickling and type checks of classes (otherwise, the instances of the
# local file and the instances that come from the mac_lane import define
# different types)
from trivial_valuation import TrivialDiscreteValuation, TrivialDiscretePseudoValuation
from function_field_valuation import FunctionFieldValuation_base, DiscreteFunctionFieldValuation_base, RationalFunctionFieldValuation_base, InducedFunctionFieldValuation_base, ClassicalFunctionFieldValuation_base, FunctionFieldFromLimitValuation, InfiniteRationalFunctionFieldValuation, FiniteRationalFunctionFieldValuation, NonClassicalRationalFunctionFieldValuation, FunctionFieldMappedValuation_base, FunctionFieldExtensionMappedValuation, RationalFunctionFieldMappedValuation
from limit_valuation import LimitValuation, MacLaneLimitValuation, LimitValuation_generic
from mapped_valuation import MappedValuation_base, FiniteExtensionFromLimitValuation, FiniteExtensionFromInfiniteValuation
from augmented_valuation import FiniteAugmentedValuation, InfiniteAugmentedValuation
from gauss_valuation import GaussValuation_generic
import valuation
from valuation import DiscretePseudoValuation, DiscreteValuation, InfiniteDiscretePseudoValuation
from padic_valuation import pAdicValuation_base, pAdicValuation_int, pAdicValuation_padic, pAdicFromLimitValuation
from developing_valuation import DevelopingValuation
from augmented_valuation import AugmentedValuation_base, FinalAugmentedValuation, NonFinalAugmentedValuation, FinalFiniteAugmentedValuation, NonFinalFiniteAugmentedValuation
from inductive_valuation import FiniteInductiveValuation, FinalInductiveValuation, InfiniteInductiveValuation, NonFinalInductiveValuation
from scaled_valuation import ScaledValuation_generic
# =================
# MONKEY PATCH SAGE
# =================
import sage
# Implement Qp/Zp.valuation
sage.rings.padics.padic_generic.pAdicGeneric.valuation = lambda self: pAdicValuation(self)
# Fix contains check of rational function fields
def to_polynomial(self, x):
r"""
TESTS::
sage: sys.path.append(os.getcwd()); from mac_lane import * # optional: standalone
sage: K.<x> = FunctionField(QQ)
sage: K(x) in K._ring # indirect doctest
True
"""
R = x.parent()._ring
K = x.parent().constant_base_field()
if x.denominator() in K:
return x.numerator()/K(x.denominator())
raise ValueError("Only polynomials can be converted to the underlying polynomial ring")
def to_constant(self, x):
r"""
TESTS::
sage: sys.path.append(os.getcwd()); from mac_lane import * # optional: standalone
sage: K.<x> = FunctionField(QQ)
sage: K(1) in QQ # indirect doctest
True
"""
K = x.parent().constant_base_field()
if x.denominator() in K and x.numerator() in K:
return K(x.numerator()) / K(x.denominator())
raise ValueError("only constants can be converted to the underlying constant field")
sage.rings.function_field.function_field.RationalFunctionField._to_polynomial = to_polynomial
sage.rings.function_field.function_field.RationalFunctionField._to_constant = to_constant
if not hasattr(sage.rings.function_field.function_field.RationalFunctionField, "__old_init__"):
sage.rings.function_field.function_field.RationalFunctionField.__old_init__ = sage.rings.function_field.function_field.RationalFunctionField.__init__
def __init__(self, *args, **kwargs):
r"""
TESTS::
sage: sys.path.append(os.getcwd()); from mac_lane import * # optional: standalone
sage: K.<x> = FunctionField(QQ)
sage: K(1/2) in QQ
True
"""
self.__old_init__(*args, **kwargs)
from sage.categories.morphism import SetMorphism
self._ring.register_conversion(SetMorphism(self.Hom(self._ring), self._to_polynomial))
try:
self.constant_base_field().register_conversion(SetMorphism(self.Hom(self.constant_base_field()), self._to_constant))
except AssertionError:
# since #21872 there is already such a conversion
pass
sage.rings.function_field.function_field.RationalFunctionField.__init__ = __init__
del(__init__)
del(to_polynomial)
del(to_constant)
# implement principal_part for newton polygons
r"""
TESTS::
sage: sys.path.append(os.getcwd()); from mac_lane import * # optional: standalone
sage: NP = sage.geometry.newton_polygon.NewtonPolygon([(0,1),(1,0),(2,1)])
sage: NP.principal_part()
Infinite Newton polygon with 2 vertices: (0, 1), (1, 0) ending by an infinite line of slope 0
"""
import sage.geometry.newton_polygon
sage.geometry.newton_polygon.NewtonPolygon_element.principal_part = lambda self: sage.geometry.newton_polygon.NewtonPolygon(self.vertices(), last_slope=0)
sage.geometry.newton_polygon.NewtonPolygon_element.sides = lambda self: zip(self.vertices(), self.vertices()[1:])
# implement coercion of function fields that comes from coercion of their base fields
# Frac(K[x]) injects into K(x)
class DefaultConvertMap_unique_patched2(sage.structure.coerce_maps.DefaultConvertMap_unique):
def is_injective(self):
r"""
TESTS::
sage: sys.path.append(os.getcwd()); from mac_lane import * # optional: standalone
sage: R.<x> = QQ[]
sage: K.<x> = FunctionField(QQ)
sage: R.fraction_field().is_subring(K) # indirect doctest
True
"""
from sage.categories.fields import Fields
if self.domain() in Fields():
return True
raise NotImplementedError
def _coerce_map_from_(target, source):
r"""
TESTS::
sage: sys.path.append(os.getcwd()); from mac_lane import * # optional: standalone
sage: K.<x> = FunctionField(QQ)
sage: L.<x> = FunctionField(GaussianIntegers().fraction_field())
sage: L.has_coerce_map_from(K)
True
sage: K.<x> = FunctionField(QQ)
sage: R.<y> = K[]
sage: L.<y> = K.extension(y^3 + 1)
sage: K.<x> = FunctionField(GaussianIntegers().fraction_field())
sage: R.<y> = K[]
sage: M.<y> = K.extension(y^3 + 1)
sage: M.has_coerce_map_from(L) # not tested, base morphism is not implemented
True
sage: K.<x> = FunctionField(QQ)
sage: R.<I> = K[]
sage: L.<I> = K.extension(I^2 + 1)
sage: M.<x> = FunctionField(GaussianIntegers().fraction_field())
sage: M.has_coerce_map_from(L) # not tested, base_morphism is not implemented
True
"""
from sage.categories.function_fields import FunctionFields
if source in FunctionFields():
if source.base_field() is source:
if target.base_field() is target:
# source and target are rational function fields
if source.variable_name() == target.variable_name():
# ... in the same variable
base_coercion = target.constant_field().coerce_map_from(source.constant_field())
if base_coercion is not None:
return source.hom([target.gen()], base_morphism=base_coercion)
else:
# source is an extensions of rational function fields
base_coercion = target.coerce_map_from(source.base_field())
if base_coercion is not None:
# the base field of source coerces into the base field of target
target_polynomial = source.polynomial().map_coefficients(base_coercion)
# try to find a root of the defining polynomial in target
if target_polynomial(target.gen()) == 0:
# The defining polynomial of source has a root in target,
# therefore there is a map. To be sure that it is
# canonical, we require a root of the defining polynomial
# of target to be a root of the defining polynomial of
# source (and that the variables are named equally):
if source.variable_name() == target.variable_name():
return source.hom([target.gen()], base_morphism=base_coercion)
roots = target_polynomial.roots()
for root, _ in roots:
if target_polynomial(root) == 0:
# The defining polynomial of source has a root in target,
# therefore there is a map. To be sure that it is
# canonical, we require the names of the roots to match
if source.variable_name() == repr(root):
return source.hom([root], base_morphism=base_coercion)
if source is target._ring:
return DefaultConvertMap_unique_patched2(source, target)
if source is target._ring.fraction_field():
return DefaultConvertMap_unique_patched2(source, target)
sage.rings.function_field.function_field.FunctionField._coerce_map_from_ = _coerce_map_from_
del(_coerce_map_from_)
# patch is_injective() for many morphisms
def patch_is_injective(method, patch_map):
r"""
Patch ``method`` to return ``patch_map[type]`` if it returned a result of
``type``.
TESTS::
sage: sys.path.append(os.getcwd()); from mac_lane import * # optional: standalone
sage: QQ.coerce_map_from(ZZ).is_injective() # indirect doctest
True
"""
def patched(*args, **kwargs):
ret = method(*args, **kwargs)
if type(ret) in patch_map:
ret = patch_map[type(ret)](ret)
return ret
return patched
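# A minimal sketch of the wrapper above, using toy callables rather than Sage
# maps (the names below are hypothetical, for illustration only): a function
# returning an ``int`` wrapped with the patch map ``{int: float}`` has its
# result upgraded, which is exactly how the coercion maps below get swapped
# for their patched subclasses.
#
#     def _toy(): return 1
#     patch_is_injective(_toy, {int: float})()   # returns 1.0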
# a ring homomorphism from a field into a ring is injective (as it respects inverses)
class RingHomomorphism_coercion_patched(sage.rings.morphism.RingHomomorphism_coercion):
def is_injective(self):
r"""
TESTS::
sage: sys.path.append(os.getcwd()); from mac_lane import * # optional: standalone
sage: QQ.coerce_map_from(ZZ).is_injective() # indirect doctest
True
sage: Hom(ZZ,QQ['x']).natural_map().is_injective()
True
sage: R.<x> = ZZ[]
sage: R.<xbar> = R.quo(x^2+x+1)
sage: Hom(ZZ,R).natural_map().is_injective()
True
sage: R.<x> = QQbar[]
sage: R.coerce_map_from(QQbar).is_injective()
True
"""
from sage.categories.all import Fields, IntegralDomains
from sage.rings.number_field.order import AbsoluteOrder
from sage.rings.polynomial.polynomial_ring import is_PolynomialRing
# this should be implemented as far down as possible
if self.domain() in Fields(): return True
if self.domain() == sage.all.ZZ and self.codomain().characteristic() == 0: return True
if isinstance(self.domain(), AbsoluteOrder) and self(self.domain().gen()) != 0 and self.codomain() in IntegralDomains(): return True
# this should be implemented somewhere else
if is_PolynomialRing(self.codomain()) and self.codomain().base_ring() is self.domain():
return True
coercion = self.codomain().coerce_map_from(self.domain())
if coercion is not None:
try:
return coercion.is_injective()
except NotImplementedError:
# PolynomialBaseringInjection does not implement is_surjective/is_injective
if isinstance(coercion, sage.categories.map.FormalCompositeMap):
if all([f.is_injective() for f in list(coercion)]):
return True
except AttributeError: # DefaultConvertMap_unique does not implement is_injective/surjective at all
pass
raise NotImplementedError
def is_surjective(self):
r"""
TESTS::
sage: sys.path.append(os.getcwd()); from mac_lane import * # optional: standalone
sage: QQ.coerce_map_from(ZZ).is_surjective() # indirect doctest
False
"""
from sage.categories.fields import Fields
if self.domain() in Fields(): return True
coercion = self.codomain().coerce_map_from(self.domain())
if coercion is not None:
try:
return coercion.is_surjective()
except AttributeError: # DefaultConvertMap_unique does not implement is_injective/surjective at all
# PolynomialBaseringInjection does not implement is_surjective/is_injective (TODO: fix the logic of FormalCompositeMap, i.e., postpone without_bij)
if isinstance(coercion, sage.categories.map.FormalCompositeMap):
if all([f.is_surjective() for f in list(coercion)]):
return True
pass
raise NotImplementedError
sage.rings.homset.RingHomset_generic.natural_map = patch_is_injective(sage.rings.homset.RingHomset_generic.natural_map, {sage.rings.morphism.RingHomomorphism_coercion: (lambda coercion: RingHomomorphism_coercion_patched(coercion.parent()))})
# a morphism of polynomial rings which is induced by a ring morphism on the base is injective if the morphism on the base is
class PolynomialRingHomomorphism_from_base_patched(sage.rings.polynomial.polynomial_ring_homomorphism.PolynomialRingHomomorphism_from_base):
def is_injective(self):
r"""
TESTS::
sage: sys.path.append(os.getcwd()); from mac_lane import * # optional: standalone
sage: QQ['x'].coerce_map_from(ZZ['x']).is_injective() # indirect doctest
True
This should be fixed in
`sage.rings.padics.qadic_flint_CA.pAdicCoercion_CA_frac_field`
instead::
sage: R.<a> = ZqCA(9)
sage: R['x'].is_subring(R.fraction_field()['x'])
True
"""
if self.underlying_map().codomain() is self.underlying_map().domain().fraction_field():
# fix this in pAdicCoercion_CA_frac_field and similar
return True
return self.underlying_map().is_injective()
sage.rings.polynomial.polynomial_ring.PolynomialRing_general._coerce_map_from_ = patch_is_injective(sage.rings.polynomial.polynomial_ring.PolynomialRing_general._coerce_map_from_, {sage.rings.polynomial.polynomial_ring_homomorphism.PolynomialRingHomomorphism_from_base: (lambda morphism: PolynomialRingHomomorphism_from_base_patched(morphism.parent(), morphism.underlying_map()))})
# morphisms of number fields are injective
class Q_to_quadratic_field_element_patched(sage.rings.number_field.number_field_element_quadratic.Q_to_quadratic_field_element):
def is_injective(self):
r"""
TESTS::
sage: sys.path.append(os.getcwd()); from mac_lane import * # optional: standalone
sage: GaussianIntegers().fraction_field().coerce_map_from(QQ).is_injective()
True
"""
return True
class Z_to_quadratic_field_element_patched(sage.rings.number_field.number_field_element_quadratic.Z_to_quadratic_field_element):
def is_injective(self):
r"""
TESTS::
sage: sys.path.append(os.getcwd()); from mac_lane import * # optional: standalone
sage: GaussianIntegers().fraction_field().coerce_map_from(ZZ).is_injective()
True
"""
return True
sage.rings.number_field.number_field.NumberField_quadratic._coerce_map_from_ = patch_is_injective(sage.rings.number_field.number_field.NumberField_quadratic._coerce_map_from_, {sage.rings.number_field.number_field_element_quadratic.Q_to_quadratic_field_element: (lambda morphism: Q_to_quadratic_field_element_patched(morphism.codomain())), sage.rings.number_field.number_field_element_quadratic.Z_to_quadratic_field_element: (lambda morphism: Z_to_quadratic_field_element_patched(morphism.codomain()))})
# the integers embed into the rationals
class Z_to_Q_patched(sage.rings.rational.Z_to_Q):
def is_injective(self):
r"""
TESTS::
sage: sys.path.append(os.getcwd()); from mac_lane import * # optional: standalone
sage: QQ.coerce_map_from(ZZ).is_injective()
True
"""
return True
def is_surjective(self):
r"""
TESTS::
sage: sys.path.append(os.getcwd()); from mac_lane import * # optional: standalone
sage: QQ.coerce_map_from(ZZ).is_surjective()
False
"""
return False
from sage.rings.all import QQ
QQ.coerce_map_from = patch_is_injective(QQ.coerce_map_from, {sage.rings.rational.Z_to_Q: (lambda morphism: Z_to_Q_patched())})
# the integers embed into their extensions in number fields
class DefaultConvertMap_unique_patched(sage.structure.coerce_maps.DefaultConvertMap_unique):
def is_injective(self):
r"""
TESTS::
sage: sys.path.append(os.getcwd()); from mac_lane import * # optional: standalone
sage: CyclotomicField(5).maximal_order().coerce_map_from(ZZ).is_injective()
True
"""
from sage.rings.all import ZZ
if self.domain() is ZZ or self.domain() is int or self.domain() is long:
return True
return super(DefaultConvertMap_unique_patched, self).is_injective()
def _coerce_map_from_patched(self, domain):
r"""
TESTS::
sage: sys.path.append(os.getcwd()); from mac_lane import * # optional: standalone
sage: CyclotomicField(5).maximal_order().coerce_map_from(ZZ).is_injective() # indirect doctest
True
"""
from sage.rings.all import ZZ
if domain is ZZ or domain is int or domain is long:
return DefaultConvertMap_unique_patched(domain, self)
return False
sage.rings.number_field.order.Order._coerce_map_from_ = _coerce_map_from_patched
del(_coerce_map_from_patched)
# quotient rings embed if their underlying rings do
class DefaultConvertMap_unique_patched3(sage.structure.coerce_maps.DefaultConvertMap_unique):
def is_injective(self):
r"""
TESTS::
sage: sys.path.append(os.getcwd()); from mac_lane import * # optional: standalone
sage: R.<x> = ZZ[]
sage: S.<x> = QQ[]
sage: S.quo(x^2 + 1).coerce_map_from(R.quo(x^2 + 1)).is_injective()
True
"""
if self.codomain().base().coerce_map_from(self.domain().base()).is_injective():
return True
raise NotImplementedError
sage.rings.polynomial.polynomial_quotient_ring.PolynomialQuotientRing_generic._coerce_map_from_original = sage.rings.polynomial.polynomial_quotient_ring.PolynomialQuotientRing_generic._coerce_map_from_
def _coerce_map_from_patched(self, domain):
r"""
TESTS::
sage: sys.path.append(os.getcwd()); from mac_lane import * # optional: standalone
sage: R.<x> = ZZ[]
sage: S.<x> = QQ[]
sage: S.quo(x^2 + 1).coerce_map_from(R.quo(x^2 + 1)).is_injective() # indirect doctest
True
"""
from sage.rings.polynomial.polynomial_quotient_ring import is_PolynomialQuotientRing
if is_PolynomialQuotientRing(domain) and domain.modulus() == self.modulus():
if self.base().has_coerce_map_from(domain.base()):
return DefaultConvertMap_unique_patched3(domain, self)
from sage.rings.fraction_field import is_FractionField
if is_FractionField(domain):
# this should be implemented on a much higher level:
# if there is a morphism R -> K then there is a morphism Frac(R) -> K
if self.has_coerce_map_from(domain.base()):
return True
return self._coerce_map_from_original(domain)
sage.rings.polynomial.polynomial_quotient_ring.PolynomialQuotientRing_generic._coerce_map_from_ = _coerce_map_from_patched
del(_coerce_map_from_patched)
# a ring embeds into its field of fractions
class CallableConvertMap_patched(sage.rings.fraction_field.CallableConvertMap):
def is_injective(self):
r"""
TESTS::
sage: sys.path.append(os.getcwd()); from mac_lane import * # optional: standalone
sage: R.<x> = QQ[]
sage: R.is_subring(R.fraction_field()) # indirect doctest
True
"""
return True
def is_surjective(self):
r"""
TESTS::
sage: sys.path.append(os.getcwd()); from mac_lane import * # optional: standalone
sage: R.<x> = QQ[]
sage: R.fraction_field().coerce_map_from(R).is_surjective()
False
"""
return False
sage.rings.fraction_field.CallableConvertMap = CallableConvertMap_patched
# inverses of quotient ring elements
def inverse_of_unit(self):
r"""
TESTS::
sage: sys.path.append(os.getcwd()); from mac_lane import * # optional: standalone
sage: R.<x> = ZZ[]
sage: S = R.quo(x^2+x+1)
sage: S(1).inverse_of_unit()
1
"""
inverse = ~self
if inverse.parent() is self.parent():
return inverse
raise NotImplementedError
sage.rings.polynomial.polynomial_quotient_ring_element.PolynomialQuotientRingElement.inverse_of_unit = inverse_of_unit
del(inverse_of_unit)
# factorization in polynomial quotient fields
def _factor_univariate_polynomial(self, f):
r"""
TESTS::
sage: sys.path.append(os.getcwd()); from mac_lane import * # optional: standalone
sage: K = GF(2)
sage: R.<x> = K[]
sage: L.<x> = K.extension(x^2 + x + 1)
sage: R.<y> = L[]
sage: L.<y> = L.extension(y^2 + y + x)
sage: R.<T> = L[]
sage: (T^2 + T + x).factor() # indirect doctest
(T + y) * (T + y + 1)
"""
from sage.structure.factorization import Factorization
if f.is_zero():
raise ValueError("factorization of 0 is not defined")
elif f.degree() <= 1:
return Factorization([(f,1)])
from_absolute_field, to_absolute_field, absolute_field = self.absolute_extension()
F = f.map_coefficients(lambda c:to_absolute_field(c), absolute_field).factor()
return Factorization([(g.map_coefficients(lambda c:from_absolute_field(c), self), e) for g,e in F], unit=from_absolute_field(F.unit()))
sage.rings.polynomial.polynomial_quotient_ring.PolynomialQuotientRing_generic._factor_univariate_polynomial = _factor_univariate_polynomial
del(_factor_univariate_polynomial)
# factorization needs to go to the absolute field and back
from sage.misc.cachefunc import cached_method
@cached_method
def absolute_extension(self):
"""
Return a ring isomorphic to this ring which is not a
:class:`PolynomialQuotientRing` but of a type which offers more
functionality.
INPUT:
- ``name`` -- a list of strings or ``None`` (default: ``None``), the
name of the generator of the absolute extension. If ``None``, this
will be the same as the name of the generator of this ring.
EXAMPLES::
sage: sys.path.append(os.getcwd()); from mac_lane import * # optional: standalone
sage: k.<a> = GF(4)
sage: R.<b> = k[]
sage: l.<b> = k.extension(b^2+b+a); l
Univariate Quotient Polynomial Ring in b over Finite Field in a of size 2^2 with modulus b^2 + b + a
sage: from_ll,to_ll, ll = l.absolute_extension(); ll
Finite Field in v4 of size 2^4
sage: all([to_ll(from_ll(ll.gen()**i)) == ll.gen()**i for i in range(ll.degree())])
True
sage: R.<c> = l[]
sage: m.<c> = l.extension(c^2+b*c+b); m
Univariate Quotient Polynomial Ring in c over Univariate Quotient Polynomial Ring in b over Finite Field in a of size 2^2 with modulus b^2 + b + a with modulus c^2 + b*c + b
sage: from_mm, to_mm, mm = m.absolute_extension(); mm
Finite Field in v8 of size 2^8
sage: all([to_mm(from_mm(mm.gen()**i)) == mm.gen()**i for i in range(mm.degree())])
True
"""
from sage.rings.polynomial.polynomial_quotient_ring import PolynomialQuotientRing_generic
if not self.is_field():
raise NotImplementedError("absolute_extension() only implemented for fields")
if self.is_finite():
if self.base_ring().is_prime_field():
if self.modulus().degree() == 1:
ret = self.base_ring()
from sage.categories.homset import Hom
from sage.categories.morphism import SetMorphism
to_ret = SetMorphism(Hom(self, ret), lambda x: x.lift()[0])
from_ret = self.coerce_map_from(ret)
return from_ret, to_ret, ret
else:
raise NotImplementedError
if isinstance(self.base_ring(), PolynomialQuotientRing_generic):
abs_base_to_base, base_to_abs_base, abs_base = self.base_ring().absolute_extension()
modulus_over_abs_base = self.modulus().map_coefficients(lambda c:base_to_abs_base(c), abs_base)
new_self = modulus_over_abs_base.parent().quo(modulus_over_abs_base)
ret_to_new_self, new_self_to_ret, ret = new_self.absolute_extension()
from_ret = ret.hom([ret_to_new_self(ret.gen()).lift().map_coefficients(abs_base_to_base, self.base_ring())(self.gen())], check=False)
to_ret = lambda x: x.lift().map_coefficients(lambda c: new_self_to_ret(base_to_abs_base(c)), ret)(new_self_to_ret(new_self.gen()))
from sage.categories.homset import Hom
from sage.categories.morphism import SetMorphism
to_ret = SetMorphism(Hom(self, ret), to_ret)
return from_ret, to_ret, ret
else:
N = self.cardinality()
from sage.rings.all import GF
ret = GF(N,prefix='v')
base_to_ret = self.base_ring().hom([self.base_ring().modulus().change_ring(ret).roots()[0][0]])
im_gen = self.modulus().map_coefficients(lambda c:base_to_ret(c), ret).roots()[0][0]
to_ret = lambda x: x.lift().map_coefficients(base_to_ret, ret)(im_gen)
from sage.categories.homset import Hom
from sage.categories.morphism import SetMorphism
to_ret = SetMorphism(Hom(self, ret), to_ret)
basis = [self.gen()**i*self.base_ring().gen()**j for i in range(self.degree()) for j in range(self.base_ring().degree())]
assert len(basis) == ret.degree()
basis_in_ret = [to_ret(b)._vector_() for b in basis]
from sage.matrix.constructor import matrix
A = matrix(basis_in_ret)
assert A.is_square()
x = A.solve_left(A.column_space().basis()[1])
from_ret = ret.hom([sum(c*b for c,b in zip(x.list(),basis))], check=False)
return from_ret, to_ret, ret
else:
raise NotImplementedError
sage.rings.polynomial.polynomial_quotient_ring.PolynomialQuotientRing_generic.absolute_extension = absolute_extension
del(absolute_extension)
# factorization needs some linear algebra (it seems)
def vector_space(self):
r"""
TESTS::
sage: sys.path.append(os.getcwd()); from mac_lane import * # optional: standalone
sage: K = GF(2)
sage: R.<x> = K[]
sage: L.<x> = K.extension(x^2 + x + 1)
sage: R.<y> = L[]
sage: L.<y> = L.extension(y^2 + y + x)
sage: L.vector_space()
Vector space of dimension 2 over Finite Field in x of size 2^2
"""
if not self.base().base_ring().is_field():
raise ValueError
return self.base().base_ring()**self.modulus().degree()
sage.rings.polynomial.polynomial_quotient_ring.PolynomialQuotientRing_generic.vector_space = vector_space
del(vector_space)
# make some_elements() non-trivial for number fields
def some_elements(self):
r"""
TESTS::
sage: sys.path.append(os.getcwd()); from mac_lane import * # optional: standalone
sage: K = GaussianIntegers().fraction_field()
sage: list(K.some_elements())
[I, 0, 1, 1/2, 2*I, -I, -2, 0, 0]
"""
for element in self.polynomial_ring().some_elements():
yield element(self.gen())
sage.rings.number_field.number_field.NumberField_generic.some_elements = some_elements
del(some_elements)
# make some_elements() deterministic for function fields
def some_elements(self):
r"""
TESTS::
sage: sys.path.append(os.getcwd()); from mac_lane import * # optional: standalone
sage: K.<x> = FunctionField(QQ)
sage: list(K.some_elements()) == list(K.some_elements())
True
"""
for num in self._ring.some_elements():
for den in self._ring.some_elements():
if den != 0:
yield self(num) / self(den)
sage.rings.function_field.function_field.RationalFunctionField.some_elements = some_elements
del(some_elements)
def some_elements(self):
r"""
TESTS::
sage: sys.path.append(os.getcwd()); from mac_lane import * # optional: standalone
sage: K.<x> = FunctionField(QQ)
sage: R.<y> = K[]
sage: L.<y> = K.extension(y^2 - x)
sage: list(L.some_elements()) == list(L.some_elements())
True
"""
for element in self._ring.some_elements():
yield self(element)
sage.rings.function_field.function_field.FunctionField_polymod.some_elements = some_elements
del(some_elements)
# make some_elements() non-trivial for fraction fields
def some_elements(self):
r"""
TESTS::
sage: sys.path.append(os.getcwd()); from mac_lane import * # optional: standalone
sage: R.<x> = QQ[]
sage: K = R.fraction_field()
sage: len(list(K.some_elements()))
72
"""
for num in self.ring().some_elements():
for den in self.ring().some_elements():
if den != 0:
yield self(num) / self(den)
sage.rings.fraction_field.FractionField_generic.some_elements = some_elements
del(some_elements)
# make some_elements() non-trivial for orders in number fields
def some_elements(self):
r"""
TESTS::
sage: sys.path.append(os.getcwd()); from mac_lane import * # optional: standalone
sage: R = GaussianIntegers()
sage: list(R.some_elements())
[I, 0, 1, 2*I, -I, -2, 0, 0]
"""
for element in self.fraction_field().some_elements():
if element in self:
yield self(element)
sage.rings.number_field.order.Order.some_elements = some_elements
del(some_elements)
# register modules at some standard places so imports work as expected
r"""
sage: from sage.rings.valuation.gauss_valuation import GaussValuation
"""
import imp, sys
sage.rings.valuation = sys.modules['sage.rings.valuation'] = imp.new_module('sage.rings.valuation')
sage.rings.valuation.gauss_valuation = sys.modules['sage.rings.valuation.gauss_valuation'] = gauss_valuation
sage.rings.valuation.valuation = sys.modules['sage.rings.valuation.valuation'] = valuation
sage.rings.valuation.valuation_space = sys.modules['sage.rings.valuation.valuation_space'] = valuation_space
sage.rings.valuation.augmented_valuation = sys.modules['sage.rings.valuation.augmented_valuation'] = augmented_valuation
sage.rings.function_field.function_field_valuation = sys.modules['sage.rings.function_field.function_field_valuation'] = function_field_valuation
# fix unpickling of factories
from sage.structure.factory import register_factory_unpickle
register_factory_unpickle("pAdicValuation", pAdicValuation)
register_factory_unpickle("GaussValuation", GaussValuation)
register_factory_unpickle("TrivialValuation", TrivialValuation)
register_factory_unpickle("TrivialPseudoValuation", TrivialPseudoValuation)
register_factory_unpickle("FunctionFieldValuation", FunctionFieldValuation)
register_factory_unpickle("AugmentedValuation", AugmentedValuation)
register_factory_unpickle("LimitValuation", LimitValuation)
register_factory_unpickle("ScaledValuation", ScaledValuation)
| saraedum/mac_lane | __init__.py | Python | gpl-2.0 | 33,298 |
# This file is part of Merlin.
# Merlin is the Copyright (C)2008,2009,2010 of Robin K. Hansen, Elliot Rosemarine, Andreas Jacobsen.
# Individual portions may be copyright by individual contributors, and
# are included in this collective work with permission of the copyright
# owners.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from Core.db import session
from Core.maps import User
from Core.loadable import loadable, route, require_user
class alias(loadable):
"""Set an alias that maps to your pnick, useful if you have a different nick than your pnick and people use autocomplete."""
usage = " <alias> (at most 15 characters)"
@route(r"(\S{3,15})?")
@require_user
def execute(self, message, user, params):
# assign param variables
alias=params.group(1)
if alias is None:
m = message.get_msg().split()
if len(m) > 1 and m[1] in self.nulls:
pass
else:
message.reply("You are %s, your alias is %s"%(user.name,user.alias,))
return
if alias is not None:
if User.load(name=alias) is not None:
message.reply("Your alias is already in use or is someone else's pnick (not allowed). Tough noogies.")
return
if session.query(User).filter(User.active==True).filter(User.alias.ilike(alias)).first() is not None:
message.reply("Your alias is already in use or is someone else's pnick (not allowed). Tough noogies.")
return
user.alias = alias
session.commit()
message.reply("Updated alias for %s (that's you) to %s"%(user.name,user.alias,))
| ellonweb/merlin | Hooks/user/alias.py | Python | gpl-2.0 | 2,365 |
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....ggettext import gettext as _
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .. import Rule
#-------------------------------------------------------------------------
# "People with children"
#-------------------------------------------------------------------------
class HaveChildren(Rule):
"""People with children"""
name = _('People with children')
description = _("Matches people who have children")
category = _('Family filters')
def apply(self,db,person):
for family_handle in person.get_family_handle_list():
family = db.get_family_from_handle(family_handle)
if family is not None and len(family.get_child_ref_list()) > 0:
return True
return False
| arunkgupta/gramps | gramps/gen/filters/rules/person/_havechildren.py | Python | gpl-2.0 | 1,831 |
import svgwrite as svg
import xml.etree.ElementTree
import sys
def convert(input,output,width,height,scale):
d = xml.etree.ElementTree.parse(input)
r = d.getroot()
scale = int(scale)
width = int(width)
height = int(height)
canvas = svg.Drawing(output, size=(str(width*scale), str(height*scale)))
canvas.add(canvas.rect(insert=(0, 0), size=('100%', '100%'), rx=None, ry=None, fill='rgb(0,0,0)'))
for poly in r.find('Polygons'):
red = int(poly.find('Brush').find('Red').text)
blue = int(poly.find('Brush').find('Blue').text)
green = int(poly.find('Brush').find('Green').text)
alpha = float(poly.find('Brush').find('Alpha').text)
alpha = alpha/255
color = svg.rgb(red,green,blue)
pts = []
for point in poly.find('Points'):
x = int(point.find('X').text)*scale
y = int(point.find('Y').text)*scale
pts.append((x,y))
canvas.add(svg.shapes.Polygon(points=pts, fill=color, opacity=alpha))
canvas.save()
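# Hedged usage sketch (hypothetical file names, not from the source): convert
# an EvoLisa XML dump of 200x200 polygons to SVG at 4x scale.
#
#     convert('mona.xml', 'mona.svg', 200, 200, 4)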
if __name__ == "__main__":
if (len(sys.argv) != 6):
print("usage: python xml2svg.py <input.xml> <output.svg> <INT original_width> <INT original_height> <INT scale>")
sys.exit(0)
else:
input = str(sys.argv[1])
output = str(sys.argv[2])
width = str(sys.argv[3])
height = str(sys.argv[4])
scale = int(sys.argv[5])
convert(input,output,width,height,scale)
| dannyperson/evolisa-tools | xml2svg.py | Python | gpl-2.0 | 1,707 |
# The MIT License (MIT)
#
# Copyright (c) 2016 Samuel Bear Powell
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
from datetime import datetime
class _sp:
@staticmethod
def calendar_time(dt):
try:
x = dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, dt.microsecond
return x
except AttributeError:
try:
return _sp.calendar_time(datetime.utcfromtimestamp(dt)) #will raise OSError if dt is not acceptable
except:
raise TypeError('dt must be datetime object or POSIX timestamp')
@staticmethod
def julian_day(dt):
"""Calculate the Julian Day from a datetime.datetime object in UTC"""
# year and month numbers
yr, mo, dy, hr, mn, sc, us = _sp.calendar_time(dt)
if mo <= 2: # From paper: "if M = 1 or 2, then Y = Y - 1 and M = M + 12"
mo += 12
yr -= 1
# day of the month with decimal time
dy = dy + hr/24.0 + mn/(24.0*60.0) + sc/(24.0*60.0*60.0) + us/(24.0*60.0*60.0*1e6)
# b is equal to 0 for the julian calendar and is equal to (2- A +
# INT(A/4)), A = INT(Y/100), for the gregorian calendar
a = int(yr / 100)
b = 2 - a + int(a / 4)
jd = int(365.25 * (yr + 4716)) + int(30.6001 * (mo + 1)) + dy + b - 1524.5
return jd
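# Hedged sanity check (not from the source): for the worked example in the
# NREL SPA report, 2003-10-17 12:30:30 UT, this method should give
# JD ~= 2452930.312847, e.g.
#
#     _sp.julian_day(datetime(2003, 10, 17, 12, 30, 30))   # ~2452930.312847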
@staticmethod
def julian_ephemeris_day(jd, deltat):
"""Calculate the Julian Ephemeris Day from the Julian Day and delta-time = (terrestrial time - universal time) in seconds"""
return jd + deltat / 86400.0
@staticmethod
def julian_century(jd):
"""Calculate the Julian Century from Julian Day or Julian Ephemeris Day"""
return (jd - 2451545.0) / 36525.0
@staticmethod
def julian_millennium(jc):
"""Calculate the Julian Millennium from Julian Ephemeris Century"""
return jc / 10.0
# Earth Periodic Terms
# Earth Heliocentric Longitude coefficients (L0, L1, L2, L3, L4, and L5 in paper)
_EHL_ = [#L0:
[(175347046, 0.0, 0.0), (3341656, 4.6692568, 6283.07585), (34894, 4.6261, 12566.1517),
(3497, 2.7441, 5753.3849), (3418, 2.8289, 3.5231), (3136, 3.6277, 77713.7715),
(2676, 4.4181, 7860.4194), (2343, 6.1352, 3930.2097), (1324, 0.7425, 11506.7698),
(1273, 2.0371, 529.691), (1199, 1.1096, 1577.3435), (990, 5.233, 5884.927),
(902, 2.045, 26.298), (857, 3.508, 398.149), (780, 1.179, 5223.694),
(753, 2.533, 5507.553), (505, 4.583, 18849.228), (492, 4.205, 775.523),
(357, 2.92, 0.067), (317, 5.849, 11790.629), (284, 1.899, 796.298),
(271, 0.315, 10977.079), (243, 0.345, 5486.778), (206, 4.806, 2544.314),
(205, 1.869, 5573.143), (202, 2.4458, 6069.777), (156, 0.833, 213.299),
(132, 3.411, 2942.463), (126, 1.083, 20.775), (115, 0.645, 0.98),
(103, 0.636, 4694.003), (102, 0.976, 15720.839), (102, 4.267, 7.114),
(99, 6.21, 2146.17), (98, 0.68, 155.42), (86, 5.98, 161000.69),
(85, 1.3, 6275.96), (85, 3.67, 71430.7), (80, 1.81, 17260.15),
(79, 3.04, 12036.46), (71, 1.76, 5088.63), (74, 3.5, 3154.69),
(74, 4.68, 801.82), (70, 0.83, 9437.76), (62, 3.98, 8827.39),
(61, 1.82, 7084.9), (57, 2.78, 6286.6), (56, 4.39, 14143.5),
(56, 3.47, 6279.55), (52, 0.19, 12139.55), (52, 1.33, 1748.02),
(51, 0.28, 5856.48), (49, 0.49, 1194.45), (41, 5.37, 8429.24),
(41, 2.4, 19651.05), (39, 6.17, 10447.39), (37, 6.04, 10213.29),
(37, 2.57, 1059.38), (36, 1.71, 2352.87), (36, 1.78, 6812.77),
(33, 0.59, 17789.85), (30, 0.44, 83996.85), (30, 2.74, 1349.87),
(25, 3.16, 4690.48)],
#L1:
[(628331966747, 0.0, 0.0), (206059, 2.678235, 6283.07585), (4303, 2.6351, 12566.1517),
(425, 1.59, 3.523), (119, 5.796, 26.298), (109, 2.966, 1577.344),
(93, 2.59, 18849.23), (72, 1.14, 529.69), (68, 1.87, 398.15),
(67, 4.41, 5507.55), (59, 2.89, 5223.69), (56, 2.17, 155.42),
(45, 0.4, 796.3), (36, 0.47, 775.52), (29, 2.65, 7.11),
(21, 5.34, 0.98), (19, 1.85, 5486.78), (19, 4.97, 213.3),
(17, 2.99, 6275.96), (16, 0.03, 2544.31), (16, 1.43, 2146.17),
(15, 1.21, 10977.08), (12, 2.83, 1748.02), (12, 3.26, 5088.63),
(12, 5.27, 1194.45), (12, 2.08, 4694), (11, 0.77, 553.57),
(10, 1.3, 3286.6), (10, 4.24, 1349.87), (9, 2.7, 242.73),
(9, 5.64, 951.72), (8, 5.3, 2352.87), (6, 2.65, 9437.76),
(6, 4.67, 4690.48)],
#L2:
[(52919, 0.0, 0.0), (8720, 1.0721, 6283.0758), (309, 0.867, 12566.152),
(27, 0.05, 3.52), (16, 5.19, 26.3), (16, 3.68, 155.42),
(10, 0.76, 18849.23), (9, 2.06, 77713.77), (7, 0.83, 775.52),
(5, 4.66, 1577.34), (4, 1.03, 7.11), (4, 3.44, 5573.14),
(3, 5.14, 796.3), (3, 6.05, 5507.55), (3, 1.19, 242.73),
(3, 6.12, 529.69), (3, 0.31, 398.15), (3, 2.28, 553.57),
(2, 4.38, 5223.69), (2, 3.75, 0.98)],
#L3:
[(289, 5.844, 6283.076), (35, 0.0, 0.0,), (17, 5.49, 12566.15),
(3, 5.2, 155.42), (1, 4.72, 3.52), (1, 5.3, 18849.23),
(1, 5.97, 242.73)],
#L4:
[(114, 3.142, 0.0), (8, 4.13, 6283.08), (1, 3.84, 12566.15)],
#L5:
[(1, 3.14, 0.0)]
]
#Earth Heliocentric Latitude coefficients (B0 and B1 in paper)
_EHB_ = [ #B0:
[(280, 3.199, 84334.662), (102, 5.422, 5507.553), (80, 3.88, 5223.69),
(44, 3.7, 2352.87), (32, 4.0, 1577.34)],
#B1:
[(9, 3.9, 5507.55), (6, 1.73, 5223.69)]
]
#Earth Heliocentric Radius coefficients (R0, R1, R2, R3, R4)
_EHR_ = [#R0:
[(100013989, 0.0, 0.0), (1670700, 3.0984635, 6283.07585), (13956, 3.05525, 12566.1517),
(3084, 5.1985, 77713.7715), (1628, 1.1739, 5753.3849), (1576, 2.8469, 7860.4194),
(925, 5.453, 11506.77), (542, 4.564, 3930.21), (472, 3.661, 5884.927),
(346, 0.964, 5507.553), (329, 5.9, 5223.694), (307, 0.299, 5573.143),
(243, 4.273, 11790.629), (212, 5.847, 1577.344), (186, 5.022, 10977.079),
(175, 3.012, 18849.228), (110, 5.055, 5486.778), (98, 0.89, 6069.78),
(86, 5.69, 15720.84), (86, 1.27, 161000.69), (85, 0.27, 17260.15),
(63, 0.92, 529.69), (57, 2.01, 83996.85), (56, 5.24, 71430.7),
(49, 3.25, 2544.31), (47, 2.58, 775.52), (45, 5.54, 9437.76),
(43, 6.01, 6275.96), (39, 5.36, 4694), (38, 2.39, 8827.39),
(37, 0.83, 19651.05), (37, 4.9, 12139.55), (36, 1.67, 12036.46),
(35, 1.84, 2942.46), (33, 0.24, 7084.9), (32, 0.18, 5088.63),
(32, 1.78, 398.15), (28, 1.21, 6286.6), (28, 1.9, 6279.55),
(26, 4.59, 10447.39)],
#R1:
[(103019, 1.10749, 6283.07585), (1721, 1.0644, 12566.1517), (702, 3.142, 0.0),
(32, 1.02, 18849.23), (31, 2.84, 5507.55), (25, 1.32, 5223.69),
(18, 1.42, 1577.34), (10, 5.91, 10977.08), (9, 1.42, 6275.96),
(9, 0.27, 5486.78)],
#R2:
[(4359, 5.7846, 6283.0758), (124, 5.579, 12566.152), (12, 3.14, 0.0),
(9, 3.63, 77713.77), (6, 1.87, 5573.14), (3, 5.47, 18849)],
#R3:
[(145, 4.273, 6283.076), (7, 3.92, 12566.15)],
#R4:
[(4, 2.56, 6283.08)]
]
@staticmethod
def heliocentric_longitude(jme):
"""Compute the Earth Heliocentric Longitude (L) in degrees given the Julian Ephemeris Millennium"""
#L5, ..., L0
Li = [sum(a*np.cos(b + c*jme) for a,b,c in abcs) for abcs in reversed(_sp._EHL_)]
L = np.polyval(Li, jme) / 1e8
L = np.rad2deg(L) % 360
return L
@staticmethod
def heliocentric_latitude(jme):
"""Compute the Earth Heliocentric Latitude (B) in degrees given the Julian Ephemeris Millennium"""
Bi = [sum(a*np.cos(b + c*jme) for a,b,c in abcs) for abcs in reversed(_sp._EHB_)]
B = np.polyval(Bi, jme) / 1e8
B = np.rad2deg(B) % 360
return B
@staticmethod
def heliocentric_radius(jme):
"""Compute the Earth Heliocentric Radius (R) in astronomical units given the Julian Ephemeris Millennium"""
Ri = [sum(a*np.cos(b + c*jme) for a,b,c in abcs) for abcs in reversed(_sp._EHR_)]
R = np.polyval(Ri, jme) / 1e8
return R
@staticmethod
def heliocentric_position(jme):
"""Compute the Earth Heliocentric Longitude, Latitude, and Radius given the Julian Ephemeris Millennium
Returns (L, B, R) where L = longitude in degrees, B = latitude in degrees, and R = radius in astronomical units
"""
return _sp.heliocentric_longitude(jme), _sp.heliocentric_latitude(jme), _sp.heliocentric_radius(jme)
@staticmethod
def geocentric_position(helio_pos):
"""Compute the geocentric longitude (Theta) and latitude (beta) (in degrees) of the sun given the earth's heliocentric position (L, B, R)"""
L,B,R = helio_pos
th = L + 180
b = -B
return (th, b)
#Nutation Longitude and Obliquity coefficients (Y)
_NLOY_ = [(0, 0, 0, 0, 1), (-2, 0, 0, 2, 2), (0, 0, 0, 2, 2),
(0, 0, 0, 0, 2), (0, 1, 0, 0, 0), (0, 0, 1, 0, 0),
(-2, 1, 0, 2, 2), (0, 0, 0, 2, 1), (0, 0, 1, 2, 2),
(-2, -1, 0, 2, 2), (-2, 0, 1, 0, 0), (-2, 0, 0, 2, 1),
(0, 0, -1, 2, 2), (2, 0, 0, 0, 0), (0, 0, 1, 0, 1),
(2, 0, -1, 2, 2), (0, 0, -1, 0, 1), (0, 0, 1, 2, 1),
(-2, 0, 2, 0, 0), (0, 0, -2, 2, 1), (2, 0, 0, 2, 2),
(0, 0, 2, 2, 2), (0, 0, 2, 0, 0), (-2, 0, 1, 2, 2),
(0, 0, 0, 2, 0), (-2, 0, 0, 2, 0), (0, 0, -1, 2, 1),
(0, 2, 0, 0, 0), (2, 0, -1, 0, 1), (-2, 2, 0, 2, 2),
(0, 1, 0, 0, 1), (-2, 0, 1, 0, 1), (0, -1, 0, 0, 1),
(0, 0, 2, -2, 0), (2, 0, -1, 2, 1), (2, 0, 1, 2, 2),
(0, 1, 0, 2, 2), (-2, 1, 1, 0, 0), (0, -1, 0, 2, 2),
(2, 0, 0, 2, 1), (2, 0, 1, 0, 0), (-2, 0, 2, 2, 2),
(-2, 0, 1, 2, 1), (2, 0, -2, 0, 1), (2, 0, 0, 0, 1),
(0, -1, 1, 0, 0), (-2, -1, 0, 2, 1), (-2, 0, 0, 0, 1),
(0, 0, 2, 2, 1), (-2, 0, 2, 0, 1), (-2, 1, 0, 2, 1),
(0, 0, 1, -2, 0), (-1, 0, 1, 0, 0), (-2, 1, 0, 0, 0),
(1, 0, 0, 0, 0), (0, 0, 1, 2, 0), (0, 0, -2, 2, 2),
(-1, -1, 1, 0, 0), (0, 1, 1, 0, 0), (0, -1, 1, 2, 2),
(2, -1, -1, 2, 2), (0, 0, 3, 2, 2), (2, -1, 0, 2, 2)]
#Nutation Longitude and Obliquity coefficients (a,b)
_NLOab_ = [(-171996, -174.2), (-13187, -1.6), (-2274, -0.2), (2062, 0.2), (1426, -3.4), (712, 0.1),
(-517, 1.2), (-386, -0.4), (-301, 0), (217, -0.5), (-158, 0), (129, 0.1),
(123, 0), (63, 0), (63, 0.1), (-59, 0), (-58, -0.1), (-51, 0),
(48, 0), (46, 0), (-38, 0), (-31, 0), (29, 0), (29, 0),
(26, 0), (-22, 0), (21, 0), (17, -0.1), (16, 0), (-16, 0.1),
(-15, 0), (-13, 0), (-12, 0), (11, 0), (-10, 0), (-8, 0),
(7, 0), (-7, 0), (-7, 0), (-7, 0), (6, 0), (6, 0),
(6, 0), (-6, 0), (-6, 0), (5, 0), (-5, 0), (-5, 0),
(-5, 0), (4, 0), (4, 0), (4, 0), (-4, 0), (-4, 0),
(-4, 0), (3, 0), (-3, 0), (-3, 0), (-3, 0), (-3, 0),
(-3, 0), (-3, 0), (-3, 0)]
#Nutation Longitude and Obliquity coefficients (c,d)
_NLOcd_ = [(92025, 8.9), (5736, -3.1), (977, -0.5), (-895, 0.5),
(54, -0.1), (-7, 0), (224, -0.6), (200, 0),
(129, -0.1), (-95, 0.3), (0, 0), (-70, 0),
(-53, 0), (0, 0), (-33, 0), (26, 0),
(32, 0), (27, 0), (0, 0), (-24, 0),
(16, 0), (13, 0), (0, 0), (-12, 0),
(0, 0), (0, 0), (-10, 0), (0, 0),
(-8, 0), (7, 0), (9, 0), (7, 0),
(6, 0), (0, 0), (5, 0), (3, 0),
(-3, 0), (0, 0), (3, 0), (3, 0),
(0, 0), (-3, 0), (-3, 0), (3, 0),
(3, 0), (0, 0), (3, 0), (3, 0),
(3, 0)]
@staticmethod
def ecliptic_obliquity(jme, delta_epsilon):
"""Calculate the true obliquity of the ecliptic (epsilon, in degrees) given the Julian Ephemeris Millennium and the nutation in obliquity (delta_epsilon)"""
u = jme/10
e0 = np.polyval([2.45, 5.79, 27.87, 7.12, -39.05, -249.67, -51.38, 1999.25, -1.55, -4680.93, 84381.448], u)
e = e0/3600.0 + delta_epsilon
return e
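# Hedged sanity check: at u = 0 (epoch J2000.0) the polynomial reduces to its
# constant term, 84381.448 arcseconds, i.e. a mean obliquity of
# 84381.448/3600 ~= 23.4393 degrees, the standard J2000 value.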
@staticmethod
def nutation_obliquity(jce):
"""compute the nutation in longitude (delta_psi) and the true obliquity (epsilon) given the Julian Ephemeris Century"""
#mean elongation of the moon from the sun, in radians:
#x0 = 297.85036 + 445267.111480*jce - 0.0019142*(jce**2) + (jce**3)/189474
x0 = np.deg2rad(np.polyval([1./189474, -0.0019142, 445267.111480, 297.85036],jce))
#mean anomaly of the sun (Earth), in radians:
x1 = np.deg2rad(np.polyval([-1/3e5, -0.0001603, 35999.050340, 357.52772], jce))
#mean anomaly of the moon, in radians:
x2 = np.deg2rad(np.polyval([1./56250, 0.0086972, 477198.867398, 134.96298], jce))
#moon's argument of latitude, in radians:
x3 = np.deg2rad(np.polyval([1./327270, -0.0036825, 483202.017538, 93.27191], jce))
#Longitude of the ascending node of the moon's mean orbit on the ecliptic
# measured from the mean equinox of the date, in radians
x4 = np.deg2rad(np.polyval([1./45e4, 0.0020708, -1934.136261, 125.04452], jce))
x = (x0, x1, x2, x3, x4)
dp = 0.0
for y, ab in zip(_sp._NLOY_, _sp._NLOab_):
a,b = ab
dp += (a + b*jce)*np.sin(np.dot(x, y))
dp = np.rad2deg(dp)/36e6
de = 0.0
for y, cd in zip(_sp._NLOY_, _sp._NLOcd_):
c,d = cd
de += (c + d*jce)*np.cos(np.dot(x, y))
de = np.rad2deg(de)/36e6
e = _sp.ecliptic_obliquity(_sp.julian_millennium(jce), de)
return dp, e
@staticmethod
def abberation_correction(R):
"""Calculate the aberration correction (delta_tau, in degrees) given the Earth Heliocentric Radius (in AU)"""
return -20.4898/(3600*R)
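# Hedged sanity check: at R = 1 AU this evaluates to -20.4898/3600
# ~= -0.00569 degrees (about -20.5 arcseconds), the classical constant of
# annual aberration.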
@staticmethod
def sun_longitude(helio_pos, delta_psi):
"""Calculate the apparent sun longitude (lambda, in degrees) and geocentric longitude (beta, in degrees) given the earth heliocentric position and delta_psi"""
L,B,R = helio_pos
theta = L + 180 #geocentric longitude
beta = -B
ll = theta + delta_psi + _sp.abberation_correction(R)
return ll, beta
@staticmethod
def greenwich_sidereal_time(jd, delta_psi, epsilon):
"""Calculate the apparent Greenwich sidereal time (v, in degrees) given the Julian Day, the nutation in longitude (delta_psi), and the true obliquity (epsilon)"""
jc = _sp.julian_century(jd)
#mean sidereal time at greenwich, in degrees:
v0 = (280.46061837 + 360.98564736629*(jd - 2451545) + 0.000387933*(jc**2) - (jc**3)/38710000) % 360
v = v0 + delta_psi*np.cos(np.deg2rad(epsilon))
return v
@staticmethod
def sun_ra_decl(llambda, epsilon, beta):
"""Calculate the sun's geocentric right ascension (alpha, in degrees) and declination (delta, in degrees)"""
l, e, b = map(np.deg2rad, (llambda, epsilon, beta))
alpha = np.arctan2(np.sin(l)*np.cos(e) - np.tan(b)*np.sin(e), np.cos(l)) #x1 / x2
alpha = np.rad2deg(alpha) % 360
delta = np.arcsin(np.sin(b)*np.cos(e) + np.cos(b)*np.sin(e)*np.sin(l))
delta = np.rad2deg(delta)
return alpha, delta
@staticmethod
def sun_topo_ra_decl_hour(latitude, longitude, elevation, jd, delta_t = 0):
"""Calculate the sun's topocentric right ascension (alpha'), declination (delta'), and hour angle (H')"""
jde = _sp.julian_ephemeris_day(jd, delta_t)
jce = _sp.julian_century(jde)
jme = _sp.julian_millennium(jce)
helio_pos = _sp.heliocentric_position(jme)
R = helio_pos[-1]
phi, sigma, E = latitude, longitude, elevation
#equatorial horizontal parallax of the sun, in radians
xi = np.deg2rad(8.794/(3600*R)) #
#rho = distance from center of earth in units of the equatorial radius
#phi-prime = geocentric latitude
#NB: These equations look like they're based on WGS-84, but are rounded slightly
# The WGS-84 reference ellipsoid has major axis a = 6378137 m, and flattening factor 1/f = 298.257223563
# minor axis b = a*(1-f) = 6356752.3142 = 0.996647189335*a
phi_r = np.deg2rad(phi) #geodetic latitude in radians
u = np.arctan(0.99664719*np.tan(phi_r)) #
x = np.cos(u) + E*np.cos(phi_r)/6378140 #rho cos(phi-prime)
y = 0.99664719*np.sin(u) + E*np.sin(phi_r)/6378140 #rho sin(phi-prime)
delta_psi, epsilon = _sp.nutation_obliquity(jce) #
llambda, beta = _sp.sun_longitude(helio_pos, delta_psi) #
alpha, delta = _sp.sun_ra_decl(llambda, epsilon, beta) #
v = _sp.greenwich_sidereal_time(jd, delta_psi, epsilon) #
H = v + longitude - alpha #
Hr, dr = map(np.deg2rad,(H,delta))
dar = np.arctan2(-x*np.sin(xi)*np.sin(Hr), np.cos(dr)-x*np.sin(xi)*np.cos(Hr))
delta_alpha = np.rad2deg(dar) #
alpha_prime = alpha + delta_alpha #
delta_prime = np.rad2deg(np.arctan2((np.sin(dr) - y*np.sin(xi))*np.cos(dar), np.cos(dr) - y*np.sin(xi)*np.cos(Hr))) #
H_prime = H - delta_alpha #
return alpha_prime, delta_prime, H_prime
@staticmethod
def sun_topo_azimuth_zenith(latitude, delta_prime, H_prime, temperature=14.6, pressure=1013):
"""Compute the sun's topocentric azimuth and zenith angles
azimuth is measured eastward from north, zenith from vertical
temperature = average temperature in C (default is 14.6 = global average in 2013)
pressure = average pressure in mBar (default 1013 = global average)
"""
phi = np.deg2rad(latitude)
dr, Hr = map(np.deg2rad,(delta_prime, H_prime))
P, T = pressure, temperature
e0 = np.rad2deg(np.arcsin(np.sin(phi)*np.sin(dr) + np.cos(phi)*np.cos(dr)*np.cos(Hr)))
tmp = np.deg2rad(e0 + 10.3/(e0+5.11))
delta_e = (P/1010.0)*(283.0/(273+T))*(1.02/(60*np.tan(tmp)))
e = e0 + delta_e
zenith = 90 - e
gamma = np.rad2deg(np.arctan2(np.sin(Hr), np.cos(Hr)*np.sin(phi) - np.tan(dr)*np.cos(phi))) % 360
Phi = (gamma + 180) % 360 #azimuth from north
return Phi, zenith
@staticmethod
def norm_lat_lon(lat,lon):
if lat < -90 or lat > 90:
#convert to cartesian and back
x = np.cos(np.deg2rad(lon))*np.cos(np.deg2rad(lat))
y = np.sin(np.deg2rad(lon))*np.cos(np.deg2rad(lat))
z = np.sin(np.deg2rad(lat))
r = np.sqrt(x**2 + y**2 + z**2)
lon = np.rad2deg(np.arctan2(y,x)) % 360
lat = np.rad2deg(np.arcsin(z/r))
elif lon < 0 or lon > 360:
lon = lon % 360
return lat,lon
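# Hedged worked example (assuming the np-prefixed form above): a latitude that
# overshoots the pole wraps onto the opposite meridian, e.g.
#
#     _sp.norm_lat_lon(100, 0)   # -> (80.0, 180.0)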
@staticmethod
def topo_pos(t,lat,lon,elev,temp,press,dt):
"""compute RA,dec,H, all in degrees"""
lat,lon = _sp.norm_lat_lon(lat,lon)
jd = _sp.julian_day(t)
RA, dec, H = _sp.sun_topo_ra_decl_hour(lat, lon, elev, jd, dt)
return RA, dec, H
@staticmethod
def pos(t,lat,lon,elev,temp,press,dt):
"""Compute azimute,zenith,RA,dec,H all in degrees"""
lat,lon = _sp.norm_lat_lon(lat,lon)
jd = _sp.julian_day(t)
RA, dec, H = _sp.sun_topo_ra_decl_hour(lat, lon, elev, jd, dt)
azimuth, zenith = _sp.sun_topo_azimuth_zenith(lat, dec, H, temp, press)
return azimuth,zenith,RA,dec,H
def julian_day(dt):
"""Convert UTC datetimes or UTC timestamps to Julian days
Parameters
----------
dt : array_like
UTC datetime objects or UTC timestamps (as per datetime.utcfromtimestamp)
Returns
-------
jd : ndarray
datetimes converted to fractional Julian days
"""
dts = np.array(dt)
if len(dts.shape) == 0:
return _sp.julian_day(dt)
jds = np.empty(dts.shape)
for i,d in enumerate(dts.flat):
jds.flat[i] = _sp.julian_day(d)
return jds
def arcdist(p0,p1,radians=False):
"""Angular distance between azimuth,zenith pairs
Parameters
----------
p0 : array_like, shape (..., 2)
p1 : array_like, shape (..., 2)
p[...,0] = azimuth angles, p[...,1] = zenith angles
radians : boolean (default False)
If False, angles are in degrees, otherwise in radians
Returns
-------
ad : array_like, shape is broadcast(p0,p1).shape
Arcdistances between corresponding pairs in p0,p1
In degrees by default, in radians if radians=True
"""
#formula comes from translating points into cartesian coordinates
#taking the dot product to get the cosine between the two vectors
#then arccos to return to angle, and simplify everything assuming real inputs
p0,p1 = np.array(p0), np.array(p1)
if not radians:
p0,p1 = np.deg2rad(p0), np.deg2rad(p1)
a0,z0 = p0[...,0], p0[...,1]
a1,z1 = p1[...,0], p1[...,1]
d = np.arccos(np.cos(z0)*np.cos(z1)+np.cos(a0-a1)*np.sin(z0)*np.sin(z1))
if radians:
return d
else:
return np.rad2deg(d)
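# Hedged examples: two directions sharing an azimuth but 90 degrees apart in
# zenith, or at equal zenith on opposite azimuths, are 90 degrees apart:
#
#     arcdist((0, 0), (0, 90))     # 90.0
#     arcdist((0, 45), (180, 45))  # 90.0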
def observed_sunpos(dt, latitude, longitude, elevation, temperature=None, pressure=None, delta_t=0, radians=False):
"""Compute the observed coordinates of the sun as viewed at the given time and location.
Parameters
----------
dt : array_like of datetime or float
UTC datetime objects or UTC timestamps (as per datetime.utcfromtimestamp) representing the times of observations
latitude, longitude : array_like of float
decimal degrees, positive for north of the equator and east of Greenwich
elevation : array_like of float
meters, relative to the WGS-84 ellipsoid
temperature : None or array_like of float, optional
celsius, default is 14.6 (global average in 2013)
pressure : None or array_like of float, optional
millibar, default is 1013 (global average in ??)
delta_t : array_like of float, optional
seconds, default is 0, difference between the earth's rotation time (TT) and universal time (UT)
radians : bool, optional
return results in radians if True, degrees if False (default)
Returns
-------
coords : ndarray, (...,2)
The shape of the array is parameters broadcast together, plus a final dimension for the coordinates.
coords[...,0] = observed azimuth angle, measured eastward from north
coords[...,1] = observed zenith angle, measured down from vertical
"""
if temperature is None:
temperature = 14.6
if pressure is None:
pressure = 1013
#6367444 = radius of earth
#numpy broadcasting
b = np.broadcast(dt,latitude,longitude,elevation,temperature,pressure,delta_t)
res = np.empty(b.shape+(2,))
res_vec = res.reshape((-1,2))
for i,x in enumerate(b):
res_vec[i] = _sp.pos(*x)[:2]
if radians:
res = np.deg2rad(res)
return res
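# Usage sketch (arguments illustrative, output values not shown): azimuth and
# zenith for an observer near Greenwich; ``datetime`` is already imported at
# the top of this module.
#
#     az, zen = observed_sunpos(datetime(2016, 6, 21, 12, 0, 0), 51.48, 0.0, 0.0)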
def topocentric_sunpos(dt, latitude, longitude, elevation, temperature=None, pressure=None, delta_t=0, radians=False):
"""Compute the topocentric coordinates of the sun as viewed at the given time and location.
Parameters
----------
dt : array_like of datetime or float
UTC datetime objects or UTC timestamps (as per datetime.utcfromtimestamp) representing the times of observations
latitude, longitude : array_like of float
decimal degrees, positive for north of the equator and east of Greenwich
elevation : array_like of float
meters, relative to the WGS-84 ellipsoid
temperature : None or array_like of float, optional
celsius, default is 14.6 (global average in 2013)
pressure : None or array_like of float, optional
millibar, default is 1013 (global average in ??)
delta_t : array_like of float, optional
seconds, default is 0, difference between the earth's rotation time (TT) and universal time (UT)
radians : bool, optional
return results in radians if True, degrees if False (default)
Returns
-------
coords : ndarray, (...,3)
The shape of the array is parameters broadcast together, plus a final dimension for the coordinates.
coords[...,0] = topocentric right ascension
coords[...,1] = topocentric declination
coords[...,2] = topocentric hour angle
"""
if temperature is None:
temperature = 14.6
if pressure is None:
pressure = 1013
#6367444 = radius of earth
#numpy broadcasting
b = np.broadcast(dt,latitude,longitude,elevation,temperature,pressure,delta_t)
res = np.empty(b.shape+(3,))
res_vec = res.reshape((-1,3))
for i,x in enumerate(b):
res_vec[i] = _sp.topo_pos(*x)
if radians:
res = np.deg2rad(res)
return res
def sunpos(dt, latitude, longitude, elevation, temperature=None, pressure=None, delta_t=0, radians=False):
"""Compute the observed and topocentric coordinates of the sun as viewed at the given time and location.
Parameters
----------
dt : array_like of datetime or float
UTC datetime objects or UTC timestamps (as per datetime.utcfromtimestamp) representing the times of observations
latitude, longitude : array_like of float
decimal degrees, positive for north of the equator and east of Greenwich
elevation : array_like of float
meters, relative to the WGS-84 ellipsoid
temperature : None or array_like of float, optional
        celsius, default is 14.6 (global average in 2013)
pressure : None or array_like of float, optional
millibar, default is 1013 (global average in ??)
delta_t : array_like of float, optional
seconds, default is 0, difference between the earth's rotation time (TT) and universal time (UT)
radians : bool, optional
return results in radians if True, degrees if False (default)
Returns
-------
coords : ndarray, (...,5)
        The shape of the array is the broadcast shape of the parameters, plus a final dimension for the coordinates.
coords[...,0] = observed azimuth angle, measured eastward from north
coords[...,1] = observed zenith angle, measured down from vertical
coords[...,2] = topocentric right ascension
coords[...,3] = topocentric declination
coords[...,4] = topocentric hour angle
"""
if temperature is None:
temperature = 14.6
if pressure is None:
pressure = 1013
#6367444 = radius of earth
#numpy broadcasting
b = np.broadcast(dt,latitude,longitude,elevation,temperature,pressure,delta_t)
res = np.empty(b.shape+(5,))
res_vec = res.reshape((-1,5))
for i,x in enumerate(b):
res_vec[i] = _sp.pos(*x)
if radians:
res = np.deg2rad(res)
return res
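# Broadcasting sketch (illustrative; numeric values depend on _sp.pos): an
# array of times with a scalar location yields one row of 5 coordinates per
# time.
#   import datetime
#   times = [datetime.datetime(2020, 6, 21, h) for h in range(24)]
#   coords = sunpos(times, 51.48, 0.0, 0.0)  # coords.shape == (24, 5)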
def main(args):
az, zen, ra, dec, h = sunpos(args.t, args.lat, args.lon, args.elev, args.temp, args.p, args.dt, args.rad)
if args.csv:
#machine readable
print('{t}, {dt}, {lat}, {lon}, {elev}, {temp}, {p}, {az}, {zen}, {ra}, {dec}, {h}'.format(t=args.t, dt=args.dt, lat=args.lat, lon=args.lon, elev=args.elev,temp=args.temp, p=args.p,az=az, zen=zen, ra=ra, dec=dec, h=h))
else:
dr='deg'
if args.rad:
dr='rad'
print("Computing sun position at T = {t} + {dt} s".format(t=args.t, dt=args.dt))
print("Lat, Lon, Elev = {lat} deg, {lon} deg, {elev} m".format(lat=args.lat, lon=args.lon, elev=args.elev))
print("T, P = {temp} C, {press} mbar".format(temp=args.temp, press=args.p))
print("Results:")
print("Azimuth, zenith = {az} {dr}, {zen} {dr}".format(az=az,zen=zen,dr=dr))
print("RA, dec, H = {ra} {dr}, {dec} {dr}, {h} {dr}".format(ra=ra, dec=dec, h=h, dr=dr))
if __name__ == '__main__':
from argparse import ArgumentParser
import datetime, sys
parser = ArgumentParser(prog='sunposition',description='Compute sun position parameters given the time and location')
parser.add_argument('--version',action='version',version='%(prog)s 1.0')
parser.add_argument('--citation',dest='cite',action='store_true',help='Print citation information')
    parser.add_argument('-t','--time',dest='t',type=str,default='now',help='"now" or date and time (UTC) in "YYYY-MM-DD hh:mm:ss.ssssss" format or a (UTC) POSIX timestamp')
    parser.add_argument('-lat','--latitude',dest='lat',type=float,default=51.48,help='latitude, in decimal degrees, positive for north')
    parser.add_argument('-lon','--longitude',dest='lon',type=float,default=0.0,help='longitude, in decimal degrees, positive for east')
    parser.add_argument('-e','--elevation',dest='elev',type=float,default=0,help='elevation, in meters')
    parser.add_argument('-T','--temperature',dest='temp',type=float,default=14.6,help='temperature, in degrees celsius')
    parser.add_argument('-p','--pressure',dest='p',type=float,default=1013.0,help='atmospheric pressure, in millibar')
    parser.add_argument('-dt',type=float,default=0.0,help='difference between earth\'s rotation time (TT) and universal time (UT1)')
    parser.add_argument('-r','--radians',dest='rad',action='store_true',help='Output in radians instead of degrees')
parser.add_argument('--csv',dest='csv',action='store_true',help='Comma separated values (time,dt,lat,lon,elev,temp,pressure,az,zen,RA,dec,H)')
args = parser.parse_args()
if args.cite:
print("Implementation: Samuel Bear Powell, 2016")
print("Algorithm:")
print("Ibrahim Reda, Afshin Andreas, \"Solar position algorithm for solar radiation applications\", Solar Energy, Volume 76, Issue 5, 2004, Pages 577-589, ISSN 0038-092X, doi:10.1016/j.solener.2003.12.003")
sys.exit(0)
if args.t == "now":
args.t = datetime.datetime.utcnow()
elif ":" in args.t and "-" in args.t:
        try:
            args.t = datetime.datetime.strptime(args.t,'%Y-%m-%d %H:%M:%S.%f') #with microseconds
        except ValueError:
            try:
                args.t = datetime.datetime.strptime(args.t,'%Y-%m-%d %H:%M:%S.') #with trailing dot, no microseconds
            except ValueError:
                args.t = datetime.datetime.strptime(args.t,'%Y-%m-%d %H:%M:%S') #without microseconds
else:
        args.t = datetime.datetime.utcfromtimestamp(float(args.t))
main(args)
|
phborba/dsgtoolsop
|
auxiliar/sunposition/sunposition.py
|
Python
|
gpl-2.0
| 32,594
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Original Module by SIESA (<http://www.siesacr.com>)
# Refactored by CLEARCORP S.A. (<http://clearcorp.co.cr>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# license, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import purchase_import
import product
import report
|
3dfxsoftware/cbss-addons
|
purchase_import/__init__.py
|
Python
|
gpl-2.0
| 1,087
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A Morse Binary plotter
Copyright (C) 2015 by
Sébastien Celles <s.celles@gmail.com>
All rights reserved.
"""
import matplotlib.pyplot as plt
import morse_talk as mtalk
def _create_ax(ax):
"""
Create a Matplotlib Axe from ax
if ax is None a new Matplotlib figure is create
and also an ax
else ax is returned
"""
if ax is None:
fig, axs = plt.subplots(1, 1)
return axs
else:
return ax
def _create_x_y(l, duration=1):
"""
Create 2 lists
    x: time (in units of the dot (dit))
y: bits
from a list of bit
>>> l = [1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1]
>>> x, y = _create_x_y(l)
>>> x
[-1, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 23, 24, 24, 25, 25, 26, 26, 27, 27, 28]
>>> y
[0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0]
"""
l = [0] + l + [0]
y = []
x = []
for i, bit in enumerate(l):
y.append(bit)
y.append(bit)
x.append((i - 1) * duration)
x.append(i * duration)
return x, y
def plot(message, duration=1, ax = None):
"""
Plot a message
    Returns: ax, a Matplotlib Axes
"""
lst_bin = mtalk.encoding._encode_binary(message)
x, y = _create_x_y(lst_bin, duration)
ax = _create_ax(ax)
ax.plot(x, y, linewidth=2.0)
delta_y = 0.1
ax.set_ylim(-delta_y, 1 + delta_y)
ax.set_yticks([0, 1])
delta_x = 0.5 * duration
ax.set_xlim(-delta_x, len(lst_bin) * duration + delta_x)
return ax
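# Usage sketch (assumes morse_talk provides encoding._encode_binary as used
# above):
#   ax = plot("SOS")
#   plt.show()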
def main():
import doctest
doctest.testmod()
if __name__ == '__main__':
main()
|
OrkoHunter/morse-talk
|
morse_talk/plotter.py
|
Python
|
gpl-2.0
| 1,931
|
import nest
import pylab as plt
import numpy as np
"""
Reproduce result of the pairing experiment from Pfister-Gerstner (2006) with the triplet model.
"""
nest.Install("stdpmodule")
nest.set_verbosity("M_WARNING")
def generateSpikes(neuron, times):
"""Trigger spike to given neuron at specified times."""
delay = 1.0
gen = nest.Create("spike_generator", 1, { "spike_times": [t - delay for t in times] })
nest.Connect(gen, neuron, syn_spec = { "delay": delay })
def create(model, number):
"""Allow multiple model instance to be unpack as they are created."""
return map(lambda x: (x,), nest.Create(model, number))
neuron_model = "parrot_neuron"
synapse_model = "stdp_triplet_all_in_one_synapse"
syn_spec = {
"model": synapse_model,
"receptor_type": 1, # set receptor 1 post-synaptically, to not generate extra spikes
"weight": 1.0,
"tau_plus": 16.8,
"tau_plus_triplet": 101.0,
"tau_minus": 33.7,
"tau_minus_triplet": 125.0,
"Aplus": 5e-10,
"Aminus": 7e-3,
"Aplus_triplet": 6.2e-3,
"Aminus_triplet": 2.3e-4,
"Kplus": 0.0,
"Kplus_triplet": 0.0,
"Kminus": 0.0,
"Kminus_triplet": 0.0,
}
n = 60 # number of presynaptic/postsynaptic spike pairs
dt = 10 # ms shift pre/post
start_spikes = dt + 20
rhos = np.arange(1.0, 55.0, 5.0) # Hz, spiking frequency
weights_plus = []
weights_minus = []
def evaluate(rho, dt):
"""Evaluate connection change of weight and returns it."""
nest.ResetKernel()
nest.SetKernelStatus({"local_num_threads" : 1, "resolution" : 0.1, "print_time": False})
step = 1000.0 / rho
simulation_duration = np.ceil(n * step)
times_pre = np.arange(start_spikes, simulation_duration, step).round(1)
times_post = [t + dt for t in times_pre]
# Entities
neuron_pre = nest.Create(neuron_model)
neuron_post = nest.Create(neuron_model)
# Connections
generateSpikes(neuron_pre, times_pre)
generateSpikes(neuron_post, times_post)
nest.Connect(neuron_pre, neuron_post, syn_spec = syn_spec)
# Simulation
connection_stats = nest.GetConnections(neuron_pre, synapse_model = synapse_model)
current_weight = nest.GetStatus(connection_stats, ["weight"])[0][0]
nest.Simulate(start_spikes + simulation_duration)
# Results
connection_stats = nest.GetConnections(neuron_pre, synapse_model = synapse_model)
end_weight = nest.GetStatus(connection_stats, ["weight"])[0][0]
return end_weight - current_weight
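# e.g. evaluate(10.0, 10.0) simulates 60 pre/post pairs at 10 Hz with the
# post spike 10 ms after the pre spike and returns the net weight change
# (the numeric result depends on the installed stdpmodule build).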
for rho in rhos:
weights_plus.append(evaluate(rho, dt))
weights_minus.append(evaluate(rho, -dt))
plt.figure()
plt.title('Pairing experiment (Pfister-Gerstner 2006)')
plt.xlabel("rho (Hz)")
plt.ylabel("weight delta")
plt.plot(rhos, weights_plus, "b")
plt.plot(rhos, weights_minus, "b", ls = "--")
plt.legend(["dt = +10 ms", "dt = -10 ms"], loc = "upper left", frameon = False)
plt.xlim([0, 50])
plt.ylim([-0.6, 0.8])
plt.show()
|
zifeo/nest-stdpmodule
|
examples/PfisterGerstnerPairing_connection.py
|
Python
|
gpl-2.0
| 2,909
|
# coding: utf-8
"""
======================================================================
Learning and Visualizing the BMS sensor-time-weather data structure
======================================================================
This example employs several unsupervised learning techniques to extract
the energy data structure from variations in Building Automation System (BAS)
and historical weather data.
The fundamental timelet for analysis is 15 min, referred to as Q.
** currently use H (Hour) as a fundamental timelet, need to change later **
The following analysis steps are designed to be executed.
Data Pre-processing
--------------------------
- Data Retrieval and Standardization
- Outlier Detection
- Interpolation
Data Summarization
--------------------------
- Data Transformation
- Sensor Clustering
Model Discovery Bayesian Network
--------------------------
- Automatic State Classification
- Structure Discovery and Analysis
"""
#print(__doc__)
# Author: Deokwoo Jung <deokwoo.jung@gmail.com>
##################################################################
# General Modules
from __future__ import division # To force floating-point division
from data_summerization import *
##################################################################
# Interactive mode for plotting
plt.ion()
##################################################################
# Processing Configuration Settings
##################################################################
# Analysis buildings set
# Main building x where x is 1-16
# Conference bldg
# Machine Room
# All Power Measurements
IS_USING_SAVED_DICT=-1
print 'Extract a common time range...'
##################################################################
# List buildings and substation names
PRE_BN_STAGE=0
if PRE_BN_STAGE==0:
bldg_key_set=[]
print 'skip PRE_BN_STAGE....'
else:
bldg_key_set=['GW1','GW2','VAK1','VAK2']
#########################################
# 1. Electricity Room and Machine Room - 'elec_machine_room_bldg'
#########################################
#########################################
# 2. Conference Building - 'conference_bldg'
#########################################
#########################################
# 3. Main Building - 'main_bldg_x'
#########################################
for bldg_key in bldg_key_set:
print '###############################################################################'
print '###############################################################################'
print 'Processing '+ bldg_key+'.....'
print '###############################################################################'
print '###############################################################################'
temp=subprocess.check_output('ls '+DATA_DIR+'*'+bldg_key+'*.bin', shell=True)
input_files_temp =shlex.split(temp)
# Get rid of duplicated files
input_files_temp=list(set(input_files_temp))
input_files=input_files_temp
#input_files=['../gvalley/Binfiles/'+temp for temp in input_files_temp]
IS_USING_SAVED_DICT=0
print 'Extract a common time range...'
# Analysis period
ANS_START_T=dt.datetime(2013,4,1,0)
ANS_END_T=dt.datetime(2014,4,1,0)
    # Interval of timelet, currently set to 15 minutes
TIMELET_INV=dt.timedelta(minutes=15)
print TIMELET_INV, 'time slot interval is set for this data set !!'
print '-------------------------------------------------------------------'
PROC_AVG=True
PROC_DIFF=True
###############################################################################
# This directly searches files from bin file name
print '###############################################################################'
print '# Data Pre-Processing'
print '###############################################################################'
# define input_files to be read
if IS_USING_SAVED_DICT==0:
ANS_START_T,ANS_END_T,input_file_to_be_included=\
time_range_check(input_files,ANS_START_T,ANS_END_T,TIMELET_INV)
print 'time range readjusted to (' ,ANS_START_T, ', ', ANS_END_T,')'
start__dictproc_t=time.time()
if IS_SAVING_INDIVIDUAL==True:
data_dict=construct_data_dict_2(input_files,ANS_START_T,ANS_END_T,TIMELET_INV,binfilename='data_dict', IS_USING_PARALLEL=IS_USING_PARALLEL_OPT)
else:
data_dict,purge_list=construct_data_dict(input_file_to_be_included,ANS_START_T,ANS_END_T,TIMELET_INV,binfilename='data_dict',IS_USING_PARALLEL=IS_USING_PARALLEL_OPT)
end__dictproc_t=time.time()
        print 'the time to construct data_dict.bin is ', end__dictproc_t-start__dictproc_t, ' sec'
print '--------------------------------------'
elif IS_USING_SAVED_DICT==1:
print 'Loading data dictionary......'
start__dictproc_t=time.time()
data_dict = mt.loadObjectBinaryFast('data_dict.bin')
end__dictproc_t=time.time()
        print 'the time to load data_dict.bin is ', end__dictproc_t-start__dictproc_t, ' sec'
print '--------------------------------------'
else:
print 'Skip data dict'
CHECK_DATA_FORMAT=0
if CHECK_DATA_FORMAT==1:
if IS_SAVING_INDIVIDUAL==True:
list_of_wrong_data_format=verify_data_format_2(data_used,data_dict,time_slots)
else:
list_of_wrong_data_format=verify_data_format(data_used,data_dict,time_slots)
if len(list_of_wrong_data_format)>0:
print 'Measurement list below'
print '----------------------------------------'
print list_of_wrong_data_format
raise NameError('Errors in data format')
Data_Summarization=1
if Data_Summarization==1:
bldg_out=data_summerization(bldg_key,data_dict,PROC_AVG=True,PROC_DIFF=False)
RECON_BLDG_BIN_OUT=0
if RECON_BLDG_BIN_OUT==1:
for bldg_key in ['GW1_','GW2_','VAK1_','VAK2_']:
avgdata_dict=mt.loadObjectBinaryFast('./VTT/'+bldg_key+'avgdata_dict.bin')
diffdata_dict=mt.loadObjectBinaryFast('./VTT/'+bldg_key+'diffdata_dict.bin')
data_dict=mt.loadObjectBinaryFast('./VTT/'+bldg_key+'data_dict.bin')
cmd_str=remove_dot(bldg_key)+'out={\'data_dict\':data_dict}'
exec(cmd_str)
cmd_str=remove_dot(bldg_key)+'out.update({\'avgdata_dict\':avgdata_dict})'
exec(cmd_str)
cmd_str=remove_dot(bldg_key)+'out.update({\'diffdata_dict\':diffdata_dict})'
exec(cmd_str)
cmd_str=remove_dot(bldg_key)+'out.update({\'bldg_key\':remove_dot(bldg_key)})'
exec(cmd_str)
cmd_str='mt.saveObjectBinaryFast('+remove_dot(bldg_key)+'out'+',\''+PROC_OUT_DIR+remove_dot(bldg_key)+'out.bin\')'
exec(cmd_str)
print '###############################################################################'
print '# Model_Discovery'
print '###############################################################################'
bldg_key_set=['GW1','GW2','VAK1','VAK2']
Model_Discovery=1
if Model_Discovery==1:
pwr_key='_POWER_';dict_dir='./VTT/'
LOAD_BLDG_OBJ=0
if LOAD_BLDG_OBJ==1:
print 'not yet ready'
bldg_=mt.loadObjectBinaryFast(PROC_OUT_DIR+'vtt_bldg_obj.bin')
else:
bldg_dict={}
bldg_load_out = {}
for bldg_load_key in bldg_key_set:
print 'Building for ',bldg_load_key, '....'
try:
bldg_tag='vtt_'+bldg_load_key
bldg_load_out=mt.loadObjectBinaryFast(dict_dir+bldg_load_key+'_out.bin')
except:
print 'not found, skip....'
pass
mt.saveObjectBinaryFast(bldg_load_out['data_dict'],dict_dir+'data_dict.bin')
if 'avgdata_dict' in bldg_load_out.keys():
mt.saveObjectBinaryFast(bldg_load_out['avgdata_dict'],dict_dir+'avgdata_dict.bin')
if 'diffdata_dict' in bldg_load_out.keys():
mt.saveObjectBinaryFast(bldg_load_out['diffdata_dict'],dict_dir+'diffdata_dict.bin')
pname_key= pwr_key
bldg_dict.update({bldg_tag:create_bldg_obj(dict_dir,bldg_tag,pname_key)})
bldg_=obj(bldg_dict)
#cmd_str='bldg_.'+bldg_tag+'.data_out=obj(bldg_load_out)'
#exec(cmd_str)
cmd_str='bldg_obj=bldg_.'+bldg_tag
exec(cmd_str)
anal_out={}
if 'avgdata_dict' in bldg_load_out.keys():
anal_out.update({'avg':bn_prob_analysis(bldg_obj,sig_tag_='avg')})
if 'diffdata_dict' in bldg_load_out.keys():
anal_out.update({'diff':bn_prob_analysis(bldg_obj,sig_tag_='diff')})
cmd_str='bldg_.'+bldg_tag+'.anal_out=obj(anal_out)'
exec(cmd_str)
mt.saveObjectBinaryFast(bldg_ ,PROC_OUT_DIR+'vtt_bldg_obj.bin')
mt.saveObjectBinaryFast('LOAD_BLDG_OBJ' ,PROC_OUT_DIR+'vtt_bldg_obj_is_done.txt')
def convert_vtt_name(id_labels):
if isinstance(id_labels,list)==False:
id_labels=[id_labels]
out_name=[key_label_ for key_label_ in id_labels ]
return out_name
bldg_.convert_name=convert_vtt_name
#######################################################################################
# Analysis For GSBC
#######################################################################################
# Analysis of BN network result
BN_ANAL=0
if BN_ANAL==1:
# Plotting individual LHs
PLOTTING_LH=0
if PLOTTING_LH==1:
plotting_bldg_lh(bldg_,attr_class='sensor',num_picks=30)
plotting_bldg_lh(bldg_,attr_class='time',num_picks=30)
plotting_bldg_lh(bldg_,attr_class='weather',num_picks=30)
PLOTTING_BN=1
if PLOTTING_BN==1:
plotting_bldg_bn(bldg_)
More_BN_ANAL=0
if More_BN_ANAL==1:
#######################################################################################
# Analysis For GSBC
#######################################################################################
#bldg_obj=bldg_.GSBC_main_bldg_power_machine_room
bldg_obj=bldg_.GSBC_main_bldg_power_machine_room
bldg_.GSBC_main_bldg_power_machine_room.anal_out=bn_prob_analysis(bldg_obj,sig_tag_='avg')
bldg_obj=bldg_.GSBC_main_bldg_1
bldg_.GSBC_main_bldg_1.anal_out=bn_prob_analysis(bldg_obj,sig_tag_='avg')
import pdb;pdb.set_trace()
#--------------------------------------------------------------------------
# Analysis Display
#--------------------------------------------------------------------------
# Data set 1 - GSBC_main_bldg_power_machine_room
p_name_sets_1=bldg_.GSBC_main_bldg_power_machine_room.anal_out.__dict__.keys()
bn_out_sets_1=bldg_.GSBC_main_bldg_power_machine_room.anal_out.__dict__
# Data set 2 - GSBC_main_bldg_1
p_name_sets_2=bldg_.GSBC_main_bldg_1.anal_out.__dict__.keys()
bn_out_sets_2=bldg_.GSBC_main_bldg_1.anal_out.__dict__
# Data set 2 Analysis
print 'List power meters for analysis'
print '------------------------------------'
pprint.pprint(np.array([p_name_sets_1,convert_gsbc_name(p_name_sets_1)]).T)
print '------------------------------------'
p_name=p_name_sets_1[3]
bn_out=bn_out_sets_1[p_name]
fig_name='BN for Sensors '+convert_gsbc_name(p_name)[0]
fig=figure(fig_name,figsize=(30.0,30.0))
col_name=[str(np.array([[lab1],[remove_dot(lab2)]])) \
for lab1,lab2 in zip(bn_out.s_labels, convert_gsbc_name(bn_out.s_labels))]
rbn.nx_plot(bn_out.s_hc,col_name,graph_layout='spring',node_text_size=15)
png_name=fig_name+'_'+str(uuid.uuid4().get_hex().upper()[0:2])
fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
fig_name='BN for Time '+convert_gsbc_name(p_name)[0]
fig=figure(fig_name,figsize=(30.0,30.0))
rbn.nx_plot(bn_out.t_hc,convert_gsbc_name(bn_out.t_labels),graph_layout='spring',node_text_size=12)
png_name=fig_name+'_'+str(uuid.uuid4().get_hex().upper()[0:2])
fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
fig_name='BN for Weather '+convert_gsbc_name(p_name)[0]
fig=figure(fig_name,figsize=(30.0,30.0))
rbn.nx_plot(bn_out.w_hc,convert_gsbc_name(bn_out.w_labels),graph_layout='spring',node_text_size=12)
png_name=fig_name+str(uuid.uuid4().get_hex().upper()[0:2])
fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
fig_name='BN for Sensor-Time-Weather '+convert_gsbc_name(p_name)[0]
fig=figure(fig_name,figsize=(30.0,30.0))
rbn.nx_plot(bn_out.all_hc,convert_gsbc_name(bn_out.all_labels),graph_layout='spring',node_text_size=20)
png_name=fig_name+'_'+str(uuid.uuid4().get_hex().upper()[0:2])
fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
fig_name='BN PEAK LH Analysis for Sensor-Time-Weather '+convert_gsbc_name(p_name)[0]
fig=figure(fig_name, figsize=(30.0,30.0))
subplot(2,1,1)
plot(bn_out.all_cause_symbol_xtick,bn_out.high_peak_prob,'-^')
plot(bn_out.all_cause_symbol_xtick,bn_out.low_peak_prob,'-v')
plt.ylabel('Likelihood',fontsize='large')
plt.xticks(bn_out.all_cause_symbol_xtick,bn_out.all_cause_symbol_xlabel,rotation=270, fontsize=10)
plt.tick_params(labelsize='large')
plt.legend(('High Peak', 'Low Peak'),loc='center right')
plt.tick_params(labelsize='large')
plt.grid();plt.ylim([-0.05,1.05])
plt.title('Likelihood of '+ str(remove_dot(convert_gsbc_name(p_name)))+\
' given '+'\n'+str(remove_dot(convert_gsbc_name(bn_out.all_cause_label))))
png_name=fig_name+'_'+str(uuid.uuid4().get_hex().upper()[0:2])
fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
# Compare with the raw data
#-------------------------------------------
start_t=datetime.datetime(2013, 8, 9, 0, 0, 0)
end_t=datetime.datetime(2013, 8, 13, 0, 0, 0)
data_x=get_data_set([label_[2:] for label_ in bn_out.all_cause_label]+[p_name[2:]],start_t,end_t)
png_namex=plot_data_x(data_x,stype='raw',smark='-^')
png_namex=plot_data_x(data_x,stype='diff',smark='-^')
name_list_out=[[p_name]+bn_out.all_cause_label,convert_gsbc_name([p_name]+bn_out.all_cause_label)]
pprint.pprint(np.array(name_list_out).T)
pprint.pprint(name_list_out)
start_t=datetime.datetime(2013, 7, 1, 0, 0, 0)
end_t=datetime.datetime(2013, 12, 31, 0, 0, 0)
data_x=get_data_set([label_[2:] for label_ in bn_out.s_labels],start_t,end_t)
png_namex=plot_data_x(data_x,stype='raw',smark='-^',fontsize='small',xpos=0.00)
png_namex=plot_data_x(data_x,stype='diff',smark='-^')
"""
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
print '----------------------------------------'
print 'Likelihoods '
print '----------------------------------------'
print cause_label+['Low Peak','High Peak']
print '----------------------------------------'
print np.vstack((np.int0(peak_state).T,np.int0(100*lowpeak_prob).T,np.int0(100*peak_prob).T)).T
print '----------------------------------------'
s_val_set=set(peak_state[:,0])
m_val_set=set(peak_state[:,1])
Z_peak=np.ones((len(s_val_set),len(m_val_set)))*np.inf
for i,s_val in enumerate(s_val_set):
for j,m_val in enumerate(m_val_set):
idx=np.nonzero((peak_state[:,0]==s_val)&(peak_state[:,1]==m_val))[0][0]
Z_peak[i,j]=peak_prob[idx]
s_val_set=set(lowpeak_state[:,0])
m_val_set=set(lowpeak_state[:,1])
Z_lowpeak=np.ones((len(s_val_set),len(m_val_set)))*np.inf
for i,s_val in enumerate(s_val_set):
for j,m_val in enumerate(m_val_set):
idx=np.nonzero((lowpeak_state[:,0]==s_val)&(lowpeak_state[:,1]==m_val))[0][0]
Z_lowpeak[i,j]=lowpeak_prob[idx]
Z_lowpeak=lowpeak_prob.reshape((len(s_val_set),len(m_val_set)))
Z_peak=peak_prob.reshape((len(s_val_set),len(m_val_set)))
fig1=figure()
im = plt.imshow(Z_peak, cmap='hot',vmin=0, vmax=1,aspect='auto')
plt.colorbar(im, orientation='horizontal')
plt.xticks(monthDict.keys(),monthDict.values(),fontsize='large')
plt.yticks(range(len(s_val_set)),list(s_val_set),fontsize='large')
plt.xlabel(cause_label[1],fontsize='large')
plt.ylabel(cause_label[0],fontsize='large')
plt.title('Likelihood of High-Peak')
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig1.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
fig2=figure()
im = plt.imshow(Z_lowpeak, cmap='hot',vmin=0, vmax=1,aspect='auto')
plt.colorbar(im, orientation='horizontal')
plt.xticks(monthDict.keys(),monthDict.values(),fontsize='large')
plt.yticks(range(len(s_val_set)),list(s_val_set),fontsize='large')
plt.xlabel(cause_label[1],fontsize='large')
plt.ylabel(cause_label[0],fontsize='large')
plt.title('Likelihood of Low-Peak')
png_name=str(uuid.uuid4().get_hex().upper()[0:6])
fig2.savefig(fig_dir+png_name+'.png', bbox_inches='tight')
"""
print '**************************** End of Program ****************************'
|
TinyOS-Camp/DDEA-DEV
|
Archive/[14_09_13] Code_Base/df_data_analysis_ddea.py
|
Python
|
gpl-2.0
| 17,131
|
from rutinas import *
inputdir="ETERNA-INI/"
outputdir="ETERNA-OUT/"
inputdb=argv[1]
f=open(inputdir+"%s-INI/numeracionsismos.dat"%inputdb,"r")
allphases=[]
n=1
for line in f:
if "#" in line:continue
line=line.strip()
parts=line.split()
numsismo=parts[0]
fecha=parts[1]
hora=parts[2]
#print fecha, hora, numsismo
fechavin=fecha.split("/")
horavin=hora.split(":")
#print fechavin
#print horavin
fechadelarchivo=[fechavin[2],fechavin[1],fechavin[0],horavin[0],horavin[1],horavin[2]]
jd=fecha2jd_all(fechadelarchivo)
#print "jd", jd
fechadelsismo=jd+90.0
print "fechadelsismo=",fechadelsismo
####################################
    # converting back to a Gregorian date
####################################
fechaorigensismo=jdcal.jd2gcal(0.0,fechadelsismo)
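    # jdcal.jd2gcal sums its two arguments into a full Julian Date and
    # returns (year, month, day, fraction_of_day); e.g.
    # jdcal.jd2gcal(0.0, 2451545.0) == (2000, 1, 1, 0.5)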
#print "tiempot=", fechaorigensismo
dia_inicio=str(fechaorigensismo[2])
mes_inicio=str(fechaorigensismo[1])
ano_inicio=str(fechaorigensismo[0])
origensismo=dia_inicio+"/"+mes_inicio+"/"+ano_inicio
print "origensismo=", origensismo
print "Calculando fases de sismo '%s'..."%numsismo
cmd="python fourier-sismos-datojuliano.py %s/%s-OUT/ %s %s %s"%(outputdir,inputdb,origensismo,hora,numsismo)
system(cmd)
phases=loadtxt(".phases")
allphases+=[[int(numsismo)]+[int(fechadelsismo)]+phases.tolist()]
n+=1
# if n>2:break
savetxt("allphases-%s.dat"%(inputdb),allphases)
f.close()
|
seap-udea/tQuakes
|
util/Legacy/allphases-datojuliano.py
|
Python
|
gpl-2.0
| 1,457
|
import bpy
d = bpy.context.scene.cam_operations[bpy.context.scene.cam_active_operation]
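# 'd' is the active CAM operation; the values below define a 3.00 mm
# cylindrical end-mill preset (cutter_diameter appears to be in meters,
# matching the file name; the remaining fields follow Blender CAM's
# conventions).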
d.cutter_type = 'END'
d.cutter_diameter = 0.003
d.cutter_length = 25.0
d.cutter_tip_angle = 60.0
|
vilemnovak/blendercam
|
scripts/presets/cam_cutters/end_cyl_3.00mm.py
|
Python
|
gpl-2.0
| 186
|
#!/home/paulk/software/bin/python
from sys import argv,exit,stderr
from cPickle import dump,HIGHEST_PROTOCOL
import argparse
plain_chroms = map(str,range(1,23))+['X','Y','MT']
parser = argparse.ArgumentParser(description="Script to parse a gene-formatted file to give a PIC of 0-based regions by default")
parser.add_argument('infile',help="gene-formatted input file; chromosomes can be either <id> or chr<id> e.g. '3' or 'chr3'")
parser.add_argument('outfile',help="output PIC file")
parser.add_argument('-o','--one-based',default=False,action='store_true',help="use 1-based indexing basis [default: 0-based]")
parser.add_argument('-a','--all-chroms',default=False,action='store_true',help="use all chromosome names (even '*MHC*') [default: false]")
parser.add_argument('-s','--suppress-col-two',default=False,action='store_true',help="to be used when dealing with genes; suppress the second column of the gene file [default: false]")
args = parser.parse_args()
fn = args.infile
ofn = args.outfile
one_based_indexing = args.one_based
use_all_chroms = args.all_chroms
suppress = args.suppress_col_two
f = open(fn)
data = dict()
for row in f:
l = row.strip().split('\t')
if l[2] not in plain_chroms and not use_all_chroms: continue
if not one_based_indexing:
st = int(l[3])-1
sp = int(l[4])-1
else:
st = int(l[3])
sp = int(l[4])
if suppress:
if l[2][0] == 'c': data[l[0]+":"+l[1]] = l[2]+":"+str(st)+"-"+str(sp)+":"+l[5]
else: data[l[0]+":"+l[1]] = "chr"+l[2]+":"+str(st)+"-"+str(sp)+":"+l[5]
else:
if l[2][0] == 'c': data[l[0]] = l[2]+":"+str(st)+"-"+str(sp)+":"+l[5]
else: data[l[0]] = "chr"+l[2]+":"+str(st)+"-"+str(sp)+":"+l[5]
f.close()
f = open(ofn,'w')
dump(data,f,HIGHEST_PROTOCOL)
f.close()
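# Usage sketch (file names are hypothetical):
#   python genefile_to_PIC.py genes.txt regions.pic       # 0-based regions
#   python genefile_to_PIC.py genes.txt regions.pic -o -a # 1-based, all chromosomes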
|
polarise/RP-python
|
genefile_to_PIC.py
|
Python
|
gpl-2.0
| 1,729
|
# Copyright (C) 2013-2015
# Sean Poyser (seanpoyser@gmail.com)
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with XBMC; see the file COPYING. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# http://www.gnu.org/copyleft/gpl.html
#
import xbmc
import os
import re
import urllib
import utils
import sfile
SHOWUNAVAIL = utils.ADDON.getSetting('SHOWUNAVAIL') == 'true'
def getFavourites(file, limit=10000, validate=True, superSearch=False, chooser=False):
import xbmcgui
prefix = ''
if not chooser:
prefix = 'HOME:' if xbmcgui.getCurrentWindowId() == 10000 else ''
xml = '<favourites></favourites>'
if sfile.exists(file):
xml = sfile.read(file)
items = []
faves = re.compile('<favourite(.+?)</favourite>').findall(xml)
for fave in faves:
fave = fave.replace('"', '&_quot_;')
fave = fave.replace('\'', '"')
fave = utils.unescape(fave)
fave = fave.replace('name=""', '')
try: name = re.compile('name="(.+?)"').findall(fave)[0]
except: name = ''
try: thumb = re.compile('thumb="(.+?)"').findall(fave)[0]
except: thumb = ''
try: cmd = fave.split('>', 1)[-1]
except: cmd = ''
#name = utils.Clean(name.replace( '&_quot_;', '"'))
name = name.replace( '&_quot_;', '"')
thumb = thumb.replace('&_quot_;', '"')
cmd = cmd.replace( '&_quot_;', '"')
add = False
if superSearch:
add = isValid(cmd)
elif (SHOWUNAVAIL) or (not validate) or isValid(cmd):
add = True
if add:
cmd = upgradeCmd(cmd)
if cmd.startswith('PlayMedia'):
option = 'mode'
try:
                    mode = int(getOption(cmd, option)) # getOption is defined in this module
except:
win = xbmcgui.getCurrentWindowId()
cmd = updateSFOption(cmd, 'winID', win)
name = resolve(name)
cmd = patch(cmd)
cmd = resolve(cmd)
cmd = prefix + cmd
items.append([name, thumb, cmd])
if len(items) > limit:
return items
return items
def resolve(text):
try:
if '$LOCALIZE' in text:
id = int(re.compile('\$LOCALIZE\[(.+?)\]').search(text).group(1))
text = text.replace('$LOCALIZE[%d]' % id, xbmc.getLocalizedString(id))
return resolve(text)
if '$INFO' in text:
str = re.compile('\$INFO\[(.+?)\]').search(text).group(1)
text = text.replace('$INFO[%s]' % str, xbmc.getInfoLabel(str))
return resolve(text)
except:
pass
return text
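# e.g. resolve('$LOCALIZE[<id>]') expands via xbmc.getLocalizedString and
# '$INFO[<label>]' via xbmc.getInfoLabel; expansion recurses until no tokens
# remain (actual values depend on the running Kodi instance).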
def patch(cmd):
    # protect escaped and unescaped '",return' while stripping bare ',return'
    cmd = cmd.replace('\\",return', 'SF_PATCHING1')
    cmd = cmd.replace('",return', 'SF_PATCHING2')
    cmd = cmd.replace(',return', '')
    cmd = cmd.replace('SF_PATCHING1', '\\",return')
    cmd = cmd.replace('SF_PATCHING2', '",return')
    return cmd
def upgradeCmd(cmd):
fanart = _getFanart(cmd)
winID = _getWinID(cmd)
cmd = _removeFanart(cmd)
cmd = _removeWinID(cmd)
options = {}
if fanart:
options['fanart'] = fanart
if winID > -1:
options['winID'] = winID
if len(options) > 0:
cmd = updateSFOptions(cmd, options)
return cmd
def writeFavourites(file, faves):
kodiFile = os.path.join('special://profile', utils.FILENAME)
isKodi = xbmc.translatePath(file) == xbmc.translatePath(kodiFile)
f = sfile.file(file, 'w')
f.write('<favourites>')
for fave in faves:
try:
name = utils.escape(fave[0])
thumb = utils.escape(fave[1])
cmd = utils.escape(fave[2])
if isKodi and cmd.lower().startswith('playmedia'):
cmd = removeSFOptions(cmd)
thumb = utils.convertToHome(thumb)
name = 'name="%s" ' % name
thumb = 'thumb="%s">' % thumb
f.write('\n\t<favourite ')
f.write(name)
f.write(thumb)
f.write(cmd)
f.write('</favourite>')
except:
pass
f.write('\n</favourites>')
f.close()
import xbmcgui
try: count = int(xbmcgui.Window(10000).getProperty('Super_Favourites_Count'))
except: count = 0
xbmcgui.Window(10000).setProperty('Super_Favourites_Count', str(count+1))
def tidy(cmd):
cmd = cmd.replace('"', '')
cmd = cmd.replace('&', '&')
cmd = removeSFOptions(cmd)
if cmd.startswith('RunScript'):
cmd = cmd.replace('?content_type=', '&content_type=')
cmd = re.sub('/&content_type=(.+?)"\)', '")', cmd)
if cmd.endswith('/")'):
cmd = cmd.replace('/")', '")')
if cmd.endswith(')")'):
cmd = cmd.replace(')")', ')')
return cmd
def isValid(cmd):
if len(cmd) == 0:
return False
cmd = tidy(cmd)
#if 'PlayMedia' in cmd:
if cmd.startswith('PlayMedia'):
return utils.verifyPlayMedia(cmd)
#if 'RunScript' in cmd:
if cmd.startswith('RunScript'):
cmd = re.sub('/&content_type=(.+?)"\)', '")', cmd)
if not utils.verifyScript(cmd):
return False
if 'plugin' in cmd:
if not utils.verifyPlugin(cmd):
return False
return True
def updateFave(file, update):
cmd = update[2]
fave, index, nFaves = findFave(file, cmd)
removeFave(file, cmd)
return insertFave(file, update, index)
def replaceFave(file, update, oldCmd):
fave, index, nFaves = findFave(file, oldCmd)
if index < 0:
return addFave(file, update)
removeFave(file, oldCmd)
return insertFave(file, update, index)
def findFave(file, cmd):
cmd = removeSFOptions(cmd)
faves = getFavourites(file, validate=False)
for idx, fave in enumerate(faves):
if equals(fave[2], cmd):
return fave, idx, len(faves)
search = os.path.join(xbmc.translatePath(utils.ROOT), 'Search', utils.FILENAME).lower()
if file.lower() != search:
return None, -1, 0
for idx, fave in enumerate(faves):
if '[%SF%]' in fave[2]:
test = fave[2].split('[%SF%]', 1)
if cmd.startswith(test[0]) and cmd.endswith(test[1]):
return fave, idx, len(faves)
if '[%SF+%]' in fave[2]:
test = fave[2].split('[%SF+%]', 1)
if cmd.startswith(test[0]) and cmd.endswith(test[1]):
return fave, idx, len(faves)
return None, -1, 0
def insertFave(file, newFave, index):
copy = []
faves = getFavourites(file, validate=False)
for fave in faves:
if len(copy) == index:
copy.append(newFave)
copy.append(fave)
if index >= len(copy):
copy.append(newFave)
writeFavourites(file, copy)
return True
def addFave(file, newFave):
faves = getFavourites(file, validate=False)
faves.append(newFave)
writeFavourites(file, faves)
return True
def moveFave(src, dst, fave):
if not copyFave(dst, fave):
return False
return removeFave(src, fave[2])
def copyFave(file, original):
faves = getFavourites(file, validate=False)
updated = False
copy = list(original)
copy = removeSFOptions(copy[2])
#if it is already in then just update it
for idx, fave in enumerate(faves):
if equals(removeSFOptions(fave[2]), copy):
updated = True
faves[idx] = original
break
if not updated:
faves.append(original)
writeFavourites(file, faves)
return True
def removeFave(file, cmd):
cmd = removeSFOptions(cmd)
copy = []
faves = getFavourites(file, validate=False)
for fave in faves:
if not equals(removeSFOptions(fave[2]), cmd):
copy.append(fave)
if len(copy) == len(faves):
return False
writeFavourites(file, copy)
return True
def _shiftUpIndex(index, max, faves):
index -= 1
if index < 0:
index = max
cmd = faves[index][2]
if isValid(cmd):
return index
return _shiftUpIndex(index, max, faves)
def _shiftDownIndex(index, max, faves):
index += 1
if index > max:
index = 0
cmd = faves[index][2]
if isValid(cmd):
return index
return _shiftDownIndex(index, max, faves)
def shiftFave(file, cmd, up):
faves = getFavourites(file, validate=True)
if len(faves) < 2:
return
faves = getFavourites(file, validate=False)
fave, index, nFaves = findFave(file, cmd)
max = nFaves - 1
if up:
index = _shiftUpIndex(index, max, faves)
else:
index = _shiftDownIndex(index, max, faves)
removeFave(file, cmd)
return insertFave(file, fave, index)
def renameFave(file, cmd, newName):
copy = []
faves = getFavourites(file, validate=False)
for fave in faves:
if equals(fave[2], cmd):
fave[0] = newName
copy.append(fave)
writeFavourites(file, copy)
return True
def equals(fave, cmd):
fave = fave.strip()
cmd = cmd.strip()
if fave == cmd:
return True
fave = removeSFOptions(fave)
cmd = removeSFOptions(cmd)
if fave == cmd:
return True
if fave == cmd.replace('")', '/")'):
return True
if '[%SF%]' in fave:
test = fave.split('[%SF%]', 1)
if cmd.startswith(test[0]) and cmd.endswith(test[1]):
return True
if '[%SF+%]' in fave:
test = fave.split('[%SF+%]', 1)
if cmd.startswith(test[0]) and cmd.endswith(test[1]):
return True
return False
def addFanart(cmd, fanart):
if len(fanart) < 1:
return cmd
return updateSFOption(cmd, 'fanart', utils.convertToHome(fanart))
def updateSFOption(cmd, option, value):
options = getSFOptions(cmd)
options[option] = value
return updateSFOptions(cmd, options)
def updateSFOptions(cmd, options):
cmd = removeSFOptions(cmd)
if len(options) == 0:
return cmd
hasReturn = False
if cmd.endswith(',return)'):
hasReturn = True
cmd = cmd.replace(',return', '')
if cmd.endswith('")'):
cmd = cmd.rsplit('")', 1)[0]
suffix = '?'
if '?' in cmd:
suffix = '&'
values = ''
for key in options.keys():
value = str(options[key])
if len(value) > 0:
values += '%s=%s&' % (key, value)
if len(values) > 0:
cmd += suffix + 'sf_options=%s_options_sf"' % urllib.quote_plus(values)
else:
cmd += '"'
if hasReturn:
cmd += ',return)'
else:
cmd += ')'
return cmd
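# Hand-derived sketch of the resulting command string:
#   updateSFOption('PlayMedia("x")', 'fanart', 'f.jpg')
#   -> 'PlayMedia("x?sf_options=fanart%3Df.jpg%26_options_sf")'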
def getSFOptions(cmd):
try: options = urllib.unquote_plus(re.compile('sf_options=(.+?)_options_sf').search(cmd).group(1))
except: return {}
return get_params(options)
def removeSFOptions(cmd):
if 'sf_options=' not in cmd:
return cmd
cmd = cmd.replace('?sf_options=', '&sf_options=')
cmd = re.sub('&sf_options=(.+?)_options_sf"\)', '")', cmd)
cmd = re.sub('&sf_options=(.+?)_options_sf",return\)', '",return)', cmd)
cmd = re.sub('&sf_options=(.+?)_options_sf', '', cmd)
cmd = cmd.replace('/")', '")')
return cmd
def getFanart(cmd):
return getOption(cmd, 'fanart')
def getOption(cmd, option):
options = getSFOptions(cmd)
try: return options[option]
except: return ''
def get_params(p):
param=[]
paramstring=p
if len(paramstring)>=2:
params=p
cleanedparams=params.replace('?','')
if (params[len(params)-1]=='/'):
params=params[0:len(params)-2]
pairsofparams=cleanedparams.split('&')
param={}
for i in range(len(pairsofparams)):
splitparams={}
splitparams=pairsofparams[i].split('=')
if (len(splitparams))==2:
param[splitparams[0]]=splitparams[1]
return param
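# e.g. get_params('a=1&b=2') -> {'a': '1', 'b': '2'}; inputs shorter than
# two characters fall through and return the initial empty list instead.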
#used only during upgrade process
def _removeFanart(cmd):
if 'sf_fanart=' not in cmd:
return cmd
cmd = cmd.replace('?sf_fanart=', '&sf_fanart=')
cmd = cmd.replace('&sf_fanart=', '&sf_fanart=X') #in case no fanart
cmd = re.sub('&sf_fanart=(.+?)_"\)', '")', cmd)
cmd = re.sub('&sf_fanart=(.+?)_",return\)', '",return)', cmd)
cmd = re.sub('&sf_fanart=(.+?)_', '', cmd)
cmd = cmd.replace('/")', '")')
return cmd
#used only during upgrade process
def _getFanart(cmd):
cmd = cmd.replace(',return', '')
try: return urllib.unquote_plus(re.compile('sf_fanart=(.+?)_"\)').search(cmd).group(1))
except: pass
cmd = urllib.unquote_plus(cmd)
cmd = cmd.replace(',return', '')
try: return urllib.unquote_plus(re.compile('sf_fanart=(.+?)_"\)').search(cmd).group(1))
except: pass
return ''
#used only during upgrade process
def _removeWinID(cmd):
if 'sf_win_id' not in cmd:
return cmd
cmd = cmd.replace('?sf_win_id=', '&sf_win_id=')
cmd = cmd.replace('&sf_win_id=', '&sf_win_id=X') #in case no win_id
cmd = re.sub('&sf_win_id=(.+?)_"\)', '")', cmd)
return cmd
#used only during upgrade process
def _getWinID(cmd):
if 'sf_win_id' not in cmd:
return -1
try: return int(re.compile('sf_win_id=(.+?)_').search(cmd).group(1))
except: pass
return -1
|
EdLogan18/logan-repository
|
plugin.program.super.favourites/favourite.py
|
Python
|
gpl-2.0
| 14,110
|
# -*- coding: utf-8 -*-
def social_nail(entity, argument):
return True
#- End of Function -
|
Onirik79/aaritmud
|
src/socials/social_nail.py
|
Python
|
gpl-2.0
| 95
|
######################
# ANALYSIS QUESTIONS #
######################
# Change these default values to obtain the specified policies through
# value iteration.
def question2():
answerDiscount = 0.9
answerNoise = 0.2
return answerDiscount, answerNoise
def question3a():
answerDiscount = 0.9
answerNoise = 0.2
answerLivingReward = 0.0
return answerDiscount, answerNoise, answerLivingReward
# If not possible, return 'NOT POSSIBLE'
def question3b():
answerDiscount = 0.9
answerNoise = 0.2
answerLivingReward = 0.0
return answerDiscount, answerNoise, answerLivingReward
# If not possible, return 'NOT POSSIBLE'
def question3c():
answerDiscount = 0.9
answerNoise = 0.2
answerLivingReward = 0.0
return answerDiscount, answerNoise, answerLivingReward
# If not possible, return 'NOT POSSIBLE'
def question3d():
answerDiscount = 0.9
answerNoise = 0.2
answerLivingReward = 0.0
return answerDiscount, answerNoise, answerLivingReward
# If not possible, return 'NOT POSSIBLE'
def question3e():
answerDiscount = 0.9
answerNoise = 0.2
answerLivingReward = 0.0
return answerDiscount, answerNoise, answerLivingReward
# If not possible, return 'NOT POSSIBLE'
def question6():
answerEpsilon = None
answerLearningRate = None
return answerEpsilon, answerLearningRate
# If not possible, return 'NOT POSSIBLE'
if __name__ == '__main__':
print 'Answers to analysis questions:'
import analysis
for q in [q for q in dir(analysis) if q.startswith('question')]:
response = getattr(analysis, q)()
print ' Question %s:\t%s' % (q, str(response))
|
PhilippeMorere/PacmanRL
|
analysis.py
|
Python
|
gpl-2.0
| 1,604
|
# Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
# See COPYING for license information.
import os
import subprocess
from lxml import etree, doctestcompare
import copy
import bz2
from collections import defaultdict
from . import config
from . import options
from . import schema
from . import constants
from .msg import common_err, common_error, common_debug, cib_parse_err, err_buf
from . import userdir
from . import utils
from .utils import add_sudo, str2file, str2tmp, get_boolean
from .utils import get_stdout, stdout2list, crm_msec, crm_time_cmp
from .utils import olist, get_cib_in_use, get_tempdir
def xmlparse(f):
try:
cib_elem = etree.parse(f).getroot()
except Exception, msg:
common_err("cannot parse xml: %s" % msg)
return None
return cib_elem
def file2cib_elem(s):
try:
f = open(s, 'r')
except IOError, msg:
common_err(msg)
return None
cib_elem = xmlparse(f)
f.close()
if options.regression_tests and cib_elem is None:
print "Failed to read CIB from file: %s" % (s)
return cib_elem
def compressed_file_to_cib(s):
try:
if s.endswith('.bz2'):
import bz2
f = bz2.BZ2File(s)
elif s.endswith('.gz'):
import gzip
f = gzip.open(s)
else:
f = open(s)
except IOError, msg:
common_err(msg)
return None
cib_elem = xmlparse(f)
if options.regression_tests and cib_elem is None:
print "Failed to read CIB from file %s" % (s)
f.seek(0)
print f.read()
f.close()
return cib_elem
cib_dump = "cibadmin -Ql"
def sudocall(cmd):
cmd = add_sudo(cmd)
if options.regression_tests:
print ".EXT", cmd
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
outp, errp = p.communicate()
p.wait()
return p.returncode, outp, errp
except IOError, msg:
common_err("running %s: %s" % (cmd, msg))
return None, None, None
def cibdump2file(fname):
_, outp, _ = sudocall(cib_dump)
if outp is not None:
return str2file(outp, fname)
return None
def cibdump2tmp():
try:
_, outp, _ = sudocall(cib_dump)
if outp is not None:
return str2tmp(outp)
except IOError, msg:
common_err(msg)
return None
def cibtext2elem(cibtext):
"""
Convert a text format CIB to
an XML tree.
"""
try:
return etree.fromstring(cibtext)
except Exception, err:
cib_parse_err(err, cibtext)
return None
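# e.g. cibtext2elem('<cib><configuration/></cib>') returns the root <cib>
# element; malformed XML logs a parse error and returns None.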
def cibdump2elem(section=None):
if section:
cmd = "%s -o %s" % (cib_dump, section)
else:
cmd = cib_dump
rc, outp, errp = sudocall(cmd)
if rc == 0:
return cibtext2elem(outp)
elif rc != constants.cib_no_section_rc:
common_error("running %s: %s" % (cmd, errp))
return None
def read_cib(fun, params=None):
cib_elem = fun(params)
if cib_elem is None or cib_elem.tag != "cib":
return None
return cib_elem
def sanity_check_nvpairs(id, node, attr_list):
rc = 0
for nvpair in node.iterchildren("nvpair"):
n = nvpair.get("name")
if n and n not in attr_list:
common_err("%s: attribute %s does not exist" % (id, n))
rc |= utils.get_check_rc()
return rc
def sanity_check_meta(id, node, attr_list):
rc = 0
if node is None or not attr_list:
return rc
for c in node.iterchildren():
if c.tag == "meta_attributes":
rc |= sanity_check_nvpairs(id, c, attr_list)
return rc
def get_interesting_nodes(node, nodes_l):
'''
All nodes which can be represented as CIB objects.
'''
for c in node.iterchildren():
if is_cib_element(c):
nodes_l.append(c)
get_interesting_nodes(c, nodes_l)
return nodes_l
def get_top_cib_nodes(node, nodes_l):
'''
All nodes which can be represented as CIB objects, but not
nodes which are children of other CIB objects.
'''
for c in node.iterchildren():
if is_cib_element(c):
nodes_l.append(c)
else:
get_top_cib_nodes(c, nodes_l)
return nodes_l
class RscState(object):
'''
Get the resource status and some other relevant bits.
In particular, this class should allow for a bit of caching
of cibadmin -Q -o resources output in case we need to check
more than one resource in a row.
'''
rsc_status = "crm_resource -W -r '%s'"
def __init__(self):
self.current_cib = None
self.rsc_elem = None
self.prop_elem = None
self.rsc_dflt_elem = None
def _init_cib(self):
cib = cibdump2elem("configuration")
self.current_cib = cib
self.rsc_elem = get_first_conf_elem(cib, "resources")
self.prop_elem = get_first_conf_elem(cib, "crm_config/cluster_property_set")
self.rsc_dflt_elem = get_first_conf_elem(cib, "rsc_defaults/meta_attributes")
def rsc2node(self, id):
'''
Get a resource XML element given the id.
NB: this is called from almost all other methods.
Hence we initialize the cib here. CIB reading is
expensive.
'''
if self.rsc_elem is None:
self._init_cib()
if self.rsc_elem is None:
return None
# does this need to be optimized?
expr = './/*[@id="%s"]' % id
try:
return self.rsc_elem.xpath(expr)[0]
except (IndexError, AttributeError):
return None
def is_ms(self, id):
'''
Test if the resource is master-slave.
'''
rsc_node = self.rsc2node(id)
if rsc_node is None:
return False
return is_ms(rsc_node)
def rsc_clone(self, id):
'''
Return id of the clone/ms containing this resource
or None if it's not cloned.
'''
rsc_node = self.rsc2node(id)
if rsc_node is None:
return None
pnode = rsc_node.getparent()
if pnode is None:
return None
if is_group(pnode):
pnode = pnode.getparent()
if is_clonems(pnode):
return pnode.get("id")
return None
def is_managed(self, id):
'''
Is this resource managed?
'''
rsc_node = self.rsc2node(id)
if rsc_node is None:
return False
# maintenance-mode, if true, overrides all
attr = get_attr_value(self.prop_elem, "maintenance-mode")
if attr and is_xs_boolean_true(attr):
return False
# then check the rsc is-managed meta attribute
rsc_meta_node = get_rsc_meta_node(rsc_node)
attr = get_attr_value(rsc_meta_node, "is-managed")
if attr:
return is_xs_boolean_true(attr)
# then rsc_defaults is-managed attribute
attr = get_attr_value(self.rsc_dflt_elem, "is-managed")
if attr:
return is_xs_boolean_true(attr)
# finally the is-managed-default property
attr = get_attr_value(self.prop_elem, "is-managed-default")
if attr:
return is_xs_boolean_true(attr)
return True
def is_running(self, id):
'''
Is this resource running?
'''
if not is_live_cib():
return False
test_id = self.rsc_clone(id) or id
rc, outp = get_stdout(self.rsc_status % test_id, stderr_on=False)
return outp.find("running") > 0 and outp.find("NOT") == -1
def is_group(self, id):
'''
Test if the resource is a group
'''
rsc_node = self.rsc2node(id)
if rsc_node is None:
return False
return is_group(rsc_node)
def can_delete(self, id):
'''
Can a resource be deleted?
The order below is important!
'''
return not (self.is_running(id) and not self.is_group(id) and self.is_managed(id))
def resources_xml():
return cibdump2elem("resources")
def is_normal_node(n):
return n.tag == "node" and (n.get("type") in (None, "normal", "member", ""))
def unique_ra(typ, klass, provider):
"""
Unique:
* it's explicitly ocf:heartbeat:
* no explicit class or provider
* only one provider (heartbeat counts as one provider)
Not unique:
* class is not ocf
* multiple providers
"""
if klass is None and provider is None:
return True
return klass == 'ocf' and provider is None or provider == 'heartbeat'
def mk_rsc_type(n):
"""
Returns prefixless for unique RAs
"""
ra_type = n.get("type")
ra_class = n.get("class")
ra_provider = n.get("provider")
if unique_ra(ra_type, ra_class, ra_provider):
ra_class = None
ra_provider = None
s1 = s2 = ''
if ra_class:
s1 = "%s:" % ra_class
if ra_provider:
s2 = "%s:" % ra_provider
return ''.join((s1, s2, ra_type))
def listnodes():
cib = cibdump2elem()
if cib is None:
return []
local_nodes = cib.xpath('/cib/configuration/nodes/node/@uname')
remote_nodes = cib.xpath('/cib/status/node_state[@remote_node="true"]/@uname')
return list(set([n for n in local_nodes + remote_nodes if n]))
def is_our_node(s):
'''
Check if s is in a list of our nodes (ignore case).
This is not fast, perhaps should be cached.
Includes remote nodes as well
'''
for n in listnodes():
if n.lower() == s.lower():
return True
return False
def is_live_cib():
    '''Are we working with the live cluster?'''
return not get_cib_in_use() and not os.getenv("CIB_file")
def is_crmuser():
return (config.core.user in ("root", config.path.crm_daemon_user)
or userdir.getuser() in ("root", config.path.crm_daemon_user))
def cib_shadow_dir():
if os.getenv("CIB_shadow_dir"):
return os.getenv("CIB_shadow_dir")
if is_crmuser():
return config.path.crm_config
home = userdir.gethomedir(config.core.user)
if home and home.startswith(os.path.sep):
return os.path.join(home, ".cib")
return get_tempdir()
def listshadows():
dir = cib_shadow_dir()
if not os.path.isdir(dir):
return []
rc, l = stdout2list("ls %s | fgrep shadow. | sed 's/^shadow\\.//'" % dir)
return l
def shadowfile(name):
return "%s/shadow.%s" % (cib_shadow_dir(), name)
def pe2shadow(pe_file, name):
'''Copy a PE file (or any CIB file) to a shadow.'''
try:
s = open(pe_file).read()
except IOError, msg:
common_err("open: %s" % msg)
return False
    # decompress if it ends with .bz2
if pe_file.endswith(".bz2"):
s = bz2.decompress(s)
# copy input to the shadow
try:
open(shadowfile(name), "w").write(s)
except IOError, msg:
common_err("open: %s" % msg)
return False
return True
def is_xs_boolean_true(bool):
return bool.lower() in ("true", "1")
def cloned_el(node):
for c in node.iterchildren():
if is_resource(c):
return c.tag
def get_topmost_rsc(node):
'''
Return a topmost node which is a resource and contains this resource
'''
if is_container(node.getparent()):
return get_topmost_rsc(node.getparent())
return node
attr_defaults = {
"rule": (("boolean-op", "and"),),
"expression": (("type", "string"),),
}
def drop_attr_defaults(node, ts=0):
try:
for defaults in attr_defaults[node.tag]:
if node.get(defaults[0]) == defaults[1]:
del node.attrib[defaults[0]]
except:
pass
def nameandid(e, level):
if e.tag:
print level*' ', e.tag, e.get("id"), e.get("name")
def xmltraverse(e, fun, ts=0):
for c in e.iterchildren():
fun(c, ts)
xmltraverse(c, fun, ts+1)
def xmltraverse_thin(e, fun, ts=0):
'''
Skip elements which may be resources themselves.
NB: Call this only on resource (or constraint) nodes, but
never on cib or configuration!
'''
for c in e.iterchildren():
if c.tag not in ('primitive', 'group'):
xmltraverse_thin(c, fun, ts+1)
fun(e, ts)
def xml_processnodes(e, node_filter, proc):
'''
Process with proc all nodes that match filter.
'''
node_list = []
for child in e.iterchildren():
if node_filter(child):
node_list.append(child)
if len(child) > 0:
xml_processnodes(child, node_filter, proc)
if node_list:
proc(node_list)
# filter the cib
def true(e):
'Just return True.'
return True
def is_entity(e):
return e.tag == etree.Entity
def is_comment(e):
return e.tag == etree.Comment
def is_status_node(e):
return e.tag == "status"
def is_emptyelem(node, tag_l):
if node.tag in tag_l:
for a in constants.precious_attrs:
if node.get(a):
return False
for n in node.iterchildren():
return False
return True
else:
return False
def is_emptynvpairs(node):
return is_emptyelem(node, constants.nvpairs_tags)
def is_emptyops(node):
return is_emptyelem(node, ("operations",))
def is_cib_element(node):
return node.tag in constants.cib_cli_map
def is_group(node):
return node.tag == "group"
def is_ms(node):
return node.tag in ("master", "ms")
def is_clone(node):
return node.tag == "clone"
def is_clonems(node):
return node.tag in constants.clonems_tags
def is_cloned(node):
return (node.getparent().tag in constants.clonems_tags or
(node.getparent().tag == "group" and
node.getparent().getparent().tag in constants.clonems_tags))
def is_container(node):
return node.tag in constants.container_tags
def is_primitive(node):
return node.tag == "primitive"
def is_resource(node):
return node.tag in constants.resource_tags
def is_template(node):
return node.tag == "template"
def is_child_rsc(node):
return node.tag in constants.children_tags
def is_constraint(node):
return node.tag in constants.constraint_tags
def is_defaults(node):
return node.tag in constants.defaults_tags
def rsc_constraint(rsc_id, con_elem):
for attr in con_elem.keys():
if attr in constants.constraint_rsc_refs \
and rsc_id == con_elem.get(attr):
return True
for rref in con_elem.xpath("resource_set/resource_ref"):
if rsc_id == rref.get("id"):
return True
return False
def is_related(rsc_id, node):
"""
checks if the given node is an element
that has a direct relation to rsc_id. That is,
if it contains it, if it references it...
"""
if is_constraint(node) and rsc_constraint(rsc_id, node):
return True
if node.tag == 'tag':
if len(node.xpath('.//obj_ref[@id="%s"]' % (rsc_id))) > 0:
return True
return False
if is_container(node):
for tag in ('primitive', 'group', 'clone', 'master'):
if len(node.xpath('.//%s[@id="%s"]' % (tag, rsc_id))) > 0:
return True
return False
return False
def sort_container_children(e_list):
    '''
    Make sure that attribute nodes come first, followed by the
    element nodes (primitive/group). The order of the elements is
    not disturbed; they are just shifted to the end!
    '''
for node in e_list:
children = [x for x in node.iterchildren()
if x.tag in constants.children_tags]
for c in children:
node.remove(c)
for c in children:
node.append(c)
def rmnode(e):
if e is not None and e.getparent() is not None:
e.getparent().remove(e)
def rmnodes(e_list):
for e in e_list:
rmnode(e)
def printid(e_list):
for e in e_list:
id = e.get("id")
if id:
print "element id:", id
def remove_dflt_attrs(e_list):
'''
Drop optional attributes which are already set to default
'''
for e in e_list:
try:
d = constants.attr_defaults[e.tag]
for a in d.keys():
if e.get(a) == d[a]:
del e.attrib[a]
except:
pass
def remove_text(e_list):
for e in e_list:
if not is_comment(e):
e.text = None
e.tail = None
def sanitize_cib(doc):
xml_processnodes(doc, is_status_node, rmnodes)
#xml_processnodes(doc, true, printid)
#xml_processnodes(doc, is_emptynvpairs, rmnodes)
#xml_processnodes(doc, is_emptyops, rmnodes)
xml_processnodes(doc, is_entity, rmnodes)
#xml_processnodes(doc, is_comment, rmnodes)
xml_processnodes(doc, is_container, sort_container_children)
xml_processnodes(doc, true, remove_dflt_attrs)
xml_processnodes(doc, true, remove_text)
xmltraverse(doc, drop_attr_defaults)
def is_simpleconstraint(node):
return len(node.xpath("resource_set/resource_ref")) == 0
match_list = defaultdict(tuple,
{"node": ("uname",),
"nvpair": ("name",),
"op": ("name", "interval"),
"rule": ("score", "score-attribute", "role"),
"expression": ("attribute", "operation", "value"),
"fencing-level": ("target", "devices")})
def add_comment(e, s):
    '''
    Insert comment s as the first child of e.
    '''
if e is None or not s:
return
comm_elem = etree.Comment(s)
firstelem_idx = 0
for c in e.iterchildren():
firstelem_idx = e.index(c)
break
e.insert(firstelem_idx, comm_elem)
def stuff_comments(node, comments):
if not comments:
return
for s in reversed(comments):
add_comment(node, s)
def fix_comments(e):
'Make sure that comments start with #'
celems = [x for x in e.iterchildren() if is_comment(x)]
for c in celems:
c.text = c.text.strip()
if not c.text.startswith("#"):
c.text = "# %s" % c.text
def set_id_used_attr(e):
e.set("__id_used", "Yes")
def is_id_used_attr(e):
return e.get("__id_used") == "Yes"
def remove_id_used_attr(e, lvl):
if is_id_used_attr(e):
del e.attrib["__id_used"]
def remove_id_used_attributes(e):
if e is not None:
xmltraverse(e, remove_id_used_attr)
def lookup_node(node, oldnode, location_only=False, ignore_id=False):
'''
Find a child of oldnode which matches node.
This is used to "harvest" existing ids in order to prevent
irrelevant changes to the XML code.
The list of attributes to match is in the dictionary
match_list.
The "id" attribute is treated differently. In case the new node
(the first parameter here) contains the id, then the "id"
attribute is added to the match list.
'''
if oldnode is None:
return None
attr_list = list(match_list[node.tag])
if not ignore_id and node.get("id"):
attr_list.append("id")
for c in oldnode.iterchildren():
if not location_only and is_id_used_attr(c):
continue
if node.tag == c.tag:
for a in attr_list:
if node.get(a) != c.get(a):
break
else:
return c
return None
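# Illustrative sketch (not from the original module): with match_list["op"]
# == ("name", "interval"), a freshly parsed op is paired with its existing
# counterpart so that the old id can be harvested:
#
#   old = etree.fromstring('<operations><op id="keep-me" name="monitor" interval="10s"/></operations>')
#   new = etree.fromstring('<op name="monitor" interval="10s"/>')
#   lookup_node(new, old)   # -> the existing op element carrying id="keep-me"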
def find_operation(rsc_node, name, interval=None):
    '''
    Setting interval to "non-0" means get the first op with an interval
    different from 0.
    Not setting interval at all means get the only matching op, or the
    0-interval op (if any).
    '''
matching_name = []
for ops in rsc_node.findall("operations"):
matching_name.extend([op for op in ops.iterchildren("op")
if op.get("name") == name])
if interval is None and len(matching_name) == 1:
return matching_name[0]
interval = interval or "0"
for op in matching_name:
opint = op.get("interval")
if interval == "non-0" and crm_msec(opint) > 0:
return op
if crm_time_cmp(opint, interval) == 0:
return op
return None
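# Illustrative sketch (hypothetical resource XML, not from the original
# module):
#
#   rsc = etree.fromstring('<primitive>'
#                          '<operations>'
#                          '<op name="monitor" interval="0"/>'
#                          '<op name="monitor" interval="10s"/>'
#                          '</operations>'
#                          '</primitive>')
#   find_operation(rsc, "monitor", "non-0")  # -> the op with interval="10s"
#   find_operation(rsc, "monitor")           # two matches -> the interval-0 op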
def get_op_timeout(rsc_node, op, default_timeout):
interval = (op == "monitor" and "non-0" or "0")
op_n = find_operation(rsc_node, op == "probe" and "monitor" or op, interval)
timeout = op_n is not None and op_n.get("timeout") or default_timeout
return crm_msec(timeout)
def op2list(node):
pl = []
action = ""
for name in node.keys():
if name == "name":
action = node.get(name)
elif name != "id": # skip the id
pl.append([name, node.get(name)])
if not action:
common_err("op is invalid (no name)")
return action, pl
def get_rsc_operations(rsc_node):
actions = [op2list(op) for op in rsc_node.xpath('.//operations/op')]
actions = [[op, pl] for op, pl in actions if op]
return actions
# lower score = earlier sort
def make_sort_map(*order):
m = {}
for i, o in enumerate(order):
if isinstance(o, basestring):
m[o] = i
else:
for k in o:
m[k] = i
return m
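# For example, make_sort_map('a', ['b', 'c'], 'd') produces
# {'a': 0, 'b': 1, 'c': 1, 'd': 2}: tags grouped in a list share one score.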
_sort_xml_order = make_sort_map('node',
'template', 'primitive', 'group', 'master', 'clone', 'op',
'tag',
['rsc_location', 'rsc_colocation', 'rsc_order'],
['rsc_ticket', 'fencing-topology'],
'cluster_property_set', 'rsc_defaults', 'op_defaults',
'acl_role', ['acl_target', 'acl_group', 'acl_user'])
_sort_cli_order = make_sort_map('node',
'rsc_template', 'primitive', 'group',
['ms', 'master'], 'clone', 'op',
'tag',
['location', 'colocation', 'collocation', 'order'],
['rsc_ticket', 'fencing_topology'],
'property', 'rsc_defaults', 'op_defaults',
'role', ['acl_target', 'acl_group', 'user'])
_SORT_LAST = 1000
def processing_sort(nl):
'''
It's usually important to process cib objects in this order,
i.e. simple objects first.
TODO: if sort_elements is disabled, only sort to resolve inter-dependencies.
'''
if config.core.sort_elements:
sortfn = lambda k: (_sort_xml_order.get(k.tag, _SORT_LAST), k.get('id'))
else:
sortfn = lambda k: _sort_xml_order.get(k.tag, _SORT_LAST)
return sorted(nl, key=sortfn)
def processing_sort_cli(cl):
'''
cl: list of objects (CibObject)
Returns the given list in order
TODO: if sort_elements is disabled, only sort to resolve inter-dependencies.
'''
if config.core.sort_elements:
sortfn = lambda k: (_sort_cli_order.get(k.obj_type, _SORT_LAST), k.obj_id)
else:
sortfn = lambda k: _sort_cli_order.get(k.obj_type, _SORT_LAST)
return sorted(cl, key=sortfn)
def is_resource_cli(s):
return s in olist(constants.resource_cli_names)
def is_constraint_cli(s):
return s in olist(constants.constraint_cli_names)
def referenced_resources(node):
if not is_constraint(node):
return []
xml_obj_type = node.tag
rsc_list = []
if xml_obj_type == "rsc_location" and node.get("rsc"):
rsc_list = [node.get("rsc")]
elif node.xpath("resource_set/resource_ref"):
# resource sets
rsc_list = [x.get("id")
for x in node.xpath("resource_set/resource_ref")]
elif xml_obj_type == "rsc_colocation":
rsc_list = [node.get("rsc"), node.get("with-rsc")]
elif xml_obj_type == "rsc_order":
rsc_list = [node.get("first"), node.get("then")]
elif xml_obj_type == "rsc_ticket":
rsc_list = [node.get("rsc")]
return [rsc for rsc in rsc_list if rsc is not None]
def rename_id(node, old_id, new_id):
if node.get("id") == old_id:
node.set("id", new_id)
def rename_rscref_simple(c_obj, old_id, new_id):
c_modified = False
for attr in c_obj.node.keys():
if attr in constants.constraint_rsc_refs and \
c_obj.node.get(attr) == old_id:
c_obj.node.set(attr, new_id)
c_obj.updated = True
c_modified = True
return c_modified
def delete_rscref_simple(c_obj, rsc_id):
c_modified = False
for attr in c_obj.node.keys():
if attr in constants.constraint_rsc_refs and \
c_obj.node.get(attr) == rsc_id:
del c_obj.node.attrib[attr]
c_obj.updated = True
c_modified = True
return c_modified
def rset_uniq(c_obj, d):
'''
Drop duplicate resource references.
'''
l = []
for rref in c_obj.node.xpath("resource_set/resource_ref"):
rsc_id = rref.get("id")
if d[rsc_id] > 1:
# drop one
l.append(rref)
d[rsc_id] -= 1
rmnodes(l)
def delete_rscref_rset(c_obj, rsc_id):
'''
Drop all reference to rsc_id.
'''
c_modified = False
l = []
for rref in c_obj.node.xpath("resource_set/resource_ref"):
if rsc_id == rref.get("id"):
l.append(rref)
c_obj.updated = True
c_modified = True
rmnodes(l)
l = []
cnt = 0
nonseq_rset = False
for rset in c_obj.node.findall("resource_set"):
rref_cnt = len(rset.findall("resource_ref"))
if rref_cnt == 0:
l.append(rset)
c_obj.updated = True
c_modified = True
elif not get_boolean(rset.get("sequential"), True) and rref_cnt > 1:
nonseq_rset = True
cnt += rref_cnt
rmnodes(l)
if not nonseq_rset and cnt == 2:
rset_convert(c_obj)
return c_modified
def rset_convert(c_obj):
l = c_obj.node.xpath("resource_set/resource_ref")
    if len(l) != 2:
        return  # unexpected: the conversion applies only to exactly two references
rsetcnt = 0
for rset in c_obj.node.findall("resource_set"):
# in case there are multiple non-sequential sets
if rset.get("sequential"):
del rset.attrib["sequential"]
rsetcnt += 1
c_obj.modified = True
cli = c_obj.repr_cli(format=-1)
cli = cli.replace("_rsc_set_ ", "")
newnode = c_obj.cli2node(cli)
if newnode is not None:
c_obj.node.getparent().replace(c_obj.node, newnode)
c_obj.node = newnode
if rsetcnt == 1 and c_obj.obj_type == "colocation":
# exchange the elements in colocations
rsc = newnode.get("rsc")
with_rsc = newnode.get("with-rsc")
if with_rsc is not None:
newnode.set("rsc", with_rsc)
if rsc is not None:
newnode.set("with-rsc", rsc)
def rename_rscref_rset(c_obj, old_id, new_id):
c_modified = False
d = {}
for rref in c_obj.node.xpath("resource_set/resource_ref"):
rsc_id = rref.get("id")
if rsc_id == old_id:
rref.set("id", new_id)
rsc_id = new_id
c_obj.updated = True
c_modified = True
if rsc_id not in d:
d[rsc_id] = 1
else:
d[rsc_id] += 1
rset_uniq(c_obj, d)
# if only two resource references remained then, to preserve
# sanity, convert it to a simple constraint (sigh)
cnt = 0
for key in d:
cnt += d[key]
if cnt == 2:
rset_convert(c_obj)
return c_modified
def rename_rscref(c_obj, old_id, new_id):
if rename_rscref_simple(c_obj, old_id, new_id) or \
rename_rscref_rset(c_obj, old_id, new_id):
err_buf.info("modified %s from %s to %s" % (str(c_obj), old_id, new_id))
def delete_rscref(c_obj, rsc_id):
return delete_rscref_simple(c_obj, rsc_id) or \
delete_rscref_rset(c_obj, rsc_id)
def silly_constraint(c_node, rsc_id):
    '''
    Detect a constraint which references rsc_id on both sides (i.e.
    relates the resource to itself), or one which is otherwise invalid
    (too few resource references).
    '''
if c_node.xpath("resource_set/resource_ref"):
# it's a resource set
# the resource sets have already been uniq-ed
cnt = len(c_node.xpath("resource_set/resource_ref"))
if c_node.tag in ("rsc_location", "rsc_ticket"): # locations and tickets are never silly
return cnt < 1
return cnt <= 1
    cnt = 0  # the total count of referenced resources has to be at least two
rsc_cnt = 0
for attr in c_node.keys():
if attr in constants.constraint_rsc_refs:
cnt += 1
if c_node.get(attr) == rsc_id:
rsc_cnt += 1
if c_node.tag in ("rsc_location", "rsc_ticket"): # locations and tickets are never silly
return cnt < 1
else:
return rsc_cnt == 2 or cnt < 2
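# Illustrative sketch (assuming constants.constraint_rsc_refs contains
# "rsc" and "with-rsc", as in crmsh):
#
#   c = etree.fromstring('<rsc_colocation id="c1" rsc="A" with-rsc="A"/>')
#   silly_constraint(c, "A")   # -> True: the constraint relates A to itself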
def is_climove_location(node):
'Figure out if the location was created by crm resource move.'
rule_l = node.findall("rule")
expr_l = node.xpath(".//expression")
return len(rule_l) == 1 and len(expr_l) == 1 and \
node.get("id").startswith("cli-") and \
expr_l[0].get("attribute") == "#uname" and \
expr_l[0].get("operation") == "eq"
def is_pref_location(node):
'Figure out if the location is a node preference.'
rule_l = node.findall("rule")
expr_l = node.xpath(".//expression")
return len(rule_l) == 1 and len(expr_l) == 1 and \
expr_l[0].get("attribute") == "#uname" and \
expr_l[0].get("operation") == "eq"
def get_rsc_ref_ids(node):
return [x.get("id")
for x in node.xpath("./resource_ref")]
def get_rsc_children_ids(node):
return [x.get("id")
for x in node.iterchildren() if is_child_rsc(x)]
def get_prim_children_ids(node):
l = [x for x in node.iterchildren() if is_child_rsc(x)]
if len(l) and l[0].tag == "group":
l = [x for x in l[0].iterchildren() if is_child_rsc(x)]
return [x.get("id") for x in l]
def get_child_nvset_node(node, attr_set="meta_attributes"):
if node is None:
return None
for c in node.iterchildren():
if c.tag != attr_set:
continue
return c
return None
def get_rscop_defaults_meta_node(node):
return get_child_nvset_node(node)
def get_rsc_meta_node(node):
return get_child_nvset_node(node)
def get_properties_node(node):
return get_child_nvset_node(node, attr_set="cluster_property_set")
def new_cib():
cib_elem = etree.Element("cib")
conf_elem = etree.SubElement(cib_elem, "configuration")
for name in schema.get('sub', "configuration", 'r'):
etree.SubElement(conf_elem, name)
return cib_elem
def get_conf_elems(cib_elem, path):
'''
Get a list of configuration elements. All elements are within
/configuration
'''
if cib_elem is None:
return None
return cib_elem.xpath("//configuration/%s" % path)
def get_first_conf_elem(cib_elem, path):
    elems = get_conf_elems(cib_elem, path)
    return elems[0] if elems else None
def get_topnode(cib_elem, tag):
"Get configuration element or create/append if there's none."
conf_elem = cib_elem.find("configuration")
if conf_elem is None:
common_err("no configuration element found!")
return None
if tag == "configuration":
return conf_elem
e = cib_elem.find("configuration/%s" % tag)
if e is None:
common_debug("create configuration section %s" % tag)
e = etree.SubElement(conf_elem, tag)
return e
def get_attr_in_set(e, attr):
if e is None:
return None
for c in e.iterchildren("nvpair"):
if c.get("name") == attr:
return c
return None
def get_attr_value(e, attr):
try:
return get_attr_in_set(e, attr).get("value")
    except AttributeError:
        # get_attr_in_set returned None: no such nvpair
        return None
def set_attr(e, attr, value):
'''
Set an attribute in the attribute set.
'''
nvpair = get_attr_in_set(e, attr)
if nvpair is None:
from . import idmgmt
nvpair = etree.SubElement(e, "nvpair", id="", name=attr, value=value)
nvpair.set("id", idmgmt.new(nvpair, e.get("id")))
else:
nvpair.set("name", attr)
nvpair.set("value", value)
def get_set_nodes(e, setname, create=False):
"""Return the attributes set nodes (create one if requested)
setname can for example be meta_attributes
"""
l = [c for c in e.iterchildren(setname)]
if l:
return l
if create:
from . import idmgmt
elem = etree.SubElement(e, setname, id="")
elem.set("id", idmgmt.new(elem, e.get("id")))
l.append(elem)
return l
_checker = doctestcompare.LXMLOutputChecker()
def xml_equals_unordered(a, b):
"used by xml_equals to compare xml trees without ordering"
def fail(msg):
common_debug("%s!=%s: %s" % (a.tag, b.tag, msg))
return False
def tagflat(x):
return isinstance(x.tag, basestring) and x.tag or x.text
def sortby(v):
if v.tag == 'primitive':
return v.tag
return tagflat(v) + ''.join(sorted(v.attrib.keys() + v.attrib.values()))
def safe_strip(text):
return text is not None and text.strip() or ''
if a.tag != b.tag:
return fail("tags differ: %s != %s" % (a.tag, b.tag))
elif a.attrib != b.attrib:
return fail("attributes differ: %s != %s" % (a.attrib, b.attrib))
elif safe_strip(a.text) != safe_strip(b.text):
return fail("text differ %s != %s" % (repr(a.text), repr(b.text)))
elif safe_strip(a.tail) != safe_strip(b.tail):
return fail("tails differ: %s != %s" % (a.tail, b.tail))
elif len(a) != len(b):
return fail("number of children differ")
elif len(a) == 0:
return True
# order matters here, but in a strange way:
# all primitive tags should sort the same..
sorted_children = zip(sorted(a, key=sortby), sorted(b, key=sortby))
return all(xml_equals_unordered(a, b) for a, b in sorted_children)
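# Illustrative sketch: child order is normalized before comparison, so two
# trees differing only in sibling order compare equal:
#
#   a = etree.fromstring('<x><n id="1"/><m id="2"/></x>')
#   b = etree.fromstring('<x><m id="2"/><n id="1"/></x>')
#   xml_equals_unordered(a, b)   # -> True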
def xml_equals(n, m, show=False):
rc = xml_equals_unordered(n, m)
if not rc and show and config.core.debug:
# somewhat strange, but that's how this works
from doctest import Example
example = Example("etree.tostring(n)", etree.tostring(n))
got = etree.tostring(m)
print _checker.output_difference(example, got, 0)
return rc
def merge_attributes(dnode, snode, tag):
rc = False
add_children = []
for sc in snode.iterchildren(tag):
dc = lookup_node(sc, dnode, ignore_id=True)
if dc is not None:
for a, v in sc.items():
if a == "id":
continue
if v != dc.get(a):
dc.set(a, v)
rc = True
else:
add_children.append(sc)
rc = True
for c in add_children:
dnode.append(copy.deepcopy(c))
return rc
def merge_nodes(dnode, snode):
    '''
    Import elements from snode into dnode.
    If an element is an attribute set (constants.nvpairs_tags) or
    "operations", then merge the attributes of its children.
    Otherwise, replace the whole element. (TBD)
    '''
rc = False # any changes done?
if dnode is None or snode is None:
return rc
add_children = []
for sc in snode.iterchildren():
dc = lookup_node(sc, dnode, ignore_id=True)
if dc is None:
if sc.tag in constants.nvpairs_tags or sc.tag == "operations":
add_children.append(sc)
rc = True
elif dc.tag in constants.nvpairs_tags:
rc = merge_attributes(dc, sc, "nvpair") or rc
elif dc.tag == "operations":
rc = merge_attributes(dc, sc, "op") or rc
for c in add_children:
dnode.append(copy.deepcopy(c))
return rc
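# Illustrative sketch (assuming "instance_attributes" is among
# constants.nvpairs_tags, as in crmsh): a changed nvpair value in the source
# overwrites the destination's, while ids in the destination are kept:
#
#   d = etree.fromstring('<primitive><instance_attributes id="i">'
#                        '<nvpair id="p1" name="ip" value="10.0.0.1"/>'
#                        '</instance_attributes></primitive>')
#   s = etree.fromstring('<primitive><instance_attributes>'
#                        '<nvpair name="ip" value="10.0.0.2"/>'
#                        '</instance_attributes></primitive>')
#   merge_nodes(d, s)   # -> True; d's nvpair now has value="10.0.0.2"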
def merge_tmpl_into_prim(prim_node, tmpl_node):
'''
Create a new primitive element which is a merge of a
rsc_template and a primitive which references it.
'''
dnode = etree.Element(prim_node.tag)
merge_nodes(dnode, tmpl_node)
merge_nodes(dnode, prim_node)
    # the resulting node should inherit all of the primitive's attributes
for a, v in prim_node.items():
dnode.set(a, v)
# but class/provider/type are coming from the template
# savannah#41410: stonith resources do not have the provider
# attribute
for a in ("class", "provider", "type"):
v = tmpl_node.get(a)
if v is not None:
dnode.set(a, v)
return dnode
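# Illustrative sketch (hypothetical XML): the merged primitive keeps its own
# id and other attributes but takes class/provider/type from the template:
#
#   tmpl = etree.fromstring('<template id="T" class="ocf" provider="heartbeat" type="Dummy"/>')
#   prim = etree.fromstring('<primitive id="P" template="T"/>')
#   merged = merge_tmpl_into_prim(prim, tmpl)
#   # merged.get("id") == "P" and merged.get("type") == "Dummy"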
def check_id_ref(elem, id_ref):
target = elem.xpath('.//*[@id="%s"]' % (id_ref))
if len(target) == 0:
common_err("Reference not found: %s" % id_ref)
elif len(target) > 1:
common_err("Ambiguous reference to %s" % id_ref)
# vim:ts=4:sw=4:et:
|
aspiers/crmsh
|
modules/xmlutil.py
|
Python
|
gpl-2.0
| 36,919
|
import sys
def bf(project, code=None, threshold=3):
Backfill(project, threshold=threshold, code=code).RTdict()
return
class Backfill():
def __init__(self, project, threshold=3, code=None):
self._project = project
self._threshold = threshold
self._code = code
def RTdict(self):
if not self._code:
for code in self._project.lib.library:
self.check_code(code)
else:
self.check_code(self._code)
return
def check_code(self, code):
if self.enough(code):
self.bf(code)
return
    def enough(self, code):
        count = 0
        for sample in self._project.runlist:
            if code in self._project.RTdict[sample]:
                count += 1
        return count >= self._threshold
def bf(self, code):
for sample in self._project.runlist:
if not self._project.RTdict[sample].get(code, (None, 0))[0]:
RT_lib = self._project.lib.RT(code)
CF = self._project.CFdict[sample]
self._project._RTdict[sample][code] = \
((RT_lib - CF[1]) / CF[0], 0)
return
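# The back-filled value inverts the per-sample linear calibration
# CF = (slope, intercept): RT_sample = (RT_library - intercept) / slope.
# Sketch with hypothetical numbers: CF = (0.98, 0.12) and RT_lib = 5.0
# give (5.0 - 0.12) / 0.98 ~= 4.98.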
|
lycopoda/pyquan2
|
backfill.py
|
Python
|
gpl-2.0
| 1,277
|
#!/usr/bin/python
#
# A mod_python environment for the symbol store collector
#
"""
Linux uploader:
// symupload.cc: Upload a symbol file to a HTTP server. The upload is sent as
// a multipart/form-data POST request with the following parameters:
// code_file: the basename of the module, e.g. "app"
// debug_file: the basename of the debugging file, e.g. "app"
// debug_identifier: the debug file's identifier, usually consisting of
// the guid and age embedded in the pdb, e.g.
// "11111111BBBB3333DDDD555555555555F"
// version: the file version of the module, e.g. "1.2.3.4"
// os: the operating system that the module was built for
// cpu: the CPU that the module was built for
// symbol_file: the contents of the breakpad-format symbol file
Mac uploader:
// symupload.m: Upload a symbol file to a HTTP server. The upload is sent as
// a multipart/form-data POST request with the following parameters:
// code_file: the basename of the module, e.g. "app"
// debug_file: the basename of the debugging file, e.g. "app"
// debug_identifier: the debug file's identifier, usually consisting of
// the guid and age embedded in the pdb, e.g.
// "11111111BBBB3333DDDD555555555555F"
// os: the operating system that the module was built for
// cpu: the CPU that the module was built for (x86 or ppc)
// symbol_file: the contents of the breakpad-format symbol file
Win uploader:
// Tool to upload an exe/dll and its associated symbols to an HTTP server.
// The PDB file is located automatically, using the path embedded in the
// executable. The upload is sent as a multipart/form-data POST request,
// with the following parameters:
// code_file: the basename of the module, e.g. "app.exe"
// debug_file: the basename of the debugging file, e.g. "app.pdb"
// debug_identifier: the debug file's identifier, usually consisting of
// the guid and age embedded in the pdb, e.g.
// "11111111BBBB3333DDDD555555555555F"
// version: the file version of the module, e.g. "1.2.3.4"
// os: the operating system that the module was built for, always
// "windows" in this implementation.
// cpu: the CPU that the module was built for, typically "x86".
// symbol_file: the contents of the breakpad-format symbol file
we create a symbol store path as follows:
// simple_symbol_supplier.h: A simple SymbolSupplier implementation
//
// SimpleSymbolSupplier is a straightforward implementation of SymbolSupplier
// that stores symbol files in a filesystem tree. A SimpleSymbolSupplier is
// created with one or more base directories, which are the root paths for all
// symbol files. Each symbol file contained therein has a directory entry in
// the base directory with a name identical to the corresponding debugging
// file (pdb). Within each of these directories, there are subdirectories
// named for the debugging file's identifier. For recent pdb files, this is
// a concatenation of the pdb's uuid and age, presented in hexadecimal form,
// without any dashes or separators. The uuid is in uppercase hexadecimal
// and the age is in lowercase hexadecimal. Within that subdirectory,
// SimpleSymbolSupplier expects to find the symbol file, which is named
// identically to the debug file, but with a .sym extension. If the original
// debug file had a name ending in .pdb, the .pdb extension will be replaced
// with .sym. This sample hierarchy is rooted at the "symbols" base
// directory:
//
// symbols
// symbols/test_app.pdb
// symbols/test_app.pdb/63FE4780728D49379B9D7BB6460CB42A1
// symbols/test_app.pdb/63FE4780728D49379B9D7BB6460CB42A1/test_app.sym
// symbols/kernel32.pdb
// symbols/kernel32.pdb/BCE8785C57B44245A669896B6A19B9542
// symbols/kernel32.pdb/BCE8785C57B44245A669896B6A19B9542/kernel32.sym
//
// In this case, the uuid of test_app.pdb is
// 63fe4780-728d-4937-9b9d-7bb6460cb42a and its age is 1.
//
// This scheme was chosen to be roughly analogous to the way that
// symbol files may be accessed from Microsoft Symbol Server. A hierarchy
// used for Microsoft Symbol Server storage is usable as a hierarchy for
// SimpleSymbolServer, provided that the pdb files are transformed to dumped
// format using a tool such as dump_syms, and given a .sym extension.
//
// SimpleSymbolSupplier will iterate over all root paths searching for
// a symbol file existing in that path.
//
// SimpleSymbolSupplier supports any debugging file which can be identified
// by a CodeModule object's debug_file and debug_identifier accessors. The
// expected ultimate source of these CodeModule objects are MinidumpModule
// objects; it is this class that is responsible for assigning appropriate
// values for debug_file and debug_identifier.
//
so, processorconfig.symbolCachePath is the root, say ~/symbols
~/symbols/HyvesDesktop.pdb
~/symbols/HyvesDesktopLib.pdb
~/symbols/Kwekker.pdb
~/symbols/PhotoUploader.pdb
these toplevel dirs must be in the processorconfig.processorSymbolsPathnameList
"""
import sys
import os
import threading
import datetime as dt
import config.processorconfig as configModule
import socorro.lib.util as sutil
import socorro.lib.ooid as ooid
import socorro.lib.ConfigurationManager
import socorro.symbolcollector.initializer
import psycopg2
import socorro.lib.psycopghelper as psy
import socorro.database.schema as sch
#-----------------------------------------------------------------------------------------------------------------
if __name__ != "__main__":
from mod_python import apache
from mod_python import util
else:
# this is a test being run from the command line
# these objects are to provide a fake environment for testing
from socorro.symbolcollector.modpython_testhelper import apache
from socorro.symbolcollector.modpython_testhelper import util
#-----------------------------------------------------------------------------------------------------------------
def handler(req):
global persistentStorage
try:
x = persistentStorage
except NameError:
persistentStorage = socorro.symbolcollector.initializer.createPersistentInitialization(configModule)
logger = persistentStorage["logger"]
config = persistentStorage["config"]
logger.debug("handler invoked using subinterpreter: %s", req.interpreter)
if req.method == "POST":
try:
req.content_type = "text/plain"
theform = util.FieldStorage(req)
# get all the stuff from the request
code_file = theform["code_file"]
if not code_file:
return apache.HTTP_BAD_REQUEST
debug_file = theform["debug_file"]
if not debug_file:
return apache.HTTP_BAD_REQUEST
debug_identifier = theform["debug_identifier"]
if not debug_identifier:
return apache.HTTP_BAD_REQUEST
            # named os_name rather than "os" so the os module used below is not shadowed
            os_name = theform["os"]
            if not os_name:
                return apache.HTTP_BAD_REQUEST
            version = theform["version"]
            if not version and os_name != "mac":
                return apache.HTTP_BAD_REQUEST
cpu = theform["cpu"]
if not cpu:
return apache.HTTP_BAD_REQUEST
symbol_file = theform["symbol_file"]
if not symbol_file:
return apache.HTTP_BAD_REQUEST
currentTimestamp = dt.datetime.now()
# first, create a version in the database (so we don't have to do that manually,
# which is boring and cumbersome). We cannot do that from a mac build, since the
# mac uploader doesn't give us a version.
            if os_name != "mac":
assert "databaseHost" in config, "databaseHost is missing from the configuration"
assert "databaseName" in config, "databaseName is missing from the configuration"
assert "databaseUserName" in config, "databaseUserName is missing from the configuration"
assert "databasePassword" in config, "databasePassword is missing from the configuration"
databaseConnectionPool = psy.DatabaseConnectionPool(config.databaseHost,
config.databaseName,
config.databaseUserName,
config.databasePassword,
logger)
logger.info("%s - connecting to database", threading.currentThread().getName())
try:
                    databaseConnection, databaseCursor = databaseConnectionPool.connectionCursorPair()
except:
logger.critical("%s - cannot connect to the database", threading.currentThread().getName())
                    return apache.HTTP_INTERNAL_SERVER_ERROR
# if the insert failed, well, it's most likely because the app
# and branch are already in here.
try:
                    databaseCursor.execute("""insert into branches (product, version, branch) values (%s, %s, '0')""", (code_file, version, ))
databaseCursor.connection.commit()
except:
databaseCursor.connection.rollback()
# then store the symbol file in the right place
symbolroot = config.symbolCachePath
# if it already exists, life is weird...
symboldir = os.path.join(symbolroot, debug_file.replace("sym", "pdb"), debug_identifier)
            if not os.path.exists(symboldir):
                os.makedirs(symboldir)  # the per-pdb parent directory may not exist yet either
f = open(os.path.join(symboldir, debug_file.replace("pdb", "sym")), "w+")
f.write(symbol_file)
f.close()
return apache.OK
except:
logger.info("mod-python subinterpreter name: %s", req.interpreter)
sutil.reportExceptionAndContinue(logger)
print >>sys.stderr, "Exception: %s" % sys.exc_info()[0]
print >>sys.stderr, sys.exc_info()[1]
print >>sys.stderr
sys.stderr.flush()
return apache.HTTP_INTERNAL_SERVER_ERROR
else:
return apache.HTTP_METHOD_NOT_ALLOWED
#-----------------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
from socorro.symbolcollector.modpython_testhelper import *
# mac
req = FakeReq()
req.method = "POST"
req.fields = {
"code_file" : "app",
"debug_file" : "app.sym",
"debug_identifier" : "43E447AD7830BEA940017B18229F318F0",
"version" : "1.2.3.4",
"os" : "mac",
"cpu" : "x86",
"symbol_file" : FakeSymbolFile(FakeFile("""MODULE mac x86 43E447AD7830BEA940017B18229F318F0 app
PUBLIC 86b8 0 start"""))
}
req.interpreter = "FakeReq interpreter"
print handler (req)
# linux
req = FakeReq()
req.method = "POST"
req.fields = {
"code_file" : "app",
"debug_file" : "app.sym",
"debug_identifier" : "D22DD4B2AF9C9361486728DA28556D1D0",
"version" : "1.2.3.4",
"os" : "windows",
"cpu" : "x86",
"symbol_file" : FakeSymbolFile(FakeFile("""MODULE Linux x86 D22DD4B2AF9C9361486728DA28556D1D0 app
PUBLIC 86b8 0 start"""))
}
req.interpreter = "FakeReq interpreter"
print handler (req)
# windows
req = FakeReq()
req.method = "POST"
req.fields = {
"code_file" : "app.exe",
"debug_file" : "app.pdb",
"debug_identifier" : "11111111BBBB3333DDDD555555555555F",
"version" : "1.2.3.4",
"os" : "windows",
"cpu" : "x86",
"symbol_file" : FakeSymbolFile(FakeFile("""MODULE windows x86 11111111BBBB3333DDDD555555555555F app.exe
PUBLIC 86b8 0 start"""))
}
req.interpreter = "FakeReq interpreter"
print handler (req)
|
boudewijnrempt/HyvesDesktop
|
3rdparty/socorro/socorro/symbolcollector/modpython-symbolstore.py
|
Python
|
gpl-2.0
| 11,430
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-# enable debugging
#/***************************************************************************
# * <+$FILENAME$;R+++++++++++++++++++++++++> Version <+$VERSION$;R+++++++>*
# * *
# * <+#BRIEF#;R++++++++++++++++++++++++++++++++++++++++++++++++++++++++++>*
# * Copyright (C) <+$YEAR$;R+> by <+$AUTHOR$;R+++++++++++++++++++++++++++>*
# ***************************************************************************
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU General Public License as published by *
# * the Free Software Foundation; version 2 of the License. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU General Public License for more details. *
# * *
# * You should have received a copy of the GNU General Public License *
# * along with this program; if not, write to the *
# * Free Software Foundation, Inc., *
# * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
# ***************************************************************************
# * To contact the author, please write to: *
# * <+$AUTHOR$;R+++++++++++++++++++++++++++++++++++++++++++++++++++++++++>*
# * Email: <+$EMAIL$;R+++++++++++++++++++++++++++++++++++++++++++++++++++>*
# * Webpage: http://<+$WEBPAGE$;R++++++++++++++++++++++++++++++++++++++++>*
# * Phone: <+$PHONE$;R+++++++++++++++++++++++++++++++++++++++++++++++++++>*
# ***************************************************************************/
"""
<+$FILENAME$+> Version <+$VERSION$+>
Copyright (C) <+$YEAR$+> by <+$AUTHOR$+>
Brief: <+#BRIEF#+>
Contact:
Author: <+$AUTHOR$+>
Email: <+$EMAIL$+>
Webpage: <+$WEBPAGE$+>
Phone: <+$PHONE$+>
Date: <+$DATE$+>
"""
import cgi
import cgitb
cgitb.enable()
print("Content-Type: text/html;charset=utf-8")
print()
# Web page starts here...
def main():
print("<b>H</b>ello W<b>orld!</b>")
if __name__ == "__main__":
main()
#/* ------------------------------------------------------------------------ */
#/* Python config for Vim modeline */
#/* vi: set ai sta et ts=4 sts=4 sw=4 tw=0 wm=0 cc=+1 lbr fo=croq : */
#/* Template by Dr. Beco <rcb at beco dot cc> Version 20170724.161751 */
|
drbeco/vimtemplates
|
templates/vim1_web.py
|
Python
|
gpl-2.0
| 2,851
|
import django_tables2 as tables
from django_tables2.utils import A
from django.utils.html import mark_safe
from kaka.utils import *
from .models import *
class TreeTable(KakaTable):
class Meta(KakaTable.Meta):
report = 'tree'
model = Tree
sequence = ["datasource", "name", "createddate", "createdby", "lastupdateddate", "lastupdatedby", "obs"]
class FishTable(KakaTable):
class Meta(KakaTable.Meta):
report = 'fish'
model = Fish
sequence = ["datasource", "name", "createddate", "createdby", "lastupdateddate", "lastupdatedby", "obs"]
|
hdzierz/Kaka
|
mongseafood/tables.py
|
Python
|
gpl-2.0
| 599
|
# F3AT - Flumotion Asynchronous Autonomous Agent Toolkit
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# See "LICENSE.GPL" in the source distribution for more information.
# Headers in this file shall remain intact.
import operator
from zope.interface.interface import InterfaceClass
from feat.common import annotate, reflect, container
from feat.agents.base import replay
def register(component, canonical_name, mode):
annotate.injectClassCallback("dependency", 3, "_register_dependency",
component, canonical_name, mode)
class UndefinedDependency(Exception):
pass
class AgentDependencyMixin(object):
'''
Mixin for the BaseAgent to handle dependencies.
'''
_dependencies = container.MroDict("_mro_dependencies")
@classmethod
def _register_dependency(cls, component, canonical_name, mode):
if not isinstance(component, InterfaceClass):
            raise AttributeError(
                'Component %r should be an Interface. Got %r instead.' %
                (component, component.__class__.__name__))
if component not in cls._dependencies:
cls._dependencies[component] = dict()
cls._dependencies[component][mode] = canonical_name
@classmethod
def _get_dependency_for_component(cls, component):
return cls._dependencies.get(component, None)
@classmethod
def _iter_dependencies(cls):
return cls._dependencies.iteritems()
@classmethod
def _get_defined_components(cls):
return cls._dependencies.keys()
@replay.immutable
def dependency(self, state, component, *args, **kwargs):
mode = state.medium.get_mode(component)
for_component = self._get_dependency_for_component(component)
if for_component is None:
raise UndefinedDependency(
'Component %s is not defined. Defined components are: %r' %\
(component, self._get_defined_components(), ))
canonical_name = for_component.get(mode, None)
if canonical_name is None:
raise UndefinedDependency(
'Component %s is not defined for the mode %r. '
'Defined handlers are for the modes: %r' %\
(component, mode, for_component.keys(), ))
# What we might pass in registration is either a callable object
# or its canonical name.
# Here we handle lazy imports in this second case.
if callable(canonical_name):
function = canonical_name
else:
function = reflect.named_object(canonical_name)
if not component.providedBy(function):
raise UndefinedDependency(
'Expected object %r to provide the interface %r!' %\
(function, component, ))
result = function(*args, **kwargs)
# for purpose of registration we might want to pass the reference
# to the dependency to the inside to make it easier to register it
if getattr(state.medium, 'keeps_track_of_dependencies', False):
state.medium.register_dependency_reference(
result, component, mode, args, kwargs)
return result
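# Illustrative usage sketch (hypothetical interface and canonical names; the
# real registrations live in the agent modules):
#
#   class SomeAgent(AgentDependencyMixin, ...):
#       register(IDatabaseFactory, 'myapp.db.production', 'production')
#       register(IDatabaseFactory, 'myapp.db.simulation', 'test')
#
#   # agent.dependency(IDatabaseFactory, *args) then looks up the canonical
#   # name registered for the medium's current mode, resolves it lazily via
#   # reflect.named_object and calls it with *args.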
|
f3at/feat
|
src/feat/agents/base/dependency.py
|
Python
|
gpl-2.0
| 3,925
|
# This file is part of MyPaint.
# Copyright (C) 2009-2012 by Martin Renold <martinxyz@gmx.ch>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
## Imports
import math
from gettext import gettext as _
import gtk2compat
import gobject
import gtk
from gtk import gdk
import cairo
import windowing
import gui.mode
from overlays import rounded_box, Overlay
import colors
import lib.color
import fill
## Color picking mode, with a preview rectangle overlay
class ColorPickMode (gui.mode.OneshotDragMode):
"""Mode for picking colors from the screen, with a preview
This can be invoked in quite a number of ways:
* The keyboard hotkey ("R" by default)
* Modifier and pointer button: (Ctrl+Button1 by default)
* From the toolbar or menu
The first two methods pick immediately. Moving the mouse with the
initial keys or buttons held down keeps picking with a little
preview square appearing.
The third method doesn't pick immediately: you have to click on the
canvas to start picking.
While the preview square is visible, it's possible to pick outside
the window. This "hidden" functionality may not work at all with
more modern window managers and DEs, and may be removed if it proves
slow or faulty.
"""
# Class configuration
ACTION_NAME = 'ColorPickMode'
PICK_SIZE = 6
# Keyboard activation behaviour (instance defaults)
# See keyboard.py and doc.mode_flip_action_activated_cb()
keyup_timeout = 0 # don't change behaviour by timeout
pointer_behavior = gui.mode.Behavior.EDIT_OBJECTS
scroll_behavior = gui.mode.Behavior.NONE # XXX grabs ptr, so no CHANGE_VIEW
supports_button_switching = False
@property
def inactive_cursor(self):
return self.doc.app.cursor_color_picker
@classmethod
def get_name(cls):
return _(u"Pick Color")
def get_usage(self):
return _(u"Set the color used for painting")
def __init__(self, **kwds):
super(ColorPickMode, self).__init__(unmodified_persist=False, **kwds)
self._overlay = None
self._started_from_key_press = self.ignore_modifiers
self._start_drag_on_next_motion_event = False
def enter(self, **kwds):
"""Enters the mode, arranging for necessary grabs ASAP"""
super(ColorPickMode, self).enter(**kwds)
if self._started_from_key_press:
# Pick now, and start the drag when possible
self.doc.app.pick_color_at_pointer(self.doc.tdw, self.PICK_SIZE)
self._start_drag_on_next_motion_event = True
self._needs_drag_start = True
def leave(self, **kwds):
self._remove_overlay()
super(ColorPickMode, self).leave(**kwds)
def button_press_cb(self, tdw, event):
self.doc.app.pick_color_at_pointer(self.doc.tdw, self.PICK_SIZE)
# Supercall will start the drag normally
self._start_drag_on_next_motion_event = False
return super(ColorPickMode, self).button_press_cb(tdw, event)
def motion_notify_cb(self, tdw, event):
if self._start_drag_on_next_motion_event:
self._start_drag(tdw, event)
self._start_drag_on_next_motion_event = False
return super(ColorPickMode, self).motion_notify_cb(tdw, event)
def drag_stop_cb(self, tdw):
self._remove_overlay()
super(ColorPickMode, self).drag_stop_cb(tdw)
def drag_update_cb(self, tdw, event, dx, dy):
self.doc.app.pick_color_at_pointer(tdw, self.PICK_SIZE)
self._place_overlay(tdw, event.x, event.y)
return super(ColorPickMode, self).drag_update_cb(tdw, event, dx, dy)
def _place_overlay(self, tdw, x, y):
if self._overlay is None:
self._overlay = ColorPickPreviewOverlay(self.doc, tdw, x, y)
else:
self._overlay.move(x, y)
def _remove_overlay(self):
if self._overlay is None:
return
self._overlay.cleanup()
self._overlay = None
class ColorPickPreviewOverlay (Overlay):
"""Preview overlay during color picker mode.
This is only shown when dragging the pointer with a button or the
hotkey held down, to avoid flashing and distraction.
"""
PREVIEW_SIZE = 70
OUTLINE_WIDTH = 3
CORNER_RADIUS = 10
def __init__(self, doc, tdw, x, y):
"""Initialize, attaching to the brush and to the tdw.
Observer callbacks and canvas overlays are registered by this
constructor, so cleanup() must be called when the owning mode leave()s.
"""
Overlay.__init__(self)
self._doc = doc
self._tdw = tdw
self._x = int(x)+0.5
self._y = int(y)+0.5
alloc = tdw.get_allocation()
self._tdw_w = alloc.width
self._tdw_h = alloc.height
self._color = self._get_app_brush_color()
app = doc.app
app.brush.observers.append(self._brush_color_changed_cb)
tdw.display_overlays.append(self)
self._previous_area = None
self._queue_tdw_redraw()
def cleanup(self):
"""Cleans up temporary observer stuff, allowing garbage collection.
"""
app = self._doc.app
app.brush.observers.remove(self._brush_color_changed_cb)
self._tdw.display_overlays.remove(self)
assert self._brush_color_changed_cb not in app.brush.observers
assert self not in self._tdw.display_overlays
self._queue_tdw_redraw()
def move(self, x, y):
"""Moves the preview square to a new location, in tdw pointer coords.
"""
self._x = int(x)+0.5
self._y = int(y)+0.5
self._queue_tdw_redraw()
def _get_app_brush_color(self):
app = self._doc.app
return lib.color.HSVColor(*app.brush.get_color_hsv())
def _brush_color_changed_cb(self, settings):
if not settings.intersection(('color_h', 'color_s', 'color_v')):
return
self._color = self._get_app_brush_color()
self._queue_tdw_redraw()
def _queue_tdw_redraw(self):
if self._previous_area is not None:
self._tdw.queue_draw_area(*self._previous_area)
self._previous_area = None
area = self._get_area()
if area is not None:
self._tdw.queue_draw_area(*area)
def _get_area(self):
# Returns the drawing area for the square
size = self.PREVIEW_SIZE
# Start with the pointer location
x = self._x
y = self._y
offset = size // 2
# Only show if the pointer is inside the tdw
alloc = self._tdw.get_allocation()
if x < 0 or y < 0 or y > alloc.height or x > alloc.width:
return None
# Convert to preview location
# Pick a direction - N,W,E,S - in which to offset the preview
if y + size > alloc.height - offset:
x -= offset
y -= size + offset
elif x < offset:
x += offset
y -= offset
elif x > alloc.width - offset:
x -= size + offset
y -= offset
else:
x -= offset
y += offset
## Correct to place within the tdw
#if x < 0:
# x = 0
#if y < 0:
# y = 0
#if x + size > alloc.width:
# x = alloc.width - size
#if y + size > alloc.height:
# y = alloc.height - size
return (int(x), int(y), size, size)
def paint(self, cr):
area = self._get_area()
if area is not None:
x, y, w, h = area
cr.set_source_rgb(*self._color.get_rgb())
x += (self.OUTLINE_WIDTH // 2) + 1.5
y += (self.OUTLINE_WIDTH // 2) + 1.5
w -= self.OUTLINE_WIDTH + 3
h -= self.OUTLINE_WIDTH + 3
rounded_box(cr, x, y, w, h, self.CORNER_RADIUS)
cr.fill_preserve()
cr.set_source_rgb(0, 0, 0)
cr.set_line_width(self.OUTLINE_WIDTH)
cr.stroke()
self._previous_area = area
## More conventional color-picking button, with grab
class BrushColorPickerButton (colors.ColorPickerButton):
"""Color picker button that sets the app's working brush color."""
__gtype_name__ = "MyPaintBrushColorPickerButton"
def __init__(self):
colors.ColorPickerButton.__init__(self)
self.connect("realize", self._init_color_manager)
def _init_color_manager(self, widget):
from application import get_app
app = get_app()
mgr = app.brush_color_manager
assert mgr is not None
self.set_color_manager(mgr)
|
prescott66/mypaint
|
gui/colorpicker.py
|
Python
|
gpl-2.0
| 8,831
|
"""
Copyright (c) 2004, CherryPy Team (team@cherrypy.org)
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of the CherryPy Team nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import cherrytemplate, sys
print "#"*20
print "# Running unittest for Python-%s and CherryTemplate-%s" % (
sys.version.split()[0], cherrytemplate.__version__)
print "#"*20
print
def checkRes(res, expectedRes):
if res != expectedRes:
f = open('result', 'w')
f.write("Result: " + repr(res) + "\n")
f.close()
f = open('result.raw', 'w')
f.write(res)
f.close()
print "\nThe expected result was:\n%s and the real result was:\n%s\n*** ERROR ***" % (
repr(expectedRes), repr(res))
sys.exit(-1)
print "OK"
print "Testing CGTL...",
name = "world"
res = cherrytemplate.renderTemplate(file = 'testTags.html')
checkRes(res, open('testTags.result', 'r').read())
print "Testing latin-1 template, latin-1 output (1)...",
europoundUnicode = u'\x80\xa3'
europoundLatin1 = europoundUnicode.encode('latin-1')
res = cherrytemplate.renderTemplate(europoundLatin1 + """<py-eval="europoundLatin1">""")
checkRes(res, europoundLatin1*2)
print "Testing latin-1 template, latin-1 output (2)...",
res = cherrytemplate.renderTemplate(europoundLatin1 + """<py-eval="europoundLatin1">""", inputEncoding = 'latin-1', outputEncoding = 'latin-1')
checkRes(res, europoundLatin1*2)
print "Testing latin-1 template, utf-16 output...",
res = cherrytemplate.renderTemplate(europoundLatin1 + """<py-eval="europoundLatin1">""", inputEncoding = 'latin-1', outputEncoding = 'utf-16')
checkRes(res, (europoundUnicode*2).encode('utf-16'))
print "Testing unicode template, latin-1 output...",
res = cherrytemplate.renderTemplate(europoundUnicode + """<py-eval="europoundUnicode">""", outputEncoding = 'latin-1')
checkRes(res, europoundLatin1*2)
print "Testing external latin-1 template, latin-1 output...",
res = cherrytemplate.renderTemplate(file = 't.html', inputEncoding = 'latin-1', outputEncoding = 'latin-1')
checkRes(res, europoundLatin1*2 + '\n')
print "Testing py-include...",
res = cherrytemplate.renderTemplate("""Hello, <py-include="t.html">""", inputEncoding = 'latin-1', outputEncoding = 'latin-1')
checkRes(res, "Hello, " + europoundLatin1*2 + '\n')
print "Testing generator result...",
template = """<py-for="i in xrange(10000)"><py-eval="str(i)"></py-for>"""
for i, line in enumerate(cherrytemplate.renderTemplate(template, returnGenerator = True)):
try:
assert int(line) == i
except:
print "\nError in returnGenerator template\n*** ERROR ***"
sys.exit(-1)
i += 1
assert(i == 10000)
print "OK"
|
Juanvvc/scfs
|
webserver/cherrytemplate/unittest/unittest.py
|
Python
|
gpl-2.0
| 4,016
|
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import bodhi.tests.functional.base
from bodhi.models import (
DBSession,
Release,
ReleaseState,
Update,
)
class TestReleasesService(bodhi.tests.functional.base.BaseWSGICase):
def setUp(self):
super(TestReleasesService, self).setUp()
session = DBSession()
release = Release(
name=u'F22', long_name=u'Fedora 22',
id_prefix=u'FEDORA', version=u'22',
dist_tag=u'f22', stable_tag=u'f22-updates',
testing_tag=u'f22-updates-testing',
candidate_tag=u'f22-updates-candidate',
pending_testing_tag=u'f22-updates-testing-pending',
pending_stable_tag=u'f22-updates-pending',
override_tag=u'f22-override')
session.add(release)
session.flush()
def test_404(self):
self.app.get('/releases/watwatwat', status=404)
def test_get_single_release_by_lower(self):
res = self.app.get('/releases/f22')
self.assertEquals(res.json_body['name'], 'F22')
def test_get_single_release_by_upper(self):
res = self.app.get('/releases/F22')
self.assertEquals(res.json_body['name'], 'F22')
def test_get_single_release_by_long(self):
res = self.app.get('/releases/Fedora%2022')
self.assertEquals(res.json_body['name'], 'F22')
def test_list_releases(self):
res = self.app.get('/releases/')
body = res.json_body
self.assertEquals(len(body['releases']), 2)
self.assertEquals(body['releases'][0]['name'], u'F17')
self.assertEquals(body['releases'][1]['name'], u'F22')
def test_list_releases_with_pagination(self):
res = self.app.get('/releases/')
body = res.json_body
self.assertEquals(len(body['releases']), 2)
res = self.app.get('/releases/', {'rows_per_page': 1})
body = res.json_body
self.assertEquals(len(body['releases']), 1)
self.assertEquals(body['releases'][0]['name'], 'F17')
res = self.app.get('/releases/', {'rows_per_page': 1, 'page': 2})
body = res.json_body
self.assertEquals(len(body['releases']), 1)
self.assertEquals(body['releases'][0]['name'], 'F22')
def test_list_releases_by_name(self):
res = self.app.get('/releases/', {"name": 'F22'})
body = res.json_body
self.assertEquals(len(body['releases']), 1)
self.assertEquals(body['releases'][0]['name'], 'F22')
def test_list_releases_by_name_match(self):
res = self.app.get('/releases/', {"name": '%1%'})
body = res.json_body
self.assertEquals(len(body['releases']), 1)
self.assertEquals(body['releases'][0]['name'], 'F17')
def test_list_releases_by_name_match_miss(self):
res = self.app.get('/releases/', {"name": '%wat%'})
self.assertEquals(len(res.json_body['releases']), 0)
def test_list_releases_by_update_title(self):
res = self.app.get('/releases/', {"updates": 'bodhi-2.0-1.fc17'})
body = res.json_body
self.assertEquals(len(body['releases']), 1)
self.assertEquals(body['releases'][0]['name'], 'F17')
def test_list_releases_by_update_alias(self):
session = DBSession()
update = session.query(Update).first()
update.alias = u'some_alias'
session.flush()
res = self.app.get('/releases/', {"updates": 'some_alias'})
body = res.json_body
self.assertEquals(len(body['releases']), 1)
self.assertEquals(body['releases'][0]['name'], 'F17')
def test_list_releases_by_nonexistant_update(self):
res = self.app.get('/releases/', {"updates": 'carbunkle'}, status=400)
self.assertEquals(res.json_body['errors'][0]['name'], 'updates')
self.assertEquals(res.json_body['errors'][0]['description'],
'Invalid updates specified: carbunkle')
def test_list_releases_by_package_name(self):
res = self.app.get('/releases/', {"packages": 'bodhi'})
body = res.json_body
self.assertEquals(len(body['releases']), 1)
self.assertEquals(body['releases'][0]['name'], 'F17')
def test_list_releases_by_nonexistant_package(self):
res = self.app.get('/releases/', {"packages": 'carbunkle'}, status=400)
self.assertEquals(res.json_body['errors'][0]['name'], 'packages')
self.assertEquals(res.json_body['errors'][0]['description'],
'Invalid packages specified: carbunkle')
def test_new_release(self):
attrs = {"name": "F42", "long_name": "Fedora 42", "version": "42",
"id_prefix": "FEDORA", "branch": "f42", "dist_tag": "f42",
"stable_tag": "f42-updates",
"testing_tag": "f42-updates-testing",
"candidate_tag": "f42-updates-candidate",
"pending_stable_tag": "f42-updates-pending",
"pending_testing_tag": "f42-updates-testing-pending",
"override_tag": "f42-override",
"csrf_token": self.get_csrf_token(),
}
self.app.post("/releases/", attrs, status=200)
attrs.pop('csrf_token')
r = DBSession().query(Release).filter(Release.name==attrs["name"]).one()
for k, v in attrs.items():
self.assertEquals(getattr(r, k), v)
self.assertEquals(r.state, ReleaseState.disabled)
def test_new_release_invalid_tags(self):
attrs = {"name": "EL42", "long_name": "EPEL 42", "version": "42",
"id_prefix": "FEDORA EPEL", "branch": "f42",
"dist_tag": "epel42", "stable_tag": "epel42",
"testing_tag": "epel42-testing",
"candidate_tag": "epel42-candidate",
"override_tag": "epel42-override",
"csrf_token": self.get_csrf_token(),
}
res = self.app.post("/releases/", attrs, status=400)
self.assertEquals(len(res.json_body['errors']), 4)
for error in res.json_body['errors']:
self.assertEquals(error["description"], "Invalid tag: %s" % attrs[error["name"]])
def test_edit_release(self):
name = u"F22"
res = self.app.get('/releases/%s' % name, status=200)
r = res.json_body
r["edited"] = name
r["state"] = "current"
r["csrf_token"] = self.get_csrf_token()
res = self.app.post("/releases/", r, status=200)
r = DBSession().query(Release).filter(Release.name==name).one()
self.assertEquals(r.state, ReleaseState.current)
def test_get_single_release_html(self):
res = self.app.get('/releases/f17', headers={'Accept': 'text/html'})
self.assertEquals(res.content_type, 'text/html')
self.assertIn('f17-updates-testing', res)
def test_get_non_existent_release_html(self):
self.app.get('/releases/x', headers={'Accept': 'text/html'}, status=404)
def test_get_releases_html(self):
res = self.app.get('/releases/', headers={'Accept': 'text/html'})
self.assertEquals(res.content_type, 'text/html')
self.assertIn('Fedora 22', res)
|
michel-slm/bodhi
|
bodhi/tests/functional/test_releases.py
|
Python
|
gpl-2.0
| 7,861
|
class Solution:
# @param head, a ListNode
# @param m, an integer
# @param n, an integer
# @return a ListNode
    # tail points to the node just before position m (possibly the dummy
    # node Tpivot); nodes m..n are re-inserted one at a time right after
    # tail (head insertion), which reverses that sublist in a single pass.
def reverseBetween(self, head, m, n):
Tpivot, newHead = ListNode(0), head
tail, nNode, newHead, Tpivot.next = Tpivot, Tpivot, Tpivot, newHead
for i in range(m) :
tail, nNode = nNode, nNode.next
nextHead,tail.next = nNode, None
for i in range(m, n+1) :
cur, nextHead = nextHead, nextHead.next
tail.next, cur.next = cur, tail.next
nNode.next = nextHead
return newHead.next
def printList(self, head) :
while head :
print head.val
head = head.next
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
t1, t2, t3, t4, t5 = ListNode(1), ListNode(2), ListNode(3), ListNode(4), ListNode(5)
t1.next = t2; t2.next = t3; t3.next = t4; t4.next = t5
sol = Solution()
sol.printList(sol.reverseBetween(t1, 2,4))
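# Expected output: 1 4 3 2 5 (one value per line) -- positions 2..4 of the
# list 1->2->3->4->5 are reversed.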
|
yelu/leetcode
|
ReverseLinkedListII.py
|
Python
|
gpl-2.0
| 1,179
|
#
# ast_input_line.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from pynestml.meta_model.ast_data_type import ASTDataType
from pynestml.meta_model.ast_input_type import ASTInputType
from pynestml.meta_model.ast_node import ASTNode
from pynestml.meta_model.ast_signal_type import ASTSignalType
class ASTInputLine(ASTNode):
"""
This class is used to store a declaration of an input line.
    ASTInputLine represents a single line from the input block, e.g.:
        spikeBuffer <- inhibitory excitatory spike
    @attribute sizeParameter Optional parameter used for multisynapse neurons.
    @attribute inputType Type of the input channel, e.g. inhibitory or excitatory (or both).
    @attribute spike True iff this buffer is a spike buffer.
    @attribute current True iff this buffer is a current buffer.
Grammar:
inputLine :
name=NAME
('[' sizeParameter=NAME ']')?
(datatype)?
'<-' inputType*
(is_current = 'current' | is_spike = 'spike');
Attributes:
name = None
size_parameter = None
data_type = None
input_types = None
signal_type = None
"""
    def __init__(self, name=None, size_parameter=None, data_type=None, input_types=None, signal_type=None,
                 source_position=None):
"""
Standard constructor.
:param name: the name of the buffer
:type name: str
:param size_parameter: a parameter indicating the index in an array.
:type size_parameter: str
:param data_type: the data type of this buffer
:type data_type: ASTDataType
:param input_types: a list of input types specifying the buffer.
:type input_types: list(ASTInputType)
:param signal_type: type of signal received, i.e., spikes or currents
:type signal_type: SignalType
:param source_position: the position of this element in the source file.
:type source_position: ASTSourceLocation.
"""
        if input_types is None:
            # avoid sharing one mutable default list between instances
            input_types = list()
        assert (name is not None and isinstance(name, str)), \
'(PyNestML.AST.InputLine) No or wrong type of name provided (%s)!' % type(name)
assert (signal_type is not None and isinstance(signal_type, ASTSignalType)), \
'(PyNestML.AST.InputLine) No or wrong type of input signal type provided (%s)!' % type(signal_type)
assert (input_types is not None and isinstance(input_types, list)), \
'(PyNestML.AST.InputLine) No or wrong type of input types provided (%s)!' % type(input_types)
for typ in input_types:
assert (typ is not None and isinstance(typ, ASTInputType)), \
'(PyNestML.AST.InputLine) No or wrong type of input type provided (%s)!' % type(typ)
assert (size_parameter is None or isinstance(size_parameter, str)), \
'(PyNestML.AST.InputLine) Wrong type of index parameter provided (%s)!' % type(size_parameter)
assert (data_type is None or isinstance(data_type, ASTDataType)), \
'(PyNestML.AST.InputLine) Wrong type of data-type provided (%s)!' % type(data_type)
super(ASTInputLine, self).__init__(source_position)
self.signal_type = signal_type
self.input_types = input_types
self.size_parameter = size_parameter
self.name = name
self.data_type = data_type
return
def get_name(self):
"""
Returns the name of the declared buffer.
:return: the name.
:rtype: str
"""
return self.name
def has_index_parameter(self):
"""
Returns whether a index parameter has been defined.
:return: True if index has been used, otherwise False.
:rtype: bool
"""
return self.size_parameter is not None
def get_index_parameter(self):
"""
Returns the index parameter.
:return: the index parameter.
:rtype: str
"""
return self.size_parameter
def has_input_types(self):
"""
Returns whether input types have been defined.
:return: True, if at least one input type has been defined.
:rtype: bool
"""
return len(self.input_types) > 0
def get_input_types(self):
"""
Returns the list of input types.
:return: a list of input types.
:rtype: list(ASTInputType)
"""
return self.input_types
def is_spike(self):
"""
Returns whether this is a spike buffer or not.
:return: True if spike buffer, False else.
:rtype: bool
"""
return self.signal_type is ASTSignalType.SPIKE
def is_current(self):
"""
Returns whether this is a current buffer or not.
:return: True if current buffer, False else.
:rtype: bool
"""
return self.signal_type is ASTSignalType.CURRENT
def is_excitatory(self):
"""
Returns whether this buffer is excitatory or not. For this, it has to be marked explicitly by the
excitatory keyword or no keywords at all shall occur (implicitly all types).
:return: True if excitatory, False otherwise.
:rtype: bool
"""
if self.get_input_types() is not None and len(self.get_input_types()) == 0:
return True
for typE in self.get_input_types():
if typE.is_excitatory:
return True
return False
def is_inhibitory(self):
"""
Returns whether this buffer is inhibitory or not. For this, it has to be marked explicitly by the
inhibitory keyword or no keywords at all shall occur (implicitly all types).
:return: True if inhibitory, False otherwise.
:rtype: bool
"""
if self.get_input_types() is not None and len(self.get_input_types()) == 0:
return True
for typE in self.get_input_types():
if typE.is_inhibitory:
return True
return False
def has_datatype(self):
"""
Returns whether this buffer has a defined data type or not.
:return: True if it has a datatype, otherwise False.
:rtype: bool
"""
return self.data_type is not None and isinstance(self.data_type, ASTDataType)
def get_datatype(self):
"""
Returns the currently used data type of this buffer.
:return: a single data type object.
:rtype: ASTDataType
"""
return self.data_type
def equals(self, other):
"""
The equals method.
:param other: a different object.
:type other: object
:return: True if equal,otherwise False.
:rtype: bool
"""
if not isinstance(other, ASTInputLine):
return False
if self.get_name() != other.get_name():
return False
        if self.has_index_parameter() != other.has_index_parameter():
            return False
        if (self.has_index_parameter() and other.has_index_parameter() and
                self.get_index_parameter() != other.get_index_parameter()):
            return False
        if self.has_datatype() != other.has_datatype():
            return False
if self.has_datatype() and other.has_datatype() and not self.get_datatype().equals(other.get_datatype()):
return False
if len(self.get_input_types()) != len(other.get_input_types()):
return False
my_input_types = self.get_input_types()
your_input_types = other.get_input_types()
for i in range(0, len(my_input_types)):
if not my_input_types[i].equals(your_input_types[i]):
return False
return self.is_spike() == other.is_spike() and self.is_current() == other.is_current()
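# Minimal usage sketch (illustrative only; assumes ASTSignalType.SPIKE from pynestml.meta_model):
#     line = ASTInputLine(name="spikeBuffer", input_types=[], signal_type=ASTSignalType.SPIKE)
#     assert line.is_spike()
#     assert line.is_excitatory() and line.is_inhibitory()  # no keywords -> implicitly both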
|
kperun/nestml
|
pynestml/meta_model/ast_input_line.py
|
Python
|
gpl-2.0
| 8,453
|
#!/usr/bin/env python3
# L-system with MIDI output state machine
# set production rules to match Inkscape
# By Mike Cook - December 2017
import time, copy
import rtmidi
midiout = rtmidi.MidiOut()
noteDuration = 0.3
axiom = "++F" # Bush
rules = [("F->","FF-[-F+F+F]+[+F-F-F]")]
newAxiom = axiom
def main():
global newAxiom
init() # open MIDI port
offMIDI()
initKey()
print("Rules :-")
print(rules)
print("Axiom :-")
print(axiom)
composition = [newAxiom]
for r in range(0,4): # change for deeper levels
newAxiom = applyRules(newAxiom)
composition.append(newAxiom)
sonify(composition)
def applyRulesOriginal(start):  # unused original version: drops symbols that have no matching rule
expand = ""
for i in range(0,len(start)):
rule = start[i:i+1] +"->"
for j in range(0,len(rules)):
if rule == rules[j][0] :
expand += rules[j][1]
return expand
def applyRules(start):
expand = ""
for i in range(0,len(start)):
symbol = start[i:i+1]
rule = symbol +"->"
found = False
for j in range(0,len(rules)):
if rule == rules[j][0] :
expand += rules[j][1]
found = True
if not found :
expand += symbol
return expand
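# One rewrite step under the rules above (illustrative): applyRules("F") returns
# "FF-[-F+F+F]+[+F-F-F]", while symbols with no rule (e.g. "+", "[") pass through unchanged.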
def sonify(data): # turn data into sound
initMIDI(0,65) # set volume
noteIncrement = 1
notePlay = len(notes) / 2
midiout.send_message([0xC0 | 0,19]) # voice 19 Church organ
lastNote = 1
for j in range(0,len(data)):
duration = noteDuration # start with same note length
notePlay = len(notes) / 2 # and same start note
noteIncrement = 1 # and same note increment
stack = [] # clear stack
print("")
if j==0:
print("Axiom ",j,data[j])
else:
print("Recursion ",j,data[j])
for i in range(0,len(data[j])):
symbol = ord(data[j][i:i+1])
if symbol >= ord('A') and symbol <= ord('F') : # play current note
#print(" playing",notePlay)
note = notes[int(notePlay)]
#print("note", note, "note increment",noteIncrement )
midiout.send_message([0x80 | 0,lastNote,68]) # last note off
midiout.send_message([0x90 | 0,note,68]) # next note on
lastNote = note
if symbol >= ord('A') and symbol <= ord('L') : # move note
notePlay += noteIncrement
if notePlay < 0: # wrap round playing note
notePlay = len(notes)-1
elif notePlay >= len(notes):
notePlay = 0
time.sleep(duration)
if symbol == ord('+'):
noteIncrement += 1
if noteIncrement > 6:
noteIncrement = 1
if symbol == ord('-'):
noteIncrement -= 1
if noteIncrement < -6:
noteIncrement = -1
if symbol == ord('|'): # turn back
noteIncrement = -noteIncrement
if symbol == ord('['): # push state on stack
stack.append((duration,notePlay,noteIncrement))
#print("pushed",duration,notePlay,noteIncrement,"Stack depth",len(stack))
if symbol == ord(']'): # pull state from stack
if len(stack) != 0 :
recovered = stack.pop(int(len(stack)-1))
duration = recovered[0]
notePlay = recovered[1]
noteIncrement = recovered[2]
#print("recovered",duration,notePlay,noteIncrement,"Stack depth",len(stack))
else:
print("stack empty")
midiout.send_message([0x80 | 0,lastNote,68]) # last note off
time.sleep(2.0)
def initKey():
global startNote,endNote,notes
    key = [2,1,2,2,1,2] # scale intervals (note: a true major scale would be [2,2,1,2,2,2,1])
notes =[] # look up list note number to MIDI note
startNote = 24 # defines the key (this is C )
endNote = 84
i = startNote
j = 0
while i< endNote:
notes.append(i)
i += key[j]
j +=1
if j >= 6:
j = 0
#print(notes)
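    # with startNote 24 and this key, notes begins [24, 26, 27, 29, 31, 32, 34, 36, ...]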
def init():
available_ports = midiout.get_ports()
print("MIDI ports available:-")
for i in range(0,len(available_ports)):
print(i,available_ports[i])
if available_ports:
midiout.open_port(1)
else:
midiout.open_virtual_port("My virtual output")
def initMIDI(ch,vol):
midiout.send_message([0xB0 | ch,0x07,vol]) # set to volume
midiout.send_message([0xB0 | ch,0x00,0x00]) # set default bank
def offMIDI():
for ch in range(0,16):
midiout.send_message([0xB0 | ch,0x78,0]) # notes off
# Main program logic:
if __name__ == '__main__':
    try:
        main()
    finally:
        offMIDI()  # always silence any sounding notes, even on Ctrl-C or error
|
Grumpy-Mike/Mikes-Pi-Bakery
|
Fractal_Music/Classic_Fractals.py
|
Python
|
gpl-2.0
| 4,725
|
import fabio, sys, numpy as np
stem = sys.argv[1]
if 1:
    # pack 1024 EDF frames into one raw 8-bit volume
    out = open("%s.bin" % (stem), "wb")
    for i in range(1024):
        im = fabio.open("%s/%s_%04d.edf" % (stem, stem, i)).data
        im = (im + 10) * 10  # shift and scale intensities into the 0-255 byte range
        out.write(im.clip(0, 255).astype(np.uint8))
im = fabio.open("%s/%s_%04d.edf" % (stem, stem, 0))  # first frame, kept for reference (unused)
# write a minimal NRRD header that points at the raw volume
open("%s.nrrd" % (stem), "w").write("""NRRD0001
# my first nrrd
type: uchar
dimension: 3
sizes: 1024 1024 1024
encoding: raw
data file: %s.bin
"""%(stem))
|
jonwright/ImageD11
|
sandbox/ev78/edf2nrrd.py
|
Python
|
gpl-2.0
| 453
|
# -*- coding: utf-8 -*-
"""This module tests various ways how to set up the provisioning using the provisioning dialog."""
import fauxfactory
import pytest
import re
from datetime import datetime, timedelta
from cfme.common.provider import cleanup_vm
from cfme.common.vm import VM
from cfme.provisioning import provisioning_form
from cfme.services import requests
from cfme.web_ui import InfoBlock, fill, flash
from utils import mgmt_system, testgen
from utils.blockers import BZ
from utils.log import logger
from utils.wait import wait_for, TimedOutError
pytestmark = [
pytest.mark.meta(server_roles="+automate"),
pytest.mark.usefixtures('uses_infra_providers'),
pytest.mark.long_running,
pytest.mark.meta(blockers=[
BZ(
1265466,
unblock=lambda provider: not isinstance(provider.mgmt, mgmt_system.RHEVMSystem))
]),
pytest.mark.tier(3)
]
def pytest_generate_tests(metafunc):
# Filter out providers without provisioning data or hosts defined
argnames, argvalues, idlist = testgen.infra_providers(metafunc,
required_fields=[
['provisioning', 'template'],
['provisioning', 'host'],
['provisioning', 'datastore']
])
testgen.parametrize(metafunc, argnames, argvalues, ids=idlist, scope="module")
@pytest.fixture(scope="function")
def prov_data(provisioning, provider):
data = {
"first_name": fauxfactory.gen_alphanumeric(),
"last_name": fauxfactory.gen_alphanumeric(),
"email": "{}@{}.test".format(
fauxfactory.gen_alphanumeric(), fauxfactory.gen_alphanumeric()),
"manager_name": "{} {}".format(
fauxfactory.gen_alphanumeric(), fauxfactory.gen_alphanumeric()),
"vlan": provisioning.get("vlan", None),
# "datastore_create": False,
"datastore_name": {"name": provisioning["datastore"]},
"host_name": {"name": provisioning["host"]},
# "catalog_name": provisioning["catalog_item_type"],
}
if provider.type == 'rhevm':
data['provision_type'] = 'Native Clone'
elif provider.type == 'virtualcenter':
data['provision_type'] = 'VMware'
# Otherwise just leave it alone
return data
@pytest.fixture(scope="function")
def provisioner(request, setup_provider, provider):
def _provisioner(template, provisioning_data, delayed=None):
pytest.sel.force_navigate('infrastructure_provision_vms', context={
'provider': provider,
'template_name': template,
})
vm_name = provisioning_data["vm_name"]
fill(provisioning_form, provisioning_data, action=provisioning_form.submit_button)
flash.assert_no_errors()
request.addfinalizer(lambda: cleanup_vm(vm_name, provider))
if delayed is not None:
total_seconds = (delayed - datetime.utcnow()).total_seconds()
row_description = 'Provision from [{}] to [{}]'.format(template, vm_name)
cells = {'Description': row_description}
try:
row, __ = wait_for(requests.wait_for_request, [cells],
fail_func=requests.reload, num_sec=total_seconds, delay=5)
pytest.fail("The provisioning was not postponed")
except TimedOutError:
pass
logger.info('Waiting for vm %s to appear on provider %s', vm_name, provider.key)
wait_for(provider.mgmt.does_vm_exist, [vm_name], handle_exception=True, num_sec=600)
# nav to requests page happens on successful provision
logger.info('Waiting for cfme provision request for vm %s', vm_name)
row_description = 'Provision from [{}] to [{}]'.format(template, vm_name)
cells = {'Description': row_description}
row, __ = wait_for(requests.wait_for_request, [cells],
fail_func=requests.reload, num_sec=900, delay=20)
assert row.last_message.text == 'Vm Provisioned Successfully'
return VM.factory(vm_name, provider)
return _provisioner
def test_change_cpu_ram(provisioner, soft_assert, provider, prov_data):
""" Tests change RAM and CPU in provisioning dialog.
    Prerequisites:
* A provider set up, supporting provisioning in CFME
Steps:
* Open the provisioning dialog.
* Apart from the usual provisioning settings, set number of CPUs and amount of RAM.
* Submit the provisioning request and wait for it to finish.
* Visit the page of the provisioned VM. The summary should state correct values for CPU&RAM.
Metadata:
test_flag: provision
"""
prov_data["vm_name"] = "test_prov_dlg_{}".format(fauxfactory.gen_alphanumeric())
prov_data["num_sockets"] = "4"
prov_data["cores_per_socket"] = "1"
prov_data["memory"] = "4096"
template_name = provider.data['provisioning']['template']
vm = provisioner(template_name, prov_data)
# Go to the VM info
data = vm.get_detail(properties=("Properties", "Container")).strip()
# No longer possible to use version pick because of cherrypicking?
regexes = map(re.compile, [
r"^[^(]*\((\d+) CPUs?, ([^)]+)\)[^)]*$",
r"^.*?(\d+) CPUs? .*?(\d+ MB)$"])
for regex in regexes:
match = regex.match(data)
if match is not None:
num_cpus, memory = match.groups()
break
else:
raise ValueError("Could not parse string {}".format(repr(data)))
soft_assert(num_cpus == "4", "num_cpus should be {}, is {}".format("4", num_cpus))
soft_assert(memory == "4096 MB", "memory should be {}, is {}".format("4096 MB", memory))
# Special parametrization in testgen above
@pytest.mark.meta(blockers=[1209847])
@pytest.mark.parametrize("disk_format", ["thin", "thick", "preallocated"])
@pytest.mark.uncollectif(lambda provider, disk_format:
(provider.type == "rhevm" and disk_format == "thick") or
(provider.type != "rhevm" and disk_format == "preallocated") or
# Temporarily, our storage domain cannot handle preallocated disks
(provider.type == "rhevm" and disk_format == "preallocated"))
def test_disk_format_select(provisioner, disk_format, provider, prov_data):
""" Tests disk format selection in provisioning dialog.
    Prerequisites:
* A provider set up, supporting provisioning in CFME
Steps:
* Open the provisioning dialog.
* Apart from the usual provisioning settings, set the disk format to be thick or thin.
* Submit the provisioning request and wait for it to finish.
* Visit the page of the provisioned VM.
        * The ``Thin Provisioning Used`` field should state true or false according to the selection
Metadata:
test_flag: provision
"""
prov_data["vm_name"] = "test_prov_dlg_{}".format(fauxfactory.gen_alphanumeric())
prov_data["disk_format"] = disk_format
template_name = provider.data['provisioning']['template']
vm = provisioner(template_name, prov_data)
# Go to the VM info
vm.load_details(refresh=True)
thin = InfoBlock.text(
"Datastore Allocation Summary", "Thin Provisioning Used").strip().lower() == "true"
if disk_format == "thin":
assert thin, "The disk format should be Thin"
else:
assert not thin, "The disk format should not be Thin"
@pytest.mark.parametrize("started", [True, False])
def test_power_on_or_off_after_provision(provisioner, prov_data, provider, started):
""" Tests setting the desired power state after provisioning.
    Prerequisites:
* A provider set up, supporting provisioning in CFME
Steps:
* Open the provisioning dialog.
* Apart from the usual provisioning settings, set whether you want or not the VM to be
powered on after provisioning.
* Submit the provisioning request and wait for it to finish.
* The VM should become steady in the desired VM power state.
Metadata:
test_flag: provision
"""
vm_name = "test_prov_dlg_{}".format(fauxfactory.gen_alphanumeric())
prov_data["vm_name"] = vm_name
prov_data["power_on"] = started
template_name = provider.data['provisioning']['template']
provisioner(template_name, prov_data)
wait_for(
lambda: provider.mgmt.does_vm_exist(vm_name) and
(provider.mgmt.is_vm_running if started else provider.mgmt.is_vm_stopped)(vm_name),
num_sec=240, delay=5
)
def test_tag(provisioner, prov_data, provider):
""" Tests tagging VMs using provisioning dialogs.
    Prerequisites:
* A provider set up, supporting provisioning in CFME
Steps:
* Open the provisioning dialog.
* Apart from the usual provisioning settings, pick a tag.
* Submit the provisioning request and wait for it to finish.
        * Visit the page of the VM; it should display the selected tags
Metadata:
test_flag: provision
"""
prov_data["vm_name"] = "test_prov_dlg_{}".format(fauxfactory.gen_alphanumeric())
prov_data["apply_tags"] = [(["Service Level *", "Gold"], True)]
template_name = provider.data['provisioning']['template']
vm = provisioner(template_name, prov_data)
tags = vm.get_tags()
assert any(tag.category.display_name == "Service Level" and tag.display_name == "Gold"
for tag in tags), "Service Level: Gold not in tags ({})".format(str(tags))
@pytest.mark.meta(blockers=[1204115])
def test_provisioning_schedule(provisioner, provider, prov_data):
""" Tests provision scheduling.
    Prerequisites:
* A provider set up, supporting provisioning in CFME
Steps:
* Open the provisioning dialog.
* Apart from the usual provisioning settings, set a scheduled provision and pick a time.
* Submit the provisioning request, it should not start before the scheduled time.
Metadata:
test_flag: provision
"""
now = datetime.utcnow()
prov_data["vm_name"] = "test_prov_dlg_{}".format(fauxfactory.gen_alphanumeric())
prov_data["schedule_type"] = "schedule"
prov_data["provision_date"] = now.strftime("%m/%d/%Y")
STEP = 5
minutes_diff = (STEP - (now.minute % STEP))
# To have some gap for automation
if minutes_diff <= 3:
minutes_diff += 5
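    # e.g. now.minute == 57 gives minutes_diff == 3, bumped to 8: schedule ~8 minutes ahead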
provision_time = timedelta(minutes=minutes_diff) + now
prov_data["provision_start_hour"] = str(provision_time.hour)
prov_data["provision_start_min"] = str(provision_time.minute)
template_name = provider.data['provisioning']['template']
provisioner(template_name, prov_data, delayed=provision_time)
|
akrzos/cfme_tests
|
cfme/tests/infrastructure/test_provisioning_dialog.py
|
Python
|
gpl-2.0
| 10,707
|
"""This class contains the TestIcon class."""
import unittest
import pygame
from src.GameWindow import GameWindow
from src.Icon import Icon
from src.Point import Point
class TestIcon(unittest.TestCase):
"""This class unit tests the Icon class."""
def setUp(self):
"""Create a Test Icon and start pygame."""
pygame.init()
self.test_text_icon = Icon('test icon',
GameWindow.BLACK, GameWindow.WHITE,
Point(0, 0), (10, 10))
self.test_vertex_icon = Icon('vertex icon',
GameWindow.BLACK, GameWindow.WHITE,
Point(0, 0), (10, 10))
self.test_vertex_icon.vertices = [(10, 10), (-10, -10), (0, 0)]
@classmethod
def test_init(cls):
"""Test the init method of the Icon class."""
_unused_icon = Icon('test icon',
GameWindow.BLACK, GameWindow.WHITE,
Point(0, 0), (10, 10))
def test_get_vertex_list(self):
"""Test the _get_vertex_list method."""
input_list = [Point(0, 0), Point(5, 0), Point(0, -5)]
center = (10, 10)
# _get_vertex_list(input_list, center_x, center_y) should map
# (0,0) of the input_list to center_x and center_y (and all other
# points accordingly)
expected_output = [Point(10, 10), Point(15, 10), Point(10, 5)]
# We need to use an instance of the Icon class
# to call the _get_vertex_list method
output = self.test_text_icon._get_vertex_list(input_list, center)
self.assertEqual(expected_output, output)
def test_display(self):
"""
        Test the display method of an Icon.
All this really does is make sure the method executes correctly.
If the method call errors, the test will fail.
"""
        # Create a test screen to display things on
test_screen = pygame.display.set_mode((1500, 1500))
# Attempt to display the test text Icon
self.test_text_icon.display(test_screen)
# Attempt to display the test vertex Icon
self.test_vertex_icon.display(test_screen)
# Make the test text Icon not displayed
self.test_text_icon.displayed = False
# This will test the branch of code for when you attempt to
# display a non displayed Icon
self.test_text_icon.display(test_screen)
# We don't have to worry about re-displaying the Icon
# as each test case recreates the test Icon
if __name__ == "__main__":
unittest.main()
|
gregcorbett/SciBot
|
test/test_icon.py
|
Python
|
gpl-2.0
| 2,633
|
# ireal/irmath.py
#
# Copyright 2008 Rafael Menezes Barreto <rmb3@cin.ufpe.br,
# rafaelbarreto87@gmail.com>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License version 2
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""Real Interval standard functions extensions module
Provides the standard functions extensions for Real Intervals.
It was developed in CIn/UFPE (Brazil) by Rafael Menezes Barreto
<rmb3@cin.ufpe.br, rafaelbarreto87@gmail.com> as part of the IntPy package and
it's free software.
"""
|
yig/intpy
|
intpy/src/ireal/irmath.py
|
Python
|
gpl-2.0
| 1,041
|
#!/usr/bin/env python
import os
import sys
import shutil
import os.path
import createrepo_c as cr
def do_repodata(path):
# Prepare repodata/ directory
repodata_path = os.path.join(path, "repodata")
if os.path.exists(repodata_path):
x = 0
while True:
new_repodata_path = "%s_%s" % (repodata_path, x)
if not os.path.exists(new_repodata_path):
shutil.move(repodata_path, new_repodata_path)
break
os.mkdir(repodata_path)
# Prepare metadata files
repomd_path = os.path.join(repodata_path, "repomd.xml")
pri_xml_path = os.path.join(repodata_path, "primary.xml.gz")
fil_xml_path = os.path.join(repodata_path, "filelists.xml.gz")
oth_xml_path = os.path.join(repodata_path, "other.xml.gz")
pri_db_path = os.path.join(repodata_path, "primary.sqlite.gz")
fil_db_path = os.path.join(repodata_path, "filelists.sqlite.gz")
oth_db_path = os.path.join(repodata_path, "other.sqlite.gz")
pri_xml = cr.PrimaryXmlFile(pri_xml_path)
fil_xml = cr.FilelistsXmlFile(fil_xml_path)
oth_xml = cr.OtherXmlFile(oth_xml_path)
pri_db = cr.PrimarySqlite(pri_db_path)
fil_db = cr.FilelistsSqlite(fil_db_path)
oth_db = cr.OtherSqlite(oth_db_path)
# List directory and prepare list of files to process
pkg_list = []
for filename in os.listdir(path):
filename = os.path.join(path, filename)
if os.path.isfile(filename) and filename.endswith(".rpm"):
pkg_list.append(filename)
pri_xml.set_num_of_pkgs(len(pkg_list))
fil_xml.set_num_of_pkgs(len(pkg_list))
oth_xml.set_num_of_pkgs(len(pkg_list))
# Process all packages
for filename in pkg_list:
pkg = cr.package_from_rpm(filename)
print "Processing: %s" % pkg.nevra()
pri_xml.add_pkg(pkg)
fil_xml.add_pkg(pkg)
oth_xml.add_pkg(pkg)
pri_db.add_pkg(pkg)
fil_db.add_pkg(pkg)
oth_db.add_pkg(pkg)
pri_xml.close()
fil_xml.close()
oth_xml.close()
# Note: DBs are still open! We have to calculate checksums of xml files
# and insert them to the databases first!
# Prepare repomd.xml
repomd = cr.Repomd()
# Add records into the repomd.xml
repomdrecords = (("primary", pri_xml_path, pri_db),
("filelists", fil_xml_path, fil_db),
("other", oth_xml_path, oth_db),
("primary_db", pri_db_path, None),
("filelists_db", fil_db_path, None),
("other_db", oth_db_path, None))
for name, path, db_to_update in repomdrecords:
record = cr.RepomdRecord(name, path)
record.fill(cr.SHA256)
if (db_to_update):
db_to_update.dbinfo_update(record.checksum)
db_to_update.close()
repomd.set_record(record)
# Write repomd.xml
open(repomd_path, "w").write(repomd.xml_dump())
# DONE!
if __name__ == "__main__":
if len(sys.argv) != 2 or not os.path.isdir(sys.argv[1]):
print "Usage: %s <directory>" % (sys.argv[0])
sys.exit(1)
do_repodata(sys.argv[1])
print "Repository created in %s" % sys.argv[1]
|
janncker/createrepo_c
|
examples/python/simple_createrepo.py
|
Python
|
gpl-2.0
| 3,229
|
import psycopg2, json
import sys
import psycopg2.extras
conn = psycopg2.connect('dbname=wgraph user=nmcmaster')
def export():
out = {'links':{},'nodes':{}}
with conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor) as cursor:
cursor.execute('select id,name as title,description,image_size,image,source,sourceurl as url from nodes where needs_review = false')
for row in cursor:
out['nodes'][row['id']] = row
cursor.execute('select node_to,node_from from edges join nodes on nodes.id=node_from where nodes.needs_review = false')
for row in cursor:
if not row['node_from'] in out['links']:
out['links'][row['node_from']]=[]
out['links'][row['node_from']].append(row['node_to'])
todel=[]
for key in out['nodes']:
if not key in out['links']:
todel.append(key)
for i in todel:
del out['nodes'][i]
return out
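# shape of the returned dict: {'nodes': {id: row, ...}, 'links': {from_id: [to_id, ...], ...}}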
def writefile(filename):
with open(filename,'w') as f:
f.write(json.dumps(export()))
if __name__ == '__main__':
writefile(sys.argv[1])
|
natemcmaster/historyexplorer
|
builder/exporter.py
|
Python
|
gpl-2.0
| 984
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 S2S Network Consultoria e Tecnologia da Informacao LTDA
#
# Author: Alan Wang <wzj401@gmail.com>
# Tianwei Liu <liutianweidlut@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import gtk
import sys
from higwidgets.higwindows import HIGWindow
from higwidgets.higboxes import HIGVBox
from higwidgets.higbuttons import HIGButton
from higwidgets.higboxes import HIGVBox, HIGHBox
from higwidgets.higboxes import HIGSpacer, hig_box_space_holder
from higwidgets.higlabels import HIGSectionLabel, HIGEntryLabel
from higwidgets.higtables import HIGTable
from higwidgets.higdialogs import HIGAlertDialog
from higwidgets.higlabels import HIGLabel
from higwidgets.higentries import HIGTextEntry, HIGPasswordEntry
from umit.icm.agent.I18N import _
from umit.icm.agent.Application import theApp
from umit.icm.agent.Global import *
from umit.icm.agent.test import test_name_by_id
from umit.icm.agent.test import ALL_TESTS
from umit.icm.agent.utils.Startup import StartUP
from twisted.internet import reactor
###################################################
#Peer Information Page in Preference Window
class PeerInfoPage(HIGVBox):
""""""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
HIGVBox.__init__(self)
self.__create_widgets()
self.__pack_widgets()
def __create_widgets(self):
""""""
self.peerinfo_hbox = HIGHBox()
self.cloudagg_hbox = HIGHBox()
self.superpeers_hbox = HIGHBox()
self.pref_location_hbox = HIGHBox()
self.peerinfo_section = HIGSectionLabel(_("Peer Info"))
self.peerinfo_table = HIGTable()
self.pref_location_section = HIGSectionLabel(_("Preferred Locations"))
self.pref_location_table = HIGTable()
self.cloudagg_section = HIGSectionLabel(_("Cloud Aggregator"))
self.cloudagg_table = HIGTable()
self.cloudagg_subhbox = HIGHBox()
self.superpeers_section = HIGSectionLabel(_("Super Peers"))
self.superpeers_table = HIGTable()
self.peerid_label = HIGEntryLabel(_("Peer ID:"))
self.email_label = HIGEntryLabel(_("Email Address:"))
self.test_version_label = HIGEntryLabel(_("Test Sets Version:"))
self.peerid_label2 = HIGEntryLabel()
self.email_entry = gtk.Entry()
self.test_version_label2 = HIGEntryLabel()
self.longitude_label = HIGLabel(_("longitude:"))
self.longitude_entry = gtk.Entry()
self.latitude_label = HIGLabel(_("latitude:"))
self.latitude_entry = gtk.Entry()
self.cloudagg_entry = gtk.Entry()
self.cloudagg_button = HIGButton(_("Reset"))
self.cloudagg_button.connect('clicked', lambda w: self.reset_aggregator_url())
self.cloudagg_button.set_size_request(80, 28)
self.superpeers_ip_label = HIGLabel(_("IP:"))
self.superpeers_ip_entry = gtk.Entry()
self.superpeers_ip_entry.set_size_request(160, 26)
self.superpeers_port_label = HIGLabel(_("Port:"))
self.superpeers_port_entry = gtk.Entry()
self.superpeers_port_entry.set_size_request(80, 26)
self.superpeers_subhbox = HIGHBox()
self.btn_box = gtk.HButtonBox()
self.superpeers_button1 = HIGButton(_("Add"))
self.superpeers_button1.connect('clicked',lambda w:self.add_superpeer())
self.superpeers_button2 = HIGButton(_("Show all"))
self.superpeers_button2.connect('clicked', lambda w:
self.show_super_peer_list_window())
def __pack_widgets(self):
self.set_border_width(12)
self._pack_noexpand_nofill(self.peerinfo_section)
self._pack_noexpand_nofill(self.peerinfo_hbox)
self._pack_noexpand_nofill(self.pref_location_section)
self._pack_noexpand_nofill(self.pref_location_hbox)
self._pack_noexpand_nofill(self.cloudagg_section)
self._pack_noexpand_nofill(self.cloudagg_hbox)
self._pack_noexpand_nofill(self.superpeers_section)
self._pack_noexpand_nofill(self.superpeers_hbox)
self.peerinfo_hbox._pack_noexpand_nofill(hig_box_space_holder())
self.peerinfo_hbox._pack_expand_fill(self.peerinfo_table)
self.pref_location_hbox._pack_noexpand_nofill(hig_box_space_holder())
self.pref_location_hbox._pack_expand_fill(self.pref_location_table)
self.cloudagg_hbox._pack_noexpand_nofill(hig_box_space_holder())
self.cloudagg_hbox._pack_expand_fill(self.cloudagg_table)
self.superpeers_hbox._pack_noexpand_nofill(hig_box_space_holder())
self.superpeers_hbox._pack_expand_fill(self.superpeers_table)
self.peerinfo_table.attach_label(self.peerid_label, 0, 1, 0, 1)
self.peerinfo_table.attach_label(self.email_label, 0, 1, 2, 3)
self.peerinfo_table.attach_label(self.test_version_label, 0, 1, 1, 2)
self.peerinfo_table.attach_label(self.test_version_label2, 1, 2, 1, 2)
self.peerinfo_table.attach_label(self.peerid_label2, 1, 2, 0, 1)
self.peerinfo_table.attach_entry(self.email_entry, 1, 2, 2, 3)
self.pref_location_table.attach(self.longitude_label,0,1,0,1)
self.pref_location_table.attach(self.longitude_entry,1,2,0,1)
self.pref_location_table.attach(self.latitude_label,2,3,0,1)
self.pref_location_table.attach(self.latitude_entry,3,4,0,1)
self.cloudagg_subhbox._pack_expand_fill(self.cloudagg_entry)
self.cloudagg_subhbox._pack_noexpand_nofill(self.cloudagg_button)
self.cloudagg_table.attach_entry(self.cloudagg_subhbox, 0, 1, 0, 1)
self.btn_box.set_layout(gtk.BUTTONBOX_END)
self.btn_box.set_spacing(8)
self.btn_box.pack_start(self.superpeers_button1)
self.btn_box.pack_start(self.superpeers_button2)
self.superpeers_subhbox._pack_expand_fill(self.superpeers_ip_label)
self.superpeers_subhbox._pack_expand_fill(self.superpeers_ip_entry)
self.superpeers_subhbox._pack_expand_fill(self.superpeers_port_label)
self.superpeers_subhbox._pack_expand_fill(self.superpeers_port_entry)
self.superpeers_subhbox._pack_noexpand_nofill(self.btn_box)
self.superpeers_table.attach_label(self.superpeers_subhbox, 0, 1, 0, 1)
def add_superpeer(self):
"""
Add Super Peer by manual into database
"""
ip = self.superpeers_ip_entry.get_text()
port = self.superpeers_port_entry.get_text()
g_db_helper.set_super_peer_manual(ip,port)
def reset_aggregator_url(self):
"""
"""
aggregator_url = 'http://east1.openmonitor.org'
self.cloudagg_entry.set_text(aggregator_url)
theApp.aggregator.base_url = aggregator_url
g_config.set('network', 'aggregator_url', aggregator_url)
g_db_helper.set_value('config','aggregator_url', aggregator_url)
def show_super_peer_list_window(self):
from umit.icm.agent.gui.Preference.SuperPeerSetting import SuperPeerListWindow,SuperPeersBox
wnd = SuperPeerListWindow()
wnd.show_all()
|
umitproject/openmonitor-desktop-agent
|
umit/icm/agent/gui/Preference/PeerPage.py
|
Python
|
gpl-2.0
| 8,098
|
# -*- coding: utf-8 -*-
__author__ = 'zappyk'
PROJECT = 'GoogleSheets'
DESCRIPTION = 'Google Spreadsheet Read/Write (more?) manipulation'  # was: 'Manage your Google Spreadsheets'
VERSION = (0, 0, 2, 'beta', 1)  # was: (0, 0, 1, 'beta', 1)
###########################################################
def get_project():
return(PROJECT)
###########################################################
def get_description():
return(DESCRIPTION)
###########################################################
#def get_version():
# return(VERSION)
###########################################################
#def get_version(*arg, **kwargs):
# from GoogleSheets.src.version import get_version
# return(get_version(*arg, **kwargs))
###########################################################
def get_version():
from GoogleSheets.src.version import get_version
return(get_version())
|
zappyk-github/zappyk-python
|
src/src_zappyk/googleSheets/GoogleSheets/__init__.py
|
Python
|
gpl-2.0
| 913
|
#
# Copyright (C) 2006, 2013, 2014 Red Hat, Inc.
# Copyright (C) 2006 Hugh O. Brock <hbrock@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA.
#
import logging
# pylint: disable=E0611
from gi.repository import GObject
# pylint: enable=E0611
from virtManager.baseclass import vmmGObjectUI
from virtManager.mediadev import MEDIA_FLOPPY
from virtManager.mediacombo import vmmMediaCombo
from virtManager.storagebrowse import vmmStorageBrowser
from virtManager.addstorage import vmmAddStorage
class vmmChooseCD(vmmGObjectUI):
__gsignals__ = {
"cdrom-chosen": (GObject.SignalFlags.RUN_FIRST, None, [object, str])
}
def __init__(self, vm, disk):
vmmGObjectUI.__init__(self, "choosecd.ui", "vmm-choose-cd")
self.vm = vm
self.conn = self.vm.conn
self.storage_browser = None
        # This is also overwritten from details.py when targeting a new disk
self.disk = disk
self.media_type = disk.device
self.mediacombo = vmmMediaCombo(self.conn, self.builder, self.topwin,
self.media_type)
self.widget("media-combo-align").add(self.mediacombo.top_box)
self.builder.connect_signals({
"on_vmm_choose_cd_delete_event": self.close,
"on_media_toggled": self.media_toggled,
"on_fv_iso_location_browse_clicked": self.browse_fv_iso_location,
"on_ok_clicked": self.ok,
"on_cancel_clicked": self.close,
})
self.reset_state()
def close(self, ignore1=None, ignore2=None):
logging.debug("Closing media chooser")
self.topwin.hide()
if self.storage_browser:
self.storage_browser.close()
return 1
def show(self, parent):
logging.debug("Showing media chooser")
self.reset_state()
self.topwin.set_transient_for(parent)
self.topwin.present()
self.conn.schedule_priority_tick(pollnodedev=True, pollmedia=True)
def _cleanup(self):
self.vm = None
self.conn = None
self.disk = None
if self.storage_browser:
self.storage_browser.cleanup()
self.storage_browser = None
if self.mediacombo:
self.mediacombo.cleanup()
self.mediacombo = None
def _init_ui(self):
if self.media_type == MEDIA_FLOPPY:
self.widget("physical-media").set_label(_("Floppy D_rive"))
self.widget("iso-image").set_label(_("Floppy _Image"))
def reset_state(self):
self.mediacombo.reset_state()
enable_phys = not self.vm.stable_defaults()
self.widget("physical-media").set_sensitive(enable_phys)
self.widget("physical-media").set_tooltip_text("" if enable_phys else
_("Physical CDROM passthrough not supported with this hypervisor"))
use_cdrom = (self.mediacombo.has_media()) and enable_phys
self.widget("physical-media").set_active(use_cdrom)
self.widget("iso-image").set_active(not use_cdrom)
def ok(self, ignore1=None, ignore2=None):
if self.widget("iso-image").get_active():
path = self.widget("iso-path").get_text()
else:
path = self.mediacombo.get_path()
if path == "" or path is None:
return self.err.val_err(_("Invalid Media Path"),
_("A media path must be specified."))
try:
self.disk.path = path
except Exception, e:
return self.err.val_err(_("Invalid Media Path"), e)
names = self.disk.is_conflict_disk()
if names:
res = self.err.yes_no(
_('Disk "%s" is already in use by other guests %s') %
(self.disk.path, names),
_("Do you really want to use the disk?"))
if not res:
return False
vmmAddStorage.check_path_search(self, self.conn, path)
self.emit("cdrom-chosen", self.disk, path)
self.close()
def media_toggled(self, ignore1=None, ignore2=None):
is_phys = bool(self.widget("physical-media").get_active())
self.mediacombo.combo.set_sensitive(is_phys)
self.widget("iso-path").set_sensitive(not is_phys)
self.widget("iso-file-chooser").set_sensitive(not is_phys)
def browse_fv_iso_location(self, ignore1=None, ignore2=None):
self._browse_file()
def set_storage_path(self, src_ignore, path):
self.widget("iso-path").set_text(path)
def _browse_file(self):
if self.storage_browser is None:
self.storage_browser = vmmStorageBrowser(self.conn)
self.storage_browser.connect("storage-browse-finish",
self.set_storage_path)
self.storage_browser.stable_defaults = self.vm.stable_defaults()
if self.media_type == MEDIA_FLOPPY:
self.storage_browser.set_browse_reason(
self.config.CONFIG_DIR_FLOPPY_MEDIA)
else:
self.storage_browser.set_browse_reason(
self.config.CONFIG_DIR_ISO_MEDIA)
self.storage_browser.show(self.topwin, self.conn)
|
aurex-linux/virt-manager
|
virtManager/choosecd.py
|
Python
|
gpl-2.0
| 5,911
|
"""
2504 : 괄호의 값
URL : https://www.acmicpc.net/problem/2504
Input :
(()[[]])([])
Output :
28
"""
class Stack:
def __init__(self):
self._data = []
def __str__(self):
return str(self._data)
def push(self, x):
self._data += [x]
def pop(self):
if self.empty():
return None
else:
last = self._data[-1]
self._data = self._data[:-1]
return last
def size(self):
return len(self._data)
def empty(self):
if self._data:
return False
else:
return True
def top(self):
if self.empty():
return None
else:
return self._data[-1]
def calculate_score(stack, current):
score = 0
top = stack.pop()
if current == ')':
if top == '(':
score += 2
else:
score += 2 * calculate_score(stack, top)
if stack.top() != '(':
raise RuntimeError()
else:
stack.pop()
elif current == ']':
if top == '[':
score += 3
else:
score += 3 * calculate_score(stack, top)
if stack.top() != '[':
raise RuntimeError()
else:
stack.pop()
else:
raise RuntimeError()
if stack.top() == ')' or stack.top() == ']':
score += calculate_score(stack, stack.pop())
return score
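# worked example for "(()[[]])([])": (()[[]]) = 2 * (2 + 3*3) = 22, ([]) = 2 * 3 = 6, total 28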
stack = Stack()
string = input().strip()
for c in string:
stack.push(c)
try:
score = 0
while not stack.empty():
score += calculate_score(stack, stack.pop())
print(score)
except Exception:
print(0)
|
0x1306e6d/Baekjoon
|
baekjoon/2504.py
|
Python
|
gpl-2.0
| 1,729
|
"""
17010 : Time to Decompress
URL : https://www.acmicpc.net/problem/17010
Input :
4
9 +
3 -
12 A
2 X
Output :
+++++++++
---
AAAAAAAAAAAA
XX
"""
l = int(input())
for i in range(l):
n, x = input().split()
print(x * int(n))
|
0x1306e6d/Baekjoon
|
baekjoon/17010.py
|
Python
|
gpl-2.0
| 319
|
from Tools.Directories import resolveFilename, SCOPE_SYSETC
from Tools.HardwareInfo import HardwareInfo
import sys
def getVersionString():
return getImageVersionString()
def getImageVersionString():
try:
file = open(resolveFilename(SCOPE_SYSETC, 'image-version'), 'r')
lines = file.readlines()
for x in lines:
splitted = x.split('=')
if splitted[0] == "version":
version = splitted[1].replace('\n','')
file.close()
return version
except IOError:
return "unavailable"
def getEnigmaVersionString():
import enigma
enigma_version = enigma.getEnigmaVersionString()
return enigma_version
def getKernelVersionString():
try:
return open("/proc/version","r").read().split(' ', 4)[2].split('-',2)[0]
except:
return _("unknown")
def getBuildVersionString():
try:
file = open(resolveFilename(SCOPE_SYSETC, 'image-version'), 'r')
lines = file.readlines()
for x in lines:
splitted = x.split('=')
if splitted[0] == "build":
version = splitted[1].replace('\n','')
file.close()
return version
except IOError:
return "unavailable"
def getLastUpdateString():
try:
file = open(resolveFilename(SCOPE_SYSETC, 'image-version'), 'r')
lines = file.readlines()
for x in lines:
splitted = x.split('=')
if splitted[0] == "date":
#YYYY MM DD hh mm
#2005 11 29 01 16
string = splitted[1].replace('\n','')
year = string[0:4]
month = string[4:6]
day = string[6:8]
date = '-'.join((year, month, day))
hour = string[8:10]
minute = string[10:12]
time = ':'.join((hour, minute))
lastupdated = ' '.join((date, time))
file.close()
return lastupdated
except IOError:
return "unavailable"
def getDriversString():
try:
file = open(resolveFilename(SCOPE_SYSETC, 'image-version'), 'r')
lines = file.readlines()
for x in lines:
splitted = x.split('=')
if splitted[0] == "drivers":
#YYYY MM DD hh mm
#2005 11 29 01 16
string = splitted[1].replace('\n','')
year = string[0:4]
month = string[4:6]
day = string[6:8]
date = '-'.join((year, month, day))
file.close()
return date
except IOError:
return "unavailable"
def getImageTypeString():
try:
file = open(resolveFilename(SCOPE_SYSETC, 'image-version'), 'r')
lines = file.readlines()
for x in lines:
splitted = x.split('=')
if splitted[0] == "build_type":
image_type = splitted[1].replace('\n','') # 0 = release, 1 = experimental
file.close()
if image_type == '0':
image_type = _("Release")
else:
image_type = _("Experimental")
return image_type
except IOError:
return "unavailable"
import socket, fcntl, struct
def _ifinfo(sock, addr, ifname):
iface = struct.pack('256s', ifname[:15])
info = fcntl.ioctl(sock.fileno(), addr, iface)
if addr == 0x8927:
return ''.join(['%02x:' % ord(char) for char in info[18:24]])[:-1].upper()
else:
return socket.inet_ntoa(info[20:24])
def getIfConfig(ifname):
ifreq = {'ifname': ifname}
infos = {}
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# offsets defined in /usr/include/linux/sockios.h on linux 2.6
infos['addr'] = 0x8915 # SIOCGIFADDR
infos['brdaddr'] = 0x8919 # SIOCGIFBRDADDR
infos['hwaddr'] = 0x8927 # SIOCSIFHWADDR
infos['netmask'] = 0x891b # SIOCGIFNETMASK
try:
for k,v in infos.items():
ifreq[k] = _ifinfo(sock, v, ifname)
except:
pass
sock.close()
return ifreq
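# usage sketch (illustrative values): getIfConfig('eth0') ->
# {'ifname': 'eth0', 'addr': '192.168.0.2', 'brdaddr': '...', 'hwaddr': '...', 'netmask': '...'}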
# For modules that do "from About import about"
about = sys.modules[__name__]
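# A possible shared helper (sketch only; the functions above each reparse the same file):
# def _image_version_value(key, default="unavailable"):
#     try:
#         for line in open(resolveFilename(SCOPE_SYSETC, 'image-version')):
#             k, _, v = line.partition('=')
#             if k == key:
#                 return v.strip()
#     except IOError:
#         pass
#     return default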
|
bally12345/enigma2
|
lib/python/Components/About.py
|
Python
|
gpl-2.0
| 3,452
|
#
# Copyright 2014-2017 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from __future__ import absolute_import
from copy import deepcopy
import functools
import hashlib
import json
import logging
import os
import uuid
import yaml
import six
from lago import log_utils, plugins, utils
from lago.config import config
from lago.providers.libvirt.network import BridgeNetwork, NATNetwork
LOGGER = logging.getLogger(__name__)
LogTask = functools.partial(log_utils.LogTask, logger=LOGGER)
log_task = functools.partial(log_utils.log_task, logger=LOGGER)
class LagoUnknownVMTypeError(utils.LagoUserException):
def __init__(self, vm_type_name, vm_types):
        super(LagoUnknownVMTypeError, self).__init__(
'Unknown VM type: {0}, available types: {1}, \
maybe you need to install lago plugin'
.format(vm_type_name, vm_types)
)
def _gen_ssh_command_id():
return uuid.uuid1().hex[:8]
def _guestfs_copy_path(g, guest_path, host_path):
if g.is_file(guest_path):
with open(host_path, 'w') as f:
f.write(g.read_file(guest_path))
elif g.is_dir(guest_path):
os.mkdir(host_path)
for path in g.ls(guest_path):
_guestfs_copy_path(
g,
os.path.join(
guest_path,
path,
),
os.path.join(host_path, os.path.basename(path)),
)
def _path_to_xml(basename):
return os.path.join(
os.path.dirname(__file__),
basename,
)
class VirtEnv(object):
'''Env properties:
* prefix
* vms
* net
'''
def __init__(self, prefix, vm_specs, net_specs):
self.vm_types = plugins.load_plugins(
plugins.PLUGIN_ENTRY_POINTS['vm'],
instantiate=False,
)
self.prefix = prefix
with open(self.prefix.paths.uuid(), 'r') as uuid_fd:
self.uuid = uuid_fd.read().strip()
self._nets = {}
compat = self.get_compat()
for name, spec in net_specs.items():
self._nets[name] = self._create_net(spec, compat)
self._vms = {}
self._default_vm_type = config.get('default_vm_type')
for name, spec in vm_specs.items():
self._vms[name] = self._create_vm(spec)
def _create_net(self, net_spec, compat):
if net_spec['type'] == 'nat':
cls = NATNetwork
elif net_spec['type'] == 'bridge':
cls = BridgeNetwork
return cls(self, net_spec, compat=compat)
def _create_vm(self, vm_spec):
vm_type_name = vm_spec.get('vm-type', self._default_vm_type)
try:
vm_type = self.vm_types[vm_type_name]
except KeyError:
raise LagoUnknownVMTypeError(
vm_type_name, ','.join(self.vm_types.keys())
)
vm_spec['vm-type'] = vm_type_name
return vm_type(self, vm_spec)
def prefixed_name(self, unprefixed_name, max_length=0):
"""
        Returns a uuid prefixed identifier
Args:
unprefixed_name(str): Name to add a prefix to
max_length(int): maximum length of the resultant prefixed name,
                will adapt the given name and the length of the uuid to fit it
Returns:
str: prefixed identifier for the given unprefixed name
"""
if max_length == 0:
prefixed_name = '%s-%s' % (self.uuid[:8], unprefixed_name)
else:
if max_length < 6:
raise RuntimeError(
"Can't prefix with less than 6 chars (%s)" %
unprefixed_name
)
if max_length < 16:
_uuid = self.uuid[:4]
else:
_uuid = self.uuid[:8]
name_max_length = max_length - len(_uuid) - 1
if name_max_length < len(unprefixed_name):
unprefixed_name = unprefixed_name.encode('utf-8')
hashed_name = hashlib.sha1(unprefixed_name).hexdigest()
unprefixed_name = hashed_name[:name_max_length]
prefixed_name = '%s-%s' % (_uuid, unprefixed_name)
return prefixed_name
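    # e.g. with uuid "deadbeef...": prefixed_name("engine") -> "deadbeef-engine"; when
    # max_length forces truncation, the name part is replaced by a sha1-derived prefix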
def virt_path(self, *args):
return self.prefix.paths.virt(*args)
def bootstrap(self):
vms = [
vm for vm in self._vms.values() if vm.spec.get('bootstrap', True)
]
if vms:
utils.invoke_in_parallel(lambda vm: vm.bootstrap(), vms)
def export_vms(
self,
vms_names,
standalone,
dst_dir,
compress,
init_file_name,
out_format,
collect_only=False,
with_threads=True
):
# todo: move this logic to PrefixExportManager
if not vms_names:
vms_names = list(self._vms.keys())
running_vms = []
vms = []
for name in vms_names:
try:
vm = self._vms[name]
if not vm.spec.get('skip-export'):
vms.append(vm)
if vm.running():
running_vms.append(vm)
except KeyError:
raise utils.LagoUserException(
'Entity {} does not exist'.format(name)
)
if running_vms:
raise utils.LagoUserException(
'The following vms must be off:\n{}'.format(
'\n'.join([_vm.name() for _vm in running_vms])
)
)
with LogTask('Exporting disks to: {}'.format(dst_dir)):
if not os.path.isdir(dst_dir):
os.mkdir(dst_dir)
def _export_disks(vm):
return vm.export_disks(
standalone, dst_dir, compress, collect_only, with_threads
)
if collect_only:
return (
functools.reduce(
lambda x, y: x.update(y) or x,
[_export_disks(v) for v in vms]
)
)
else:
if with_threads:
results = utils.invoke_in_parallel(_export_disks, vms)
else:
results = [_export_disks(v) for v in vms]
results = functools.reduce(
lambda x, y: x.update(y) or x, results
)
self.generate_init(
os.path.join(dst_dir, init_file_name), out_format, vms
)
results['init-file'] = os.path.join(dst_dir, init_file_name)
return results
def generate_init(self, dst, out_format, vms_to_include, filters=None):
"""
Generate an init file which represents this env and can
be used with the images created by self.export_vms
Args:
dst (str): path and name of the new init file
out_format (plugins.output.OutFormatPlugin):
formatter for the output (the default is yaml)
filters (list): list of paths to keys that should be removed from
the init file
vms_to_include (list of :class:lago.plugins.vm.VMPlugin):
list of vms to include in the init file
Returns:
None
"""
# todo: move this logic to PrefixExportManager
with LogTask('Exporting init file to: {}'.format(dst)):
# Set the default formatter to yaml. The default formatter
# doesn't generate a valid init file, so it's not reasonable
# to use it
if isinstance(out_format, plugins.output.DefaultOutFormatPlugin):
out_format = plugins.output.YAMLOutFormatPlugin()
if not filters:
filters = [
'domains/*/disks/*/metadata',
'domains/*/metadata/deploy-scripts', 'domains/*/snapshots',
'domains/*/name', 'nets/*/mapping', 'nets/*/dns_records'
]
spec = self.get_env_spec(filters)
temp = {}
for vm in vms_to_include:
temp[vm.name()] = spec['domains'][vm.name()]
spec['domains'] = temp
for _, domain in six.iteritems(spec['domains']):
domain['disks'] = [
d for d in domain['disks'] if not d.get('skip-export')
]
for disk in domain['disks']:
if disk['type'] == 'template':
disk['template_type'] = 'qcow2'
elif disk['type'] == 'empty':
disk['type'] = 'file'
disk['make_a_copy'] = 'True'
# Insert the relative path to the exported images
disk['path'] = os.path.join(
'$LAGO_INITFILE_PATH', os.path.basename(disk['path'])
)
with open(dst, 'wt') as f:
if isinstance(out_format, plugins.output.YAMLOutFormatPlugin):
# Dump the yaml file without type tags
# TODO: Allow passing parameters to output plugins
f.write(yaml.safe_dump(spec))
else:
f.write(out_format.format(spec))
def get_env_spec(self, filters=None):
"""
Get the spec of the current env.
The spec will hold the info about all the domains and
networks associated with this env.
Args:
filters (list): list of paths to keys that should be removed from
the init file
Returns:
dict: the spec of the current env
"""
spec = {
'domains':
{
vm_name: deepcopy(vm_object.spec)
for vm_name, vm_object in six.iteritems(self._vms)
},
'nets':
{
net_name: deepcopy(net_object.spec)
for net_name, net_object in six.iteritems(self._nets)
}
}
if filters:
utils.filter_spec(spec, filters)
return spec
def start(self, vm_names=None):
if not vm_names:
log_msg = 'Start Prefix'
vms = list(self._vms.values())
nets = list(self._nets.values())
else:
log_msg = 'Start specified VMs'
vms = [self._vms[vm_name] for vm_name in vm_names]
nets = set()
for vm in vms:
nets = nets.union(
set(self._nets[net_name] for net_name in vm.nets())
)
with LogTask(log_msg), utils.RollbackContext() as rollback:
with LogTask('Start nets'):
for net in nets:
net.start()
rollback.prependDefer(net.stop)
with LogTask('Start vms'):
for vm in vms:
vm.start()
rollback.prependDefer(vm.stop)
rollback.clear()
def _get_stop_shutdown_common_args(self, vm_names):
"""
Get the common arguments for stop and shutdown commands
Args:
vm_names (list of str): The names of the requested vms
Returns
list of plugins.vm.VMProviderPlugin:
vms objects that should be stopped
list of virt.Network: net objects that should be stopped
str: log message
Raises:
utils.LagoUserException: If a vm name doesn't exist
"""
vms_to_stop = list(self.get_vms(vm_names).values())
if not vm_names:
log_msg = '{} prefix'
nets = list(self._nets.values())
else:
log_msg = '{} specified VMs'
nets = self._get_unused_nets(vms_to_stop)
return vms_to_stop, nets, log_msg
def _get_unused_nets(self, vms_to_stop):
"""
Return a list of nets that used only by the vms in vms_to_stop
Args:
vms_to_stop (list of str): The names of the requested vms
Returns
list of virt.Network: net objects that used only by
vms in vms_to_stop
Raises:
utils.LagoUserException: If a vm name doesn't exist
"""
vm_names = [vm.name() for vm in vms_to_stop]
unused_nets = set()
for vm in vms_to_stop:
unused_nets = unused_nets.union(vm.nets())
for vm in self._vms.values():
if not vm.running() or vm.name() in vm_names:
continue
for net in vm.nets():
unused_nets.discard(net)
nets = [self._nets[net] for net in unused_nets]
return nets
def stop(self, vm_names=None):
vms, nets, log_msg = self._get_stop_shutdown_common_args(vm_names)
with LogTask(log_msg.format('Stop')):
with LogTask('Stop vms'):
for vm in vms:
vm.stop()
with LogTask('Stop nets'):
for net in nets:
net.stop()
def shutdown(self, vm_names, reboot=False):
vms, nets, log_msg = self._get_stop_shutdown_common_args(vm_names)
if reboot:
with LogTask(log_msg.format('Reboot')):
with LogTask('Reboot vms'):
for vm in vms:
vm.reboot()
else:
with LogTask(log_msg.format('Shutdown')):
with LogTask('Shutdown vms'):
for vm in vms:
vm.shutdown()
with LogTask('Stop nets'):
for net in nets:
net.stop()
def get_nets(self):
return self._nets.copy()
def get_net(self, name=None):
if name:
return self.get_nets().get(name)
else:
try:
return [
net for net in self.get_nets().values()
if net.is_management()
].pop()
except IndexError:
                return list(self.get_nets().values()).pop()
def get_vms(self, vm_names=None):
"""
Returns the vm objects associated with vm_names
if vm_names is None, return all the vms in the prefix
Args:
vm_names (list of str): The names of the requested vms
Returns
dict: Which contains the requested vm objects indexed by name
Raises:
utils.LagoUserException: If a vm name doesn't exist
"""
if not vm_names:
return self._vms.copy()
missing_vms = []
vms = {}
for name in vm_names:
try:
vms[name] = self._vms[name]
except KeyError:
# TODO: add resolver by suffix
missing_vms.append(name)
if missing_vms:
raise utils.LagoUserException(
'The following vms do not exist: \n{}'.format(
'\n'.join(missing_vms)
)
)
return vms
def get_vm(self, name):
return self._vms[name]
@classmethod
def from_prefix(cls, prefix):
virt_path = functools.partial(prefix.paths.prefixed, 'virt')
with open(virt_path('env'), 'r') as f:
env_dom = json.load(f)
net_specs = {}
for name in env_dom['nets']:
with open(virt_path('net-%s' % name), 'r') as f:
net_specs[name] = json.load(f)
vm_specs = {}
for name in env_dom['vms']:
with open(virt_path('vm-%s' % name), 'r') as f:
vm_specs[name] = json.load(f)
return cls(prefix, vm_specs, net_specs)
@log_task('Save prefix')
def save(self):
with LogTask('Save nets'):
for net in self._nets.values():
net.save()
with LogTask('Save VMs'):
for vm in self._vms.values():
vm.save()
spec = {
'nets': list(self._nets.keys()),
'vms': list(self._vms.keys()),
}
with LogTask('Save env'):
with open(self.virt_path('env'), 'w') as f:
utils.json_dump(spec, f)
@log_task('Create VMs snapshots')
def create_snapshots(self, name):
utils.invoke_in_parallel(
lambda vm: vm.create_snapshot(name),
list(self._vms.values()),
)
@log_task('Revert VMs snapshots')
def revert_snapshots(self, name):
utils.invoke_in_parallel(
lambda vm: vm.revert_snapshot(name),
list(self._vms.values()),
)
def get_snapshots(self, domains=None):
"""
Get the list of snapshots for each domain
Args:
            domains(list of str): list of the domains to get the snapshots
for, all will be returned if none or empty list passed
Returns:
dict of str -> list(str): with the domain names and the list of
snapshots for each
"""
snapshots = {}
for vm_name, vm in self.get_vms().items():
if domains and vm_name not in domains:
continue
snapshots[vm_name] = vm._spec['snapshots']
return snapshots
def get_compat(self):
"""Get compatibility level for this environment - which is the Lago
version used to create this environment """
# Prior to version 0.37.0, the version which the environment was
# initialized in was not saved, so we default to 0.36.
return self.prefix.metadata.get('lago_version', '0.36.0')
|
lago-project/lago
|
lago/virt.py
|
Python
|
gpl-2.0
| 18,438
|
# Copyright 2011-2012 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
An L2 learning switch.
It is derived from one written live for an SDN crash course.
It is somewhat similar to NOX's pyswitch in that it installs
exact-match rules for each flow.
"""
from pox.core import core
import pox.openflow.libopenflow_01 as of
from pox.lib.util import dpid_to_str
from pox.lib.util import str_to_bool
import time
import signal
import pox.openflow.nicira as nx
log = core.getLogger()
# We don't want to flood immediately when a switch connects.
# Can be overridden on the command line.
_flood_delay = 0
class LearningSwitch (object):
"""
The learning switch "brain" associated with a single OpenFlow switch.
When we see a packet, we'd like to output it on a port which will
eventually lead to the destination. To accomplish this, we build a
table that maps addresses to ports.
We populate the table by observing traffic. When we see a packet
from some source coming from some port, we know that source is out
that port.
When we want to forward traffic, we look up the destination in our
table. If we don't know the port, we simply send the message out
all ports except the one it came in on. (In the presence of loops,
this is bad!).
In short, our algorithm looks like this:
For each packet from the switch:
1) Use source address and switch port to update address/port table
2) Is transparent = False and either Ethertype is LLDP or the packet's
destination address is a Bridge Filtered address?
Yes:
2a) Drop packet -- don't forward link-local traffic (LLDP, 802.1x)
DONE
3) Is destination multicast?
Yes:
3a) Flood the packet
DONE
4) Port for destination address in our address/port table?
No:
4a) Flood the packet
DONE
5) Is output port the same as input port?
Yes:
5a) Drop packet and similar ones for a while
6) Install flow table entry in the switch so that this
flow goes out the appropriate port
6a) Send the packet out appropriate port
"""
def __init__ (self, connection, transparent, isMaster):
# Switch we'll be adding L2 learning switch capabilities to
self.connection = connection
self.transparent = transparent
self.isMaster = isMaster
# Our table
self.macToPort = {}
# We want to hear PacketIn messages, so we listen
# to the connection
connection.addListeners(self)
# We just use this to know when to log a helpful message
self.hold_down_expired = _flood_delay == 0
#log.debug("Initializing LearningSwitch, transparent=%s",
# str(self.transparent))
print 'SET ROLE L2 LEARNING INIT MODE', isMaster
# Tell this switch that this controller is master/slave
for con in core.openflow.connections:
if isMaster == 1:
con.send(nx.nx_role_request(master="true"))
elif isMaster == -1:
con.send(nx.nx_role_request(slave="true"))
def _handle_PacketIn (self, event):
"""
Handle packet in messages from the switch to implement above algorithm.
"""
packet = event.parsed
def flood (message = None):
""" Floods the packet """
msg = of.ofp_packet_out()
if time.time() - self.connection.connect_time >= _flood_delay:
# Only flood if we've been connected for a little while...
if self.hold_down_expired is False:
# Oh yes it is!
self.hold_down_expired = True
log.info("%s: Flood hold-down expired -- flooding",
dpid_to_str(event.dpid))
if message is not None: log.debug(message)
#log.debug("%i: flood %s -> %s", event.dpid,packet.src,packet.dst)
# OFPP_FLOOD is optional; on some switches you may need to change
# this to OFPP_ALL.
msg.actions.append(of.ofp_action_output(port = of.OFPP_FLOOD))
else:
pass
#log.info("Holding down flood for %s", dpid_to_str(event.dpid))
msg.data = event.ofp
msg.in_port = event.port
self.connection.send(msg)
def drop (duration = None):
"""
Drops this packet and optionally installs a flow to continue
dropping similar ones for a while
"""
if duration is not None:
if not isinstance(duration, tuple):
duration = (duration,duration)
msg = of.ofp_flow_mod()
msg.match = of.ofp_match.from_packet(packet)
msg.idle_timeout = duration[0]
msg.hard_timeout = duration[1]
msg.buffer_id = event.ofp.buffer_id
self.connection.send(msg)
elif event.ofp.buffer_id is not None:
msg = of.ofp_packet_out()
msg.buffer_id = event.ofp.buffer_id
msg.in_port = event.port
self.connection.send(msg)
self.macToPort[packet.src] = event.port # 1
if not self.transparent: # 2
if packet.type == packet.LLDP_TYPE or packet.dst.isBridgeFiltered():
drop() # 2a
return
if packet.dst.is_multicast:
flood() # 3a
else:
if packet.dst not in self.macToPort: # 4
flood("Port for %s unknown -- flooding" % (packet.dst,)) # 4a
else:
port = self.macToPort[packet.dst]
if port == event.port: # 5
# 5a
log.warning("Same port for packet from %s -> %s on %s.%s. Drop."
% (packet.src, packet.dst, dpid_to_str(event.dpid), port))
drop(10)
return
# 6
log.debug("installing flow for %s.%i -> %s.%i" %
(packet.src, event.port, packet.dst, port))
msg = of.ofp_flow_mod()
msg.match = of.ofp_match.from_packet(packet, event.port)
msg.idle_timeout = 10
msg.hard_timeout = 30
msg.actions.append(of.ofp_action_output(port = port))
msg.data = event.ofp # 6a
self.connection.send(msg)
class l2_learning (object):
"""
Waits for OpenFlow switches to connect and makes them learning switches.
"""
def __init__ (self, transparent, isMaster):
core.openflow.addListeners(self)
self.transparent = transparent
self.isMaster = isMaster
def _handle_ConnectionUp (self, event):
log.debug("Connection %s" % (event.connection,))
LearningSwitch(event.connection, self.transparent, self.isMaster)
# added new parameter, isMaster
def launch (transparent=False, hold_down=_flood_delay, isMaster=0):
"""
Starts an L2 learning switch.
"""
try:
global _flood_delay
_flood_delay = int(str(hold_down), 10)
assert _flood_delay >= 0
except:
raise RuntimeError("Expected hold-down to be a number")
core.registerNew(l2_learning, str_to_bool(transparent),isMaster)
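# Hedged usage sketch (added): assuming this file is dropped into a POX
# checkout as ext/l2_learning.py, it could be launched along these lines
# (module name and flag spelling are assumptions, not taken from this file):
#
#   ./pox.py l2_learning --transparent=False --isMaster=1
#
# isMaster=1 asks the controller to request the OpenFlow master role,
# isMaster=-1 the slave role, and 0 leaves the role untouched.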
|
ardhipoetra/SDN-workbench
|
l2_learning.py
|
Python
|
gpl-2.0
| 7,194
|
# talk.py
# Purpose - Simple. Get me the HTML for talkweb lingo
# Copyright (c) Madhukumar Seshadri
# All rights reserved
# Trademark and Licensing yet to be decided
# started - Unknown, assumed to be around May 18th 2012
# Refer /doc/changes and /doc/features for change log and additional features
# Changes
# get a talktree (x,y,z) or
# come to cells (attrib) (events)
from vocab import *
from cparser import *
from wparser import *
from xtag import *
from xattrib import *
from xcontent import *
from xtext import *
def tohtml(x):
""" a global that uses talk class to convert given talkweb string to html """
t = talk(x)
return t.html()
class talk:
## constants
attribv_brokeon_attrib=0
attribv_brokeon_openingtag=1
attribv_brokeon_closingtag=2
attribv_brokeon_closingquote=3
def __init__(self,inputis,outputis=None,inputtype='s'):
""" where talk comes to life
inputis is string or file name
inputtype becomes 's' stating string or 'f' stating file
outputis file name otherwise remains none """
self.fn=inputis
self.of=outputis
self.p = wparser(inputis,inputtype)
self.cssstack=[]
self.tagstack=[]; self.attribstack={}
self.output=[]
def __reinit__(self):
""" reinitialize the same talk as if you cannot create talk again """
self.p = wparser(self.inputis)
def search(self,c,esc,s,start=0):
""" search for c not escaped by esc in string s and start from start """
for i,x in enumerate(s):
if c == x and s[i-1] <> esc and i >= start:
return i
return -1
def preprocess(self,x):
""" not integrated completely - provide talkweb comments so that they can send to /dev/null """
output=""
i=0;l=len(x);
w,b,i=cparser.cparse(x,i,l,"\n")
while (w is not None):
output += cparser.decomment(self.comment,w) + "\n"
w,b,i=cparser.cparse(x,i,l,"\n")
return output[0:-1]
##-- come in on attrib - read till next attrib or open or close tag
def attribv(self,tag,breakonattrib=1,breakforchars=0):
""" breaks the stream if tag or attrib occurs if breakonattrib is on, if breakforchars is on, then breaks on double quote """
#print "class:talk:attribv>inputs(tag,breakonattrib,breakforchars) are",tag,breakonattrib,breakforchars
out = ''; br = ''; #scc=""
oc = '"'; cc = '"'; nesc = "\\"
prw=""
w = self.p.nw()
while w is not None:
#print "class:talk:attribv> word",w
#print "class:talk:attribv> pos",self.p.f.tell()
if breakforchars:
lp=wparser(w,"s")
if lp.wtc(oc,nesc):
#br='t'
br=self.attribv_brokeon_closingquote
return (w[0:lp.f.tell()],self.p.nw(),br)
out = w
out = out + " " + self.p.wtc(cc,nesc)
br=self.attribv_brokeon_closingquote
#br = "t"
#print "class:talk:attribv>breakforchars is on. outputs out,word,breakreason",out,w,br
return (out,self.p.nw(),br)
if prw.strip()=="" and (w.strip() == cc or w[0:1] == cc):
out = w[1:len(w)]
out = out + " " + self.p.wtc(cc,nesc)
#br = "t"
br=self.attribv_brokeon_closingquote
return (out, self.p.nw(),br)
if vocab.isattrib(w,tag) and breakonattrib:
#br='a' #break on attrib
br=self.attribv_brokeon_attrib
break
if vocab.isotag(w):
#br='b' #break on btag
br=self.attribv_brokeon_openingtag
break
if vocab.isctag(w):
#br='e' #break on etag
br=self.attribv_brokeon_closingtag
break
out = out + self.p.w
prw = w
w=self.p.nw()
#print "class:talk:attribv>outputs out,word,breakreason",out,w,br
return (out,w,br)
##-- converts to html
##-- vocab answers whether something is tag, attribute
##-- attribv parses the attribute before sending to xtag's xattribute in isattrib section
##-- tag stack is (pid, ppid, tag) - (currentdepth,previousdepth,tag)
##-- attrib stack is (currentdepth:previousdepth,attrib)
def html(self):
""" make html initialize talk() and then call html """
stacktrace=0
twasks_pt=0
ot='';tagtext=""
term=">"
tagdepth=-1;prtagdepth=tagdepth;
otkey=""
html=''
xhtml=[]
#nottext = 1
popedstack=[]
w=self.p.nw()
while (w is not None):
#if w <> '':
#print ">" + w
if vocab.isotag(w):
#print "class:talk:html>",w,"was found as opening tag .."
#print "self.tagstack>",self.tagstack
prtagdepth = tagdepth
tagdepth += 1
x = newline
if tagdepth == 0:
x=""
#tagstack
self.tagstack.append((tagdepth,prtagdepth,w,tagtext))
html=html+ tagtext + newline + xtext.pad(tagdepth) + vocab.otageq(w)
if tagtext: xhtml.append([tagdepth,tagtext])
xhtml.append([tagdepth,vocab.otageq(w),1])
ot = w
tagtext = ""
elif vocab.isattrib(w,ot) and ot != '':
#print "class:talk:html>",w,"was found as attribute..,operating tag for attrib ...",ot
breakonattrib=1
tagtext = ""
attribname = w
tagstackl=len(self.tagstack)-1
keyprefix = str(self.tagstack[tagstackl][0]) + ":" + \
str(self.tagstack[tagstackl][1]) + ":"
attribkey = keyprefix + attribname
otkey = keyprefix + ot
#print "class:talk:html>attribkey",attribkey
breakonchars=0; st=0
if attribname == "contains":
breakonattrib=0
#we are looking to cover contains" as a word
if attribname == "contains"+doublequote:
breakonchars=1
attribcontents,w,attribv_break_reason = self.attribv(ot,breakonattrib,breakonchars)
htmleq,xattrib_exitreason=xattrib.doattrib(attribname,\
attribcontents,w,attribv_break_reason,self)
#attribstack
if otkey not in self.attribstack:
self.attribstack[otkey]={}
if attribkey not in self.attribstack[otkey]:
self.attribstack[otkey][attribkey]=[]
self.attribstack[otkey][attribkey].append(htmleq)
if attribname in ["twasks"]:
if re.match("processtext",htmleq):
twasks_pt=1
continue
if attribname in tagopenterminators:
#tag has all attributes since current attribute is contains
#let us give the userdefs a chance to do what they want
if twasks_pt:
htmleq=xcontent.dotext(htmleq,ot,self.attribstack[otkey])
twasks_pt=0
else:
htmleq = xcontent.docontent(htmleq,ot,self.attribstack[otkey])
html = html + newline + xtext.pad(tagdepth) + xtext.pad(1) + htmleq.strip()
xhtml.append([tagdepth,htmleq,3])
else:
#it is text of contains or it is attrib
html = html[0:len(html)-len(term)] + whitespace + \
htmleq.strip() + xtag.cst(ot)
le=len(xhtml)-1
otag=xhtml[le][1]
xhtml[le][1] = otag[0:len(otag)-len(term)] + whitespace + \
htmleq.strip() + xtag.cst(ot)
continue
elif vocab.isctag(w):
#print "talk:html:", w,"found as ending tag .."
etagof = w; tagtext=""
if len(self.tagstack) > 0:
pe = self.tagstack.pop()
popedstack.append(pe)
etagof = pe[2]
tagtext=pe[3]
#comment is hardcoded
x=etagof.lower()
htmlx=vocab.ctageqforotag(x)
if x=="@comment":
html += tagtext + htmlx
else:
html += tagtext + newline + xtext.pad(tagdepth) + htmlx
xhtml.append([tagdepth,tagtext + htmlx,2])
ot=''
tagdepth -= 1
tagtext = ""
else:
#print "talk:html>",w,"found part of text outside of contains .. "
tagtext = tagtext + whitespace + w
if w:
l=len(xhtml); le=l-1
if l==0:
#text before any tag
xhtml.append([tagdepth,w,3])
elif xhtml[le][2] == 2:
#indicates last tag was closing tag
xhtml.append([tagdepth,w,3])
else:
#we are part of some open tag but we are outside
xhtml[le][1] += whitespace + w
w="";tagtext=""
w=self.p.nw()
self.tagstack.reverse()
for i in self.tagstack:
html = html + newline + xtext.pad(tagdepth) + xtag.et(vocab.kwofotag(i[2]))
xhtml.append([tagdepth,xtag.et(vocab.kwofotag(i[2]))])
tagdepth -= 1
print "xhtml .. "
for x in xhtml:
print x
print "html .."
print html
##--stacktrace
if stacktrace:
f1 = open("/home/madhu/Desktop/tagstack.out","w")
f2 = open("/home/madhu/Desktop/attribstack.out","w")
for i in popedstack:
f1.write(str(i)+"\n")
for i in self.tagstack:
f1.write(str(i) + "\n")
for i in self.attribstack.keys():
for j in self.attribstack[i].keys():
f2.write(i + j + "\n")
f1.close()
f2.close()
#print "\n\n<!-- html for page - " + xfn + ""Copyright(c) Madhukumar Seshadri. All rights reserved-->"
#print html
if self.of:
f = open(self.of,"w")
f.write("<!-- HTML for "+ self.fn + "-->" +\
"\n<!-- Generated using Letustalkweb. -->" +\
"\n<!-- LetustalkWeb is a Copyright and Trade Mark of Madhukumar Seshadri. -->" +\
"\n<!-- All rights reserved on Letustalkweb. -->\n"+\
"\n<!-- Change talkweb files instead of html files -->\n")
f.write(html)
f.close()
else:
return html
#@@rewrite - collation sequence to main html stream of htmleq
""" if xattrib_exitreason in [xattrib.exitreason_text_of_contains,xattrib.exitreason_found_no_is_or_are]:
html = html + newline + xtext.pad(tagdepth) + xtext.pad(1) + htmleq.strip()
xhtml.append([tagdepth+1,htmleq.strip()])
ot=""
elif xattrib_exitreason in [xattrib.exitreason_valueterminator,xattrib.exitreason_singlequote,xattrib.exitreason_doublequote]:
if attribv_break_reason in ['e','b']:
html = html + newline + xtext.pad(tagdepth) + xtext.pad(1) + htmleq.strip()
else:
html = html[0:len(html)-len(term)] + xtext.pad(1) + htmleq.strip() + xtag.cst(ot)
elif xattrib_exitreason in [xattrib.exitreason_found_is_or_are_no_butno_vt_or_quote_nextword_is_contains,\
xattrib.exitreason_found_is_or_are_no_butno_vt_or_quote_nextword_is_not_contains]:
html = html[0:len(html)-len(term)] + whitespace + htmleq.strip() + xtag.cst(ot)
elif xattrib_exitreason == "enc":
html = html[0:len(html)-len(term)] + whitespace + htmleq.strip() + xtag.cst(ot)
else:
html = html[0:len(html)-len(term)] + whitespace + htmleq.strip() + xtag.cst(ot)
continue """
|
madhusv/talkweb
|
talk/doc/py-tmp/talk.py
|
Python
|
gpl-2.0
| 9,902
|
# Tests for algorithm Remi (original)
from Remi_original import RemiAlgorithmOriginal
import networkx as nx
import unittest
class TestRemiOriginal(unittest.TestCase):
# Method called before each test
def setUp(self):
pass
# Method called after each test
def tearDown(self):
pass
def test_elementary(self):
g = nx.Graph()
g.add_nodes_from([1, 2, 3, 4, 5])
g.add_edges_from([(1, 2), (1, 3), (2, 4), (2, 5)])  # undirected Graph ignores reversed duplicates
remi = RemiAlgorithmOriginal()
remi.run(g)
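# Added for convenience (standard unittest idiom): lets the file be run
# directly with `python tests/test_remi_original.py` as well as via a runner.
if __name__ == '__main__':
    unittest.main()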
|
Temigo/whisper
|
tests/test_remi_original.py
|
Python
|
gpl-2.0
| 517
|
#!/usr/bin/python
# coding: utf-8
import sys
import os
import django
from django.core.management import execute_from_command_line
import shlex
import urllib
import socket
import subprocess
jms_dir = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))
sys.path.append(jms_dir)
os.environ['DJANGO_SETTINGS_MODULE'] = 'jumpserver.settings'
if django.get_version() != '1.6':
setup = django.setup()
from juser.user_api import db_add_user, get_object, User
from install import color_print
from jumpserver.api import get_mac_address, bash
socket.setdefaulttimeout(2)
class Setup(object):
"""
GeekCloud monitoring installation wizard
"""
def __init__(self):
self.admin_user = 'admin'
self.admin_pass = 'QWEasd123'
@staticmethod
def _pull():
color_print('Updating GeekCloud monitoring ...', 'green')
# bash('git pull')
try:
mac = get_mac_address()
version = urllib.urlopen('http://jumpserver.org/version/?id=%s' % mac)
except:
pass
def _input_admin(self):
while True:
print
admin_user = raw_input('Enter admin username [%s]: ' % self.admin_user).strip()
admin_pass = raw_input('Enter admin password [%s]: ' % self.admin_pass).strip()
admin_pass_again = raw_input('Re-enter admin password [%s]: ' % self.admin_pass).strip()
if admin_user:
self.admin_user = admin_user
if not admin_pass_again:
admin_pass_again = self.admin_pass
if admin_pass:
self.admin_pass = admin_pass
if self.admin_pass != admin_pass_again:
color_print('The two passwords do not match, please try again')
else:
break
print
@staticmethod
def _sync_db():
os.chdir(jms_dir)
execute_from_command_line(['manage.py', 'syncdb', '--noinput'])
def _create_admin(self):
user = get_object(User, username=self.admin_user)
if user:
user.delete()
db_add_user(username=self.admin_user, password=self.admin_pass, role='SU', name='admin', groups='',
admin_groups='', email='admin@jumpserver.org', uuid='MayBeYouAreTheFirstUser', is_active=True)
cmd = 'id %s 2> /dev/null 1> /dev/null || useradd %s' % (self.admin_user, self.admin_user)
shlex.os.system(cmd)
@staticmethod
def _chmod_file():
os.chdir(jms_dir)
os.chmod('init.sh', 0755)
os.chmod('connect.py', 0755)
os.chmod('manage.py', 0755)
os.chmod('run_server.py', 0755)
os.chmod('service.sh', 0755)
os.chmod('logs', 0777)
os.chmod('keys', 0777)
@staticmethod
def _run_service():
cmd = 'bash %s start' % os.path.join(jms_dir, 'service.sh')
shlex.os.system(cmd)
print
color_print('Installation complete. Visit http://ip:8000 to log in. Enjoy!\n', 'green')
def start(self):
print "开始安装极云监控 ..."
self._pull()
self._sync_db()
self._input_admin()
self._create_admin()
self._chmod_file()
self._run_service()
if __name__ == '__main__':
setup = Setup()
setup.start()
|
Mr-Linus/geekcloud
|
install/next.py
|
Python
|
gpl-2.0
| 3,297
|
from database import Database
from models.blog import Blog
__author__ = 'jslvtr'
class Menu(object):
def __init__(self):
self.user = input("Enter your author name: ")
self.user_blog = None
if self._user_has_account():
print("Welcome back {}".format(self.user))
else:
self._prompt_user_for_account()
def _user_has_account(self):
blog = Database.find_one('blogs', {'author': self.user})
if blog is not None:
self.user_blog = Blog.from_mongo(blog['id'])
return True
else:
return False
def _prompt_user_for_account(self):
title = input("Enter blog title: ")
description = input("Enter blog description: ")
blog = Blog(author=self.user,
title=title,
description=description)
blog.save_to_mongo()
self.user_blog = blog
def run_menu(self):
read_or_write = input("Do you want to read (R) or write (W) blogs? ")
if read_or_write == 'R':
self._list_blogs()
self._view_blog()
elif read_or_write == 'W':
self.user_blog.new_post()
else:
print("Thank you for blogging!")
def _list_blogs(self):
blogs = Database.find(collection='blogs',
query={})
for blog in blogs:
print("ID: {}, Title: {}, Author: {}".format(blog['id'], blog['title'], blog['author']))
def _view_blog(self):
blog_to_see = input("Enter the ID of the blog you'd like to read: ")
blog = Blog.from_mongo(blog_to_see)
posts = blog.get_posts()
for post in posts:
print("Date: {}, title: {}\n\n{}".format(post['created_date'], post['title'], post['content']))
|
brunotougeiro/python
|
udemy-python-web-apps/terminal_blog/menu.py
|
Python
|
gpl-2.0
| 1,833
|
# Rekall Memory Forensics
# Copyright (c) 2008-2011 Volatile Systems
# Copyright 2013 Google Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""
@author: Bradley L Schatz
@license: GNU General Public License 2.0 or later
@contact: bradley@schatzforensic.com.au
This file provides support for Windows 7 SP0.
"""
# pylint: disable=protected-access
from rekall import addrspace
from rekall import kb
from rekall import obj
from rekall.plugins.overlays.windows import common
from rekall_lib import utils
def TagOffset(x):
if x.obj_profile.metadata("arch") == "AMD64":
return x.obj_offset - 12
return x.obj_offset - 4
# In windows 7 the VadRoot is actually composed from _MMADDRESS_NODEs instead of
# _MMVAD structs.
win7_overlays = {
'_EPROCESS': [None, {
# A symbolic link to the real vad root.
'RealVadRoot': lambda x: x.VadRoot.BalancedRoot
}],
'_MMADDRESS_NODE': [None, {
'Tag': [TagOffset, ['String', dict(length=4)]],
}],
'_MMVAD_SHORT': [None, {
'Tag': [TagOffset, ['String', dict(length=4)]],
'Start': lambda x: x.StartingVpn << 12,
'End': lambda x: ((x.EndingVpn + 1) << 12) - 1,
'Length': lambda x: x.End - x.Start + 1,
'CommitCharge': lambda x: x.u.VadFlags.CommitCharge,
}],
'_MMVAD': [None, {
'Tag': [TagOffset, ['String', dict(length=4)]],
'ControlArea': lambda x: x.Subsection.ControlArea,
'Start': lambda x: x.StartingVpn << 12,
'End': lambda x: ((x.EndingVpn + 1) << 12) - 1,
'Length': lambda x: x.End - x.Start + 1,
'CommitCharge': lambda x: x.u.VadFlags.CommitCharge,
}],
'_MMVAD_LONG': [None, {
'Tag': [TagOffset, ['String', dict(length=4)]],
'ControlArea': lambda x: x.Subsection.ControlArea,
'Start': lambda x: x.StartingVpn << 12,
'End': lambda x: ((x.EndingVpn + 1) << 12) - 1,
'Length': lambda x: x.End - x.Start + 1,
'CommitCharge': lambda x: x.u.VadFlags.CommitCharge,
}],
"_CONTROL_AREA": [None, {
'FilePointer': [None, ['_EX_FAST_REF', dict(
target="_FILE_OBJECT"
)]],
}],
"_OBJECT_HEADER": [None, {
"InfoMask": [None, ["Flags", dict(
maskmap=utils.Invert({
0x01: "CreatorInfo",
0x2: "NameInfo",
0x4: "HandleInfo",
0x8: "QuotaInfo",
0x10: "ProcessInfo",
0x20: "AuditInfo",
0x40: "PaddingInfo",
}),
target="unsigned char",
)]],
}],
'_MM_SESSION_SPACE': [None, {
# Specialized iterator to produce all the _IMAGE_ENTRY_IN_SESSION
# records.
'ImageIterator': lambda x: x.ImageList.list_of_type(
"_IMAGE_ENTRY_IN_SESSION", "Link")
}],
'_IMAGE_ENTRY_IN_SESSION': [None, {
'ImageBase': lambda x: x.Address.v() & ~7
}],
}
class _OBJECT_HEADER(common._OBJECT_HEADER):
"""A Rekall Memory Forensics object to handle Windows 7 object headers.
Windows 7 changes the way objects are handled:
References: http://www.codemachine.com/article_objectheader.html
The headers look like this:
_POOL_HEADER
# These are optional headers:
_OBJECT_HEADER_PROCESS_INFO
_OBJECT_HEADER_QUOTA_INFO
_OBJECT_HEADER_HANDLE_INFO
_OBJECT_HEADER:
.....
InfoMask
....
When the object manager wants to access a specific optional header, it can
use the constant lookup table nt!ObpInfoMaskToOffset to quickly calculate
the offset of that header (The headers always appear in the same order):
table = profile.get_constant_object(
"ObpInfoMaskToOffset",
target="Array",
target_args=dict(
target="byte"
count=0x80
)
)
option_header_offset = table[
OBJECT_HEADER->InfoMask & (DesiredHeaderBit | (DesiredHeaderBit-1))]
"""
# This specifies the order the headers are found below the
# _OBJECT_HEADER. It is obtained using "nt!ObpInfoMaskToOffset" which is a
# lookup table.
optional_header_mask = (
('CreatorInfo', '_OBJECT_HEADER_CREATOR_INFO', 0x01),
('NameInfo', '_OBJECT_HEADER_NAME_INFO', 0x02),
('HandleInfo', '_OBJECT_HEADER_HANDLE_INFO', 0x04),
('QuotaInfo', '_OBJECT_HEADER_QUOTA_INFO', 0x08),
('ProcessInfo', '_OBJECT_HEADER_PROCESS_INFO', 0x10),
('AuditInfo', '_OBJECT_HEADER_AUDIT_INFO', 0x20),
('PaddingInfo', '_OBJECT_HEADER_PADDING_INFO', 0x40),
)
def _GetOptionalHeader(self, struct_name, desired_bit):
if not self.InfoMask & desired_bit:
return obj.NoneObject("Header not set")
lookup = self.obj_session.GetParameter("ObpInfoMaskToOffset")
offset = lookup[self.InfoMask & (desired_bit | (desired_bit - 1))]
return self.obj_profile.Object(
struct_name, offset=self.obj_offset - offset,
vm=self.obj_vm, parent=self)
def get_object_type(self, vm=None):
"""Return the object's type as a string."""
return self.obj_session.GetParameter("ObjectTypeMap")[
self.TypeIndex].Name.v()
@utils.safe_property
def TypeIndex(self):
"""In windows 10 the type index is obfuscated.
Windows 10 obfuscates the object type using a cookie:
------ nt!ObpRemoveObjectRoutine ------: 0xf801a628e7e0
0xf801a628e7e0 MOV [RSP+0x10], RBX
0xf801a628e7e5 MOV [RSP+0x18], RBP
0xf801a628e7ea MOV [RSP+0x20], RSI
0xf801a628e7ef PUSH RDI
0xf801a628e7f0 SUB RSP, 0x50
0xf801a628e7f4 MOV RBX, RCX // RCX is object header.
0xf801a628e7f7 LEA RDI, [RIP-0x48f1e] 0x0 nt!ObTypeIndexTable
0xf801a628e7fe MOV RAX, RCX
0xf801a628e801 MOVZX ESI, DL
0xf801a628e804 SHR RAX, 0x8 // Shift address by 8
0xf801a628e808 MOVZX ECX, AL
0xf801a628e80b MOVZX EAX, BYTE [RBX+0x18] // _OBJECT_HEADER.TypeIndex
0xf801a628e80f XOR RCX, RAX // XOR with object type
0xf801a628e812 MOVZX EAX, BYTE [RIP-0x493ed] 0x1dd4015af55 nt!ObHeaderCookie
0xf801a628e819 XOR RCX, RAX // XOR with cookie
0xf801a628e81c MOV RDI, [RDI+RCX*8] // Dereference table.
"""
cookie = self.obj_profile.get_constant_object(
"ObHeaderCookie", target="byte").v()
# Windows 7 has no cookie.
if cookie == None:
return self.m("TypeIndex")
# Windows 10 xors the virtual address into this field so we need to use
# the virtual address to decode it.
# We are operating on the physical address space. We need to find the
# virtual address.
if self.obj_vm.metadata("image"):
# Resolve the virtual address for this physical address.
resolver = self.obj_session.GetParameter(
"physical_address_resolver")
vaddr, _ = resolver.PA2VA_for_DTB(
self.obj_offset,
self.obj_session.GetParameter("dtb"),
userspace=False)
# This hit does not exist in the kernel Address Space.
if vaddr is None:
return 0
else:
vaddr = self.obj_offset
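# Undo the obfuscation shown in the disassembly above: the stored byte is
# (vaddr >> 8) ^ ObHeaderCookie ^ real_index, and XOR is its own inverse,
# so XOR-ing again with the shifted address and the cookie recovers the
# real type index (masked to one byte).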
return ((vaddr >> 8) ^ cookie ^ int(self.m("TypeIndex"))) & 0xFF
def is_valid(self):
"""Determine if the object makes sense."""
# These need to be reasonable.
pointer_count = int(self.PointerCount)
if pointer_count > 0x100000 or pointer_count < 0:
return False
handle_count = int(self.HandleCount)
if handle_count > 0x1000 or handle_count < 0:
return False
# Must be one of the types revealed by the object_types plugins.
if self.TypeIndex >= 50 or self.TypeIndex < 1:
return False
return True
# Build properties for the optional headers
for _name, _y, _z in _OBJECT_HEADER.optional_header_mask:
setattr(_OBJECT_HEADER, _name, property(
lambda x, y=_y, z=_z: x._GetOptionalHeader(y, z)))
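# Hedged, self-contained illustration (added) of the dynamic-property trick
# used just above, in plain Python with no Rekall types: each (name, bit)
# pair becomes a property funnelling into one lookup method, and the
# default-argument binding (b=_bit below, y=_y/z=_z above) freezes each
# loop value at definition time instead of sharing the last one.
class _PropertyDemo(object):
    masks = (('NameInfo', 0x02), ('HandleInfo', 0x04))
    info_mask = 0x06
    def _get(self, bit):
        return bool(self.info_mask & bit)
for _n, _bit in _PropertyDemo.masks:
    setattr(_PropertyDemo, _n, property(lambda s, b=_bit: s._get(b)))
# _PropertyDemo().NameInfo evaluates to True, HandleInfo likewise.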
class _MMADDRESS_NODE(common.VadTraverser):
"""In win7 the base of all Vad objects is _MMADDRESS_NODE.
The Vad structures can be either _MMVAD_SHORT or _MMVAD or _MMVAD_LONG. At
the base of each struct there is an _MMADDRESS_NODE which contains the
LeftChild and RightChild members. In order to traverse the tree, we follow
the _MMADDRESS_NODE and create the required _MMVAD type at each point
depending on their tags.
"""
## The actual type depends on this tag value.
tag_map = {'Vadl': '_MMVAD_LONG',
'VadS': '_MMVAD_SHORT',
'Vad ': '_MMVAD',
'VadF': '_MMVAD_SHORT',
'Vadm': '_MMVAD_LONG',
}
class _POOL_HEADER(common._POOL_HEADER):
"""A class for pool headers"""
MAX_PREAMBLE_SIZE = 0x50
@utils.safe_property
def NonPagedPool(self):
return self.PoolType.v() % 2 == 0 and self.PoolType.v() > 0
@utils.safe_property
def PagedPool(self):
return self.PoolType.v() % 2 == 1
@utils.safe_property
def FreePool(self):
return self.PoolType.v() == 0
# A class cached version of the lookup map. This is mutable and shared
# between all instances.
lookup = {}
def _BuildLookupTable(self):
"""Create a fast lookup table mapping InfoMask -> minimum_offset.
We are interested in the maximum distance between the _POOL_HEADER and
_OBJECT_HEADER. This is dictated by the InfoMask field. Here we build a
quick lookup table between the InfoMask field and the offset of the
first optional header.
"""
ObpInfoMaskToOffset = self.obj_session.GetParameter(
"ObpInfoMaskToOffset")
self.lookup["\x00"] = 0
# Iterate over all the possible InfoMask values (Bytes can take on 256
# values).
for i in range(0x100):
# Locate the largest offset from the start of
# _OBJECT_HEADER. Starting with the largest bit position 1 << 7.
bit_position = 0x80
while bit_position > 0:
# This is the optional header with the largest offset.
if bit_position & i:
self.lookup[chr(i)] = ObpInfoMaskToOffset[
i & (bit_position | (bit_position - 1))]
break
bit_position >>= 1
def IterObject(self, type=None, freed=True):
"""Generates possible _OBJECT_HEADER accounting for optional headers.
Note that not all pool allocations have an _OBJECT_HEADER - only ones
allocated from the the object manager. This means calling this method
depends on which pool allocation you are after.
On windows 8, pool allocations are done from preset sizes. This means
that the allocation is never exactly the same size and we can not use
the bottom up method like before.
We therefore, have to build the headers forward by checking the preamble
size and validity of each object. This is a little slower than with
earlier versions of windows.
Args:
type: The object type name. If not specified we return all objects.
"""
pool_align = self.obj_profile.get_constant("PoolAlignment")
allocation_size = self.BlockSize * pool_align
# Operate on a cached version of the next page.
# We use a temporary buffer for the object to save reads of the image.
start = self.obj_end
cached_data = self.obj_vm.read(start, allocation_size)
cached_vm = addrspace.BufferAddressSpace(
base_offset=start, data=cached_data, session=self.obj_session)
# We search for the _OBJECT_HEADER.InfoMask in close proximity to our
# object. We build a lookup table between the values in the InfoMask and
# the minimum distance there is between the start of _OBJECT_HEADER and
# the end of _POOL_HEADER. This way we can quickly skip unreasonable
# values.
# This is the offset within _OBJECT_HEADER of InfoMask.
info_mask_offset = self.obj_profile.get_obj_offset(
"_OBJECT_HEADER", "InfoMask")
# Build the cache if needed.
if not self.lookup:
self._BuildLookupTable()
# Walk over all positions in the address space and try to fit an object
# header there.
for i in utils.xrange(start,
start + allocation_size - info_mask_offset,
pool_align):
possible_info_mask = cached_data[i - start + info_mask_offset]
#if possible_info_mask > '\x7f':
# continue
# The minimum amount of space needed before the object header to
# hold all the optional headers.
minimum_offset = self.lookup[possible_info_mask]
# Obviously wrong because we need more space than we have.
if minimum_offset > i - start:
continue
# Create a test object header from the cached vm to test for
# validity.
test_object = self.obj_profile._OBJECT_HEADER(
offset=i, vm=cached_vm)
if test_object.is_valid():
if (type is None or
test_object.get_object_type() == type or
# Freed objects point to index 1
#(which is also 0xbad0b0b0).
(freed and test_object.TypeIndex <= 2)):
yield test_object
class ObjectTypeMapHook(kb.ParameterHook):
"""Get and cache the object type map.
In windows 7, rather than store a pointer to the _OBJECT_TYPE object
directly, there is a global table of object types, and the object simply
stores an index to it.
"""
name = "ObjectTypeMap"
def calculate(self):
return self.session.profile.get_constant_object(
"ObTypeIndexTable",
target="Array",
target_args=dict(
target="Pointer",
target_args=dict(
target="_OBJECT_TYPE"
)
)
)
def InitializeWindows7Profile(profile):
profile.add_overlay(win7_overlays)
profile.add_classes(
_OBJECT_HEADER=_OBJECT_HEADER,
_MMADDRESS_NODE=_MMADDRESS_NODE,
_POOL_HEADER=_POOL_HEADER,
)
|
dsweet04/rekall
|
rekall-core/rekall/plugins/overlays/windows/win7.py
|
Python
|
gpl-2.0
| 15,299
|
import re
from wx.stc import STC_FOLDLEVELHEADERFLAG, STC_FOLDLEVELBASE
from idn_highlight import ErlangHighlighter, ErlangHighlightType, IgorHighlighter, IgorHighlightType
from idn_lexer import BaseLexer
from idn_erlang_utils import IsModule
__author__ = 'Yaroslav'
class ErlangLexer(BaseLexer):
def __init__(self, stc):
BaseLexer.__init__(self, stc)
self.highlighter = ErlangHighlighter()
self.linesData = {}
def StyleText(self, startPos, endPos):
startLine = self.stc.LineFromPosition(startPos)
startLineBeginPos = self.stc.PositionFromLine(startLine)
endLine = self.stc.LineFromPosition(endPos)
endLineEndPos = self.stc.GetLineEndPosition(endLine)
self.stc.StartStyling(startLineBeginPos, 0x1f)
lastEnd = startLineBeginPos
defaultStyle = ErlangHighlightType.DEFAULT
while startLine <= endLine:
prevData = self.linesData[startLine] if startLine in self.linesData else None
if prevData and prevData.functionEnd:
line = startLine
while line > 0:
if self.linesData[line].functionName != None and self.linesData[line].functionEnd == prevData.functionEnd:
self.linesData[line].functionEnd = None
break
line -= 1
lineData = LineData()
self.linesData[startLine] = lineData
lineStart = self.stc.PositionFromLine(startLine)
text = self.stc.GetLineUTF8(startLine)
tokens = self.highlighter.GetHighlightingTokens(text)
for token in tokens:
start = lineStart + token.start
if start > lastEnd:
self.stc.SetStyling(start - lastEnd, defaultStyle)
self.stc.SetStyling(len(token.value), token.type)
lastEnd = lineStart + token.end
if token.type == ErlangHighlightType.FUNDEC:
lineData.functionName = token.value
lineData.functionStart = token.start + lineStart
line = startLine - 1
while line > 0:
if self.linesData[line].functionName == token.value:
self.linesData[line].functionEnd = self.stc.GetLineEndPosition(startLine - 1)
break
line -= 1
elif token.type == ErlangHighlightType.FUNCTION:
if tokens[0].value == "-spec":
lineData.specName = tokens[1].value
lineData.specStart = tokens[1].start + lineStart
elif token.type == ErlangHighlightType.FULLSTOP:
lineData.functionEnd = token.end + lineStart
line = startLine
while line > 0:
if self.linesData[line].functionName != None and self.linesData[line].functionEnd == None:
self.linesData[line].functionEnd = token.end + lineStart
break
elif self.linesData[line].specName != None and self.linesData[line].specEnd == None:
self.linesData[line].specEnd = token.end + lineStart
break
line -= 1
startLine += 1
if lastEnd < endLineEndPos:
self.stc.SetStyling(endLineEndPos - lastEnd, defaultStyle)
def DoFold(self, startPos, endPos):
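# Added note on the Scintilla fold levels used below: lines that open a
# foldable region (function heads, record definitions, -spec) get
# STC_FOLDLEVELBASE and are later tagged with STC_FOLDLEVELHEADERFLAG;
# body lines get BASE + 1; the line holding the terminating full stop
# gets BASE + 2; lines outside any region get 0.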
startLine = self.stc.LineFromPosition(startPos) - 1
endLine = self.stc.LineFromPosition(endPos)
prevFoldLevel = 0
if startLine > 0:
prevFoldLevel = self.stc.GetFoldLevel(startLine - 1)
nextLineFoldLevel = prevFoldLevel
if prevFoldLevel ^ STC_FOLDLEVELHEADERFLAG == STC_FOLDLEVELBASE:
nextLineFoldLevel = STC_FOLDLEVELBASE + 1
elif prevFoldLevel == STC_FOLDLEVELBASE + 2:
nextLineFoldLevel = 0
while startLine <= endLine:
currentLineFoldLevel = nextLineFoldLevel
text = self.stc.GetLineUTF8(startLine)
tokens = self.highlighter.GetHighlightingTokens(text)
for token in tokens:
if (token.type in {ErlangHighlightType.FUNDEC, ErlangHighlightType.RECORDDEF}
or token.value == "-spec"):
currentLineFoldLevel = STC_FOLDLEVELBASE
nextLineFoldLevel = STC_FOLDLEVELBASE + 1
elif token.type == ErlangHighlightType.FULLSTOP:
if currentLineFoldLevel == STC_FOLDLEVELBASE + 1:
currentLineFoldLevel = STC_FOLDLEVELBASE + 2
elif currentLineFoldLevel == STC_FOLDLEVELBASE:
currentLineFoldLevel = 0
if prevFoldLevel == STC_FOLDLEVELHEADERFLAG | STC_FOLDLEVELBASE:
self.stc.SetFoldLevel(startLine - 1, 0)
nextLineFoldLevel = 0
if currentLineFoldLevel == STC_FOLDLEVELBASE:
currentLineFoldLevel |= STC_FOLDLEVELHEADERFLAG
if (currentLineFoldLevel == STC_FOLDLEVELHEADERFLAG | STC_FOLDLEVELBASE and
currentLineFoldLevel == prevFoldLevel):
self.stc.SetFoldLevel(startLine - 1, 0)
prevFoldLevel = currentLineFoldLevel
self.stc.SetFoldLevel(startLine, currentLineFoldLevel)
startLine += 1
def IsInFunction(self):
line = self.stc.GetCurrentLine()
caretPos = self.stc.GetCurrentPos()
while line > 0:
data = self.linesData[line]
if data.functionEnd and caretPos > data.functionEnd: return False
if data.functionName: return not data.functionEnd or data.functionEnd >= caretPos
line -= 1
return False
def GetCurrentFunction(self, line = None, caretPos = None):
if line is None:
line = self.stc.GetCurrentLine()
if caretPos is None:
caretPos = self.stc.GetCurrentPos()
while line > 0:
data = self.linesData[line]
if data.functionEnd and caretPos > data.functionEnd:
break
if data.functionName:
end = data.functionEnd
if not end:
end = caretPos
if end >= caretPos:
return (data.functionName,
data.functionStart,
end,
self.stc.GetTextRangeUTF8(data.functionStart, end))
else:
break
line -= 1
return None
def GetFunctionSpec(self, line, functionName):
startLine = line
while line > 0:
data = self.linesData[line]
if data.functionName:
return None
if data.specName == functionName:
return self.stc.PositionFromLine(line), self.stc.GetTextRangeUTF8(self.stc.PositionFromLine(line), self.stc.GetLineEndPosition(startLine))
line -= 1
def IsInSpec(self):
line = self.stc.GetCurrentLine()
caretPos = self.stc.GetCurrentPos()
while line > 0:
data = self.linesData[line]
if data.functionEnd or data.functionStart or data.functionName:
return False
if data.specEnd: return data.specEnd > caretPos
line -= 1
return False
def RecordFieldUnderCursor(self):
opened = '({['
closed = ')}]'
comma = ','
eq = '='
recordOpenBracket = "{"
constructs = {"case", "try", "receive", "begin", "if"}
stateEnd = 1
stateClosed = 2
caretPos = self.stc.GetCurrentPos()
if self.IsInFunction():
funData = self.GetCurrentFunction()
text = funData[3]
text = text[:caretPos - funData[1]]
else:
line = self.stc.GetCurrentLine()
text = self.stc.GetTextRangeUTF8(self.stc.PositionFromLine(line - 10), caretPos)
tokens = self.highlighter.GetHighlightingTokens(text)
tokens.reverse()
tokens = [token for token in tokens if token.type not in [ErlangHighlightType.COMMENT]]
result = False
record = ""
prefix = ""
if len(tokens) > 1 and tokens[0].type == ErlangHighlightType.ATOM and tokens[1].value in [comma, recordOpenBracket]:
prefix = tokens[0].value
elif len(tokens) > 0 and tokens[0].value in [comma, recordOpenBracket]:
prefix = ""
else:
return (False, "", "")
state = None
first = None
lastBracket = None
for i, token in enumerate(tokens):
if not state:
if token.value == comma:
first = comma
elif token.value == eq:
if first != comma:
break
elif token.type == ErlangHighlightType.BRACKET and token.value == recordOpenBracket:
if len(tokens) > i+1 and tokens[i+1].type == ErlangHighlightType.RECORD:
result = True
record = tokens[i+1].value[1:]
break
elif token.value == "end":
state = stateEnd
elif token.value in closed:
state = stateClosed
lastBracket = (token.value, None)
elif token.value in "([" or token.value in constructs:
break
elif state == stateEnd and token.value in constructs:
state = None
elif state == stateClosed and token.value in closed:
if lastBracket:
lastBracket = (token.value, lastBracket)
else:
lastBracket = (token.value, None)
elif state == stateClosed and token.value == opened[closed.index(lastBracket[0])]:
if lastBracket[1]:
lastBracket = lastBracket[1]
else:
state = None
return (result, record, prefix)
def IsInTypeBlock(self):
r = re.compile(r"(?:^-spec|^-callback|^-type|^-record)(.*?)(?:^[a-z].*?|^-[a-z]+|\.)", re.MULTILINE | re.DOTALL)
text = self.stc.GetTextUTF8()
caretPos = self.stc.GetCurrentPos()
pos = 0
while True:
match = r.search(text, pos)
if not match: return False
if caretPos > match.start() and caretPos < match.end():
return True
pos = match.end()
return False
def GetAllExports(self):
r = re.compile("^-export\(\[\s*(.*?)\s*\]\)\.", re.MULTILINE | re.DOTALL)
text = self.stc.GetTextUTF8()
pos = 0
result = None
lastInsertPosition = None
start = None
ranges = []
while True:
match = r.search(text, pos)
if not match:
if result is None:
mre = re.compile("^-module\(.*?\)\.", re.MULTILINE | re.DOTALL)
match = mre.search(text, 0)
if match:
end = match.end()
else:
end = 0
if IsModule(self.stc.filePath):
self.stc.InsertTextUTF8(end, "\n-export([\n]).")
return self.GetAllExports()
else:
break
else:
break
if not start:
start = match.start(1)
pos = match.end(0)
ranges.append((match.start(1), match.end(1)))
lastInsertPosition = match.end(1)
if result is None:
result = ""
result += match.group(1)
if result is None:
result = ""
return result.strip(), start, pos, lastInsertPosition, ranges
def GetExportInsertPosition(self):
r = re.compile("^-include.*?\)\.", re.MULTILINE | re.DOTALL)
text = self.stc.GetTextUTF8()
pos = 0
start = None
while True:
match = r.search(text, pos)
if not match:
if start is None:
mre = re.compile("^-module\(.*?\)\.", re.MULTILINE | re.DOTALL)
match = mre.search(text, 0)
if match:
end = match.end()
else:
end = 0
self.stc.InsertTextUTF8(end, "\n")
return end + 1
else:
break
if not start:
start = match.start(0)
pos = match.end(0)
self.stc.InsertTextUTF8(pos, "\n")
return pos + 1
class LineData:
def __init__(self):
self.functionName = None
self.functionStart = None
self.functionEnd = None
self.specName = None
self.specStart = None
self.specEnd = None
def __str__(self):
return "Fun: {} ({}, {}). Spec: {} ({}, {})".format(self.functionName, self.functionStart, self.functionEnd,
self.specName, self.specStart, self.specEnd)
class RecordStart:
def __init__(self, record, start):
self.record = record
self.start = start
class IgorLexer(BaseLexer):
def __init__(self, stc):
BaseLexer.__init__(self, stc)
self.highlighter = IgorHighlighter()
def StyleText(self, startPos, endPos):
startLine = self.stc.LineFromPosition(startPos)
startLineBeginPos = self.stc.PositionFromLine(startLine)
endLine = self.stc.LineFromPosition(endPos)
endLineEndPos = self.stc.GetLineEndPosition(endLine)
self.stc.StartStyling(startLineBeginPos, 0x1f)
lastEnd = startLineBeginPos
defaultStyle = IgorHighlightType.DEFAULT
while startLine <= endLine:
lineStart = self.stc.PositionFromLine(startLine)
text = self.stc.GetLineUTF8(startLine)
tokens = self.highlighter.GetHighlightingTokens(text)
for token in tokens:
start = lineStart + token.start
if start > lastEnd:
self.stc.SetStyling(start - lastEnd, defaultStyle)
self.stc.SetStyling(len(token.value), token.type)
lastEnd = lineStart + token.end
startLine += 1
if lastEnd < endLineEndPos:
self.stc.SetStyling(endLineEndPos - lastEnd, defaultStyle)
def DoFold(self, startPos, endPos):
startLine = self.stc.LineFromPosition(startPos) - 1
endLine = self.stc.LineFromPosition(endPos)
prevFoldLevel = 0
if startLine > 0:
prevFoldLevel = self.stc.GetFoldLevel(startLine - 1)
nextLineFoldLevel = prevFoldLevel
if prevFoldLevel ^ STC_FOLDLEVELHEADERFLAG == STC_FOLDLEVELBASE:
nextLineFoldLevel = STC_FOLDLEVELBASE + 1
elif prevFoldLevel == STC_FOLDLEVELBASE + 2:
nextLineFoldLevel = 0
while startLine <= endLine:
currentLineFoldLevel = nextLineFoldLevel
text = self.stc.GetLineUTF8(startLine)
tokens = self.highlighter.GetHighlightingTokens(text)
for token in tokens:
if (token.value in ["record", "enum", "service", "variant"]):
currentLineFoldLevel = STC_FOLDLEVELBASE
nextLineFoldLevel = STC_FOLDLEVELBASE + 1
elif token.value == "}":
if currentLineFoldLevel == STC_FOLDLEVELBASE + 1:
currentLineFoldLevel = STC_FOLDLEVELBASE + 2
elif currentLineFoldLevel == STC_FOLDLEVELBASE:
currentLineFoldLevel = 0
if prevFoldLevel == STC_FOLDLEVELHEADERFLAG | STC_FOLDLEVELBASE:
self.stc.SetFoldLevel(startLine - 1, 0)
nextLineFoldLevel = 0
if currentLineFoldLevel == STC_FOLDLEVELBASE:
currentLineFoldLevel |= STC_FOLDLEVELHEADERFLAG
if (currentLineFoldLevel == STC_FOLDLEVELHEADERFLAG | STC_FOLDLEVELBASE and
currentLineFoldLevel == prevFoldLevel):
self.stc.SetFoldLevel(startLine - 1, 0)
prevFoldLevel = currentLineFoldLevel
self.stc.SetFoldLevel(startLine, currentLineFoldLevel)
startLine += 1
|
IDNoise/NoiseIDE
|
NoiseIDEPython/idn_erlang_lexer.py
|
Python
|
gpl-2.0
| 16,733
|
from nltk.probability import FreqDist
from nltk.corpus import stopwords
from nltk.tokenize import SpaceTokenizer, WhitespaceTokenizer
import nltk.tag, nltk.util, nltk.stem
from HTMLParser import HTMLParser
import htmlentitydefs
import re # regular expressions
import string, math
from bs4 import BeautifulSoup
import csv
from datetime import date
from datetime import datetime
from datetime import timedelta
import calendar
stopword_list = stopwords.words('english')
porter = nltk.PorterStemmer()
doc_frequency = {}
word_features = []
vocab_freq = {}
def NormalizeVector(vector):
length = ComputeVectorLength(vector)
for (k,v) in vector.items():
if length > 0:
vector[k] = v / length
return vector
def ComputeVectorLength(vector):
length = 0
for d in vector.values():
length += d * d
length = math.sqrt(length)
return length
# Assumes that both vectors are normalized
def ComputeCosineSimilarity(v1, v2):
dotproduct = 0
for (key1,val1) in v1.items():
if key1 in v2:
dotproduct += val1 * v2[key1]
return dotproduct
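# Hedged, self-contained illustration (added): the vector helpers above on
# two tiny term-frequency vectors. Nothing below calls this; it is only
# documentation.
def _similarity_demo():
    v1 = NormalizeVector({'apple': 2.0, 'pie': 1.0})
    v2 = NormalizeVector({'apple': 1.0, 'cake': 3.0})
    # Both vectors are unit length, so the dot product is the cosine (~0.28).
    return ComputeCosineSimilarity(v1, v2)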
def CleanAndTokenize(text):
# Strip URLs and replace with token "URLURLURL"
r = re.compile(r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+")
text = re.sub(r, " URLURLURL", text)
# Strip html tags
soup = BeautifulSoup(text)
for tag in soup.findAll(True):
tag.replaceWithChildren()
text = soup.get_text()
# Normalize everything to lower case
text = text.lower()
# Strip line breaks and endings \r \n
r = re.compile(r"[\r\n]+")
text = re.sub(r, "", text)
table = {
ord(u'\u2018') : u"'",
ord(u'\u2019') : u"'",
ord(u'\u201C') : u'"',
ord(u'\u201d') : u'"',
ord(u'\u2026') : u'',
ord(u'\u2014') : u'', # get rid of em dashes
}
text = text.translate(table)
# Normalize contractions
# e.g. can't => can not, it's => it is, he'll => he will
contractions = {
"can't": "can not", "couldn't": "could not", "don't": "do not",
"didn't": "did not", "doesn't": "does not", "shouldn't": "should not",
"haven't": "have not", "aren't": "are not", "weren't": "were not",
"wouldn't": "would not", "hasn't": "has not", "hadn't": "had not",
"won't": "will not", "wasn't": "was not", "isn't": "is not",
"ain't": "is not", "it's": "it is", "i'm": "i am", "i've": "i have",
"i'll": "i will", "i'd": "i would", "we've": "we have",
"we'll": "we will", "we'd": "we would", "we're": "we are",
"you've": "you have", "you'll": "you will", "you'd": "you would",
"you're": "you are", "he'll": "he will", "he'd": "he would",
"he's": "he has", "she'll": "she will", "she'd": "she would",
"she's": "she has", "they've": "they have", "they'll": "they will",
"they'd": "they would", "they're": "they are", "that'll": "that will",
"that's": "that is", "there's": "there is",
}
for contraction, expansion in contractions.items():
text = text.replace(contraction, expansion)
# Strip punctuation (except for a few)
punctuations = string.punctuation # includes following characters: !"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~
excluded_punctuations = ["$", "%"]
for p in punctuations:
if p not in excluded_punctuations:
text = text.replace(p, " ")
# Condense double spaces
text = text.replace(" ", " ")
# Tokenize the text
# NOTE: Using a simple tokenizer based on whitespace ...
# Could also try a more sophisticated tokenizer if abbreviations / contractions should be preserved
tokenizer = WhitespaceTokenizer()
text_tokens = tokenizer.tokenize(text)
return text_tokens
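# Hedged example (added) of what the cleaner above produces; the exact output
# assumes BeautifulSoup's default parser strips the tags as shown.
# CleanAndTokenize(u"I can't read <b>this</b>, see http://x.example!")
# -> [u'i', u'can', u'not', u'read', u'this', u'see', u'urlurlurl']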
# Computes the vocabulary to be used for vector operations
def ComputeVocabulary():
# Get the data
csvFile = open("data/comments_study.csv", 'Ur')
csvReader = csv.reader(csvFile, delimiter=',', quotechar='"')
comments = {}
for row in csvReader:
# don't read 1st line
if csvReader.line_num > 1:
comments[row[0]] = row
# Compute Vocabulary and output it for later
tokens = []
n = 0
nDocuments = len(comments)
for c in comments:
n = n + 1
if n % 100 == 0 :
print n
ct = CleanAndTokenize(comments[c][2].decode("utf8"))
ct = [w for w in ct if w not in stopword_list]
stemmed_tokens = [porter.stem(t) for t in ct]
tokens.extend(stemmed_tokens)
for t in stemmed_tokens:
if t not in doc_frequency:
doc_frequency[t] = 1
else:
doc_frequency[t] = doc_frequency[t]+1
#print tokens
fd = FreqDist(tokens)
# find cutoff
unigram_cutoff = 0
for (i, (key, val)) in enumerate(fd.items()):
#print str(i) + " " + str(key) + " " + str(fd[key])
if fd[key] < 10:
unigram_cutoff = i - 1
break
print "unigram cutoff: " + str(unigram_cutoff)
word_features.extend(fd.keys()[:unigram_cutoff])
fileWriter = csv.writer(open("data/vocab.csv", "w+"),delimiter=",")
for w in word_features:
row = [w.encode("utf8"), doc_frequency[w]]
fileWriter.writerow(row)
def ComputeCommentArticleRelevance():
# Read in the vocabulary and the document frequencies
csvFile = open("data/vocab.csv", 'Ur')
csvReader = csv.reader(csvFile, delimiter=',', quotechar='"')
# Vocab freq stores the vocab as keys and the doc frequency as values
for row in csvReader:
if csvReader.line_num > 1:
vocab_freq[row[0]] = int(row[1])
# commentID,commentTitle,commentBody,approveDate,recommendationCount,display_name,location,commentQuestion,commentSequence,status,articleURL,editorsSelection,in_study
csvFile = open("data/comments_study.csv", 'Ur')
csvReader = csv.reader(csvFile, delimiter=',', quotechar='"')
comments = {}
for row in csvReader:
if csvReader.line_num > 1:
comments[row[0]] = row
# The number of documents is the number of comments
nDocuments = len(comments)
# articleID,pubDate,headline,articleURL,fullText,materialType,snippet
csvFile = open("data/articles.csv", 'Ur')
csvReader = csv.reader(csvFile, delimiter=',', quotechar='"')
articles = {}
for row in csvReader:
if csvReader.line_num > 1:
# key on the article URL
articles[row[3]] = row
# each output row is extended with a final column holding the comment-article relevance
fileWriter = csv.writer(open("data/comment_study_article_relevance.csv", "w+"),delimiter=",")
# for each article and the comments on each
for (j, (commentID, comment)) in enumerate(comments.items()):
print "comment: " + str(j)
ct = CleanAndTokenize(comment[2].decode("utf8"))
ct = [w for w in ct if w not in stopword_list]
comment_stemmed_tokens = [porter.stem(t) for t in ct]
comment_stemmed_tokens_fd = FreqDist(comment_stemmed_tokens)
# Get the article full text
ct = CleanAndTokenize(articles[comment[10]][4].decode("utf8"))
ct = [w for w in ct if w not in stopword_list]
article_stemmed_tokens = [porter.stem(t) for t in ct]
article_stemmed_tokens_fd = FreqDist(article_stemmed_tokens)
# now create the feature vectors for article and comment (this is redundant for the article on every iteration)
article_features = {}
comment_features = {}
for w in vocab_freq:
df = vocab_freq[w]
# float() avoids Python 2 integer division truncating the IDF term
article_features[w] = article_stemmed_tokens_fd[w] * math.log(float(nDocuments) / df)
comment_features[w] = comment_stemmed_tokens_fd[w] * math.log(float(nDocuments) / df)
# normalize vectors
article_features = NormalizeVector(article_features)
comment_features = NormalizeVector(comment_features)
comment_article_similarity = ComputeCosineSimilarity (article_features, comment_features)
# Extend the row with the similarity value and write it out
comment[2] = ""
comment.append(comment_article_similarity)
fileWriter.writerow(comment)
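# Worked example (added) of the weighting used above: with nDocuments = 1000
# and a stem occurring 3 times in a comment but in 100 documents overall,
# its weight is 3 * log(1000/100.0) = 3 * log(10) ~= 6.91 (natural log),
# before the whole vector is normalized to unit length.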
def ComputeCommentConversationalRelevance():
# Read in the vocabulary and the document frequencies
csvFile = open("data/vocab.csv", 'Ur')
csvReader = csv.reader(csvFile, delimiter=',', quotechar='"')
# Vocab freq stores the vocab as keys and the doc frequency as values
for row in csvReader:
if csvReader.line_num > 1:
vocab_freq[row[0]] = int(row[1])
# Only keep articles with at least 10 comments on them
articles = {}
# commentID,commentTitle,commentBody,approveDate,recommendationCount,display_name,location,commentQuestion,commentSequence,status,articleURL,editorsSelection,in_study
csvFile = open("data/comments_study.csv", 'Ur')
csvReader = csv.reader(csvFile, delimiter=',', quotechar='"')
nDocuments = 0
for row in csvReader:
if csvReader.line_num > 1:
nDocuments = nDocuments + 1
article_url = row[10]
# convert to unix time
row[3] = datetime.strptime(row[3], "%Y-%m-%d %H:%M:%S")
row[3] = calendar.timegm(row[3].utctimetuple())
if article_url not in articles:
articles[article_url] = []
articles[article_url].append(row)
fileWriter = csv.writer(open("data/comment_study_comment_conversational_relevance.csv", "w+"),delimiter=",")
nc = 0
for a in articles:
print a
comments = articles[a]
if len(comments) >= 10:
# sort by time
comments.sort(key=lambda x: x[3])
# get the centroid of first 10 comments (then we incrementally update it)
centroid_comment_stemmed_tokens = []
#print articles[a]
for comment in comments[0:10]:
ct = CleanAndTokenize(comment[2].decode("utf8"))
ct = [w for w in ct if w not in stopword_list]
centroid_comment_stemmed_tokens.extend([porter.stem(t) for t in ct])
#print len(comment_stemmed_tokens)
#comment_stemmed_tokens_fd = FreqDist(comment_stemmed_tokens)
# Now just look at comments 11 through N (there must be at least 10 before each to compute the centroid)
for comment in comments[10:]:
ct = CleanAndTokenize(comment[2].decode("utf8"))
ct = [w for w in ct if w not in stopword_list]
# Update and compute the centroid
centroid_comment_stemmed_tokens.extend([porter.stem(t) for t in ct])
centroid_comment_stemmed_tokens_fd = FreqDist(centroid_comment_stemmed_tokens)
centroid_comment_features = {}
for w in vocab_freq:
centroid_comment_features[w] = centroid_comment_stemmed_tokens_fd[w] * math.log(float(nDocuments) / vocab_freq[w])  # float() avoids Python 2 integer division
centroid_comment_features = NormalizeVector(centroid_comment_features)
# Now compute dist to comment
comment_stemmed_tokens = []
comment_stemmed_tokens.extend([porter.stem(t) for t in ct])
comment_stemmed_tokens_fd = FreqDist(comment_stemmed_tokens)
comment_features = {}
for w in vocab_freq:
comment_features[w] = comment_stemmed_tokens_fd[w] * math.log(float(nDocuments) / vocab_freq[w])  # float() avoids Python 2 integer division
comment_features = NormalizeVector(comment_features)
comment_originality = ComputeCosineSimilarity (centroid_comment_features, comment_features)
# Extend the row with the similarity value and write it out
comment[2] = ""
comment.append(comment_originality)
fileWriter.writerow(comment)
#ComputeVocabulary()
# Computing similarities requires that the vocab file has already been computed
#ComputeCommentArticleRelevance()
ComputeCommentConversationalRelevance()
|
comp-journalism/commentIQ
|
processComments.py
|
Python
|
gpl-2.0
| 11,737
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
def parse_square(raw):
if raw in ["S", "R", "E"]:
return raw
else:
return int(raw)
def append(counter):
if counter == 0:
return []
else:
return [parse_square(raw_input())] + append(counter-1)
# game_area = append(int(raw_input()))
game_area = ['S', 1, 'R', 4, 3, 4, 3, -5, 2, -4, 'E']
class Node(object):
def __init__(self, id, parents_list):
self.count = 1
self.winner_way = False
self.childrens_list = []
self.id = id
self.parents_list = parents_list
def next_square(self, steps):
if 0 <= (self.id + steps) < len(game_area):
# If we have not yet visited this square on this branch,
# return a node for the next step
if (self.id+steps) in self.parents_list:
return []
else:
return Node(self.id+steps, self.parents_list+[self.id])
else:
return []
def get_steps(self):
step_rule = game_area[self.id]
if step_rule == "R":
return xrange(1, 7)
elif step_rule == "E":
return []
else:
return [step_rule,]
def __str__(self):
return "%s %s %s" % (self.id, game_area[self.id], self.winner_way)
def __repr__(self):
return self.__str__()
def get_childrens(self):
if self.childrens_list == []:
for step in self.get_steps():
ns = self.next_square(step)
if ns == []:
to_append = []
elif game_area[ns.id] == "E":
to_append = ["E"]
self.winner_way = True
self.count += 1
else:
self.winner_way, count, to_append = ns.get_childrens()
if self.winner_way:
self.count += count
self.childrens_list.append([ns] + to_append)
return self.winner_way, self.count, self.childrens_list
first_steps = []
for step in xrange(1, 7):
node = Node(step, [0,])
way = node.get_childrens()
if way[0] == True:
first_steps.append(node)
if first_steps == []:
print("impossible")
else:
print(min([x.count for x in first_steps]))
|
pimiento/codingames
|
odeskcontest_2.py
|
Python
|
gpl-2.0
| 2,418
|
# a part of cropgui, a graphical front-end for lossless jpeg cropping
# Copyright (C) 2009 Jeff Epler <jepler@unpythonic.net>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import atexit
import fcntl
import os
import struct
import sys
import threading
lock = threading.RLock()
TIOCGWINSZ = 0x5413
screen_width = screen_height = None
def screen_size():
if not os.isatty(2): return 0, 0
res = fcntl.ioctl(2, TIOCGWINSZ, "\0" * 4)
# The kernel's winsize struct is (rows, cols); return (width, height)
rows, cols = struct.unpack("hh", res)
return cols, rows
screen_width, screen_height = screen_size()
last_width = 0
def locked(f):
def fu(*args, **kw):
lock.acquire()
try:
return f(*args, **kw)
finally:
lock.release()
return fu
@locked
def progress(message, *args):
if args: message = message % args
global last_width
if screen_width == 0: return
message = message[:screen_width - 1]
width = len(message)
if width < last_width:
message += " " * (last_width - width)
sys.stderr.write(message + "\r")
sys.stderr.flush()
last_width = width
@locked
def log(message, *args):
if args: message = message % args
progress_clear()
sys.stderr.write(message + "\n");
sys.stderr.flush()
def progress_clear():
if last_width: progress("")
atexit.register(progress_clear)
|
onno74/cropgui
|
log.py
|
Python
|
gpl-2.0
| 2,017
|
#!python2
#-*- coding: utf-8 -*-
# LearningModels.py
# Author: Larvasapiens <sebastian.narvaez@correounivalle.edu.co>
# Created: 2015-09-30
# Last Modification: 2015-11-22
# Version: 1.2
#
# Copyright (C) {2016} {Sebastián Narváez Rodríguez}
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import print_function
import numpy
import time
from nupic.research.spatial_pooler import SpatialPooler
from nupic.research.temporal_memory import TemporalMemory
from nupic.algorithms.CLAClassifier import CLAClassifier
from Utils.CLAClassifierCond import CLAClassifierCond
from LearningModel import LearningModel
from Utils.ArrayCommonOverlap import CommonOverlap
class FeedbackModel(LearningModel):
"""
Structure:
WordEncoder -> WordSP -> WordTM
ActionEncoder -> ActionSP -> ActionTM
WordTM, ActionTM -> GeneralSP -> GeneralTM
"""
def __init__(self, wordEncoder, actionEncoder, trainingSet,
modulesParams=None):
"""
@param wordEncoder
@param actionEncoder
@param trainingSet: A module containing the trainingData, all of
its categories and the inputIdx dict that maps each index
in categories to an input name.
"""
super(FeedbackModel, self).__init__(wordEncoder, actionEncoder,
trainingSet, modulesParams)
self.initModules(trainingSet.categories, trainingSet.inputIdx)
self.structure = {
'wordInput': 'wordEnc',
'wordEnc': 'wordSP',
'wordSP': 'wordTM',
'wordTM': 'generalSP',
###
'actionInput': 'actionEnc',
'actionEnc': 'actionSP',
'actionSP': 'actionTM',
'actionTM': 'generalSP',
###
'generalSP': 'generalTM',
'generalTM': None
}
self.modules = {
'generalTM': self.generalTM,
#'generalSP': self.generalSP,
'wordTM': self.wordTM,
'wordSP': self.wordSP,
'wordEnc': self.wordEncoder,
'actionTM': self.actionTM,
'actionSP': self.actionSP,
'actionEnc': self.actionEncoder
}
#self.layer = Layer(self.structure, self.modules, self.classifier)
def initModules(self, categories, inputIdx):
modulesNames = {'wordSP', 'wordTM', 'actionSP', 'actionTM',
'generalTM'}
if (self.modulesParams is not None) and\
(set(self.modulesParams) == modulesNames):
self.modulesParams['wordSP'].update(self.defaultWordSPParams)
self.modulesParams['wordTM'].update(self.defaultWordTMParams)
self.modulesParams['actionSP'].update(self.defaultActionSPParams)
self.modulesParams['actionTM'].update(self.defaultActionTMParams)
self.wordSP = SpatialPooler(**self.modulesParams['wordSP'])
self.wordTM = TemporalMemory(**self.modulesParams['wordTM'])
self.actionSP = SpatialPooler(**self.modulesParams['actionSP'])
self.actionTM = TemporalMemory(**self.modulesParams['actionTM'])
defaultGeneralTMParams = {
'columnDimensions': (2, max(self.wordTM.numberOfCells(),
self.actionTM.numberOfCells())),
'seed': self.tmSeed
}
self.modulesParams['generalTM'].update(defaultGeneralTMParams)
self.generalTM = TemporalMemory(**self.modulesParams['generalTM'])
print("Using external Parameters!")
else:
self.wordSP = SpatialPooler(**self.defaultWordSPParams)
self.wordTM = TemporalMemory(**self.defaultWordTMParams)
self.actionSP = SpatialPooler(**self.defaultActionSPParams)
self.actionTM = TemporalMemory(**self.defaultActionTMParams)
print("External parameters invalid or not found, using"\
" the default ones")
defaultGeneralTMParams = {
'columnDimensions': (2, max(self.wordTM.numberOfCells(),
self.actionTM.numberOfCells())),
'seed': self.tmSeed
}
self.generalTM = TemporalMemory(**defaultGeneralTMParams)
self.classifier = CLAClassifierCond(
steps=[1, 2, 3],
alpha=0.1,
actValueAlpha=0.3,
verbosity=0
)
self.startPointOverlap = CommonOverlap('==', 1,
self.actionTM.columnDimensions, threshold=0.5)
def processInput(self, sentence, actionSeq, wordSDR=None,
actionSDR=None, verbosity=0, learn=True):
if wordSDR is None:
wordSDR = numpy.zeros(self.wordSP.getColumnDimensions(),
dtype=numpy.uint8)
if actionSDR is None:
actionSDR = numpy.zeros(self.actionSP.getColumnDimensions(),
dtype=numpy.uint8)
nCellsFromSentence = self.generalTM.columnDimensions[1]
sentenceActiveCells = set()
actionSeqActiveCells = set()
recordNum = 0
# Feed the words from the sentence to the region 1
for word in sentence:
encodedWord = self.wordEncoder.encode(word)
self.wordSP.compute(encodedWord, learn, wordSDR)
self.wordTM.compute(
set(numpy.where(wordSDR > 0)[0]),
learn
)
region1Predicting = (self.wordTM.predictiveCells != set())
sentenceActiveCells.update(self.wordTM.getActiveCells())
#print("{} - {}".format(word, ))
retVal = self.classifier.compute(
recordNum=recordNum,
patternNZ=self.wordTM.getActiveCells(),
classification={
'bucketIdx': self.wordEncoder.getBucketIndices(word)[0],
'actValue': word
},
learn=learn,
infer=True,
conditionFunc=lambda x: x.endswith("-event")
)
recordNum += 1
bestPredictions = []
for step in retVal:
if step == 'actualValues':
continue
higherProbIndex = numpy.argmax(retVal[step])
bestPredictions.append(
retVal['actualValues'][higherProbIndex]
)
if region1Predicting:
# Feed the sentence to the region 2
self.generalTM.compute(sentenceActiveCells, learn)
generalPrediction = set(self.generalTM.mapCellsToColumns(
self.generalTM.predictiveCells
).keys())
# Normalize predictions so cells stay in the actionTM
# range.
generalPrediction = set([i - nCellsFromSentence
for i in generalPrediction
if i >= nCellsFromSentence])
# columnsPrediction = numpy.zeros(
# self.actionSP.getNumColumns(),
# dtype=numpy.uint8
# )
# columnsPrediction[self.actionTM.mapCellsToColumns(
# generalPrediction).keys()] = 1
# self.startPointOverlap.updateCounts(columnsPrediction)
#
# if len(actionSeq) <= 0:
#
# assert region1Predicting, "Region 1 is not predicting, consider "\
# "training the model for a longer time"
# predictedValues = []
#
# firstColumns = numpy.where(numpy.bitwise_and(columnsPrediction > 0,
# self.startPointOverlap.commonElements))
#
# predictedEnc = numpy.zeros(self.actionEncoder.getWidth(),
# dtype=numpy.uint8)
# predictedEnc[
# [self.actionSP._mapColumn(col) for col in firstColumns]] = 1
# predictedValues.append(self.actionEncoder.decode(predictedEnc))
#
# print(firstColumns)
#
# self.actionTM.predictiveCells.update(generalPrediction)
# self.actionTM.compute(firstColumns, learn)
#
# predictedColumns = self.actionTM.mapCellsToColumns(
# self.actionTM.predictiveCells).keys()[0]
for action in actionSeq:
encodedAction = self.actionEncoder.encode(action)
# Use the predicted cells from region 2 to bias the
# activity of cells in region 1.
if region1Predicting:
self.actionTM.predictiveCells.update(generalPrediction)
self.actionSP.compute(encodedAction, learn, actionSDR)
self.actionTM.compute(
set(numpy.where(actionSDR > 0)[0]),
learn
)
actionActiveCells = [i + nCellsFromSentence for i in
self.actionTM.getActiveCells()]
actionSeqActiveCells.update(actionActiveCells)
self.classifier.compute(
recordNum=recordNum,
patternNZ=actionActiveCells,
classification={
'bucketIdx': self.wordEncoder.getWidth() +
self.actionEncoder.getBucketIndices(action)[0],
'actValue': action
},
learn=learn,
infer=True,
conditionFunc=lambda x: x.endswith("-event")
)
recordNum += 1
if region1Predicting:
self.generalTM.compute(
actionSeqActiveCells,
True
)
if verbosity > 0:
print('Best Predictions: ' + str(bestPredictions))
if verbosity > 3:
print(" | CLAClassifier best predictions for step1: ")
top = sorted(retVal[1].tolist(), reverse=True)[:3]
for prob in top:
probIndex = retVal[1].tolist().index(prob)
print(str(retVal['actualValues'][probIndex]) +
" - " + str(prob))
print(" | CLAClassifier best predictions for step2: ")
top = sorted(retVal[2].tolist(), reverse=True)[:3]
for prob in top:
probIndex = retVal[2].tolist().index(prob)
print(str(retVal['actualValues'][probIndex]) +
" - " + str(prob))
print("")
print("---------------------------------------------------")
print("")
return bestPredictions
def train(self, numIterations, trainingData=None,
maxTime=-1, verbosity=0):
"""
@param numIterations
@param trainingData
@param maxTime: (default: -1) Training stops if maxTime (in
minutes) is exceeded. Note that this may interrupt an
ongoing training iteration. -1 means no time restriction.
@param verbosity: (default: 0) How verbose to be about the
process. 0 doesn't print anything.
"""
startTime = time.time()
maxTimeReached = False
recordNum = 0
if trainingData is None:
trainingData = self.trainingData
wordSDR = numpy.zeros(self.wordSP.getColumnDimensions(),
dtype=numpy.uint8)
actionSDR = numpy.zeros(self.actionSP.getColumnDimensions(),
dtype=numpy.uint8)
#generalSDR = numpy.zeros(self.generalSP.getColumnDimensions(),
# dtype=numpy.uint8)
generalInput = numpy.zeros(self.generalTM.numberOfColumns(),
dtype=numpy.uint8)
for iteration in xrange(numIterations):
print("Iteration " + str(iteration))
for sentence, actionSeq in trainingData:
self.processInput(sentence, actionSeq, wordSDR, actionSDR)
self.reset()
recordNum += 1
if maxTime > 0:
elapsedMinutes = (time.time() - startTime) * (1.0 / 60.0)
if elapsedMinutes > maxTime:
maxTimeReached = True
print("maxTime reached, training stoped at iteration "\
"{}!".format(self.iterationsTrained))
break
if maxTimeReached:
break
self.iterationsTrained += 1
def inputSentence(self, sentence, verbosity=1, learn=False):
return self.processInput(sentence, [], verbosity=verbosity, learn=learn)
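# Hedged usage sketch (assumes hypothetical wordEncoder/actionEncoder objects
# and a trainingSet module shaped as described in __init__'s docstring; this
# is an illustration, not upstream API):
#
#     model = FeedbackModel(wordEncoder, actionEncoder, trainingSet)
#     model.train(numIterations=10, maxTime=30, verbosity=1)
#     predictions = model.inputSentence(['bring', 'the', 'ball'], verbosity=1)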
|
larvasapiens/htm-teul
|
Learning/LearningModels/FeedbackModel.py
|
Python
|
gpl-2.0
| 13,193
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2001 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# gen/plug/docgen/fontscale.py
"""
Provide a rough estimate of the width of a text string.
"""
SWISS = [
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.278, 0.278, 0.355, 0.556, 0.556, 0.889, 0.667, 0.191,
0.333, 0.333, 0.389, 0.584, 0.278, 0.333, 0.278, 0.278, 0.556, 0.556,
0.556, 0.556, 0.556, 0.556, 0.556, 0.556, 0.556, 0.556, 0.278, 0.278,
0.584, 0.584, 0.584, 0.556, 1.015, 0.667, 0.667, 0.722, 0.722, 0.667,
0.611, 0.778, 0.722, 0.278, 0.500, 0.667, 0.556, 0.833, 0.722, 0.778,
0.667, 0.778, 0.722, 0.667, 0.611, 0.722, 0.667, 0.944, 0.667, 0.667,
0.611, 0.278, 0.278, 0.278, 0.469, 0.556, 0.333, 0.556, 0.556, 0.500,
0.556, 0.556, 0.278, 0.556, 0.556, 0.222, 0.222, 0.500, 0.222, 0.833,
0.556, 0.556, 0.556, 0.556, 0.333, 0.500, 0.278, 0.556, 0.500, 0.722,
0.500, 0.500, 0.500, 0.334, 0.260, 0.334, 0.584, 0.350, 0.556, 0.350,
0.222, 0.556, 0.333, 1.000, 0.556, 0.556, 0.333, 1.000, 0.667, 0.333,
1.000, 0.350, 0.611, 0.350, 0.350, 0.222, 0.222, 0.333, 0.333, 0.350,
0.556, 1.000, 0.333, 1.000, 0.500, 0.333, 0.944, 0.350, 0.500, 0.667,
0.278, 0.333, 0.556, 0.556, 0.556, 0.556, 0.260, 0.556, 0.333, 0.737,
0.370, 0.556, 0.584, 0.333, 0.737, 0.333, 0.400, 0.584, 0.333, 0.333,
0.333, 0.556, 0.537, 0.278, 0.333, 0.333, 0.365, 0.556, 0.834, 0.834,
0.834, 0.611, 0.667, 0.667, 0.667, 0.667, 0.667, 0.667, 1.000, 0.722,
0.667, 0.667, 0.667, 0.667, 0.278, 0.278, 0.278, 0.278, 0.722, 0.722,
0.778, 0.778, 0.778, 0.778, 0.778, 0.584, 0.778, 0.722, 0.722, 0.722,
0.722, 0.667, 0.667, 0.611, 0.556, 0.556, 0.556, 0.556, 0.556, 0.556,
0.889, 0.500, 0.556, 0.556, 0.556, 0.556, 0.278, 0.278, 0.278, 0.278,
0.556, 0.556, 0.556, 0.556, 0.556, 0.556, 0.556, 0.584, 0.611, 0.556,
0.556, 0.556, 0.556, 0.500, 0.556, 0.500]
SWISS_B = [
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.278, 0.333, 0.474, 0.556, 0.556, 0.889, 0.722, 0.238,
0.333, 0.333, 0.389, 0.584, 0.278, 0.333, 0.278, 0.278, 0.556, 0.556,
0.556, 0.556, 0.556, 0.556, 0.556, 0.556, 0.556, 0.556, 0.333, 0.333,
0.584, 0.584, 0.584, 0.611, 0.975, 0.722, 0.722, 0.722, 0.722, 0.667,
0.611, 0.778, 0.722, 0.278, 0.556, 0.722, 0.611, 0.833, 0.722, 0.778,
0.667, 0.778, 0.722, 0.667, 0.611, 0.722, 0.667, 0.944, 0.667, 0.667,
0.611, 0.333, 0.278, 0.333, 0.584, 0.556, 0.333, 0.556, 0.611, 0.556,
0.611, 0.556, 0.333, 0.611, 0.611, 0.278, 0.278, 0.556, 0.278, 0.889,
0.611, 0.611, 0.611, 0.611, 0.389, 0.556, 0.333, 0.611, 0.556, 0.778,
0.556, 0.556, 0.500, 0.389, 0.280, 0.389, 0.584, 0.350, 0.556, 0.350,
0.278, 0.556, 0.500, 1.000, 0.556, 0.556, 0.333, 1.000, 0.667, 0.333,
1.000, 0.350, 0.611, 0.350, 0.350, 0.278, 0.278, 0.500, 0.500, 0.350,
0.556, 1.000, 0.333, 1.000, 0.556, 0.333, 0.944, 0.350, 0.500, 0.667,
0.278, 0.333, 0.556, 0.556, 0.556, 0.556, 0.280, 0.556, 0.333, 0.737,
0.370, 0.556, 0.584, 0.333, 0.737, 0.333, 0.400, 0.584, 0.333, 0.333,
0.333, 0.611, 0.556, 0.278, 0.333, 0.333, 0.365, 0.556, 0.834, 0.834,
0.834, 0.611, 0.722, 0.722, 0.722, 0.722, 0.722, 0.722, 1.000, 0.722,
0.667, 0.667, 0.667, 0.667, 0.278, 0.278, 0.278, 0.278, 0.722, 0.722,
0.778, 0.778, 0.778, 0.778, 0.778, 0.584, 0.778, 0.722, 0.722, 0.722,
0.722, 0.667, 0.667, 0.611, 0.556, 0.556, 0.556, 0.556, 0.556, 0.556,
0.889, 0.556, 0.556, 0.556, 0.556, 0.556, 0.278, 0.278, 0.278, 0.278,
0.611, 0.611, 0.611, 0.611, 0.611, 0.611, 0.611, 0.584, 0.611, 0.611,
0.611, 0.611, 0.611, 0.556, 0.611, 0.556]
SWISS_I = [
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.278, 0.278, 0.355, 0.556, 0.556, 0.889, 0.667, 0.191,
0.333, 0.333, 0.389, 0.584, 0.278, 0.333, 0.278, 0.278, 0.556, 0.556,
0.556, 0.556, 0.556, 0.556, 0.556, 0.556, 0.556, 0.556, 0.278, 0.278,
0.584, 0.584, 0.584, 0.556, 1.015, 0.667, 0.667, 0.722, 0.722, 0.667,
0.611, 0.778, 0.722, 0.278, 0.500, 0.667, 0.556, 0.833, 0.722, 0.778,
0.667, 0.778, 0.722, 0.667, 0.611, 0.722, 0.667, 0.944, 0.667, 0.667,
0.611, 0.278, 0.278, 0.278, 0.469, 0.556, 0.333, 0.556, 0.556, 0.500,
0.556, 0.556, 0.278, 0.556, 0.556, 0.222, 0.222, 0.500, 0.222, 0.833,
0.556, 0.556, 0.556, 0.556, 0.333, 0.500, 0.278, 0.556, 0.500, 0.722,
0.500, 0.500, 0.500, 0.334, 0.260, 0.334, 0.584, 0.350, 0.556, 0.350,
0.222, 0.556, 0.333, 1.000, 0.556, 0.556, 0.333, 1.000, 0.667, 0.333,
1.000, 0.350, 0.611, 0.350, 0.350, 0.222, 0.222, 0.333, 0.333, 0.350,
0.556, 1.000, 0.333, 1.000, 0.500, 0.333, 0.944, 0.350, 0.500, 0.667,
0.278, 0.333, 0.556, 0.556, 0.556, 0.556, 0.260, 0.556, 0.333, 0.737,
0.370, 0.556, 0.584, 0.333, 0.737, 0.333, 0.400, 0.584, 0.333, 0.333,
0.333, 0.556, 0.537, 0.278, 0.333, 0.333, 0.365, 0.556, 0.834, 0.834,
0.834, 0.611, 0.667, 0.667, 0.667, 0.667, 0.667, 0.667, 1.000, 0.722,
0.667, 0.667, 0.667, 0.667, 0.278, 0.278, 0.278, 0.278, 0.722, 0.722,
0.778, 0.778, 0.778, 0.778, 0.778, 0.584, 0.778, 0.722, 0.722, 0.722,
0.722, 0.667, 0.667, 0.611, 0.556, 0.556, 0.556, 0.556, 0.556, 0.556,
0.889, 0.500, 0.556, 0.556, 0.556, 0.556, 0.278, 0.278, 0.278, 0.278,
0.556, 0.556, 0.556, 0.556, 0.556, 0.556, 0.556, 0.584, 0.611, 0.556,
0.556, 0.556, 0.556, 0.500, 0.556, 0.500]
SWISS_BI = [
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.278, 0.333, 0.474, 0.556, 0.556, 0.889, 0.722, 0.238,
0.333, 0.333, 0.389, 0.584, 0.278, 0.333, 0.278, 0.278, 0.556, 0.556,
0.556, 0.556, 0.556, 0.556, 0.556, 0.556, 0.556, 0.556, 0.333, 0.333,
0.584, 0.584, 0.584, 0.611, 0.975, 0.722, 0.722, 0.722, 0.722, 0.667,
0.611, 0.778, 0.722, 0.278, 0.556, 0.722, 0.611, 0.833, 0.722, 0.778,
0.667, 0.778, 0.722, 0.667, 0.611, 0.722, 0.667, 0.944, 0.667, 0.667,
0.611, 0.333, 0.278, 0.333, 0.584, 0.556, 0.333, 0.556, 0.611, 0.556,
0.611, 0.556, 0.333, 0.611, 0.611, 0.278, 0.278, 0.556, 0.278, 0.889,
0.611, 0.611, 0.611, 0.611, 0.389, 0.556, 0.333, 0.611, 0.556, 0.778,
0.556, 0.556, 0.500, 0.389, 0.280, 0.389, 0.584, 0.350, 0.556, 0.350,
0.278, 0.556, 0.500, 1.000, 0.556, 0.556, 0.333, 1.000, 0.667, 0.333,
1.000, 0.350, 0.611, 0.350, 0.350, 0.278, 0.278, 0.500, 0.500, 0.350,
0.556, 1.000, 0.333, 1.000, 0.556, 0.333, 0.944, 0.350, 0.500, 0.667,
0.278, 0.333, 0.556, 0.556, 0.556, 0.556, 0.280, 0.556, 0.333, 0.737,
0.370, 0.556, 0.584, 0.333, 0.737, 0.333, 0.400, 0.584, 0.333, 0.333,
0.333, 0.611, 0.556, 0.278, 0.333, 0.333, 0.365, 0.556, 0.834, 0.834,
0.834, 0.611, 0.722, 0.722, 0.722, 0.722, 0.722, 0.722, 1.000, 0.722,
0.667, 0.667, 0.667, 0.667, 0.278, 0.278, 0.278, 0.278, 0.722, 0.722,
0.778, 0.778, 0.778, 0.778, 0.778, 0.584, 0.778, 0.722, 0.722, 0.722,
0.722, 0.667, 0.667, 0.611, 0.556, 0.556, 0.556, 0.556, 0.556, 0.556,
0.889, 0.556, 0.556, 0.556, 0.556, 0.556, 0.278, 0.278, 0.278, 0.278,
0.611, 0.611, 0.611, 0.611, 0.611, 0.611, 0.611, 0.584, 0.611, 0.611,
0.611, 0.611, 0.611, 0.556, 0.611, 0.556]
ROMAN = [
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.250, 0.333, 0.408, 0.500, 0.500, 0.833, 0.778, 0.180,
0.333, 0.333, 0.500, 0.564, 0.250, 0.333, 0.250, 0.278, 0.500, 0.500,
0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.278, 0.278,
0.564, 0.564, 0.564, 0.444, 0.921, 0.722, 0.667, 0.667, 0.722, 0.611,
0.556, 0.722, 0.722, 0.333, 0.389, 0.722, 0.611, 0.889, 0.722, 0.722,
0.556, 0.722, 0.667, 0.556, 0.611, 0.722, 0.722, 0.944, 0.722, 0.722,
0.611, 0.333, 0.278, 0.333, 0.469, 0.500, 0.333, 0.444, 0.500, 0.444,
0.500, 0.444, 0.333, 0.500, 0.500, 0.278, 0.278, 0.500, 0.278, 0.778,
0.500, 0.500, 0.500, 0.500, 0.333, 0.389, 0.278, 0.500, 0.500, 0.722,
0.500, 0.500, 0.444, 0.480, 0.200, 0.480, 0.541, 0.350, 0.500, 0.350,
0.333, 0.500, 0.444, 1.000, 0.500, 0.500, 0.333, 1.000, 0.556, 0.333,
0.889, 0.350, 0.611, 0.350, 0.350, 0.333, 0.333, 0.444, 0.444, 0.350,
0.500, 1.000, 0.333, 0.980, 0.389, 0.333, 0.722, 0.350, 0.444, 0.722,
0.250, 0.333, 0.500, 0.500, 0.500, 0.500, 0.200, 0.500, 0.333, 0.760,
0.276, 0.500, 0.564, 0.333, 0.760, 0.333, 0.400, 0.564, 0.300, 0.300,
0.333, 0.500, 0.453, 0.250, 0.333, 0.300, 0.310, 0.500, 0.750, 0.750,
0.750, 0.444, 0.722, 0.722, 0.722, 0.722, 0.722, 0.722, 0.889, 0.667,
0.611, 0.611, 0.611, 0.611, 0.333, 0.333, 0.333, 0.333, 0.722, 0.722,
0.722, 0.722, 0.722, 0.722, 0.722, 0.564, 0.722, 0.722, 0.722, 0.722,
0.722, 0.722, 0.556, 0.500, 0.444, 0.444, 0.444, 0.444, 0.444, 0.444,
0.667, 0.444, 0.444, 0.444, 0.444, 0.444, 0.278, 0.278, 0.278, 0.278,
0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.564, 0.500, 0.500,
0.500, 0.500, 0.500, 0.500, 0.500, 0.500]
ROMAN_B = [
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.250, 0.333, 0.555, 0.500, 0.500, 1.000, 0.833, 0.278,
0.333, 0.333, 0.500, 0.570, 0.250, 0.333, 0.250, 0.278, 0.500, 0.500,
0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.333, 0.333,
0.570, 0.570, 0.570, 0.500, 0.930, 0.722, 0.667, 0.722, 0.722, 0.667,
0.611, 0.778, 0.778, 0.389, 0.500, 0.778, 0.667, 0.944, 0.722, 0.778,
0.611, 0.778, 0.722, 0.556, 0.667, 0.722, 0.722, 1.000, 0.722, 0.722,
0.667, 0.333, 0.278, 0.333, 0.581, 0.500, 0.333, 0.500, 0.556, 0.444,
0.556, 0.444, 0.333, 0.500, 0.556, 0.278, 0.333, 0.556, 0.278, 0.833,
0.556, 0.500, 0.556, 0.556, 0.444, 0.389, 0.333, 0.556, 0.500, 0.722,
0.500, 0.500, 0.444, 0.394, 0.220, 0.394, 0.520, 0.350, 0.500, 0.350,
0.333, 0.500, 0.500, 1.000, 0.500, 0.500, 0.333, 1.000, 0.556, 0.333,
1.000, 0.350, 0.667, 0.350, 0.350, 0.333, 0.333, 0.500, 0.500, 0.350,
0.500, 1.000, 0.333, 1.000, 0.389, 0.333, 0.722, 0.350, 0.444, 0.722,
0.250, 0.333, 0.500, 0.500, 0.500, 0.500, 0.220, 0.500, 0.333, 0.747,
0.300, 0.500, 0.570, 0.333, 0.747, 0.333, 0.400, 0.570, 0.300, 0.300,
0.333, 0.556, 0.540, 0.250, 0.333, 0.300, 0.330, 0.500, 0.750, 0.750,
0.750, 0.500, 0.722, 0.722, 0.722, 0.722, 0.722, 0.722, 1.000, 0.722,
0.667, 0.667, 0.667, 0.667, 0.389, 0.389, 0.389, 0.389, 0.722, 0.722,
0.778, 0.778, 0.778, 0.778, 0.778, 0.570, 0.778, 0.722, 0.722, 0.722,
0.722, 0.722, 0.611, 0.556, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500,
0.722, 0.444, 0.444, 0.444, 0.444, 0.444, 0.278, 0.278, 0.278, 0.278,
0.500, 0.556, 0.500, 0.500, 0.500, 0.500, 0.500, 0.570, 0.500, 0.556,
0.556, 0.556, 0.556, 0.500, 0.556, 0.500]
ROMAN_I = [
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.250, 0.333, 0.420, 0.500, 0.500, 0.833, 0.778, 0.214,
0.333, 0.333, 0.500, 0.675, 0.250, 0.333, 0.250, 0.278, 0.500, 0.500,
0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.333, 0.333,
0.675, 0.675, 0.675, 0.500, 0.920, 0.611, 0.611, 0.667, 0.722, 0.611,
0.611, 0.722, 0.722, 0.333, 0.444, 0.667, 0.556, 0.833, 0.667, 0.722,
0.611, 0.722, 0.611, 0.500, 0.556, 0.722, 0.611, 0.833, 0.611, 0.556,
0.556, 0.389, 0.278, 0.389, 0.422, 0.500, 0.333, 0.500, 0.500, 0.444,
0.500, 0.444, 0.278, 0.500, 0.500, 0.278, 0.278, 0.444, 0.278, 0.722,
0.500, 0.500, 0.500, 0.500, 0.389, 0.389, 0.278, 0.500, 0.444, 0.667,
0.444, 0.444, 0.389, 0.400, 0.275, 0.400, 0.541, 0.350, 0.500, 0.350,
0.333, 0.500, 0.556, 0.889, 0.500, 0.500, 0.333, 1.000, 0.500, 0.333,
0.944, 0.350, 0.556, 0.350, 0.350, 0.333, 0.333, 0.556, 0.556, 0.350,
0.500, 0.889, 0.333, 0.980, 0.389, 0.333, 0.667, 0.350, 0.389, 0.556,
0.250, 0.389, 0.500, 0.500, 0.500, 0.500, 0.275, 0.500, 0.333, 0.760,
0.276, 0.500, 0.675, 0.333, 0.760, 0.333, 0.400, 0.675, 0.300, 0.300,
0.333, 0.500, 0.523, 0.250, 0.333, 0.300, 0.310, 0.500, 0.750, 0.750,
0.750, 0.500, 0.611, 0.611, 0.611, 0.611, 0.611, 0.611, 0.889, 0.667,
0.611, 0.611, 0.611, 0.611, 0.333, 0.333, 0.333, 0.333, 0.722, 0.667,
0.722, 0.722, 0.722, 0.722, 0.722, 0.675, 0.722, 0.722, 0.722, 0.722,
0.722, 0.556, 0.611, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500,
0.667, 0.444, 0.444, 0.444, 0.444, 0.444, 0.278, 0.278, 0.278, 0.278,
0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.675, 0.500, 0.500,
0.500, 0.500, 0.500, 0.444, 0.500, 0.444]
ROMAN_BI = [
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,
0.000, 0.000, 0.250, 0.389, 0.555, 0.500, 0.500, 0.833, 0.778, 0.278,
0.333, 0.333, 0.500, 0.570, 0.250, 0.333, 0.250, 0.278, 0.500, 0.500,
0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.333, 0.333,
0.570, 0.570, 0.570, 0.500, 0.832, 0.667, 0.667, 0.667, 0.722, 0.667,
0.667, 0.722, 0.778, 0.389, 0.500, 0.667, 0.611, 0.889, 0.722, 0.722,
0.611, 0.722, 0.667, 0.556, 0.611, 0.722, 0.667, 0.889, 0.667, 0.611,
0.611, 0.333, 0.278, 0.333, 0.570, 0.500, 0.333, 0.500, 0.500, 0.444,
0.500, 0.444, 0.333, 0.500, 0.556, 0.278, 0.278, 0.500, 0.278, 0.778,
0.556, 0.500, 0.500, 0.500, 0.389, 0.389, 0.278, 0.556, 0.444, 0.667,
0.500, 0.444, 0.389, 0.348, 0.220, 0.348, 0.570, 0.350, 0.500, 0.350,
0.333, 0.500, 0.500, 1.000, 0.500, 0.500, 0.333, 1.000, 0.556, 0.333,
0.944, 0.350, 0.611, 0.350, 0.350, 0.333, 0.333, 0.500, 0.500, 0.350,
0.500, 1.000, 0.333, 1.000, 0.389, 0.333, 0.722, 0.350, 0.389, 0.611,
0.250, 0.389, 0.500, 0.500, 0.500, 0.500, 0.220, 0.500, 0.333, 0.747,
0.266, 0.500, 0.606, 0.333, 0.747, 0.333, 0.400, 0.570, 0.300, 0.300,
0.333, 0.576, 0.500, 0.250, 0.333, 0.300, 0.300, 0.500, 0.750, 0.750,
0.750, 0.500, 0.667, 0.667, 0.667, 0.667, 0.667, 0.667, 0.944, 0.667,
0.667, 0.667, 0.667, 0.667, 0.389, 0.389, 0.389, 0.389, 0.722, 0.722,
0.722, 0.722, 0.722, 0.722, 0.722, 0.570, 0.722, 0.722, 0.722, 0.722,
0.722, 0.611, 0.611, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500, 0.500,
0.722, 0.444, 0.444, 0.444, 0.444, 0.444, 0.278, 0.278, 0.278, 0.278,
0.500, 0.556, 0.500, 0.500, 0.500, 0.500, 0.500, 0.570, 0.500, 0.556,
0.556, 0.556, 0.556, 0.444, 0.500, 0.444]
FONT_ARRAY = [ [SWISS, SWISS_B, SWISS_I, SWISS_BI ],
[ROMAN, ROMAN_B, ROMAN_I, ROMAN_BI ] ]
#-------------------------------------------------------------------------
#
# string_width
#
#-------------------------------------------------------------------------
def string_width(font, text):
"""
Return the width of a string in the specified font.
"""
## TODO: Would it not make more sense to render onto a pango Layout
## to measure the text width?
i = font.get_type_face()
j = font.get_bold() + font.get_italic()*2
s = font.get_size()
l = FONT_ARRAY[i][j]
r = 0
for c in text:
try:
r = r + l[ord(c)]
except IndexError: # character not in the table; approximate with 'n'
r = r + l[ord('n')]
return (r+1)*s
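# Worked example (values read from the SWISS table above; the font object is
# hypothetical): for a regular Swiss face at size 12, "Hi" measures
# (SWISS[ord('H')] + SWISS[ord('i')] + 1) * 12 = (0.722 + 0.222 + 1) * 12
# = 23.328 units.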
def string_multiline_width(font, text):
max_width = 0
for line in text.splitlines():
width = string_width(font, line)
if width > max_width:
max_width = width
return max_width
def string_trim(font, text, width, ellipses = "..."):
"""
Like string_width, but trims text so that its estimated rendered
width stays <= width. Optionally, appends ellipses (...) on truncation.
"""
i = font.get_type_face()
j = font.get_bold() + font.get_italic()*2
s = font.get_size()
l = FONT_ARRAY[i][j]
ellipses_length = 0
# get length of each letter
for c in ellipses:
try:
ellipses_length += l[ord(c)]
except IndexError: # character not in the table; approximate with 'n'
ellipses_length += l[ord('n')]
# find the part that is < width
retval = ""
sumlen = 0
for c in text:
try:
length = l[ord(c)]
except IndexError: # character not in the table; approximate with 'n'
length = l[ord('n')]
# too long:
if (sumlen + length + 1) * s > width:
if ellipses_length > 0:
# try again with ellipses
retval += c
sumlen += length
break
else:
# return just this so far
return retval
retval += c
sumlen += length
# if exited out the bottom:
if (sumlen + 1) * s <= width:
return text
# too long; try again with ellipses
retval = ""
sumlen = 0
for c in text:
try:
length = l[ord(c)]
except IndexError: # character not in the table; approximate with 'n'
length = l[ord('n')]
if (sumlen + length + 1) * s > width:
return retval
if (sumlen + length + ellipses_length + 1) * s > width:
return retval + ellipses
retval += c
sumlen += length
# should not exit out the bottom!
return text
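# Illustrative behaviour (font object hypothetical): if the whole text fits
# within `width`, string_trim() returns it unchanged; otherwise it returns
# the longest prefix whose estimated width, plus the ellipses, still fits,
# with the ellipses appended.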
|
gramps-project/gramps
|
gramps/gen/plug/docgen/fontscale.py
|
Python
|
gpl-2.0
| 17,840
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for deposit module validators."""
from invenio.testsuite import InvenioTestCase, make_test_suite, run_test_suite
from wtforms.validators import ValidationError
class Field(object):
def __init__(self, old_doi, new_doi):
self.object_data = old_doi
self.data = new_doi
class Form(object):
pass
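# Field and Form above are minimal stand-ins for the wtforms objects the
# validator expects: object_data carries the previously stored DOI and
# data the newly submitted one (inferred from the assertions below).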
LOCAL_DOI_PREFIX = "10.5072"
REMOTE_DOI_PREFIX = "10.7777"
class MintedDOIValidatorTest(InvenioTestCase):
"""Test MitedDOIValidator."""
def test_doi_new(self):
from invenio.modules.deposit.validation_utils import MintedDOIValidator
validator = MintedDOIValidator()
field = Field("", LOCAL_DOI_PREFIX + "/test.77777")
field2 = Field("", REMOTE_DOI_PREFIX + "/test.77777")
form = Form()
self.assertIsNone(validator(form, field))
self.assertIsNone(validator(form, field2))
def test_matching_doi(self):
from invenio.modules.deposit.validation_utils import MintedDOIValidator
validator = MintedDOIValidator()
field = Field(
LOCAL_DOI_PREFIX + "/test.77777",
LOCAL_DOI_PREFIX + "/test.77777")
field2 = Field(
REMOTE_DOI_PREFIX + "/test.77777",
REMOTE_DOI_PREFIX + "/test.77777")
form = Form()
self.assertIsNone(validator(form, field))
self.assertIsNone(validator(form, field2))
def test_different_doi(self):
from invenio.modules.deposit.validation_utils import MintedDOIValidator
validator = MintedDOIValidator()
field = Field(
LOCAL_DOI_PREFIX + "/test.12345",
LOCAL_DOI_PREFIX + "/test.77777")
field2 = Field(
REMOTE_DOI_PREFIX + "/test.12345",
REMOTE_DOI_PREFIX + "/test.77777")
form = Form()
with self.assertRaises(ValidationError):
validator(form, field)
with self.assertRaises(ValidationError):
validator(form, field2)
TEST_SUITE = make_test_suite(MintedDOIValidatorTest)
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
|
zenodo/invenio
|
invenio/modules/deposit/testsuite/test_deposit_validators.py
|
Python
|
gpl-2.0
| 2,861
|
from django.conf import settings
from django.core.management.base import BaseCommand
import datetime
import logging
from calfire_tracker.manager_calfire import RetrieveCalFireCurrentIncidents
logger = logging.getLogger("calfire_tracker")
class Command(BaseCommand):
help = "Scrapes California Wildfires data from CalFire and some Inciweb Pages"
def calfire_current_incidents(self, *args, **options):
task_run = RetrieveCalFireCurrentIncidents()
task_run._init()
self.stdout.write("Finished retrieving CalFire current incidents at %s\n" % str(datetime.datetime.now()))
def handle(self, *args, **options):
self.calfire_current_incidents()
self.stdout.write("Finished this data import at %s\n" % str(datetime.datetime.now()))
|
SCPR/firetracker
|
calfire_tracker/management/commands/scraper_wildfires.py
|
Python
|
gpl-2.0
| 782
|
# A script for a-block'o-code
#Jacob Mickiewicz
from Adafruit_I2C import Adafruit_I2C
def addrFvect(x, y):
if (y < 16) and (x < 8):
return (y << 3 | x)
else:
return 0xff
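# Worked example (inferred from the bit layout above): addrFvect(3, 2) packs
# row y=2 into the upper bits and column x=3 into the low three bits:
# (2 << 3) | 3 == 0b10011 == 19. Out-of-range coordinates yield the sentinel
# 0xff, which I2cLex below maps to lex.noblock.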
def I2cLex(x, y):
address = addrFvect(x,y)
if(address == 0xff):
return lex.noblock
else:
i2caddr = Adafruit_I2C(address)
val = i2caddr.readU8(0)
if (val == -1):
return lex.noblock
else:
return val # lex.reverse_mapping[val]
def enum(*sequential, **named):
enums = dict(zip(sequential, range(len(sequential))), **named)
reverse = dict((value, key) for key, value in enums.iteritems())
enums['reverse_mapping'] = reverse
return type('Enum', (), enums)
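# enum() builds a throwaway class whose attributes map names to consecutive
# ints, plus a reverse_mapping dict for int -> name lookups. For example:
#
#     colors = enum('red', 'green')
#     colors.red                   # 0
#     colors.reverse_mapping[1]    # 'green'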
lex = enum(noblock=0,plus=1,minus=2,one=3,two=4,X=5,assign=6,ifblock=7,lessthan=8)
###################################################################
#start of run
###################################################################
for y in range(1,5):
for x in range(0,4):
z = I2cLex(x,y) # z is the value returned over I2C
print lex.reverse_mapping[z],
print
|
erebus-labs/blocks-o-code
|
dev/BBB/i2cmaster.py
|
Python
|
gpl-2.0
| 1,042
|
# -*- coding: UTF-8 -*-
__revision__ = '$Id$'
# Copyright (c) 2005-2011 Vasco Nunes, Piotr Ożarowski
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
# You may use and distribute this software under the terms of the
# GNU General Public License, version 2 or later
import gutils, movie
import string, re
plugin_name = 'IMDb'
plugin_description = 'Internet Movie Database'
plugin_url = 'www.imdb.com'
plugin_language = _('English')
plugin_author = 'Vasco Nunes, Piotr Ożarowski'
plugin_author_email = 'griffith@griffith.cc'
plugin_version = '1.12'
class Plugin(movie.Movie):
def __init__(self, id):
self.encode = 'iso8859-1'
self.movie_id = id
self.url = "http://imdb.com/title/tt%s" % self.movie_id
def initialize(self):
self.cast_page = self.open_page(url=self.url + '/fullcredits')
self.plot_page = self.open_page(url=self.url + '/plotsummary')
self.comp_page = self.open_page(url=self.url + '/companycredits')
self.tagl_page = self.open_page(url=self.url + '/taglines')
def get_image(self):
tmp = gutils.trim(self.page, 'id="img_primary"', '</a>')
self.image_url = gutils.trim(tmp, 'src="', '"')
def get_o_title(self):
self.o_title = gutils.regextrim(self.page, 'class="title-extra">', '<')
if not self.o_title:
self.o_title = gutils.regextrim(self.page, '<h1>', '([ ]|[&][#][0-9]+[;])<span')
if not self.o_title:
self.o_title = re.sub(' [(].*', '', gutils.trim(self.page, '<title>', '</title>'))
def get_title(self): # same as get_o_title()
self.title = gutils.regextrim(self.page, '<h1>', '([ ]|[&][#][0-9]+[;])<span')
if not self.title:
self.title = re.sub(' [(].*', '', gutils.trim(self.page, '<title>', '</title>'))
def get_director(self):
self.director = ''
parts = re.split('<a href=', gutils.trim(self.cast_page, '>Directed by<', '</table>'))
if len(parts) > 1:
for part in parts[1:]:
director = gutils.trim(part, '>', '<')
self.director = self.director + director + ', '
self.director = self.director[0:len(self.director) - 2]
def get_plot(self):
self.plot = gutils.regextrim(self.page, '<h5>Plot:</h5>', '(</div>|<a href.*)')
self.plot = self.__before_more(self.plot)
elements = string.split(self.plot_page, '<p class="plotpar">')
if len(elements) > 1:
self.plot = self.plot + '\n\n'
elements[0] = ''
for element in elements:
if element != '':
self.plot = self.plot + gutils.strip_tags(gutils.before(element, '</a>')) + '\n\n'
def get_year(self):
self.year = gutils.trim(self.page, '<a href="/year/', '</a>')
self.year = gutils.after(self.year, '">')
def get_runtime(self):
self.runtime = gutils.regextrim(self.page, 'Runtime:<[^>]+>', ' min')
def get_genre(self):
self.genre = gutils.regextrim(self.page, 'Genre[s]*:<[^>]+>', '</div>')
self.genre = self.__before_more(self.genre)
def get_cast(self):
self.cast = ''
self.cast = gutils.trim(self.cast_page, '<table class="cast">', '</table>')
if self.cast == '':
self.cast = gutils.trim(self.page, '<table class="cast">', '</table>')
self.cast = string.replace(self.cast, ' ... ', _(' as '))
self.cast = string.replace(self.cast, '...', _(' as '))
self.cast = string.replace(self.cast, '</tr><tr>', "\n")
self.cast = re.sub('</tr>[ \t]*<tr[ \t]*class="even">', "\n", self.cast)
self.cast = re.sub('</tr>[ \t]*<tr[ \t]*class="odd">', "\n", self.cast)
self.cast = self.__before_more(self.cast)
def get_classification(self):
self.classification = gutils.trim(self.page, '(<a href="/mpaa">MPAA</a>)', '</div>')
self.classification = gutils.trim(self.classification, 'Rated ', ' ')
def get_studio(self):
self.studio = ''
tmp = gutils.regextrim(self.comp_page, 'Production Companies<[^>]+', '</ul>')
tmp = string.split(tmp, 'href="')
for entry in tmp:
entry = gutils.trim(entry, '>', '<')
if entry:
self.studio = self.studio + entry + ', '
if self.studio:
self.studio = self.studio[:-2]
def get_o_site(self):
self.o_site = ''
def get_site(self):
self.site = "http://www.imdb.com/title/tt%s" % self.movie_id
def get_trailer(self):
self.trailer = "http://www.imdb.com/title/tt%s/trailers" % self.movie_id
def get_country(self):
self.country = '<' + gutils.trim(self.page, 'Country:<', '</div>')
self.country = re.sub('[\n]+', '', self.country)
def get_rating(self):
pattern = re.compile('>([0-9]([.][0-9])*)(<[^>]+>)+[/](<[^>]+>)[0-9][0-9]<')
result = pattern.search(self.page)
if result:
self.rating = result.groups()[0]
if self.rating:
try:
self.rating = round(float(self.rating), 0)
except Exception:
self.rating = 0
else:
self.rating = 0
def get_notes(self):
self.notes = ''
language = gutils.regextrim(self.page, 'Language:<[^>]+>', '</div>')
language = gutils.strip_tags(language)
language = re.sub('[\n]+', '', language)
language = re.sub('[ ]+', ' ', language)
language = language.strip()
color = gutils.regextrim(self.page, 'Color:<[^>]+>', '</div>')
color = gutils.strip_tags(color)
color = re.sub('[\n]+', '', color)
color = re.sub('[ ]+', ' ', color)
color = color.strip()
sound = gutils.regextrim(self.page, 'Sound Mix:<[^>]+>', '</div>')
sound = gutils.strip_tags(sound)
sound = re.sub('[\n]+', '', sound)
sound = re.sub('[ ]+', ' ', sound)
sound = sound.strip()
tagline = gutils.regextrim(self.tagl_page, 'Taglines for', 'Related Links')
index = string.rfind(tagline, '</div>')
if index > -1:
taglines = string.split(tagline[index:], '<hr')
tagline = ''
for entry in taglines:
entry = gutils.clean(gutils.after(entry, '>'))
if entry:
tagline = tagline + entry + '\n'
else:
tagline = ''
if len(language)>0:
self.notes = "%s: %s\n" %(_('Language'), language)
if len(sound)>0:
self.notes += "%s: %s\n" %(gutils.strip_tags(_('<b>Audio</b>')), sound)
if len(color)>0:
self.notes += "%s: %s\n" %(_('Color'), color)
if len(tagline)>0:
self.notes += "%s: %s\n" %('Tagline', tagline)
def get_screenplay(self):
self.screenplay = ''
parts = re.split('<a href=', gutils.trim(self.cast_page, '>Writing credits<', '</table>'))
if len(parts) > 1:
for part in parts[1:]:
screenplay = gutils.trim(part, '>', '<')
if screenplay == 'WGA':
continue
screenplay = screenplay.replace(' (written by)', '')
screenplay = screenplay.replace(' and<', '<')
self.screenplay = self.screenplay + screenplay + ', '
if len(self.screenplay) > 2:
self.screenplay = self.screenplay[0:len(self.screenplay) - 2]
def get_cameraman(self):
self.cameraman = ''
tmp = gutils.regextrim(self.cast_page, 'Cinematography by<[^>]+', '</table>')
tmp = string.split(tmp, 'href="')
for entry in tmp:
entry = gutils.trim(entry, '>', '<')
if entry:
self.cameraman = self.cameraman + entry + ', '
if self.cameraman:
self.cameraman = self.cameraman[:-2]
def __before_more(self, data):
for element in ['>See more<', '>more<', '>Full summary<', '>Full synopsis<']:
tmp = string.find(data, element)
if tmp>0:
data = data[:tmp] + '>'
return data
class SearchPlugin(movie.SearchMovie):
PATTERN = re.compile(r"""<A HREF=['"]/title/tt([0-9]+)/["']>(.*?)</LI>""")
PATTERN2 = re.compile(r"""<a href=['"]/title/tt([0-9]+)/["'](.*?)</tr>""")
def __init__(self):
# http://www.imdb.com/List?words=
# finds every title sorted alphabetically; the first results start with a
# quote (episodes from TV series), so popular results do not come first
# http://www.imdb.com/find?more=tt;q=
# finds a whole bunch of results: a search for "Rocky" yields 903 results.
# http://www.imdb.com/find?s=tt;q=
# seems to give the best results: 88 results for "Rocky", popular titles first.
self.original_url_search = 'http://www.imdb.com/find?s=tt;q='
self.translated_url_search = 'http://www.imdb.com/find?s=tt;q='
self.encode = 'iso8859-1'
def search(self,parent_window):
if not self.open_search(parent_window):
return None
tmp_page = gutils.trim(self.page, 'Here are the', '</TABLE>')
if not tmp_page:
has_results = re.match('[(]Displaying [1-9][0-9]* Result[s]*[)]', self.page)
if not has_results:
# nothing or one result found, try another url which looks deeper in the imdb database
# example: Adventures of Falcon -> one result, jumps directly to the movie page
# which isn't supported by this plugin
self.url = 'http://www.imdb.com/find?more=tt;q='
if not self.open_search(parent_window):
return None
self.page = gutils.trim(self.page, '(Displaying', '>Suggestions For Improving Your Results<')
else:
self.page = tmp_page
self.page = self.page.decode('iso-8859-1')
# correction of all &#xxx entities
self.page = gutils.convert_entities(self.page)
return self.page
def get_searches(self):
elements = re.split('<LI>', self.page)
if len(elements) < 2:
elements = string.split(self.page, '<tr>')
if len(elements):
for element in elements[1:]:
match = self.PATTERN2.findall(element)
if len(match):
tmp = re.sub('^[0-9]+[.]', '', gutils.clean(gutils.after(match[0][1], '>')))
self.ids.append(match[0][0])
self.titles.append(tmp)
else:
for element in elements[1:]:
match = self.PATTERN.findall(element)
if len(match):
tmp = gutils.clean(match[0][1])
self.ids.append(match[0][0])
self.titles.append(tmp)
#
# Plugin Test
#
class SearchPluginTest(SearchPlugin):
#
# Configuration for automated tests:
# dict { movie_id -> [ expected result count for original url, expected result count for translated url ] }
#
test_configuration = {
'Rocky Balboa' : [ 25, 25 ],
'Ein glückliches Jahr' : [ 47, 47 ]
}
class PluginTest:
#
# Configuration for automated tests:
# dict { movie_id -> dict { arribute -> value } }
#
# value: * True/False if attribute only should be tested for any value
# * or the expected value
#
test_configuration = {
'0138097' : {
'title' : 'Shakespeare in Love',
'o_title' : 'Shakespeare in Love',
'director' : 'John Madden',
'plot' : True,
'cast' : 'Geoffrey Rush' + _(' as ') + 'Philip Henslowe\n\
Tom Wilkinson' + _(' as ') + 'Hugh Fennyman\n\
Steven O\'Donnell' + _(' as ') + 'Lambert\n\
Tim McMullan' + _(' as ') + 'Frees (as Tim McMullen)\n\
Joseph Fiennes' + _(' as ') + 'Will Shakespeare\n\
Steven Beard' + _(' as ') + 'Makepeace - the Preacher\n\
Antony Sher' + _(' as ') + 'Dr. Moth\n\
Patrick Barlow' + _(' as ') + 'Will Kempe\n\
Martin Clunes' + _(' as ') + 'Richard Burbage\n\
Sandra Reinton' + _(' as ') + 'Rosaline\n\
Simon Callow' + _(' as ') + 'Tilney - Master of the Revels\n\
Judi Dench' + _(' as ') + 'Queen Elizabeth\n\
Bridget McConnell' + _(' as ') + 'Lady in Waiting (as Bridget McConnel)\n\
Georgie Glen' + _(' as ') + 'Lady in Waiting\n\
Nicholas Boulton' + _(' as ') + 'Henry Condell\n\
Gwyneth Paltrow' + _(' as ') + 'Viola De Lesseps\n\
Imelda Staunton' + _(' as ') + 'Nurse\n\
Colin Firth' + _(' as ') + 'Lord Wessex\n\
Desmond McNamara' + _(' as ') + 'Crier\n\
Barnaby Kay' + _(' as ') + 'Nol\n\
Jim Carter' + _(' as ') + 'Ralph Bashford\n\
Paul Bigley' + _(' as ') + 'Peter - the Stage Manager\n\
Jason Round' + _(' as ') + 'Actor in Tavern\n\
Rupert Farley' + _(' as ') + 'Barman\n\
Adam Barker' + _(' as ') + 'First Auditionee\n\
Joe Roberts' + _(' as ') + 'John Webster\n\
Harry Gostelow' + _(' as ') + 'Second Auditionee\n\
Alan Cody' + _(' as ') + 'Third Auditionee\n\
Mark Williams' + _(' as ') + 'Wabash\n\
David Curtiz' + _(' as ') + 'John Hemmings\n\
Gregor Truter' + _(' as ') + 'James Hemmings\n\
Simon Day' + _(' as ') + 'First Boatman\n\
Jill Baker' + _(' as ') + 'Lady De Lesseps\n\
Amber Glossop' + _(' as ') + 'Scullery Maid\n\
Robin Davies' + _(' as ') + 'Master Plum\n\
Hywel Simons' + _(' as ') + 'Servant\n\
Nicholas Le Prevost' + _(' as ') + 'Sir Robert De Lesseps\n\
Ben Affleck' + _(' as ') + 'Ned Alleyn\n\
Timothy Kightley' + _(' as ') + 'Edward Pope\n\
Mark Saban' + _(' as ') + 'Augustine Philips\n\
Bob Barrett' + _(' as ') + 'George Bryan\n\
Roger Morlidge' + _(' as ') + 'James Armitage\n\
Daniel Brocklebank' + _(' as ') + 'Sam Gosse\n\
Roger Frost' + _(' as ') + 'Second Boatman\n\
Rebecca Charles' + _(' as ') + 'Chambermaid\n\
Richard Gold' + _(' as ') + 'Lord in Waiting\n\
Rachel Clarke' + _(' as ') + 'First Whore\n\
Lucy Speed' + _(' as ') + 'Second Whore\n\
Patricia Potter' + _(' as ') + 'Third Whore\n\
John Ramm' + _(' as ') + 'Makepeace\'s Neighbor\n\
Martin Neely' + _(' as ') + 'Paris / Lady Montague (as Martin Neeley)\n\
The Choir of St. George\'s School in Windsor' + _(' as ') + 'Choir (as The Choir of St. George\'s School, Windsor) rest of cast listed alphabetically:\n\
Jason Canning' + _(' as ') + 'Nobleman (uncredited)\n\
Kelley Costigan' + _(' as ') + 'Theatregoer (uncredited)\n\
Rupert Everett' + _(' as ') + 'Christopher Marlowe (uncredited)\n\
John Inman' + _(' as ') + 'Character player (uncredited)',
'country' : 'USA | UK',
'genre' : 'Comedy | Drama | Romance',
'classification' : 'R',
'studio' : 'Universal Pictures, Miramax Films, Bedford Falls Productions',
'o_site' : False,
'site' : 'http://www.imdb.com/title/tt0138097',
'trailer' : 'http://www.imdb.com/title/tt0138097/trailers',
'year' : 1998,
'notes' : _('Language') + ': English\n'\
+ _('Audio') + ': Dolby Digital\n'\
+ _('Color') + ': Color\n\
Tagline: ...A Comedy About the Greatest Love Story Almost Never Told...\n\
Love is the only inspiration',
'runtime' : 123,
'image' : True,
'rating' : 7,
'screenplay' : 'Marc Norman, Tom Stoppard',
'cameraman' : 'Richard Greatrex',
'barcode' : False
},
}
|
FiloSottile/Griffith-mirror
|
lib/plugins/movie/PluginMovieIMDB.py
|
Python
|
gpl-2.0
| 16,353
|
#/*##########################################################################
# Copyright (C) 2004-2012 European Synchrotron Radiation Facility
#
# This file is part of the PyMca X-ray Fluorescence Toolkit developed at
# the ESRF by the Software group.
#
# This toolkit is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# PyMca is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# PyMca; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# PyMca follows the dual licensing model of Riverbank's PyQt and cannot be
# used as a free plugin for a non-free program.
#
# Please contact the ESRF industrial unit (industry@esrf.fr) if this license
# is a problem for you.
#############################################################################*/
import copy
from PyMca import EnergyTable
from PyMca import Elements
from PyMca.QPeriodicTable import QPeriodicTable
from PyMca import PyMcaQt as qt
DEBUG = 0
QTVERSION = qt.qVersion()
ElementList = Elements.ElementList
__revision__ = "$Revision: 1.12 $"
class PeakButton(qt.QPushButton):
def __init__(self, parent, peak):
qt.QPushButton.__init__(self, parent)
self.peak= peak
font= self.font()
font.setBold(1)
self.setText(peak)
self.setFlat(1)
if QTVERSION < '4.0.0':
self.setToggleButton(0)
self.setSizePolicy(qt.QSizePolicy(qt.QSizePolicy.Expanding, qt.QSizePolicy.Expanding))
self.selected= 0
self.brush= qt.QBrush(qt.QColor(qt.Qt.yellow))
self.connect(self, qt.SIGNAL("clicked()"), self.clickedSlot)
def toggle(self):
self.selected= not self.selected
self.update()
def setSelected(self, b):
self.selected= b
if QTVERSION > '4.0.0':
if b:
role = self.backgroundRole()
palette = self.palette()
palette.setBrush( role,self.brush)
self.setPalette(palette)
else:
role = self.backgroundRole()
palette = self.palette()
palette.setBrush( role, qt.QBrush())
self.setPalette(palette)
self.update()
def isSelected(self):
return self.selected
def clickedSlot(self):
self.toggle()
if QTVERSION < '4.0.0':
self.emit(qt.PYSIGNAL("peakClicked"), (self.peak,))
else:
self.emit(qt.SIGNAL("peakClicked(QString)"), self.peak)
def paintEvent(self, pEvent):
if QTVERSION < '4.0.0':
qt.QPushButton.paintEvent(self, pEvent)
else:
p = qt.QPainter(self)
wr= self.rect()
pr= qt.QRect(wr.left()+1, wr.top()+1, wr.width()-2, wr.height()-2)
if self.selected:
p.fillRect(pr, self.brush)
p.setPen(qt.Qt.black)
p.drawRoundRect(pr)
p.end()
qt.QPushButton.paintEvent(self, pEvent)
def drawButton(self, p):
wr= self.rect()
pr= qt.QRect(wr.left()+1, wr.top()+1, wr.width()-2, wr.height()-2)
if self.selected:
p.fillRect(pr, self.brush)
qt.QPushButton.drawButtonLabel(self, p)
p.setPen(qt.Qt.black)
p.drawRoundRect(pr)
class PeakButtonList(qt.QWidget):
def __init__(self, parent=None, name="PeakButtonList",
peaklist=['K','Ka','Kb','L','L1','L2','L3','M'],
fl=0):
qt.QWidget.__init__(self,parent)
self.peaklist = peaklist
if QTVERSION < '4.0.0':
layout= qt.QHBoxLayout(self, 0, 5)
else:
layout= qt.QHBoxLayout(self)
layout.setMargin(0)
layout.setSpacing(5)
layout.addStretch(2)
self.buttondict={}
for key in peaklist:
self.buttondict[key] = PeakButton(self, key)
layout.addWidget(self.buttondict[key])
if QTVERSION < '4.0.0':
self.connect(self.buttondict[key],
qt.PYSIGNAL("peakClicked"), self.__selection)
else:
self.connect(self.buttondict[key],
qt.SIGNAL("peakClicked(QString)"), self.__selection)
layout.addStretch(1)
#Reset
self.resetBut = qt.QPushButton(self)
self.resetBut.setText("Reset")
layout.addWidget(self.resetBut)
self.connect(self.resetBut,qt.SIGNAL('clicked()'),self.__resetBut)
layout.addStretch(2)
def __resetBut(self):
for key in self.peaklist:
self.buttondict[key].setSelected(0)
if QTVERSION < '4.0.0':
self.emit(qt.PYSIGNAL('selectionChanged'),([],))
else:
self.emit(qt.SIGNAL('selectionChanged'),([]))
def __selection(self, peak):
selection= []
for key in self.peaklist:
if self.buttondict[key].isSelected():
selection.append(key)
if QTVERSION < '4.0.0':
self.emit(qt.PYSIGNAL("selectionChanged"), (selection,))
else:
self.emit(qt.SIGNAL("selectionChanged"), (selection))
def setSelection(self, selection=[]):
for key in self.peaklist:
if key in selection:
self.buttondict[key].setSelected(1)
else: self.buttondict[key].setSelected(0)
def setDisabled(self,selection=[]):
for key in self.peaklist:
if key in selection:
self.buttondict[key].setEnabled(0)
else: self.buttondict[key].setEnabled(1)
class FitPeakSelect(qt.QWidget):
def __init__(self, parent=None, name="FitPeakSelect",peakdict = {}, fl=0, energyTable = None):
qt.QWidget.__init__(self,parent)
if QTVERSION < '4.0.0':
self.setSizePolicy(qt.QSizePolicy(qt.QSizePolicy.Minimum,
qt.QSizePolicy.Minimum))
layout=qt.QVBoxLayout(self)
layout.setMargin(0)
layout.setSpacing(10)
hbox = qt.QWidget(self)
hboxLayout = qt.QHBoxLayout(hbox)
hboxLayout.setMargin(0)
hboxLayout.setSpacing(20)
hboxLayout.addWidget(qt.HorizontalSpacer(hbox))
l1=MyQLabel(hbox, bold=True, color=qt.QColor(0,0,0))
hboxLayout.addWidget(l1)
self.energyValue = None
if energyTable is not None:
text = '<b><nobr>Excitation Energy (keV)</nobr></b>'
l1.setFixedWidth(l1.fontMetrics().width("##"+text+"####"))
l1.setText(text)
self.energyTable = energyTable
add = 0
self.energy = MyQLabel(hbox)
hboxLayout.addWidget(self.energy)
self.energy.setFixedWidth(self.energy.fontMetrics().width('########.###'))
self.energy.setAlignment(qt.Qt.AlignLeft)
#self.energy.setForegroundColor(qt.Qt.red)
else:
l1.setText('<b><nobr>Excitation Energy (keV)</nobr></b>')
self.energyTable = EnergyTable.EnergyTable(self)
add = 1
self.energy = qt.QLineEdit(hbox)
hboxLayout.addWidget(self.energy)
self.energy.setFixedWidth(self.energy.fontMetrics().width('########.###'))
self.energyButton = qt.QPushButton(hbox)
hboxLayout.addWidget(self.energyButton)
self.energyButton.setText("Update")
self.connect(self.energyButton, qt.SIGNAL('clicked()'),
self._energyClicked)
hboxLayout.addWidget(qt.HorizontalSpacer(hbox))
layout.addSpacing(20)
layout.addWidget(hbox)
self.table = QPeriodicTable(self)
line= qt.QFrame(self)
line.setFrameShape(qt.QFrame.HLine)
line.setFrameShadow(qt.QFrame.Sunken)
self.peaks = PeakButtonList(self)
self.peaks.setDisabled(['K','Ka','Kb','L','L1','L2','L3','M'])
if QTVERSION < '4.0.0':
self.connect(self.energyTable, qt.PYSIGNAL("EnergyTableSignal"),
self._energyTableAction)
self.connect(self.table, qt.PYSIGNAL("elementClicked"),
self.elementClicked)
self.connect(self.peaks, qt.PYSIGNAL("selectionChanged"),
self.peakSelectionChanged)
else:
self.connect(self.energyTable, qt.SIGNAL("EnergyTableSignal"),
self._energyTableAction)
self.connect(self.table, qt.SIGNAL("elementClicked"),
self.elementClicked)
self.connect(self.peaks, qt.SIGNAL("selectionChanged"),
self.peakSelectionChanged)
#Reset All
self.resetAllButton = qt.QPushButton(self.peaks)
palette = qt.QPalette(self.resetAllButton.palette())
role = self.resetAllButton.foregroundRole()
palette.setColor(role, qt.Qt.red)
self.resetAllButton.setPalette(palette)
self.resetAllButton.setText("Reset All")
self.peaks.layout().addWidget(self.resetAllButton)
self.connect(self.resetAllButton, qt.SIGNAL("clicked()"),
self.__resetAll)
layout.addWidget(self.table)
layout.addWidget(line)
layout.addWidget(self.peaks)
if add:layout.addWidget(self.energyTable)
layout.addStretch(1)
self.current= None
self.setSelection(peakdict)
def __resetAll(self):
msg=qt.QMessageBox.warning( self, "Clear selection",
"Do you want to reset the selection for all elements?",
qt.QMessageBox.Yes,qt.QMessageBox.No)
if msg == qt.QMessageBox.No:
return
self.peakdict = {}
self.table.setSelection(list(self.peakdict.keys()))
self.peaks.setSelection([])
self.peakSelectionChanged([])
def __getZ(self,element):
return ElementList.index(element) + 1
def setSelection(self,peakdict):
self.peakdict = {}
self.peakdict.update(peakdict)
for key in list(self.peakdict.keys()):
if type(self.peakdict[key])!= type([]):
self.peakdict[key]= [ self.peakdict[key] ]
self.table.setSelection(list(self.peakdict.keys()))
def getSelection(self):
ddict={}
for key in list(self.peakdict.keys()):
if len(self.peakdict[key]):
ddict[key]= self.peakdict[key]
return ddict
def peakSelectionChanged(self,selection):
if self.current is None: return
if type(selection) != type([]):
selection=selection.list
self.peakdict[self.current] = selection
if len(self.peakdict[self.current]):
self.table.setElementSelected(self.current,1)
else:
self.table.setElementSelected(self.current,0)
sel= self.getSelection()
sel['current'] = self.current
if QTVERSION < '4.0.0':
self.emit(qt.PYSIGNAL("FitPeakSelect"), (sel,))
else:
self.emit(qt.SIGNAL("FitPeakSelect"), (sel))
def elementClicked(self,symbol):
if QTVERSION > '4.0.0':symbol = str(symbol)
if not (symbol in self.peakdict):
self.peakdict[symbol] = []
self.current = symbol
if len(self.peakdict[self.current]):
self.table.setElementSelected(self.current,1)
else:
self.table.setElementSelected(self.current,0)
for ele in list(self.peakdict.keys()):
if ele != symbol:
if not len(self.peakdict[ele]):
del self.peakdict[ele]
sel= self.getSelection()
sel['current'] = self.current
self.setPeaksDisabled(symbol)
if QTVERSION < '4.0.0':
self.emit(qt.PYSIGNAL("FitPeakSelect"), (sel,))
else:
self.emit(qt.SIGNAL("FitPeakSelect"),(sel))
self.peaks.setSelection(self.peakdict[symbol])
def setPeaksDisabled(self,symbol):
z = self.__getZ(symbol)
if (z > 47) and (Elements.getomegam5('Cd') > 0.0):
#we have data available to support that
disabled = []
elif z > 66:
#self.peaks.setDisabled(['Ka','Kb'])
#disabled = ['Ka','Kb']
disabled = []
elif z > 17:
#self.peaks.setDisabled(['Ka','Kb','M'])
#disabled = ['Ka','Kb','M']
disabled = ['M']
elif z > 2:
#self.peaks.setDisabled(['Ka','Kb','L','L1','L2','L3','M'])
#disabled = ['Ka','Kb','L','L1','L2','L3','M']
disabled = ['L','L1','L2','L3','M']
else:
#self.peaks.setDisabled(['K','Ka','Kb','L','L1','L2','L3','M'])
#disabled = ['Ka','Kb','L','L1','L2','L3','M']
disabled = ['Ka', 'Kb','L','L1','L2','L3','M']
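# Summary of the gates above (as read from the branches): Z <= 2 leaves only
# the K peak enabled; 2 < Z <= 17 also enables Ka/Kb; Z > 17 enables the L
# peaks (M stays disabled unless Z > 66, or Z > 47 when M5 emission-rate
# data is available); the excitation-energy check below then disables any
# peak whose binding energy exceeds the current energy.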
ele = symbol
if self.energyValue is not None:
for peak in ['K', 'Ka', 'Kb', 'L','L1','L2','L3','M']:
if peak not in disabled:
if peak == 'L':
if Elements.Element[ele]['binding']['L3'] > self.energyValue:
disabled.append(peak)
elif peak == 'M':
if Elements.Element[ele]['binding']['M5'] > self.energyValue:
disabled.append(peak)
elif peak == 'Ka':
if Elements.Element[ele]['binding']['K'] > self.energyValue:
disabled.append(peak)
elif peak == 'Kb':
if Elements.Element[ele]['binding']['K'] > self.energyValue:
disabled.append(peak)
elif Elements.Element[ele]['binding'][peak] > self.energyValue:
disabled.append(peak)
else:
pass
self.peaks.setDisabled(disabled)
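    # Example (assuming the Elements tables report Z=26 for 'Fe'): for iron
    # the z > 17 branch applies, so only the M lines start out disabled;
    # any line whose binding energy exceeds self.energyValue is then
    # disabled as well.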
def setEnergy(self, energy):
if (energy is None) or (energy == []):
self.energyValue = energy
self.energy.setText("None")
elif energy == "None":
self.energyValue = None
self.energy.setText("None")
elif type(energy) == type([]):
self.energyValue = max(energy)
else:
self.energyValue = energy
self.energy.setText("%.4f" % energy)
self._energyClicked()
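    # Note: when a list of energies is passed, the maximum is taken as the
    # effective excitation energy, but the energy text field is not updated
    # in that branch, so the subsequent _energyClicked() call may re-read
    # the previously displayed value.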
def _energyTableAction(self, ddict):
if DEBUG:
print("_energyTableAction called",)
print("ddict = ",ddict.dict)
elist, wlist, flist, slist= self.energyTable.getParameters()
maxenergy = 0.0
for i in range(len(flist)):
if flist[i]:
if elist[i] is not None:
if wlist[i] > 0.0:
if elist[i] > maxenergy:
maxenergy = elist[i]
if maxenergy == 0.0:maxenergy = None
self.setEnergy(maxenergy)
def _energyClicked(self):
string = str(self.energy.text())
        string = string.replace(" ", "")
if (string != "None") and len(string):
try:
value = float(string)
self.energyValue = value
if False:
self.energyButton.setFocus()
except:
msg=qt.QMessageBox(self.energy)
msg.setIcon(qt.QMessageBox.Critical)
msg.setText("Invalid Float")
msg.exec_loop()
self.energy.setFocus()
else:
self.energyValue = None
if False:
self.energyButton.setFocus()
self.__updateSelection()
def __updateSelection(self):
if self.energyValue is not None:
            for ele in list(self.peakdict.keys()):
                # iterate over a copy: peaks may be removed from the original list
                for peak in list(self.peakdict[ele]):
                    if peak == 'L':
                        if Elements.Element[ele]['binding']['L3'] > self.energyValue:
                            self.peakdict[ele].remove(peak)
                    elif peak == 'M':
                        if Elements.Element[ele]['binding']['M5'] > self.energyValue:
                            self.peakdict[ele].remove(peak)
                    elif peak in ('Ka', 'Kb'):
                        if Elements.Element[ele]['binding']['K'] > self.energyValue:
                            self.peakdict[ele].remove(peak)
                    elif Elements.Element[ele]['binding'][peak] > self.energyValue:
                        self.peakdict[ele].remove(peak)
if ele == self.current:
self.peaks.setSelection(self.peakdict[ele])
self.peakSelectionChanged(self.peakdict[ele])
self.elementClicked(ele)
if not len(self.peakdict[ele]): del self.peakdict[ele]
        ddict = copy.deepcopy(self.peakdict)
        self.setSelection(ddict)
class MyQLineEdit(qt.QLineEdit):
def __init__(self,parent=None,name=None):
qt.QLineEdit.__init__(self,parent,name)
def focusInEvent(self,event):
self.setPaletteBackgroundColor(qt.QColor('yellow'))
def focusOutEvent(self,event):
self.setPaletteBackgroundColor(qt.QColor('white'))
class MyQLabel(qt.QLabel):
def __init__(self,parent=None,name=None,fl=0,bold=True, color= qt.Qt.red):
qt.QLabel.__init__(self,parent)
if QTVERSION <'4.0.0':
self.color = color
self.bold = bold
else:
palette = self.palette()
role = self.foregroundRole()
palette.setColor(role,color)
self.setPalette(palette)
self.font().setBold(bold)
if QTVERSION < '4.0.0':
def drawContents(self, painter):
painter.font().setBold(self.bold)
pal =self.palette()
pal.setColor(qt.QColorGroup.Foreground,self.color)
self.setPalette(pal)
qt.QLabel.drawContents(self,painter)
painter.font().setBold(0)
def testwidget():
import sys
def change(ddict):
print("New selection:",)
print(ddict)
a = qt.QApplication(sys.argv)
qt.QObject.connect(a,qt.SIGNAL("lastWindowClosed()"),a,qt.SLOT("quit()"))
w = qt.QTabWidget()
if QTVERSION < '4.0.0':
f = FitPeakSelect(w)
w.addTab(f, "QPeriodicTable")
qt.QObject.connect(f, qt.PYSIGNAL("FitPeakSelect"), change)
w.show()
a.exec_loop()
else:
f = FitPeakSelect()
w.addTab(f, "QPeriodicTable")
qt.QObject.connect(f, qt.SIGNAL("FitPeakSelect"), change)
w.show()
a.exec_()
if __name__ == "__main__":
testwidget()
|
tonnrueter/pymca_devel
|
PyMca/FitPeakSelect.py
|
Python
|
gpl-2.0
| 19,711
|
from datetime import datetime
from datetime import timedelta
import pytest
from cfme import test_requirements
from cfme.infrastructure.provider.virtualcenter import VMwareProvider
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.blockers import BZ
from cfme.utils.wait import wait_for
pytestmark = [
pytest.mark.tier(3),
test_requirements.c_and_u,
pytest.mark.usefixtures('setup_provider_modscope'),
pytest.mark.provider([VMwareProvider],
scope='module',
required_fields=[(['cap_and_util', 'capandu_vm'], 'cu-24x7')]),
pytest.mark.meta(blockers=[BZ(1635126, forced_streams=['5.10'])])
]
ELEMENTS = ['vm', 'host']
GRAPH_TYPE = ['hourly', 'daily']
@pytest.fixture(scope='module')
def order_data(appliance, provider, enable_candu):
    # Order gap collection data for the past two days for testing
end_date = datetime.now()
start_date = end_date - timedelta(days=2)
view = navigate_to(appliance.server.zone, 'CANDUGapCollection')
view.candugapcollection.fill({'end_date': end_date,
'start_date': start_date})
view.candugapcollection.submit.click()
@pytest.mark.parametrize('graph_type', GRAPH_TYPE)
@pytest.mark.parametrize('element', ELEMENTS)
def test_gap_collection(appliance, provider, element, graph_type, order_data):
""" Test gap collection data
prerequisites:
* C&U enabled appliance
Steps:
* Navigate to Configuration > Diagnostics > Zone Gap Collection Page
* Order old data
* Navigate to VM or Host Utilization page
* Check for Hourly data
* Check for Daily data
Polarion:
assignee: nachandr
casecomponent: CandU
caseimportance: medium
initialEstimate: 1/4h
"""
if element == 'host':
collection = appliance.collections.hosts
for test_host in provider.data['hosts']:
if not test_host.get('test_fleece', False):
continue
element = collection.instantiate(name=test_host.name, provider=provider)
elif element == 'vm':
collection = appliance.provider_based_collection(provider)
element = collection.instantiate('cu-24x7', provider)
date = datetime.now() - timedelta(days=1)
element.wait_candu_data_available(timeout=1200)
view = navigate_to(element, 'candu')
view.options.interval.fill(graph_type.capitalize())
try:
graph = getattr(view, 'vm_cpu')
except AttributeError:
graph = getattr(view.interval_type, 'host_cpu')
assert graph.is_displayed
def refresh():
provider.browser.refresh()
view = navigate_to(element, 'candu')
view.options.interval.fill(graph_type.capitalize())
    # wait, sometimes the graph takes time to load
wait_for(lambda: len(graph.all_legends) > 0,
delay=5, timeout=600, fail_func=refresh)
# check collected data for cpu graph
view.options.calendar.fill(date)
graph_data = 0
for leg in graph.all_legends:
graph.display_legends(leg)
for data in graph.data_for_legends(leg).values():
graph_data += float(data[leg].replace(',', '').replace('%', '').split()[0])
assert graph_data > 0
|
izapolsk/integration_tests
|
cfme/tests/candu/test_gap_collection.py
|
Python
|
gpl-2.0
| 3,239
|
import argparse
import logging
from tracer.extensions.extension import Extension, register_syscall
from tracer.injector import InjectedMemory
class StoreToDict(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if not getattr(namespace, self.dest):
setattr(namespace, self.dest, {})
pair = values.split(':', 2)
if len(pair) != 2:
raise argparse.ArgumentError(self, "must have two values separated by colon, '{}' given".format(values))
val = getattr(namespace, self.dest)
val[pair[0]] = pair[1]
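# Usage sketch for StoreToDict (hypothetical standalone parser; the real
# one is built in ChangeOpenPath.create_options below):
#     parser = argparse.ArgumentParser()
#     parser.add_argument('--replace-path', action=StoreToDict, default={})
#     args = parser.parse_args(['--replace-path', '/etc/passwd:/etc/hosts'])
#     # args.replace_path == {'/etc/passwd': '/etc/hosts'}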
class ChangeOpenPath(Extension):
"""
ChangeOpenPath replaces file in open syscalls
    Every path to replace has to be specified in the replace-path parameter, separated by a colon, i.e.
--replace-path requested:replaced
Example:
Print /etc/hosts instead of /etc/passwd in command cat /etc/passwd
$ tracer -vvvvv -e ./examples/extensions/replace_open_path.py \
--replace-path /etc/passwd:/etc/hosts \
cat /etc/passwd
"""
def create_options(self, parser):
parser.add_argument(
'--replace-path',
action=StoreToDict,
help='Replace path1 with path2 in open syscalls, --replace-path path1:path2',
default={}
)
# TODO: openat
@register_syscall("open", success_only=False)
def change(self, syscall):
paths = syscall.process.tracer.options.replace_path
requested_path = syscall.arguments[0].text
if syscall.result or requested_path not in paths:
return
new_path = paths[requested_path]
logging.info("Replacing path %s with %s", requested_path, new_path)
addr = InjectedMemory(syscall.process, len(new_path))
addr.write(new_path.encode('utf-8') + b'\0')
p = syscall.process.tracer.backend.debugger[syscall.process.pid]
regs = p.getregs()
regs.rdi = addr.addr
p.setregs(regs)
|
trnila/tracer
|
examples/extensions/replace_open_path.py
|
Python
|
gpl-2.0
| 1,988
|
#!/usr/bin/env python
"""
Copyright: (C) 2013 LeafLabs, LLC
License: MIT License
Author: bnewbold
Date: September 2013
Project: SNG Wired Leaf (SNG4)
This is essentially a port of the old packet_tool.py to work through the
daemon (instead of writing raw TCP packets).
It does not handle acquisition modes, HDF5 storage, or UDP capture; for that
use acquire.py and/or proto2bytes (which is the equivalent of udp_tool.py).
See the '-h' usage for more info.
"""
from __future__ import print_function
import argparse
import collections
import os.path
import sys
import time
from daemon_control import *
# map of module indexes
modules = {
'error': 0,
'central': 1,
'sata': 2,
'daq': 3,
'udp': 4,
'gpio': 5,
'ext': 5,
}
# count of registers for each module (used for 'dump' command)
module_len = [
1, # error
8, # central
20, # sata
14, # daq
15, # udp
5, # gpio
]
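# Example: reg_read(modules['daq'], 0) targets register 0 of the DAQ module
# (index 3), and dump('daq') walks its registers 0..13 per module_len above.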
# ========== Helpers =========== #
def ints(data):
"Helper to split a 32bit int into a tuple of 4 bytes"
    return (((data >> 24) & 0xFF),
            ((data >> 16) & 0xFF),
            ((data >> 8) & 0xFF),
            (data & 0xFF))
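# For example, ints(0x0A0B0C0D) returns (10, 11, 12, 13).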
def toint(data):
"Helper to convert 4 bytes into a 32bit int"
l = map(ord, data)
return (l[0] << 24) + (l[1] << 16) + (l[2] << 8) + l[3]
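# Inverse of ints() for a 4-byte string (Python 2 semantics assumed):
# toint('\x0a\x0b\x0c\x0d') == 0x0A0B0C0D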
def repr_data(val):
"Helper to pretty print a tuple of 4 bytes"
i = int(val)
d = ints(i)
h = " ".join("%.2x" % b for b in d).upper()
return "%10d | %s (%3d %3d %3d %3d)" % (i, h, d[0], d[1], d[2], d[3])
def read_request(module, addr):
"""
Helper to execute a single register read transaction, in a blocking
manner.
"""
reply = do_control_cmd(reg_read(module, addr))
if reply is None or reply.type != 255: # TODO: 255 == REG_IO
raise Exception("%s\nNo reply! Is daemon running?" % reply)
return reply.reg_io.val
def write_request(module, addr, data):
"""
Helper to execute a single register write transaction, in a blocking
manner.
'data' should be 32bits as an integer
"""
reply = do_control_cmd(reg_write(module, addr, data))
if reply is None or reply.type != 255: # TODO: 255 == REG_IO
raise Exception("%s\nNo reply! Is daemon running?" % reply)
return reply.reg_io.val
def parse_module(raw):
if raw in modules.keys():
module = modules[raw]
else:
module = int(raw)
if not module in modules.values():
raise Exception("Invalid module index: %d" % module)
return module
def parse_value(s):
"""Convert a variety of input strings to a (32bit) integer"""
# try binary values
if s.lower() in ["on", "true", "yes"]:
return 1
if s.lower() in ["off", "false", "no"]:
return 0
if s.startswith('0b'):
# binary
return int(s[2:], 2)
if s.startswith('0x'):
# hex
return int(s[2:], 16)
if len(s.split('.')) == 4:
# ipv4 address?
l = map(int, s.split('.'))
return (l[0] << 24) + (l[1] << 16) + (l[2] << 8) + l[3]
# fall back to int(); if this fails, an exception will be raised
return int(s)
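# Examples: parse_value("on") -> 1, parse_value("0b101") -> 5,
# parse_value("0x10") -> 16, parse_value("192.168.1.1") -> 0xC0A80101.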
def set_channel_list(l):
"""
    Takes a list of 32 (chip, chan) tuples and tells the data node to return
those channel pairings as the 32 "virtual channels" in live-streaming
sub-sample packets.
"""
for i in range(32):
chip = l[i][0] & 0b00011111
chan = l[i][1] & 0b00011111
write_request(modules['daq'], 128+i, (chip << 8) | chan)
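# Example (hypothetical): stream every channel of chip 3 as the 32 virtual
# channels:
#     set_channel_list([(3, chan) for chan in range(32)])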
# ========== Commands =========== #
def ping(delay_sec=0.5):
start = 0
diff = 0
while True:
sys.stdout.write("Ping... ")
sys.stdout.flush()
start = time.time()
try:
read_request(0, 0)
diff = time.time() - start
sys.stdout.write("Pong (%.3fms)\n" % (diff*1000.))
except:
sys.stdout.write("Failed.\n")
sys.stdout.flush()
time.sleep(delay_sec)
def blink(period=0.5, gpio_pin=0):
index = int(gpio_pin) + 8
if index >= 16:
raise Exception("Invalid GPIO pin: %s" % gpio_pin)
on_val = 0x0001 << index
off_val = 0x0000
while True:
write_request(5, 4, on_val)
print("On.")
time.sleep(period/2.0)
write_request(5, 4, off_val)
print("Off.")
time.sleep(period/2.0)
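# For instance, blink(period=1.0, gpio_pin=2) toggles GPIO register bit 10
# (pin 2 + offset 8) once per second: half the period on, half off.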
def dump(module):
module = parse_module(module)
for k in modules.keys():
if modules[k] == module:
print("All registers for '%s' module:" % k)
for addr in range(module_len[module]):
reply_val = read_request(module, addr)
print("Register value at %d, %d: \t%s" % (
module, addr, repr_data(reply_val)))
def do_reg_read(module, addr, verbose=True):
module = parse_module(module)
reply_val = read_request(module, addr)
if verbose:
print("Register value at %d, %d: \t%s" % (
module, addr, repr_data(reply_val)))
def do_reg_write(module, addr, value, verbose=True):
module = parse_module(module)
reply_val = write_request(module, addr, value)
if verbose:
print("Written to %d, %d: \t%s" % (
module, addr, repr_data(reply_val)))
def intan_write(intan_addr, value):
module = modules['daq']
addr = 5
intan_addr = intan_addr & 0b00111111
value = parse_value(value) & 0xFF
cmd = (0x1 << 24) | \
(0xFF << 16) | \
((0b10000000 | intan_addr) << 8) | \
value
print("CMD: %s" % hex(cmd))
reply_val = write_request(module, addr, cmd)
print("Written to %d, %d: \t%s" % (
module, addr, repr_data(reply_val)))
    reply_val = write_request(module, addr, cmd)
print("Written to %d, %d: \t%s" % (
module, addr, repr_data(reply_val)))
print("That means that register %d (zero-indexed) was set to %d (integer) "
"for all attached Intan chips." % (intan_addr, value))
print("(assuming that acquisition was running...)")
def config_subsamples(constant, number):
l = []
if constant == "chip":
l = [(number, chan) for chan in range(32)]
elif constant == "channel":
l = [(chip, number) for chip in range(32)]
else:
raise Exception("Don't know how to hold constant '%s'" % constant)
print("Setting sub-sample channels as:")
print()
print("\tindex\tchip\tchannel")
for i in range(len(l)):
print("\t%3d\t%3d\t%3d" % (i, l[i][0], l[i][1]))
set_channel_list(l)
print("Done.")
# ========== Script/Args =========== #
def main():
parser = argparse.ArgumentParser(
description="Low-level data node register manipulation tool")
subparsers = parser.add_subparsers(title="commands")
# commands with no arguments are instantiated tersely
subparsers.add_parser('ping',
help="continuously ping the data node",
description="Continuously ping the data node. Prints latency as "
"it goes.")\
.set_defaults(func=ping)
subparsers.add_parser('blink',
help="continuously blink an LED",
description="Continuously toggles a GPIO line on the board, which "
"causes an orange LED to blink.")\
.set_defaults(func=blink)
parser_dump = subparsers.add_parser('dump',
help="print all registers for a single module",
description="Print all registers for a single module.")
parser_dump.add_argument("module", type=str)
parser_dump.set_defaults(func=dump)
parser_read = subparsers.add_parser('read',
help="read from a single register value",
description="Read from a single register value.",)
parser_read.add_argument("module", type=str)
parser_read.add_argument("addr", type=int)
parser_read.set_defaults(func=do_reg_read)
parser_write = subparsers.add_parser('write',
help="write to a single register value",
description="Write to a single register value.")
parser_write.add_argument("module", type=str)
parser_write.add_argument("addr", type=int)
parser_write.add_argument("value", type=int)
parser_write.set_defaults(func=do_reg_write)
parser_intan_write = subparsers.add_parser('intan_write',
help="write to a single register on all Intan chips",
description="Write to a single register on all Intan chips.")
parser_intan_write.add_argument("intan_addr", type=int)
parser_intan_write.add_argument("value", type=str)
parser_intan_write.set_defaults(func=intan_write)
parser_subsamples = subparsers.add_parser('subsamples',
help="assign subsample channels by a chip or per-chip channels",
description="In live sub-sample streaming mode, the 32 'virtual' "
"channels can each individually be configured to point "
"to any of the 1024 regular channels (32 channels for "
"each of 32 chips. This command will configure the "
"virtual channels by either holding the chip number "
"constant (and selecting all 32 channels for "
"that chip) or holding the channel number constant "
"(and selecting that channel across all 32 chips in "
"parallel).")
parser_subsamples.add_argument("--constant",
choices=['chip','channel'],
required=True,
help="what to hold constant")
parser_subsamples.add_argument("number",
type=int,
help="the value for the index being held constant")
parser_subsamples.set_defaults(func=config_subsamples)
args = parser.parse_args()
func_kwargs = args.__dict__.copy()
func_kwargs.pop('func')
args.func(**func_kwargs)
if __name__ == '__main__':
main()
|
leaflabs/leafysd
|
util/debug_tool.py
|
Python
|
gpl-2.0
| 9,832
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
from bpy.types import Panel
from bpy.app.translations import pgettext_iface as iface_
class ModifierButtonsPanel:
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "modifier"
bl_options = {'HIDE_HEADER'}
class DATA_PT_modifiers(ModifierButtonsPanel, Panel):
bl_label = "Modifiers"
def draw(self, context):
layout = self.layout
ob = context.object
layout.operator_menu_enum("object.modifier_add", "type")
for md in ob.modifiers:
box = layout.template_modifier(md)
if box:
# match enum type to our functions, avoids a lookup table.
getattr(self, md.type)(box, ob, md)
    # the md.type enum is (ab)used for a lookup on function names
# ...to avoid lengthy if statements
# so each type must have a function here.
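    # Example: an Armature modifier reports md.type == 'ARMATURE', so
    # draw() above dispatches to the ARMATURE() method defined below.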
def ARMATURE(self, layout, ob, md):
split = layout.split()
col = split.column()
col.label(text="Object:")
col.prop(md, "object", text="")
col.prop(md, "use_deform_preserve_volume")
col = split.column()
col.label(text="Bind To:")
col.prop(md, "use_vertex_groups", text="Vertex Groups")
col.prop(md, "use_bone_envelopes", text="Bone Envelopes")
layout.separator()
split = layout.split()
row = split.row(align=True)
row.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
sub = row.row(align=True)
sub.active = bool(md.vertex_group)
sub.prop(md, "invert_vertex_group", text="", icon='ARROW_LEFTRIGHT')
split.prop(md, "use_multi_modifier")
def ARRAY(self, layout, ob, md):
layout.prop(md, "fit_type")
if md.fit_type == 'FIXED_COUNT':
layout.prop(md, "count")
elif md.fit_type == 'FIT_LENGTH':
layout.prop(md, "fit_length")
elif md.fit_type == 'FIT_CURVE':
layout.prop(md, "curve")
layout.separator()
split = layout.split()
col = split.column()
col.prop(md, "use_constant_offset")
sub = col.column()
sub.active = md.use_constant_offset
sub.prop(md, "constant_offset_displace", text="")
col.separator()
col.prop(md, "use_merge_vertices", text="Merge")
sub = col.column()
sub.active = md.use_merge_vertices
sub.prop(md, "use_merge_vertices_cap", text="First Last")
sub.prop(md, "merge_threshold", text="Distance")
col = split.column()
col.prop(md, "use_relative_offset")
sub = col.column()
sub.active = md.use_relative_offset
sub.prop(md, "relative_offset_displace", text="")
col.separator()
col.prop(md, "use_object_offset")
sub = col.column()
sub.active = md.use_object_offset
sub.prop(md, "offset_object", text="")
layout.separator()
layout.prop(md, "start_cap")
layout.prop(md, "end_cap")
def BEVEL(self, layout, ob, md):
split = layout.split()
col = split.column()
col.prop(md, "width")
col.prop(md, "segments")
col.prop(md, "profile")
col.prop(md, "material")
col = split.column()
col.prop(md, "use_only_vertices")
col.prop(md, "use_clamp_overlap")
col.prop(md, "loop_slide")
layout.label(text="Limit Method:")
layout.row().prop(md, "limit_method", expand=True)
if md.limit_method == 'ANGLE':
layout.prop(md, "angle_limit")
elif md.limit_method == 'VGROUP':
layout.label(text="Vertex Group:")
layout.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
layout.label(text="Width Method:")
layout.row().prop(md, "offset_type", expand=True)
def BOOLEAN(self, layout, ob, md):
split = layout.split()
col = split.column()
col.label(text="Operation:")
col.prop(md, "operation", text="")
col = split.column()
col.label(text="Object:")
col.prop(md, "object", text="")
def BUILD(self, layout, ob, md):
split = layout.split()
col = split.column()
col.prop(md, "frame_start")
col.prop(md, "frame_duration")
col.prop(md, "use_reverse")
col = split.column()
col.prop(md, "use_random_order")
sub = col.column()
sub.active = md.use_random_order
sub.prop(md, "seed")
def MESH_CACHE(self, layout, ob, md):
layout.prop(md, "cache_format")
layout.prop(md, "filepath")
layout.label(text="Evaluation:")
layout.prop(md, "factor", slider=True)
layout.prop(md, "deform_mode")
layout.prop(md, "interpolation")
layout.label(text="Time Mapping:")
row = layout.row()
row.prop(md, "time_mode", expand=True)
row = layout.row()
row.prop(md, "play_mode", expand=True)
if md.play_mode == 'SCENE':
layout.prop(md, "frame_start")
layout.prop(md, "frame_scale")
else:
time_mode = md.time_mode
if time_mode == 'FRAME':
layout.prop(md, "eval_frame")
elif time_mode == 'TIME':
layout.prop(md, "eval_time")
elif time_mode == 'FACTOR':
layout.prop(md, "eval_factor")
layout.label(text="Axis Mapping:")
split = layout.split(percentage=0.5, align=True)
split.alert = (md.forward_axis[-1] == md.up_axis[-1])
split.label("Forward/Up Axis:")
split.prop(md, "forward_axis", text="")
split.prop(md, "up_axis", text="")
split = layout.split(percentage=0.5)
split.label(text="Flip Axis:")
row = split.row()
row.prop(md, "flip_axis")
def CAST(self, layout, ob, md):
split = layout.split(percentage=0.25)
split.label(text="Cast Type:")
split.prop(md, "cast_type", text="")
split = layout.split(percentage=0.25)
col = split.column()
col.prop(md, "use_x")
col.prop(md, "use_y")
col.prop(md, "use_z")
col = split.column()
col.prop(md, "factor")
col.prop(md, "radius")
col.prop(md, "size")
col.prop(md, "use_radius_as_size")
split = layout.split()
col = split.column()
col.label(text="Vertex Group:")
col.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
col = split.column()
col.label(text="Control Object:")
col.prop(md, "object", text="")
if md.object:
col.prop(md, "use_transform")
def CLOTH(self, layout, ob, md):
layout.label(text="Settings are inside the Physics tab")
def COLLISION(self, layout, ob, md):
layout.label(text="Settings are inside the Physics tab")
def CURVE(self, layout, ob, md):
split = layout.split()
col = split.column()
col.label(text="Object:")
col.prop(md, "object", text="")
col = split.column()
col.label(text="Vertex Group:")
col.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
layout.label(text="Deformation Axis:")
layout.row().prop(md, "deform_axis", expand=True)
def DECIMATE(self, layout, ob, md):
decimate_type = md.decimate_type
row = layout.row()
row.prop(md, "decimate_type", expand=True)
if decimate_type == 'COLLAPSE':
has_vgroup = bool(md.vertex_group)
layout.prop(md, "ratio")
split = layout.split()
col = split.column()
row = col.row(align=True)
row.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
row.prop(md, "invert_vertex_group", text="", icon='ARROW_LEFTRIGHT')
layout_info = col
col = split.column()
row = col.row()
row.active = has_vgroup
row.prop(md, "vertex_group_factor")
col.prop(md, "use_collapse_triangulate")
row = col.split(percentage=0.75)
row.prop(md, "use_symmetry")
row.prop(md, "symmetry_axis", text="")
elif decimate_type == 'UNSUBDIV':
layout.prop(md, "iterations")
layout_info = layout
else: # decimate_type == 'DISSOLVE':
layout.prop(md, "angle_limit")
layout.prop(md, "use_dissolve_boundaries")
layout.label("Delimit:")
row = layout.row()
row.prop(md, "delimit")
layout_info = layout
layout_info.label(text=iface_("Faces: %d") % md.face_count, translate=False)
def DISPLACE(self, layout, ob, md):
has_texture = (md.texture is not None)
col = layout.column(align=True)
col.label(text="Texture:")
col.template_ID(md, "texture", new="texture.new")
split = layout.split()
col = split.column(align=True)
col.label(text="Direction:")
col.prop(md, "direction", text="")
col.label(text="Vertex Group:")
col.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
col = split.column(align=True)
col.active = has_texture
col.label(text="Texture Coordinates:")
col.prop(md, "texture_coords", text="")
if md.texture_coords == 'OBJECT':
col.label(text="Object:")
col.prop(md, "texture_coords_object", text="")
elif md.texture_coords == 'UV' and ob.type == 'MESH':
col.label(text="UV Map:")
col.prop_search(md, "uv_layer", ob.data, "uv_textures", text="")
layout.separator()
row = layout.row()
row.prop(md, "mid_level")
row.prop(md, "strength")
def DYNAMIC_PAINT(self, layout, ob, md):
layout.label(text="Settings are inside the Physics tab")
def EDGE_SPLIT(self, layout, ob, md):
split = layout.split()
col = split.column()
col.prop(md, "use_edge_angle", text="Edge Angle")
sub = col.column()
sub.active = md.use_edge_angle
sub.prop(md, "split_angle")
split.prop(md, "use_edge_sharp", text="Sharp Edges")
def EXPLODE(self, layout, ob, md):
split = layout.split()
col = split.column()
col.label(text="Vertex group:")
col.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
sub = col.column()
sub.active = bool(md.vertex_group)
sub.prop(md, "protect")
col.label(text="Particle UV")
col.prop_search(md, "particle_uv", ob.data, "uv_textures", text="")
col = split.column()
col.prop(md, "use_edge_cut")
col.prop(md, "show_unborn")
col.prop(md, "show_alive")
col.prop(md, "show_dead")
col.prop(md, "use_size")
layout.operator("object.explode_refresh", text="Refresh")
def FLUID_SIMULATION(self, layout, ob, md):
layout.label(text="Settings are inside the Physics tab")
def HOOK(self, layout, ob, md):
use_falloff = (md.falloff_type != 'NONE')
split = layout.split()
col = split.column()
col.label(text="Object:")
col.prop(md, "object", text="")
if md.object and md.object.type == 'ARMATURE':
col.label(text="Bone:")
col.prop_search(md, "subtarget", md.object.data, "bones", text="")
col = split.column()
col.label(text="Vertex Group:")
col.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
layout.separator()
row = layout.row(align=True)
if use_falloff:
row.prop(md, "falloff_radius")
row.prop(md, "strength", slider=True)
layout.prop(md, "falloff_type")
col = layout.column()
if use_falloff:
if md.falloff_type == 'CURVE':
col.template_curve_mapping(md, "falloff_curve")
split = layout.split()
col = split.column()
col.prop(md, "use_falloff_uniform")
if ob.mode == 'EDIT':
row = col.row(align=True)
row.operator("object.hook_reset", text="Reset")
row.operator("object.hook_recenter", text="Recenter")
row = layout.row(align=True)
row.operator("object.hook_select", text="Select")
row.operator("object.hook_assign", text="Assign")
def LAPLACIANDEFORM(self, layout, ob, md):
is_bind = md.is_bind
layout.prop(md, "iterations")
row = layout.row()
row.active = not is_bind
row.label(text="Anchors Vertex Group:")
row = layout.row()
row.enabled = not is_bind
row.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
layout.separator()
row = layout.row()
row.enabled = bool(md.vertex_group)
row.operator("object.laplaciandeform_bind", text="Unbind" if is_bind else "Bind")
def LAPLACIANSMOOTH(self, layout, ob, md):
layout.prop(md, "iterations")
split = layout.split(percentage=0.25)
col = split.column()
col.label(text="Axis:")
col.prop(md, "use_x")
col.prop(md, "use_y")
col.prop(md, "use_z")
col = split.column()
col.label(text="Lambda:")
col.prop(md, "lambda_factor", text="Factor")
col.prop(md, "lambda_border", text="Border")
col.separator()
col.prop(md, "use_volume_preserve")
col.prop(md, "use_normalized")
layout.label(text="Vertex Group:")
layout.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
def LATTICE(self, layout, ob, md):
split = layout.split()
col = split.column()
col.label(text="Object:")
col.prop(md, "object", text="")
col = split.column()
col.label(text="Vertex Group:")
col.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
layout.separator()
layout.prop(md, "strength", slider=True)
def MASK(self, layout, ob, md):
split = layout.split()
col = split.column()
col.label(text="Mode:")
col.prop(md, "mode", text="")
col = split.column()
if md.mode == 'ARMATURE':
col.label(text="Armature:")
row = col.row(align=True)
row.prop(md, "armature", text="")
sub = row.row(align=True)
sub.active = (md.armature is not None)
sub.prop(md, "invert_vertex_group", text="", icon='ARROW_LEFTRIGHT')
elif md.mode == 'VERTEX_GROUP':
col.label(text="Vertex Group:")
row = col.row(align=True)
row.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
sub = row.row(align=True)
sub.active = bool(md.vertex_group)
sub.prop(md, "invert_vertex_group", text="", icon='ARROW_LEFTRIGHT')
def MESH_DEFORM(self, layout, ob, md):
split = layout.split()
col = split.column()
col.active = not md.is_bound
col.label(text="Object:")
col.prop(md, "object", text="")
col = split.column()
col.label(text="Vertex Group:")
row = col.row(align=True)
row.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
sub = row.row(align=True)
sub.active = bool(md.vertex_group)
sub.prop(md, "invert_vertex_group", text="", icon='ARROW_LEFTRIGHT')
layout.separator()
if md.is_bound:
layout.operator("object.meshdeform_bind", text="Unbind")
else:
layout.operator("object.meshdeform_bind", text="Bind")
row = layout.row()
row.prop(md, "precision")
row.prop(md, "use_dynamic_bind")
def MIRROR(self, layout, ob, md):
split = layout.split(percentage=0.25)
col = split.column()
col.label(text="Axis:")
col.prop(md, "use_x")
col.prop(md, "use_y")
col.prop(md, "use_z")
col = split.column()
col.label(text="Options:")
col.prop(md, "use_mirror_merge", text="Merge")
col.prop(md, "use_clip", text="Clipping")
col.prop(md, "use_mirror_vertex_groups", text="Vertex Groups")
col = split.column()
col.label(text="Textures:")
col.prop(md, "use_mirror_u", text="U")
col.prop(md, "use_mirror_v", text="V")
col = layout.column()
if md.use_mirror_merge is True:
col.prop(md, "merge_threshold")
col.label(text="Mirror Object:")
col.prop(md, "mirror_object", text="")
def MULTIRES(self, layout, ob, md):
layout.row().prop(md, "subdivision_type", expand=True)
split = layout.split()
col = split.column()
col.prop(md, "levels", text="Preview")
col.prop(md, "sculpt_levels", text="Sculpt")
col.prop(md, "render_levels", text="Render")
col = split.column()
col.enabled = ob.mode != 'EDIT'
col.operator("object.multires_subdivide", text="Subdivide")
col.operator("object.multires_higher_levels_delete", text="Delete Higher")
col.operator("object.multires_reshape", text="Reshape")
col.operator("object.multires_base_apply", text="Apply Base")
col.prop(md, "use_subsurf_uv")
col.prop(md, "show_only_control_edges")
layout.separator()
col = layout.column()
row = col.row()
if md.is_external:
row.operator("object.multires_external_pack", text="Pack External")
row.label()
row = col.row()
row.prop(md, "filepath", text="")
else:
row.operator("object.multires_external_save", text="Save External...")
row.label()
def OCEAN(self, layout, ob, md):
if not bpy.app.build_options.mod_oceansim:
layout.label("Built without OceanSim modifier")
return
layout.prop(md, "geometry_mode")
if md.geometry_mode == 'GENERATE':
row = layout.row()
row.prop(md, "repeat_x")
row.prop(md, "repeat_y")
layout.separator()
split = layout.split()
col = split.column()
col.prop(md, "time")
col.prop(md, "depth")
col.prop(md, "random_seed")
col = split.column()
col.prop(md, "resolution")
col.prop(md, "size")
col.prop(md, "spatial_size")
layout.label("Waves:")
split = layout.split()
col = split.column()
col.prop(md, "choppiness")
col.prop(md, "wave_scale", text="Scale")
col.prop(md, "wave_scale_min")
col.prop(md, "wind_velocity")
col = split.column()
col.prop(md, "wave_alignment", text="Alignment")
sub = col.column()
sub.active = (md.wave_alignment > 0.0)
sub.prop(md, "wave_direction", text="Direction")
sub.prop(md, "damping")
layout.separator()
layout.prop(md, "use_normals")
split = layout.split()
col = split.column()
col.prop(md, "use_foam")
sub = col.row()
sub.active = md.use_foam
sub.prop(md, "foam_coverage", text="Coverage")
col = split.column()
col.active = md.use_foam
col.label("Foam Data Layer Name:")
col.prop(md, "foam_layer_name", text="")
layout.separator()
if md.is_cached:
layout.operator("object.ocean_bake", text="Free Bake").free = True
else:
layout.operator("object.ocean_bake").free = False
split = layout.split()
split.enabled = not md.is_cached
col = split.column(align=True)
col.prop(md, "frame_start", text="Start")
col.prop(md, "frame_end", text="End")
col = split.column(align=True)
col.label(text="Cache path:")
col.prop(md, "filepath", text="")
split = layout.split()
split.enabled = not md.is_cached
col = split.column()
col.active = md.use_foam
col.prop(md, "bake_foam_fade")
col = split.column()
def PARTICLE_INSTANCE(self, layout, ob, md):
layout.prop(md, "object")
layout.prop(md, "particle_system_index", text="Particle System")
split = layout.split()
col = split.column()
col.label(text="Create From:")
col.prop(md, "use_normal")
col.prop(md, "use_children")
col.prop(md, "use_size")
col = split.column()
col.label(text="Show Particles When:")
col.prop(md, "show_alive")
col.prop(md, "show_unborn")
col.prop(md, "show_dead")
layout.separator()
layout.prop(md, "use_path", text="Create Along Paths")
split = layout.split()
split.active = md.use_path
col = split.column()
col.row().prop(md, "axis", expand=True)
col.prop(md, "use_preserve_shape")
col = split.column()
col.prop(md, "position", slider=True)
col.prop(md, "random_position", text="Random", slider=True)
def PARTICLE_SYSTEM(self, layout, ob, md):
layout.label(text="Settings can be found inside the Particle context")
def SCREW(self, layout, ob, md):
split = layout.split()
col = split.column()
col.prop(md, "axis")
col.prop(md, "object", text="AxisOb")
col.prop(md, "angle")
col.prop(md, "steps")
col.prop(md, "render_steps")
col.prop(md, "use_smooth_shade")
col = split.column()
row = col.row()
row.active = (md.object is None or md.use_object_screw_offset is False)
row.prop(md, "screw_offset")
row = col.row()
row.active = (md.object is not None)
row.prop(md, "use_object_screw_offset")
col.prop(md, "use_normal_calculate")
col.prop(md, "use_normal_flip")
col.prop(md, "iterations")
col.prop(md, "use_stretch_u")
col.prop(md, "use_stretch_v")
def SHRINKWRAP(self, layout, ob, md):
split = layout.split()
col = split.column()
col.label(text="Target:")
col.prop(md, "target", text="")
col = split.column()
col.label(text="Vertex Group:")
col.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
split = layout.split()
col = split.column()
col.prop(md, "offset")
col = split.column()
col.label(text="Mode:")
col.prop(md, "wrap_method", text="")
if md.wrap_method == 'PROJECT':
split = layout.split()
col = split.column()
col.prop(md, "subsurf_levels")
col = split.column()
col.prop(md, "project_limit", text="Limit")
split = layout.split(percentage=0.25)
col = split.column()
col.label(text="Axis:")
col.prop(md, "use_project_x")
col.prop(md, "use_project_y")
col.prop(md, "use_project_z")
col = split.column()
col.label(text="Direction:")
col.prop(md, "use_negative_direction")
col.prop(md, "use_positive_direction")
col = split.column()
col.label(text="Cull Faces:")
col.prop(md, "cull_face", expand=True)
layout.prop(md, "auxiliary_target")
elif md.wrap_method == 'NEAREST_SURFACEPOINT':
layout.prop(md, "use_keep_above_surface")
def SIMPLE_DEFORM(self, layout, ob, md):
layout.row().prop(md, "deform_method", expand=True)
split = layout.split()
col = split.column()
col.label(text="Vertex Group:")
col.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
split = layout.split()
col = split.column()
col.label(text="Origin:")
col.prop(md, "origin", text="")
if md.deform_method in {'TAPER', 'STRETCH', 'TWIST'}:
col.label(text="Lock:")
col.prop(md, "lock_x")
col.prop(md, "lock_y")
col = split.column()
col.label(text="Deform:")
if md.deform_method in {'TAPER', 'STRETCH'}:
col.prop(md, "factor")
else:
col.prop(md, "angle")
col.prop(md, "limits", slider=True)
def SMOKE(self, layout, ob, md):
layout.label(text="Settings are inside the Physics tab")
def SMOOTH(self, layout, ob, md):
split = layout.split(percentage=0.25)
col = split.column()
col.label(text="Axis:")
col.prop(md, "use_x")
col.prop(md, "use_y")
col.prop(md, "use_z")
col = split.column()
col.prop(md, "factor")
col.prop(md, "iterations")
col.label(text="Vertex Group:")
col.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
def SOFT_BODY(self, layout, ob, md):
layout.label(text="Settings are inside the Physics tab")
def SOLIDIFY(self, layout, ob, md):
split = layout.split()
col = split.column()
col.prop(md, "thickness")
col.prop(md, "thickness_clamp")
col.separator()
row = col.row(align=True)
row.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
sub = row.row(align=True)
sub.active = bool(md.vertex_group)
sub.prop(md, "invert_vertex_group", text="", icon='ARROW_LEFTRIGHT')
sub = col.row()
sub.active = bool(md.vertex_group)
sub.prop(md, "thickness_vertex_group", text="Factor")
col.label(text="Crease:")
col.prop(md, "edge_crease_inner", text="Inner")
col.prop(md, "edge_crease_outer", text="Outer")
col.prop(md, "edge_crease_rim", text="Rim")
col = split.column()
col.prop(md, "offset")
col.prop(md, "use_flip_normals")
col.prop(md, "use_even_offset")
col.prop(md, "use_quality_normals")
col.prop(md, "use_rim")
col_rim = col.column()
col_rim.active = md.use_rim
col_rim.prop(md, "use_rim_only")
col.separator()
col.label(text="Material Index Offset:")
sub = col.column()
row = sub.split(align=True, percentage=0.4)
row.prop(md, "material_offset", text="")
row = row.row(align=True)
row.active = md.use_rim
row.prop(md, "material_offset_rim", text="Rim")
def SUBSURF(self, layout, ob, md):
layout.row().prop(md, "subdivision_type", expand=True)
split = layout.split()
col = split.column()
col.label(text="Subdivisions:")
col.prop(md, "levels", text="View")
col.prop(md, "render_levels", text="Render")
col = split.column()
col.label(text="Options:")
col.prop(md, "use_subsurf_uv")
col.prop(md, "show_only_control_edges")
if hasattr(md, "use_opensubdiv"):
col.prop(md, "use_opensubdiv")
def SURFACE(self, layout, ob, md):
layout.label(text="Settings are inside the Physics tab")
def UV_PROJECT(self, layout, ob, md):
split = layout.split()
col = split.column()
col.label(text="Image:")
col.prop(md, "image", text="")
col = split.column()
col.label(text="UV Map:")
col.prop_search(md, "uv_layer", ob.data, "uv_textures", text="")
split = layout.split()
col = split.column()
col.prop(md, "use_image_override")
col.prop(md, "projector_count", text="Projectors")
for proj in md.projectors:
col.prop(proj, "object", text="")
col = split.column()
sub = col.column(align=True)
sub.prop(md, "aspect_x", text="Aspect X")
sub.prop(md, "aspect_y", text="Aspect Y")
sub = col.column(align=True)
sub.prop(md, "scale_x", text="Scale X")
sub.prop(md, "scale_y", text="Scale Y")
def WARP(self, layout, ob, md):
use_falloff = (md.falloff_type != 'NONE')
split = layout.split()
col = split.column()
col.label(text="From:")
col.prop(md, "object_from", text="")
col.prop(md, "use_volume_preserve")
col = split.column()
col.label(text="To:")
col.prop(md, "object_to", text="")
col.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
col = layout.column()
row = col.row(align=True)
row.prop(md, "strength")
if use_falloff:
row.prop(md, "falloff_radius")
col.prop(md, "falloff_type")
if use_falloff:
if md.falloff_type == 'CURVE':
col.template_curve_mapping(md, "falloff_curve")
# 2 new columns
split = layout.split()
col = split.column()
col.label(text="Texture:")
col.template_ID(md, "texture", new="texture.new")
col = split.column()
col.label(text="Texture Coordinates:")
col.prop(md, "texture_coords", text="")
if md.texture_coords == 'OBJECT':
layout.prop(md, "texture_coords_object", text="Object")
elif md.texture_coords == 'UV' and ob.type == 'MESH':
layout.prop_search(md, "uv_layer", ob.data, "uv_textures")
def WAVE(self, layout, ob, md):
split = layout.split()
col = split.column()
col.label(text="Motion:")
col.prop(md, "use_x")
col.prop(md, "use_y")
col.prop(md, "use_cyclic")
col = split.column()
col.prop(md, "use_normal")
sub = col.column()
sub.active = md.use_normal
sub.prop(md, "use_normal_x", text="X")
sub.prop(md, "use_normal_y", text="Y")
sub.prop(md, "use_normal_z", text="Z")
split = layout.split()
col = split.column()
col.label(text="Time:")
sub = col.column(align=True)
sub.prop(md, "time_offset", text="Offset")
sub.prop(md, "lifetime", text="Life")
col.prop(md, "damping_time", text="Damping")
col = split.column()
col.label(text="Position:")
sub = col.column(align=True)
sub.prop(md, "start_position_x", text="X")
sub.prop(md, "start_position_y", text="Y")
col.prop(md, "falloff_radius", text="Falloff")
layout.separator()
layout.prop(md, "start_position_object")
layout.prop_search(md, "vertex_group", ob, "vertex_groups")
split = layout.split(percentage=0.33)
col = split.column()
col.label(text="Texture")
col = split.column()
col.template_ID(md, "texture", new="texture.new")
layout.prop(md, "texture_coords")
if md.texture_coords == 'UV' and ob.type == 'MESH':
layout.prop_search(md, "uv_layer", ob.data, "uv_textures")
elif md.texture_coords == 'OBJECT':
layout.prop(md, "texture_coords_object")
layout.separator()
split = layout.split()
col = split.column()
col.prop(md, "speed", slider=True)
col.prop(md, "height", slider=True)
col = split.column()
col.prop(md, "width", slider=True)
col.prop(md, "narrowness", slider=True)
def REMESH(self, layout, ob, md):
layout.prop(md, "mode")
row = layout.row()
row.prop(md, "octree_depth")
row.prop(md, "scale")
if md.mode == 'SHARP':
layout.prop(md, "sharpness")
layout.prop(md, "use_smooth_shade")
layout.prop(md, "use_remove_disconnected")
row = layout.row()
row.active = md.use_remove_disconnected
row.prop(md, "threshold")
@staticmethod
def vertex_weight_mask(layout, ob, md):
layout.label(text="Influence/Mask Options:")
split = layout.split(percentage=0.4)
split.label(text="Global Influence:")
split.prop(md, "mask_constant", text="")
if not md.mask_texture:
split = layout.split(percentage=0.4)
split.label(text="Vertex Group Mask:")
split.prop_search(md, "mask_vertex_group", ob, "vertex_groups", text="")
if not md.mask_vertex_group:
split = layout.split(percentage=0.4)
split.label(text="Texture Mask:")
split.template_ID(md, "mask_texture", new="texture.new")
if md.mask_texture:
split = layout.split()
col = split.column()
col.label(text="Texture Coordinates:")
col.prop(md, "mask_tex_mapping", text="")
col = split.column()
col.label(text="Use Channel:")
col.prop(md, "mask_tex_use_channel", text="")
if md.mask_tex_mapping == 'OBJECT':
layout.prop(md, "mask_tex_map_object", text="Object")
elif md.mask_tex_mapping == 'UV' and ob.type == 'MESH':
layout.prop_search(md, "mask_tex_uv_layer", ob.data, "uv_textures")
def VERTEX_WEIGHT_EDIT(self, layout, ob, md):
split = layout.split()
col = split.column()
col.label(text="Vertex Group:")
col.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
col.label(text="Default Weight:")
col.prop(md, "default_weight", text="")
col = split.column()
col.prop(md, "use_add")
sub = col.column()
sub.active = md.use_add
sub.prop(md, "add_threshold")
col = col.column()
col.prop(md, "use_remove")
sub = col.column()
sub.active = md.use_remove
sub.prop(md, "remove_threshold")
layout.separator()
layout.prop(md, "falloff_type")
if md.falloff_type == 'CURVE':
layout.template_curve_mapping(md, "map_curve")
# Common mask options
layout.separator()
self.vertex_weight_mask(layout, ob, md)
def VERTEX_WEIGHT_MIX(self, layout, ob, md):
split = layout.split()
col = split.column()
col.label(text="Vertex Group A:")
col.prop_search(md, "vertex_group_a", ob, "vertex_groups", text="")
col.label(text="Default Weight A:")
col.prop(md, "default_weight_a", text="")
col.label(text="Mix Mode:")
col.prop(md, "mix_mode", text="")
col = split.column()
col.label(text="Vertex Group B:")
col.prop_search(md, "vertex_group_b", ob, "vertex_groups", text="")
col.label(text="Default Weight B:")
col.prop(md, "default_weight_b", text="")
col.label(text="Mix Set:")
col.prop(md, "mix_set", text="")
# Common mask options
layout.separator()
self.vertex_weight_mask(layout, ob, md)
def VERTEX_WEIGHT_PROXIMITY(self, layout, ob, md):
split = layout.split()
col = split.column()
col.label(text="Vertex Group:")
col.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
col = split.column()
col.label(text="Target Object:")
col.prop(md, "target", text="")
split = layout.split()
col = split.column()
col.label(text="Distance:")
col.prop(md, "proximity_mode", text="")
if md.proximity_mode == 'GEOMETRY':
col.row().prop(md, "proximity_geometry")
col = split.column()
col.label()
col.prop(md, "min_dist")
col.prop(md, "max_dist")
layout.separator()
layout.prop(md, "falloff_type")
# Common mask options
layout.separator()
self.vertex_weight_mask(layout, ob, md)
def SKIN(self, layout, ob, md):
row = layout.row()
row.operator("object.skin_armature_create", text="Create Armature")
row.operator("mesh.customdata_skin_add")
layout.separator()
row = layout.row(align=True)
row.prop(md, "branch_smoothing")
row.prop(md, "use_smooth_shade")
split = layout.split()
col = split.column()
col.label(text="Selected Vertices:")
sub = col.column(align=True)
sub.operator("object.skin_loose_mark_clear", text="Mark Loose").action = 'MARK'
sub.operator("object.skin_loose_mark_clear", text="Clear Loose").action = 'CLEAR'
sub = col.column()
sub.operator("object.skin_root_mark", text="Mark Root")
sub.operator("object.skin_radii_equalize", text="Equalize Radii")
col = split.column()
col.label(text="Symmetry Axes:")
col.prop(md, "use_x_symmetry")
col.prop(md, "use_y_symmetry")
col.prop(md, "use_z_symmetry")
def TRIANGULATE(self, layout, ob, md):
row = layout.row()
col = row.column()
col.label(text="Quad Method:")
col.prop(md, "quad_method", text="")
col = row.column()
col.label(text="Ngon Method:")
col.prop(md, "ngon_method", text="")
def UV_WARP(self, layout, ob, md):
split = layout.split()
col = split.column()
col.prop(md, "center")
col = split.column()
col.label(text="UV Axis:")
col.prop(md, "axis_u", text="")
col.prop(md, "axis_v", text="")
split = layout.split()
col = split.column()
col.label(text="From:")
col.prop(md, "object_from", text="")
col = split.column()
col.label(text="To:")
col.prop(md, "object_to", text="")
split = layout.split()
col = split.column()
obj = md.object_from
if obj and obj.type == 'ARMATURE':
col.label(text="Bone:")
col.prop_search(md, "bone_from", obj.data, "bones", text="")
col = split.column()
obj = md.object_to
if obj and obj.type == 'ARMATURE':
col.label(text="Bone:")
col.prop_search(md, "bone_to", obj.data, "bones", text="")
split = layout.split()
col = split.column()
col.label(text="Vertex Group:")
col.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
col = split.column()
col.label(text="UV Map:")
col.prop_search(md, "uv_layer", ob.data, "uv_textures", text="")
def WIREFRAME(self, layout, ob, md):
has_vgroup = bool(md.vertex_group)
split = layout.split()
col = split.column()
col.prop(md, "thickness", text="Thickness")
row = col.row(align=True)
row.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
sub = row.row(align=True)
sub.active = has_vgroup
sub.prop(md, "invert_vertex_group", text="", icon='ARROW_LEFTRIGHT')
row = col.row(align=True)
row.active = has_vgroup
row.prop(md, "thickness_vertex_group", text="Factor")
col.prop(md, "use_crease", text="Crease Edges")
col.prop(md, "crease_weight", text="Crease Weight")
col = split.column()
col.prop(md, "offset")
col.prop(md, "use_even_offset", text="Even Thickness")
col.prop(md, "use_relative_offset", text="Relative Thickness")
col.prop(md, "use_boundary", text="Boundary")
col.prop(md, "use_replace", text="Replace Original")
col.prop(md, "material_offset", text="Material Offset")
def DATA_TRANSFER(self, layout, ob, md):
row = layout.row(align=True)
row.prop(md, "object")
sub = row.row(align=True)
sub.active = bool(md.object)
sub.prop(md, "use_object_transform", text="", icon='GROUP')
layout.separator()
split = layout.split(0.333)
split.prop(md, "use_vert_data")
use_vert = md.use_vert_data
row = split.row()
row.active = use_vert
row.prop(md, "vert_mapping", text="")
if use_vert:
col = layout.column(align=True)
split = col.split(0.333, align=True)
sub = split.column(align=True)
sub.prop(md, "data_types_verts")
sub = split.column(align=True)
row = sub.row(align=True)
row.prop(md, "layers_vgroup_select_src", text="")
row.label(icon='RIGHTARROW')
row.prop(md, "layers_vgroup_select_dst", text="")
row = sub.row(align=True)
row.label("", icon='NONE')
layout.separator()
split = layout.split(0.333)
split.prop(md, "use_edge_data")
use_edge = md.use_edge_data
row = split.row()
row.active = use_edge
row.prop(md, "edge_mapping", text="")
if use_edge:
col = layout.column(align=True)
split = col.split(0.333, align=True)
sub = split.column(align=True)
sub.prop(md, "data_types_edges")
layout.separator()
split = layout.split(0.333)
split.prop(md, "use_loop_data")
use_loop = md.use_loop_data
row = split.row()
row.active = use_loop
row.prop(md, "loop_mapping", text="")
if use_loop:
col = layout.column(align=True)
split = col.split(0.333, align=True)
sub = split.column(align=True)
sub.prop(md, "data_types_loops")
sub = split.column(align=True)
row = sub.row(align=True)
row.label("", icon='NONE')
row = sub.row(align=True)
row.prop(md, "layers_vcol_select_src", text="")
row.label(icon='RIGHTARROW')
row.prop(md, "layers_vcol_select_dst", text="")
row = sub.row(align=True)
row.prop(md, "layers_uv_select_src", text="")
row.label(icon='RIGHTARROW')
row.prop(md, "layers_uv_select_dst", text="")
col.prop(md, "islands_precision")
layout.separator()
split = layout.split(0.333)
split.prop(md, "use_poly_data")
use_poly = md.use_poly_data
row = split.row()
row.active = use_poly
row.prop(md, "poly_mapping", text="")
if use_poly:
col = layout.column(align=True)
split = col.split(0.333, align=True)
sub = split.column(align=True)
sub.prop(md, "data_types_polys")
layout.separator()
split = layout.split()
col = split.column()
row = col.row(align=True)
sub = row.row(align=True)
sub.active = md.use_max_distance
sub.prop(md, "max_distance")
row.prop(md, "use_max_distance", text="", icon='STYLUS_PRESSURE')
col = split.column()
col.prop(md, "ray_radius")
layout.separator()
split = layout.split()
col = split.column()
col.prop(md, "mix_mode")
col.prop(md, "mix_factor")
col = split.column()
row = col.row()
row.active = bool(md.object)
row.operator("object.datalayout_transfer", text="Generate Data Layers")
row = col.row(align=True)
row.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
sub = row.row(align=True)
sub.active = bool(md.vertex_group)
sub.prop(md, "invert_vertex_group", text="", icon='ARROW_LEFTRIGHT')
def NORMAL_EDIT(self, layout, ob, md):
has_vgroup = bool(md.vertex_group)
needs_object_offset = (((md.mode == 'RADIAL') and not md.target) or
((md.mode == 'DIRECTIONAL') and md.use_direction_parallel))
row = layout.row()
row.prop(md, "mode", expand=True)
split = layout.split()
col = split.column()
col.prop(md, "target", text="")
sub = col.column(align=True)
sub.active = needs_object_offset
sub.prop(md, "offset")
row = col.row(align=True)
col = split.column()
row = col.row()
row.active = (md.mode == 'DIRECTIONAL')
row.prop(md, "use_direction_parallel")
subcol = col.column(align=True)
subcol.label("Mix Mode:")
subcol.prop(md, "mix_mode", text="")
subcol.prop(md, "mix_factor")
row = subcol.row(align=True)
row.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
sub = row.row(align=True)
sub.active = has_vgroup
sub.prop(md, "invert_vertex_group", text="", icon='ARROW_LEFTRIGHT')
def CORRECTIVE_SMOOTH(self, layout, ob, md):
is_bind = md.is_bind
layout.prop(md, "factor", text="Factor")
layout.prop(md, "iterations")
row = layout.row()
row.prop(md, "smooth_type")
split = layout.split()
col = split.column()
col.label(text="Vertex Group:")
row = col.row(align=True)
row.prop_search(md, "vertex_group", ob, "vertex_groups", text="")
row.prop(md, "invert_vertex_group", text="", icon='ARROW_LEFTRIGHT')
col = split.column()
col.prop(md, "use_only_smooth")
col.prop(md, "use_pin_boundary")
layout.prop(md, "rest_source")
if md.rest_source == 'BIND':
layout.operator("object.correctivesmooth_bind", text="Unbind" if is_bind else "Bind")
if __name__ == "__main__": # only for live edit.
bpy.utils.register_module(__name__)
|
pawkoz/dyplom
|
blender/release/scripts/startup/bl_ui/properties_data_modifier.py
|
Python
|
gpl-2.0
| 46,076
|
class check_privilege_revoke_all_sysauth():
"""
check_privilege_revoke_all_sysauth
Ensure 'ALL' Is Revoked from Unauthorized 'GRANTEE' on SYSAUTH$
The SYSAUTH$ table contains details about which users and roles have
what system privileges.
"""
# References:
# http://www.davidlitchfield.com/AddendumtotheOracle12cCISGuidelines.pdf
# http://www.davidlitchfield.com/oracle_backdoors.pdf
TITLE = 'Revoke ALL from SYSAUTH$'
CATEGORY = 'Privilege'
TYPE = 'sql'
SQL = "SELECT GRANTEE, PRIVILEGE FROM DBA_TAB_PRIVS WHERE TABLE_NAME = 'SYSAUTH$'"
verbose = False
skip = False
result = {}
def do_check(self, *results):
self.result['level'] = 'GREEN'
output = ''
for rows in results:
for row in rows:
self.result['level'] = 'RED'
                output += row[0] + ' with ' + row[1] + ' on SYSAUTH$\n'
if 'GREEN' == self.result['level']:
output = 'No user with grants to SYSAUTH$.'
self.result['output'] = output
return self.result
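    # Example (hypothetical result set): do_check([('PUBLIC', 'SELECT')])
    # would set result['level'] = 'RED' and report
    # "PUBLIC with SELECT on SYSAUTH$".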
def __init__(self, parent):
print('Performing check: ' + self.TITLE)
|
foospidy/DbDat
|
plugins/oracle/check_privilege_revoke_all_sysauth.py
|
Python
|
gpl-2.0
| 1,203
|
#!/usr/bin/python3
##################################################################
#
# Run a test, including the provisioning and teardown of all nodes
#
# Copyright (C) 2021 SUSE Linux GmbH
#
##################################################################
import twopence
import argparse
import os
import curly
import readline
import atexit
from .logger import LogParser
from .results import ResultsVector, ResultsMatrix
def info(msg):
print("== %s" % msg)
def error(msg):
print(f"Error: {msg}")
class AbortedTestcase(Exception):
pass
class ContinueTestcase(Exception):
pass
class FinishTestcase(Exception):
pass
class Interaction(object):
class Command(object):
def __init__(self, name, description, func):
self.name = name
self.description = description.strip()
self.func = func
		def getCompletion(self, testcase, tokens, nth):
			return None
def __init__(self, testcase, message):
self.testcase = testcase
self.message = message
self.commands = {}
for name in dir(self):
attr = getattr(self, name, None)
if not attr:
continue
if not callable(attr):
continue
# If it's a subclass of Command, instantiate it and
# add it right away. This is the only way we can
# do per-command completion of arguments
if type(attr) == type(self.__class__) and \
attr != self.Command and \
issubclass(attr, self.Command):
cmd = attr()
self.commands[cmd.name] = cmd
continue
doc = attr.__doc__
try:
(name, description) = doc.split(":", maxsplit = 1)
except:
continue
self.addCommand(name, description, attr)
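	# A method documented as '''status: display ...''' is auto-registered
	# above as the interactive command "status"; Command subclasses defined
	# on the class (like SSHCommand) are instantiated and added by name.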
def addCommand(self, name, description, func):
cmd = self.Command(name, description, func)
self.commands[name] = cmd
def getCommand(self, name):
return self.commands.get(name)
def getCompletion(self, text, nth = 0):
if type(nth) != int:
return None
index = 0
for cmd in self.commands.values():
if cmd.name.startswith(text):
if index >= nth:
return cmd
index += 1
return None
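	# e.g. getCompletion("co") returns the first registered command whose
	# name starts with "co" (such as "continue"); larger nth values walk
	# through subsequent matches for readline-style completion cycling.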
def cont(self, testcase, *args):
'''continue: proceed to next step'''
raise ContinueTestcase()
def inspect(self, testcase, *args):
'''inspect: display information on the test case'''
testcase.inspect()
def finish(self, testcase, *args):
'''finish: finish the test case non-interactively'''
raise FinishTestcase()
def help(self, testcase, *args):
'''help: display help message'''
for (name, cmd) in self.commands.items():
print("%-20s %s" % (name, cmd.description))
def abort(self, testcase, *args):
'''abort: abort this test case'''
raise AbortedTestcase()
class InteractionPreProvisioning(Interaction):
pass
class InteractionPostProvisioning(Interaction):
def status(self, testcase, *args):
'''status: display the status of provisioned cluster'''
testcase.displayClusterStatus()
class SSHCommand(Interaction.Command):
def __init__(self):
super().__init__("ssh", "connect to a node", self.perform)
def getCompletion(self, testcase, tokens, nth):
if len(tokens) != 1:
return None
name = tokens[0]
if nth == 0:
for match in testcase._nodes:
if match.startswith(name):
return match
return None
def perform(self, testcase, *args):
'''ssh: connect to a node'''
if len(args) != 1:
print("usage: ssh NODE")
return
name = args[0]
print("Trying to connect to node %s" % name)
testcase.runProvisioner("login", name)
class InteractionPostTestRun(InteractionPostProvisioning):
def rerun(self, testcase, *args):
'''rerun: re-run the test script'''
testcase.runScript(rerun = True)
class Console:
BANNER = '''
Welcome to the susetest shell. For an overview of commands, please enter 'help'.
Type 'continue' or Ctrl-D to exit interactive shell and proceed.
'''
def __init__(self):
self.histfile = os.path.join(os.path.expanduser("~"), ".twopence/history")
try:
readline.read_history_file(self.histfile)
self.h_len = readline.get_current_history_length()
except FileNotFoundError:
open(self.histfile, 'wb').close()
self.h_len = 0
readline.parse_and_bind("tab: complete")
readline.set_completer(self.complete)
atexit.register(self.save)
self.banner = False
self.interactions = None
def save(self):
new_h_len = readline.get_current_history_length()
readline.set_history_length(1000)
readline.append_history_file(new_h_len - self.h_len, self.histfile)
def interact(self, interaction):
if not self.banner:
print(self.BANNER)
self.banner = True
print(interaction.message)
self.interactions = interaction
while True:
try:
response = input("> ")
except EOFError:
print("<Ctrl-d>")
break
response = response.strip()
w = response.split()
if not w:
continue
name = w.pop(0)
if name == 'continue':
break
cmd = interaction.getCommand(name)
if not cmd:
cmd = interaction.getCompletion(name)
if not cmd:
print("Unknown command `%s'" % name)
continue
# Invoke the command
cmd.func(interaction.testcase, *w)
self.interactions = None
def complete(self, text, nth):
if not self.interactions:
return None
linebuf = readline.get_line_buffer()
tokens = linebuf.split()
if not tokens:
return None
# We've actually completed a word, and we do not want to
# do completion of the last word but the next argument (which is
# empty so far).
if linebuf.endswith(' '):
tokens.append('')
name = tokens.pop(0)
if not tokens:
cmd = self.interactions.getCompletion(name, nth)
else:
cmd = self.interactions.getCompletion(name)
if cmd is None:
return None
if not tokens:
return cmd.name
testcase = self.interactions.testcase
return cmd.getCompletion(testcase, tokens, nth)
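# readline invokes complete(text, nth) repeatedly with nth = 0, 1, 2, ...
# until it gets None back, which is why getCompletion() returns the nth
# matching command rather than a list of candidates.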
class TestThing:
def __init__(self, name):
self.info = None
self.name = name
@property
def description(self):
return f"{self.type_string} {self.name}"
def validateCompatibility(self, features):
if not self.info:
error(f"Cannot validate compatibility of {self.description}: no info object")
return False
if not self.info.validateFeatureCompatibility(features, msgfunc = info):
info(f"Skipping incompatible {self.description}")
return False
return True
class Context:
def __init__(self, workspace, logspace, parent = None,
parameters = [],
dryrun = False, debug = False, quiet = False, clobber = False,
platform = None, platformFeatures = None, requestedFeatures = None,
results = None):
self.workspace = workspace
self.logspace = logspace
self.results = results
if parent:
self.platform = parent.platform
self.platformFeatures = parent.platformFeatures
self.requestedFeatures = parent.requestedFeatures
self.dryrun = parent.dryrun
self.debug = parent.debug
self.quiet = parent.quiet
self.clobber = parent.clobber
else:
self.platform = platform
self.dryrun = dryrun
self.debug = debug
self.quiet = quiet
self.clobber = clobber
if not platformFeatures and platform:
platformFeatures = self.getPlatformFeatures(platform)
self.platformFeatures = platformFeatures or set()
self.requestedFeatures = requestedFeatures or set()
self.requestedFeatures.difference_update(self.platformFeatures)
self.parameters = []
if parameters:
self.parameters += parameters
def getPlatformFeatures(self, platform):
import twopence.provision
return twopence.provision.queryPlatformFeatures(platform) or set()
def validateMatrix(self, matrix):
# print(f"### CHECKING FEATURE COMPAT of {matrix.description} vs {self.platformFeatures}")
return matrix.validateCompatibility(self.platformFeatures)
def createSubContext(self, extra_path, extra_parameters = []):
if self.results:
assert(isinstance(self.results, ResultsMatrix))
column_name = extra_path[-1]
results = self.results.createColumn(column_name, extra_parameters)
else:
results = None
return Context(
parent = self,
workspace = os.path.join(self.workspace, *extra_path),
logspace = os.path.join(self.logspace, *extra_path),
results = results,
parameters = self.parameters + extra_parameters)
def mergeTestReport(self, testReport):
if self.results is not None:
for group in testReport.groups:
for test in group.tests:
self.results.add(test.id, test.status, test.description)
self.results.save()
def createWorkspaceFor(self, name):
return self._makedir(os.path.join(self.workspace, name))
def createLogspaceFor(self, name):
return self._makedir(os.path.join(self.logspace, name))
def attachResults(self, results):
results.attachToLogspace(self.logspace, clobber = self.clobber)
self.results = results
def _makedir(self, path):
if not os.path.isdir(path):
os.makedirs(path)
return path
class Testcase(TestThing):
type_string = "test case"
STAGE_LARVAL = "larval"
STAGE_INITIALIZED = "initialized"
STAGE_PROVISIONED = "provisioned"
STAGE_TEST_COMPLETE = "complete"
STAGE_DESTROYED = "destroyed"
def __init__(self, name, context):
super().__init__(name)
self.workspace = context.createWorkspaceFor(name)
self.logspace = context.createLogspaceFor(name)
self.dryrun = context.dryrun
self.debug = context.debug
self.quiet = context.quiet
self.provisionFeatures = context.requestedFeatures
self.isCompatible = True
self.testConfig = None
self.testScript = None
self.testReport = None
self.stage = self.STAGE_LARVAL
self._nodes = []
@property
def is_larval(self):
return self.stage == self.STAGE_LARVAL
@property
def is_initialized(self):
return self.stage == self.STAGE_INITIALIZED
@property
def is_provisioned(self):
return self.stage == self.STAGE_PROVISIONED
@property
def is_test_complete(self):
return self.stage == self.STAGE_TEST_COMPLETE
@property
def is_destroyed(self):
return self.stage == self.STAGE_DESTROYED
def validate(self):
info = twopence.TestBase().findTestCase(self.name)
if info is None:
error(f"could not find {self.description}")
return False
self.info = info
self.testConfig = info.config
self.testScript = info.script
return True
def perform(self, testrunConfig, console = None):
self.console = console
self.initializeWorkspace(testrunConfig)
self.interactPreProvisioned()
self.provisionCluster()
self.interactPostProvisioned()
self.runScript()
self.interactPostTestrun()
self.validateResult()
self.destroyCluster()
def initializeWorkspace(self, testrunConfig):
if not self.is_larval:
return
info("Initializing workspace")
self.runProvisioner(
"init",
"--logspace", self.logspace,
"--config", testrunConfig,
"--config", self.testConfig)
config = curly.Config(self.testConfig)
tree = config.tree()
self._nodes = []
for name in tree.get_children("node"):
self._nodes.append(name)
self.stage = self.STAGE_INITIALIZED
def provisionCluster(self):
if not self.is_initialized:
return
info("Provisioning test nodes")
if self.runProvisioner("create") != 0:
info("Failed to provision cluster")
return
self.stage = self.STAGE_PROVISIONED
def displayClusterStatus(self):
self.runProvisioner("status")
def runScript(self, rerun = False):
if rerun and self.is_test_complete:
pass
elif not self.is_provisioned:
info("unable to run script; nodes not yet provisioned")
return
info("Executing test script")
# This is hard-coded, and we "just know" where it is.
# If this ever changes, use
# twopence provision --workspace BLAH show status-file
# to obtain the name of that file
statusFile = os.path.join(self.workspace, "status.conf")
if self.runCommand(self.testScript, "--config", statusFile) != 0:
info("Test script return non-zero exit status")
# FIXME: record failure; we should also return non-zero
# exit status in this case
self.stage = self.STAGE_TEST_COMPLETE
def validateResult(self):
if not self.is_test_complete:
return
if self.dryrun:
return
# at a minimum, we should try to load the junit results and check if they're
# valid.
# Additional things to do:
# - implement useful behavior on test failures, like offering ssh
# access; suspending and saving the SUTs; etc.
# - compare test results against a list of expected failures,
# and actively call out regressions (and improvements)
# - aggregate test results and store them in a database
info("Validating test result")
reportPath = os.path.join(self.logspace, "junit-results.xml")
if not os.path.isfile(reportPath):
print("Error: cannot find test report document at %s" % reportPath);
return
self.testReport = reportPath
def destroyCluster(self):
# in any but the larval state, we have cleanup to do
if self.is_larval:
return
info("Destroying test nodes")
self.runProvisioner("destroy", "--zap")
self.stage = self.STAGE_DESTROYED
def runProvisioner(self, *args):
return self.runCommand("twopence provision", "--workspace", self.workspace, *args)
def runCommand(self, cmd, *args):
argv = [cmd]
if self.debug:
argv.append("--debug")
argv += args
# info("Executing command:")
cmd = " ".join(argv)
print(" " + cmd)
if self.dryrun:
return 0
if self.quiet:
cmd += " >/dev/null 2>&1"
return os.system(cmd)
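# Note: the command line is assembled by plain string joining and handed to
# os.system(), so any argument containing spaces or shell metacharacters
# would need quoting (e.g. shlex.quote) before being passed in.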
def interact(self, interaction):
console = self.console
if not console:
return
try:
console.interact(interaction)
except ContinueTestcase:
pass
except FinishTestcase:
self.console = None
def interactPreProvisioned(self):
msg = "Ready to provision %s" % self.name
self.interact(InteractionPreProvisioning(self, msg))
def interactPostProvisioned(self):
msg = "Provisioned %s, ready to execute" % self.name
self.interact(InteractionPostProvisioning(self, msg))
def interactPostTestrun(self):
msg = "Test run %s complete, ready to destroy cluster" % self.name
self.interact(InteractionPostTestRun(self, msg))
def inspect(self):
if self.runCommand(self.testScript, "info") != 0:
info("Test script return non-zero exit status")
class Testsuite(TestThing):
type_string = "test suite"
def __init__(self, name):
super().__init__(name)
self.testcases = None
self.info = twopence.TestBase().findTestSuite(name)
def validate(self):
if self.testcases is not None:
return True
if not self.info or not self.info.validate():
error(f"Cannot find {self.description}")
return False
self.testcases = self.info.open().get_values("testcases")
info(f"Loaded test suite {self.name}")
if not self.testcases:
error(f"{self.description} does not define any test cases")
return False
info(" consisting of " + ", ".join(self.testcases))
return True
class TestMatrixColumn(TestThing):
type_string = "test matrix column"
def __init__(self, name, matrix_name, config):
self.name = name
self.matrix_name = matrix_name
self.config = config
self.parameters = config.get_values("parameters")
def parametersAsDict(self):
result = {}
for paramString in self.parameters:
words = paramString.split('=', maxsplit = 1)
if len(words) != 2:
raise ValueError("argument to --parameter must be in the form name=value, not \"%s\"" % s)
key, value = words
result[key] = value
return result
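# For example (hypothetical values), parameters = ['ssl=openssl', 'fips=1']
# yields {'ssl': 'openssl', 'fips': '1'}; a bare 'ssl' without '=' raises
# ValueError.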
def buildContext(self, context):
info(f"Processing next column of test matrix {self.matrix_name}: {self.name}")
return context.createSubContext(
extra_path = [self.matrix_name, self.name],
extra_parameters = self.parameters)
class Testmatrix(TestThing):
type_string = "test matrix"
def __init__(self, name, args):
super().__init__(name)
self.args = args
self.columns = None
self.info = twopence.TestBase().findTestMatrix(name)
def validate(self):
if self.columns is not None:
return True
if not self.info or not self.info.validate():
error(f"Cannot find test matrix {self.name}")
return False
self.columns = self.load()
info(f"Loaded {self.description} from {self.info.path}")
if not self.columns:
error(f"test matrix {self.name} does not define any columns")
return False
info(f"Test matrix {self.name} defines these columns")
for column in self.columns:
print(f" {column.name}")
for param in column.parameters:
print(f" {param}")
return True
def load(self):
result = []
config = self.info.open()
for child in config:
if child.type != 'column':
continue
column = TestMatrixColumn(child.name, self.name, child)
result.append(column)
# The name attribute is useful for later stages that don't know which matrix
# the test run was based on
name = config.get_value("name")
if name is None:
raise ValueError(f"{self.info.path} does not define a name attribute")
if name != self.name:
raise ValueError(f"{self.info.path} specifies name = {name} (expected {self.name}")
return result
class Pipeline:
def __init__(self, context):
self.context = context
self.testcases = []
self.valid = True
def addTestcases(self, names):
for name in names:
if name not in self.testcases:
self.testcases.append(name)
def addTestsuites(self, names):
for name in names:
suite = Testsuite(name)
if not suite.validate():
self.valid = False
continue
self.addTestcases(suite.testcases)
def start(self, context = None):
if context is None:
context = self.context
testcases = []
for name in self.testcases:
test = Testcase(name, context)
if not test.validate():
self.valid = False
if not test.validateCompatibility(context.platformFeatures):
test.isCompatible = False
testcases.append(test)
if not self.valid:
error("Detected one or more invalid test cases")
return None
if not testcases:
error("No test cases defined")
return None
if not any(_.isCompatible for _ in testcases):
error("All test cases are incompatible with the base platform")
return None
return testcases
class Runner:
MODE_TESTS = 0
MODE_SUITES = 1
def __init__(self, mode = MODE_TESTS):
self.mode = mode
parser = self.build_arg_parser()
args = parser.parse_args()
self.valid = False
self.platform = args.platform
self.matrix = None
self.buildTestContext(args)
self.pipeline = Pipeline(self.context)
if self.mode == self.MODE_TESTS:
self.pipeline.addTestcases(args.testcase)
elif self.mode == self.MODE_SUITES:
self.pipeline.addTestsuites(args.testsuite)
else:
raise ValueError(f"invalid mode {self.mode}")
if args.matrix:
self.matrix = Testmatrix(args.matrix, args)
self.console = None
if args.interactive:
self.console = Console()
def buildTestContext(self, args):
self.testrun = args.testrun
self.workspace = args.workspace
self.logspace = args.logspace
if self.workspace is None:
self.workspace = os.path.expanduser("~/susetest/work")
if self.logspace is None:
self.logspace = os.path.expanduser("~/susetest/logs")
if self.testrun:
self.workspace = os.path.join(self.workspace, self.testrun)
self.logspace = os.path.join(self.logspace, self.testrun)
requestedFeatures = set(args.feature)
# We always add 'twopence' because our tests use twopence.
# If the build always has the twopence SUT infrastructure installed,
# great. If it does not, we'll add it.
requestedFeatures.add('twopence')
self.context = Context(self.workspace, self.logspace,
platform = args.platform,
parameters = args.parameter,
dryrun = args.dry_run,
debug = args.debug,
clobber = args.clobber,
requestedFeatures = requestedFeatures)
return
def validate(self):
if not self.valid:
self.valid = self._validate()
return self.valid
def _validate(self):
valid = True
if self.platform is None:
print("Error: no default platform specified; please specify one using --platform")
valid = False
if os.path.exists(self.workspace) and not os.path.isdir(self.workspace):
print(f"Error: workspace {self.workspace} exists, but is not a directory")
valid = False
if os.path.exists(self.logspace) and not os.path.isdir(self.logspace):
print(f"Error: logspace {self.logspace} exists, but is not a directory")
valid = False
return valid
def perform(self):
if not self.validate():
print("Fatal: refusing to run any tests due to above error(s)")
exit(1)
if not self.matrix:
self.context.attachResults(ResultsVector())
self._perform(self.context)
else:
matrix = self.matrix
if not matrix.validate() or not self.context.validateMatrix(matrix):
error(f"Matrix is not compatible with base platform {self.platform}")
print("Fatal: refusing to run any tests due to above error(s)")
exit(1)
self.context.attachResults(ResultsMatrix(matrix.name))
for column in matrix.columns:
context = column.buildContext(self.context)
if not self._perform(context):
info("Aborting test matrix")
break
def _perform(self, context):
testcases = self.pipeline.start(context)
if testcases is None:
print("Fatal: refusing to run any tests due to above error(s)")
exit(1)
okayToContinue = True
testrunConfig = self.createTestrunConfig(context)
for test in testcases:
if not test.isCompatible:
info(f"Skipping {test.description} because it's not compatible with the plaform's feature set")
# FIXME: generate a test report that says all tests we skipped
continue
info("About to perform %s" % test.name)
info(f" Workspace is {test.workspace}")
info(f" Logspace is {test.logspace}")
try:
test.perform(testrunConfig, self.console)
except AbortedTestcase:
print("Test %s was aborted, trying to clean up" % test.name)
test.destroyCluster()
okayToContinue = False
break
if test.testReport:
info(f"Test report can be found in {test.testReport}")
report = LogParser(test.testReport)
context.mergeTestReport(report)
os.remove(testrunConfig)
return okayToContinue
# could be moved to Context
def createTestrunConfig(self, context):
path = os.path.join(self.workspace, "testrun.conf")
info("Creating %s" % path)
config = curly.Config()
tree = config.tree()
node = tree.add_child("role", "default")
node.set_value("platform", context.platform)
node.set_value("repositories", ["twopence", ])
if context.requestedFeatures:
node.set_value("build", list(context.requestedFeatures))
if context.parameters:
child = tree.add_child("parameters")
for paramString in context.parameters:
words = paramString.split('=', maxsplit = 1)
if len(words) != 2:
raise ValueError("argument to --parameter must be in the form name=value, not \"%s\"" % s)
child.set_value(*words)
config.save(path)
info("Contents of %s:" % path)
with open(path) as f:
for l in f.readlines():
print(" %s" % l.rstrip())
return path
def build_arg_parser(self):
import argparse
parser = argparse.ArgumentParser(description = 'Provision and run tests.')
parser.add_argument('--platform',
help = 'specify the OS platform to use for all nodes and roles')
parser.add_argument('--testrun',
help = 'the testrun this test case is part of')
parser.add_argument('--workspace',
help = 'the directory to use as workspace')
parser.add_argument('--logspace',
help = 'the directory to use as logspace')
parser.add_argument('--clobber', default = False, action = 'store_true',
help = 'Clobber existing test results')
parser.add_argument('--parameter', action = 'append',
help = 'Parameters to be passed to the test suite, in name=value format')
parser.add_argument('--matrix',
help = 'Name of a test matrix to be applied to the test cases')
parser.add_argument('--dry-run', default = False, action = 'store_true',
help = 'Do not run any commands, just show what would be done')
parser.add_argument('--debug', default = False, action = 'store_true',
help = 'Enable debugging output from the provisioner')
parser.add_argument('--quiet', default = False, action = 'store_true',
help = 'Do not show output of provisioning and test script')
parser.add_argument('--feature', default = [], action = 'append',
help = 'Specify features you want the deployed image to provide')
parser.add_argument('--interactive', default = False, action = 'store_true',
help = 'Run tests interactively, stopping after each step.')
if self.mode == self.MODE_TESTS:
parser.add_argument('testcase', metavar='TESTCASE', nargs='+',
help = 'name of the test cases to run')
elif self.mode == self.MODE_SUITES:
parser.add_argument('testsuite', metavar='TESTSUITE', nargs='+',
help = 'name of the test suites to run')
return parser
class Inspector:
def __init__(self):
parser = self.build_arg_parser()
args = parser.parse_args()
self.testcases = []
for name in args.testcase:
test = Testcase(name, context = None)
test.validate()
self.testcases.append(test)
def perform(self):
info("Inspecting test cases")
for test in self.testcases:
test.inspect()
def build_arg_parser(self):
import argparse
parser = argparse.ArgumentParser(description = 'Inspect tests.')
parser.add_argument('testcase', metavar='TESTCASE', nargs='+',
help = 'name of the test cases to inspect')
return parser
|
okirch/susetest
|
python/susetest/executor.py
|
Python
|
gpl-2.0
| 25,259
|
# This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <phil@secdev.org>
# This program is published under a GPLv2 license
"""
SMB (Server Message Block), also known as CIFS.
"""
from scapy.packet import *
from scapy.fields import *
from scapy.layers.netbios import NBTSession
# SMB NetLogon Response Header
class SMBNetlogon_Protocol_Response_Header(Packet):
name = "SMBNetlogon Protocol Response Header"
fields_desc = [StrFixedLenField("Start", b"\xffSMB", 4),
ByteEnumField("Command", 0x25, {0x25: "Trans"}),
ByteField("Error_Class", 0x02),
ByteField("Reserved", 0),
LEShortField("Error_code", 4),
ByteField("Flags", 0),
LEShortField("Flags2", 0x0000),
LEShortField("PIDHigh", 0x0000),
LELongField("Signature", 0x0),
LEShortField("Unused", 0x0),
LEShortField("TID", 0),
LEShortField("PID", 0),
LEShortField("UID", 0),
LEShortField("MID", 0),
ByteField("WordCount", 17),
LEShortField("TotalParamCount", 0),
LEShortField("TotalDataCount", 112),
LEShortField("MaxParamCount", 0),
LEShortField("MaxDataCount", 0),
ByteField("MaxSetupCount", 0),
ByteField("unused2", 0),
LEShortField("Flags3", 0),
ByteField("TimeOut1", 0xe8),
ByteField("TimeOut2", 0x03),
LEShortField("unused3", 0),
LEShortField("unused4", 0),
LEShortField("ParamCount2", 0),
LEShortField("ParamOffset", 0),
LEShortField("DataCount", 112),
LEShortField("DataOffset", 92),
ByteField("SetupCount", 3),
ByteField("unused5", 0)]
# SMB MailSlot Protocol
class SMBMailSlot(Packet):
name = "SMB Mail Slot Protocol"
fields_desc = [LEShortField("opcode", 1),
LEShortField("priority", 1),
LEShortField("class", 2),
LEShortField("size", 135),
StrNullField("name", "\\MAILSLOT\\NET\\GETDC660")]
# SMB NetLogon Protocol Response Tail SAM
class SMBNetlogon_Protocol_Response_Tail_SAM(Packet):
name = "SMB Netlogon Protocol Response Tail SAM"
fields_desc = [ByteEnumField("Command", 0x17, {0x12: "SAM logon request", 0x17: "SAM Active directory Response"}), # noqa: E501
ByteField("unused", 0),
ShortField("Data1", 0),
ShortField("Data2", 0xfd01),
ShortField("Data3", 0),
ShortField("Data4", 0xacde),
ShortField("Data5", 0x0fe5),
ShortField("Data6", 0xd10a),
ShortField("Data7", 0x374c),
ShortField("Data8", 0x83e2),
ShortField("Data9", 0x7dd9),
ShortField("Data10", 0x3a16),
ShortField("Data11", 0x73ff),
ByteField("Data12", 0x04),
StrFixedLenField("Data13", "rmff", 4),
ByteField("Data14", 0x0),
ShortField("Data16", 0xc018),
ByteField("Data18", 0x0a),
StrFixedLenField("Data20", "rmff-win2k", 10),
ByteField("Data21", 0xc0),
ShortField("Data22", 0x18c0),
ShortField("Data23", 0x180a),
StrFixedLenField("Data24", "RMFF-WIN2K", 10),
ShortField("Data25", 0),
ByteField("Data26", 0x17),
StrFixedLenField("Data27", "Default-First-Site-Name", 23),
ShortField("Data28", 0x00c0),
ShortField("Data29", 0x3c10),
ShortField("Data30", 0x00c0),
ShortField("Data31", 0x0200),
ShortField("Data32", 0x0),
ShortField("Data33", 0xac14),
ShortField("Data34", 0x0064),
ShortField("Data35", 0x0),
ShortField("Data36", 0x0),
ShortField("Data37", 0x0),
ShortField("Data38", 0x0),
ShortField("Data39", 0x0d00),
ShortField("Data40", 0x0),
ShortField("Data41", 0xffff)]
# SMB NetLogon Protocol Response Tail LM2.0
class SMBNetlogon_Protocol_Response_Tail_LM20(Packet):
name = "SMB Netlogon Protocol Response Tail LM20"
fields_desc = [ByteEnumField("Command", 0x06, {0x06: "LM 2.0 Response to logon request"}), # noqa: E501
ByteField("unused", 0),
StrFixedLenField("DblSlash", "\\\\", 2),
StrNullField("ServerName", "WIN"),
LEShortField("LM20Token", 0xffff)]
# SMBNegociate Protocol Request Header
class SMBNegociate_Protocol_Request_Header(Packet):
name = "SMBNegociate Protocol Request Header"
fields_desc = [StrFixedLenField("Start", b"\xffSMB", 4),
ByteEnumField("Command", 0x72, {0x72: "SMB_COM_NEGOTIATE"}),
ByteField("Error_Class", 0),
ByteField("Reserved", 0),
LEShortField("Error_code", 0),
ByteField("Flags", 0x18),
LEShortField("Flags2", 0x0000),
LEShortField("PIDHigh", 0x0000),
LELongField("Signature", 0x0),
LEShortField("Unused", 0x0),
LEShortField("TID", 0),
LEShortField("PID", 1),
LEShortField("UID", 0),
LEShortField("MID", 2),
ByteField("WordCount", 0),
LEShortField("ByteCount", 12)]
# SMB Negotiate Protocol Request Tail
class SMBNegociate_Protocol_Request_Tail(Packet):
name = "SMB Negotiate Protocol Request Tail"
fields_desc = [ByteField("BufferFormat", 0x02),
StrNullField("BufferData", "NT LM 0.12")]
# SMBNegociate Protocol Response Advanced Security
class SMBNegociate_Protocol_Response_Advanced_Security(Packet):
name = "SMBNegociate Protocol Response Advanced Security"
fields_desc = [StrFixedLenField("Start", b"\xffSMB", 4),
ByteEnumField("Command", 0x72, {0x72: "SMB_COM_NEGOTIATE"}),
ByteField("Error_Class", 0),
ByteField("Reserved", 0),
LEShortField("Error_Code", 0),
ByteField("Flags", 0x98),
LEShortField("Flags2", 0x0000),
LEShortField("PIDHigh", 0x0000),
LELongField("Signature", 0x0),
LEShortField("Unused", 0x0),
LEShortField("TID", 0),
LEShortField("PID", 1),
LEShortField("UID", 0),
LEShortField("MID", 2),
ByteField("WordCount", 17),
LEShortField("DialectIndex", 7),
ByteField("SecurityMode", 0x03),
LEShortField("MaxMpxCount", 50),
LEShortField("MaxNumberVC", 1),
LEIntField("MaxBufferSize", 16144),
LEIntField("MaxRawSize", 65536),
LEIntField("SessionKey", 0x0000),
LEShortField("ServerCapabilities", 0xf3f9),
BitField("UnixExtensions", 0, 1),
BitField("Reserved2", 0, 7),
BitField("ExtendedSecurity", 1, 1),
BitField("CompBulk", 0, 2),
BitField("Reserved3", 0, 5),
# There have been 127490112000000000 tenths of micro-seconds between 1st january 1601 and 1st january 2005. 127490112000000000=0x1C4EF94D6228000, so ServerTimeHigh=0xD6228000 and ServerTimeLow=0x1C4EF94. # noqa: E501
LEIntField("ServerTimeHigh", 0xD6228000),
LEIntField("ServerTimeLow", 0x1C4EF94),
LEShortField("ServerTimeZone", 0x3c),
ByteField("EncryptionKeyLength", 0),
LEFieldLenField("ByteCount", None, "SecurityBlob", adjust=lambda pkt, x: x - 16), # noqa: E501
BitField("GUID", 0, 128),
StrLenField("SecurityBlob", "", length_from=lambda x: x.ByteCount + 16)] # noqa: E501
# SMBNegociate Protocol Response No Security
# When using no security, with EncryptionKeyLength=8, you must have an EncryptionKey before the DomainName # noqa: E501
class SMBNegociate_Protocol_Response_No_Security(Packet):
name = "SMBNegociate Protocol Response No Security"
fields_desc = [StrFixedLenField("Start", b"\xffSMB", 4),
ByteEnumField("Command", 0x72, {0x72: "SMB_COM_NEGOTIATE"}),
ByteField("Error_Class", 0),
ByteField("Reserved", 0),
LEShortField("Error_Code", 0),
ByteField("Flags", 0x98),
LEShortField("Flags2", 0x0000),
LEShortField("PIDHigh", 0x0000),
LELongField("Signature", 0x0),
LEShortField("Unused", 0x0),
LEShortField("TID", 0),
LEShortField("PID", 1),
LEShortField("UID", 0),
LEShortField("MID", 2),
ByteField("WordCount", 17),
LEShortField("DialectIndex", 7),
ByteField("SecurityMode", 0x03),
LEShortField("MaxMpxCount", 50),
LEShortField("MaxNumberVC", 1),
LEIntField("MaxBufferSize", 16144),
LEIntField("MaxRawSize", 65536),
LEIntField("SessionKey", 0x0000),
LEShortField("ServerCapabilities", 0xf3f9),
BitField("UnixExtensions", 0, 1),
BitField("Reserved2", 0, 7),
BitField("ExtendedSecurity", 0, 1),
FlagsField("CompBulk", 0, 2, "CB"),
BitField("Reserved3", 0, 5),
# There have been 127490112000000000 tenths of micro-seconds between 1st january 1601 and 1st january 2005. 127490112000000000=0x1C4EF94D6228000, so ServerTimeHigh=0xD6228000 and ServerTimeLow=0x1C4EF94. # noqa: E501
LEIntField("ServerTimeHigh", 0xD6228000),
LEIntField("ServerTimeLow", 0x1C4EF94),
LEShortField("ServerTimeZone", 0x3c),
ByteField("EncryptionKeyLength", 8),
LEShortField("ByteCount", 24),
BitField("EncryptionKey", 0, 64),
StrNullField("DomainName", "WORKGROUP"),
StrNullField("ServerName", "RMFF1")]
# SMBNegociate Protocol Response No Security No Key
class SMBNegociate_Protocol_Response_No_Security_No_Key(Packet):
name = "SMBNegociate Protocol Response No Security No Key"
fields_desc = [StrFixedLenField("Start", b"\xffSMB", 4),
ByteEnumField("Command", 0x72, {0x72: "SMB_COM_NEGOTIATE"}),
ByteField("Error_Class", 0),
ByteField("Reserved", 0),
LEShortField("Error_Code", 0),
ByteField("Flags", 0x98),
LEShortField("Flags2", 0x0000),
LEShortField("PIDHigh", 0x0000),
LELongField("Signature", 0x0),
LEShortField("Unused", 0x0),
LEShortField("TID", 0),
LEShortField("PID", 1),
LEShortField("UID", 0),
LEShortField("MID", 2),
ByteField("WordCount", 17),
LEShortField("DialectIndex", 7),
ByteField("SecurityMode", 0x03),
LEShortField("MaxMpxCount", 50),
LEShortField("MaxNumberVC", 1),
LEIntField("MaxBufferSize", 16144),
LEIntField("MaxRawSize", 65536),
LEIntField("SessionKey", 0x0000),
LEShortField("ServerCapabilities", 0xf3f9),
BitField("UnixExtensions", 0, 1),
BitField("Reserved2", 0, 7),
BitField("ExtendedSecurity", 0, 1),
FlagsField("CompBulk", 0, 2, "CB"),
BitField("Reserved3", 0, 5),
# There have been 127490112000000000 tenths of micro-seconds between 1st january 1601 and 1st january 2005. 127490112000000000=0x1C4EF94D6228000, so ServerTimeHigh=0xD6228000 and ServerTimeLow=0x1C4EF94. # noqa: E501
LEIntField("ServerTimeHigh", 0xD6228000),
LEIntField("ServerTimeLow", 0x1C4EF94),
LEShortField("ServerTimeZone", 0x3c),
ByteField("EncryptionKeyLength", 0),
LEShortField("ByteCount", 16),
StrNullField("DomainName", "WORKGROUP"),
StrNullField("ServerName", "RMFF1")]
# Session Setup AndX Request
class SMBSession_Setup_AndX_Request(Packet):
name = "Session Setup AndX Request"
fields_desc = [StrFixedLenField("Start", b"\xffSMB", 4),
ByteEnumField("Command", 0x73, {0x73: "SMB_COM_SESSION_SETUP_ANDX"}), # noqa: E501
ByteField("Error_Class", 0),
ByteField("Reserved", 0),
LEShortField("Error_Code", 0),
ByteField("Flags", 0x18),
LEShortField("Flags2", 0x0001),
LEShortField("PIDHigh", 0x0000),
LELongField("Signature", 0x0),
LEShortField("Unused", 0x0),
LEShortField("TID", 0),
LEShortField("PID", 1),
LEShortField("UID", 0),
LEShortField("MID", 2),
ByteField("WordCount", 13),
ByteEnumField("AndXCommand", 0x75, {0x75: "SMB_COM_TREE_CONNECT_ANDX"}), # noqa: E501
ByteField("Reserved2", 0),
LEShortField("AndXOffset", 96),
LEShortField("MaxBufferS", 2920),
LEShortField("MaxMPXCount", 50),
LEShortField("VCNumber", 0),
LEIntField("SessionKey", 0),
LEFieldLenField("ANSIPasswordLength", None, "ANSIPassword"),
LEShortField("UnicodePasswordLength", 0),
LEIntField("Reserved3", 0),
LEShortField("ServerCapabilities", 0x05),
BitField("UnixExtensions", 0, 1),
BitField("Reserved4", 0, 7),
BitField("ExtendedSecurity", 0, 1),
BitField("CompBulk", 0, 2),
BitField("Reserved5", 0, 5),
LEShortField("ByteCount", 35),
StrLenField("ANSIPassword", "Pass", length_from=lambda x: x.ANSIPasswordLength), # noqa: E501
StrNullField("Account", "GUEST"),
StrNullField("PrimaryDomain", ""),
StrNullField("NativeOS", "Windows 4.0"),
StrNullField("NativeLanManager", "Windows 4.0"),
ByteField("WordCount2", 4),
ByteEnumField("AndXCommand2", 0xFF, {0xFF: "SMB_COM_NONE"}),
ByteField("Reserved6", 0),
LEShortField("AndXOffset2", 0),
LEShortField("Flags3", 0x2),
LEShortField("PasswordLength", 0x1),
LEShortField("ByteCount2", 18),
ByteField("Password", 0),
StrNullField("Path", "\\\\WIN2K\\IPC$"),
StrNullField("Service", "IPC")]
# Session Setup AndX Response
class SMBSession_Setup_AndX_Response(Packet):
name = "Session Setup AndX Response"
fields_desc = [StrFixedLenField("Start", b"\xffSMB", 4),
ByteEnumField("Command", 0x73, {0x73: "SMB_COM_SESSION_SETUP_ANDX"}), # noqa: E501
ByteField("Error_Class", 0),
ByteField("Reserved", 0),
LEShortField("Error_Code", 0),
ByteField("Flags", 0x90),
LEShortField("Flags2", 0x1001),
LEShortField("PIDHigh", 0x0000),
LELongField("Signature", 0x0),
LEShortField("Unused", 0x0),
LEShortField("TID", 0),
LEShortField("PID", 1),
LEShortField("UID", 0),
LEShortField("MID", 2),
ByteField("WordCount", 3),
ByteEnumField("AndXCommand", 0x75, {0x75: "SMB_COM_TREE_CONNECT_ANDX"}), # noqa: E501
ByteField("Reserved2", 0),
LEShortField("AndXOffset", 66),
LEShortField("Action", 0),
LEShortField("ByteCount", 25),
StrNullField("NativeOS", "Windows 4.0"),
StrNullField("NativeLanManager", "Windows 4.0"),
StrNullField("PrimaryDomain", ""),
ByteField("WordCount2", 3),
ByteEnumField("AndXCommand2", 0xFF, {0xFF: "SMB_COM_NONE"}),
ByteField("Reserved3", 0),
LEShortField("AndXOffset2", 80),
LEShortField("OptionalSupport", 0x01),
LEShortField("ByteCount2", 5),
StrNullField("Service", "IPC"),
StrNullField("NativeFileSystem", "")]
bind_layers(NBTSession, SMBNegociate_Protocol_Request_Header, )
bind_layers(NBTSession, SMBNegociate_Protocol_Response_Advanced_Security, ExtendedSecurity=1) # noqa: E501
bind_layers(NBTSession, SMBNegociate_Protocol_Response_No_Security, ExtendedSecurity=0, EncryptionKeyLength=8) # noqa: E501
bind_layers(NBTSession, SMBNegociate_Protocol_Response_No_Security_No_Key, ExtendedSecurity=0, EncryptionKeyLength=0) # noqa: E501
bind_layers(NBTSession, SMBSession_Setup_AndX_Request, )
bind_layers(NBTSession, SMBSession_Setup_AndX_Response, )
bind_layers(SMBNegociate_Protocol_Request_Header, SMBNegociate_Protocol_Request_Tail, ) # noqa: E501
bind_layers(SMBNegociate_Protocol_Request_Tail, SMBNegociate_Protocol_Request_Tail, ) # noqa: E501
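# A note on the bind_layers() calls above: they register dissection hints so
# that an NBTSession payload is matched against these SMB classes, with the
# ExtendedSecurity / EncryptionKeyLength field values selecting which of the
# negotiate-response variants scapy instantiates.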
|
smainand/scapy
|
scapy/layers/smb.py
|
Python
|
gpl-2.0
| 18,582
|
# for loop
for x in range(2, 9+1):
for y in range(1, 9+1):
print(x, ' * ', y, ' = ', x*y)
# print(str(x) + ' * ' + str(y) + ' = ' + str(x*y))
# print('%d * %d = %d' % (x, y, x*y))
# while loop
x = 2
while x < 9+1:
y = 1
while y < 9+1:
print(x, ' * ', y, ' = ', x*y)
# print(str(x) + ' * ' + str(y) + ' = ' + str(x*y))
# print('%d * %d = %d' % (x, y, x*y))
y += 1
x += 1
|
yehnan/python_book_yehnan
|
ch02ex/ch02ex2.2_answer.py
|
Python
|
gpl-2.0
| 464
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""
Provide sorting routines for use in GRAMPS. Since these functions are
intended to provide fast sorting, they tend to bypass access methods,
and directly use class members. For this reason, care needs to be taken
to make sure these remain in sync with the rest of the design.
"""
#-------------------------------------------------------------------------
#
# Standard python modules
#
#-------------------------------------------------------------------------
import locale
#-------------------------------------------------------------------------
#
# GRAMPS Modules
#
#-------------------------------------------------------------------------
from lib import Date
from utils.db import get_birth_or_fallback
from display.name import displayer as _nd
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
class Sort(object):
def __init__(self, database):
self.database = database
def by_last_name(self, first_id, second_id):
"""Sort routine for comparing two last names. If last names are equal,
uses the given name and suffix"""
first = self.database.get_person_from_handle(first_id)
second = self.database.get_person_from_handle(second_id)
name1 = first.get_primary_name()
name2 = second.get_primary_name()
fsn = name1.get_surname()
ssn = name2.get_surname()
if fsn == ssn:
ffn = name1.get_first_name()
sfn = name2.get_first_name()
if ffn == sfn:
return locale.strcoll(name1.get_suffix(), name2.get_suffix())
else:
return locale.strcoll(ffn, sfn)
else:
return locale.strcoll(fsn, ssn)
def by_last_name_key(self, first_id):
"""Sort routine for comparing two last names. If last names are equal,
uses the given name and suffix"""
first = self.database.get_person_from_handle(first_id)
name1 = first.get_primary_name()
fsn = name1.get_surname()
ffn = name1.get_first_name()
fsu = name1.get_suffix()
return locale.strxfrm(fsn + ffn + fsu)
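# Note: the two-argument methods in this class are old-style cmp()
# comparators, while the *_key variants return locale.strxfrm() strings, so
# the same collation order is available via sorted(handles, key=...) where
# cmp() support is absent.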
def by_sorted_name(self, first_id, second_id):
"""
Sort routine for comparing two displayed names.
"""
first = self.database.get_person_from_handle(first_id)
second = self.database.get_person_from_handle(second_id)
name1 = _nd.sorted(first)
name2 = _nd.sorted(second)
return locale.strcoll(name1, name2)
def by_sorted_name_key(self, first_id):
"""
Sort key routine based on a person's displayed name.
"""
first = self.database.get_person_from_handle(first_id)
name1 = _nd.sorted(first)
return locale.strxfrm(name1)
def by_birthdate(self, first_id, second_id):
"""Sort routine for comparing two people by birth dates. If the birth dates
are equal, sorts by name"""
first = self.database.get_person_from_handle(first_id)
second = self.database.get_person_from_handle(second_id)
birth1 = get_birth_or_fallback(self.database, first)
if birth1:
date1 = birth1.get_date_object()
else:
date1 = Date()
birth2 = get_birth_or_fallback(self.database, second)
if birth2:
date2 = birth2.get_date_object()
else:
date2 = Date()
dsv1 = date1.get_sort_value()
dsv2 = date2.get_sort_value()
val = cmp(dsv1, dsv2)
if val == 0:
return self.by_last_name(first_id, second_id)
return val
def by_birthdate_key(self, first_id):
"""Sort routine for comparing two people by birth dates. If the birth dates
are equal, sorts by name"""
first = self.database.get_person_from_handle(first_id)
birth1 = get_birth_or_fallback(self.database, first)
if birth1:
date1 = birth1.get_date_object()
else:
date1 = Date()
dsv1 = date1.get_sort_value()
return "%08d" % dsv1 + self.by_last_name_key(first_id)
def by_date(self, a_id, b_id):
"""Sort routine for comparing two events by their dates. """
if not (a_id and b_id):
return 0
a_obj = self.database.get_event_from_handle(a_id)
b_obj = self.database.get_event_from_handle(b_id)
dsv1 = a_obj.get_date_object().get_sort_value()
dsv2 = b_obj.get_date_object().get_sort_value()
return cmp(dsv1, dsv2)
def by_date_key(self, a_id):
"""Sort routine for comparing two events by their dates. """
if not a_id:
return 0
a_obj = self.database.get_event_from_handle(a_id)
return a_obj.get_date_object().get_sort_value()
def by_place_title(self, a_id, b_id):
"""Sort routine for comparing two places. """
if not (a_id and b_id):
return 0
a_obj = self.database.get_place_from_handle(a_id)
b_obj = self.database.get_place_from_handle(b_id)
return locale.strcoll(a_obj.title, b_obj.title)
def by_place_title_key(self, a_id):
"""Sort routine for comparing two places. """
if not a_id:
return 0
a_obj = self.database.get_place_from_handle(a_id)
return locale.strxfrm(a_obj.title)
def by_event_place(self, a_id, b_id):
"""Sort routine for comparing two events by their places. """
if not (a_id and b_id):
return 0
evt_a = self.database.get_event_from_handle(a_id)
evt_b = self.database.get_event_from_handle(b_id)
plc_a = self.database.get_place_from_handle(evt_a.get_place_handle())
plc_b = self.database.get_place_from_handle(evt_b.get_place_handle())
plc_a_title = ""
plc_b_title = ""
if plc_a:
plc_a_title = plc_a.title
if plc_b:
plc_b_title = plc_b.title
return locale.strcoll(plc_a_title, plc_b_title)
def by_event_place_key(self, a_id):
"""Sort routine for comparing two events by their places. """
if not a_id:
return 0
evt_a = self.database.get_event_from_handle(a_id)
plc_a = self.database.get_place_from_handle(evt_a.get_place_handle())
plc_a_title = plc_a.title if plc_a else ""
return locale.strxfrm(plc_a_title)
def by_event_description(self, a_id, b_id):
"""Sort routine for comparing two events by their descriptions. """
if not (a_id and b_id):
return 0
evt_a = self.database.get_event_from_handle(a_id)
evt_b = self.database.get_event_from_handle(b_id)
return locale.strcoll(evt_a.get_description(), evt_b.get_description())
def by_event_description_key(self, a_id):
"""Sort routine for comparing two events by their descriptions. """
if not a_id:
return 0
evt_a = self.database.get_event_from_handle(a_id)
return locale.strxfrm(evt_a.get_description())
def by_event_id(self, a_id, b_id):
"""Sort routine for comparing two events by their ID. """
if not (a_id and b_id):
return 0
evt_a = self.database.get_event_from_handle(a_id)
evt_b = self.database.get_event_from_handle(b_id)
return locale.strcoll(evt_a.get_gramps_id(), evt_b.get_gramps_id())
def by_event_id_key(self, a_id):
"""Sort routine for comparing two events by their ID. """
if not a_id:
return 0
evt_a = self.database.get_event_from_handle(a_id)
return locale.strxfrm(evt_a.get_gramps_id())
def by_event_type(self, a_id, b_id):
"""Sort routine for comparing two events by their type. """
if not (a_id and b_id):
return 0
evt_a = self.database.get_event_from_handle(a_id)
evt_b = self.database.get_event_from_handle(b_id)
return locale.strcoll(str(evt_a.get_type()), str(evt_b.get_type()))
def by_event_type_key(self, a_id):
"""Sort routine for comparing two events by their type. """
if not a_id:
return 0
evt_a = self.database.get_event_from_handle(a_id)
return locale.strxfrm(str(evt_a.get_type()))
def by_media_title(self, a_id, b_id):
"""Sort routine for comparing two media objects by their title. """
if not (a_id and b_id):
return False
a = self.database.get_object_from_handle(a_id)
b = self.database.get_object_from_handle(b_id)
return locale.strcoll(a.desc, b.desc)
def by_media_title_key(self, a_id):
"""Sort routine for comparing two media objects by their title. """
if not a_id:
return False
a = self.database.get_object_from_handle(a_id)
return locale.strxfrm(a.desc)
|
arunkgupta/gramps
|
gramps/gen/sort.py
|
Python
|
gpl-2.0
| 9,779
|
import datetime
from tlsspy.probe.base import Probe
class Timing(Probe):
def probe(self, address, certificates):
'''
Records the start time of the analysis run.
Provides the following keys:
* ``analysis.timing.start`` as ISO time
Probes that depend on this probe:
* 980_timing_
.. _980_timing: probe_980_timing.html
'''
return self.merge(dict(
analysis=dict(timing=dict(start=datetime.datetime.now())),
))
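# The merged result is a plain nested dict, i.e. (illustrative timestamp):
# {'analysis': {'timing': {'start': datetime.datetime(2014, 1, 1, 12, 0)}}}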
PROBES = (
Timing,
)
|
tehmaze/tlsspy
|
tlsspy/probe/probe_020_timing.py
|
Python
|
gpl-2.0
| 536
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008-2013 Zuza Software Foundation
#
# This file is part of Pootle.
#
# Pootle is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# Pootle is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# Pootle; if not, see <http://www.gnu.org/licenses/>.
import logging
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.db.models.signals import pre_delete, post_delete
################################ Signal handlers ##############################
permission_queryset = None
def fix_permission_content_type_pre(sender, instance, **kwargs):
if instance.name == 'pootle' and instance.model == "":
logging.debug("Fixing permissions content types")
global permission_queryset
permission_queryset = list(Permission.objects.filter(content_type=instance))
pre_delete.connect(fix_permission_content_type_pre, sender=ContentType)
def fix_permission_content_type_post(sender, instance, **kwargs):
global permission_queryset
if permission_queryset is not None:
dir_content_type = ContentType.objects.get(app_label='pootle_app',
model='directory')
dir_content_type.name = 'pootle'
dir_content_type.save()
for permission in permission_queryset:
permission.content_type = dir_content_type
permission.save()
permission_queryset = None
post_delete.connect(fix_permission_content_type_post, sender=ContentType)
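# The two handlers form a pair: the pre_delete hook snapshots the permissions
# attached to the doomed ContentType before Django cascades the delete, and
# the post_delete hook re-points that snapshot at the pootle_app 'directory'
# content type so the permissions survive the deletion.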
|
arky/pootle-dev
|
pootle/apps/pootle_app/management/__init__.py
|
Python
|
gpl-2.0
| 2,075
|
import pytest
from cfme.middleware.provider.hawkular import HawkularProvider
from cfme.middleware.server import MiddlewareServer
from cfme.utils.version import current_version
from server_methods import verify_server_running, verify_server_stopped
from server_methods import get_servers_set, verify_server_suspended
from server_methods import get_eap_server, get_hawkular_server
from server_methods import verify_server_starting, verify_server_stopping
from server_methods import get_eap_container_server
pytestmark = [
pytest.mark.usefixtures('setup_provider'),
pytest.mark.uncollectif(lambda: current_version() < '5.7'),
pytest.mark.provider([HawkularProvider], scope="function"),
]
@pytest.yield_fixture(scope="function")
def server(provider):
server = get_eap_server(provider)
yield server
server.restart_server()
def test_list_servers():
"""Tests servers lists between UI, DB and Management system.
Steps:
* Get servers list from UI
* Get servers list from Database
* Get headers from UI
* Compare headers from UI with expected headers list
* Compare content of all the list [UI, Database, Management system]
"""
ui_servers = get_servers_set(MiddlewareServer.servers())
db_servers = get_servers_set(MiddlewareServer.servers_in_db())
mgmt_servers = get_servers_set(MiddlewareServer.servers_in_mgmt())
headers = MiddlewareServer.headers()
headers_expected = ['Server Name', 'Product', 'Host Name', 'Feed', 'Provider']
assert headers == headers_expected
assert ui_servers == db_servers == mgmt_servers, \
("Lists of servers mismatch! UI:{}, DB:{}, MGMT:{}"
.format(ui_servers, db_servers, mgmt_servers))
def test_list_provider_servers(provider):
"""Tests servers lists from current Provider between UI, DB and Management system
Steps:
* Get servers list from UI of provider
* Get servers list from Database of provider
* Get servers list from Management system(Hawkular)
* Compare content of all the list [UI, Database, Management system]
"""
ui_servers = get_servers_set(MiddlewareServer.servers(provider=provider))
db_servers = get_servers_set(MiddlewareServer.servers_in_db(provider=provider))
mgmt_servers = get_servers_set(MiddlewareServer.servers_in_mgmt(provider=provider))
assert ui_servers == db_servers == mgmt_servers, \
("Lists of servers mismatch! UI:{}, DB:{}, MGMT:{}"
.format(ui_servers, db_servers, mgmt_servers))
def test_server_details(provider):
"""Tests server details on UI
Steps:
* Select Hawkular server details in UI
* Compare selected server UI details with CFME database and MGMT system
"""
server = get_hawkular_server(provider)
srv_ui = server.server(method='ui')
srv_db = server.server(method='db')
srv_mgmt = srv_ui.server(method='mgmt')
assert srv_ui, "Server was not found in UI"
assert srv_db, "Server was not found in DB"
assert srv_mgmt, "Server was not found in MGMT system"
assert srv_ui.name == srv_db.name == srv_mgmt.name, \
("server name does not match between UI:{}, DB:{}, MGMT:{}"
.format(srv_ui.name, srv_db.name, srv_mgmt.name))
srv_db.validate_properties()
srv_mgmt.validate_properties()
@pytest.mark.smoke
@pytest.mark.uncollectif(lambda: current_version() < '5.8')
def test_container_server_immutability(provider):
"""Tests container based EAP server immutability on UI
Steps:
* Select container based EAP server details in UI
* Compare selected server UI details with CFME database and MGMT system
* Verify that all menu items are disabled and server is immutable
"""
server = get_eap_container_server(provider)
srv_ui = server.server(method='ui')
srv_db = server.server(method='db')
srv_mgmt = srv_ui.server(method='mgmt')
assert srv_ui, "Server was not found in UI"
assert srv_db, "Server was not found in DB"
assert srv_mgmt, "Server was not found in MGMT system"
assert srv_ui.name == srv_db.name == srv_mgmt.name, \
("server name does not match between UI:{}, DB:{}, MGMT:{}"
.format(srv_ui.name, srv_db.name, srv_mgmt.name))
srv_db.validate_properties()
srv_mgmt.validate_properties()
assert srv_ui.is_immutable(), "Server in container should be immutable"
def test_hawkular_immutability(provider):
"""Tests Hawkular server itself reload operation message on UI
Steps:
* Chooses Hawkular server.
* Checks that server is immutable, i.e. toolbar is hidden.
"""
server = get_hawkular_server(provider)
assert server.is_immutable()
def test_server_reload(provider, server):
"""Tests server reload operation on UI
Steps:
* Invokes 'Reload Server' toolbar operation
* Checks that server status is not running in UI, in DB and in MGMT.
* Waits for some time
* Checks that server status is running in UI, in DB and in MGMT.
"""
verify_server_running(provider, server)
server.reload_server()
verify_server_running(provider, server)
# enable when MiQ server start functionality is implemented
@pytest.mark.uncollect
def test_server_stop(provider, server):
"""Tests server stop operation on UI
Steps:
* Invokes 'Stop Server' toolbar operation
* Checks that server status is stopped in UI, in DB and in MGMT.
"""
verify_server_running(provider, server)
server.stop_server()
verify_server_stopping(provider, server)
verify_server_stopped(provider, server)
server.start_server()
verify_server_starting(provider, server)
verify_server_running(provider, server)
# enable when MiQ server start functionality is implemented
@pytest.mark.uncollect
def test_server_shutdown(provider, server):
"""Tests server gracefully shutdown operation on UI
Steps:
* Invokes 'Gracefully shutdown Server' toolbar operation
* Checks that server status is stopped in UI, in DB and in MGMT.
"""
verify_server_running(provider, server)
server.shutdown_server()
verify_server_stopping(provider, server)
verify_server_stopped(provider, server)
server.start_server()
verify_server_starting(provider, server)
verify_server_running(provider, server)
@pytest.mark.smoke
def test_server_restart(provider, server):
"""Tests server restart operation on UI
Steps:
* Invokes 'Restart Server' toolbar operation
* Checks that server status is not running in UI, in DB and in MGMT.
* Waits for some time
* Checks that server status is running in UI, in DB and in MGMT.
"""
verify_server_running(provider, server)
server.restart_server()
verify_server_running(provider, server)
def test_server_suspend_resume(provider, server):
"""Tests server suspend/resume operation on UI
Steps:
* Invokes Suspend Server' toolbar operation
* Checks that server status is not running in UI, in DB and in MGMT.
* Invokes 'Resume Server' toolbar operation
* Waits for some time
* Checks that server status is running in UI, in DB and in MGMT.
"""
verify_server_running(provider, server)
server.suspend_server()
verify_server_suspended(provider, server)
server.resume_server()
verify_server_running(provider, server)
|
jkandasa/integration_tests
|
cfme/tests/middleware/test_middleware_server.py
|
Python
|
gpl-2.0
| 7,450
|
# Django settings for academicControl project.
#encoding:utf-8
import os
RUTA_PROYECTO = os.path.dirname(os.path.realpath(__file__))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Helier Cortez', 'hdnymib@gmail.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'academicControl', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': 'root',
'PASSWORD': '',
'HOST': 'localhost', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '3306', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/La_Paz'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'es'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '#1v^ftqj9s4y87^tui@0cv3(zma^-e_64=_b43woeckzat1sg7'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'academicControl.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'academicControl.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(RUTA_PROYECTO,'plantilla'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
#app academic
'academic',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
hyms/academicControl
|
academicControl/settings.py
|
Python
|
gpl-2.0
| 5,571
|
from state import *
from sys import stdout
from utils import *
"""
This is how the board should be drawn.
5 . . K . .
4 . G G G .
3 . . . . .
2 D D D D D
1 . . . . .
A B C D E
Internally, the board is represented by converting the state (a byte array)
to a list of 5 lists of strings, as follows:
0 : 0 1 2 3 4
1 : 0 1 2 3 4
2 : 0 1 2 3 4
3 : 0 1 2 3 4
4 : 0 1 2 3 4
"""
_SPACE = ' '
_C_RED = '\033[91m'
_C_GREEN = '\33[32m'
_C_MAGENTA = '\33[35m'
_C_END = '\033[0m'
def get_pos(tile_idx):
"""
Converts a tile index into (row, col) pair, which are the indices into the
list of list of string representation of the board corresponding to the
same position.
:param tile_idx: position of the piece as in number from 0 to 24
:type tile_idx: byte
:return: the offsets of the row and columns of the board location
:rtype: (int, int)
"""
return abs(tile_idx % BOARD_NUM_RANKS - 4), tile_idx // BOARD_NUM_RANKS
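# A sanity check of the conversion above (a sketch assuming BOARD_NUM_RANKS == 5,
# matching the 5x5 drawing in the module docstring):
#     get_pos(0)  == (4, 0)   # bottom row, leftmost column (rank 1, file A)
#     get_pos(4)  == (0, 0)   # top row, leftmost column (rank 5, file A)
#     get_pos(12) == (2, 2)   # the centre tile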
def draw_board_plain(state):
"""
Draws the board from the given state representation to standard output.
:param state: a compact state representation
:type state: array of bytes
"""
board_str = ""
board = [[EMPTY] * BOARD_NUM_FILES for _ in range(BOARD_NUM_RANKS)]
row, col = get_pos(get_king_tile_index(state))
board[row][col] = KING
for _, guard_idx in get_live_guards_enumeration(state):
row, col = get_pos(guard_idx)
board[row][col] = GUARD
for _, dragon_idx in get_live_dragon_enumeration(state):
row, col = get_pos(dragon_idx - DRAGON_BASE)
board[row][col] = DRAGON
for i, rank in enumerate(board):
board_str += RANK[i] + OFF_THE_BOARD
for tile_content in rank:
board_str += _SPACE + tile_content
board_str += "\n"
board_str += "\n"
board_str += OFF_THE_BOARD + OFF_THE_BOARD
for f in FILE:
board_str += OFF_THE_BOARD + f
board_str += "\n"
print(board_str)
def draw_board(state, move_number, terminal=False, utility=0):
"""
Draws the board from the given state representation to standard output.
The board is coloured green on the king player's turn,
and red on the dragon player's turn.
:param state: a compact state representation
:type state: array of bytes
:param move_number: the count of the moves so far in the game
:type move_number: int
:param terminal: is the given state a terminal state?
:type terminal: bool
:param utility: the utility value of the state **if the state is terminal**
:type utility: int
"""
board_str = ""
if terminal:
if utility == DRAW:
board_str += "It's a draw!\n"
else:
board_str += "The "
if utility == KING_WIN:
if stdout.isatty():
board_str += _C_GREEN
board_str += "king "
else:
if stdout.isatty():
board_str += _C_RED
board_str += "dragon "
board_str += "player "
if stdout.isatty():
board_str += _C_END
board_str += "won!\n"
if stdout.isatty():
board_str += _C_MAGENTA
else:
board_str += "Move: " + str(move_number) + " ("
if player_turn(state) == KING_PLAYER:
if stdout.isatty():
board_str += _C_GREEN
board_str += "king "
else:
if stdout.isatty():
board_str += _C_RED
board_str += "dragon "
board_str += "player"
if stdout.isatty():
board_str += _C_END
board_str += "'s turn)\n"
if player_turn(state) == KING_PLAYER:
if stdout.isatty():
board_str += _C_GREEN
else:
if stdout.isatty():
board_str += _C_RED
board = [[EMPTY] * BOARD_NUM_FILES for _ in range(BOARD_NUM_RANKS)]
row, col = get_pos(get_king_tile_index(state))
board[row][col] = KING
for _, guard_idx in get_live_guards_enumeration(state):
row, col = get_pos(guard_idx)
board[row][col] = GUARD
for _, dragon_idx in get_live_dragon_enumeration(state):
row, col = get_pos(dragon_idx - DRAGON_BASE)
board[row][col] = DRAGON
for i, rank in enumerate(board):
board_str += RANK[i] + OFF_THE_BOARD
for tile_content in rank:
board_str += _SPACE + tile_content
board_str += "\n"
board_str += "\n"
board_str += OFF_THE_BOARD + OFF_THE_BOARD
for f in FILE:
board_str += OFF_THE_BOARD + f
board_str += "\n"
if stdout.isatty():
board_str += _C_END
print(board_str)
def get_player_move(state, expanded_state):
"""
Prompts the player to enter a move, then validates the move. Repeats if the
move is not valid. Returns a valid move as a (<from-tile-index>,
<to-tile-index>) pair.
:param state: a compact state representation
:type state: array of bytes
:param expanded_state: the expanded representation of the state
:type expanded_state: dict(byte, char)
:return: a valid move as a (<from-tile-index>, <to-tile-index>) pair
:rtype: (byte, byte)
"""
while True:
move = input("Enter your move: ")
correct_form, from_tile_idx, to_tile_idx = parse_move(move)
if not correct_form:
print("Wrong move format: '" + move + "'.")
continue
if (from_tile_idx, to_tile_idx) not in all_valid_moves(state,
expanded_state):
print("Invalid move: '" + move + "'.")
continue
return from_tile_idx, to_tile_idx
|
francois-rd/madking
|
ui.py
|
Python
|
gpl-2.0
| 5,751
|
from .imaer_document import ImaerDocument
from .metadata import AeriusCalculatorMetadata
from .emission_source import (
EmissionSourceType,
EmissionSource,
EmissionSourceCharacteristics,
SpecifiedHeatContent,
Emission
)
from .roads import (
SRM2Road,
RoadSideBarrier,
StandardVehicle
)
from .gml import get_gml_element
|
opengeogroep/AERIUS-QGIS-plugins
|
ImaerPlugin/imaer4/__init__.py
|
Python
|
gpl-2.0
| 351
|
# This file is part of Superwas.
#
# Superwas is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Superwas is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Superwas. If not, see <http://www.gnu.org/licenses/>.
# JMS Resources, Queue Connection Factories, Queues and Topics
#
# Author: Andre van Dijk (SuperClass IT)
# Date: $Date: 2013-01-04 16:04:36 +0100 (vr, 04 jan 2013) $
# $Id: jms.py 424 2013-01-04 15:04:36Z andre $
class JMSProvider(WASConfig):
"""
General JMS Provider class
"""
level=3
def __init__(self, parent=None):
WASConfig.__init__(self, parent)
self.man_attributes={
'name':"",
'externalInitialContextFactory':"",
'externalProviderURL':""
}
self.opt_attributes={
'description':"",
'classpath':"",
'nativepath':""
}
def setDescription(self, description):
if description is not None: self.opt_attributes['description']=description
self.logValue()
def getDescription(self):
return self.opt_attributes['description']
def setExternalInitialContextFactory(self, externalInitialContextFactory):
if externalInitialContextFactory is not None: self.man_attributes['externalInitialContextFactory']=externalInitialContextFactory
self.logValue()
def getExternalInitialContextFactory(self):
return self.man_attributes['externalInitialContextFactory']
def setExternalProviderURL(self, externalProviderURL):
if externalProviderURL is not None: self.man_attributes['externalProviderURL']=externalProviderURL
self.logValue()
def getExternalProviderURL(self):
return self.man_attributes['externalProviderURL']
def setClasspath(self, classpath):
if classpath is not None: self.opt_attributes['classpath']=classpath
self.logValue()
def getClasspath(self):
return self.opt_attributes['classpath']
def setNativepath(self, nativepath):
if nativepath is not None: self.opt_attributes['nativepath']=nativepath
self.logValue()
def getNativepath(self):
return self.opt_attributes['nativepath']
def validate(self):
if self.getName() in ["WebSphere JMS Provider", "WebSphere MQ JMS Provider"]:
self.wasDefault="true"
WASConfig.validate(self)
class JMSConnectionFactory(WASConfig):
"""
Class to represent a JMS connection factory.
Note that for an MQ type an MQConnectionFactory is created
instead of MQQueueConnectionFactory or
MQTopicConnectionFactory, as stated in the J2EE Message
Provider Standard v1.0, section 6.1.2. Standard: The Connection
Factory must be used instead of Queue Connection Factory
because it supports JMS 1.1 and JMS 1.0.2
Author: Andre van Dijk (SuperClass IT)
Date: $Date: 2013-01-04 16:04:36 +0100 (vr, 04 jan 2013) $
$Id: jms.py 424 2013-01-04 15:04:36Z andre $
"""
level=4
def __init__(self, parent=None):
WASConfig.__init__(self, parent)
self.validParents=["JMSProvider"]
# JMS options
self.man_attributes={
'name':"",
'jndiName':""
}
self.opt_attributes={
'description':"",
'authDataAlias':"",
'XAEnabled':""
}
# Connection pool options
self.connectionPool={"maxConnections": 10, \
"minConnections": 1, \
"unusedTimeout": 1800, \
"agedTimeout": 0, \
"reapTime": 180, \
"connectionTimeout": 180, \
"purgePolicy": "EntirePool"
}
def getOptAttributes(self):
return self.opt_attributes.keys()+self.connectionPool.keys()
def setJndiName(self, jndi):
if jndi is not None:
self.man_attributes['jndiName']=jndi
self.logValue()
def getJndiName(self):
return self.man_attributes['jndiName']
#types = { "MQQUEUE":"MQQueueConnectionFactory", \
#"MQTOPIC":"MQTopicConnectionFactory", \
#"MQCON":"MQConnectionFactory", \
#"WASQUEUE":"WASQueueConnectionFactory", \
#"WASTOPIC":"WASTopicConnectionFactory"\
def setDescription(self, description):
if description is not None:
self.opt_attributes['description']=description
self.logValue()
def getDescription(self):
return self.opt_attributes['description']
def setXAEnabled(self, xa):
"""
Indicates whether to use XA mode.
"""
if xa is not None:
self.opt_attributes['XAEnabled']=xa
self.logValue()
def getXAEnabled(self):
return self.opt_attributes['XAEnabled']
def setMaxConnections(self, maxConnections):
if maxConnections is not None:
self.connectionPool['maxConnections']=maxConnections
self.logValue()
def getMaxConnections(self):
return self.connectionPool['maxConnections']
def setMinConnections(self, minConnections):
if minConnections is not None:
self.connectionPool['minConnections']=minConnections
self.logValue()
def getMinConnections(self):
return self.connectionPool['minConnections']
def setUnusedTimeout(self, unusedTimeout):
if unusedTimeout is not None:
self.connectionPool['unusedTimeout']=unusedTimeout
self.logValue()
def getUnusedTimeout(self):
return self.connectionPool['unusedTimeout']
def setAgedTimeout(self, agedTimeout):
if agedTimeout is not None:
self.connectionPool['agedTimeout']=agedTimeout
self.logValue()
def getAgedTimeout(self):
return self.connectionPool['agedTimeout']
def setReapTime(self, reapTime):
if reapTime is not None:
self.connectionPool['reapTime']=reapTime
self.logValue()
def getReapTime(self):
return self.connectionPool['reapTime']
def setConnectionTimeout(self, connectionTimeout):
if connectionTimeout is not None:
self.connectionPool['connectionTimeout']=connectionTimeout
self.logValue()
def getConnectionTimeout(self):
return self.connectionPool['connectionTimeout']
def setPurgePolicy(self, purgePolicy):
if purgePolicy is not None:
self.connectionPool['purgePolicy']=purgePolicy
self.logValue()
def getPurgePolicy(self):
return self.connectionPool['purgePolicy']
def setAuthDataAlias(self, authDataAlias):
if authDataAlias is not None:
self.opt_attributes['authDataAlias']=authDataAlias
self.logValue()
def getAuthDataAlias(self):
return self.opt_attributes['authDataAlias']
def create(self):
# Abstract class
pass
class MQConnectionFactory(JMSConnectionFactory):
def __init__(self, parent=None):
JMSConnectionFactory.__init__(self, parent)
self.man_attributes={
'name': "",
'jndiName' : "",
'host' : "",
}
self.opt_attributes={
'description': "",
'XAEnabled':"true",
'port' : 0,
'channel' : "",
'queueManager' : "",
'transportType' : "BINDINGS",
'authDataAlias': ""
}
# Session pool options
self.__sessionPool={
"minConnections" : 1,
"maxConnections" : 10,
"unusedTimeout" : 1800,
"agedTimeout" : 0,
"reapTime" : 180,
"connectionTimeout" : 180,
"purgePolicy": "EntirePool"
}
self.__ssl={
'sslType':None,
'sslConfiguration':None
}
def getOptAttributes(self):
return self.__ssl.keys()+JMSConnectionFactory.getOptAttributes(self)+[ "session%s%s" % (k[0].upper(),k[1:]) for k in self.__sessionPool.keys()]
def setHost(self, host):
if host is not None:
self.man_attributes['host']=host
self.logValue()
def getHost(self):
return self.man_attributes['host']
def setPort(self, port):
if port is not None:
self.opt_attributes['port']=port
self.logValue()
def getPort(self):
return self.opt_attributes['port']
def setChannel(self, channel):
if channel is not None:
self.opt_attributes['channel']=channel
self.logValue()
def getChannel(self):
return self.opt_attributes['channel']
def setQueueManager(self, queueManager):
if queueManager is not None:
self.opt_attributes['queueManager']=queueManager
self.logValue()
def getQueueManager(self):
return self.opt_attributes['queueManager']
def setTransportType(self, transportType):
if transportType is not None:
if transportType not in ['BINDINGS', 'CLIENT', 'DIRECT', 'QUEUED']: raise Exception('JMS transport type should be: BINDINGS|CLIENT|DIRECT|QUEUED')
self.opt_attributes['transportType']=transportType
self.logValue()
def getTransportType(self):
return self.opt_attributes['transportType']
def setSessionMinConnections(self, minSessions):
if minSessions is not None:
self.__sessionPool['minConnections']=minSessions
self.logValue()
def getSessionMinConnections(self):
return self.__sessionPool['minConnections']
def setSessionMaxConnections(self, maxSessions):
if maxSessions is not None:
self.__sessionPool['maxConnections']=maxSessions
self.logValue()
def getSessionMaxConnections(self):
return self.__sessionPool['maxConnections']
def setSessionUnusedTimeout(self, sessionUnusedTimeout):
if sessionUnusedTimeout is not None:
self.__sessionPool['unusedTimeout']=sessionUnusedTimeout
self.logValue()
def getSessionUnusedTimeout(self):
return self.__sessionPool['unusedTimeout']
def setSessionAgedTimeout(self, sessionAgedTimeout):
if sessionAgedTimeout is not None:
self.__sessionPool['agedTimeout']=sessionAgedTimeout
self.logValue()
def getSessionAgedTimeout(self):
return self.__sessionPool['agedTimeout']
def setSessionReapTime(self, sessionReapTime):
if sessionReapTime is not None:
self.__sessionPool['reapTime']=sessionReapTime
self.logValue()
def getSessionReapTime(self):
return self.__sessionPool['reapTime']
def setSessionConnectionTimeout(self, connectionTimeout):
if connectionTimeout is not None:
self.__sessionPool['connectionTimeout']=connectionTimeout
self.logValue()
def getSessionConnectionTimeout(self):
return self.__sessionPool['connectionTimeout']
def setSessionPurgePolicy(self, purgePolicy):
if purgePolicy is not None:
self.__sessionPool['purgePolicy']=purgePolicy
self.logValue()
def getSessionPurgePolicy(self):
return self.__sessionPool['purgePolicy']
def setSslType(self, sslType):
if sslType is not None:
if sslType not in ['SPECIFIC','CENTRAL']:
raise Exception("SSLType should be SPECIFIC or CENTRAL")
else:
self.__ssl['sslType']=sslType
self.logValue()
def getSslType(self):
return self.__ssl['sslType']
def setSslConfiguration(self, sslConfiguration):
if sslConfiguration is not None:
self.__ssl['sslConfiguration']=sslConfiguration
self.logValue()
def getSslConfiguration(self):
return self.__ssl['sslConfiguration']
def getStatistics(self):
servers=AdminControl.queryMBeans('type=Perf,*')
for s in servers.toArray():
j2cfs=AdminControl.queryMBeans('type=Server,node=%s,process=%s,*' % (s.getObjectName().getKeyProperty("node"),s.getObjectName().getKeyProperty("process") ))
for j in j2cfs.toArray():
j2cStats=AdminControl.invoke_jmx(s.getObjectName(),'getStatsObject',[j.getObjectName(),java.lang.Boolean('true')],['javax.management.ObjectName','java.lang.Boolean'])
if j2cStats is not None:
for i in j2cStats.subCollections():
if i.getName()=="j2cModule":
for h in j2cStats.getStats("j2cModule").getSubStats():
for k in h.getSubStats():
if k.getName()==self.getJndiName():
print k.getName()
WASConfig.getStatistics(self)
def create(self):
WASConfig.create(self)
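# Select the WebSphere-supplied template whose XAEnabled flag matches this
# factory's setting; AdminConfig.createUsingTemplate below clones it.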
templateList = Util.wslist(AdminConfig.listTemplates( self.getConfigType() ))
templateToUse = None
for template in templateList:
if ( AdminConfig.showAttribute( template, "XAEnabled" )==self.getXAEnabled() ):
templateToUse = template
break
if templateToUse is None: raise Exception("No proper template found for %s (xa: %s)" % (self.getType(), self.getXAEnabled()))
authOptions=[]
options=Util.dictToList(self.man_attributes)+Util.dictToList(self.opt_attributes)
if self.getAuthDataAlias() is not None:
options.append(["xaRecoveryAuthAlias", self.getAuthDataAlias()])
if self.getSslType() is not None:
options.append(["sslType", self.getSslType()])
if self.getSslConfiguration() is not None:
options.append(["sslConfiguration", self.getSslConfiguration()])
logger.debug("jmsOptions : %s" % options)
self.configID=AdminConfig.createUsingTemplate(self.getConfigType(), self.getParent().getConfigID(), options, templateToUse )
if self.getAuthDataAlias() is not None:
AdminConfig.create('MappingModule', self.getConfigID(), [ ['authDataAlias', self.getAuthDataAlias()], ['mappingConfigAlias','DefaultPrincipalMapping']])
logger.info("Setting connection pool options for %s: %s" % (self.getType(),self.getName()))
logger.debug("connectionPool : %s" % self.connectionPool)
connectionPool = AdminConfig.showAttribute( self.getConfigID(), "connectionPool" )
AdminConfig.modify( connectionPool, Util.dictToList(self.connectionPool) )
logger.info("Setting session pool options for %s: %s" % (self.getType(),self.getName()))
logger.debug("sessionPool : %s" % self.__sessionPool)
sessionPool = AdminConfig.showAttribute( self.getConfigID(), "sessionPool" )
AdminConfig.modify( sessionPool, Util.dictToList(self.__sessionPool))
logger.info("Successfully created %s: %s" % (self.getType(),self.getName()))
class MQQueueConnectionFactory(MQConnectionFactory):
def __init__(self, parent=None):
MQConnectionFactory.__init__(self, parent)
class MQTopicConnectionFactory(MQConnectionFactory):
def __init__(self, parent=None):
MQConnectionFactory.__init__(self, parent)
class JmsDestination(WASConfig):
"""
Class to represent a JMS Destination, either a queue or a topic.
Author: Andre van Dijk (SuperClass IT)
$Rev: 424 $
"""
level=4
def __init__(self, parent=None):
WASConfig.__init__(self, parent)
self.validParents=["JMSProvider"]
self.man_attributes={
'name':"",
'jndiName':""
}
self.opt_attributes={
"expiry" : "APPLICATION_DEFINED",
"persistence" : "APPLICATION_DEFINED",
"priority" : "APPLICATION_DEFINED",
"specifiedExpiry" : 0,
"specifiedPriority" : 0
}
def setJndiName(self, jndi):
if jndi is not None:
self.man_attributes['jndiName']=jndi
self.logValue()
def getJndiName(self):
return self.man_attributes['jndiName']
def setExpiry(self, expiry):
if expiry is not None:
expiries = ["APPLICATION_DEFINED","SPECIFIED","UNLIMITED"]
if expiry not in expiries: raise Exception("Illegal JMS Destination expiry: should be APPLICATION_DEFINED, SPECIFIED or UNLIMITED")
self.opt_attributes['expiry']=expiry
self.logValue()
def getExpiry(self):
return self.opt_attributes['expiry']
def setPersistence(self, persistence):
if persistence is not None:
persistences = ["APPLICATION_DEFINED","SPECIFIED","UNLIMITED"]
if persistence not in persistences: raise Exception("Illegal JMS Destination persistence: should be APPLICATION_DEFINED, SPECIFIED or UNLIMITED")
self.opt_attributes['persistence']=persistence
self.logValue()
def getPersistence(self):
return self.opt_attributes['persistence']
def setPriority(self, priority):
if priority is not None:
priorities = ["APPLICATION_DEFINED","SPECIFIED","UNLIMITED"]
if priority not in priorities: raise Exception("Illegal JMS Destination priority: should be APPLICATION_DEFINED, SPECIFIED or UNLIMITED")
self.opt_attributes['priority']=priority
self.logValue()
def getPriority(self):
return self.opt_attributes['priority']
def setSpecifiedExpiry(self, specifiedExpiry):
if specifiedExpiry is not None:
self.opt_attributes['specifiedExpiry']=specifiedExpiry
self.logValue()
def getSpecifiedExpiry(self):
return self.opt_attributes['specifiedExpiry']
def setSpecifiedPriority(self, specifiedPriority):
if specifiedPriority is not None:
self.opt_attributes['specifiedPriority']=specifiedPriority
self.logValue()
def getSpecifiedPriority(self):
return self.opt_attributes['specifiedPriority']
def create(self):
# Abstract class
pass
class MQDestination(JmsDestination):
"""
Base class for MQ destinations (queues and topics).
Author: Andre van Dijk (SuperClass IT)
$Rev: 424 $
"""
def __init__(self, parent=None):
JmsDestination.__init__(self, parent)
self.man_attributes={
'name':"",
'jndiName':""
}
self.opt_attributes.update({
"targetClient" : "MQ",
"useNativeEncoding" : "false",
"decimalEncoding" : "Normal" ,
"integerEncoding" : "Normal" ,
"floatingPointEncoding" : "IEEENormal"
})
def setCcsid(self, ccsid):
if ccsid is not None:
self.opt_attributes['CCSID']=ccsid
self.logValue()
def getCcsid(self):
return self.opt_attributes['CCSID']
def setTargetClient(self, targetClient):
if targetClient is not None:
targetClients = ["MQ","JMS"]
if targetClient not in targetClients: raise Exception("Illegal JMS Destination targetclient: should be MQ or JMS")
self.opt_attributes['targetClient']=targetClient
self.logValue()
def getTargetClient(self):
return self.opt_attributes['targetClient']
def setUseNativeEncoding(self, useNativeEncoding):
if useNativeEncoding is not None:
self.opt_attributes['useNativeEncoding']=useNativeEncoding
self.logValue()
def getUseNativeEncoding(self):
return self.opt_attributes['useNativeEncoding']
def setDecimalEncoding(self, decimalEncoding):
if decimalEncoding is not None:
decimalEncodings = ["Normal","Reversed"]
if decimalEncoding not in decimalEncodings: raise Exception("Illegal JMS Destination decimalEncoding: should be Normal or Reversed")
self.opt_attributes['decimalEncoding']=decimalEncoding
self.logValue()
def getDecimalEncoding(self):
return self.opt_attributes['decimalEncoding']
def setIntegerEncoding(self, integerEncoding):
if integerEncoding is not None:
integerEncodings = ["Normal","Reversed"]
if integerEncoding not in integerEncodings: raise Exception("Illegal JMS Destination integerEncoding: should be Normal or Reversed")
self.opt_attributes['integerEncoding']=integerEncoding
self.logValue()
def getIntegerEncoding(self):
return self.opt_attributes['integerEncoding']
def setFloatingPointEncoding(self, floatingPointEncoding):
if floatingPointEncoding is not None:
floatingPointEncodings = ["IEEENormal","IEEEReversed","S390"]
if floatingPointEncoding not in floatingPointEncodings: raise Exception("Illegal JMS Destination floatingPointEncoding: should be IEEENormal, IEEEReversed or S390")
self.opt_attributes['floatingPointEncoding']=floatingPointEncoding
self.logValue()
def getFloatingPointEncoding(self):
return self.opt_attributes['floatingPointEncoding']
def create(self):
WASConfig.create(self)
AdminConfig.create(self.getConfigType(), self.getParent().getConfigID(), Util.dictToList(self.man_attributes)+Util.dictToList(self.opt_attributes) )
class MQQueue(MQDestination):
"""
Class to represent a MQQueue.
Author: Andre van Dijk (SuperClass IT)
$Rev: 424 $
"""
def __init__(self, parent=None):
MQDestination.__init__(self, parent)
self.man_attributes.update({
"baseQueueName" : ""
})
self.opt_attributes.update({
"baseQueueManagerName" : ""
})
def setBaseQueueName(self, mqName):
if mqName is not None:
self.man_attributes['baseQueueName']=mqName
self.logValue()
def getBaseQueueName(self):
return self.man_attributes['baseQueueName']
def setBaseQueueManagerName(self, queueManagerName):
if queueManagerName is not None:
self.opt_attributes['baseQueueManagerName']=queueManagerName
self.logValue()
def getBaseQueueManagerName(self):
return self.opt_attributes['baseQueueManagerName']
class MQTopic(MQDestination):
"""
Class to represent a MQTopic.
Author: Andre van Dijk (SuperClass IT)
$Rev: 424 $
"""
def __init__(self, parent=None):
MQDestination.__init__(self, parent)
self.man_attributes.update({
"baseTopicName" : ""
})
def setBaseTopicName(self, mqName):
if mqName is not None:
self.man_attributes['baseTopicName']=mqName
self.logValue()
def getBaseTopicName(self):
return self.man_attributes['baseTopicName']
|
superclass/superwas
|
jms.py
|
Python
|
gpl-2.0
| 20,003
|
#! /usr/bin/env python
#
# smadata2.inverter.smabluetooth - Support for Bluetooth enabled SMA inverters
# Copyright (C) 2014 David Gibson <david@gibson.dropbear.id.au>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import print_function
from __future__ import division
import sys
import getopt
import time
import bluetooth
import readline
import base
from smadata2.datetimeutil import format_time
__all__ = ['Connection',
'OTYPE_PPP', 'OTYPE_PPP2', 'OTYPE_HELLO', 'OTYPE_GETVAR',
'OTYPE_VARVAL', 'OTYPE_ERROR',
'OVAR_SIGNAL',
'int2bytes16', 'int2bytes32', 'bytes2int']
OUTER_HLEN = 18
OTYPE_PPP = 0x01
OTYPE_HELLO = 0x02
OTYPE_GETVAR = 0x03
OTYPE_VARVAL = 0x04
OTYPE_ERROR = 0x07
OTYPE_PPP2 = 0x08
OVAR_SIGNAL = 0x05
INNER_HLEN = 36
SMA_PROTOCOL_ID = 0x6560
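# Layout of an inner (protocol 0x6560) frame, as parsed in rx_ppp below:
#   byte 0      total length / 4        byte 1      a2
#   bytes 2-7   destination address     bytes 8-9   b1, b2
#   bytes 10-15 source address          bytes 16-17 c1, c2
#   bytes 18-19 error code              bytes 20-21 packet count
#   bytes 22-23 tag (bit 15 = first)    bytes 24-25 type (bit 0 = response)
#   bytes 26-27 subtype                 bytes 28-31 arg1
#   bytes 32-35 arg2                    bytes 36+   extra payload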
def waiter(fn):
def waitfn(self, *args):
fn(self, *args)
if hasattr(self, '__waitcond_' + fn.__name__):
wc = getattr(self, '__waitcond_' + fn.__name__)
if wc is None:
self.waitvar = args
else:
self.waitvar = wc(*args)
return waitfn
def _check_header(hdr):
if len(hdr) < OUTER_HLEN:
raise ValueError()
if hdr[0] != 0x7e:
raise Error("Missing packet start marker")
if (hdr[1] > 0x70) or (hdr[2] != 0):
raise Error("Bad packet length")
if hdr[3] != (hdr[0] ^ hdr[1] ^ hdr[2]):
raise Error("Bad header check byte")
return hdr[1]
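# For example, a packet whose total length is 0x22 bytes starts with the
# header bytes 7e 22 00 5c, since 0x7e ^ 0x22 ^ 0x00 == 0x5c; given the full
# 18-byte outer header, _check_header returns 0x22 for it.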
def ba2str(addr):
if len(addr) != 6:
raise ValueError("Bad length for bluetooth address")
assert len(addr) == 6
return "%02X:%02X:%02X:%02X:%02X:%02X" % tuple(reversed(addr))
def str2ba(s):
addr = [int(x, 16) for x in s.split(':')]
addr.reverse()
if len(addr) != 6:
raise ValueError("Bad length for bluetooth address")
return bytearray(addr)
def int2bytes16(v):
return bytearray([v & 0xff, v >> 8])
def int2bytes32(v):
return bytearray([v & 0xff, (v >> 8) & 0xff, (v >> 16) & 0xff, v >> 24])
def bytes2int(b):
v = 0
while b:
v = v << 8
v += b.pop()
return v
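# These helpers use little-endian byte order; a few sanity checks:
#     int2bytes16(0x1234)                == bytearray([0x34, 0x12])
#     int2bytes32(0x12345678)            == bytearray([0x78, 0x56, 0x34, 0x12])
#     bytes2int(bytearray([0x34, 0x12])) == 0x1234
# Note that bytes2int consumes (pops from) the bytearray it is given.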
crc16_table = [0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7,
0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e,
0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876,
0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd,
0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5,
0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c,
0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974,
0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3,
0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a,
0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72,
0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9,
0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1,
0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738,
0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70,
0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7,
0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036,
0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e,
0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5,
0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd,
0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134,
0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c,
0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3,
0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb,
0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a,
0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1,
0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9,
0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330,
0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78]
assert(len(crc16_table) == 256)
def crc16(iv, data):
crc = iv
for b in data:
crc = (crc >> 8) ^ crc16_table[(crc ^ b) & 0xff]
return crc ^ 0xffff
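# This is the PPP 16-bit FCS: tx_ppp appends int2bytes16(crc16(0xffff, frame))
# to each frame, and rx_ppp_raw recomputes the CRC over the frame body and
# compares it against the trailing two bytes.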
class Connection(base.InverterConnection):
MAXBUFFER = 512
BROADCAST = "ff:ff:ff:ff:ff:ff"
BROADCAST2 = bytearray('\xff\xff\xff\xff\xff\xff')
def __init__(self, addr):
self.sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
self.sock.connect((addr, 1))
self.remote_addr = addr
self.local_addr = self.sock.getsockname()[0]
self.local_addr2 = bytearray('\x78\x00\x3f\x10\xfb\x39')
self.rxbuf = bytearray()
self.pppbuf = dict()
self.tagcounter = 0
def gettag(self):
self.tagcounter += 1
return self.tagcounter
#
# RX side
#
def rx(self):
space = self.MAXBUFFER - len(self.rxbuf)
self.rxbuf += self.sock.recv(space)
while len(self.rxbuf) >= OUTER_HLEN:
pktlen = _check_header(self.rxbuf[:OUTER_HLEN])
if len(self.rxbuf) < pktlen:
return
pkt = self.rxbuf[:pktlen]
del self.rxbuf[:pktlen]
self.rx_raw(pkt)
@waiter
def rx_raw(self, pkt):
from_ = ba2str(pkt[4:10])
to_ = ba2str(pkt[10:16])
type_ = bytes2int(pkt[16:18])
payload = pkt[OUTER_HLEN:]
self.rx_outer(from_, to_, type_, payload)
def rxfilter_outer(self, to_):
return ((to_ == self.local_addr)
or (to_ == self.BROADCAST)
or (to_ == "00:00:00:00:00:00"))
@waiter
def rx_outer(self, from_, to_, type_, payload):
if not self.rxfilter_outer(to_):
return
if (type_ == OTYPE_PPP) or (type_ == OTYPE_PPP2):
self.rx_ppp_raw(from_, payload)
def rx_ppp_raw(self, from_, payload):
if from_ not in self.pppbuf:
self.pppbuf[from_] = bytearray()
pppbuf = self.pppbuf[from_]
pppbuf.extend(payload)
term = pppbuf.find('\x7e', 1)
if term < 0:
return
raw = pppbuf[:term+1]
del pppbuf[:term+1]
assert raw[-1] == 0x7e
if raw[0] != 0x7e:
raise Error("Missing flag byte on PPP packet")
raw = raw[1:-1]
frame = bytearray()
while raw:
b = raw.pop(0)
if b == 0x7d:
frame.append(raw.pop(0) ^ 0x20)
else:
frame.append(b)
if (frame[0] != 0xff) or (frame[1] != 0x03):
raise Error("Bad header on PPP frame")
pcrc = bytes2int(frame[-2:])
ccrc = crc16(0xffff, frame[:-2])
if pcrc != ccrc:
raise Error("Bad CRC on PPP frame")
protocol = bytes2int(frame[2:4])
self.rx_ppp(from_, protocol, frame[4:-2])
@waiter
def rx_ppp(self, from_, protocol, payload):
if protocol == SMA_PROTOCOL_ID:
innerlen = payload[0]
if len(payload) != (innerlen * 4):
raise Error("Inner length field (0x%02x = %d bytes)"
" does not match actual length (%d bytes)"
% (innerlen, innerlen * 4, len(payload)))
a2 = payload[1]
to2 = payload[2:8]
b1 = payload[8]
b2 = payload[9]
from2 = payload[10:16]
c1 = payload[16]
c2 = payload[17]
error = bytes2int(payload[18:20])
pktcount = bytes2int(payload[20:22])
tag = bytes2int(payload[22:24])
first = bool(tag & 0x8000)
tag = tag & 0x7fff
type_ = bytes2int(payload[24:26])
response = bool(type_ & 1)
type_ = type_ & ~1
subtype = bytes2int(payload[26:28])
arg1 = bytes2int(payload[28:32])
arg2 = bytes2int(payload[32:36])
extra = payload[36:]
self.rx_6560(from2, to2, a2, b1, b2, c1, c2, tag,
type_, subtype, arg1, arg2, extra,
response, error, pktcount, first)
def rxfilter_6560(self, to2):
return ((to2 == self.local_addr2)
or (to2 == self.BROADCAST2))
@waiter
def rx_6560(self, from2, to2, a2, b1, b2, c1, c2, tag,
type_, subtype, arg1, arg2, extra,
response, error, pktcount, first):
if not self.rxfilter_6560(to2):
return
pass
#
# Tx side
#
def tx_raw(self, pkt):
if _check_header(pkt) != len(pkt):
raise ValueError("Bad packet")
self.sock.send(str(pkt))
def tx_outer(self, from_, to_, type_, payload):
pktlen = len(payload) + OUTER_HLEN
pkt = bytearray([0x7e, pktlen, 0x00, pktlen ^ 0x7e])
pkt += str2ba(from_)
pkt += str2ba(to_)
pkt += int2bytes16(type_)
pkt += payload
assert _check_header(pkt) == pktlen
self.tx_raw(pkt)
def tx_ppp(self, to_, protocol, payload):
frame = bytearray('\xff\x03')
frame += int2bytes16(protocol)
frame += payload
frame += int2bytes16(crc16(0xffff, frame))
rawpayload = bytearray()
rawpayload.append(0x7e)
for b in frame:
# Escape \x7e (FLAG), 0x7d (ESCAPE), 0x11 (XON) and 0x13 (XOFF)
if b in [0x7e, 0x7d, 0x11, 0x13]:
rawpayload.append(0x7d)
rawpayload.append(b ^ 0x20)
else:
rawpayload.append(b)
rawpayload.append(0x7e)
self.tx_outer(self.local_addr, to_, OTYPE_PPP, rawpayload)
def tx_6560(self, from2, to2, a2, b1, b2, c1, c2, tag,
type_, subtype, arg1, arg2, extra=bytearray(),
response=False, error=0, pktcount=0, first=True):
if len(extra) % 4 != 0:
raise Error("Inner protocol payloads must "
"have a length that is a multiple of 4 bytes")
innerlen = (len(extra) + INNER_HLEN) // 4
payload = bytearray()
payload.append(innerlen)
payload.append(a2)
payload.extend(to2)
payload.append(b1)
payload.append(b2)
payload.extend(from2)
payload.append(c1)
payload.append(c2)
payload.extend(int2bytes16(error))
payload.extend(int2bytes16(pktcount))
if first:
xtag = tag | 0x8000
else:
xtag = tag
payload.extend(int2bytes16(xtag))
if type_ & 0x1:
raise ValueError
if response:
xtype = type_ | 1
else:
xtype = type_
payload.extend(int2bytes16(xtype))
payload.extend(int2bytes16(subtype))
payload.extend(int2bytes32(arg1))
payload.extend(int2bytes32(arg2))
payload.extend(extra)
self.tx_ppp("ff:ff:ff:ff:ff:ff", SMA_PROTOCOL_ID, payload)
return tag
def tx_logon(self, password='0000', timeout=900):
if len(password) > 12:
raise ValueError
password += '\x00' * (12 - len(password))
tag = self.gettag()
extra = bytearray('\xaa\xaa\xbb\xbb\x00\x00\x00\x00')
extra += bytearray(((ord(c) + 0x88) % 0xff) for c in password)
return self.tx_6560(self.local_addr2, self.BROADCAST2, 0xa0,
0x00, 0x01, 0x00, 0x01, tag,
0x040c, 0xfffd, 7, timeout, extra)
def tx_gdy(self):
return self.tx_6560(self.local_addr2, self.BROADCAST2,
0xa0, 0x00, 0x00, 0x00, 0x00, self.gettag(),
0x200, 0x5400, 0x00262200, 0x002622ff)
def tx_yield(self):
return self.tx_6560(self.local_addr2, self.BROADCAST2,
0xa0, 0x00, 0x00, 0x00, 0x00, self.gettag(),
0x200, 0x5400, 0x00260100, 0x002601ff)
def tx_historic(self, fromtime, totime):
return self.tx_6560(self.local_addr2, self.BROADCAST2,
0xe0, 0x00, 0x00, 0x00, 0x00, self.gettag(),
0x200, 0x7000, fromtime, totime)
def tx_historic_daily(self, fromtime, totime):
return self.tx_6560(self.local_addr2, self.BROADCAST2,
0xe0, 0x00, 0x00, 0x00, 0x00, self.gettag(),
0x200, 0x7020, fromtime, totime)
def wait(self, class_, cond=None):
self.waitvar = None
fn = getattr(self, 'rx_' + class_)
setattr(self, '__waitcond_rx_' + class_, cond)
while self.waitvar is None:
self.rx()
delattr(self, '__waitcond_rx_' + class_)
return self.waitvar
def wait_outer(self, wtype, wpl=bytearray()):
def wfn(from_, to_, type_, payload):
if ((type_ == wtype)
and payload.startswith(wpl)):
return payload
return self.wait('outer', wfn)
def wait_6560(self, wtag):
def tagfn(from2, to2, a2, b1, b2, c1, c2, tag,
type_, subtype, arg1, arg2, extra,
response, error, pktcount, first):
if response and (tag == wtag):
if (pktcount != 0) or not first:
raise Error("Unexpected multipacket reply")
if error:
raise Error("SMA device returned error 0x%x" % error)
return (from2, type_, subtype, arg1, arg2, extra)
return self.wait('6560', tagfn)
def wait_6560_multi(self, wtag):
tmplist = []
def multiwait_6560(from2, to2, a2, b1, b2, c1, c2, tag,
type_, subtype, arg1, arg2, extra,
response, error, pktcount, first):
if not response or (tag != wtag):
return None
if not tmplist:
if not first:
raise Error("Didn't see first packet of reply")
tmplist.append(pktcount + 1) # Expected number of packets
else:
expected = tmplist[0]
sofar = len(tmplist) - 1
if pktcount != (expected - sofar - 1):
raise Error("Got packet index %d instead of %d"
% (pktcount, expected - sofar - 1))
tmplist.append((from2, type_, subtype, arg1, arg2, extra))
if pktcount == 0:
return True
self.wait('6560', multiwait_6560)
assert(len(tmplist) == (tmplist[0] + 1))
return tmplist[1:]
# Operations
def hello(self):
hellopkt = self.wait_outer(OTYPE_HELLO)
if hellopkt != bytearray('\x00\x04\x70\x00\x01\x00\x00\x00'
+ '\x00\x01\x00\x00\x00'):
raise Error("Unexpected HELLO %r" % hellopkt)
self.tx_outer("00:00:00:00:00:00", self.remote_addr,
OTYPE_HELLO, hellopkt)
self.wait_outer(0x05)
def getvar(self, varid):
self.tx_outer("00:00:00:00:00:00", self.remote_addr, OTYPE_GETVAR,
int2bytes16(varid))
val = self.wait_outer(OTYPE_VARVAL, int2bytes16(varid))
return val[2:]
def getsignal(self):
val = self.getvar(OVAR_SIGNAL)
return val[2] / 0xff
def do_6560(self, a2, b1, b2, c1, c2, tag, type_, subtype, arg1, arg2,
payload=bytearray()):
self.tx_6560(self.local_addr2, self.BROADCAST2, a2, b1, b2, c1, c2,
tag, type_, subtype, arg1, arg2, payload)
return self.wait_6560(tag)
def logon(self, password='0000', timeout=900):
tag = self.tx_logon(password, timeout)
self.wait_6560(tag)
def total_yield(self):
tag = self.tx_yield()
from2, type_, subtype, arg1, arg2, extra = self.wait_6560(tag)
timestamp = bytes2int(extra[4:8])
total = bytes2int(extra[8:12])
return timestamp, total
def daily_yield(self):
tag = self.tx_gdy()
from2, type_, subtype, arg1, arg2, extra = self.wait_6560(tag)
timestamp = bytes2int(extra[4:8])
daily = bytes2int(extra[8:12])
return timestamp, daily
def historic(self, fromtime, totime):
tag = self.tx_historic(fromtime, totime)
data = self.wait_6560_multi(tag)
points = []
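# Each record in the reply is 12 bytes: a 4-byte timestamp, a 4-byte value,
# and 4 bytes this parser skips; a value of 0xffffffff marks a missing point.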
for from2, type_, subtype, arg1, arg2, extra in data:
while extra:
timestamp = bytes2int(extra[0:4])
val = bytes2int(extra[4:8])
extra = extra[12:]
if val != 0xffffffff:
points.append((timestamp, val))
return points
def historic_daily(self, fromtime, totime):
tag = self.tx_historic_daily(fromtime, totime)
data = self.wait_6560_multi(tag)
points = []
for from2, type_, subtype, arg1, arg2, extra in data:
while extra:
timestamp = bytes2int(extra[0:4])
val = bytes2int(extra[4:8])
extra = extra[12:]
if val != 0xffffffff:
points.append((timestamp, val))
return points
def ptime(s):
return int(time.mktime(time.strptime(s, "%Y-%m-%d")))
def cmd_total(sma, args):
if len(args) != 1:
print("Command usage: total")
sys.exit(1)
timestamp, total = sma.total_yield()
print("%s: Total generation to-date %d Wh"
% (format_time(timestamp), total))
def cmd_daily(sma, args):
if len(args) != 1:
print("Command usage: daily")
sys.exit(1)
timestamp, daily = sma.daily_yield()
print("%s: Daily generation %d Wh"
% (format_time(timestamp), daily))
def cmd_historic(sma, args):
fromtime = ptime("2013-01-01")
totime = int(time.time()) # Now
if len(args) > 1:
fromtime = ptime(args[1])
if len(args) > 2:
totime = ptime(args[2])
if len(args) > 3:
print("Command usage: historic [start-date [end-date]]")
sys.exit(1)
hlist = sma.historic(fromtime, totime)
for timestamp, val in hlist:
print("[%d] %s: Total generation %d Wh"
% (timestamp, format_time(timestamp), val))
def cmd_historic_daily(sma, args):
fromtime = ptime("2013-01-01")
totime = int(time.time()) # Now
if len(args) > 1:
fromtime = ptime(args[1])
if len(args) > 2:
totime = ptime(args[2])
if len(args) > 3:
print("Command usage: historic_daily [start-date [end-date]]")
sys.exit(1)
hlist = sma.historic_daily(fromtime, totime)
for timestamp, val in hlist:
print("[%d] %s: Total generation %d Wh"
% (timestamp, format_time(timestamp), val))
if __name__ == '__main__':
bdaddr = None
optlist, args = getopt.getopt(sys.argv[1:], 'b:')
if not args:
print("Usage: %s -b <bdaddr> command args.." % sys.argv[0])
sys.exit(1)
cmd = 'cmd_' + args[0]
if cmd not in globals():
print("Invalid command '%s'" % args[0])
sys.exit(1)
cmdfn = globals()[cmd]
for opt, optarg in optlist:
if opt == '-b':
bdaddr = optarg
if bdaddr is None:
print("No bluetooth address specified")
sys.exit(1)
sma = Connection(bdaddr)
sma.hello()
sma.logon(timeout=60)
cmdfn(sma, args)
|
NobodysNightmare/python-smadata2
|
smadata2/inverter/smabluetooth.py
|
Python
|
gpl-2.0
| 20,732
|
'''Text progress bar library for Python.
A text progress bar is typically used to display the progress of a long
running operation, providing a visual cue that processing is underway.
The ProgressBar class manages the current progress, and the format of the line
is given by a number of widgets. A widget is an object that may display
differently depending on the state of the progress bar.
The progressbar module is very easy to use, yet very powerful. It will also
automatically enable features like auto-resizing when the system supports it.
'''
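# A minimal usage sketch (hedged: the actual API lives elsewhere in the
# package, not in this metadata module; names as documented for progressbar2):
#     import progressbar
#     bar = progressbar.ProgressBar(max_value=100)
#     for i in range(100):
#         bar.update(i + 1)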
__title__ = 'Python Progressbar'
__package_name__ = 'progressbar2'
__author__ = 'Rick van Hattem (Wolph)'
__description__ = '''
A Python Progressbar library to provide visual (yet text based) progress to
long running operations.
'''.strip()
__email__ = 'wolph@wol.ph'
__version__ = '3.5.0'
__license__ = 'BSD'
__copyright__ = 'Copyright 2015 Rick van Hattem (Wolph)'
__url__ = 'https://github.com/WoLpH/python-progressbar'
|
RePeet13/Project-Bot
|
bin/lib/progressbar/__about__.py
|
Python
|
gpl-2.0
| 976
|
#! coding: utf-8
import telegram
class Command:
def __init__(self, bot, message):
chat_id = message.chat_id
text = '''About Persub Bot
Author: Mohammad Amin Sameti (@mamins1376)
Source Code on GitHub: https://github.com/mamins1376/Persub-Bot
The goal of Persub Bot is to give you the best subtitles available for a movie or a TV show.
It uses the python-telegram-bot API (see https://github.com/leandrotoledo/python-telegram-bot).
Send me your feedback! ;)'''
bot.sendMessage(chat_id, text)
|
mamins1376/Persub-Bot
|
persubbot/commands/about.py
|
Python
|
gpl-2.0
| 515
|
# -*- coding: utf-8 -*-
import fauxfactory
import iso8601
import re
import socket
import sys
from collections import namedtuple
from os import path as os_path
from subprocess import check_call
from urlparse import urlparse
import paramiko
from scp import SCPClient
import diaper
from utils import conf, ports, version
from utils.log import logger
from utils.net import net_check
from fixtures.pytest_store import store
from utils.path import project_path
from utils.quote import quote
from utils.timeutil import parsetime
# Default blocking time before giving up on an ssh command execution,
# in seconds (float)
RUNCMD_TIMEOUT = 1200.0
class SSHResult(namedtuple("SSHResult", ["rc", "output"])):
"""Allows rich comparison for more convenient testing.
Given you have ``result`` which is an instance of :py:class:`SSHResult`, you can do as follows:
.. code-block:: python
assert result # If $?=0, then the result evaluates to a truthy value and passes the assert
assert result == 'installed' # direct matching of the output value
assert 'something' in result # like before but uses the ``in`` matching for a partial match
assert result == 5 # assert that the $?=5 (you can use <, >, ...)
Therefore this class can act like 3 kinds of values:
* Like a string (with the output of the command) when compared with or cast to one
* Like a number (with the return code) when compared with or cast to one
* Like a bool, giving truthy value if the return code was zero. That is related to the
preceding bullet.
It still subclasses the original class, so all old behaviour is kept, but you
don't have to expand the tuple or pull a value out when you only check one of them.
"""
def __str__(self):
return self.output
def __contains__(self, what):
# Handling 'something' in x
if not isinstance(what, basestring):
raise ValueError('You can only check strings using the in operator')
return what in self.output
def __nonzero__(self):
# Handling bool(x) or if x:
return self.rc == 0
def __int__(self):
# handling int(x)
return self.rc
def __cmp__(self, other):
# Handling comparison to strings or numbers
if isinstance(other, int):
return cmp(self.rc, other)
elif isinstance(other, basestring):
return cmp(self.output, other)
else:
raise ValueError('You can only compare SSHResult with str or int')
@property
def success(self):
return self.rc == 0
@property
def failed(self):
return self.rc != 0
_ssh_key_file = project_path.join('.generated_ssh_key')
_ssh_pubkey_file = project_path.join('.generated_ssh_key.pub')
_client_session = []
class SSHClient(paramiko.SSHClient):
"""paramiko.SSHClient wrapper
Allows copying/overriding and use as a context manager
Constructor kwargs are handed directly to paramiko.SSHClient.connect()
Keywords:
container: If specified, then it is assumed that the VM hosts a container of CFME. The
param then contains the name of the container.
is_pod: If specified and True, then it is assumed that the target is a podified openshift
app and ``container`` then specifies the name of the pod to interact with.
stdout: If specified, overrides the system stdout file for streaming output.
stderr: If specified, overrides the system stderr file for streaming output.
"""
def __init__(self, stream_output=False, **connect_kwargs):
super(SSHClient, self).__init__()
self._streaming = stream_output
# deprecated/useless karg, included for backward-compat
self._keystate = connect_kwargs.pop('keystate', None)
# Container is used to store both docker VM's container name and Openshift pod name.
self._container = connect_kwargs.pop('container', None)
self.is_pod = connect_kwargs.pop('is_pod', False)
self.f_stdout = connect_kwargs.pop('stdout', sys.stdout)
self.f_stderr = connect_kwargs.pop('stderr', sys.stderr)
# load the defaults for ssh
default_connect_kwargs = {
'timeout': 10,
'allow_agent': False,
'look_for_keys': False,
'gss_auth': False
}
# Load credentials and destination from confs, if connect_kwargs is empty
if not connect_kwargs.get('hostname'):
parsed_url = urlparse(store.base_url)
default_connect_kwargs["port"] = ports.SSH
default_connect_kwargs['username'] = conf.credentials['ssh']['username']
default_connect_kwargs['password'] = conf.credentials['ssh']['password']
default_connect_kwargs['hostname'] = parsed_url.hostname
default_connect_kwargs["port"] = connect_kwargs.pop('port', ports.SSH)
# Overlay defaults with any passed-in kwargs and store
default_connect_kwargs.update(connect_kwargs)
self._connect_kwargs = default_connect_kwargs
self.set_missing_host_key_policy(paramiko.AutoAddPolicy())
_client_session.append(self)
@property
def is_container(self):
return self._container is not None and not self.is_pod
@property
def username(self):
return self._connect_kwargs.get('username')
def __repr__(self):
return "<SSHClient hostname={} port={}>".format(
repr(self._connect_kwargs.get("hostname")),
repr(self._connect_kwargs.get("port", 22)))
def __call__(self, **connect_kwargs):
# Update a copy of this instance's connect kwargs with passed in kwargs,
# then return a new instance with the updated kwargs
new_connect_kwargs = dict(self._connect_kwargs)
new_connect_kwargs.update(connect_kwargs)
# pass the key state if the hostname is the same, under the assumption that the same
# host will still have keys installed if they have already been
new_client = SSHClient(**new_connect_kwargs)
return new_client
def __enter__(self):
self.connect()
return self
def __exit__(self, *args, **kwargs):
# Noop, call close explicitly to shut down the transport
# It will be reopened automatically on next command
pass
def __del__(self):
self.close()
def _check_port(self):
hostname = self._connect_kwargs['hostname']
if not net_check(ports.SSH, hostname, force=True):
raise Exception("SSH connection to {}:{} failed, port unavailable".format(
hostname, ports.SSH))
def _progress_callback(self, filename, size, sent):
if sent > 0:
logger.debug('scp progress for %r: %s of %s ', filename, sent, size)
def close(self):
with diaper:
_client_session.remove(self)
super(SSHClient, self).close()
@property
def connected(self):
return self._transport and self._transport.active
def connect(self, hostname=None, **kwargs):
"""See paramiko.SSHClient.connect"""
if hostname and hostname != self._connect_kwargs['hostname']:
self._connect_kwargs['hostname'] = hostname
self.close()
if not self.connected:
self._connect_kwargs.update(kwargs)
self._check_port()
# Only install ssh keys if they aren't installed (or currently being installed)
return super(SSHClient, self).connect(**self._connect_kwargs)
def open_sftp(self, *args, **kwargs):
if self.is_container:
logger.warning(
'You are about to use sftp on a containerized appliance. It may not work.')
self.connect()
return super(SSHClient, self).open_sftp(*args, **kwargs)
def get_transport(self, *args, **kwargs):
if not self.connected:
self.connect()
return super(SSHClient, self).get_transport(*args, **kwargs)
def run_command(
self, command, timeout=RUNCMD_TIMEOUT, reraise=False, ensure_host=False,
ensure_user=False):
"""Run a command over SSH.
Args:
command: The command. Supports taking dicts as version picking.
timeout: Timeout after which the command execution fails.
reraise: Does not muffle the paramiko exceptions in the log.
ensure_host: Ensure that the command is run on the machine with the IP given, not any
container or such that we might be using by default.
ensure_user: Ensure that the command is run as the user we logged in, so in case we are
not root, setting this to True will prevent from running sudo.
Returns:
A :py:class:`SSHResult` instance.
"""
if isinstance(command, dict):
command = version.pick(command)
original_command = command
uses_sudo = False
logger.info("Running command %r", command)
if self.is_pod and not ensure_host:
# This command will be executed in the context of the host provider
command = 'oc rsh {} bash -c {}'.format(self._container, quote(
'source /etc/default/evm; ' + command))
ensure_host = True
elif self.is_container and not ensure_host:
command = 'docker exec {} bash -c {}'.format(self._container, quote(
'source /etc/default/evm; ' + command))
if self.username != 'root' and not ensure_user:
# We need sudo
command = 'sudo -i bash -c {command}'.format(command=quote(command))
uses_sudo = True
if command != original_command:
logger.info("> Actually running command %r", command)
command += '\n'
output = []
try:
session = self.get_transport().open_session()
if uses_sudo:
# We need a pseudo-tty for sudo
session.get_pty()
if timeout:
session.settimeout(float(timeout))
session.exec_command(command)
stdout = session.makefile()
stderr = session.makefile_stderr()
while True:
if session.recv_ready():
for line in stdout:
output.append(line)
if self._streaming:
self.f_stdout.write(line)
if session.recv_stderr_ready():
for line in stderr:
output.append(line)
if self._streaming:
self.f_stderr.write(line)
if session.exit_status_ready():
break
exit_status = session.recv_exit_status()
return SSHResult(exit_status, ''.join(output))
except paramiko.SSHException:
if reraise:
raise
else:
logger.exception('Exception happened during SSH call')
except socket.timeout:
logger.exception(
"Command %r timed out. Output before it failed was:\n%r",
command,
''.join(output))
raise
# Returning two things so tuple unpacking the return works even if the ssh client fails
# Return whatever we have in the output
return SSHResult(1, ''.join(output))
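# A usage sketch (hypothetical host and credentials; relies on the SSHResult
# semantics defined above):
#     ssh = SSHClient(hostname='10.0.0.1', username='root', password='secret')
#     result = ssh.run_command('cat /var/www/miq/vmdb/VERSION')
#     if result:                  # truthy when the exit status is 0
#         print(result.output)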
def cpu_spike(self, seconds=60, cpus=2, **kwargs):
"""Creates a CPU spike of specific length and processes.
Args:
seconds: How long the spike should last.
cpus: How many processes to use.
Returns:
See :py:meth:`SSHClient.run_command`
"""
return self.run_command(
"duration={}; instances={}; endtime=$(($(date +%s) + $duration)); "
"for ((i=0; i<instances; i++)) do while (($(date +%s) < $endtime)); "
"do :; done & done".format(seconds, cpus), **kwargs)
def run_rails_command(self, command, timeout=RUNCMD_TIMEOUT, **kwargs):
logger.info("Running rails command %r", command)
return self.run_command('cd /var/www/miq/vmdb; bin/rails runner {command}'.format(
command=command), timeout=timeout, **kwargs)
def run_rake_command(self, command, timeout=RUNCMD_TIMEOUT, **kwargs):
logger.info("Running rake command %r", command)
return self.run_command(
'cd /var/www/miq/vmdb; bin/rake -f /var/www/miq/vmdb/Rakefile {command}'.format(
command=command), timeout=timeout, **kwargs)
def put_file(self, local_file, remote_file='.', **kwargs):
logger.info("Transferring local file %r to remote %r", local_file, remote_file)
if self.is_container:
tempfilename = '/share/temp_{}'.format(fauxfactory.gen_alpha())
logger.info('For this purpose, temporary file name is %r', tempfilename)
scp = SCPClient(self.get_transport(), progress=self._progress_callback).put(
local_file, tempfilename, **kwargs)
self.run_command('mv {} {}'.format(tempfilename, remote_file))
return scp
elif self.is_pod:
tmp_folder_name = 'automation-{}'.format(fauxfactory.gen_alpha().lower())
logger.info('For this purpose, temporary folder name is /tmp/%s', tmp_folder_name)
# Clean up container's temporary folder
self.run_command('rm -rf /tmp/{0}'.format(tmp_folder_name))
# Create/Clean up the host's temporary folder
self.run_command(
'rm -rf /tmp/{0}; mkdir -p /tmp/{0}'.format(tmp_folder_name), ensure_host=True)
# Now upload the file to the openshift host
tmp_file_name = 'file-{}'.format(fauxfactory.gen_alpha().lower())
tmp_full_name = '/tmp/{}/{}'.format(tmp_folder_name, tmp_file_name)
scp = SCPClient(self.get_transport(), progress=self._progress_callback).put(
local_file, tmp_full_name, **kwargs)
# use oc rsync to put the file in the container
assert self.run_command(
'oc rsync /tmp/{} {}:/tmp/'.format(tmp_folder_name, self._container),
ensure_host=True)
# Move the file onto correct place
assert self.run_command('mv {} {}'.format(tmp_full_name, remote_file))
return scp
else:
if self.username == 'root':
return SCPClient(self.get_transport(), progress=self._progress_callback).put(
local_file, remote_file, **kwargs)
# scp client is not sudo, may not work for non sudo
tempfilename = '/home/{user_name}/temp_{random_alpha}'.format(
user_name=self.username, random_alpha=fauxfactory.gen_alpha())
logger.info('For this purpose, temporary file name is %r', tempfilename)
scp = SCPClient(self.get_transport(), progress=self._progress_callback).put(
local_file, tempfilename, **kwargs)
self.run_command('mv {temp_file} {remote_file}'.format(temp_file=tempfilename,
remote_file=remote_file))
return scp
def get_file(self, remote_file, local_path='', **kwargs):
logger.info("Transferring remote file %r to local %r", remote_file, local_path)
base_name = os_path.basename(remote_file)
if self.is_container:
tmp_file_name = 'temp_{}'.format(fauxfactory.gen_alpha())
tempfilename = '/share/{}'.format(tmp_file_name)
logger.info('For this purpose, temporary file name is %r', tempfilename)
self.run_command('cp {} {}'.format(remote_file, tempfilename))
scp = SCPClient(self.get_transport(), progress=self._progress_callback).get(
tempfilename, local_path, **kwargs)
self.run_command('rm {}'.format(tempfilename))
check_call([
'mv',
os_path.join(local_path, tmp_file_name),
os_path.join(local_path, base_name)])
return scp
elif self.is_pod:
tmp_folder_name = 'automation-{}'.format(fauxfactory.gen_alpha().lower())
tmp_file_name = 'file-{}'.format(fauxfactory.gen_alpha().lower())
tmp_full_name = '/tmp/{}/{}'.format(tmp_folder_name, tmp_file_name)
logger.info('For this purpose, temporary file name is %r', tmp_full_name)
# Clean up container's temporary folder
self.run_command('rm -rf /tmp/{0}; mkdir -p /tmp/{0}'.format(tmp_folder_name))
# Create/Clean up the host's temporary folder
self.run_command(
'rm -rf /tmp/{0}; mkdir -p /tmp/{0}'.format(tmp_folder_name), ensure_host=True)
# Now copy the file in container to the tmp folder
assert self.run_command('cp {} {}'.format(remote_file, tmp_full_name))
# Use the oc rsync to pull the file onto the host
assert self.run_command(
'oc rsync {}:/tmp/{} /tmp'.format(self._container, tmp_folder_name),
ensure_host=True)
# Now download the file to the openshift host
scp = SCPClient(self.get_transport(), progress=self._progress_callback).get(
tmp_full_name, local_path, **kwargs)
check_call([
'mv',
os_path.join(local_path, tmp_file_name),
os_path.join(local_path, base_name)])
return scp
else:
return SCPClient(self.get_transport(), progress=self._progress_callback).get(
remote_file, local_path, **kwargs)
def patch_file(self, local_path, remote_path, md5=None):
""" Patches a single file on the appliance
Args:
local_path: Path to patch (diff) file
remote_path: Path to file to be patched (on the appliance)
md5: MD5 checksum of the original file to check if it has changed
Returns:
True if changes were applied, False if patching was not necessary
Note:
If there is a .bak file present and the file-to-be-patched was
not patched by the current patch-file, it will be used to restore it first.
Recompiling assets and restarting appropriate services might be required.
"""
logger.info('Patching %s', remote_path)
# Upload diff to the appliance
diff_remote_path = os_path.join('/tmp/', os_path.basename(remote_path))
self.put_file(local_path, diff_remote_path)
# If already patched with current file, exit
logger.info('Checking if already patched')
rc, out = self.run_command(
'patch {} {} -f --dry-run -R'.format(remote_path, diff_remote_path))
if rc == 0:
return False
# If we have a .bak file available, it means the file is already patched
# by some older patch; in that case, replace the file-to-be-patched by the .bak first
logger.info("Checking if %s.bak is available", remote_path)
rc, out = self.run_command('test -e {}.bak'.format(remote_path))
if rc == 0:
logger.info("%s.bak found; using it to replace %s", remote_path, remote_path)
rc, out = self.run_command('mv {}.bak {}'.format(remote_path, remote_path))
if rc != 0:
raise Exception(
"Unable to replace {} with {}.bak".format(remote_path, remote_path))
else:
logger.info("%s.bak not found", remote_path)
# If not patched and there's MD5 checksum available, check it
if md5:
logger.info("MD5 sum check in progress for %s", remote_path)
            rc, out = self.run_command('md5sum -c - <<< "{}  {}"'.format(md5, remote_path))
if rc == 0:
logger.info('MD5 sum check result: file not changed')
else:
logger.warning('MD5 sum check result: file has been changed!')
# Create the backup and patch
rc, out = self.run_command(
'patch {} {} -f -b -z .bak'.format(remote_path, diff_remote_path))
if rc != 0:
raise Exception("Unable to patch file {}: {}".format(remote_path, out))
return True
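    # Hedged usage sketch (the paths and checksum below are illustrative
    # assumptions, not taken from this repository):
    #   changed = client.patch_file('/patches/ui_fix.diff',
    #                               '/var/www/miq/vmdb/app/views/report.haml',
    #                               md5='9e107d9d372bb6826bd81d3542a419d6')
    # ``changed`` is True when the diff was applied and False when the target
    # was already patched by this diff.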
def get_build_datetime(self):
command = "stat --printf=%Y /var/www/miq/vmdb/VERSION"
return parsetime.fromtimestamp(int(self.run_command(command).output.strip()))
def get_build_date(self):
return self.get_build_datetime().date()
def is_appliance_downstream(self):
return self.run_command("stat /var/www/miq/vmdb/BUILD").rc == 0
def uptime(self):
out = self.run_command('cat /proc/uptime')[1]
        match = re.findall(r'\d+\.\d+', out)
if match:
return float(match[0])
return 0
def client_address(self):
res = self.run_command('echo $SSH_CLIENT', ensure_host=True, ensure_user=True)
# SSH_CLIENT format is 'clientip clientport serverport', we want clientip
if not res.output:
raise Exception('unable to get client address via SSH')
return res.output.split()[0]
def appliance_has_netapp(self):
return self.run_command("stat /var/www/miq/vmdb/HAS_NETAPP").rc == 0
@property
def status(self):
"""Parses the output of the ``systemctl status evmserverd``.
Returns:
A dictionary containing ``servers`` and ``workers``, both lists. Each of the lists
contains dictionaries, one per line. You can refer inside the dictionary using the
headers.
"""
matcher = re.compile(
'|'.join([
'DEPRECATION WARNING',
'called from block in',
'Please use .* instead',
'key :terminate is duplicated and overwritten',
]))
if version.current_version() < "5.5":
data = self.run_command("systemctl status evmserverd")
else:
data = self.run_rake_command("evm:status")
if data.rc != 0:
raise Exception("systemctl status evmserverd $?={}".format(data.rc))
data = data.output.strip().split("\n\n")
if len(data) == 2:
srvs, wrks = data
else:
srvs = data[0]
wrks = ""
if "checking evm status" not in srvs.lower():
raise Exception("Wrong command output:\n{}".format(data.output))
def _process_dict(d):
d["PID"] = int(d["PID"])
d["ID"] = int(d["ID"])
try:
d["SPID"] = int(d["SPID"])
except ValueError:
d["SPID"] = None
if "Active Roles" in d:
d["Active Roles"] = set(d["Active Roles"].split(":"))
if "Last Heartbeat" in d:
d["Last Heartbeat"] = iso8601.parse_date(d["Last Heartbeat"])
if "Started On" in d:
d["Started On"] = iso8601.parse_date(d["Started On"])
# Servers part
srvs = [line for line in srvs.split("\n")[1:] if matcher.search(line) is None]
srv_headers = [h.strip() for h in srvs[0].strip().split("|")]
srv_body = srvs[2:]
servers = []
for server in srv_body:
fields = [f.strip() for f in server.strip().split("|")]
srv = dict(zip(srv_headers, fields))
_process_dict(srv)
servers.append(srv)
# Workers part
# TODO: Figure more permanent solution for ignoring the warnings
wrks = [line for line in wrks.split("\n") if matcher.search(line) is None]
workers = []
if wrks:
wrk_headers = [h.strip() for h in wrks[0].strip().split("|")]
wrk_body = wrks[2:]
for worker in wrk_body:
fields = [f.strip() for f in worker.strip().split("|")]
wrk = dict(zip(wrk_headers, fields))
_process_dict(wrk)
workers.append(wrk)
return {"servers": servers, "workers": workers}
class SSHTail(SSHClient):
def __init__(self, remote_filename, **connect_kwargs):
super(SSHTail, self).__init__(stream_output=False, **connect_kwargs)
self._remote_filename = remote_filename
self._sftp_client = None
self._remote_file_size = None
def __iter__(self):
for line in self.raw_lines():
yield line.rstrip()
def raw_lines(self):
with self as sshtail:
fstat = sshtail._sftp_client.stat(self._remote_filename)
if self._remote_file_size is not None:
if self._remote_file_size < fstat.st_size:
remote_file = self._sftp_client.open(self._remote_filename, 'r')
remote_file.seek(self._remote_file_size, 0)
while (remote_file.tell() < fstat.st_size):
line = remote_file.readline() # Note the missing rstrip() here!
yield line
self._remote_file_size = fstat.st_size
def raw_string(self):
return ''.join(self)
def __enter__(self):
self.connect(**self._connect_kwargs)
self._sftp_client = self.open_sftp()
return self
def __exit__(self, *args, **kwargs):
self._sftp_client.close()
def set_initial_file_end(self):
with self as sshtail:
fstat = sshtail._sftp_client.stat(self._remote_filename)
self._remote_file_size = fstat.st_size # Seed initial size of file
def lines_as_list(self):
"""Return lines as list"""
return list(self)
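# Hedged usage sketch: tail only the lines appended to a log after a marker.
# The hostname/credentials and log path are illustrative assumptions; the
# connect kwargs are simply passed through to the underlying SSHClient.
def _example_sshtail_usage():
    tail = SSHTail('/var/www/miq/vmdb/log/evm.log',
                   hostname='10.0.0.1', username='root', password='smartvm')
    tail.set_initial_file_end()  # remember the current end of the file
    # ... trigger some appliance activity here ...
    for line in tail:  # yields only lines written since the marker
        print(line)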
def keygen():
"""Generate temporary ssh keypair for appliance SSH auth
Intended not only to simplify ssh access to appliances, but also to simplify
SSH access from one appliance to another in multi-appliance setups
"""
# private key
prv = paramiko.RSAKey.generate(bits=1024)
with _ssh_key_file.open('w') as f:
prv.write_private_key(f)
# public key
pub = paramiko.RSAKey(filename=_ssh_key_file.strpath)
with _ssh_pubkey_file.open('w') as f:
f.write("{} {} {}\n".format(pub.get_name(), pub.get_base64(),
'autogenerated cfme_tests key'))
|
jteehan/cfme_tests
|
utils/ssh.py
|
Python
|
gpl-2.0
| 26,511
|
#!/usr/bin/python
#
# Copyright (C) 2007-2009 Julian Andres Klode <jak@jak-linux.org>
# Copyright (C) 2003-2006 Darren Kirby <d@badcomputer.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# 10-08-2009 modified by Paolo Patruno
'''
dir2ogg converts mp3, m4a, and wav files to the free open source OGG format. Oggs are
about 20-25% smaller than mp3s with the same relative sound quality. Your mileage may vary.
Keep in mind that converting from mp3 or m4a to ogg is a conversion between two lossy formats.
This is fine if you just want to free up some disk space, but if you're a hard-core audiophile
you may be disappointed. I really can't notice a difference in quality with 'naked' ears myself.
This script converts mp3s to wavs using mpg123 then converts the wavs to oggs using oggenc.
m4a conversions require faad. Id3 tag support requires mutagen for mp3s.
Scratch tags using the filename will be written for wav files (and mp3s with no tags!)
'''
from __future__ import division
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
from past.utils import old_div
from builtins import object
import sys
import os, os.path
import re
from fnmatch import _cache, translate
from optparse import OptionParser
from subprocess import Popen, call, PIPE
__version__ = '0.11.7'
__date__ = '2009-05-03'
FILTERS = {'mp3': ('*.mp3',),
'm4a': ('*.aac', '*.m4a', '*.mp4'),
'wma': ('*.asf', '*.wma', '*.wmf'),
'flash': ('*.flv', ),
'flac': ('*.flac',),
'wav': ('*.wav', ),
'speex': ('*.spx','*.speex' ),
}
def mmatch(names, patterns, rbool=True):
'''names/patterns=str/list/tuple'''
results = []
if isinstance(names, str):
names = [names]
if isinstance(patterns, str):
patterns = [patterns]
for pat in patterns:
pat = pat.lower()
if not pat in _cache:
_cache[pat] = re.compile(translate(pat))
match = _cache[pat].match
for name in names:
if match(name.lower()):
if rbool:
return True
else:
results.append(name)
if rbool:
return bool(results)
else:
return results
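# Hedged usage sketch for mmatch() (the file names are illustrative):
#   mmatch('Track01.MP3', FILTERS['mp3'])             -> True (case-insensitive)
#   mmatch(['a.mp3', 'b.wav'], '*.mp3', rbool=False)  -> ['a.mp3']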
def read_opts():
if not '--version' in sys.argv:
show_banner()
if len(sys.argv[1:]) == 0:
fatal('No arguments specified, see --help for usage.')
parser = OptionParser(usage='%prog [options] [arguments]', version='%prog ' + __version__)
    parser.add_option('-l', '--license', action='callback', callback=show_license, help='display license information')
parser.add_option('-d', '--directory', action='store_true', help='convert files in all directories specified as arguments')
parser.add_option('-r', '--recursive', action='store_true', help='convert files in all subdirectories of all directories specified as arguments')
    parser.add_option('-c', '--cdda', action='store_true', help="convert audio CDs in all devices specified as arguments (default: /dev/cdrom) [EXPERIMENTAL]")
parser.add_option('-q', '--quality', metavar='N', default=3.0, type='float', help='quality. N is a number from 1-10 (default %default)')
parser.add_option('-t', '--smart-mp3', action='store_true', help='try to use similar quality as original mp3 file (overwrites -q)')
parser.add_option('-T', '--smart-mp3-correction', metavar='N', default=0.0, type='float', help='decrease detected quality (implies -t)')
parser.add_option('-n', '--no-mp3', dest='convert_mp3', action='store_false', default=True, help="don't convert mp3s (use with '-d' or '-r')")
parser.add_option('-a', '--convert-all', action='store_true', help="convert all supported formats")
parser.add_option('-f', '--convert-flac', action='store_true', help="convert flac files (use with '-d')")
parser.add_option('-x', '--convert-speex', action='store_true', help="convert speex files (use with '-d')")
parser.add_option('-m', '--convert-m4a', action='store_true', help="convert m4a files (use with '-d')")
parser.add_option('-w', '--convert-wav', action='store_true', help="convert wav files (use with '-d')")
parser.add_option('-W', '--convert-wma', action='store_true', help="convert wma files (use with '-d').")
parser.add_option('-F', '--convert-flash', action='store_true', help="convert flash files (use with '-d').")
parser.add_option('--delete-input', action='store_true', help='delete input files')
parser.add_option('-p', '--preserve-wav', action='store_true', help='keep the wav files (also includes -P)')
parser.add_option('-P', '--no-pipe', action='store_true', help='Do not use pipes, use temporary wav files')
parser.add_option('-v', '--verbose', action='store_true', help='verbose output')
# Setup decoders
commands = {'mp3': ('mpg123', 'mpg321', 'lame', 'mplayer'),
'wma': ('mplayer',),
'm4a': ('faad', 'mplayer'),
'flash': ('mplayer',),
'flac': ('flac', 'ogg123', 'mplayer'),
'speex': ('speexdec',),
'cd': ('cdparanoia', 'icedax','cdda2wav', 'mplayer'),
}
for ext, dec in list(commands.items()):
default, choices = None, []
for command in dec:
in_path = [prefix for prefix in os.environ['PATH'].split(os.pathsep) if os.path.exists(os.path.join(prefix, command))]
if in_path:
choices.append(command)
default = default or command
parser.add_option('--' + ext + '-decoder', type="choice", metavar=default, default=default, choices=choices, help="decoder for %s files (choices: %s)" % (ext, ', '.join(choices)))
# End of decoder options
options, args = parser.parse_args()
options.convert_cd = options.cdda
options.filters = []
for ext, pat in list(FILTERS.items()):
# Activate Encoders for files on the commandline
if options.convert_all or mmatch(args, pat):
setattr(options, 'convert_' + ext, True)
if getattr(options, 'convert_' + ext):
options.filters += pat
# Missing decoders
if ext != 'wav' and getattr(options, 'convert_' + ext) and not getattr(options, ext + '_decoder'):
fatal('%s was enabled, but no decoder has been found.' % ext)
if len(args) == 0 and not options.cdda:
fatal('No files/directories specified.')
return options, args
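# Hedged CLI sketch (the music path is illustrative): convert a tree of mp3s
# recursively, matching the source bitrate instead of the fixed -q default:
#   python dir2ogg.py -r -t ~/music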
def info(msg):
print('Information: %s' % msg)
def warn(msg):
    '''Print a warning to stderr'''
print("Warning: %s" % msg, file=sys.stderr)
def fatal(msg):
'''Fatal error (error + exit)'''
print("Error: %s" % msg, file=sys.stderr)
sys.exit(1)
def return_dirs(root):
mydirs = {}
for pdir, dirs, files in os.walk(root):
if not pdir in mydirs:
mydirs[pdir] = files
return mydirs
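# Hedged example (paths are illustrative): return_dirs('/music') maps every
# directory under /music, including /music itself, to the plain files it
# contains, e.g. {'/music': ['a.mp3'], '/music/live': ['b.flac']}.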
class Id3TagHandler(object):
'''Class for handling meta-tags. (Needs mutagen)'''
accept = ['album', 'album_subtitle', 'albumartist', 'albumartistsort',
'albumsort', 'artist', 'artistsort', 'asin', 'bpm', 'comment',
'compilation', 'composer', 'composersort', 'conductor', 'copyright',
'date', 'discid', 'discnumber', 'encodedby', 'engineer', 'gapless',
'genre', 'grouping', 'isrc', 'label', 'lyricist', 'lyrics', 'mood',
'musicbrainz_albumartistid', 'musicbrainz_albumid', 'musicbrainz_artistid',
'musicbrainz_discid', 'musicbrainz_sortname', 'musicbrainz_trackid',
'musicbrainz_trmid', 'musicip_puid', 'podcast', 'podcasturl',
'releasecountry', 'musicbrainz_albumstatus', 'musicbrainz_albumtype', 'remixer', 'show',
'showsort', 'subtitle', 'title', 'titlesort', 'tracknumber', 'tracktotal']
def __init__(self, song):
self.song = song
self.tags = {}
def grab_common(self, handler, convert=None, error=None):
'''Common grabber, starts the handler and applies the tags to self.tags'''
try:
mydict = handler(self.song)
        except error:
            import traceback
            warn('Mutagen failed on %s, no tags available' % self.song)
            traceback.print_exc(0)
            print(file=sys.stderr)
            return
if convert:
convert = dict([(k.lower(), v.lower()) for k, v in list(convert.items())]) # Fix convert
for key, val in list(mydict.items()):
key = key.lower()
key = convert and (key in convert and convert[key] or key) or key
if not key in self.accept:
continue
if not convert: # Hack for FLAC, which uses Vorbis tags
pass
elif hasattr(val, 'text'):
val = val.text
if convert:
new_val = []
if not isinstance(val, list):
val = [val]
for i in val:
if not isinstance(i, str):
# Convert all invalid values to unicode
try:
new_val.append(str(i))
except UnicodeDecodeError:
warn('Ignoring UnicodeDecodeError in key %s' % key)
new_val.append(str(i, errors='ignore'))
else:
new_val.append(i)
val = new_val
del new_val
self.tags[key] = val
def grab_m4a_tags(self):
'''Import MP4 tags handler, set convert and call commonGrab'''
convert = {'----:com.apple.iTunes:ASIN': 'asin',
'----:com.apple.iTunes:MusicBrainz Album Artist Id': 'musicbrainz_albumartistid',
'----:com.apple.iTunes:MusicBrainz Album Id': 'musicbrainz_albumid',
'----:com.apple.iTunes:MusicBrainz Album Release Country': 'releasecountry',
'----:com.apple.iTunes:MusicBrainz Album Status': 'musicbrainz_albumstatus',
'----:com.apple.iTunes:MusicBrainz Album Type': 'musicbrainz_albumtype',
'----:com.apple.iTunes:MusicBrainz Artist Id': 'musicbrainz_artistid',
'----:com.apple.iTunes:MusicBrainz Disc Id': 'musicbrainz_discid',
'----:com.apple.iTunes:MusicBrainz TRM Id': 'musicbrainz_trmid',
'----:com.apple.iTunes:MusicBrainz Track Id': 'musicbrainz_trackid',
'----:com.apple.iTunes:MusicIP PUID': 'musicip_puid',
'aART': 'albumartist', 'cpil': 'compilation', 'cprt': 'copyright',
'pcst': 'podcast', 'pgap': 'gapless', 'purl': 'podcasturl',
'soaa': 'albumartistsort', 'soal': 'albumsort', 'soar': 'artistsort',
'soco': 'composersort', 'sonm': 'titlesort', 'sosn': 'showsort',
'trkn': 'tracknumber', 'tvsh': 'show', '\xa9ART': 'artist',
'\xa9alb': 'album', '\xa9cmt': 'comment', '\xa9day': 'date',
'\xa9gen': 'genre', '\xa9grp': 'grouping', '\xa9lyr': 'lyrics',
'\xa9nam': 'title', '\xa9too': 'encodedby','\xa9wrt': 'composer'}
try:
from mutagen.mp4 import MP4, error
except ImportError:
from mutagen.m4a import M4A as MP4, error
self.grab_common(MP4, convert, error)
def grab_wma_tags(self):
'''Import ASF tags handler, set convert and call commonGrab'''
convert = {'Author': 'artist', 'Description': 'comment',
'MusicBrainz/Album Artist Id': 'musicbrainz_albumartistid',
'MusicBrainz/Album Id': 'musicbrainz_albumid',
'MusicBrainz/Album Release Country': 'releasecountry',
'MusicBrainz/Album Status': 'musicbrainz_albumstatus',
'MusicBrainz/Album Type': 'musicbrainz_albumtype',
'MusicBrainz/Artist Id': 'musicbrainz_artistid',
'MusicBrainz/Disc Id': 'musicbrainz_discid',
'MusicBrainz/TRM Id': 'musicbrainz_trmid',
'MusicBrainz/Track Id': 'musicbrainz_trackid',
'MusicIP/PUID': 'musicip_puid',
'WM/AlbumArtist': 'albumartist',
'WM/AlbumArtistSortOrder': 'albumartistsort',
'WM/AlbumSortOrder': 'albumsort',
'WM/AlbumTitle': 'album',
'WM/ArtistSortOrder': 'artistsort',
'WM/BeatsPerMinute': 'bpm',
'WM/Composer': 'composer',
'WM/Conductor': 'conductor',
'WM/ContentGroupDescription': 'grouping',
'WM/Copyright': 'copyright',
'WM/EncodedBy': 'encodedby',
'WM/Genre': 'genre',
'WM/ISRC': 'isrc',
'WM/Lyrics': 'lyrics',
'WM/ModifiedBy': 'remixer',
'WM/Mood': 'mood',
'WM/PartOfSet': 'discnumber',
'WM/Producer': 'engineer',
'WM/Publisher': 'label',
'WM/SetSubTitle': 'album_subtitle',
'WM/SubTitle': 'subtitle',
'WM/TitleSortOrder': 'titlesort',
'WM/TrackNumber': 'tracknumber',
'WM/Writer': 'lyricist',
'WM/Year': 'date',
}
from mutagen.asf import ASF, error
self.grab_common(ASF, convert, error)
def grab_flac_tags(self):
'''Import FLAC tags handler, and call commonGrab'''
from mutagen.flac import FLAC, error
self.grab_common(FLAC, error=error)
def grab_flash_tags(self):
        '''Flash videos carry no usable tags; nothing to grab'''
pass
def grab_speex_tags(self):
'''Import speex tags handler, and call commonGrab'''
from mutagen.oggspeex import OggSpeex, error
self.grab_common(OggSpeex, error=error)
def grab_mp3_tags(self):
'''Import MP3 tags handler, and call commonGrab'''
from mutagen.id3 import ID3, error
convert = {'TPE1': 'artist', 'TPE2': 'albumartist', 'TPE3': 'conductor', 'TPE4': 'remixer',
'TCOM': 'composer', 'TCON': 'genre', 'TALB': 'album', 'TIT1': 'grouping',
'TIT2': 'title', 'TIT3': 'subtitle', 'TSST': 'discsubtitle', 'TEXT': 'lyricist',
'TCMP': 'compilation', 'TDRC': 'date', 'COMM': 'comment', 'TMOO': 'mood',
'TMED': 'media', 'TBPM': 'bpm', 'WOAR': 'website', 'TSRC': 'isrc',
'TENC': 'encodedby', 'TCOP': 'copyright', 'TSOA': 'albumsort',
'TSOP': 'artistsort', 'TSOT': 'titlesort','TPUB': 'label',
'TRCK': 'tracknumber'}
self.grab_common(ID3, convert, error)
def list_if_verbose(self):
info('Meta-tags I will write:')
for key, val in list(self.tags.items()):
if type(val) == list:
info(key + ': ' + ','.join(val))
else:
info(key + ': ' + val)
class Convert(Id3TagHandler):
'''
Base conversion Class.
__init__ creates some useful attributes,
grabs the id3 tags, and sets a flag to remove files.
Methods are the conversions we can do
'''
def __init__(self, song, conf):
self.device = ""
self.track = ""
Id3TagHandler.__init__(self, song)
self.conf = conf
song_root = os.path.splitext(song)[0] + "."
self.songwav = song_root + 'wav'
self.songogg = song_root + 'ogg'
self.decoder = ''
        if os.path.exists(self.songogg):
            warn('skipping conversion, output file already exists: %s' % self.songogg)
return
        # (smartmp3) Remember the default quality so it can be restored after this file
original_quality = self.conf.quality
for ext, pat in list(FILTERS.items()):
if mmatch(self.song, pat) and ext != 'wav':
self.decoder = getattr(self.conf, ext + '_decoder')
getattr(self, 'grab_%s_tags' % ext)()
if ext == 'mp3' and (self.conf.smart_mp3 or \
self.conf.smart_mp3_correction):
self.smart_mp3()
#self.songogg = "%(artist)s/%(album)s/%(track)s - %(title)s.ogg" % self.tags
#self.songogg = "%(artist)s/%(album)s - %(title)s.ogg" % self.tags
self.convert()
        # (smartmp3) Restore the default quality
self.conf.quality = original_quality
def smart_mp3(self):
# initial Code by Marek Palatinus <marek@palatinus.cz>, 2007
        # Quality table: the relation between mp3 bitrate and vorbis quality. Source: Wikipedia
        # quality_table = {45:-1, 64:0, 80:1, 96:2, 112:3, 128:4, 160:5, 192:6, 224:7, 256:8, 320:9, 500:10 }
        # log(0.015*bitrate, 1.19) is a logarithmic regression of the table above. Useful for VBR mp3s :-).
try:
from mutagen.mp3 import MP3, HeaderNotFoundError
except ImportError:
            warn("(smartmp3) You don't have mutagen installed. Bitrate detection failed. Using default quality %.02f" % self.conf.quality)
return
try:
mp3info = MP3(self.song)
bitrate = mp3info.info.bitrate
except HeaderNotFoundError:
info('(smartmp3) File is not an mp3 stream. Using default quality %.02f' % self.conf.quality)
return
import math
self.conf.quality = round(5.383 * math.log(0.01616 * bitrate/1000.) - self.conf.smart_mp3_correction, 2)
self.conf.quality = max(self.conf.quality, -1) # Lowest quality is -1
self.conf.quality = min(self.conf.quality, 6) # Highest quality is 6
info("(smartmp3) Detected bitrate: %d kbps" % (old_div(bitrate,1000)))
info("(smartmp3) Assumed vorbis quality: %.02f" % self.conf.quality)
def decode(self):
# Used for mplayer
tempwav = 'dir2ogg-%s-temp.wav' % os.getpid()
if self.decoder not in ('mplayer','speexdec') and not self.conf.no_pipe and not self.conf.preserve_wav:
outfile, outfile1 = '-', '/dev/stdout'
use_pipe = 1
else:
outfile = outfile1 = self.songwav
use_pipe = 0
decoder = {'mpg123': ['mpg123', '-w', outfile1, self.song],
'mpg321': ['mpg321', '-w', outfile, self.song],
'faad': ['faad', '-o' , outfile1, self.song],
'ogg123': ['ogg123', '-dwav', '-f' , outfile, self.song],
'flac': ['flac', '-o', outfile, '-d', self.song],
'speexdec':['speexdec', self.song, outfile],
'lame': ['lame', '--quiet', '--decode', self.song, outfile],
'mplayer': ['mplayer', '-vo', 'null', '-vc' ,'dummy', '-af', 'resample=44100', '-ao', 'pcm:file=' + tempwav, self.song],
'alac-decoder': ['alac-decoder', self.song],
'cd-cdparanoia': ['cdparanoia', '-Z', '-q', '-w', '-d', self.device, str(self.track), outfile],
'cd-icedax': ['icedax', '-H', '-t', str(self.track), '-D',self.device],
'cd-cdda2wav': ['cdda2wav', '-H', '-t', str(self.track), '-D',self.device],
'cd-mplayer': ['mplayer', '-vo', 'null', '-vc' ,'dummy', '-af', 'resample=44100', '-ao', 'pcm:file=temp.wav', '-cdrom-device', self.device, "cdda://" + str(self.track)]}
if use_pipe:
return True, Popen(decoder[self.decoder], stdout=PIPE)
else:
decoder['cd-cdparanoia'].remove('-q')
decoder['lame'].remove('--quiet')
retcode = call(decoder[self.decoder])
if self.decoder == 'mplayer':
# Move the file for mplayer (which uses tempwav), so it works
# for --preserve-wav.
os.rename(tempwav, self.songwav)
if retcode != 0:
return (False, None)
else:
return (True, None)
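    # Hedged sketch of what decode() + convert() amount to for an mp3 when
    # pipes are enabled (file names illustrative); roughly the shell pipeline:
    #   mpg123 -w /dev/stdout song.mp3 | oggenc -Q -q 3 -o song.ogg -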
def convert(self):
''' Convert wav -> ogg.'''
if self.songwav == self.song:
success = True
dec = None
else:
success, dec = self.decode()
if not success:
warn('Decoding of "%s" failed.' % self.song)
return
if dec and self.decoder == 'mpg123':
import mutagen
try:
info("additional option:" )
opts=['-R', str(mutagen.File(self.song).info.sample_rate)]
info(str(opts))
except:
opts=[]
else:
opts=[]
if dec:
enc = Popen(['oggenc', '-Q', '-o', self.songogg, '-q', str(self.conf.quality).replace('.', ','), '-'] + opts, stdin=dec.stdout)
enc.communicate()
dec.wait()
if dec.returncode < 0:
warn('Decoding of "%s" failed.' % self.song)
return False
elif enc.returncode < 0:
warn('Encoding of "%s" failed.' % self.song)
return False
else:
enc = call(['oggenc', '-o', self.songogg, '-q', str(self.conf.quality).replace('.', ','), self.songwav])
if enc != 0:
warn('Encoding of "%s" failed.' % self.songwav)
return False
elif not self.conf.preserve_wav and self.song != self.songwav:
os.remove(self.songwav)
if self.tags != {}:
try:
# Add tags to the ogg file
from mutagen.oggvorbis import OggVorbis
myogg = OggVorbis(self.songogg)
myogg.update(self.tags)
myogg.save()
            except Exception:
warn('Could not save the tags')
import traceback
traceback.print_exc()
return False
elif self.songwav != self.song or 'cd-' in self.decoder:
warn('No tags found...')
if self.conf.delete_input:
os.remove(self.song)
return True
class ConvertTrack(Convert):
'''Wrapper around Convert for CD Tracks'''
def __init__(self, device, conf, track, tags):
self.device, self.track, self.tags, self.conf = device, track, tags, conf
self.song = ''
self.songwav = "audio.wav"
self.songogg = "%(artist)s/%(album)s/%(ntracknumber)s - %(title)s.ogg" % tags
self.conf.preserve_wav = False
self.decoder = 'cd-' + self.conf.cd_decoder
self.convert()
class ConvertDisc(object):
    '''Wrapper around ConvertTrack to convert complete CDs.
    Tries MusicBrainz first and falls back to FreeDB (CDDB).'''
def __init__(self, dev, conf):
warn("Converting CDs is not well supported, please use another "
"solution.")
self.dev, self.conf = dev, conf
try:
self.get_mb()
except self.MBError:
warn('MusicBrainz failed. Trying FreeDB...')
self.get_cddb()
class MBError(Exception):
        '''Raised when the MusicBrainz lookup fails'''
def get_cddb(self):
try:
import CDDB, DiscID
except ImportError:
fatal('You need python-cddb (http://cddb-py.sf.net) to convert cds. Please install it.')
disc_id = DiscID.disc_id(DiscID.open(self.dev))
query_info = CDDB.query(disc_id)[1]
if not query_info:
            fatal('The disc is not listed in FreeDB; dir2ogg only supports discs listed in MusicBrainz or FreeDB')
if isinstance(query_info, list):
query_info = query_info[0]
read_info = CDDB.read(query_info['category'], query_info['disc_id'])[1]
for track in range(disc_id[1]):
title = {}
title['discid'] = query_info['disc_id']
title['artist'], title['album'] = (track.strip() for track in query_info['title'].split("/"))
title['genre'] = read_info['DGENRE']
title['date'] = read_info['DYEAR']
title['title'] = read_info['TTITLE' + str(track)]
            title['tracktotal'] = str(disc_id[1])
title['ntracknumber'] = '0' * (len(title['tracktotal'] ) - len(str(track+1)) ) + str(track+1)
title['tracknumber'] = str(track+1)
            for key, val in list(title.items()):
                # CDDB returns ISO-8859-1 byte strings on Python 2; decode
                # them, and leave already-decoded text alone.
                title[key] = str(val, "ISO-8859-1") if isinstance(val, bytes) else str(val)
ConvertTrack(self.dev, self.conf, track+1, title)
def get_mb(self):
try:
import musicbrainz2.disc as mbdisc
import musicbrainz2.webservice as mbws
except ImportError as err:
            warn('python-musicbrainz2 is not installed; falling back to CDDB.')
raise self.MBError(err)
service = mbws.WebService()
query = mbws.Query(service)
# Read the disc in the drive
try:
disc = mbdisc.readDisc(self.dev)
except mbdisc.DiscError as err:
warn(err)
raise self.MBError
discId = disc.getId()
try:
myfilter = mbws.ReleaseFilter(discId=discId)
results = query.getReleases(myfilter)
except mbws.WebServiceError as err:
warn(err)
raise self.MBError
if len(results) == 0:
print("Disc is not yet in the MusicBrainz database.")
print("Consider adding it via", mbdisc.getSubmissionUrl(disc))
raise self.MBError
try:
inc = mbws.ReleaseIncludes(artist=True, tracks=True, releaseEvents=True)
release = query.getReleaseById(results[0].release.getId(), inc)
except mbws.WebServiceError as err:
warn(err)
raise self.MBError
isSingleArtist = release.isSingleArtistRelease()
try:
# try to get the CDDB ID
import DiscID
cddb_id = '%08lx' % int(DiscID.disc_id(DiscID.open(self.dev))[0])
        except Exception:
cddb_id = False
trackn = 1
for track in release.tracks:
title = {}
title['artist'] = isSingleArtist and release.artist.name or track.artist
if cddb_id:
title['discid'] = cddb_id
title['album'] = release.title
title['date'] = release.getEarliestReleaseDate()
title['musicbrainz_albumartistid'] = release.artist.id.split("/")[-1]
title['musicbrainz_albumid'] = release.id.split("/")[-1]
title['musicbrainz_discid'] = discId
title['musicbrainz_sortname'] = release.artist.sortName
title['musicbrainz_trackid'] = track.id.split("/")[-1]
title['title'] = track.title
title['tracktotal'] = str(len(release.tracks))
title['ntracknumber'] = "%02d" % trackn
title['tracknumber'] = str(trackn)
ConvertTrack(self.dev, self.conf, trackn, title)
            trackn += 1
class ConvertDirectory(object):
'''
This class is just a wrapper for Convert.
Grab the songs to convert, then feed them one
by one to the Convert class.
'''
def __init__(self, conf, directory, files):
''' Decide which files will be converted.'''
        if not os.path.exists(directory):
fatal('Directory: "%s" not found' % directory)
self.directory = directory = os.path.normpath(directory) + os.path.sep
self.songs = mmatch(files, conf.filters, False)
if conf.verbose:
self.print_if_verbose()
for song in self.songs:
try:
Convert(directory + song, conf)
            except Exception:
                warn('File: %s error in ogg conversion' % (directory + song))
def print_if_verbose(self):
''' Echo files to be converted if verbose flag is set.'''
info('In %s I am going to convert:' % self.directory)
for song in self.songs:
print(" ", song)
def show_banner():
print('dir2ogg %s (%s), converts audio files into ogg vorbis.\n' % (__version__, __date__))
def show_license(*args, **kwargs):
print('Copyright (C) 2007-2008 Julian Andres Klode <jak@jak-linux.org>')
print('Copyright (C) 2003-2006 Darren Kirby <d@badcomputer.org>\n')
print('This program is distributed in the hope that it will be useful,')
print('but WITHOUT ANY WARRANTY; without even the implied warranty of')
print('MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the')
print('GNU General Public License for more details.\n')
print('Currently developed by Julian Andres Klode <jak@jak-linux.org>.')
sys.exit(0)
def main():
    conf, conf_args = read_opts()
if conf.cdda:
        discs = conf_args or ("/dev/cdrom",)
for disc in discs:
ConvertDisc(disc, conf)
elif conf.directory or conf.recursive:
rdirs = {}
for path in conf_args:
if not os.path.isdir(path):
                fatal('Path: %s does not exist' % path)
elif conf.recursive:
rdirs.update(return_dirs(path))
elif conf.directory:
rdirs.update({path: os.listdir(path)})
for directory, files in list(rdirs.items()):
try:
ConvertDirectory(conf, directory, files)
            except Exception:
                warn('Dir: %s; files: %s: error in ogg conversion' % (directory, files))
else:
for path in conf_args:
if not os.path.isfile(path):
                fatal('Path: %s does not exist' % path)
for filename in conf_args:
try:
Convert(filename, conf)
            except Exception:
warn('File: %s error in ogg conversion' % filename)
sys.exit(0)
if __name__ == '__main__':
main()
|
pat1/autoradio
|
autoradio/dir2ogg.py
|
Python
|
gpl-2.0
| 30,823
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "kaka.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
hdzierz/Kaka
|
manage.py
|
Python
|
gpl-2.0
| 247
|
# pylint: disable=E1101,E1103,W0232
import datetime
import warnings
from functools import partial
from sys import getsizeof
import numpy as np
from pandas._libs import index as libindex, lib, Timestamp
from pandas.compat import range, zip, lrange, lzip, map
from pandas.compat.numpy import function as nv
from pandas import compat
from pandas.core.dtypes.common import (
_ensure_int64,
_ensure_platform_int,
is_object_dtype,
is_iterator,
is_list_like,
is_scalar)
from pandas.core.dtypes.missing import isna, array_equivalent
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.core.common import (_values_from_object,
is_bool_indexer,
is_null_slice,
is_true_slices)
import pandas.core.base as base
from pandas.util._decorators import (Appender, cache_readonly,
deprecate, deprecate_kwarg)
import pandas.core.common as com
import pandas.core.missing as missing
import pandas.core.algorithms as algos
from pandas.io.formats.printing import pprint_thing
from pandas.core.config import get_option
from pandas.core.indexes.base import (
Index, _ensure_index,
_get_na_value, InvalidIndexError,
_index_shared_docs)
from pandas.core.indexes.frozen import (
FrozenNDArray, FrozenList, _ensure_frozen)
import pandas.core.indexes.base as ibase
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
dict(klass='MultiIndex',
target_klass='MultiIndex or list of tuples'))
class MultiIndex(Index):
"""
A multi-level, or hierarchical, index object for pandas objects
Parameters
----------
levels : sequence of arrays
The unique labels for each level
labels : sequence of arrays
Integers for each level designating which label at each location
sortorder : optional int
Level of sortedness (must be lexicographically sorted by that
level)
names : optional sequence of objects
Names for each of the index levels. (name is accepted for compat)
copy : boolean, default False
Copy the meta-data
verify_integrity : boolean, default True
Check that the levels/labels are consistent and valid
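    Examples
    --------
    A MultiIndex is usually constructed with one of the ``from_*``
    helpers defined below rather than by passing levels/labels directly, e.g.
    >>> MultiIndex.from_arrays([[1, 1, 2], ['red', 'blue', 'red']],
                               names=['number', 'color'])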
"""
# initialize to zero-length tuples to make everything work
_typ = 'multiindex'
_names = FrozenList()
_levels = FrozenList()
_labels = FrozenList()
_comparables = ['names']
rename = Index.set_names
def __new__(cls, levels=None, labels=None, sortorder=None, names=None,
copy=False, verify_integrity=True, _set_identity=True,
name=None, **kwargs):
# compat with Index
if name is not None:
names = name
if levels is None or labels is None:
raise TypeError("Must pass both levels and labels")
if len(levels) != len(labels):
raise ValueError('Length of levels and labels must be the same.')
if len(levels) == 0:
raise ValueError('Must pass non-zero number of levels/labels')
if len(levels) == 1:
if names:
name = names[0]
else:
name = None
return Index(levels[0], name=name, copy=True).take(labels[0])
result = object.__new__(MultiIndex)
# we've already validated levels and labels, so shortcut here
result._set_levels(levels, copy=copy, validate=False)
result._set_labels(labels, copy=copy, validate=False)
if names is not None:
# handles name validation
result._set_names(names)
if sortorder is not None:
result.sortorder = int(sortorder)
else:
result.sortorder = sortorder
if verify_integrity:
result._verify_integrity()
if _set_identity:
result._reset_identity()
return result
def _verify_integrity(self, labels=None, levels=None):
"""
Parameters
----------
labels : optional list
Labels to check for validity. Defaults to current labels.
levels : optional list
Levels to check for validity. Defaults to current levels.
Raises
------
ValueError
* if length of levels and labels don't match or any label would
exceed level bounds
"""
        # NOTE: Currently does not check, among other things, that the cached
        # nlevels matches nor that the sortorder matches the actual sortorder.
labels = labels or self.labels
levels = levels or self.levels
if len(levels) != len(labels):
raise ValueError("Length of levels and labels must match. NOTE:"
" this index is in an inconsistent state.")
label_length = len(self.labels[0])
for i, (level, label) in enumerate(zip(levels, labels)):
if len(label) != label_length:
raise ValueError("Unequal label lengths: %s" %
([len(lab) for lab in labels]))
if len(label) and label.max() >= len(level):
raise ValueError("On level %d, label max (%d) >= length of"
" level (%d). NOTE: this index is in an"
" inconsistent state" % (i, label.max(),
len(level)))
def _get_levels(self):
return self._levels
def _set_levels(self, levels, level=None, copy=False, validate=True,
verify_integrity=False):
        # This is NOT part of the levels property because it should not be
        # possible to set levels externally. User beware if you change
        # _levels directly
if validate and len(levels) == 0:
raise ValueError('Must set non-zero number of levels.')
if validate and level is None and len(levels) != self.nlevels:
raise ValueError('Length of levels must match number of levels.')
if validate and level is not None and len(levels) != len(level):
raise ValueError('Length of levels must match length of level.')
if level is None:
new_levels = FrozenList(
_ensure_index(lev, copy=copy)._shallow_copy()
for lev in levels)
else:
level = [self._get_level_number(l) for l in level]
new_levels = list(self._levels)
for l, v in zip(level, levels):
new_levels[l] = _ensure_index(v, copy=copy)._shallow_copy()
new_levels = FrozenList(new_levels)
if verify_integrity:
self._verify_integrity(levels=new_levels)
names = self.names
self._levels = new_levels
if any(names):
self._set_names(names)
self._tuples = None
self._reset_cache()
def set_levels(self, levels, level=None, inplace=False,
verify_integrity=True):
"""
Set new levels on MultiIndex. Defaults to returning
new index.
Parameters
----------
levels : sequence or list of sequence
new level(s) to apply
level : int, level name, or sequence of int/level names (default None)
level(s) to set (None for all levels)
inplace : bool
if True, mutates in place
verify_integrity : bool (default True)
if True, checks that levels and labels are compatible
Returns
-------
new index (of same type and class...etc)
Examples
--------
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')],
names=['foo', 'bar'])
>>> idx.set_levels([['a','b'], [1,2]])
MultiIndex(levels=[[u'a', u'b'], [1, 2]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
>>> idx.set_levels(['a','b'], level=0)
MultiIndex(levels=[[u'a', u'b'], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
>>> idx.set_levels(['a','b'], level='bar')
MultiIndex(levels=[[1, 2], [u'a', u'b']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
>>> idx.set_levels([['a','b'], [1,2]], level=[0,1])
MultiIndex(levels=[[u'a', u'b'], [1, 2]],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
"""
if level is not None and not is_list_like(level):
if not is_list_like(levels):
raise TypeError("Levels must be list-like")
if is_list_like(levels[0]):
raise TypeError("Levels must be list-like")
level = [level]
levels = [levels]
elif level is None or is_list_like(level):
if not is_list_like(levels) or not is_list_like(levels[0]):
raise TypeError("Levels must be list of lists-like")
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._reset_identity()
idx._set_levels(levels, level=level, validate=True,
verify_integrity=verify_integrity)
if not inplace:
return idx
# remove me in 0.14 and change to read only property
__set_levels = deprecate("setting `levels` directly",
partial(set_levels, inplace=True,
verify_integrity=True),
alt_name="set_levels")
levels = property(fget=_get_levels, fset=__set_levels)
def _get_labels(self):
return self._labels
def _set_labels(self, labels, level=None, copy=False, validate=True,
verify_integrity=False):
if validate and level is None and len(labels) != self.nlevels:
raise ValueError("Length of labels must match number of levels")
if validate and level is not None and len(labels) != len(level):
raise ValueError('Length of labels must match length of levels.')
if level is None:
new_labels = FrozenList(
_ensure_frozen(lab, lev, copy=copy)._shallow_copy()
for lev, lab in zip(self.levels, labels))
else:
level = [self._get_level_number(l) for l in level]
new_labels = list(self._labels)
for l, lev, lab in zip(level, self.levels, labels):
new_labels[l] = _ensure_frozen(
lab, lev, copy=copy)._shallow_copy()
new_labels = FrozenList(new_labels)
if verify_integrity:
self._verify_integrity(labels=new_labels)
self._labels = new_labels
self._tuples = None
self._reset_cache()
def set_labels(self, labels, level=None, inplace=False,
verify_integrity=True):
"""
Set new labels on MultiIndex. Defaults to returning
new index.
Parameters
----------
labels : sequence or list of sequence
new labels to apply
level : int, level name, or sequence of int/level names (default None)
level(s) to set (None for all levels)
inplace : bool
if True, mutates in place
verify_integrity : bool (default True)
if True, checks that levels and labels are compatible
Returns
-------
new index (of same type and class...etc)
Examples
--------
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')],
names=['foo', 'bar'])
>>> idx.set_labels([[1,0,1,0], [0,0,1,1]])
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[1, 0, 1, 0], [0, 0, 1, 1]],
names=[u'foo', u'bar'])
>>> idx.set_labels([1,0,1,0], level=0)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[1, 0, 1, 0], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
>>> idx.set_labels([0,0,1,1], level='bar')
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 0, 1, 1]],
names=[u'foo', u'bar'])
>>> idx.set_labels([[1,0,1,0], [0,0,1,1]], level=[0,1])
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[1, 0, 1, 0], [0, 0, 1, 1]],
names=[u'foo', u'bar'])
"""
if level is not None and not is_list_like(level):
if not is_list_like(labels):
raise TypeError("Labels must be list-like")
if is_list_like(labels[0]):
raise TypeError("Labels must be list-like")
level = [level]
labels = [labels]
elif level is None or is_list_like(level):
if not is_list_like(labels) or not is_list_like(labels[0]):
raise TypeError("Labels must be list of lists-like")
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._reset_identity()
idx._set_labels(labels, level=level, verify_integrity=verify_integrity)
if not inplace:
return idx
# remove me in 0.14 and change to readonly property
__set_labels = deprecate("setting labels directly",
partial(set_labels, inplace=True,
verify_integrity=True),
alt_name="set_labels")
labels = property(fget=_get_labels, fset=__set_labels)
def copy(self, names=None, dtype=None, levels=None, labels=None,
deep=False, _set_identity=False, **kwargs):
"""
Make a copy of this object. Names, dtype, levels and labels can be
passed and will be set on new copy.
Parameters
----------
names : sequence, optional
dtype : numpy dtype or pandas type, optional
levels : sequence, optional
labels : sequence, optional
Returns
-------
copy : MultiIndex
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
This could be potentially expensive on large MultiIndex objects.
"""
name = kwargs.get('name')
names = self._validate_names(name=name, names=names, deep=deep)
if deep:
from copy import deepcopy
if levels is None:
levels = deepcopy(self.levels)
if labels is None:
labels = deepcopy(self.labels)
else:
if levels is None:
levels = self.levels
if labels is None:
labels = self.labels
return MultiIndex(levels=levels, labels=labels, names=names,
sortorder=self.sortorder, verify_integrity=False,
_set_identity=_set_identity)
def __array__(self, dtype=None):
""" the array interface, return my values """
return self.values
def view(self, cls=None):
""" this is defined as a copy with the same identity """
result = self.copy()
result._id = self._id
return result
def _shallow_copy_with_infer(self, values=None, **kwargs):
# On equal MultiIndexes the difference is empty.
# Therefore, an empty MultiIndex is returned GH13490
if len(values) == 0:
return MultiIndex(levels=[[] for _ in range(self.nlevels)],
labels=[[] for _ in range(self.nlevels)],
**kwargs)
return self._shallow_copy(values, **kwargs)
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, **kwargs):
if values is not None:
if 'name' in kwargs:
kwargs['names'] = kwargs.pop('name', None)
# discards freq
kwargs.pop('freq', None)
return MultiIndex.from_tuples(values, **kwargs)
return self.view()
@cache_readonly
def dtype(self):
return np.dtype('O')
def _is_memory_usage_qualified(self):
""" return a boolean if we need a qualified .info display """
def f(l):
return 'mixed' in l or 'string' in l or 'unicode' in l
return any([f(l) for l in self._inferred_type_levels])
@Appender(Index.memory_usage.__doc__)
def memory_usage(self, deep=False):
# we are overwriting our base class to avoid
# computing .values here which could materialize
        # a tuple representation unnecessarily
return self._nbytes(deep)
@cache_readonly
def nbytes(self):
""" return the number of bytes in the underlying data """
return self._nbytes(False)
def _nbytes(self, deep=False):
"""
return the number of bytes in the underlying data
deeply introspect the level data if deep=True
include the engine hashtable
        *this is an internal routine*
"""
level_nbytes = sum((i.memory_usage(deep=deep) for i in self.levels))
label_nbytes = sum((i.nbytes for i in self.labels))
names_nbytes = sum((getsizeof(i) for i in self.names))
result = level_nbytes + label_nbytes + names_nbytes
# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
"""
attrs = [
('levels', ibase.default_pprint(self._levels,
max_seq_items=False)),
('labels', ibase.default_pprint(self._labels,
max_seq_items=False))]
if not all(name is None for name in self.names):
attrs.append(('names', ibase.default_pprint(self.names)))
if self.sortorder is not None:
attrs.append(('sortorder', ibase.default_pprint(self.sortorder)))
return attrs
def _format_space(self):
return "\n%s" % (' ' * (len(self.__class__.__name__) + 1))
def _format_data(self):
        # we are formatting through the attributes
return None
def __len__(self):
return len(self.labels[0])
def _get_names(self):
return FrozenList(level.name for level in self.levels)
def _set_names(self, names, level=None, validate=True):
"""
sets names on levels. WARNING: mutates!
Note that you generally want to set this *after* changing levels, so
that it only acts on copies
"""
# GH 15110
# Don't allow a single string for names in a MultiIndex
if names is not None and not is_list_like(names):
raise ValueError('Names should be list-like for a MultiIndex')
names = list(names)
if validate and level is not None and len(names) != len(level):
raise ValueError('Length of names must match length of level.')
if validate and level is None and len(names) != self.nlevels:
raise ValueError('Length of names must match number of levels in '
'MultiIndex.')
if level is None:
level = range(self.nlevels)
else:
level = [self._get_level_number(l) for l in level]
# set the name
for l, name in zip(level, names):
self.levels[l].rename(name, inplace=True)
names = property(fset=_set_names, fget=_get_names,
doc="Names of levels in MultiIndex")
def _reference_duplicate_name(self, name):
"""
        Returns True if the name referred to in self.names is duplicated.
"""
# count the times name equals an element in self.names.
return sum(name == n for n in self.names) > 1
def _format_native_types(self, na_rep='nan', **kwargs):
new_levels = []
new_labels = []
# go through the levels and format them
for level, label in zip(self.levels, self.labels):
level = level._format_native_types(na_rep=na_rep, **kwargs)
# add nan values, if there are any
mask = (label == -1)
if mask.any():
nan_index = len(level)
level = np.append(level, na_rep)
label = label.values()
label[mask] = nan_index
new_levels.append(level)
new_labels.append(label)
# reconstruct the multi-index
mi = MultiIndex(levels=new_levels, labels=new_labels, names=self.names,
sortorder=self.sortorder, verify_integrity=False)
return mi.values
@Appender(_index_shared_docs['_get_grouper_for_level'])
def _get_grouper_for_level(self, mapper, level):
indexer = self.labels[level]
level_index = self.levels[level]
if mapper is not None:
# Handle group mapping function and return
level_values = self.levels[level].take(indexer)
grouper = level_values.map(mapper)
return grouper, None, None
labels, uniques = algos.factorize(indexer, sort=True)
if len(uniques) > 0 and uniques[0] == -1:
# Handle NAs
mask = indexer != -1
ok_labels, uniques = algos.factorize(indexer[mask],
sort=True)
labels = np.empty(len(indexer), dtype=indexer.dtype)
labels[mask] = ok_labels
labels[~mask] = -1
if len(uniques) < len(level_index):
# Remove unobserved levels from level_index
level_index = level_index.take(uniques)
grouper = level_index.take(labels)
return grouper, labels, level_index
@property
def _constructor(self):
return MultiIndex.from_tuples
@cache_readonly
def inferred_type(self):
return 'mixed'
@staticmethod
def _from_elements(values, labels=None, levels=None, names=None,
sortorder=None):
return MultiIndex(levels, labels, names, sortorder=sortorder)
def _get_level_number(self, level):
try:
count = self.names.count(level)
if count > 1:
raise ValueError('The name %s occurs multiple times, use a '
'level number' % level)
level = self.names.index(level)
except ValueError:
if not isinstance(level, int):
raise KeyError('Level %s not found' % str(level))
elif level < 0:
level += self.nlevels
if level < 0:
orig_level = level - self.nlevels
raise IndexError('Too many levels: Index has only %d '
'levels, %d is not a valid level number' %
(self.nlevels, orig_level))
# Note: levels are zero-based
elif level >= self.nlevels:
raise IndexError('Too many levels: Index has only %d levels, '
'not %d' % (self.nlevels, level + 1))
return level
_tuples = None
@cache_readonly
def _engine(self):
        # choose our engine based on our size: the hashing-based
        # MultiIndexHashEngine for larger indexes, and the
        # MultiIndexObjectEngine for smaller ones
# xref: https://github.com/pandas-dev/pandas/pull/16324
l = len(self)
if l > 10000:
return libindex.MultiIndexHashEngine(lambda: self, l)
return libindex.MultiIndexObjectEngine(lambda: self.values, l)
@property
def values(self):
if self._tuples is not None:
return self._tuples
values = []
for lev, lab in zip(self.levels, self.labels):
# Need to box timestamps, etc.
box = hasattr(lev, '_box_values')
# Try to minimize boxing.
if box and len(lev) > len(lab):
taken = lev._box_values(algos.take_1d(lev._values, lab))
elif box:
taken = algos.take_1d(lev._box_values(lev._values), lab,
fill_value=_get_na_value(lev.dtype.type))
else:
taken = algos.take_1d(np.asarray(lev._values), lab)
values.append(taken)
self._tuples = lib.fast_zip(values)
return self._tuples
@property
def _is_v1(self):
return False
@property
def _is_v2(self):
return False
@property
def _has_complex_internals(self):
# to disable groupby tricks
return True
@cache_readonly
def is_monotonic(self):
"""
        Return True if the index is monotonic increasing (values only
        stay equal or increase).
"""
return self.is_monotonic_increasing
@cache_readonly
def is_monotonic_increasing(self):
"""
        Return True if the index is monotonic increasing (values only
        stay equal or increase).
"""
# reversed() because lexsort() wants the most significant key last.
values = [self._get_level_values(i).values
for i in reversed(range(len(self.levels)))]
try:
sort_order = np.lexsort(values)
return Index(sort_order).is_monotonic
except TypeError:
# we have mixed types and np.lexsort is not happy
return Index(self.values).is_monotonic
@property
def is_monotonic_decreasing(self):
"""
        Return True if the index is monotonic decreasing (values only
        stay equal or decrease).
"""
return False
@cache_readonly
def is_unique(self):
return not self.duplicated().any()
@cache_readonly
def _have_mixed_levels(self):
""" return a boolean list indicated if we have mixed levels """
return ['mixed' in l for l in self._inferred_type_levels]
@cache_readonly
def _inferred_type_levels(self):
""" return a list of the inferred types, one for each level """
return [i.inferred_type for i in self.levels]
@cache_readonly
def _hashed_values(self):
""" return a uint64 ndarray of my hashed values """
from pandas.core.util.hashing import hash_tuples
return hash_tuples(self)
def _hashed_indexing_key(self, key):
"""
validate and return the hash for the provided key
        *this is internal, for use by the cython routines*
        Parameters
        ----------
key : string or tuple
Returns
-------
np.uint64
Notes
-----
we need to stringify if we have mixed levels
"""
from pandas.core.util.hashing import hash_tuples, hash_tuple
if not isinstance(key, tuple):
return hash_tuples(key)
if not len(key) == self.nlevels:
raise KeyError
def f(k, stringify):
if stringify and not isinstance(k, compat.string_types):
k = str(k)
return k
key = tuple([f(k, stringify)
for k, stringify in zip(key, self._have_mixed_levels)])
return hash_tuple(key)
@Appender(base._shared_docs['duplicated'] % _index_doc_kwargs)
def duplicated(self, keep='first'):
from pandas.core.sorting import get_group_index
from pandas._libs.hashtable import duplicated_int64
shape = map(len, self.levels)
ids = get_group_index(self.labels, shape, sort=False, xnull=False)
return duplicated_int64(ids, keep)
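    # Hedged example for duplicated() (the data is illustrative):
    #   mi = MultiIndex.from_tuples([(1, 'a'), (1, 'a'), (2, 'b')])
    #   mi.duplicated()             -> array([False,  True, False])
    #   mi.duplicated(keep='last')  -> array([ True, False, False])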
@Appender(ibase._index_shared_docs['fillna'])
def fillna(self, value=None, downcast=None):
# isna is not implemented for MultiIndex
raise NotImplementedError('isna is not defined for MultiIndex')
@Appender(_index_shared_docs['dropna'])
def dropna(self, how='any'):
nans = [label == -1 for label in self.labels]
if how == 'any':
indexer = np.any(nans, axis=0)
elif how == 'all':
indexer = np.all(nans, axis=0)
else:
raise ValueError("invalid how option: {0}".format(how))
new_labels = [label[~indexer] for label in self.labels]
return self.copy(labels=new_labels, deep=True)
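    # Hedged example for dropna() (the data is illustrative):
    #   mi = MultiIndex.from_arrays([[1, np.nan, 2], ['a', 'b', np.nan]])
    #   mi.dropna()           -> keeps only (1, 'a')
    #   mi.dropna(how='all')  -> keeps every row with at least one non-NA label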
def get_value(self, series, key):
# somewhat broken encapsulation
from pandas.core.indexing import maybe_droplevels
# Label-based
s = _values_from_object(series)
k = _values_from_object(key)
def _try_mi(k):
# TODO: what if a level contains tuples??
loc = self.get_loc(k)
new_values = series._values[loc]
new_index = self[loc]
new_index = maybe_droplevels(new_index, k)
return series._constructor(new_values, index=new_index,
name=series.name).__finalize__(self)
try:
return self._engine.get_value(s, k)
except KeyError as e1:
try:
return _try_mi(key)
except KeyError:
pass
try:
return libindex.get_value_at(s, k)
except IndexError:
raise
except TypeError:
# generator/iterator-like
if is_iterator(key):
raise InvalidIndexError(key)
else:
raise e1
except Exception: # pragma: no cover
raise e1
except TypeError:
# a Timestamp will raise a TypeError in a multi-index
# rather than a KeyError, try it here
# note that a string that 'looks' like a Timestamp will raise
# a KeyError! (GH5725)
if (isinstance(key, (datetime.datetime, np.datetime64)) or
(compat.PY3 and isinstance(key, compat.string_types))):
try:
return _try_mi(key)
                except KeyError:
                    raise
                except Exception:
                    pass
try:
return _try_mi(Timestamp(key))
                except Exception:
pass
raise InvalidIndexError(key)
def _get_level_values(self, level):
"""
Return vector of label values for requested level,
equal to the length of the index
**this is an internal method**
Parameters
----------
level : int level
Returns
-------
values : ndarray
"""
unique = self.levels[level]
labels = self.labels[level]
filled = algos.take_1d(unique._values, labels,
fill_value=unique._na_value)
values = unique._shallow_copy(filled)
return values
def get_level_values(self, level):
"""
Return vector of label values for requested level,
equal to the length of the index
Parameters
----------
level : int or level name
Returns
-------
values : Index
"""
level = self._get_level_number(level)
values = self._get_level_values(level)
return values
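    # Hedged example for get_level_values() (the data is illustrative):
    #   mi = MultiIndex.from_arrays([[1, 1, 2], ['a', 'b', 'a']],
    #                               names=['num', 'let'])
    #   mi.get_level_values('let')  ->  Index(['a', 'b', 'a'], name='let')
    # Repeated labels are expanded, so the result has the length of the index.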
def format(self, space=2, sparsify=None, adjoin=True, names=False,
na_rep=None, formatter=None):
if len(self) == 0:
return []
stringified_levels = []
for lev, lab in zip(self.levels, self.labels):
na = na_rep if na_rep is not None else _get_na_rep(lev.dtype.type)
if len(lev) > 0:
formatted = lev.take(lab).format(formatter=formatter)
# we have some NA
mask = lab == -1
if mask.any():
formatted = np.array(formatted, dtype=object)
formatted[mask] = na
formatted = formatted.tolist()
else:
# weird all NA case
formatted = [pprint_thing(na if isna(x) else x,
escape_chars=('\t', '\r', '\n'))
for x in algos.take_1d(lev._values, lab)]
stringified_levels.append(formatted)
result_levels = []
for lev, name in zip(stringified_levels, self.names):
level = []
if names:
level.append(pprint_thing(name,
escape_chars=('\t', '\r', '\n'))
if name is not None else '')
level.extend(np.array(lev, dtype=object))
result_levels.append(level)
if sparsify is None:
sparsify = get_option("display.multi_sparse")
if sparsify:
sentinel = ''
# GH3547
            # use value of sparsify as sentinel, unless it's an obvious
            # "truthy" value
if sparsify not in [True, 1]:
sentinel = sparsify
# little bit of a kludge job for #1217
result_levels = _sparsify(result_levels, start=int(names),
sentinel=sentinel)
if adjoin:
from pandas.io.formats.format import _get_adjustment
adj = _get_adjustment()
return adj.adjoin(space, *result_levels).split('\n')
else:
return result_levels
def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self.set_levels([i._to_safe_for_reshape() for i in self.levels])
def to_frame(self, index=True):
"""
Create a DataFrame with the columns the levels of the MultiIndex
.. versionadded:: 0.20.0
Parameters
----------
index : boolean, default True
return this MultiIndex as the index
Returns
-------
DataFrame
"""
from pandas import DataFrame
result = DataFrame({(name or level):
self._get_level_values(level)
for name, level in
zip(self.names, range(len(self.levels)))},
copy=False)
if index:
result.index = self
return result
def to_hierarchical(self, n_repeat, n_shuffle=1):
"""
Return a MultiIndex reshaped to conform to the
shapes given by n_repeat and n_shuffle.
Useful to replicate and rearrange a MultiIndex for combination
with another Index with n_repeat items.
Parameters
----------
n_repeat : int
Number of times to repeat the labels on self
n_shuffle : int
Controls the reordering of the labels. If the result is going
to be an inner level in a MultiIndex, n_shuffle will need to be
            greater than one. The size of each label must be divisible by
n_shuffle.
Returns
-------
MultiIndex
Examples
--------
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')])
>>> idx.to_hierarchical(3)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
"""
levels = self.levels
labels = [np.repeat(x, n_repeat) for x in self.labels]
# Assumes that each label is divisible by n_shuffle
labels = [x.reshape(n_shuffle, -1).ravel(order='F') for x in labels]
names = self.names
return MultiIndex(levels=levels, labels=labels, names=names)
@property
def is_all_dates(self):
return False
def is_lexsorted(self):
"""
Return True if the labels are lexicographically sorted
"""
return self.lexsort_depth == self.nlevels
@cache_readonly
def lexsort_depth(self):
if self.sortorder is not None:
if self.sortorder == 0:
return self.nlevels
else:
return 0
int64_labels = [_ensure_int64(lab) for lab in self.labels]
for k in range(self.nlevels, 0, -1):
if lib.is_lexsorted(int64_labels[:k]):
return k
return 0
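    # Worked illustration of lexsort_depth (values assumed, not from the
    # source): for labels [[0, 0, 1], [1, 0, 0]] the first level is
    # non-decreasing, but the label pairs (0, 1), (0, 0) are out of order,
    # so only the first level is lexsorted and lexsort_depth is 1.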
@classmethod
def from_arrays(cls, arrays, sortorder=None, names=None):
"""
Convert arrays to MultiIndex
Parameters
----------
arrays : list / sequence of array-likes
Each array-like gives one level's value for each data point.
len(arrays) is the number of levels.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
            level)
        names : list / sequence of str, optional
            Names for the levels in the index.
Returns
-------
index : MultiIndex
Examples
--------
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> MultiIndex.from_arrays(arrays, names=('number', 'color'))
See Also
--------
MultiIndex.from_tuples : Convert list of tuples to MultiIndex
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables
"""
if len(arrays) == 1:
name = None if names is None else names[0]
return Index(arrays[0], name=name)
# Check if lengths of all arrays are equal or not,
# raise ValueError, if not
for i in range(1, len(arrays)):
if len(arrays[i]) != len(arrays[i - 1]):
raise ValueError('all arrays must be same length')
from pandas.core.categorical import _factorize_from_iterables
labels, levels = _factorize_from_iterables(arrays)
if names is None:
names = [getattr(arr, "name", None) for arr in arrays]
return MultiIndex(levels=levels, labels=labels, sortorder=sortorder,
names=names, verify_integrity=False)
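    # The from_arrays example above, completed with the output it would
    # produce on this pandas version (repr hedged; levels come out sorted
    # by the underlying factorization):
    #
    # >>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
    # >>> MultiIndex.from_arrays(arrays, names=('number', 'color'))
    # MultiIndex(levels=[[1, 2], ['blue', 'red']],
    #            labels=[[0, 0, 1, 1], [1, 0, 1, 0]],
    #            names=['number', 'color'])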
@classmethod
def from_tuples(cls, tuples, sortorder=None, names=None):
"""
Convert list of tuples to MultiIndex
Parameters
----------
tuples : list / sequence of tuple-likes
Each tuple is the index of one row/column.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
            level)
        names : list / sequence of str, optional
            Names for the levels in the index.
Returns
-------
index : MultiIndex
Examples
--------
>>> tuples = [(1, u'red'), (1, u'blue'),
(2, u'red'), (2, u'blue')]
>>> MultiIndex.from_tuples(tuples, names=('number', 'color'))
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables
"""
if len(tuples) == 0:
if names is None:
msg = 'Cannot infer number of levels from empty list'
raise TypeError(msg)
arrays = [[]] * len(names)
elif isinstance(tuples, (np.ndarray, Index)):
if isinstance(tuples, Index):
tuples = tuples._values
arrays = list(lib.tuples_to_object_array(tuples).T)
elif isinstance(tuples, list):
arrays = list(lib.to_object_array_tuples(tuples).T)
else:
arrays = lzip(*tuples)
return MultiIndex.from_arrays(arrays, sortorder=sortorder, names=names)
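    # from_tuples accepts anything tuple-like; a hedged sketch using zip,
    # expected to produce the same index as the from_arrays example:
    #
    # >>> tuples = list(zip([1, 1, 2, 2], ['red', 'blue', 'red', 'blue']))
    # >>> MultiIndex.from_tuples(tuples, names=('number', 'color'))
    # MultiIndex(levels=[[1, 2], ['blue', 'red']],
    #            labels=[[0, 0, 1, 1], [1, 0, 1, 0]],
    #            names=['number', 'color'])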
@classmethod
def from_product(cls, iterables, sortorder=None, names=None):
"""
Make a MultiIndex from the cartesian product of multiple iterables
Parameters
----------
iterables : list / sequence of iterables
Each iterable has unique labels for each level of the index.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of strings or None
Names for the levels in the index.
Returns
-------
index : MultiIndex
Examples
--------
>>> numbers = [0, 1, 2]
>>> colors = [u'green', u'purple']
>>> MultiIndex.from_product([numbers, colors],
names=['number', 'color'])
MultiIndex(levels=[[0, 1, 2], [u'green', u'purple']],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
names=[u'number', u'color'])
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex
MultiIndex.from_tuples : Convert list of tuples to MultiIndex
"""
from pandas.core.categorical import _factorize_from_iterables
from pandas.core.reshape.util import cartesian_product
labels, levels = _factorize_from_iterables(iterables)
labels = cartesian_product(labels)
return MultiIndex(levels, labels, sortorder=sortorder, names=names)
def _sort_levels_monotonic(self):
"""
.. versionadded:: 0.20.0
This is an *internal* function.
        Create a new MultiIndex from the current one, with each of its
        levels sorted monotonically. This does not make the entire
        MultiIndex monotonic, JUST the levels.
The resulting MultiIndex will have the same outward
appearance, meaning the same .values and ordering. It will also
be .equals() to the original.
Returns
-------
MultiIndex
Examples
--------
>>> i = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> i
MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
        >>> i._sort_levels_monotonic()
MultiIndex(levels=[['a', 'b'], ['aa', 'bb']],
labels=[[0, 0, 1, 1], [1, 0, 1, 0]])
"""
if self.is_lexsorted() and self.is_monotonic:
return self
new_levels = []
new_labels = []
for lev, lab in zip(self.levels, self.labels):
if lev.is_monotonic:
new_levels.append(lev)
new_labels.append(lab)
continue
# indexer to reorder the levels
indexer = lev.argsort()
lev = lev.take(indexer)
# indexer to reorder the labels
indexer = _ensure_int64(indexer)
ri = lib.get_reverse_indexer(indexer, len(indexer))
lab = algos.take_1d(ri, lab)
new_levels.append(lev)
new_labels.append(lab)
return MultiIndex(new_levels, new_labels,
names=self.names, sortorder=self.sortorder,
verify_integrity=False)
def remove_unused_levels(self):
"""
        Create a new MultiIndex from the current one, removing unused
        levels, meaning levels that are not expressed in the labels.
The resulting MultiIndex will have the same outward
appearance, meaning the same .values and ordering. It will also
be .equals() to the original.
.. versionadded:: 0.20.0
Returns
-------
MultiIndex
Examples
--------
>>> i = pd.MultiIndex.from_product([range(2), list('ab')])
MultiIndex(levels=[[0, 1], ['a', 'b']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> i[2:]
MultiIndex(levels=[[0, 1], ['a', 'b']],
labels=[[1, 1], [0, 1]])
The 0 from the first level is not represented
and can be removed
>>> i[2:].remove_unused_levels()
MultiIndex(levels=[[1], ['a', 'b']],
labels=[[0, 0], [0, 1]])
"""
new_levels = []
new_labels = []
changed = False
for lev, lab in zip(self.levels, self.labels):
uniques = algos.unique(lab)
# nothing unused
if len(uniques) == len(lev):
new_levels.append(lev)
new_labels.append(lab)
continue
changed = True
# labels get mapped from uniques to 0:len(uniques)
label_mapping = np.zeros(len(lev))
label_mapping[uniques] = np.arange(len(uniques))
lab = label_mapping[lab]
# new levels are simple
lev = lev.take(uniques)
new_levels.append(lev)
new_labels.append(lab)
result = self._shallow_copy()
if changed:
result._reset_identity()
result._set_levels(new_levels, validate=False)
result._set_labels(new_labels, validate=False)
return result
@property
def nlevels(self):
return len(self.levels)
@property
def levshape(self):
return tuple(len(x) for x in self.levels)
@Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs)
def __contains__(self, key):
hash(key)
try:
self.get_loc(key)
return True
except LookupError:
return False
contains = __contains__
def __reduce__(self):
"""Necessary for making this object picklable"""
d = dict(levels=[lev for lev in self.levels],
labels=[label for label in self.labels],
sortorder=self.sortorder, names=list(self.names))
return ibase._new_Index, (self.__class__, d), None
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
levels = state.get('levels')
labels = state.get('labels')
sortorder = state.get('sortorder')
names = state.get('names')
elif isinstance(state, tuple):
nd_state, own_state = state
levels, labels, sortorder, names = own_state
self._set_levels([Index(x) for x in levels], validate=False)
self._set_labels(labels)
self._set_names(names)
self.sortorder = sortorder
self._verify_integrity()
self._reset_identity()
def __getitem__(self, key):
if is_scalar(key):
retval = []
for lev, lab in zip(self.levels, self.labels):
if lab[key] == -1:
retval.append(np.nan)
else:
retval.append(lev[lab[key]])
return tuple(retval)
else:
if is_bool_indexer(key):
key = np.asarray(key)
sortorder = self.sortorder
else:
# cannot be sure whether the result will be sorted
sortorder = None
if isinstance(key, Index):
key = np.asarray(key)
new_labels = [lab[key] for lab in self.labels]
return MultiIndex(levels=self.levels, labels=new_labels,
names=self.names, sortorder=sortorder,
verify_integrity=False)
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
nv.validate_take(tuple(), kwargs)
indices = _ensure_platform_int(indices)
taken = self._assert_take_fillable(self.labels, indices,
allow_fill=allow_fill,
fill_value=fill_value,
na_value=-1)
return MultiIndex(levels=self.levels, labels=taken,
names=self.names, verify_integrity=False)
def _assert_take_fillable(self, values, indices, allow_fill=True,
fill_value=None, na_value=None):
""" Internal method to handle NA filling of take """
# only fill if we are passing a non-None fill_value
if allow_fill and fill_value is not None:
if (indices < -1).any():
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
raise ValueError(msg)
taken = [lab.take(indices) for lab in self.labels]
mask = indices == -1
if mask.any():
masked = []
for new_label in taken:
label_values = new_label.values()
label_values[mask] = na_value
masked.append(FrozenNDArray(label_values))
taken = masked
else:
taken = [lab.take(indices) for lab in self.labels]
return taken
def append(self, other):
"""
        Append a collection of Index objects together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
if not isinstance(other, (list, tuple)):
other = [other]
if all((isinstance(o, MultiIndex) and o.nlevels >= self.nlevels)
for o in other):
arrays = []
for i in range(self.nlevels):
label = self._get_level_values(i)
appended = [o._get_level_values(i) for o in other]
arrays.append(label.append(appended))
return MultiIndex.from_arrays(arrays, names=self.names)
to_concat = (self.values, ) + tuple(k._values for k in other)
new_tuples = np.concatenate(to_concat)
# if all(isinstance(x, MultiIndex) for x in other):
try:
return MultiIndex.from_tuples(new_tuples, names=self.names)
except:
return Index(new_tuples)
def argsort(self, *args, **kwargs):
return self.values.argsort(*args, **kwargs)
@deprecate_kwarg(old_arg_name='n', new_arg_name='repeats')
def repeat(self, repeats, *args, **kwargs):
nv.validate_repeat(args, kwargs)
return MultiIndex(levels=self.levels,
labels=[label.view(np.ndarray).repeat(repeats)
for label in self.labels], names=self.names,
sortorder=self.sortorder, verify_integrity=False)
def where(self, cond, other=None):
raise NotImplementedError(".where is not supported for "
"MultiIndex operations")
def drop(self, labels, level=None, errors='raise'):
"""
Make new MultiIndex with passed list of labels deleted
Parameters
----------
labels : array-like
Must be a list of tuples
level : int or level name, default None
Returns
-------
dropped : MultiIndex
"""
if level is not None:
return self._drop_from_level(labels, level)
try:
if not isinstance(labels, (np.ndarray, Index)):
labels = com._index_labels_to_array(labels)
indexer = self.get_indexer(labels)
mask = indexer == -1
if mask.any():
if errors != 'ignore':
raise ValueError('labels %s not contained in axis' %
labels[mask])
indexer = indexer[~mask]
except Exception:
pass
inds = []
for label in labels:
try:
loc = self.get_loc(label)
# get_loc returns either an integer, a slice, or a boolean
# mask
if isinstance(loc, int):
inds.append(loc)
elif isinstance(loc, slice):
inds.extend(lrange(loc.start, loc.stop))
elif is_bool_indexer(loc):
if self.lexsort_depth == 0:
warnings.warn('dropping on a non-lexsorted multi-index'
' without a level parameter may impact '
'performance.',
PerformanceWarning,
stacklevel=3)
loc = loc.nonzero()[0]
inds.extend(loc)
else:
msg = 'unsupported indexer of type {}'.format(type(loc))
raise AssertionError(msg)
except KeyError:
if errors != 'ignore':
raise
return self.delete(inds)
def _drop_from_level(self, labels, level):
labels = com._index_labels_to_array(labels)
i = self._get_level_number(level)
index = self.levels[i]
values = index.get_indexer(labels)
mask = ~algos.isin(self.labels[i], values)
return self[mask]
def droplevel(self, level=0):
"""
Return Index with requested level removed. If MultiIndex has only 2
levels, the result will be of Index type not MultiIndex.
Parameters
----------
level : int/level name or list thereof
Notes
-----
Does not check if result index is unique or not
Returns
-------
index : Index or MultiIndex
"""
levels = level
if not isinstance(levels, (tuple, list)):
levels = [level]
new_levels = list(self.levels)
new_labels = list(self.labels)
new_names = list(self.names)
levnums = sorted(self._get_level_number(lev) for lev in levels)[::-1]
for i in levnums:
new_levels.pop(i)
new_labels.pop(i)
new_names.pop(i)
if len(new_levels) == 1:
# set nan if needed
mask = new_labels[0] == -1
result = new_levels[0].take(new_labels[0])
if mask.any():
result = result.putmask(mask, np.nan)
result.name = new_names[0]
return result
else:
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
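    # A doctest-style sketch of droplevel (assumed session; with only two
    # levels the result degrades to a plain Index):
    #
    # >>> mi = MultiIndex.from_arrays([[1, 2], ['a', 'b']],
    # ...                             names=['x', 'y'])
    # >>> mi.droplevel('x')
    # Index(['a', 'b'], dtype='object', name='y')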
def swaplevel(self, i=-2, j=-1):
"""
Swap level i with level j. Do not change the ordering of anything
Parameters
----------
i, j : int, string (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : MultiIndex
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
"""
new_levels = list(self.levels)
new_labels = list(self.labels)
new_names = list(self.names)
i = self._get_level_number(i)
j = self._get_level_number(j)
new_levels[i], new_levels[j] = new_levels[j], new_levels[i]
new_labels[i], new_labels[j] = new_labels[j], new_labels[i]
new_names[i], new_names[j] = new_names[j], new_names[i]
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
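    # swaplevel in action (illustrative; the labels travel with their
    # levels, so the underlying tuples are unchanged apart from order):
    #
    # >>> mi = MultiIndex.from_arrays([[1, 2], ['a', 'b']],
    # ...                             names=['x', 'y'])
    # >>> mi.swaplevel()
    # MultiIndex(levels=[['a', 'b'], [1, 2]],
    #            labels=[[0, 1], [0, 1]],
    #            names=['y', 'x'])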
def reorder_levels(self, order):
"""
        Rearrange levels using input order. May not drop or duplicate
        levels.
        Parameters
        ----------
        order : list of int or list of str
            List representing the new level order.
        """
order = [self._get_level_number(i) for i in order]
if len(order) != self.nlevels:
raise AssertionError('Length of order must be same as '
'number of levels (%d), got %d' %
(self.nlevels, len(order)))
new_levels = [self.levels[i] for i in order]
new_labels = [self.labels[i] for i in order]
new_names = [self.names[i] for i in order]
return MultiIndex(levels=new_levels, labels=new_labels,
names=new_names, verify_integrity=False)
def __getslice__(self, i, j):
return self.__getitem__(slice(i, j))
def _get_labels_for_sorting(self):
"""
        We categorize our labels by using the available categories (all,
        not just the observed ones), excluding any missing ones (-1);
        this is in preparation for sorting, where we need to disambiguate
        that -1 is not a valid value.
"""
from pandas.core.categorical import Categorical
def cats(label):
return np.arange(np.array(label).max() + 1 if len(label) else 0,
dtype=label.dtype)
return [Categorical.from_codes(label, cats(label), ordered=True)
for label in self.labels]
def sortlevel(self, level=0, ascending=True, sort_remaining=True):
"""
Sort MultiIndex at the requested level. The result will respect the
original ordering of the associated factor at that level.
Parameters
----------
level : list-like, int or str, default 0
If a string is given, must be a name of the level
If list-like must be names or ints of levels.
ascending : boolean, default True
False to sort in descending order
Can also be a list to specify a directed ordering
        sort_remaining : boolean, default True
            Sort by the remaining levels after `level`.
Returns
-------
sorted_index : pd.MultiIndex
Resulting index
indexer : np.ndarray
Indices of output values in original index
"""
from pandas.core.sorting import indexer_from_factorized
if isinstance(level, (compat.string_types, int)):
level = [level]
level = [self._get_level_number(lev) for lev in level]
sortorder = None
# we have a directed ordering via ascending
if isinstance(ascending, list):
if not len(level) == len(ascending):
raise ValueError("level must have same length as ascending")
from pandas.core.sorting import lexsort_indexer
indexer = lexsort_indexer([self.labels[lev] for lev in level],
orders=ascending)
# level ordering
else:
labels = list(self.labels)
shape = list(self.levshape)
# partition labels and shape
primary = tuple(labels.pop(lev - i) for i, lev in enumerate(level))
primshp = tuple(shape.pop(lev - i) for i, lev in enumerate(level))
if sort_remaining:
primary += primary + tuple(labels)
primshp += primshp + tuple(shape)
else:
sortorder = level[0]
indexer = indexer_from_factorized(primary, primshp,
compress=False)
if not ascending:
indexer = indexer[::-1]
indexer = _ensure_platform_int(indexer)
new_labels = [lab.take(indexer) for lab in self.labels]
new_index = MultiIndex(labels=new_labels, levels=self.levels,
names=self.names, sortorder=sortorder,
verify_integrity=False)
return new_index, indexer
def _convert_listlike_indexer(self, keyarr, kind=None):
"""
Parameters
----------
keyarr : list-like
Indexer to convert.
Returns
-------
tuple (indexer, keyarr)
indexer is an ndarray or None if cannot convert
keyarr are tuple-safe keys
"""
indexer, keyarr = super(MultiIndex, self)._convert_listlike_indexer(
keyarr, kind=kind)
# are we indexing a specific level
if indexer is None and len(keyarr) and not isinstance(keyarr[0],
tuple):
level = 0
_, indexer = self.reindex(keyarr, level=level)
# take all
if indexer is None:
indexer = np.arange(len(self))
check = self.levels[0].get_indexer(keyarr)
mask = check == -1
if mask.any():
raise KeyError('%s not in index' % keyarr[mask])
return indexer, keyarr
@Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
method = missing.clean_reindex_fill_method(method)
target = _ensure_index(target)
# empty indexer
if is_list_like(target) and not len(target):
return _ensure_platform_int(np.array([]))
if not isinstance(target, MultiIndex):
try:
target = MultiIndex.from_tuples(target)
except (TypeError, ValueError):
# let's instead try with a straight Index
if method is None:
return Index(self.values).get_indexer(target,
method=method,
limit=limit,
tolerance=tolerance)
if not self.is_unique:
raise Exception('Reindexing only valid with uniquely valued Index '
'objects')
if method == 'pad' or method == 'backfill':
if tolerance is not None:
raise NotImplementedError("tolerance not implemented yet "
'for MultiIndex')
indexer = self._get_fill_indexer(target, method, limit)
elif method == 'nearest':
raise NotImplementedError("method='nearest' not implemented yet "
'for MultiIndex; see GitHub issue 9365')
else:
# we may not compare equally because of hashing if we
# don't have the same dtypes
if self._inferred_type_levels != target._inferred_type_levels:
return Index(self.values).get_indexer(target.values)
indexer = self._engine.get_indexer(target)
return _ensure_platform_int(indexer)
@Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
return super(MultiIndex, self).get_indexer_non_unique(target)
def reindex(self, target, method=None, level=None, limit=None,
tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
Returns
-------
new_index : pd.MultiIndex
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
# GH6552: preserve names when reindexing to non-named target
# (i.e. neither Index nor Series).
preserve_names = not hasattr(target, 'names')
if level is not None:
if method is not None:
raise TypeError('Fill method not supported if level passed')
# GH7774: preserve dtype/tz if target is empty and not an Index.
# target may be an iterator
target = ibase._ensure_has_len(target)
if len(target) == 0 and not isinstance(target, Index):
idx = self.levels[level]
attrs = idx._get_attributes_dict()
attrs.pop('freq', None) # don't preserve freq
target = type(idx)._simple_new(np.empty(0, dtype=idx.dtype),
**attrs)
else:
target = _ensure_index(target)
target, indexer, _ = self._join_level(target, level, how='right',
return_indexers=True,
keep_order=False)
else:
target = _ensure_index(target)
if self.equals(target):
indexer = None
else:
if self.is_unique:
indexer = self.get_indexer(target, method=method,
limit=limit,
tolerance=tolerance)
else:
raise Exception("cannot handle a non-unique multi-index!")
if not isinstance(target, MultiIndex):
if indexer is None:
target = self
elif (indexer >= 0).all():
target = self.take(indexer)
else:
# hopefully?
target = MultiIndex.from_tuples(target)
if (preserve_names and target.nlevels == self.nlevels and
target.names != self.names):
target = target.copy(deep=False)
target.names = self.names
return target, indexer
def get_slice_bound(self, label, side, kind):
if not isinstance(label, tuple):
label = label,
return self._partial_tup_index(label, side=side)
def slice_locs(self, start=None, end=None, step=None, kind=None):
"""
For an ordered MultiIndex, compute the slice locations for input
labels. They can be tuples representing partial levels, e.g. for a
MultiIndex with 3 levels, you can pass a single value (corresponding to
the first level), or a 1-, 2-, or 3-tuple.
Parameters
----------
start : label or tuple, default None
If None, defaults to the beginning
end : label or tuple
If None, defaults to the end
step : int or None
Slice step
kind : string, optional, defaults None
Returns
-------
(start, end) : (int, int)
Notes
-----
This function assumes that the data is sorted by the first level
"""
# This function adds nothing to its parent implementation (the magic
# happens in get_slice_bound method), but it adds meaningful doc.
return super(MultiIndex, self).slice_locs(start, end, step, kind=kind)
def _partial_tup_index(self, tup, side='left'):
if len(tup) > self.lexsort_depth:
raise UnsortedIndexError(
'Key length (%d) was greater than MultiIndex'
' lexsort depth (%d)' %
(len(tup), self.lexsort_depth))
n = len(tup)
start, end = 0, len(self)
zipped = zip(tup, self.levels, self.labels)
for k, (lab, lev, labs) in enumerate(zipped):
section = labs[start:end]
if lab not in lev:
if not lev.is_type_compatible(lib.infer_dtype([lab])):
raise TypeError('Level type mismatch: %s' % lab)
# short circuit
loc = lev.searchsorted(lab, side=side)
if side == 'right' and loc >= 0:
loc -= 1
return start + section.searchsorted(loc, side=side)
idx = lev.get_loc(lab)
if k < n - 1:
end = start + section.searchsorted(idx, side='right')
start = start + section.searchsorted(idx, side='left')
else:
return start + section.searchsorted(idx, side=side)
def get_loc(self, key, method=None):
"""
Get integer location, slice or boolean mask for requested label or
tuple. If the key is past the lexsort depth, the return may be a
boolean mask array, otherwise it is always a slice or int.
Parameters
----------
key : label or tuple
method : None
Returns
-------
loc : int, slice object or boolean mask
"""
if method is not None:
raise NotImplementedError('only the default get_loc method is '
'currently supported for MultiIndex')
def _maybe_to_slice(loc):
"""convert integer indexer to boolean mask or slice if possible"""
if not isinstance(loc, np.ndarray) or loc.dtype != 'int64':
return loc
loc = lib.maybe_indices_to_slice(loc, len(self))
if isinstance(loc, slice):
return loc
mask = np.empty(len(self), dtype='bool')
mask.fill(False)
mask[loc] = True
return mask
if not isinstance(key, tuple):
loc = self._get_level_indexer(key, level=0)
return _maybe_to_slice(loc)
keylen = len(key)
if self.nlevels < keylen:
raise KeyError('Key length ({0}) exceeds index depth ({1})'
''.format(keylen, self.nlevels))
if keylen == self.nlevels and self.is_unique:
def _maybe_str_to_time_stamp(key, lev):
if lev.is_all_dates and not isinstance(key, Timestamp):
try:
return Timestamp(key, tz=getattr(lev, 'tz', None))
except Exception:
pass
return key
key = _values_from_object(key)
key = tuple(map(_maybe_str_to_time_stamp, key, self.levels))
return self._engine.get_loc(key)
# -- partial selection or non-unique index
# break the key into 2 parts based on the lexsort_depth of the index;
# the first part returns a continuous slice of the index; the 2nd part
# needs linear search within the slice
i = self.lexsort_depth
lead_key, follow_key = key[:i], key[i:]
start, stop = (self.slice_locs(lead_key, lead_key)
if lead_key else (0, len(self)))
if start == stop:
raise KeyError(key)
if not follow_key:
return slice(start, stop)
warnings.warn('indexing past lexsort depth may impact performance.',
PerformanceWarning, stacklevel=10)
loc = np.arange(start, stop, dtype='int64')
for i, k in enumerate(follow_key, len(lead_key)):
mask = self.labels[i][loc] == self.levels[i].get_loc(k)
if not mask.all():
loc = loc[mask]
if not len(loc):
raise KeyError(key)
return (_maybe_to_slice(loc) if len(loc) != stop - start else
slice(start, stop))
def get_loc_level(self, key, level=0, drop_level=True):
"""
Get integer location slice for requested label or tuple
Parameters
----------
key : label or tuple
level : int/level name or list thereof
Returns
-------
loc : int or slice object
"""
def maybe_droplevels(indexer, levels, drop_level):
if not drop_level:
return self[indexer]
            # kludge around
orig_index = new_index = self[indexer]
levels = [self._get_level_number(i) for i in levels]
for i in sorted(levels, reverse=True):
try:
new_index = new_index.droplevel(i)
except:
# no dropping here
return orig_index
return new_index
if isinstance(level, (tuple, list)):
if len(key) != len(level):
raise AssertionError('Key for location must have same '
'length as number of levels')
result = None
for lev, k in zip(level, key):
loc, new_index = self.get_loc_level(k, level=lev)
if isinstance(loc, slice):
mask = np.zeros(len(self), dtype=bool)
mask[loc] = True
loc = mask
result = loc if result is None else result & loc
return result, maybe_droplevels(result, level, drop_level)
level = self._get_level_number(level)
# kludge for #1796
if isinstance(key, list):
key = tuple(key)
if isinstance(key, tuple) and level == 0:
try:
if key in self.levels[0]:
indexer = self._get_level_indexer(key, level=level)
new_index = maybe_droplevels(indexer, [0], drop_level)
return indexer, new_index
except TypeError:
pass
if not any(isinstance(k, slice) for k in key):
# partial selection
# optionally get indexer to avoid re-calculation
def partial_selection(key, indexer=None):
if indexer is None:
indexer = self.get_loc(key)
ilevels = [i for i in range(len(key))
if key[i] != slice(None, None)]
return indexer, maybe_droplevels(indexer, ilevels,
drop_level)
if len(key) == self.nlevels:
if self.is_unique:
# here we have a completely specified key, but are
# using some partial string matching here
# GH4758
all_dates = [(l.is_all_dates and
not isinstance(k, compat.string_types))
for k, l in zip(key, self.levels)]
can_index_exactly = any(all_dates)
if (any([l.is_all_dates
for k, l in zip(key, self.levels)]) and
not can_index_exactly):
indexer = self.get_loc(key)
# we have a multiple selection here
if (not isinstance(indexer, slice) or
indexer.stop - indexer.start != 1):
return partial_selection(key, indexer)
key = tuple(self[indexer].tolist()[0])
return (self._engine.get_loc(
_values_from_object(key)), None)
else:
return partial_selection(key)
else:
return partial_selection(key)
else:
indexer = None
for i, k in enumerate(key):
if not isinstance(k, slice):
k = self._get_level_indexer(k, level=i)
if isinstance(k, slice):
# everything
if k.start == 0 and k.stop == len(self):
k = slice(None, None)
else:
k_index = k
if isinstance(k, slice):
if k == slice(None, None):
continue
else:
raise TypeError(key)
if indexer is None:
indexer = k_index
else: # pragma: no cover
indexer &= k_index
if indexer is None:
indexer = slice(None, None)
ilevels = [i for i in range(len(key))
if key[i] != slice(None, None)]
return indexer, maybe_droplevels(indexer, ilevels, drop_level)
else:
indexer = self._get_level_indexer(key, level=level)
return indexer, maybe_droplevels(indexer, [level], drop_level)
def _get_level_indexer(self, key, level=0, indexer=None):
# return an indexer, boolean array or a slice showing where the key is
# in the totality of values
# if the indexer is provided, then use this
level_index = self.levels[level]
labels = self.labels[level]
def convert_indexer(start, stop, step, indexer=indexer, labels=labels):
# given the inputs and the labels/indexer, compute an indexer set
# if we have a provided indexer, then this need not consider
# the entire labels set
r = np.arange(start, stop, step)
if indexer is not None and len(indexer) != len(labels):
                # we have an indexer which maps the locations in the labels
                # that we have already selected (and is not an indexer for
                # the entire set); otherwise this would be wasteful, so we
                # only need to examine locations that are in this set. The
                # only magic here is that the result is the mapping back to
                # the set that we have selected
from pandas import Series
mapper = Series(indexer)
indexer = labels.take(_ensure_platform_int(indexer))
result = Series(Index(indexer).isin(r).nonzero()[0])
m = result.map(mapper)._values
else:
m = np.zeros(len(labels), dtype=bool)
m[np.in1d(labels, r,
assume_unique=Index(labels).is_unique)] = True
return m
if isinstance(key, slice):
            # handle a slice, returning a slice if we can
# otherwise a boolean indexer
try:
if key.start is not None:
start = level_index.get_loc(key.start)
else:
start = 0
if key.stop is not None:
stop = level_index.get_loc(key.stop)
else:
stop = len(level_index) - 1
step = key.step
except KeyError:
# we have a partial slice (like looking up a partial date
# string)
start = stop = level_index.slice_indexer(key.start, key.stop,
key.step, kind='loc')
step = start.step
if isinstance(start, slice) or isinstance(stop, slice):
# we have a slice for start and/or stop
# a partial date slicer on a DatetimeIndex generates a slice
                # note that the stop ALREADY includes the stopped point (if
                # it was a string slice)
return convert_indexer(start.start, stop.stop, step)
elif level > 0 or self.lexsort_depth == 0 or step is not None:
                # we need right-searching semantics here, like when we are
                # using a slice, so include stop + 1 (so that stop itself
                # is included)
return convert_indexer(start, stop + 1, step)
else:
# sorted, so can return slice object -> view
i = labels.searchsorted(start, side='left')
j = labels.searchsorted(stop, side='right')
return slice(i, j, step)
else:
loc = level_index.get_loc(key)
if isinstance(loc, slice):
return loc
elif level > 0 or self.lexsort_depth == 0:
return np.array(labels == loc, dtype=bool)
i = labels.searchsorted(loc, side='left')
j = labels.searchsorted(loc, side='right')
return slice(i, j)
def get_locs(self, tup):
"""
        Given a tuple of slices/lists/labels/boolean indexers, one per
        level, produce an indexer to extract those locations.
        Parameters
        ----------
        tup : tuple of (slices/list/labels)
Returns
-------
locs : integer list of locations or boolean indexer suitable
for passing to iloc
"""
# must be lexsorted to at least as many levels
true_slices = [i for (i, s) in enumerate(is_true_slices(tup)) if s]
if true_slices and true_slices[-1] >= self.lexsort_depth:
raise UnsortedIndexError('MultiIndex slicing requires the index '
'to be lexsorted: slicing on levels {0}, '
'lexsort depth {1}'
.format(true_slices, self.lexsort_depth))
# indexer
# this is the list of all values that we want to select
n = len(self)
indexer = None
def _convert_to_indexer(r):
# return an indexer
if isinstance(r, slice):
m = np.zeros(n, dtype=bool)
m[r] = True
r = m.nonzero()[0]
elif is_bool_indexer(r):
if len(r) != n:
raise ValueError("cannot index with a boolean indexer "
"that is not the same length as the "
"index")
r = r.nonzero()[0]
from .numeric import Int64Index
return Int64Index(r)
def _update_indexer(idxr, indexer=indexer):
if indexer is None:
indexer = Index(np.arange(n))
if idxr is None:
return indexer
return indexer & idxr
for i, k in enumerate(tup):
if is_bool_indexer(k):
# a boolean indexer, must be the same length!
k = np.asarray(k)
indexer = _update_indexer(_convert_to_indexer(k),
indexer=indexer)
elif is_list_like(k):
# a collection of labels to include from this level (these
# are or'd)
indexers = None
for x in k:
try:
idxrs = _convert_to_indexer(
self._get_level_indexer(x, level=i,
indexer=indexer))
indexers = (idxrs if indexers is None
else indexers | idxrs)
except KeyError:
# ignore not founds
continue
if indexers is not None:
indexer = _update_indexer(indexers, indexer=indexer)
else:
from .numeric import Int64Index
# no matches we are done
return Int64Index([])._values
elif is_null_slice(k):
# empty slice
indexer = _update_indexer(None, indexer=indexer)
elif isinstance(k, slice):
# a slice, include BOTH of the labels
indexer = _update_indexer(_convert_to_indexer(
self._get_level_indexer(k, level=i, indexer=indexer)),
indexer=indexer)
else:
# a single label
indexer = _update_indexer(_convert_to_indexer(
self.get_loc_level(k, level=i, drop_level=False)[0]),
indexer=indexer)
        # empty indexer; import locally because the earlier Int64Index
        # import only happens inside the list-like branch above
        if indexer is None:
            from .numeric import Int64Index
            return Int64Index([])._values
return indexer._values
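    # get_locs with a per-level spec, sketched on assumed data (the null
    # slice selects everything at level 0, the list filters level 1):
    #
    # >>> mi = MultiIndex.from_arrays([[1, 1, 2, 2],
    # ...                              ['a', 'b', 'a', 'b']])
    # >>> mi.get_locs((slice(None), ['a']))
    # array([0, 2])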
def truncate(self, before=None, after=None):
"""
Slice index between two labels / tuples, return new MultiIndex
Parameters
----------
before : label or tuple, can be partial. Default None
None defaults to start
after : label or tuple, can be partial. Default None
None defaults to end
Returns
-------
truncated : MultiIndex
"""
if after and before and after < before:
raise ValueError('after < before')
i, j = self.levels[0].slice_locs(before, after)
left, right = self.slice_locs(before, after)
new_levels = list(self.levels)
new_levels[0] = new_levels[0][i:j]
new_labels = [lab[left:right] for lab in self.labels]
new_labels[0] = new_labels[0] - i
return MultiIndex(levels=new_levels, labels=new_labels,
verify_integrity=False)
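    # truncate keeps the rows whose level-0 value lies between the bounds,
    # inclusive, and trims the first level accordingly (illustrative):
    #
    # >>> mi = MultiIndex.from_arrays([[1, 2, 3], ['a', 'b', 'c']])
    # >>> mi.truncate(before=2)
    # MultiIndex(levels=[[2, 3], ['a', 'b', 'c']],
    #            labels=[[0, 1], [1, 2]])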
def equals(self, other):
"""
Determines if two MultiIndex objects have the same labeling information
(the levels themselves do not necessarily have to be the same)
See also
--------
equal_levels
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
if not isinstance(other, MultiIndex):
return array_equivalent(self._values,
_values_from_object(_ensure_index(other)))
if self.nlevels != other.nlevels:
return False
if len(self) != len(other):
return False
for i in range(self.nlevels):
slabels = self.labels[i]
slabels = slabels[slabels != -1]
svalues = algos.take_nd(np.asarray(self.levels[i]._values),
slabels, allow_fill=False)
olabels = other.labels[i]
olabels = olabels[olabels != -1]
ovalues = algos.take_nd(np.asarray(other.levels[i]._values),
olabels, allow_fill=False)
# since we use NaT both datetime64 and timedelta64
# we can have a situation where a level is typed say
# timedelta64 in self (IOW it has other values than NaT)
# but types datetime64 in other (where its all NaT)
# but these are equivalent
if len(svalues) == 0 and len(ovalues) == 0:
continue
if not array_equivalent(svalues, ovalues):
return False
return True
def equal_levels(self, other):
"""
Return True if the levels of both MultiIndex objects are the same
"""
if self.nlevels != other.nlevels:
return False
for i in range(self.nlevels):
if not self.levels[i].equals(other.levels[i]):
return False
return True
def union(self, other):
"""
Form the union of two MultiIndex objects, sorting if possible
Parameters
----------
other : MultiIndex or array / Index of tuples
Returns
-------
Index
        Examples
        --------
        >>> index.union(index2)
"""
self._assert_can_do_setop(other)
other, result_names = self._convert_can_do_setop(other)
if len(other) == 0 or self.equals(other):
return self
uniq_tuples = lib.fast_unique_multiple([self._values, other._values])
return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0,
names=result_names)
def intersection(self, other):
"""
Form the intersection of two MultiIndex objects, sorting if possible
Parameters
----------
other : MultiIndex or array / Index of tuples
Returns
-------
Index
"""
self._assert_can_do_setop(other)
other, result_names = self._convert_can_do_setop(other)
if self.equals(other):
return self
self_tuples = self._values
other_tuples = other._values
uniq_tuples = sorted(set(self_tuples) & set(other_tuples))
if len(uniq_tuples) == 0:
return MultiIndex(levels=[[]] * self.nlevels,
labels=[[]] * self.nlevels,
names=result_names, verify_integrity=False)
else:
return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0,
names=result_names)
def difference(self, other):
"""
Compute sorted set difference of two MultiIndex objects
Returns
-------
diff : MultiIndex
"""
self._assert_can_do_setop(other)
other, result_names = self._convert_can_do_setop(other)
if len(other) == 0:
return self
if self.equals(other):
return MultiIndex(levels=[[]] * self.nlevels,
labels=[[]] * self.nlevels,
names=result_names, verify_integrity=False)
difference = sorted(set(self._values) - set(other._values))
if len(difference) == 0:
return MultiIndex(levels=[[]] * self.nlevels,
labels=[[]] * self.nlevels,
names=result_names, verify_integrity=False)
else:
return MultiIndex.from_tuples(difference, sortorder=0,
names=result_names)
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
if not is_object_dtype(np.dtype(dtype)):
raise TypeError('Setting %s dtype to anything other than object '
'is not supported' % self.__class__)
elif copy is True:
return self._shallow_copy()
return self
def _convert_can_do_setop(self, other):
result_names = self.names
if not hasattr(other, 'names'):
if len(other) == 0:
other = MultiIndex(levels=[[]] * self.nlevels,
labels=[[]] * self.nlevels,
verify_integrity=False)
else:
msg = 'other must be a MultiIndex or a list of tuples'
try:
other = MultiIndex.from_tuples(other)
except:
raise TypeError(msg)
else:
result_names = self.names if self.names == other.names else None
return other, result_names
def insert(self, loc, item):
"""
Make new MultiIndex inserting new item at location
Parameters
----------
loc : int
item : tuple
Must be same length as number of levels in the MultiIndex
Returns
-------
new_index : Index
"""
# Pad the key with empty strings if lower levels of the key
# aren't specified:
if not isinstance(item, tuple):
item = (item, ) + ('', ) * (self.nlevels - 1)
elif len(item) != self.nlevels:
raise ValueError('Item must have length equal to number of '
'levels.')
new_levels = []
new_labels = []
for k, level, labels in zip(item, self.levels, self.labels):
if k not in level:
# have to insert into level
# must insert at end otherwise you have to recompute all the
# other labels
lev_loc = len(level)
level = level.insert(lev_loc, k)
else:
lev_loc = level.get_loc(k)
new_levels.append(level)
new_labels.append(np.insert(_ensure_int64(labels), loc, lev_loc))
return MultiIndex(levels=new_levels, labels=new_labels,
names=self.names, verify_integrity=False)
def delete(self, loc):
"""
Make new index with passed location deleted
Returns
-------
new_index : MultiIndex
"""
new_labels = [np.delete(lab, loc) for lab in self.labels]
return MultiIndex(levels=self.levels, labels=new_labels,
names=self.names, verify_integrity=False)
get_major_bounds = slice_locs
__bounds = None
@property
def _bounds(self):
"""
Return or compute and return slice points for level 0, assuming
sortedness
"""
if self.__bounds is None:
inds = np.arange(len(self.levels[0]))
self.__bounds = self.labels[0].searchsorted(inds)
return self.__bounds
def _wrap_joined_index(self, joined, other):
names = self.names if self.names == other.names else None
return MultiIndex.from_tuples(joined, names=names)
@Appender(Index.isin.__doc__)
def isin(self, values, level=None):
if level is None:
values = MultiIndex.from_tuples(values,
names=self.names).values
return algos.isin(self.values, values)
else:
num = self._get_level_number(level)
levs = self.levels[num]
labs = self.labels[num]
sought_labels = levs.isin(values).nonzero()[0]
if levs.size == 0:
return np.zeros(len(labs), dtype=np.bool_)
else:
return np.lib.arraysetops.in1d(labs, sought_labels)
MultiIndex._add_numeric_methods_disabled()
MultiIndex._add_numeric_methods_add_sub_disabled()
MultiIndex._add_logical_methods_disabled()
def _sparsify(label_list, start=0, sentinel=''):
pivoted = lzip(*label_list)
k = len(label_list)
result = pivoted[:start + 1]
prev = pivoted[start]
for cur in pivoted[start + 1:]:
sparse_cur = []
for i, (p, t) in enumerate(zip(prev, cur)):
if i == k - 1:
sparse_cur.append(t)
result.append(sparse_cur)
break
if p == t:
sparse_cur.append(sentinel)
else:
sparse_cur.extend(cur[i:])
result.append(sparse_cur)
break
prev = cur
return lzip(*result)
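# Row-wise effect of _sparsify, sketched on assumed input: repeated
# prefixes in consecutive rows are replaced by the sentinel, so the rows
# [('a', 'x'), ('a', 'y')] become [('a', 'x'), ('', 'y')]. The function
# itself takes and returns per-level columns, not rows.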
def _get_na_rep(dtype):
return {np.datetime64: 'NaT', np.timedelta64: 'NaT'}.get(dtype, 'NaN')
|
Ziqi-Li/bknqgis
|
pandas/pandas/core/indexes/multi.py
|
Python
|
gpl-2.0
| 94,471
|
# F3AT - Flumotion Asynchronous Autonomous Agent Toolkit
# Copyright (C) 2010,2011 Flumotion Services, S.A.
# All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# See "LICENSE.GPL" in the source distribution for more information.
# Headers in this file shall remain intact.
# -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
import re
from functools import partial
from twisted.internet import defer
from twisted.spread import pb
from feat.common import (decorator, annotate, enum, log, reflect,
error, container, text_helper, )
class SecurityLevel(enum.Enum):
"""
safe - should be used to expose querying commands which
will not mess up with the state
unsafe - should be for the operations which require a bit of thinking
superhuman - should not be used, but does it mean we shouldn't have it?
"""
(safe, unsafe, superhuman, ) = range(3)
@decorator.parametrized_function
def expose(function, security_level=SecurityLevel.safe):
annotate.injectClassCallback("recorded", 4,
"_register_exposed", function,
security_level)
return function
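# A hedged usage sketch of the expose decorator (hypothetical subclass,
# not part of this module):
#
#     class Shell(Manhole):
#
#         @expose()
#         def status(self):
#             '''Registered with the default SecurityLevel.safe.'''
#             return 'ok'
#
#         @expose(SecurityLevel.unsafe)
#         def restart(self):
#             '''Registered with SecurityLevel.unsafe.'''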
class Manhole(annotate.Annotable, pb.Referenceable):
_exposed = container.MroDict("_mro_exposed")
@classmethod
def _register_exposed(cls, function, security_level):
fun_id = function.__name__
cls._exposed[fun_id] = dict()
for lvl in SecurityLevel:
if lvl > security_level:
continue
cls._exposed[fun_id][lvl] = function
cls._build_remote_call(function)
@classmethod
def _build_remote_call(cls, function):
f_name = "remote_%s" % function.__name__
def wrapped(*args, **kwargs):
res = function(*args, **kwargs)
if isinstance(res, pb.Referenceable):
return res
else:
#TODO: Serialize it
return res
wrapped.__name__ = f_name
setattr(cls, f_name, wrapped)
@expose()
def help(self):
'''Prints exposed methods and their docstrings.'''
cmds = self.get_exposed_cmds()
t = text_helper.Table(fields=['command', 'doc'],
lengths=[50, 85])
return t.render((reflect.formatted_function_name(x), x.__doc__, )
for x in cmds.values())
def get_exposed_cmds(self, lvl=SecurityLevel.safe):
return dict((fun_id, v.get(lvl), )
for fun_id, v in self._exposed.iteritems() if lvl in v)
def remote_get_exposed_cmds(self, lvl=SecurityLevel.safe):
return self.get_exposed_cmds(lvl).keys()
def lookup_cmd(self, name, lvl=SecurityLevel.safe):
commands = self.get_exposed_cmds(lvl)
if name not in commands:
raise UnknownCommand('Unknown command: %s.%s' %\
(self.__class__.__name__, name, ))
return partial(commands[name], self)
class PBRemote(object):
def __init__(self, obj):
self.obj = obj
# names of exposed commands
self.commands = list()
def initiate(self):
d = self.obj.callRemote('get_exposed_cmds')
d.addCallback(self._set_cmds)
return d
def _set_cmds(self, cmds):
self.commands = cmds
def lookup_cmd(self, name, lvl=SecurityLevel.safe):
if name not in self.commands:
raise UnknownCommand('Unknown command: %s.%s' %\
(self.__class__.__name__, name, ))
return partial(self.obj.callRemote, name)
class Parser(log.Logger):
def __init__(self, driver, output, commands, cb_on_finish=None):
log.Logger.__init__(self, driver)
self.cb_on_finish = cb_on_finish
self.commands = commands
self.buffer = ""
self.output = output
self._locals = dict()
self._last_line = None
self.re = dict(
assignment=re.compile('\A(\w+)\s*=\s*(\S.*)'),
async=re.compile('\Aasync\s+(.+)'),
comment=re.compile('\A\s*#.*\Z'),
yielding=re.compile('\Ayield\s+(\w+)\s*\Z'),
number=re.compile('\A\d+(\.\d+)?\Z'),
none=re.compile('\ANone\Z'),
true=re.compile('\ATrue\Z'),
false=re.compile('\AFalse\Z'),
string=re.compile('\A\'([^(?<!\)\']*)\'\Z'),
call=re.compile('\A(\w+)\((.*)\)\Z'),
variable=re.compile('\A([^\(\)\'\"\s\+]+)\Z'),
method_call=re.compile('\A(\w+)\.(\w+)\((.*)\)\Z'))
def split(self, text):
'''
        Split text containing function arguments into an array of its
        top-level elements. See the unit tests for clarification.
'''
# nesting character -> count
counters = {"'": False, '(': 0}
def reverse(char):
def wrapped():
counters[char] = not counters[char]
return wrapped
def increase(char):
def wrapped():
counters[char] += 1
return wrapped
def decrease(char):
def wrapped():
counters[char] -= 1
return wrapped
def is_top_level():
return all([not x for x in counters.values()])
def fail():
raise BadSyntax('Syntax error processing line: %s' %\
self._last_line)
temp = ""
# end of field flag indicates that we expect next character to be
        # either whitespace or a split character
eof_flag = False
def append_char(temp, char):
if eof_flag:
fail()
temp += char
return temp
# dictionary char -> handler
nesters = {"'": reverse("'"), "(": increase('('), ")": decrease('(')}
# chars to split on
split = (',', )
# chars to swallow
consume = (' ', '\n')
result = list()
self.log("spliting: %s", text)
for char in text:
if char in nesters:
nesters[char]()
temp = append_char(temp, char)
elif not is_top_level():
temp = append_char(temp, char)
elif char in consume:
if len(temp) > 0:
eof_flag = True
continue
elif char in split:
result.append(temp)
temp = ""
eof_flag = False
else:
temp = append_char(temp, char)
if len(temp) > 0:
result.append(temp)
if not is_top_level():
fail()
self.log('Split returns %r', result)
return result
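    # Illustrative behaviour of split(), derived from the rules above
    # rather than taken from the unit tests: commas split only at the top
    # level, while quotes and parentheses protect their contents.
    #
    #     split("1, 'a, b', foo(bar(), 2)")
    #     -> ["1", "'a, b'", "foo(bar(), 2)"]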
def dataReceived(self, data):
self.buffer += data
self.process_line()
def send_output(self, data):
if data is not None:
self.output.write(str(data) + "\n")
def get_line(self):
def index_safe(sep):
try:
return self.buffer.index(sep)
except ValueError:
pass
separators = ["\n", ";"]
indexes = [index_safe(x) for x in separators]
indexes = [x for x in indexes if x is not None]
if not indexes:
return
index = min(indexes)
line = self.buffer[0:index]
self.buffer = self.buffer[(index + 1):]
return line
def process_line(self):
line = self.get_line()
if line is not None:
self._last_line = line
self.debug('Processing line: %s', line)
if not re.search('\w', line) or self.re['comment'].search(line):
return self.process_line()
assignment = self.re['assignment'].search(line)
if assignment:
variable_name = assignment.group(1)
line = assignment.group(2)
async = self.re['async'].search(line)
if async:
line = async.group(1)
async = True
yielding = self.re['yielding'].search(line)
if yielding:
varname = yielding.group(1)
d = defer.succeed(varname)
d.addCallback(self.get_local)
d.addCallback(WrappedDeferred.get_defer)
else: #normal processing
d = defer.maybeDeferred(self.split, line)
d.addCallback(self.process_array, async)
d.addCallback(self.validate_result)
if assignment:
d.addCallback(self.set_local, variable_name)
d.addCallback(self.set_local, '_')
d.addCallback(self.send_output)
d.addCallbacks(lambda _: self.process_line(), self._error_handler)
else:
self.on_finish()
@defer.inlineCallbacks
def process_array(self, array, async=False):
"""
        Main part of the protocol handling. What comes in as the parameter
        is an array of expressions, for example:
        [ "1", "'some string'", "variable",
          "some_call(param1, some_other_call())" ]
        Each element of the array is evaluated synchronously. In case of
        method calls, the call is performed by yielding on the method.
        The result of the function is a list with the elements substituted
        by the values they stand for (for variables: their values, for
        method calls: the result of the Deferred returned).
        The async parameter (default False) tells whether to keep the
        Deferreds returned by calls: if False they are waited upon and
        replaced by their results, if True they are wrapped for a later
        yield statement.
"""
result = list()
kwargs = dict()
keyword = None
def append_result(value):
if keyword:
kwargs[keyword] = value
else:
result.append(value)
for element in array:
self.log('matching: %s', element)
            # First check for an expression of the form keyword=expression
keyword = None
assignment = self.re['assignment'].search(element)
if assignment:
keyword = assignment.group(1)
element = assignment.group(2)
m = self.re['number'].search(element)
if m:
append_result(eval(m.group(0)))
continue
m = self.re['string'].search(element)
if m:
append_result(m.group(1))
continue
m = self.re['none'].search(element)
if m:
append_result(None)
continue
m = self.re['true'].search(element)
if m:
append_result(True)
continue
m = self.re['false'].search(element)
if m:
append_result(False)
continue
m = self.re['variable'].search(element)
if m:
append_result(self.get_local(m.group(1)))
continue
m = self.re['call'].search(element)
n = self.re['method_call'].search(element)
if m or n:
if m:
command = m.group(1)
method = self.commands.lookup_cmd(command)
rest = m.group(2)
else:
obj = n.group(1)
local = self.get_local(obj)
if not isinstance(local, (Manhole, PBRemote, )):
raise IllegalCall('Variable %r should be a Manhole '
'instance to make this work! '
'Got %r instead.' %\
(obj, type(local)))
command = n.group(2)
method = local.lookup_cmd(command)
rest = n.group(3)
arguments, keywords =\
yield self.process_array(self.split(rest))
output = method(*arguments, **keywords)
if isinstance(output, defer.Deferred):
if not async:
output = yield output
else:
output = WrappedDeferred(output)
if isinstance(output, pb.RemoteReference):
output = PBRemote(output)
yield output.initiate()
self.debug("Finished processing command: %s.", element)
append_result(output)
continue
raise BadSyntax('Syntax error processing line: %s. '
'Could not detect type of element: %s' %\
(self._last_line, element, ))
defer.returnValue((result, kwargs, ))
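    # Line shapes the parser understands, informally (per the regexes
    # compiled in __init__; the names are illustrative):
    #
    #     x = some_call(1, 'two')   # assignment + call
    #     async y = slow_call()     # keep the Deferred for later
    #     yield y                   # wait on a stored Deferred
    #     obj.method(x)             # method call on a Manhole/PBRemote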
def validate_result(self, (result_array, result_keywords, )):
'''
Check that the result is a list with a single element, and return it.
If we had more than one element it would mean that the line processed
looked somewhat like this:
call1(), "blah blah blah"
'''
if len(result_array) != 1 or len(result_keywords) > 0:
raise BadSyntax('Syntax error processing line: %s' %\
self._last_line)
return result_array[0]
def on_finish(self):
'''
Called when there is no more messages to be processed in the buffer.
'''
if callable(self.cb_on_finish):
self.cb_on_finish()
def set_local(self, value, variable_name):
'''
Assign local variable. The line processed looked somewhat like this:
variable = some_call()
'''
self.log('assigning %s = %r', variable_name, value)
self._locals[variable_name] = value
return value
def get_local(self, variable_name):
'''
Return the value of the local variable. Raises UnknownVariable is
the name is not known.
'''
if variable_name not in self._locals:
raise UnknownVariable('Unknown variable %s' % variable_name)
return self._locals[variable_name]
def _error_handler(self, f):
error.handle_failure(self, f, "Error processing")
self.send_output(f.getErrorMessage())
self.on_finish()
class BadSyntax(Exception):
pass
class IllegalCall(Exception):
pass
class UnknownVariable(Exception):
pass
class UnknownCommand(Exception):
pass
class WrappedDeferred(object):
def __init__(self, d):
self.d = d
def get_defer(self):
return self.d
|
f3at/feat
|
src/feat/common/manhole.py
|
Python
|
gpl-2.0
| 15,327
|
import helper
class check_user_users_with_defpwd():
"""
check_user_users_with_defpwd
Users with default passwords.
"""
# References:
# http://docs.oracle.com/cd/B28359_01/server.111/b28320/statviews_5074.htm
# https://www.sans.org/reading-room/whitepapers/analyst/oracle-database-security-secure-34885
    TITLE = 'Users With Default Passwords'
CATEGORY = 'User'
TYPE = 'sql'
SQL = 'SELECT username FROM dba_users_with_defpwd'
verbose = False
skip = False
result = {}
dbversion = None
def do_check(self, *results):
output = ''
self.result['level'] = 'GREEN'
if self.dbversion >= 11:
for rows in results:
for row in rows:
self.result['level'] = 'RED'
output += row[0] + '\n'
self.result['output'] = output
        else:
            self.result['level'] = 'GRAY'
            output = 'This check only applies to Oracle versions 11 and above.'
            self.result['output'] = output
return self.result
def __init__(self, parent):
print('Performing check: ' + self.TITLE)
self.dbversion = helper.get_version(parent.dbcurs)
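# Shape of the dict do_check() returns, with illustrative values only:
#     {'level': 'RED', 'output': 'SCOTT\nSYSTEM\n'}
# or, on Oracle versions below 11:
#     {'level': 'GRAY', 'output': 'This check only applies to ...'}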
|
foospidy/DbDat
|
plugins/oracle/check_user_users_with_defpwd.py
|
Python
|
gpl-2.0
| 1,215
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import io
import tempfile
from uuid import uuid1
from unittest import TestCase
from unittest import mock
from castel import advcounter
from castel.drivers.fake import Fakeengine
from castel.drivers.stattext import Stattext
class TestAdvCounter(TestCase):
@mock.patch('sys.exit')
def test_get_iowrapper_file_not_found(self, mock_exit):
        # Trying to open a non-existing file:
        # we generate a unique string which will be
        # used as the filename to guarantee that the file
        # doesn't exist
self.f_name = str(uuid1(1))
advcounter.get_iowrapper(Fakeengine(),
self.f_name,
"utf-8")
mock_exit.assert_called_with(1)
def test_wrong_unicode_format(self):
"""We write a temporary file in an UTF-16 format
and then we call the fucntion to get results opening
a file in the utf-8 format, we expect that the tool
return an UnicodeDecodeError exception
"""
# write an utf-16 string
content = bytes("test", 'utf-16')
with tempfile.TemporaryDirectory() as tmpdirname:
fname = tmpdirname + "/test"
# create a temporary file with the utf-16 string
with open(fname, "wb") as f:
f.write(content)
with open(fname, encoding="utf-8") as f:
# we open the file with the wrong encoding
                # so an exception is raised
self.assertRaises(UnicodeDecodeError,
advcounter.get_and_print_results,
Stattext(), f)
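# The TestStattext cases below exercise a simple memoisation contract
# (inferred from the mocks): each getter computes through its private
# counter only while the cached attribute is still None, e.g.:
#
#     stat = Stattext()
#     stat.get_total_words(f)   # first call counts via _count_words
#     stat.get_total_words(f)   # later calls reuse the cached total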
class TestStattext(TestCase):
def setUp(self):
self.stattext = Stattext()
@mock.patch.object(Stattext, '_count_words')
def test_total_words(self, mock_count_words):
self.stattext.total_words = None
self.stattext.get_total_words("fake")
mock_count_words.assert_called_with("fake")
@mock.patch.object(Stattext, '_count_words')
def test_count_words_no_called(self, mock_count_words):
self.stattext.total_words = 21
self.stattext.get_total_words("fake")
self.assertFalse(mock_count_words.called)
@mock.patch.object(Stattext, '_count_lines')
def test_total_lines(self, mock_count_lines):
self.stattext.total_lines = None
self.stattext.get_total_lines("fake")
mock_count_lines.assert_called_with("fake")
@mock.patch.object(Stattext, '_count_lines')
def test_count_lines_no_called(self, mock_count_lines):
self.stattext.total_lines = 21
self.stattext.get_total_lines("fake")
self.assertFalse(mock_count_lines.called)
@mock.patch.object(Stattext, '_count_alphanumeric')
def test_total_letters(self, mock_count_letters):
self.stattext.total_letters = None
self.stattext.get_total_letters("fake")
mock_count_letters.assert_called_with("fake")
@mock.patch.object(Stattext, '_count_alphanumeric')
def test_count_letters_no_called(self, mock_count_letters):
self.stattext.total_letters = 21
self.stattext.get_total_letters("fake")
self.assertFalse(mock_count_letters.called)
@mock.patch.object(Stattext, '_avg_letters')
def test_avg_letters_per_word(self, mock_avg_letters):
self.stattext.avg_letters_per_word = None
self.stattext.get_avg_letters_per_word("fake")
mock_avg_letters.assert_called_with("fake", precision=1)
@mock.patch.object(Stattext, '_avg_letters')
def test_avg_letters_no_called(self, mock_avg_letters):
self.stattext.avg_letters_per_word = 21
self.stattext.get_avg_letters_per_word("fake")
self.assertFalse(mock_avg_letters.called)
class TestAvgLetterForWord(TestCase):
def setUp(self):
self.stattext = Stattext()
def do_count_letters(self, text):
file_content = io.StringIO(text)
return self.stattext._count_alphanumeric(file_content)
def do_avg_letters(self, text, precision=1):
file_content = io.StringIO(text)
return self.stattext._avg_letters(file_content, precision)
def test_count_letters(self):
text = "This is a good test"
result = self.do_count_letters(text)
expected = 15
self.assertEqual(result, expected)
def test_count_letters_multi_line(self):
text = "This\n is \na good test\n\n\n"
result = self.do_count_letters(text)
expected = 15
self.assertEqual(result, expected)
def test_count_letters_ignoring_signs(self):
text = " Hello, | World!! ...\n Is a ''good day''"
result = self.do_count_letters(text)
expected = 20
self.assertEqual(result, expected)
def test_count_letters_empty_line(self):
text = ""
result = self.do_count_letters(text)
expected = 0
self.assertEqual(result, expected)
def test_count_letters_ignore_single_quote(self):
text = "I'm"
result = self.do_count_letters(text)
expected = 2
self.assertEqual(result, expected)
def test_count_digit_as_letters(self):
text = "123"
result = self.do_count_letters(text)
expected = 3
self.assertEqual(result, expected)
def test_avg_letter_single_line(self):
text = "This is a good test"
result = self.do_avg_letters(text)
expected = 3.0
self.assertEqual(result, expected)
def test_avg_letter_multi_line(self):
text = "This is a good\n test\n\n"
result = self.do_avg_letters(text)
expected = 3.0
self.assertEqual(result, expected)
def test_avg_unicode(self):
text = "Thíś íś ṕŕéttӳ fúń.\n "
expected = 3.8
result = self.do_avg_letters(text)
self.assertEqual(result, expected)
def test_avg_no_decimal(self):
text = "Thíś íś ṕŕéttӳ fúń.\n "
expected = 3
result = self.do_avg_letters(text, precision=0)
self.assertEqual(result, expected)
def test_avg_empty_file(self):
text = ""
expected = 0
result = self.do_avg_letters(text)
self.assertEqual(result, expected)
class TestCountWords(TestCase):
def setUp(self):
self.stattext = Stattext()
def do_count_words(self, text):
file_content = io.StringIO(text)
return self.stattext._count_words(file_content)
def test_count_on_single_line(self):
text = "This is a test"
result = self.do_count_words(text)
expected = 4
self.assertEqual(result, expected)
def test_count_on_multi_line(self):
text = "This is a\n test"
result = self.do_count_words(text)
expected = 4
self.assertEqual(result, expected)
def test_count_number_as_word(self):
text = "The 4 is counted as a word"
result = self.do_count_words(text)
expected = 7
self.assertEqual(result, expected)
def test_ignore_double_spaces(self):
text = "this text has \n spaces"
result = self.do_count_words(text)
expected = 4
self.assertEqual(result, expected)
def test_words_with_single_quote(self):
text = "it's a quote"
result = self.do_count_words(text)
expected = 3
self.assertEqual(result, expected)
def test_words_with_hyphen(self):
text = "single-word"
result = self.do_count_words(text)
expected = 1
self.assertEqual(result, expected)
def test_empty_file(self):
text = ""
result = self.do_count_words(text)
expected = 0
self.assertEqual(result, expected)
def test_empty_line(self):
text = "\n"
result = self.do_count_words(text)
expected = 0
self.assertEqual(result, expected)
def test_space(self):
text = " "
result = self.do_count_words(text)
expected = 0
self.assertEqual(result, expected)
def test_ignore_punctuation(self):
text = "this is awesome !!!"
result = self.do_count_words(text)
expected = 3
self.assertEqual(result, expected)
def test_unicode(self):
text = "Thíś íś ṕŕéttӳ fúń.\n And more, fun here"
result = self.do_count_words(text)
expected = 8
self.assertEqual(result, expected)
def test_dots_word(self):
text = "...just saying"
result = self.do_count_words(text)
expected = 2
self.assertEqual(result, expected)
def test_word_with_pipe(self):
text = "this|is a strange word"
result = self.do_count_words(text)
expected = 4
self.assertEqual(result, expected)
def test_only_signs(self):
text = "... ,, : !! | \ +-'"
result = self.do_count_words(text)
expected = 0
self.assertEqual(result, expected)
class TestFrequentLetter(TestCase):
def setUp(self):
self.stattext = Stattext()
def do_most_common_letter(self, text):
file_content = io.StringIO(text)
return self.stattext.most_common_letter(file_content)
def test_ascii_count(self):
text = "a"*2 + "b"*5
result = self.do_most_common_letter(text)
expected = "b"
self.assertEqual(result, expected)
def test_ascii_count_multi_line(self):
text = "a"*2 + "b"*5 + "\n"+"a"*5
result = self.do_most_common_letter(text)
expected = "a"
self.assertEqual(result, expected)
def test_ignore_digits(self):
text = "a1111\n232"
result = self.do_most_common_letter(text)
expected = "a"
self.assertEqual(result, expected)
def test_empty_file(self):
text = ""
result = self.do_most_common_letter(text)
self.assertIsNone(result)
def test_empty_line(self):
text = "\n\n\n"
result = self.do_most_common_letter(text)
self.assertIsNone(result)
def test_file_only_spaces(self):
text = " \n \r\n"
result = self.do_most_common_letter(text)
self.assertIsNone(result)
def test_ignore_punctuation(self):
text = "hello, world:\n?!:;.'"
result = self.do_most_common_letter(text)
expected = "l"
self.assertEqual(result, expected)
def test_upper_case_ignored(self):
text = "BBb"
result = self.do_most_common_letter(text)
expected = "b"
self.assertEqual(result, expected)
def test_many_most_common_letter(self):
text = "test is done"
result = self.do_most_common_letter(text)
expected = ['e', 's', 't']
self.assertEqual(sorted(result.replace(" ", "")), expected)
def test_unicode_text(self):
text = "Thíś íś ṕŕéttӳ fúń"
result = self.do_most_common_letter(text)
expected = "t"
self.assertEqual(result, expected)
class TestCountLines(TestCase):
def setUp(self):
self.stattext = Stattext()
def do_count_lines(self, text):
file_content = io.StringIO(text)
return self.stattext._count_lines(file_content)
def test_empty_file(self):
result = self.do_count_lines("")
expected = 0
self.assertEqual(result, expected)
def test_single_line(self):
text = "This is my line"
result = self.do_count_lines(text)
expected = 1
self.assertEqual(result, expected)
def test_single_line_no_chars(self):
text = "\n"
result = self.do_count_lines(text)
expected = 1
self.assertEqual(result, expected)
def test_single_line_space_char(self):
text = " "
result = self.do_count_lines(text)
expected = 1
self.assertEqual(result, expected)
def test_multi_lines(self):
text = "This \n is \n a 4\nlines "
result = self.do_count_lines(text)
expected = 4
self.assertEqual(result, expected)
def test_multi_empty_lines(self):
text = "\n\n\n"
result = self.do_count_lines(text)
expected = 3
self.assertEqual(result, expected)
def test_escape_char(self):
text = "This \n is \n a 3\\nlines"
result = self.do_count_lines(text)
expected = 3
self.assertEqual(result, expected)
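# Standard unittest entry point (an addition, not in the original file) so the
# suite can also be run directly with `python test_Stattext.py`:
if __name__ == '__main__':
    import unittest
    unittest.main()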
|
reclaro/castel
|
castel/tests/test_Stattext.py
|
Python
|
gpl-2.0
| 12,437
|
"""
Snapshot target
"""
from DeComp.compress import CompressMap
from catalyst import log
from catalyst.support import normpath, cmd
from catalyst.base.targetbase import TargetBase
from catalyst.base.genbase import GenBase
from catalyst.fileops import (clear_dir, ensure_dirs)
class snapshot(TargetBase, GenBase):
"""
Builder class for snapshots.
"""
def __init__(self,myspec,addlargs):
self.required_values=["version_stamp","target"]
self.valid_values=["version_stamp","target", "compression_mode"]
TargetBase.__init__(self, myspec, addlargs)
GenBase.__init__(self,myspec)
#self.settings=myspec
self.settings["target_subpath"]="repos"
st=self.settings["storedir"]
self.settings["snapshot_path"] = normpath(st + "/snapshots/"
+ self.settings["snapshot_name"]
+ self.settings["version_stamp"])
self.settings["tmp_path"]=normpath(st+"/tmp/"+self.settings["target_subpath"])
def setup(self):
x=normpath(self.settings["storedir"]+"/snapshots")
ensure_dirs(x)
def mount_safety_check(self):
pass
def run(self):
if "purgeonly" in self.settings["options"]:
self.purge()
return True
if "purge" in self.settings["options"]:
self.purge()
success = True
self.setup()
log.notice('Creating %s tree snapshot %s from %s ...',
self.settings["repo_name"], self.settings['version_stamp'],
self.settings['portdir'])
mytmp=self.settings["tmp_path"]
ensure_dirs(mytmp)
cmd(['rsync', '-a', '--no-o', '--no-g', '--delete',
'--exclude=/packages/',
'--exclude=/distfiles/',
'--exclude=/local/',
'--exclude=CVS/',
'--exclude=.svn',
'--filter=H_**/files/digest-*',
self.settings['portdir'] + '/',
mytmp + '/' + self.settings['repo_name'] + '/'],
env=self.env)
log.notice('Compressing %s snapshot tarball ...', self.settings["repo_name"])
compressor = CompressMap(self.settings["compress_definitions"],
env=self.env, default_mode=self.settings['compression_mode'],
comp_prog=self.settings["comp_prog"])
infodict = compressor.create_infodict(
source=self.settings["repo_name"],
destination=self.settings["snapshot_path"],
basedir=mytmp,
filename=self.settings["snapshot_path"],
mode=self.settings["compression_mode"],
auto_extension=True
)
if not compressor.compress(infodict):
success = False
log.error('Snapshot compression failure')
else:
filename = '.'.join([self.settings["snapshot_path"],
compressor.extension(self.settings["compression_mode"])])
log.notice('Snapshot successfully written to %s', filename)
self.gen_contents_file(filename)
self.gen_digest_file(filename)
if "keepwork" not in self.settings["options"]:
self.cleanup()
if success:
log.info('snapshot: complete!')
return success
def kill_chroot_pids(self):
pass
def cleanup(self):
log.info('Cleaning up ...')
self.purge()
def purge(self):
clear_dir(self.settings['tmp_path'])
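# Hedged sketch of the spec a snapshot target consumes; in practice catalyst's
# spec parser and defaults supply these (key names inferred from the code
# above, values illustrative):
#
#     myspec = {'version_stamp': '20200101', 'target': 'snapshot',
#               'storedir': '/var/tmp/catalyst', 'snapshot_name': 'portage-',
#               'repo_name': 'gentoo', 'portdir': '/usr/portage',
#               'compression_mode': 'lbzip2', 'options': set(), ...}
#     snapshot(myspec, addlargs={}).run()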
|
benkohler/catalyst
|
catalyst/targets/snapshot.py
|
Python
|
gpl-2.0
| 2,907
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-11-22 13:17
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('simon_app', '0004_auto_20201122_0919'),
]
operations = [
migrations.AddField(
model_name='probeapirequest',
name='test_type',
field=models.CharField(default=b'ping', max_length=16),
),
]
|
LACNIC/simon
|
simon-web/simon_app/migrations/0005_probeapirequest_test_type.py
|
Python
|
gpl-2.0
| 481
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('packages', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='signoffspecification',
name='known_bad',
field=models.BooleanField(default=False, help_text=b'Is this package known to be broken in some way?'),
preserve_default=True,
),
]
|
brain0/archweb
|
packages/migrations/0002_auto_20160731_0556.py
|
Python
|
gpl-2.0
| 509
|
# -*- coding: UTF-8 -*-
# -Cleaned and Checked on 08-24-2019 by JewBMX in Scrubs.
# -Cleaned and Checked on 04-14-2020 by Tempest.
import urllib, urlparse
import traceback
from resources.lib.modules import client, log_utils
from resources.lib.modules import cleantitle
from resources.lib.modules import directstream
from resources.lib.modules import source_utils
from resources.lib.sources import cfscrape
class source:
def __init__(self):
self.priority = 1
self.language = ['en'] # Old tinklepad.is movie25.hk
self.domains = ['5movies.to']
self.base_link = 'https://5movies.to'
self.search_link = '/search.php?q=%s'
self.video_link = '/getlink.php?Action=get&lk=%s'
self.headers = {'User-Agent': client.agent()}
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = {'imdb': imdb, 'title': title, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url is None:
return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
return
def _search(self, title, year, headers):
try:
q = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(cleantitle.getsearch(title)))
r = cfscrape.get(q, headers=self.headers).content
r = client.parseDOM(r, 'div', attrs={'class': 'ml-img'})
r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
url = [i for i in r if title in i[1] and year in i[1]][0][0]
return url
except:
pass
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url is None:
return sources
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
headers = {}
title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
year = data['year']
if 'tvshowtitle' in data:
episode = data['episode']
season = data['season']
url = self._search(data['tvshowtitle'], data['year'], headers)
url = url.replace('online-free', 'season-%s-episode-%s-online-free' % (season, episode))
else:
episode = None
year = data['year']
url = self._search(data['title'], data['year'], headers)
url = url if 'http' in url else urlparse.urljoin(self.base_link, url)
result = cfscrape.get(url, headers=self.headers).content
result = client.parseDOM(result, 'li', attrs={'class': 'link-button'})
links = client.parseDOM(result, 'a', ret='href')
i = 0
for t in links:
if i == 10:
break
try:
t = t.split('=')[1]
t = urlparse.urljoin(self.base_link, self.video_link % t)
result = client.request(t, post={}, headers={'User-Agent': client.agent(), 'Referer': url})
u = result if 'http' in result else 'http:' + result
if 'google' in u:
valid, hoster = source_utils.is_host_valid(u, hostDict)
urls, host, direct = source_utils.check_directstreams(u, hoster)
for x in urls:
sources.append(
{'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'],
'direct': direct, 'debridonly': False})
else:
valid, hoster = source_utils.is_host_valid(u, hostDict)
if valid:
try:
u.decode('utf-8')
sources.append(
{'source': hoster, 'quality': 'SD', 'language': 'en', 'url': u, 'direct': False,
'debridonly': False})
i += 1
except:
pass
except:
pass
return sources
except Exception:
failure = traceback.format_exc()
log_utils.log('---5MOVIES Testing - Exception: \n' + str(failure))
return sources
def resolve(self, url):
if 'google' in url:
return directstream.googlepass(url)
else:
return url
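# Illustrative only: movie() packs its metadata into a query string that
# sources() later parses back with urlparse.parse_qs (key order may vary):
#
#     src = source()
#     src.movie('tt0133093', 'The Matrix', None, [], '1999')
#     # -> 'year=1999&imdb=tt0133093&title=The+Matrix'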
|
repotvsupertuga/tvsupertuga.repository
|
script.module.streamtvsupertuga/lib/resources/lib/sources/en/5movies.py
|
Python
|
gpl-2.0
| 5,312
|
import SMTPLogger
|
3dfxsoftware/cbss-addons
|
bahmni_logger/__init__.py
|
Python
|
gpl-2.0
| 17
|
#!/usr/bin/env python
import numpy as np
import os
from os import walk
from get_samples import get_hog_samples
from get_classifier import get_classifier
from get_location import get_location
from output import output
from os.path import join
from shutil import rmtree
from sklearn.externals import joblib as pickle
import progressbar
# from 5.29, 2014
__author__ = 'Zhihua Liang'
__license__ = "GPL"
__version__ = "1.0.1"
__maintainer__ = "Zhihua Liang"
__email__ = "liangzhihua@gmail.com"
__status__ = "Development"
#define the parameters
dim_x = 760
dim_y = 195
dim_z = 240
orientations = 9
target_size = 48
pixels_per_cell = (4, 4)
cells_per_block = (3, 3) # not ready to change this value
weight_values = (1, 30)
scan_window_size = (target_size, target_size) # on pixels
out_path = 'result' # output directory
training_path = '/home/zhihua/work/object_detector/image/25_50_75_0_1'
test_path = '/home/zhihua/work/object_detector/image/25_50_75_2'
classifier_name = 'sgd' # options are 'svm', 'sgd' for now
classifier_file = 'classifier/sgd.pkl'
re_train = False # only sgd get the retrain
online_training = True # train on every single image when it is available.
verbose = False # print debug message
#########################################################
# training
#########################################################
# progress bar to display training progress
bar = progressbar.ProgressBar(maxval=100, widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
# get number of files in training directory
number_of_total_files = sum([len(files) for r, d, files in os.walk(training_path)])
number_of_total_files_over_100 = number_of_total_files/100 + 1  # one bar step per ~1% of files
file_count = 0
total_training_sample = []
total_training_label = []
if os.path.isfile(classifier_file):
# load the classifier if a trained model file exists
clf = pickle.load(classifier_file)
# continue training the model with new data
if re_train:
print 'get re-training set'
for root, dirs, files in walk(training_path):
for file_name in files:
training_sample, training_label, dummy = get_hog_samples(join(root, file_name), dim_x, dim_z,
orientations, pixels_per_cell,
cells_per_block, scan_window_size,
training=True, verbose=verbose)
if online_training:
n_positive = np.count_nonzero(training_label)
sample_weight = [weight_values[0]]*(len(training_label) - n_positive) + [weight_values[1]]*n_positive
if file_count == 0:
clf.partial_fit(training_sample, training_label, classes=np.unique(training_label))
# sample_weight=sample_weight)
print 'training labels are', np.unique(training_label)
else:
clf.partial_fit(training_sample, training_label)#, sample_weight=sample_weight)
else:
total_training_sample = total_training_sample + training_sample
total_training_label = total_training_label + training_label
file_count += 1
#print 're-Training set contains', len(total_training_label), 'samples'
if file_count % number_of_total_files_over_100 == 0:
    bar.update(file_count/number_of_total_files_over_100)
if not online_training:
clf.partial_fit(total_training_sample, total_training_label) # WARNING: Only SGD get the
# online learning feature.
pickle.dump(clf, classifier_file)
# if no classifier file exists, create it and train
else:
    # no trained model on disk, so train it from scratch
clf = get_classifier(classifier_name)
#training samples and labels
print 'Get training set on', training_path
print 'Training on progress.... \n\n\n\n'
for root, dirs, files in walk(training_path):
for file_name in files:
training_sample, training_label, dummy = get_hog_samples(join(root, file_name), dim_x, dim_z,
orientations, pixels_per_cell,
cells_per_block, scan_window_size,
training=True, verbose=verbose)
if online_training:
n_positive = np.count_nonzero(training_label)
sample_weight = [weight_values[0]]*(len(training_label) - n_positive) + [weight_values[1]]*n_positive
if file_count == 0:
clf.partial_fit(training_sample, training_label, classes=np.unique(training_label))
#sample_weight=sample_weight)
print 'training labels are', np.unique(training_label)
else:
clf.partial_fit(training_sample, training_label)#, sample_weight=sample_weight)
else:
total_training_sample = total_training_sample + training_sample
total_training_label = total_training_label + training_label
file_count += 1
if file_count % number_of_total_files_over_100 == 0:
    bar.update(file_count/number_of_total_files_over_100)
if not online_training:
print '\n Training set contains', len(total_training_label), 'samples'
print total_training_sample[0].shape
clf.fit(total_training_sample, total_training_label)
pickle.dump(clf, classifier_file)
bar.finish()
#########################################################
# test
#########################################################
# remove the previous output if any exists (rmtree raises on a missing directory)
if os.path.exists(out_path):
    rmtree(out_path)
os.makedirs(out_path)
# get the samples from test folder.
prediction_list = np.empty([])
for root, dirs, files in walk(test_path):
for file_name in files:
print '==========================================================================='
print file_name
test_sample, test_label, lesion_positions = get_hog_samples(join(root, file_name), dim_x, dim_z,
orientations, pixels_per_cell,
cells_per_block, scan_window_size,
training=False, verbose=verbose)
print 'Test set contains', len(test_label), 'samples'
predict_label = clf.predict(test_sample)
print 'Prediction-percentage-error is:', np.mean(predict_label != test_label)
print np.where(np.array(test_label) == 1)
print np.where(predict_label == 1)
#go back to the original image axis
label_x = (dim_x - scan_window_size[0])/pixels_per_cell[0]+1
label_y = (dim_z - scan_window_size[1])/pixels_per_cell[1]+1
#n_samples = len(lesion_positions)
print 'label number is', label_x*label_y
predict_label = predict_label.reshape([label_y, label_x])
y, x = np.where(predict_label[:, :] == 1)
predict_lesion_position = np.dstack((x*pixels_per_cell[0]+target_size/2,
y*pixels_per_cell[1]+target_size/2))[0]
print 'candidate positions are:', predict_lesion_position
# find the lesion location
if predict_lesion_position.size != 0:
position, confidence = get_location(predict_lesion_position, target_size)
else:
position = [-1, -1]
confidence = 1
confidence = (confidence+1)/float(2) # get to the range of LROC analysis
print 'predicted location is', position, 'with confidence', confidence
lesion = int(file_name.split('lesion_')[-1].split('_')[0]) > 0
truth_x = int(file_name.split('x_')[-1].split('_')[0])
truth_y = int(file_name.split('z_')[-1].split('_')[0])
if lesion:
print 'truth position is ', [truth_x, truth_y]
else:
print 'truth position is ', [-1, -1]
# get the density value and projection number as output file name:
projection_number = file_name.split('tvp_')[-1].split('_')[0]
output_file_name = 'TP_' + projection_number + '.txt'
# open output file
with open(join(out_path, output_file_name), 'a') as fid:
output(file_name, position, confidence, fid)
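# Illustrative filename the parsing above expects (pattern inferred from the
# split() calls; the exact naming scheme is an assumption):
#
#     ..._tvp_042_..._x_380_..._z_120_..._lesion_1_...
#     -> projection '042', truth position [380, 120], lesion present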
|
magicknight/Observer
|
run.py
|
Python
|
gpl-2.0
| 8,980
|
__author__ = 'oskyar'
# topic/admin.py
from django.contrib import admin
from .models import Topic, Subtopic
admin.site.register(Topic)
admin.site.register(Subtopic)
|
oskyar/test-TFG
|
TFG/apps/topic/admin.py
|
Python
|
gpl-2.0
| 168
|
"""
libguestfs tools test utility functions.
"""
import logging
import signal
import os
import re
import aexpect
from avocado.utils import path
from avocado.utils import process
from . import propcan
class LibguestfsCmdError(Exception):
"""
Error of libguestfs-tool command.
"""
def __init__(self, details=''):
self.details = details
Exception.__init__(self)
def __str__(self):
return str(self.details)
def lgf_cmd_check(cmd):
"""
To check whether the cmd is supported on this host.
:param cmd: the cmd to use a libguest tool.
:return: None if the cmd is not exist, otherwise return its path.
"""
libguestfs_cmds = ['libguestfs-test-tool', 'guestfish', 'guestmount',
'virt-alignment-scan', 'virt-cat', 'virt-copy-in',
'virt-copy-out', 'virt-df', 'virt-edit',
'virt-filesystems', 'virt-format', 'virt-inspector',
'virt-list-filesystems', 'virt-list-partitions',
'virt-ls', 'virt-make-fs', 'virt-rescue',
'virt-resize', 'virt-sparsify', 'virt-sysprep',
'virt-tar', 'virt-tar-in', 'virt-tar-out',
'virt-win-reg', 'virt-inspector2']
if cmd not in libguestfs_cmds:
raise LibguestfsCmdError(
"Command %s is not supported by libguestfs yet." % cmd)
try:
return path.find_command(cmd)
except path.CmdNotFoundError:
logging.warning("You have not installed %s on this host.", cmd)
return None
def lgf_command(cmd, ignore_status=True, debug=False, timeout=60):
"""
Interface of libguestfs tools' commands.
:param cmd: Command line to execute.
:return: CmdResult object.
:raise: LibguestfsCmdError if non-zero exit status
and ignore_status=False
"""
if debug:
logging.debug("Running command %s in debug mode.", cmd)
# Raise exception if ignore_status is False
try:
ret = process.run(cmd, ignore_status=ignore_status,
verbose=debug, timeout=timeout)
except process.CmdError as detail:
raise LibguestfsCmdError(detail)
if debug:
logging.debug("status: %s", ret.exit_status)
logging.debug("stdout: %s", ret.stdout_text.strip())
logging.debug("stderr: %s", ret.stderr_text.strip())
# Return CmdResult instance when ignore_status is True
return ret
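# Illustrative usage (not in the original module) of the two helpers above;
# guestfish must be installed for the command to resolve:
#
#     if lgf_cmd_check('guestfish'):
#         ret = lgf_command('guestfish --version', debug=True)
#         print(ret.stdout_text)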
class LibguestfsBase(propcan.PropCanBase):
"""
Base class of libguestfs tools.
"""
__slots__ = ['ignore_status', 'debug', 'timeout', 'uri', 'lgf_exec']
def __init__(self, lgf_exec="/bin/true", ignore_status=True,
debug=False, timeout=60, uri=None):
init_dict = {}
init_dict['ignore_status'] = ignore_status
init_dict['debug'] = debug
init_dict['timeout'] = timeout
init_dict['uri'] = uri
init_dict['lgf_exec'] = lgf_exec
super(LibguestfsBase, self).__init__(init_dict)
def set_ignore_status(self, ignore_status):
"""
Enforce setting ignore_status as a boolean.
"""
if bool(ignore_status):
self.__dict_set__('ignore_status', True)
else:
self.__dict_set__('ignore_status', False)
def set_debug(self, debug):
"""
Accessor method for 'debug' property that logs message on change
"""
if not self.INITIALIZED:
self.__dict_set__('debug', debug)
else:
current_setting = self.__dict_get__('debug')
desired_setting = bool(debug)
if not current_setting and desired_setting:
self.__dict_set__('debug', True)
logging.debug("Libguestfs debugging enabled")
# current and desired could both be True
if current_setting and not desired_setting:
self.__dict_set__('debug', False)
logging.debug("Libguestfs debugging disabled")
def set_timeout(self, timeout):
"""
Accessor method for 'timeout' property, timeout should be digit
"""
if type(timeout) is int:
self.__dict_set__('timeout', timeout)
else:
try:
timeout = int(str(timeout))
self.__dict_set__('timeout', timeout)
except ValueError:
logging.debug("Set timeout failed.")
def get_uri(self):
"""
Accessor method for 'uri' property that must exist
"""
# self.get() would call get_uri() recursively
try:
return self.__dict_get__('uri')
except KeyError:
return None
# There are two ways to call guestfish:
# 1. Guestfish classes provided below (shell session)
# 2. guestfs module provided in the system libguestfs package
class Guestfish(LibguestfsBase):
"""
Execute guestfish, using a new guestfish shell each time.
"""
__slots__ = []
def __init__(self, disk_img=None, ro_mode=False,
libvirt_domain=None, inspector=False,
uri=None, mount_options=None, run_mode="interactive"):
"""
Initialize guestfish command with options.
:param disk_img: if it is not None, use option '-a disk'.
:param ro_mode: only for disk_img. add option '--ro' if it is True.
:param libvirt_domain: if it is not None, use option '-d domain'.
:param inspector: guestfish mounts vm's disks automatically
:param uri: guestfish's connect uri
:param mount_options: Mount the named partition or logical volume
on the given mountpoint.
"""
guestfs_exec = "guestfish"
if lgf_cmd_check(guestfs_exec) is None:
raise LibguestfsCmdError
if run_mode not in ['remote', 'interactive']:
raise AssertionError("run_mode should be remote or interactive")
# unset GUESTFISH_XXX environment parameters
# to avoid color of guestfish shell session for testing
color_envs = ["GUESTFISH_PS1", "GUESTFISH_OUTPUT",
"GUESTFISH_RESTORE", "GUESTFISH_INIT"]
unset_cmd = ""
for env in color_envs:
unset_cmd += "unset %s;" % env
if run_mode == "interactive" and unset_cmd:
guestfs_exec = unset_cmd + " " + guestfs_exec
if run_mode == "remote":
guestfs_exec += " --listen"
else:
if uri:
guestfs_exec += " -c '%s'" % uri
if disk_img:
guestfs_exec += " -a '%s'" % disk_img
if libvirt_domain:
guestfs_exec += " -d '%s'" % libvirt_domain
if ro_mode:
guestfs_exec += " --ro"
if inspector:
guestfs_exec += " -i"
if mount_options is not None:
guestfs_exec += " --mount %s" % mount_options
super(Guestfish, self).__init__(guestfs_exec)
def complete_cmd(self, command):
"""
Execute built-in command in a complete guestfish command
(Not a guestfish session).
command: guestfish [--options] [commands]
"""
guestfs_exec = self.__dict_get__('lgf_exec')
ignore_status = self.__dict_get__('ignore_status')
debug = self.__dict_get__('debug')
timeout = self.__dict_get__('timeout')
if command:
guestfs_exec += " %s" % command
return lgf_command(guestfs_exec, ignore_status, debug, timeout)
else:
raise LibguestfsCmdError("No built-in command was passed.")
class GuestfishSession(aexpect.ShellSession):
"""
A shell session of guestfish.
"""
# Check output against list of known error-status strings
ERROR_REGEX_LIST = [r'libguestfs: error:\s*']
def __init__(self, guestfs_exec=None, a_id=None, prompt=r"><fs>\s*"):
"""
Initialize guestfish session server, or client if id set.
:param guestfs_exec: path to guestfish executable
:param a_id: ID of an already running server, if accessing a running
             server, or None if starting a new one.
:param prompt: Regular expression describing the shell's prompt line.
"""
# aexpect tries to auto-close the session because no client is connected yet
super(GuestfishSession, self).__init__(guestfs_exec, a_id,
prompt=prompt,
auto_close=False)
def cmd_status_output(self, cmd, timeout=60, internal_timeout=None,
print_func=None):
"""
Send a guestfish command and return its exit status and output.
:param cmd: guestfish command to send
(must not contain newline characters)
:param timeout: The duration (in seconds) to wait for the prompt to
return
:param internal_timeout: The timeout to pass to read_nonblocking
:param print_func: A function to be used to print the data being read
(should take a string parameter)
:return: A tuple (status, output) where status is the exit status and
output is the output of cmd
:raise ShellTimeoutError: Raised if timeout expires
:raise ShellProcessTerminatedError: Raised if the shell process
terminates while waiting for output
:raise ShellStatusError: Raised if the exit status cannot be obtained
:raise ShellError: Raised if an unknown error occurs
"""
out = self.cmd_output(cmd, timeout, internal_timeout, print_func)
for line in out.splitlines():
if self.match_patterns(line, self.ERROR_REGEX_LIST) is not None:
return 1, out
return 0, out
def cmd_result(self, cmd, ignore_status=False):
"""Mimic process.run()"""
exit_status, stdout = self.cmd_status_output(cmd)
stderr = '' # no way to retrieve this separately
result = process.CmdResult(cmd, stdout, stderr, exit_status)
if not ignore_status and exit_status:
raise process.CmdError(cmd, result,
"Guestfish Command returned non-zero exit status")
return result
class GuestfishRemote(object):
"""
Remote control of guestfish.
"""
# Check output against list of known error-status strings
ERROR_REGEX_LIST = [r'libguestfs: error:\s*']
def __init__(self, guestfs_exec=None, a_id=None):
"""
Initialize guestfish session server, or client if id set.
:param guestfs_exec: path to guestfish executable
:param a_id: guestfish remote id
"""
if a_id is None:
try:
ret = process.run(guestfs_exec, ignore_status=False,
verbose=True, timeout=60)
except process.CmdError as detail:
raise LibguestfsCmdError(detail)
self.a_id = re.search(br"\d+", ret.stdout.strip()).group()
else:
self.a_id = a_id
def get_id(self):
return self.a_id
def cmd_status_output(self, cmd, ignore_status=None, verbose=None, timeout=60):
"""
Send a guestfish command and return its exit status and output.
:param cmd: guestfish command to send(must not contain newline characters)
:param timeout: The duration (in seconds) to wait for the prompt to return
:return: A tuple (status, output) where status is the exit status
and output is the output of cmd
:raise LibguestfsCmdError: Raised if commands execute failed
"""
guestfs_exec = "guestfish --remote=%s " % self.a_id
cmd = guestfs_exec + cmd
try:
ret = process.run(cmd, ignore_status=ignore_status,
verbose=verbose, timeout=timeout)
except process.CmdError as detail:
raise LibguestfsCmdError(detail)
for line in self.ERROR_REGEX_LIST:
if re.search(line, ret.stdout_text.strip()):
e_msg = ('Error pattern %s found on output of %s: %s' %
(line, cmd, ret.stdout_text.strip()))
raise LibguestfsCmdError(e_msg)
logging.debug("command: %s", cmd)
logging.debug("stdout: %s", ret.stdout_text.strip())
return 0, ret.stdout_text.strip()
def cmd(self, cmd, ignore_status=False):
    """Alias of cmd_result() (the two methods were previously verbatim duplicates)"""
    return self.cmd_result(cmd, ignore_status)
def cmd_result(self, cmd, ignore_status=False):
"""Mimic process.run()"""
exit_status, stdout = self.cmd_status_output(cmd)
stderr = '' # no way to retrieve this separately
result = process.CmdResult(cmd, stdout, stderr, exit_status)
if not ignore_status and exit_status:
raise process.CmdError(cmd, result,
"Guestfish Command returned non-zero exit status")
return result
class GuestfishPersistent(Guestfish):
"""
Execute operations using persistent guestfish session.
"""
__slots__ = ['session_id', 'run_mode']
# Help detect leftover sessions
SESSION_COUNTER = 0
def __init__(self, disk_img=None, ro_mode=False,
libvirt_domain=None, inspector=False,
uri=None, mount_options=None, run_mode="interactive"):
super(GuestfishPersistent, self).__init__(disk_img, ro_mode,
libvirt_domain, inspector,
uri, mount_options, run_mode)
self.__dict_set__('run_mode', run_mode)
if self.get('session_id') is None:
# set_uri is not called when INITIALIZED = False
# and no session_id passed to super __init__
self.new_session()
# Check whether guestfish session is prepared.
guestfs_session = self.open_session()
if run_mode != "remote":
status, output = guestfs_session.cmd_status_output(
'is-config', timeout=60)
if status != 0:
logging.debug(
"Persistent guestfish session is not responding.")
raise aexpect.ShellStatusError(self.lgf_exec, 'is-config')
def close_session(self):
"""
If a persistent session exists, close it down.
"""
try:
run_mode = self.get('run_mode')
existing = self.open_session()
# except clause exits function
# Try to end session with inner command 'quit'
try:
existing.cmd("quit")
# normally 'quit' kills the process and control jumps to the except below
except aexpect.ShellProcessTerminatedError:
self.__class__.SESSION_COUNTER -= 1
self.__dict_del__('session_id')
return # guestfish session was closed normally
# Close with 'quit' did not respond
# So close with aexpect functions
if run_mode != "remote":
if existing.is_alive():
# try nicely first
existing.close()
if existing.is_alive():
# Be mean, in case it's hung
existing.close(sig=signal.SIGTERM)
# Keep count:
self.__class__.SESSION_COUNTER -= 1
self.__dict_del__('session_id')
except LibguestfsCmdError:
# Allow other exceptions to be raised
pass # session was closed already
def new_session(self):
"""
Open new session, closing any existing
"""
# Accessors may call this method, avoid recursion
# Must exist, can't be None
guestfs_exec = self.__dict_get__('lgf_exec')
self.close_session()
# Always create new session
run_mode = self.get('run_mode')
if run_mode == "remote":
new_session = GuestfishRemote(guestfs_exec)
else:
new_session = GuestfishSession(guestfs_exec)
# Keep count
self.__class__.SESSION_COUNTER += 1
session_id = new_session.get_id()
self.__dict_set__('session_id', session_id)
def open_session(self):
"""
Return session with session_id in this class.
"""
try:
session_id = self.__dict_get__('session_id')
run_mode = self.get('run_mode')
if session_id:
try:
if run_mode == "remote":
return GuestfishRemote(a_id=session_id)
else:
return GuestfishSession(a_id=session_id)
except aexpect.ShellStatusError:
# session was already closed
self.__dict_del__('session_id')
raise LibguestfsCmdError(
"Open session '%s' failed." % session_id)
except KeyError:
raise LibguestfsCmdError("No session id.")
# Inner command for guestfish should be executed in a guestfish session
def inner_cmd(self, command):
"""
Execute inner command of guestfish in a persistent session.
:param command: inner command to be executed.
"""
session = self.open_session()
# Allow to raise error by default.
ignore_status = self.__dict_get__('ignore_status')
return session.cmd_result(command, ignore_status=ignore_status)
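# Hedged usage sketch of a persistent session (the disk image path is
# hypothetical; guestfish must be installed):
#
#     gfp = GuestfishPersistent(disk_img='/tmp/guest.img', ro_mode=True)
#     gfp.run()                                 # sends the inner 'launch'
#     print(gfp.list_filesystems().stdout)      # CmdResult from the session
#     gfp.close_session()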
def add_drive(self, filename):
"""
add-drive - add an image to examine or modify
This function is the equivalent of calling "add_drive_opts" with no
optional parameters, so the disk is added writable, with the format
being detected automatically.
"""
return self.inner_cmd("add-drive %s" % filename)
def add_drive_opts(self, filename, readonly=False, format=None,
iface=None, name=None, label=None, protocol=None,
server=None, username=None, secret=None,
cachemode=None, discard=None, copyonread=False):
"""
add-drive-opts - add an image to examine or modify.
This function adds a disk image called "filename" to the handle.
"filename" may be a regular host file or a host device.
"""
cmd = "add-drive-opts %s" % filename
if readonly:
cmd += " readonly:true"
else:
cmd += " readonly:false"
if format:
cmd += " format:%s" % format
if iface:
cmd += " iface:%s" % iface
if name:
cmd += " name:%s" % name
if label:
cmd += " label:%s" % label
if protocol:
cmd += " protocol:%s" % protocol
if server:
cmd += " server:%s" % server
if username:
cmd += " username:%s" % username
if secret:
cmd += " secret:%s" % secret
if cachemode:
cmd += " cachemode:%s" % cachemode
if discard:
cmd += " discard:%s" % discard
if copyonread:
cmd += " copyonread:true"
else:
# The default is false for copyonread.
# If copyonread param is false,
# It's no need to set " copyonread:false" explicitly.
pass
return self.inner_cmd(cmd)
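# For reference (derived directly from the string-building above), a call like
#
#     gfp.add_drive_opts('/tmp/disk.img', readonly=True, format='qcow2')
#
# sends the inner command:
#
#     add-drive-opts /tmp/disk.img readonly:true format:qcow2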
def add_drive_ro(self, filename):
"""
add-ro/add-drive-ro - add a drive in snapshot mode (read-only)
This function is the equivalent of calling "add_drive_opts" with the
optional parameter "GUESTFS_ADD_DRIVE_OPTS_READONLY" set to 1, so the
disk is added read-only, with the format being detected automatically.
"""
return self.inner_cmd("add-drive-ro %s" % filename)
def add_domain(self, domain, libvirturi=None, readonly=False, iface=None,
live=False, allowuuid=False, readonlydisk=None):
"""
domain/add-domain - add the disk(s) from a named libvirt domain
This function adds the disk(s) attached to the named libvirt domain
"dom". It works by connecting to libvirt, requesting the domain and
domain XML from libvirt, parsing it for disks, and calling
"add_drive_opts" on each one.
"""
cmd = "add-domain %s" % domain
if libvirturi:
cmd += " libvirturi:%s" % libvirturi
if readonly:
cmd += " readonly:true"
else:
cmd += " readonly:false"
if iface:
cmd += " iface:%s" % iface
if live:
cmd += " live:true"
if allowuuid:
cmd += " allowuuid:true"
if readonlydisk:
cmd += " readonlydisk:%s" % readonlydisk
return self.inner_cmd(cmd)
def run(self):
"""
run/launch - launch the qemu subprocess
Internally libguestfs is implemented by running a virtual machine
using qemu.
"""
return self.inner_cmd("launch")
def df(self):
"""
df - report file system disk space usage
This command runs the "df" command to report disk space used.
"""
return self.inner_cmd("df")
def df_h(self):
"""
df-h - report file system disk space usage (human readable)
This command runs the "df -h" command to report disk space used in
human-readable format.
"""
return self.inner_cmd("df-h")
def dd(self, src, dest):
"""
dd - copy from source to destination using dd
This command copies from one source device or file "src" to another
destination device or file "dest".Normally you would use this to copy
to or from a device or partition,for example to duplicate a filesystem
"""
return self.inner_cmd("dd %s %s" % (src, dest))
def copy_size(self, src, dest, size):
"""
copy-size - copy size bytes from source to destination using dd
This command copies exactly "size" bytes from one source device or file
"src" to another destination device or file "dest".
"""
return self.inner_cmd("copy-size %s %s %s" % (src, dest, size))
def list_partitions(self):
"""
list-partitions - list the partitions
List all the partitions detected on all block devices.
"""
return self.inner_cmd("list-partitions")
def mount(self, device, mountpoint):
"""
mount - mount a guest disk at a position in the filesystem
Mount a guest disk at a position in the filesystem.
"""
return self.inner_cmd("mount %s %s" % (device, mountpoint))
def mount_ro(self, device, mountpoint):
"""
mount-ro - mount a guest disk, read-only
This is the same as the "mount" command, but it mounts the
filesystem with the read-only (*-o ro*) flag.
"""
return self.inner_cmd("mount-ro %s %s" % (device, mountpoint))
def mount_options(self, options, device, mountpoint):
"""
mount - mount a guest disk at a position in the filesystem
Mount a guest disk at a position in the filesystem.
"""
return self.inner_cmd("mount-options %s %s %s" % (options, device, mountpoint))
def mounts(self):
"""
mounts - show mounted filesystems
This returns the list of currently mounted filesystems.
"""
return self.inner_cmd("mounts")
def mountpoints(self):
"""
mountpoints - show mountpoints
This call is similar to "mounts".
That call returns a list of devices.
"""
return self.inner_cmd("mountpoints")
def do_mount(self, mountpoint):
"""
do_mount - automatically mount
Mount an LVM or physical partition on '/'
"""
partition_type = self.params.get("partition_type")
if partition_type == "lvm":
vg_name = self.params.get("vg_name", "vol_test")
lv_name = self.params.get("lv_name", "vol_file")
device = "/dev/%s/%s" % (vg_name, lv_name)
logging.info("mount lvm partition...%s" % device)
elif partition_type == "physical":
pv_name = self.params.get("pv_name", "/dev/sdb")
device = pv_name + "1"
logging.info("mount physical partition...%s" % device)
self.mount(device, mountpoint)
def read_file(self, path):
"""
read-file - read a file
This calls returns the contents of the file "path" as a buffer.
"""
return self.inner_cmd("read-file %s" % path)
def cat(self, path):
"""
cat - list the contents of a file
Return the contents of the file named "path".
"""
return self.inner_cmd("cat %s" % path)
def write(self, path, content):
"""
write - create a new file
This call creates a file called "path". The content of the file
is the string "content" (which can contain any 8 bit data).
"""
return self.inner_cmd("write '%s' \"%s\"" % (path, content))
def write_append(self, path, content):
"""
write-append - append content to end of file
This call appends "content" to the end of file "path".
If "path" does not exist, then a new file is created.
"""
return self.inner_cmd("write-append '%s' \"%s\"" % (path, content))
def inspect_os(self):
"""
inspect-os - inspect disk and return list of operating systems found
This function uses other libguestfs functions and certain heuristics to
inspect the disk(s) (usually disks belonging to a virtual machine),
looking for operating systems.
"""
return self.inner_cmd("inspect-os")
def inspect_get_roots(self):
"""
inspect-get-roots - return list of operating systems found by
last inspection
This function is a convenient way to get the list of root devices
"""
return self.inner_cmd("inspect-get-roots")
def inspect_get_arch(self, root):
"""
inspect-get-arch - get architecture of inspected operating system
This returns the architecture of the inspected operating system.
"""
return self.inner_cmd("inspect-get-arch %s" % root)
def inspect_get_distro(self, root):
"""
inspect-get-distro - get distro of inspected operating system
This returns the distro (distribution) of the inspected
operating system.
"""
return self.inner_cmd("inspect-get-distro %s" % root)
def inspect_get_filesystems(self, root):
"""
inspect-get-filesystems - get filesystems associated with inspected
operating system
This returns a list of all the filesystems that we think are associated
with this operating system.
"""
return self.inner_cmd("inspect-get-filesystems %s" % root)
def inspect_get_hostname(self, root):
"""
inspect-get-hostname - get hostname of the operating system
This function returns the hostname of the operating system as found by
inspection of the guest's configuration files.
"""
return self.inner_cmd("inspect-get-hostname %s" % root)
def inspect_get_major_version(self, root):
"""
inspect-get-major-version - get major version of inspected operating
system
This returns the major version number of the inspected
operating system.
"""
return self.inner_cmd("inspect-get-major-version %s" % root)
def inspect_get_minor_version(self, root):
"""
inspect-get-minor-version - get minor version of inspected operating
system
This returns the minor version number of the inspected operating system
"""
return self.inner_cmd("inspect-get-minor-version %s" % root)
def inspect_get_mountpoints(self, root):
"""
inspect-get-mountpoints - get mountpoints of inspected operating system
This returns a hash of where we think the filesystems associated with
this operating system should be mounted.
"""
return self.inner_cmd("inspect-get-mountpoints %s" % root)
def list_filesystems(self):
"""
list-filesystems - list filesystems
This inspection command looks for filesystems on partitions, block
devices and logical volumes, returning a list of devices containing
filesystems and their type.
"""
return self.inner_cmd("list-filesystems")
def list_devices(self):
"""
list-devices - list the block devices
List all the block devices.
"""
return self.inner_cmd("list-devices")
def tar_out(self, directory, tarfile):
"""
tar-out - pack directory into tarfile
This command packs the contents of "directory" and downloads it
to local file "tarfile".
"""
return self.inner_cmd("tar-out %s %s" % (directory, tarfile))
def tar_in(self, tarfile, directory):
"""
tar-in - unpack tarfile to directory
This command uploads and unpacks local file "tarfile"
(an *uncompressed* tar file) into "directory".
"""
return self.inner_cmd("tar-in %s %s" % (tarfile, directory))
def tar_in_opts(self, tarfile, directory, compress=None):
"""
tar-in-opts - unpack tarfile to directory
This command uploads and unpacks local file "tarfile"
(a *compressed* tar file) into "directory".
"""
if compress:
return self.inner_cmd("tar-in-opts %s %s compress:%s" % (tarfile, directory, compress))
else:
return self.inner_cmd("tar-in-opts %s %s" % (tarfile, directory))
def file_architecture(self, filename):
"""
file-architecture - detect the architecture of a binary file
This detects the architecture of the binary "filename", and returns it
if known.
"""
return self.inner_cmd("file-architecture %s" % filename)
def filesize(self, file):
"""
filesize - return the size of the file in bytes
This command returns the size of "file" in bytes.
"""
return self.inner_cmd("filesize %s" % file)
def stat(self, path):
"""
stat - get file information
Returns file information for the given "path".
"""
return self.inner_cmd("stat %s" % path)
def lstat(self, path):
"""
lstat - get file information for a symbolic link
Returns file information for the given "path".
"""
return self.inner_cmd("lstat %s" % path)
def lstatlist(self, path, names):
"""
lstatlist - lstat on multiple files
This call allows you to perform the "lstat" operation on multiple files,
where all files are in the directory "path". "names" is the list of
files from this directory.
"""
return self.inner_cmd("lstatlist %s %s" % (path, names))
def umask(self, mask):
"""
umask - set file mode creation mask (umask)
This function sets the mask used for creating new files and device nodes
to "mask & 0777".
"""
return self.inner_cmd("umask %s" % mask)
def get_umask(self):
"""
get-umask - get the current umask
Return the current umask. By default the umask is 022 unless it has been
set by calling "umask".
"""
return self.inner_cmd("get-umask")
def mkdir(self, path):
"""
mkdir - create a directory
Create a directory named "path".
"""
return self.inner_cmd("mkdir %s" % path)
def mkdir_p(self, path):
"""
mkdir-p - create a directory and parents
Create a directory named "path", creating any parent directories as necessary.
This is like the "mkdir -p" shell command.
"""
return self.inner_cmd("mkdir-p %s" % path)
def mkdir_mode(self, path, mode):
"""
mkdir-mode - create a directory with a particular mode
This command creates a directory, setting the initial permissions of the
directory to "mode".
"""
return self.inner_cmd("mkdir-mode %s %s" % (path, mode))
def mknod(self, mode, devmajor, devminor, path):
"""
mknod - make block, character or FIFO devices
This call creates block or character special devices, or named pipes
(FIFOs).
"""
return self.inner_cmd("mknod %s %s %s %s" % (mode, devmajor, devminor, path))
def rm_rf(self, path):
"""
rm-rf - remove a file or directory recursively
Remove the file or directory "path", recursively removing the contents
if its a directory. This is like the "rm -rf" shell command.
"""
return self.inner_cmd("rm-rf %s" % path)
def copy_out(self, remote, localdir):
"""
copy-out - copy remote files or directories out of an image
"copy-out" copies remote files or directories recursively out of the
disk image, placing them on the host disk in a local directory called
"localdir" (which must exist).
"""
return self.inner_cmd("copy-out %s %s" % (remote, localdir))
def copy_in(self, local, remotedir):
"""
copy-in - copy local files or directories into an image
"copy-in" copies local files or directories recursively into the disk
image, placing them in the directory called "/remotedir" (which must
exist).
"""
return self.inner_cmd("copy-in %s /%s" % (local, remotedir))
def chmod(self, mode, path):
"""
chmod - change file mode
Change the mode (permissions) of "path" to "mode". Only numeric modes
are supported.
"""
return self.inner_cmd("chmod %s %s" % (mode, path))
def chown(self, owner, group, path):
"""
chown - change file owner and group
Change the file owner to "owner" and group to "group".
"""
return self.inner_cmd("chown %s %s %s" % (owner, group, path))
def lchown(self, owner, group, path):
"""
lchown - change file owner and group
Change the file owner to "owner" and group to "group". This is like
"chown" but if "path" is a symlink then the link itself is changed, not
the target.
"""
return self.inner_cmd("lchown %s %s %s" % (owner, group, path))
def du(self, path):
"""
du - estimate file space usage
This command runs the "du -s" command to estimate file space usage for
"path".
"""
return self.inner_cmd("du %s" % path)
def file(self, path):
"""
file - determine file type
This call uses the standard file(1) command to determine the type or
contents of the file.
"""
return self.inner_cmd("file %s" % path)
def rm(self, path):
"""
rm - remove a file
Remove the single file "path".
"""
return self.inner_cmd("rm %s" % path)
def is_file(self, path, followsymlinks=None):
"""
is-file - test if a regular file
This returns "true" if and only if there is a regular file with the
given "path" name.
"""
cmd = "is-file %s" % path
if followsymlinks:
cmd += " followsymlinks:%s" % followsymlinks
return self.inner_cmd(cmd)
def is_file_opts(self, path, followsymlinks=None):
"""
is-file-opts - test if a regular file
This returns "true" if and only if there is a regular file with the
given "path" name.
An alias of command is-file
"""
cmd = "is-file-opts %s" % path
if followsymlinks:
cmd += " followsymlinks:%s" % followsymlinks
return self.inner_cmd(cmd)
def is_blockdev(self, path, followsymlinks=None):
"""
is-blockdev - test if block device
This returns "true" if and only if there is a block device with the
given "path" name
"""
cmd = "is-blockdev %s" % path
if followsymlinks:
cmd += " followsymlinks:%s" % followsymlinks
return self.inner_cmd(cmd)
def is_blockdev_opts(self, path, followsymlinks=None):
"""
is-blockdev-opts - test if block device
This returns "true" if and only if there is a block device with the
given "path" name
An alias of command is-blockdev
"""
cmd = "is-blockdev-opts %s" % path
if followsymlinks:
cmd += " followsymlinks:%s" % followsymlinks
return self.inner_cmd(cmd)
def is_chardev(self, path, followsymlinks=None):
"""
is-chardev - test if character device
This returns "true" if and only if there is a character device with the
given "path" name.
"""
cmd = "is-chardev %s" % path
if followsymlinks:
cmd += " followsymlinks:%s" % followsymlinks
return self.inner_cmd(cmd)
def is_chardev_opts(self, path, followsymlinks=None):
"""
is-chardev-opts - test if character device
This returns "true" if and only if there is a character device with the
given "path" name.
An alias of command is-chardev
"""
cmd = "is-chardev-opts %s" % path
if followsymlinks:
cmd += " followsymlinks:%s" % followsymlinks
return self.inner_cmd(cmd)
def is_dir(self, path, followsymlinks=None):
"""
is-dir - test if a directory
This returns "true" if and only if there is a directory with the given
"path" name. Note that it returns false for other objects like files.
"""
cmd = "is-dir %s" % path
if followsymlinks:
cmd += " followsymlinks:%s" % followsymlinks
return self.inner_cmd(cmd)
def is_dir_opts(self, path, followsymlinks=None):
"""
is-dir-opts - test if a directory
This returns "true" if and only if there is a directory with the
given "path" name.
An alias of command is-dir
"""
cmd = "is-dir-opts %s" % path
if followsymlinks:
cmd += " followsymlinks:%s" % followsymlinks
return self.inner_cmd(cmd)
def is_fifo(self, path, followsymlinks=None):
"""
is-fifo - test if FIFO (named pipe)
This returns "true" if and only if there is a FIFO (named pipe) with
the given "path" name.
"""
cmd = "is-fifo %s" % path
if followsymlinks:
cmd += " followsymlinks:%s" % followsymlinks
return self.inner_cmd(cmd)
def is_fifo_opts(self, path, followsymlinks=None):
"""
is-fifo-opts - test if FIFO (named pipe)
This returns "true" if and only if there is a FIFO (named pipe) with
the given "path" name.
An alias of command is-fifo
"""
cmd = "is-fifo-opts %s" % path
if followsymlinks:
cmd += " followsymlinks:%s" % followsymlinks
return self.inner_cmd(cmd)
def is_lv(self, device):
"""
is-lv - test if device is a logical volume
This command tests whether "device" is a logical volume, and returns
true iff this is the case.
"""
return self.inner_cmd("is-lv %s" % device)
def is_socket(self, path, followsymlinks=None):
"""
is-socket - test if socket
This returns "true" if and only if there is a Unix domain socket with
the given "path" name.
"""
cmd = "is-socket %s" % path
if followsymlinks:
cmd += " followsymlinks:%s" % followsymlinks
return self.inner_cmd(cmd)
def is_socket_opts(self, path, followsymlinks=None):
"""
is-socket-opts - test if socket
This returns "true" if and only if there is a Unix domain socket with
the given "path" name.
An alias of command is-socket
"""
cmd = "is-socket-opts %s" % path
if followsymlinks:
cmd += " followsymlinks:%s" % followsymlinks
return self.inner_cmd(cmd)
def is_symlink(self, path):
"""
is-symlink - test if symbolic link
This returns "true" if and only if there is a symbolic link with the
given "path" name.
"""
return self.inner_cmd("is-symlink %s" % path)
def is_whole_device(self, device):
"""
is_whole_device - test if a device is a whole device
This returns "true" if and only if "device" refers to a whole block
device. That is, not a partition or a logical device.
"""
return self.inner_cmd("is-whole-device %s" % device)
def is_zero(self, path):
"""
is-zero - test if a file contains all zero bytes
This returns true iff the file exists and the file is empty or it
contains all zero bytes.
"""
return self.inner_cmd("is-zero %s" % path)
def is_zero_device(self, device):
"""
is-zero-device - test if a device contains all zero bytes
This returns true iff the device exists and contains all zero bytes.
Note that for large devices this can take a long time to run.
"""
return self.inner_cmd("is-zero-device %s" % device)
def cp(self, src, dest):
"""
cp - copy a file
This copies a file from "src" to "dest" where "dest" is either a
destination filename or destination directory.
"""
return self.inner_cmd("cp %s %s" % (src, dest))
def exists(self, path):
"""
exists - test if file or directory exists
This returns "true" if and only if there is a file, directory (or
anything) with the given "path" name.
"""
return self.inner_cmd("exists %s" % path)
def cp_a(self, src, dest):
"""
cp-a - copy a file or directory recursively
This copies a file or directory from "src" to "dest" recursively using
the "cp -a" command.
"""
return self.inner_cmd("cp-a %s %s" % (src, dest))
def equal(self, file1, file2):
"""
equal - test if two files have equal contents
This compares the two files "file1" and "file2" and returns true if
their content is exactly equal, or false otherwise.
"""
return self.inner_cmd("equal %s %s" % (file1, file2))
def fill(self, c, len, path):
"""
fill - fill a file with octets
This command creates a new file called "path". The initial content of
the file is "len" octets of "c", where "c" must be a number in the range
"[0..255]".
"""
return self.inner_cmd("fill %s %s %s" % (c, len, path))
def fill_dir(self, dir, nr):
"""
fill-dir - fill a directory with empty files
This function, useful for testing filesystems, creates "nr" empty files
in the directory "dir" with names 00000000 through "nr-1" (ie. each file
name is 8 digits long padded with zeroes).
"""
return self.inner_cmd("fill-dir %s %s" % (dir, nr))
def fill_pattern(self, pattern, len, path):
"""
fill-pattern - fill a file with a repeating pattern of bytes
This function is like "fill" except that it creates a new file of length
"len" containing the repeating pattern of bytes in "pattern". The
pattern is truncated if necessary to ensure the length of the file is
exactly "len" bytes.
"""
return self.inner_cmd("fill-pattern %s %s %s" % (pattern, len, path))
def strings(self, path):
"""
strings - print the printable strings in a file
This runs the strings(1) command on a file and returns the list of
printable strings found.
"""
return self.inner_cmd("strings %s" % path)
def head(self, path):
"""
head - return first 10 lines of a file
This command returns up to the first 10 lines of a file as a list of
strings.
"""
return self.inner_cmd("head %s" % path)
def head_n(self, nrlines, path):
"""
head-n - return first N lines of a file
If the parameter "nrlines" is a positive number, this returns the first
"nrlines" lines of the file "path".
"""
return self.inner_cmd("head-n %s %s" % (nrlines, path))
def tail(self, path):
"""
tail - return last 10 lines of a file
This command returns up to the last 10 lines of a file as a list of
strings.
"""
return self.inner_cmd("tail %s" % path)
def pread(self, path, count, offset):
"""
pread - read part of a file
This command lets you read part of a file. It reads "count" bytes of the
file, starting at "offset", from file "path".
"""
return self.inner_cmd("pread %s %s %s" % (path, count, offset))
def hexdump(self, path):
"""
hexdump - dump a file in hexadecimal
This runs "hexdump -C" on the given "path". The result is the
human-readable, canonical hex dump of the file.
"""
return self.inner_cmd("hexdump %s" % path)
def more(self, filename):
"""
more - view a file
This is used to view a file.
"""
return self.inner_cmd("more %s" % filename)
def download(self, remotefilename, filename):
"""
download - download a file to the local machine
Download file "remotefilename" and save it as "filename" on the local
machine.
"""
return self.inner_cmd("download %s %s" % (remotefilename, filename))
def download_offset(self, remotefilename, filename, offset, size):
"""
download-offset - download a file to the local machine with offset and
size
Download file "remotefilename" and save it as "filename" on the local
machine.
"""
return self.inner_cmd("download-offset %s %s %s %s" % (remotefilename, filename, offset, size))
def upload(self, filename, remotefilename):
"""
upload - upload a file from the local machine
Upload local file "filename" to "remotefilename" on the filesystem.
"""
return self.inner_cmd("upload %s %s" % (filename, remotefilename))
def upload_offset(self, filename, remotefilename, offset):
"""
upload-offset - upload a file from the local machine with offset
Upload local file "filename" to "remotefilename" on the filesystem.
"""
return self.inner_cmd("upload-offset %s %s %s" % (filename, remotefilename, offset))
def fallocate(self, path, len):
"""
fallocate - preallocate a file in the guest filesystem
This command preallocates a file (containing zero bytes) named "path" of
size "len" bytes. If the file exists already, it is overwritten.
"""
return self.inner_cmd("fallocate %s %s" % (path, len))
def fallocate64(self, path, len):
"""
fallocate64 - preallocate a file in the guest filesystem
This command preallocates a file (containing zero bytes) named "path" of
size "len" bytes. If the file exists already, it is overwritten.
"""
return self.inner_cmd("fallocate64 %s %s" % (path, len))
def part_init(self, device, parttype):
"""
part-init - create an empty partition table
This creates an empty partition table on "device" of one of the
partition types listed below. Usually "parttype" should be either
"msdos" or "gpt" (for large disks).
"""
return self.inner_cmd("part-init %s %s" % (device, parttype))
def part_add(self, device, prlogex, startsect, endsect):
"""
part-add - add a partition to the device
This command adds a partition to "device". If there is no partition
table on the device, call "part_init" first.
"""
cmd = "part-add %s %s %s %s" % (device, prlogex, startsect, endsect)
return self.inner_cmd(cmd)
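# Usage sketch (hypothetical device): build a GPT table and one primary
# partition. "prlogex" is "p" (primary), "l" (logical) or "e" (extended);
# startsect/endsect are sector numbers, and a negative endsect counts
# backwards from the end of the disk:
#
#   gf.part_init("/dev/sda", "gpt")
#   gf.part_add("/dev/sda", "p", 2048, -2048)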
def part_del(self, device, partnum):
"""
part-del - delete a partition
This command deletes the partition numbered "partnum" on "device".
Note that in the case of MBR partitioning, deleting an extended
partition also deletes any logical partitions it contains.
"""
return self.inner_cmd("part_del %s %s" % (device, partnum))
def part_set_bootable(self, device, partnum, bootable):
"""
part-set-bootable - make a partition bootable
This sets the bootable flag on partition numbered "partnum" on device
"device". Note that partitions are numbered from 1.
"""
return self.inner_cmd("part-set-bootable %s %s %s" % (device, partnum, bootable))
def part_set_mbr_id(self, device, partnum, idbyte):
"""
part-set-mbr-id - set the MBR type byte (ID byte) of a partition
Sets the MBR type byte (also known as the ID byte) of the numbered
partition "partnum" to "idbyte". Note that the type bytes quoted in
most documentation are in fact hexadecimal numbers, but usually documented
without any leading "0x" which might be confusing.
"""
return self.inner_cmd("part-set-mbr-id %s %s %s" % (device, partnum, idbyte))
def part_set_name(self, device, partnum, name):
"""
part-set-name - set partition name
This sets the partition name on partition numbered "partnum" on device
"device". Note that partitions are numbered from 1.
"""
return self.inner_cmd("part-set-name %s %s %s" % (device, partnum, name))
def part_to_dev(self, partition):
"""
part-to-dev - convert partition name to device name
This function takes a partition name (eg. "/dev/sdb1") and removes the
partition number, returning the device name (eg. "/dev/sdb").
The named partition must exist, for example as a string returned from
"list_partitions".
"""
return self.inner_cmd("part-to-dev %s" % partition)
def part_to_partnum(self, partition):
"""
part-to-partnum - convert partition name to partition number
This function takes a partition name (eg. "/dev/sdb1") and returns the
partition number (eg. 1).
The named partition must exist, for example as a string returned from
"list_partitions".
"""
return self.inner_cmd("part_to_partnum %s" % partition)
def checksum(self, csumtype, path):
"""
checksum - compute MD5, SHAx or CRC checksum of file
This call computes the MD5, SHAx or CRC checksum of the file named
"path".
"""
return self.inner_cmd("checksum %s %s" % (csumtype, path))
def checksum_device(self, csumtype, device):
"""
checksum-device - compute MD5, SHAx or CRC checksum of the contents of a
device
This call computes the MD5, SHAx or CRC checksum of the contents of the
device named "device". For the types of checksums supported see the
"checksum" command.
"""
return self.inner_cmd("checksum-device %s %s" % (csumtype, device))
def checksums_out(self, csumtype, directory, sumsfile):
"""
checksums-out - compute MD5, SHAx or CRC checksum of files in a
directory
This command computes the checksums of all regular files in "directory"
and then emits a list of those checksums to the local output file
"sumsfile".
"""
return self.inner_cmd("checksums-out %s %s %s" % (csumtype, directory, sumsfile))
def is_config(self):
"""
is-config - is ready to accept commands
This returns true if this handle is in the "CONFIG" state
"""
return self.inner_cmd("is-config")
def is_ready(self):
"""
is-ready - is ready to accept commands
This returns true if this handle is ready to accept commands
(in the "READY" state).
"""
return self.inner_cmd("is-ready")
def part_list(self, device):
"""
part-list - list partitions on a device
This command parses the partition table on "device" and
returns the list of partitions found.
"""
return self.inner_cmd("part-list %s" % device)
def mkfs(self, fstype, device, blocksize=None, features=None,
inode=None, sectorsize=None):
"""
mkfs - make a filesystem
This function creates a filesystem on "device". The filesystem type is
"fstype", for example "ext3".
"""
cmd = 'mkfs %s %s' % (fstype, device)
if blocksize:
cmd += ' blocksize:%s ' % blocksize
if features:
cmd += ' features:%s ' % features
if inode:
cmd += ' inode:%s ' % inode
if sectorsize:
cmd += ' sectorsize:%s ' % sectorsize
return self.inner_cmd(cmd)
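# Usage sketch (hypothetical devices): each optional keyword argument is
# appended as a guestfish "key:value" pair, so only pass options the target
# filesystem type actually supports:
#
#   gf.mkfs("ext4", "/dev/sda1", blocksize=4096)
#   gf.mkfs("xfs", "/dev/sdb1")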
def mkfs_opts(self, fstype, device, blocksize=None, features=None,
inode=None, sectorsize=None):
"""
mkfs-opts - make a filesystem
An alias of command mkfs
"""
return self.mkfs(fstype, device, blocksize, features,
inode, sectorsize)
def part_disk(self, device, parttype):
"""
part-disk - partition whole disk with a single primary partition
This command is simply a combination of "part_init" followed by
"part_add" to create a single primary partition covering
the whole disk.
"""
return self.inner_cmd("part-disk %s %s" % (device, parttype))
def part_get_bootable(self, device, partnum):
"""
part-get-bootable - return true if a partition is bootable
This command returns true if the partition "partnum" on "device"
has the bootable flag set.
"""
return self.inner_cmd("part-get-bootable %s %s" % (device, partnum))
def part_get_mbr_id(self, device, partnum):
"""
part-get-mbr-id - get the MBR type byte (ID byte) from a partition
Returns the MBR type byte (also known as the ID byte) from the
numbered partition "partnum".
"""
return self.inner_cmd("part-get-mbr-id %s %s" % (device, partnum))
def part_get_parttype(self, device):
"""
part-get-parttype - get the partition table type
This command examines the partition table on "device" and returns the
partition table type (format) being used.
"""
return self.inner_cmd("part-get-parttype %s" % device)
def fsck(self, fstype, device):
"""
fsck - run the filesystem checker
This runs the filesystem checker (fsck) on "device" which should have
filesystem type "fstype".
"""
return self.inner_cmd("fsck %s %s" % (fstype, device))
def blockdev_getss(self, device):
"""
blockdev-getss - get sectorsize of block device
This returns the size of sectors on a block device. Usually 512,
but can be larger for modern devices.
"""
return self.inner_cmd("blockdev-getss %s" % device)
def blockdev_getsz(self, device):
"""
blockdev-getsz - get total size of device in 512-byte sectors
This returns the size of the device in units of 512-byte sectors
(even if the sectorsize isn't 512 bytes ... weird).
"""
return self.inner_cmd("blockdev-getsz %s" % device)
def blockdev_getbsz(self, device):
"""
blockdev-getbsz - get blocksize of block device
This returns the block size of a device.
"""
return self.inner_cmd("blockdev-getbsz %s" % device)
def blockdev_getsize64(self, device):
"""
blockdev-getsize64 - get total size of device in bytes
This returns the size of the device in bytes
"""
return self.inner_cmd("blockdev-getsize64 %s" % device)
def blockdev_setbsz(self, device, blocksize):
"""
blockdev-setbsz - set blocksize of block device
This sets the block size of a device.
"""
return self.inner_cmd("blockdev-setbsz %s %s" % (device, blocksize))
def blockdev_getro(self, device):
"""
blockdev-getro - is block device set to read-only
Returns a boolean indicating if the block device is read-only
(true if read-only, false if not).
"""
return self.inner_cmd("blockdev-getro %s" % device)
def blockdev_setro(self, device):
"""
blockdev-setro - set block device to read-only
Sets the block device named "device" to read-only.
"""
return self.inner_cmd("blockdev-setro %s" % device)
def blockdev_setrw(self, device):
"""
blockdev-setrw - set block device to read-write
Sets the block device named "device" to read-write.
"""
return self.inner_cmd("blockdev-setrw %s" % device)
def blockdev_flushbufs(self, device):
"""
blockdev-flushbufs - flush device buffers
This tells the kernel to flush internal buffers associated with
"device".
"""
return self.inner_cmd("blockdev-flushbufs %s" % device)
def blockdev_rereadpt(self, device):
"""
blockdev-rereadpt - reread partition table
Reread the partition table on "device".
"""
return self.inner_cmd("blockdev-rereadpt %s" % device)
def canonical_device_name(self, device):
"""
canonical-device-name - return canonical device name
This utility function is useful when displaying device names to
the user.
"""
return self.inner_cmd("canonical-device-name %s" % device)
def device_index(self, device):
"""
device-index - convert device to index
This function takes a device name (eg. "/dev/sdb") and returns the
index of the device in the list of devices.
"""
return self.inner_cmd("device-index %s" % device)
def disk_format(self, filename):
"""
disk-format - detect the disk format of a disk image
Detect and return the format of the disk image called "filename",
"filename" can also be a host device, etc
"""
return self.inner_cmd("disk-format %s" % filename)
def disk_has_backing_file(self, filename):
"""
disk-has-backing-file - return whether disk has a backing file
Detect and return whether the disk image "filename" has a backing file.
"""
return self.inner_cmd("disk-has-backing-file %s" % filename)
def disk_virtual_size(self, filename):
"""
disk-virtual-size - return virtual size of a disk
Detect and return the virtual size in bytes of the disk image "filename".
"""
return self.inner_cmd("disk-virtual-size %s" % filename)
def max_disks(self):
"""
max-disks - maximum number of disks that may be added
Return the maximum number of disks that may be added to a handle.
"""
return self.inner_cmd("max-disks")
def nr_devices(self):
"""
nr-devices - return number of whole block devices (disks) added
This returns the number of whole block devices that were added
"""
return self.inner_cmd("nr-devices")
def scrub_device(self, device):
"""
scrub-device - scrub (securely wipe) a device
This command writes patterns over "device" to make data retrieval more
difficult.
"""
return self.inner_cmd("scrub-device %s" % device)
def scrub_file(self, file):
"""
scrub-file - scrub (securely wipe) a file
This command writes patterns over a file to make data retrieval more
difficult.
"""
return self.inner_cmd("scrub-file %s" % file)
def scrub_freespace(self, dir):
"""
scrub-freespace - scrub (securely wipe) free space
This command creates the directory "dir" and then fills it with files
until the filesystem is full, scrubs the files as for "scrub_file", and
deletes them. The intention is to scrub any free space on the partition
containing "dir".
"""
return self.inner_cmd("scrub-freespace %s" % dir)
def md_create(self, name, device, missingbitmap=None, nrdevices=None,
spare=None, chunk=None, level=None):
"""
md-create - create a Linux md (RAID) device
Create a Linux md (RAID) device named "name" on the devices in the list
"devices".
"""
cmd = "md-create %s %s" % (name, device)
if missingbitmap:
cmd += " missingbitmap:%s" % missingbitmap
if nrdevices:
cmd += " nrdevices:%s" % nrdevices
if spare:
cmd += " spare:%s" % spare
if chunk:
cmd += " chunk:%s" % chunk
if level:
cmd += " level:%s" % level
return self.inner_cmd(cmd)
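# Usage sketch (hypothetical devices): mirror two partitions into a RAID1
# array. The wrapper takes the device list as a single string, so quote
# multiple devices the way guestfish expects a list:
#
#   gf.md_create("md0", "'/dev/sda1 /dev/sdb1'", level="raid1", nrdevices=2)
#   gf.list_md_devices()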
def list_md_devices(self):
"""
list-md-devices - list Linux md (RAID) devices
List all Linux md devices.
"""
return self.inner_cmd("list-md-devices")
def md_stop(self, md):
"""
md-stop - stop a Linux md (RAID) device
This command deactivates the MD array named "md".
The device is stopped, but it is not destroyed or zeroed.
"""
return self.inner_cmd("md-stop %s" % md)
def md_stat(self, md):
"""
md-stat - get underlying devices from an MD device
This call returns a list of the underlying devices which make up the
single software RAID array device "md".
"""
return self.inner_cmd("md-stat %s" % md)
def md_detail(self, md):
"""
md-detail - obtain metadata for an MD device
This command exposes the output of 'mdadm -DY <md>'. The following
fields are usually present in the returned hash. Other fields may also
be present.
"""
return self.inner_cmd("md-detail %s" % md)
def sfdisk(self, device, cyls, heads, sectors, lines):
"""
sfdisk - create partitions on a block device
This is a direct interface to the sfdisk(8) program for creating
partitions on block devices.
*This function is deprecated.* In new code, use the "part-add" call
instead.
Deprecated functions will not be removed from the API, but the fact
that they are deprecated indicates that there are problems with correct
use of these functions.
"""
return self.inner_cmd("sfdisk %s %s %s %s %s"
% (device, cyls, heads, sectors, lines))
def sfdisk_l(self, device):
"""
sfdisk-l - display the partition table
This displays the partition table on "device", in the human-readable
output of the sfdisk(8) command. It is not intended to be parsed.
*This function is deprecated.* In new code, use the "part-list" call
instead.
"""
return self.inner_cmd("sfdisk-l %s" % device)
def sfdiskM(self, device, lines):
"""
sfdiskM - create partitions on a block device
This is a simplified interface to the "sfdisk" command, where partition
sizes are specified in megabytes only (rounded to the nearest cylinder)
and you don't need to specify the cyls, heads and sectors parameters
which were rarely if ever used anyway.
*This function is deprecated.* In new code, use the "part-add" call
instead.
"""
return self.inner_cmd("sfdiskM %s %s" % (device, lines))
def sfdisk_N(self, device, partnum, cyls, heads, sectors, line):
"""
sfdisk-N - modify a single partition on a block device
This runs sfdisk(8) option to modify just the single partition "n"
(note: "n" counts from 1).
For other parameters, see "sfdisk". You should usually pass 0 for the
cyls/heads/sectors parameters.
*This function is deprecated.* In new code, use the "part-add" call
instead.
"""
return self.inner_cmd("sfdisk-N %s %s %s %s %s %s"
% (device, partnum, cyls, heads, sectors, line))
def sfdisk_disk_geometry(self, device):
"""
sfdisk-disk-geometry - display the disk geometry from the partition
table
This displays the disk geometry of "device" read from the partition
table. Especially in the case where the underlying block device has
been resized, this can be different from the kernel's idea of the
geometry
"""
return self.inner_cmd("sfdisk-disk-geometry %s" % device)
def sfdisk_kernel_geometry(self, device):
"""
sfdisk-kernel-geometry - display the kernel geometry
This displays the kernel's idea of the geometry of "device".
"""
return self.inner_cmd("sfdisk-kernel-geometry %s" % device)
def pvcreate(self, physvols):
"""
pvcreate - create an LVM physical volume
This creates an LVM physical volume on the device(s) given in "physvols".
"""
return self.inner_cmd("pvcreate %s" % (physvols))
def pvs(self):
"""
pvs - list the LVM physical volumes (PVs)
List all the physical volumes detected. This is the equivalent of the
pvs(8) command.
"""
return self.inner_cmd("pvs")
def pvs_full(self):
"""
pvs-full - list the LVM physical volumes (PVs)
List all the physical volumes detected. This is the equivalent of the
pvs(8) command. The "full" version includes all fields.
"""
return self.inner_cmd("pvs-full")
def pvresize(self, device):
"""
pvresize - resize an LVM physical volume
This resizes (expands or shrinks) an existing LVM physical volume to
match the new size of the underlying device.
"""
return self.inner_cmd("pvresize %s" % device)
def pvresize_size(self, device, size):
"""
pvresize-size - resize an LVM physical volume (with size)
This command is the same as "pvresize" except that it allows you to
specify the new size (in bytes) explicitly.
"""
return self.inner_cmd("pvresize-size %s %s" % (device, size))
def pvremove(self, device):
"""
pvremove - remove an LVM physical volume
This wipes a physical volume "device" so that LVM will no longer
recognise it.
The implementation uses the "pvremove" command which refuses to wipe
physical volumes that contain any volume groups, so you have to remove
those first.
"""
return self.inner_cmd("pvremove %s" % device)
def pvuuid(self, device):
"""
pvuuid - get the UUID of a physical volume
This command returns the UUID of the LVM PV "device".
"""
return self.inner_cmd("pvuuid %s" % device)
def vgcreate(self, volgroup, physvols):
"""
vgcreate - create an LVM volume group
This creates an LVM volume group called "volgroup" from the
non-empty list of physical volumes "physvols".
"""
return self.inner_cmd("vgcreate %s %s" % (volgroup, physvols))
def vgs(self):
"""
vgs - list the LVM volume groups (VGs)
List all the volume groups detected.
"""
return self.inner_cmd("vgs")
def vgs_full(self):
"""
vgs-full - list the LVM volume groups (VGs)
List all the volume groups detected. This is the equivalent of the
vgs(8) command. The "full" version includes all fields.
"""
return self.inner_cmd("vgs-full")
def vgrename(self, volgroup, newvolgroup):
"""
vgrename - rename an LVM volume group
Rename a volume group "volgroup" with the new name "newvolgroup".
"""
return self.inner_cmd("vgrename %s %s" % (volgroup, newvolgroup))
def vgremove(self, vgname):
"""
vgremove - remove an LVM volume group
Remove an LVM volume group "vgname", (for example "VG").
"""
return self.inner_cmd("vgremove %s" % vgname)
def vgscan(self):
"""
vgscan - rescan for LVM physical volumes, volume groups and logical
volumes
This rescans all block devices and rebuilds the list of LVM physical
volumes, volume groups and logical volumes.
"""
return self.inner_cmd("vgscan")
def vguuid(self, vgname):
"""
vguuid - get the UUID of a volume group
This command returns the UUID of the LVM VG named "vgname".
"""
return self.inner_cmd("vguuid %s" % vgname)
def vg_activate(self, activate, volgroups):
"""
vg-activate - activate or deactivate some volume groups
This command activates or (if "activate" is false) deactivates all
logical volumes in the listed volume groups "volgroups".
"""
return self.inner_cmd("vg-activate %s %s" % (activate, volgroups))
def vg_activate_all(self, activate):
"""
vg-activate-all - activate or deactivate all volume groups
This command activates or (if "activate" is false) deactivates all
logical volumes in all volume groups.
"""
return self.inner_cmd("vg-activate-all %s" % activate)
def vglvuuids(self, vgname):
"""
vglvuuids - get the LV UUIDs of all LVs in the volume group
Given a VG called "vgname", this returns the UUIDs of all the logical
volumes created in this volume group.
"""
return self.inner_cmd("vglvuuids %s" % vgname)
def vgpvuuids(self, vgname):
"""
vgpvuuids - get the PV UUIDs containing the volume group
Given a VG called "vgname", this returns the UUIDs of all the physical
volumes that this volume group resides on.
"""
return self.inner_cmd("vgpvuuids %s" % vgname)
def lvcreate(self, logvol, volgroup, mbytes):
"""
lvcreate - create an LVM logical volume
This creates an LVM logical volume called "logvol" on the
volume group "volgroup", with "size" megabytes.
"""
return self.inner_cmd("lvcreate %s %s %s" % (logvol, volgroup, mbytes))
def lvuuid(self, device):
"""
lvuuid - get the UUID of a logical volume
This command returns the UUID of the LVM LV "device".
"""
return self.inner_cmd("lvuuid %s" % device)
def lvm_canonical_lv_name(self, lvname):
"""
lvm-canonical-lv-name - get canonical name of an LV
This converts alternative naming schemes for LVs that you might
find to the canonical name.
"""
return self.inner_cmd("lvm-canonical-lv-name %s" % lvname)
def lvremove(self, device):
"""
lvremove - remove an LVM logical volume
Remove an LVM logical volume "device", where "device" is the path
to the LV, such as "/dev/VG/LV".
"""
return self.inner_cmd("lvremove %s" % device)
def lvresize(self, device, mbytes):
"""
lvresize - resize an LVM logical volume
This resizes (expands or shrinks) an existing LVM logical volume to
"mbytes".
"""
return self.inner_cmd("lvresize %s %s" % (device, mbytes))
def lvs(self):
"""
lvs - list the LVM logical volumes (LVs)
List all the logical volumes detected.
"""
return self.inner_cmd("lvs")
def lvs_full(self):
"""
lvs-full - list the LVM logical volumes (LVs)
List all the logical volumes detected. This is the equivalent of the
lvs(8) command. The "full" version includes all fields.
"""
return self.inner_cmd("lvs-full")
def lvm_clear_filter(self):
"""
lvm-clear-filter - clear LVM device filter
This undoes the effect of "lvm_set_filter". LVM will be able to see
every block device.
This command also clears the LVM cache and performs a volume group scan.
"""
return self.inner_cmd("lvm-clear-filter")
def lvm_remove_all(self):
"""
lvm-remove-all - remove all LVM LVs, VGs and PVs
This command removes all LVM logical volumes, volume groups and physical
volumes.
"""
return self.inner_cmd("lvm-remove-all")
def lvm_set_filter(self, device):
"""
lvm-set-filter - set LVM device filter
This sets the LVM device filter so that LVM will only be able to "see"
the block devices in the list "devices", and will ignore all other
attached block devices.
"""
return self.inner_cmd("lvm-set-filter %s" % device)
def lvresize_free(self, lv, percent):
"""
lvresize-free - expand an LV to fill free space
This expands an existing logical volume "lv" so that it fills "percent"%
of the remaining free space in the volume group. Commonly you would call
this with percent = 100, which expands the logical volume as much as
possible, using all remaining free space in the volume group.
"""
return self.inner_cmd("lvresize-free %s %s" % (lv, percent))
def lvrename(self, logvol, newlogvol):
"""
lvrename - rename an LVM logical volume
Rename a logical volume "logvol" with the new name "newlogvol".
"""
return self.inner_cmd("lvrename %s %s" % (logvol, newlogvol))
def vfs_type(self, mountable):
"""
vfs-type - get the Linux VFS type corresponding to a mounted device
Gets the filesystem type corresponding to the filesystem on "mountable"
"""
return self.inner_cmd("vfs-type %s" % (mountable))
def touch(self, path):
"""
touch - update file timestamps or create a new file
Touch acts like the touch(1) command. It can be used to update the
timestamps on a file, or, if the file does not exist, to create a new
zero-length file.
"""
return self.inner_cmd("touch %s" % (path))
def umount_all(self):
"""
umount-all - unmount all filesystems
This unmounts all mounted filesystems.
Some internal mounts are not unmounted by this call.
"""
return self.inner_cmd("umount-all")
def ls(self, directory):
"""
ls - list the files in a directory
List the files in "directory" (relative to the root directory, there is
no cwd). The '.' and '..' entries are not returned, but hidden files are
shown.
"""
return self.inner_cmd("ls %s" % (directory))
def ll(self, directory):
"""
ll - list the files in a directory (long format)
List the files in "directory" (relative to the root directory, there is
no cwd) in the format of 'ls -la'.
"""
return self.inner_cmd("ll %s" % (directory))
def sync(self):
"""
sync - sync disks, writes are flushed through to the disk image
This syncs the disk, so that any writes are flushed through to the
underlying disk image.
"""
return self.inner_cmd("sync")
def debug(self, subcmd, extraargs):
"""
debug - debugging and internals
The "debug" command exposes some internals of "guestfsd" (the guestfs
daemon) that runs inside the hypervisor.
"""
return self.inner_cmd("debug %s %s" % (subcmd, extraargs))
def set_e2uuid(self, device, uuid):
"""
set-e2uuid - set the ext2/3/4 filesystem UUID
This sets the ext2/3/4 filesystem UUID of the filesystem on "device" to
"uuid". The format of the UUID and alternatives such as "clear",
"random" and "time" are described in the tune2fs(8) manpage.
"""
return self.inner_cmd("set_e2uuid %s %s" % (device, uuid))
def get_e2uuid(self, device):
"""
get-e2uuid - get the ext2/3/4 filesystem UUID
This returns the ext2/3/4 filesystem UUID of the filesystem on "device".
"""
return self.inner_cmd("get_e2uuid %s" % (device))
def vfs_uuid(self, mountable):
"""
vfs-uuid - get the filesystem UUID
This returns the filesystem UUID of the filesystem on "mountable".
"""
return self.inner_cmd("vfs_uuid %s" % (mountable))
def findfs_uuid(self, uuid):
"""
findfs-uuid - find a filesystem by UUID
This command searches the filesystems and returns the one which has the
given UUID. An error is returned if no such filesystem can be found.
"""
return self.inner_cmd("findfs_uuid %s" % (uuid))
def set_uuid(self, device, uuid):
"""
set-uuid - set the filesystem UUID
Set the filesystem UUID on "device" to "uuid".
"""
return self.inner_cmd("set_uuid %s %s" % (device, uuid))
def set_e2label(self, device, label):
"""
set-e2label - set the ext2/3/4 filesystem label
This sets the ext2/3/4 filesystem label of the filesystem on "device" to
"label". Filesystem labels are limited to 16 characters.
"""
return self.inner_cmd("set_e2label %s %s" % (device, label))
def get_e2label(self, device):
"""
get-e2label - get the ext2/3/4 filesystem label
This returns the ext2/3/4 filesystem label of the filesystem on
"device".
"""
return self.inner_cmd("get_e2label %s" % (device))
def vfs_label(self, mountable):
"""
vfs-label - get the filesystem label
This returns the label of the filesystem on "mountable".
"""
return self.inner_cmd("vfs_label %s" % (mountable))
def findfs_label(self, label):
"""
findfs-label - find a filesystem by label
This command searches the filesystems and returns the one which has the
given label. An error is returned if no such filesystem can be found.
"""
return self.inner_cmd("findfs_label %s" % (label))
def set_label(self, mountable, label):
"""
set-label - set filesystem label
Set the filesystem label on "mountable" to "label".
"""
return self.inner_cmd("set_label %s %s" % (mountable, label))
def set_e2attrs(self, file, attrs, clear=None):
"""
set-e2attrs - set ext2 file attributes of a file
This sets or clears the file attributes "attrs" associated with the
inode "file".
"""
cmd = "set_e2attrs %s %s" % (file, attrs)
if clear:
cmd += " clear:%s" % clear
return self.inner_cmd(cmd)
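# Usage sketch (hypothetical path): set the immutable attribute on a file,
# then clear it again by passing the optional "clear" flag:
#
#   gf.set_e2attrs("/locked.conf", "i")
#   gf.set_e2attrs("/locked.conf", "i", clear="true")
#   gf.get_e2attrs("/locked.conf")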
def get_e2attrs(self, file):
"""
get-e2attrs - get ext2 file attributes of a file
This returns the file attributes associated with "file".
"""
return self.inner_cmd("get_e2attrs %s" % (file))
def set_e2generation(self, file, generation):
"""
set-e2generation - set ext2 file generation of a file
This sets the ext2 file generation of a file.
"""
return self.inner_cmd("set_e2generation %s %s" % (file, generation))
def get_e2generation(self, file):
"""
get-e2generation - get ext2 file generation of a file
This returns the ext2 file generation of a file. The generation (which
used to be called the "version") is a number associated with an inode.
This is most commonly used by NFS servers.
"""
return self.inner_cmd("get_e2generation %s" % (file))
def statvfs(self, path):
"""
statvfs - get file system statistics
Returns file system statistics for any mounted file system. "path"
should be a file or directory in the mounted file system (typically it
is the mount point itself, but it doesn't need to be).
"""
return self.inner_cmd("statvfs %s" % (path))
def tune2fs_l(self, device):
"""
tune2fs-l - get ext2/ext3/ext4 superblock details
This returns the contents of the ext2, ext3 or ext4 filesystem
superblock on "device".
"""
return self.inner_cmd("tune2fs_l %s" % (device))
def tune2fs(self, device, force=None, maxmountcount=None, mountcount=None,
errorbehavior=None, group=None, intervalbetweenchecks=None,
reservedblockspercentage=None, lastmounteddirectory=None,
reservedblockscount=None, user=None):
"""
tune2fs - adjust ext2/ext3/ext4 filesystem parameters
This call allows you to adjust various filesystem parameters of an
ext2/ext3/ext4 filesystem called "device".
"""
cmd = "tune2fs %s" % device
if force:
cmd += ' force:%s' % force
if maxmountcount:
cmd += ' maxmountcount:%s' % maxmountcount
if mountcount:
cmd += ' mountcount:%s' % mountcount
if errorbehavior:
cmd += ' errorbehavior:%s' % errorbehavior
if group:
cmd += ' group:%s' % group
if intervalbetweenchecks:
cmd += ' intervalbetweenchecks:%s' % intervalbetweenchecks
if reservedblockspercentage:
cmd += ' reservedblockspercentage:%s' % reservedblockspercentage
if lastmounteddirectory:
cmd += ' lastmounteddirectory:%s' % lastmounteddirectory
if reservedblockscount:
cmd += ' reservedblockscount:%s' % reservedblockscount
if user:
cmd += ' user:%s' % user
return self.inner_cmd(cmd)
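# Usage sketch (hypothetical device): each keyword argument is appended as
# a "key:value" option, so only the named parameters are changed. Note that
# the truthiness checks above silently drop falsy values such as 0:
#
#   gf.tune2fs("/dev/sda1", maxmountcount=30, reservedblockspercentage=1)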
def umount(self, pathordevice, force=None, lazyunmount=None):
"""
umount - unmount a filesystem
This unmounts the given filesystem. The filesystem may be specified
either by its mountpoint (path) or the device which contains the
filesystem.
"""
cmd = 'umount %s' % pathordevice
if force:
cmd += ' force:%s ' % force
if lazyunmount:
cmd += ' lazyunmount:%s ' % lazyunmount
return self.inner_cmd(cmd)
def blkid(self, device):
"""
blkid - print block device attributes
This command returns block device attributes for "device". The following
fields are usually present in the returned hash. Other fields may also
be present.
"""
return self.inner_cmd("blkid %s" % device)
def filesystem_available(self, filesystem):
"""
filesystem-available - check if filesystem is available
Check whether libguestfs supports the named filesystem. The argument
"filesystem" is a filesystem name, such as "ext3".
"""
return self.inner_cmd("filesystem_available %s" % filesystem)
def e2fsck(self, device, correct=None, forceall=None):
"""
e2fsck - check an ext2/ext3 filesystem
This runs the ext2/ext3 filesystem checker on "device". It can take the
following optional arguments:
"""
cmd = 'e2fsck %s' % device
if correct:
cmd += ' correct:%s ' % correct
if forceall:
cmd += ' forceall:%s ' % forceall
return self.inner_cmd(cmd)
def mkfifo(self, mode, path):
"""
mkfifo - make FIFO (named pipe)
This call creates a FIFO (named pipe) called "path" with mode "mode". It
is just a convenient wrapper around "mknod".
"""
return self.inner_cmd('mkfifo %s %s' % (mode, path))
def mklost_and_found(self, mountpoint):
"""
mklost-and-found - make lost+found directory on an ext2/3/4 filesystem
Make the "lost+found" directory, normally in the root directory of an
ext2/3/4 filesystem. "mountpoint" is the directory under which we try to
create the "lost+found" directory.
"""
return self.inner_cmd('mklost_and_found %s' % mountpoint)
def mknod_b(self, mode, devmajor, devminor, path):
"""
mknod-b - make block device node
This call creates a block device node called "path" with mode "mode" and
device major/minor "devmajor" and "devminor". It is just a convenient
wrapper around "mknod".
"""
return self.inner_cmd('mknod_b %s %s %s %s' % (mode, devmajor, devminor, path))
def mknod_c(self, mode, devmajor, devminor, path):
"""
mknod-c - make char device node
This call creates a char device node called "path" with mode "mode" and
device major/minor "devmajor" and "devminor". It is just a convenient
wrapper around "mknod".
"""
return self.inner_cmd('mknod_c %s %s %s %s' % (mode, devmajor, devminor, path))
def ntfsresize_opts(self, device, size=None, force=None):
"""
ntfsresize - resize an NTFS filesystem
This command resizes an NTFS filesystem, expanding or shrinking it to
the size of the underlying device.
"""
cmd = 'ntfsresize-opts %s' % device
if size:
cmd += ' size:%s ' % size
if force:
cmd += ' force:%s ' % force
return self.inner_cmd(cmd)
def resize2fs(self, device):
"""
resize2fs - resize an ext2, ext3 or ext4 filesystem
This resizes an ext2, ext3 or ext4 filesystem to match the size of the
underlying device.
"""
return self.inner_cmd('resize2fs %s' % device)
def resize2fs_M(self, device):
"""
resize2fs-M - resize an ext2, ext3 or ext4 filesystem to the minimum size
This command is the same as "resize2fs", but the filesystem is resized
to its minimum size. This works like the *-M* option to the "resize2fs"
command.
"""
return self.inner_cmd('resize2fs_M %s' % device)
def resize2fs_size(self, device, size):
"""
resize2fs-size - resize an ext2, ext3 or ext4 filesystem (with size)
This command is the same as "resize2fs" except that it allows you to
specify the new size (in bytes) explicitly.
"""
return self.inner_cmd('resize2fs_size %s %s' % (device, size))
def e2fsck_f(self, device):
"""
e2fsck-f - check an ext2/ext3 filesystem
This runs "e2fsck -p -f device", ie. runs the ext2/ext3 filesystem
checker on "device", noninteractively (*-p*), even if the filesystem
appears to be clean (*-f*).
"""
return self.inner_cmd('e2fsck_f %s' % (device))
def readdir(self, dir):
"""
readdir - read directory entries
This returns the list of directory entries in directory "dir".
"""
return self.inner_cmd('readdir %s' % (dir))
def mount_loop(self, file, mountpoint):
"""
mount-loop - mount a file using the loop device
This command lets you mount "file" (a filesystem image in a file) on a
mount point. It is entirely equivalent to the command "mount -o loop
file mountpoint".
"""
return self.inner_cmd('mount_loop %s %s' % (file, mountpoint))
def mount_vfs(self, options, vfstype, mountable, mountpoint):
"""
mount-vfs - mount a guest disk with mount options and vfstype
This is the same as the "mount" command, but it allows you to set both
the mount options and the vfstype as for the mount(8) *-o* and *-t*
flags.
"""
return self.inner_cmd('mount_vfs %s %s %s %s' % (options, vfstype, mountable, mountpoint))
def mkswap(self, device, label=None, uuid=None):
"""
mkswap - create a swap partition
Create a Linux swap partition on "device"
"""
cmd = 'mkswap %s ' % device
if label:
cmd += ' label:%s ' % label
if uuid:
cmd += ' uuid:%s ' % uuid
return self.inner_cmd(cmd)
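# Usage sketch (hypothetical device/label): create swap with a label, then
# enable it by that label (see swapon_label below):
#
#   gf.mkswap("/dev/sda2", label="SWAP")
#   gf.swapon_label("SWAP")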
def swapon_device(self, device):
"""
swapon-device - enable swap on device
This command enables the libguestfs appliance to use the swap device or
partition named "device". The increased memory is made available for all
commands, for example those run using "command" or "sh".
"""
return self.inner_cmd('swapon_device %s' % device)
def swapoff_device(self, device):
"""
swapoff-device - disable swap on device
This command disables the libguestfs appliance swap device or partition
named "device". See "swapon_device".
"""
return self.inner_cmd('swapoff_device %s' % device)
def mkswap_L(self, label, device):
"""
mkswap-L - create a swap partition with a label
Create a swap partition on "device" with label "label".
"""
return self.inner_cmd('mkswap_L %s %s' % (label, device))
def swapon_label(self, label):
"""
swapon-label - enable swap on labeled swap partition
This command enables swap to a labeled swap partition. See
"swapon_device" for other notes.
"""
return self.inner_cmd("swapon_label %s" % label)
def swapoff_label(self, label):
"""
swapoff-label - disable swap on labeled swap partition
This command disables the libguestfs appliance swap on labeled swap
partition.
"""
return self.inner_cmd("swapoff_label %s" % label)
def mkswap_U(self, uuid, device):
"""
mkswap-U - create a swap partition with an explicit UUID
Create a swap partition on "device" with UUID "uuid".
"""
return self.inner_cmd('mkswap_U %s %s' % (uuid, device))
def swapon_uuid(self, uuid):
"""
swapon-uuid - enable swap on swap partition by UUID
This command enables swap to a swap partition with the given UUID. See
"swapon_device" for other notes.
"""
return self.inner_cmd('swapon_uuid %s' % uuid)
def swapoff_uuid(self, uuid):
"""
swapoff-uuid - disable swap on swap partition by UUID
This command disables the libguestfs appliance swap partition with the
given UUID.
"""
return self.inner_cmd('swapoff_uuid %s' % uuid)
def mkswap_file(self, file):
"""
mkswap-file - create a swap file
Create a swap file.
"""
return self.inner_cmd("mkswap_file %s" % file)
def swapon_file(self, file):
"""
swapon-file - enable swap on file
This command enables swap to a file. See "swapon_device" for other
notes.
"""
return self.inner_cmd('swapon_file %s' % file)
def swapoff_file(self, file):
"""
swapoff-file - disable swap on file
This command disables the libguestfs appliance swap on file.
"""
return self.inner_cmd('swapoff_file %s' % file)
def alloc(self, filename, size):
"""
alloc - allocate and add a disk file
This creates an empty (zeroed) file of the given size, and then adds it
so that it can be further examined.
"""
return self.inner_cmd('alloc %s %s' % (filename, size))
def list_disk_labels(self):
"""
list-disk-labels - mapping of disk labels to devices
If you add drives using the optional "label" parameter of
"add_drive_opts", you can use this call to map between disk labels, and
raw block device and partition names (like "/dev/sda" and "/dev/sda1").
"""
return self.inner_cmd('list_disk_labels')
def add_drive_ro_with_if(self, filename, iface):
"""
add-drive-ro-with-if - add a drive read-only specifying the QEMU block
emulation to use
This is the same as "add_drive_ro" but it allows you to specify the QEMU
interface emulation to use at run time.
"""
return self.inner_cmd('add_drive_ro_with_if %s %s' % (filename, iface))
def add_drive_with_if(self, filename, iface):
"""
add-drive-with-if - add a drive specifying the QEMU block emulation to
use
This is the same as "add_drive" but it allows you to specify the QEMU
interface emulation to use at run time.
"""
return self.inner_cmd('add_drive_with_if %s %s' % (filename, iface))
def available(self, groups):
"""
available - test availability of some parts of the API
This command is used to check the availability of some groups of
functionality in the appliance, which not all builds of the libguestfs
appliance will be able to provide.
"""
return self.inner_cmd('available %s' % groups)
def available_all_groups(self):
"""
available-all-groups - return a list of all optional groups
This command returns a list of all optional groups that this daemon
knows about. Note this returns both supported and unsupported groups. To
find out which ones the daemon can actually support you have to call
"available" / "feature_available" on each member of the returned list.
"""
return self.inner_cmd('available_all_groups')
def help(self, orcmd=None):
"""
help - display a list of commands or help on a command
"""
cmd = 'help'
if orcmd:
cmd += ' %s' % orcmd
return self.inner_cmd(cmd)
def quit(self):
"""
quit - quit guestfish
"""
return self.inner_cmd('quit')
def echo(self, params=None):
"""
echo - display a line of text
This echoes the parameters to the terminal.
"""
cmd = 'echo'
if params:
cmd += ' %s' % params
return self.inner_cmd(cmd)
def echo_daemon(self, words):
"""
echo-daemon - echo arguments back to the client
This command concatenates the list of "words" passed with single spaces
between them and returns the resulting string.
"""
return self.inner_cmd('echo_daemon %s' % words)
def launch(self):
"""
launch - launch the backend
You should call this after configuring the handle (eg. adding drives)
but before performing any actions.
"""
return self.inner_cmd('launch')
def dmesg(self):
"""
dmesg - return kernel messages
This returns the kernel messages ("dmesg" output) from the guest kernel.
This is sometimes useful for extended debugging of problems.
"""
return self.inner_cmd('dmesg')
def version(self):
"""
version - get the library version number
Return the libguestfs version number that the program is linked against.
"""
return self.inner_cmd('version')
def sparse(self, filename, size):
"""
sparse - create a sparse disk image and add
This creates an empty sparse file of the given size, and then adds it so
that it can be further examined.
"""
return self.inner_cmd('sparse %s %s' % (filename, size))
def modprobe(self, modulename):
"""
modprobe - load a kernel module
This loads a kernel module in the appliance.
"""
return self.inner_cmd('modprobe %s' % modulename)
def ping_daemon(self):
"""
ping-daemon - ping the guest daemon
This is a test probe into the guestfs daemon running inside the
hypervisor. Calling this function checks that the daemon responds to the
ping message, without affecting the daemon or attached block device(s)
in any other way.
"""
return self.inner_cmd('ping_daemon')
def sleep(self, secs):
"""
sleep - sleep for some seconds
Sleep for "secs" seconds.
"""
return self.inner_cmd('sleep %s' % secs)
def reopen(self):
"""
reopen - close and reopen libguestfs handle
Close and reopen the libguestfs handle. It is not necessary to use this
normally, because the handle is closed properly when guestfish exits.
However this is occasionally useful for testing.
"""
return self.inner_cmd('reopen')
def time(self, command, args=None):
"""
time - print elapsed time taken to run a command
Run the command as usual, but print the elapsed time afterwards. This
can be useful for benchmarking operations.
"""
cmd = 'time %s' % command
if args:
cmd += args
return self.inner_cmd(cmd)
def config(self, hvparam, hvvalue):
"""
config - add hypervisor parameters
This can be used to add arbitrary hypervisor parameters of the form
*-param value*. Actually it's not quite arbitrary - we prevent you from
setting some parameters which would interfere with parameters that we
use.
"""
return self.inner_cmd('config %s %s' % (hvparam, hvvalue))
def kill_subprocess(self):
"""
kill-subprocess - kill the hypervisor
This kills the hypervisor.
"""
return self.inner_cmd('kill_subprocess')
def set_backend(self, backend):
"""
set-backend - set the backend
Set the method that libguestfs uses to connect to the backend guestfsd
daemon.
"""
return self.inner_cmd('set_backend %s' % backend)
def get_backend(self):
"""
get-backend - get the backend
Return the current backend.
"""
return self.inner_cmd('get_backend')
def shutdown(self):
"""
shutdown - shutdown the hypervisor
This is the opposite of "launch". It performs an orderly shutdown of the
backend process(es). If the autosync flag is set (which is the default)
then the disk image is synchronized.
"""
return self.inner_cmd('shutdown')
def ntfs_3g_probe(self, rw, device):
"""
ntfs-3g-probe - probe NTFS volume
This command runs the ntfs-3g.probe(8) command which probes an NTFS
"device" for mountability. (Not all NTFS volumes can be mounted
read-write, and some cannot be mounted at all).
"""
return self.inner_cmd('ntfs_3g_probe %s %s' % (rw, device))
def event(self, name, eventset, script):
"""
event - register a handler for an event or events
Register a shell script fragment which is executed when an event is
raised. See "guestfs_set_event_callback" in guestfs(3) for a discussion
of the event API in libguestfs.
"""
return self.inner_cmd('event %s %s %s' % (name, eventset, script))
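# Usage sketch (hypothetical handler name): run a shell fragment whenever
# the handle is closed, then inspect the registration:
#
#   gf.event("ev0", "close", "echo handle closed")
#   gf.list_events()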
def list_events(self):
"""
list-events - list event handlers
List the event handlers registered using the guestfish "event" command.
"""
return self.inner_cmd('list_events')
def delete_event(self, name):
"""
delete-event - delete a previously registered event handler
Delete the event handler which was previously registered as "name". If
multiple event handlers were registered with the same name, they are all
deleted.
"""
return self.inner_cmd('delete_event %s' % name)
def set_append(self, append):
"""
set-append - add options to kernel command line
This function is used to add additional options to the libguestfs
appliance kernel command line.
"""
return self.inner_cmd('set_append %s' % append)
def get_append(self):
"""
get-append - get the additional kernel options
Return the additional kernel options which are added to the libguestfs
appliance kernel command line.
"""
return self.inner_cmd('get_append')
def set_smp(self, smp):
"""
set-smp - set number of virtual CPUs in appliance
Change the number of virtual CPUs assigned to the appliance. The default
is 1. Increasing this may improve performance, though often it has no
effect.
"""
return self.inner_cmd('set_smp %s' % smp)
def get_smp(self):
"""
get-smp - get number of virtual CPUs in appliance
This returns the number of virtual CPUs assigned to the appliance.
"""
return self.inner_cmd('get_smp')
def set_pgroup(self, pgroup):
"""
set-pgroup - set process group flag
If "pgroup" is true, child processes are placed into their own process
group.
"""
return self.inner_cmd('set_pgroup %s' % pgroup)
def get_pgroup(self):
"""
get-pgroup - get process group flag
This returns the process group flag.
"""
return self.inner_cmd('get_pgroup')
def set_attach_method(self, backend):
"""
set-attach-method - set the backend
Set the method that libguestfs uses to connect to the backend guestfsd
daemon.
"""
return self.inner_cmd('set_attach_method %s' % backend)
def get_attach_method(self):
"""
get-attach-method - get the backend
Return the current backend.
"""
return self.inner_cmd('get_attach_method')
def set_autosync(self, autosync):
"""
set-autosync - set autosync mode
If "autosync" is true, this enables autosync. Libguestfs will make a
best effort attempt to make filesystems consistent and synchronized when
the handle is closed (also if the program exits without closing
handles).
"""
return self.inner_cmd('set_autosync %s' % autosync)
def get_autosync(self):
"""
get-autosync - get autosync mode
Get the autosync flag.
"""
return self.inner_cmd('get_autosync')
def set_direct(self, direct):
"""
set-direct - enable or disable direct appliance mode
If the direct appliance mode flag is enabled, then stdin and stdout are
passed directly through to the appliance once it is launched.
"""
return self.inner_cmd('set_direct %s' % direct)
def get_direct(self):
"""
get-direct - get direct appliance mode flag
Return the direct appliance mode flag.
"""
return self.inner_cmd('get_direct')
def set_memsize(self, memsize):
"""
set-memsize - set memory allocated to the hypervisor
This sets the memory size in megabytes allocated to the hypervisor. This
only has any effect if called before "launch".
"""
return self.inner_cmd('set_memsize %s' % memsize)
def get_memsize(self):
"""
get-memsize - get memory allocated to the hypervisor
This gets the memory size in megabytes allocated to the hypervisor.
"""
return self.inner_cmd('get_memsize')
def set_path(self, searchpath):
"""
set-path - set the search path
Set the path that libguestfs searches for kernel and initrd.img.
"""
return self.inner_cmd('set_path %s' % searchpath)
def get_path(self):
"""
get-path - get the search path
Return the current search path.
"""
return self.inner_cmd('get_path')
def set_qemu(self, hv):
"""
set-qemu - set the hypervisor binary (usually qemu)
Set the hypervisor binary (usually qemu) that we will use.
"""
return self.inner_cmd('set_qemu %s' % hv)
def get_qemu(self):
"""
get-qemu - get the hypervisor binary (usually qemu)
Return the current hypervisor binary (usually qemu).
"""
return self.inner_cmd('get_qemu')
def set_recovery_proc(self, recoveryproc):
"""
set-recovery-proc - enable or disable the recovery process
If this is called with the parameter "false" then "launch" does not
create a recovery process. The purpose of the recovery process is to
stop runaway hypervisor processes in the case where the main program
aborts abruptly.
"""
return self.inner_cmd('set_recovery_proc %s' % recoveryproc)
def get_recovery_proc(self):
"""
get-recovery-proc - get recovery process enabled flag
Return the recovery process enabled flag.
"""
return self.inner_cmd('get_recovery_proc')
def set_trace(self, trace):
"""
set-trace - enable or disable command traces
If the command trace flag is set to 1, then libguestfs calls, parameters
and return values are traced.
"""
return self.inner_cmd('set_trace %s' % trace)
def get_trace(self):
"""
get-trace - get command trace enabled flag
Return the command trace flag.
"""
return self.inner_cmd('get_trace')
def set_verbose(self, verbose):
"""
set-verbose - set verbose mode
If "verbose" is true, this turns on verbose messages.
"""
return self.inner_cmd('set_verbose %s' % verbose)
def get_verbose(self):
"""
get-verbose - get verbose mode
This returns the verbose messages flag.
"""
return self.inner_cmd('get_verbose')
def get_pid(self):
"""
get-pid - get PID of hypervisor
Return the process ID of the hypervisor. If there is no hypervisor
running, then this will return an error.
"""
return self.inner_cmd('get_pid')
def set_network(self, network):
"""
set-network - set enable network flag
If "network" is true, then the network is enabled in the libguestfs
appliance. The default is false.
"""
return self.inner_cmd('set_network %s' % network)
def get_network(self):
"""
get-network - get enable network flag
This returns the enable network flag.
"""
return self.inner_cmd('get_network')
def setenv(self, VAR, value):
"""
setenv - set an environment variable
Set the environment variable "VAR" to the string "value".
"""
return self.inner_cmd('setenv %s %s' % (VAR, value))
def unsetenv(self, VAR):
"""
unsetenv - unset an environment variable
Remove "VAR" from the environment.
"""
return self.inner_cmd('unsetenv %s' % VAR)
def lcd(self, directory):
"""
lcd - change working directory
Change the local directory, ie. the current directory of guestfish
itself.
"""
return self.inner_cmd('lcd %s' % directory)
def man(self):
"""
man - open the manual
Opens the manual page for guestfish.
"""
return self.inner_cmd('man')
def supported(self):
"""
supported - list supported groups of commands
This command returns a list of the optional groups known to the daemon,
and indicates which ones are supported by this build of the libguestfs
appliance.
"""
return self.inner_cmd('supported')
def extlinux(self, directory):
"""
extlinux - install the SYSLINUX bootloader on an ext2/3/4 or btrfs
filesystem
Install the SYSLINUX bootloader on the device mounted at "directory".
Unlike "syslinux" which requires a FAT filesystem, this can be used on
an ext2/3/4 or btrfs filesystem.
"""
return self.inner_cmd('extlinux %s' % directory)
def syslinux(self, device, directory=None):
"""
syslinux - install the SYSLINUX bootloader
Install the SYSLINUX bootloader on "device".
"""
cmd = 'syslinux %s' % device
if directory:
cmd += ' directory:%s' % directory
return self.inner_cmd(cmd)
def feature_available(self, groups):
"""
feature-available - test availability of some parts of the API
This is the same as "available", but unlike that call it returns a
simple true/false boolean result, instead of throwing an exception if a
feature is not found. For other documentation see "available".
"""
return self.inner_cmd('feature_available %s' % groups)
def get_program(self):
"""
get-program - get the program name
Get the program name. See "set_program".
"""
return self.inner_cmd('get_program')
def set_program(self, program):
"""
set-program - set the program name
Set the program name. This is an informative string which the main
program may optionally set in the handle.
"""
return self.inner_cmd('set_program %s' % program)
def add_drive_scratch(self, size, name=None, label=None):
"""
add-drive-scratch - add a temporary scratch drive
This command adds a temporary scratch drive to the handle. The "size"
parameter is the virtual size (in bytes). The scratch drive is blank
initially (all reads return zeroes until you start writing to it). The
drive is deleted when the handle is closed.
"""
cmd = 'add_drive_scratch %s' % size
if name:
cmd += ' name:%s' % name
if label:
cmd += ' label:%s' % label
return self.inner_cmd(cmd)
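# Usage sketch: add a 1 GiB blank scratch disk before launch; "size" is in
# bytes and the disk is deleted when the handle is closed:
#
#   gf.add_drive_scratch(1024 * 1024 * 1024, label="scratch")
#   gf.launch()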
def drop_caches(self, whattodrop):
"""
drop-caches - drop kernel page cache, dentries and inodes
The "drop-caches" command instructs the guest kernel to drop its page
cache, and/or dentries and inode caches. The parameter "whattodrop"
tells the kernel what precisely to drop.
"""
return self.inner_cmd("drop-caches %s" % whattodrop)
def case_sensitive_path(self, path):
"""
case-sensitive-path - return true path on case-insensitive filesystem
The "drop-caches" command can be used to resolve case insensitive
paths on a filesystem which is case sensitive. The use case is to
resolve paths which you have read from Windows configuration files or
the Windows Registry, to the true path.
"""
return self.inner_cmd("case-sensitive-path '%s'" % path)
def command(self, cmd):
"""
command - run a command from the guest filesystem
This call runs a command from the guest filesystem. The filesystem must
be mounted, and must contain a compatible operating system (ie.
something Linux, with the same or compatible processor architecture).
"""
return self.inner_cmd("command '%s'" % cmd)
def command_lines(self, cmd):
"""
command-lines - run a command, returning lines
This is the same as "command", but splits the result into a list of
lines.
"""
return self.inner_cmd("command-lines '%s'" % cmd)
def sh(self, cmd):
"""
sh - run a command via the shell
This call runs a command from the guest filesystem via the guest's
"/bin/sh".
"""
return self.inner_cmd("sh '%s'" % cmd)
def sh_lines(self, cmd):
"""
sh-lines - run a command via the shell returning lines
This is the same as "sh", but splits the result into a list of
lines.
"""
return self.inner_cmd("sh-lines '%s'" % cmd)
def zero(self, device):
"""
zero - write zeroes to the device
This command writes zeroes over the first few blocks of "device".
"""
return self.inner_cmd("zero '%s'" % device)
def zero_device(self, device):
"""
zero-device - write zeroes to an entire device
This command writes zeroes over the entire "device". Compare with "zero"
which just zeroes the first few blocks of a device.
"""
return self.inner_cmd("zero-device '%s'" % device)
def grep(self, regex, path):
"""
grep - return lines matching a pattern
This calls the external "grep" program and returns the matching lines.
"""
return self.inner_cmd("grep '%s' '%s'" % (regex, path))
def grepi(self, regex, path):
"""
grepi - return lines matching a pattern
This calls the external "grep -i" program and returns the matching lines.
"""
return self.inner_cmd("grepi '%s' '%s'" % (regex, path))
def fgrep(self, pattern, path):
"""
fgrep - return lines matching a pattern
This calls the external "fgrep" program and returns the matching lines.
"""
return self.inner_cmd("fgrep '%s' '%s'" % (pattern, path))
def fgrepi(self, pattern, path):
"""
fgrepi - return lines matching a pattern
This calls the external "fgrep -i" program and returns the matching lines.
"""
return self.inner_cmd("fgrepi '%s' '%s'" % (pattern, path))
def egrep(self, regex, path):
"""
egrep - return lines matching a pattern
This calls the external "egrep" program and returns the matching lines.
"""
return self.inner_cmd("egrep '%s' '%s'" % (regex, path))
def egrepi(self, regex, path):
"""
egrepi - return lines matching a pattern
This calls the external "egrep -i" program and returns the matching lines.
"""
return self.inner_cmd("egrepi '%s' '%s'" % (regex, path))
def zgrep(self, regex, path):
"""
zgrep - return lines matching a pattern
This calls the external "zgrep" program and returns the matching lines.
"""
return self.inner_cmd("zgrep '%s' '%s'" % (regex, path))
def zgrepi(self, regex, path):
"""
zgrepi - return lines matching a pattern
This calls the external "zgrep -i" program and returns the matching lines.
"""
return self.inner_cmd("zgrepi '%s' '%s'" % (regex, path))
def zfgrep(self, pattern, path):
"""
zfgrep - return lines matching a pattern
This calls the external "zfgrep" program and returns the matching lines.
"""
return self.inner_cmd("zfgrep '%s' '%s'" % (pattern, path))
def zfgrepi(self, pattern, path):
"""
zfgrepi - return lines matching a pattern
This calls the external "zfgrep -i" program and returns the matching lines.
"""
return self.inner_cmd("zfgrepi '%s' '%s'" % (pattern, path))
def zegrep(self, regex, path):
"""
zegrep - return lines matching a pattern
This calls the external "zegrep" program and returns the matching lines.
"""
return self.inner_cmd("zegrep '%s' '%s'" % (regex, path))
def zegrepi(self, regex, path):
"""
zegrepi - return lines matching a pattern
This calls the external "zegrep -i" program and returns the matching lines.
"""
return self.inner_cmd("zegrepi '%s' '%s'" % (regex, path))
def compress_out(self, ctype, file, zfile):
"""
compress-out - output compressed file
This command compresses "file" and writes it out to the local file
"zfile".
The compression program used is controlled by the "ctype" parameter.
Currently this includes: "compress", "gzip", "bzip2", "xz" or "lzop".
Some compression types may not be supported by particular builds of
libguestfs, in which case you will get an error containing the substring
"not supported".
The optional "level" parameter controls compression level. The meaning
and default for this parameter depends on the compression program being
used.
"""
return self.inner_cmd("compress-out '%s' '%s' '%s'" % (ctype, file, zfile))
def compress_device_out(self, ctype, device, zdevice):
"""
compress-device-out - output compressed device
This command compresses "device" and writes it out to the local file
"zdevice".
The "ctype" and optional "level" parameters have the same meaning as in
"compress_out".
"""
return self.inner_cmd("compress-device-out '%s' '%s' '%s'" % (ctype, device, zdevice))
def glob(self, command, args):
"""
glob - expand wildcards in command
Expand wildcards in any paths in the args list, and run "command"
repeatedly on each matching path.
"""
return self.inner_cmd("glob '%s' '%s'" % (command, args))
def glob_expand(self, path):
"""
glob-expand - expand a wildcard path
This command searches for all the pathnames matching "pattern" according
to the wildcard expansion rules used by the shell.
"""
return self.inner_cmd("glob-expand '%s'" % path)
def mkmountpoint(self, exemptpath):
"""
mkmountpoint - create a mountpoint
"mkmountpoint" and "rmmountpoint" are specialized calls that can be used
to create extra mountpoints before mounting the first filesystem.
"""
return self.inner_cmd("mkmountpoint '%s'" % exemptpath)
def rmmountpoint(self, exemptpath):
"""
rmmountpoint - remove a mountpoint
This call removes a mountpoint that was previously created with
"mkmountpoint". See "mkmountpoint" for full details.
"""
return self.inner_cmd("rmmountpoint '%s'" % exemptpath)
def parse_environment(self):
"""
parse-environment - parse the environment and set handle flags
accordingly
Parse the program's environment and set flags in the handle accordingly.
For example if "LIBGUESTFS_DEBUG=1" then the 'verbose' flag is set in
the handle.
"""
return self.inner_cmd("parse_environment")
def parse_environment_list(self, environment):
"""
parse-environment-list - parse the environment and set handle flags
accordingly
Parse the list of strings in the argument "environment" and set flags in
the handle accordingly. For example if "LIBGUESTFS_DEBUG=1" is a string
in the list, then the 'verbose' flag is set in the handle.
"""
return self.inner_cmd("parse_environment_list '%s'" % environment)
def rsync(self, src, dest, args):
"""
rsync - synchronize the contents of two directories
This call may be used to copy or synchronize two directories under the
same libguestfs handle. This uses the rsync(1) program which uses a fast
algorithm that avoids copying files unnecessarily.
"""
return self.inner_cmd("rsync %s %s %s" % (src, dest, args))
def rsync_in(self, src, dest, args):
"""
rsync-in - synchronize host or remote filesystem with filesystem
This call may be used to copy or synchronize the filesystem on the host
or on a remote computer with the filesystem within libguestfs. This uses
the rsync(1) program which uses a fast algorithm that avoids copying
files unnecessarily.
"""
return self.inner_cmd("rsync-in %s %s %s" % (src, dest, args))
def rsync_out(self, src, dest, args):
"""
rsync-out - synchronize filesystem with host or remote filesystem
This call may be used to copy or synchronize the filesystem within
libguestfs with a filesystem on the host or on a remote computer. This
uses the rsync(1) program which uses a fast algorithm that avoids
copying files unnecessarily.
"""
return self.inner_cmd("rsync-out %s %s %s" % (src, dest, args))
def utimens(self, path, atsecs, atnsecs, mtsecs, mtnsecs):
"""
utimens - set timestamp of a file with nanosecond precision
This command sets the timestamps of a file with nanosecond precision.
"""
return self.inner_cmd("utimens '%s' '%s' '%s' '%s' '%s'" % (path,
atsecs, atnsecs, mtsecs, mtnsecs))
def utsname(self):
"""
utsname - appliance kernel version
This returns the kernel version of the appliance, where this is
available. This information is only useful for debugging. Nothing in the
returned structure is defined by the API.
"""
return self.inner_cmd("utsname")
def grub_install(self, root, device):
"""
grub-install - install GRUB 1
This command installs GRUB 1 (the Grand Unified Bootloader) on "device",
with the root directory being "root".
"""
return self.inner_cmd("grub-install %s %s" % (root, device))
def initrd_cat(self, initrdpath, filename):
"""
initrd-cat - list the contents of a single file in an initrd
This command unpacks the file "filename" from the initrd file called
"initrdpath". The filename must be given *without* the initial "/"
character.
"""
return self.inner_cmd("initrd-cat %s %s" % (initrdpath, filename))
def initrd_list(self, path):
"""
initrd-list - list files in an initrd
This command lists out files contained in an initrd.
"""
return self.inner_cmd("initrd-list %s" % path)
def aug_init(self, root, flags):
"""
aug-init - create a new Augeas handle
Create a new Augeas handle for editing configuration files. If
there was any previous Augeas handle associated with this guestfs
session, then it is closed.
"""
return self.inner_cmd("aug-init %s %s" % (root, flags))
def aug_clear(self, augpath):
"""
aug-clear - clear Augeas path
Set the value associated with "path" to "NULL". This is the same as the
augtool(1) "clear" command.
"""
return self.inner_cmd("aug-clear %s" % augpath)
def aug_set(self, augpath, val):
"""
aug-set - set Augeas path to value
Set the value associated with "path" to "val".
In the Augeas API, it is possible to clear a node by setting the value
to NULL. Due to an oversight in the libguestfs API you cannot do that
with this call. Instead you must use the "aug_clear" call.
"""
return self.inner_cmd("aug-set %s %s" % (augpath, val))
def aug_get(self, augpath):
"""
aug-get - look up the value of an Augeas path
Look up the value associated with "path". If "path" matches exactly one
node, the "value" is returned.
"""
return self.inner_cmd("aug-get %s" % augpath)
def aug_close(self):
"""
aug-close - close the current Augeas handle and free up any resources
used by it.
After calling this, you have to call "aug_init" again before you can
use any other Augeas functions.
"""
return self.inner_cmd("aug-close")
def aug_defnode(self, node, expr, value):
"""
aug-defnode - define an Augeas node
Defines a variable "name" whose value is the result of evaluating "expr".
If "expr" evaluates to an empty nodeset, a node is created, equivalent
to calling "aug_set" "expr", "value". "name" will be the nodeset
containing that single node.
On success this returns a pair containing the number of nodes in the
nodeset, and a boolean flag if a node was created.
"""
return self.inner_cmd("aug-defnode %s %s %s" % (node, expr, value))
def aug_defvar(self, name, expr):
"""
aug-defvar - define an Augeas variable
Defines an Augeas variable "name" whose value is the result of evaluating "expr".
If "expr" is NULL, then "name" is undefined.
On success this returns the number of nodes in "expr", or 0 if "expr" evaluates to
something which is not a nodeset.
"""
return self.inner_cmd("aug-defvar %s %s" % (name, expr))
def aug_ls(self, augpath):
"""
aug-ls - list Augeas nodes under augpath
This is just a shortcut for listing "aug_match" "path/*" and sorting the resulting nodes
into alphabetical order.
"""
return self.inner_cmd("aug-ls %s" % augpath)
def aug_insert(self, augpath, label, before):
"""
aug-insert - insert a sibling Augeas node
Create a new sibling "label" for "path", inserting it into the tree before or after
"path" (depending on the boolean flag "before").
"path" must match exactly one existing node in the tree, and "label"
must be a label, ie. not contain "/", "*" or end with a bracketed index "[N]".
"""
return self.inner_cmd("aug-insert %s %s %s" % (augpath, label, before))
def aug_match(self, augpath):
"""
aug-match - return Augeas nodes which match augpath
Returns a list of paths which match the path expression "path". The returned
paths are sufficiently qualified so that they match exactly one node in the current tree.
"""
return self.inner_cmd("aug-match %s" % augpath)
def aug_mv(self, src, dest):
"""
aug-mv - move Augeas node
Move the node "src" to "dest". "src" must match exactly one node. "dest" is overwritten
if it exists.
"""
return self.inner_cmd("aug-mv %s %s" % (src, dest))
def aug_rm(self, augpath):
"""
aug-rm - remove an Augeas path
Remove "path" and all of its children.
On success this returns the number of entries which were removed.
"""
return self.inner_cmd("aug-rm %s" % augpath)
def aug_label(self, augpath):
"""
aug-label - return the label from an Augeas path expression
The label (name of the last element) of the Augeas path expression "augpath" is returned.
"augpath" must match exactly one node, else this function returns an error.
"""
return self.inner_cmd("aug-label %s" % augpath)
def aug_setm(self, base, sub, val):
"""
aug-setm - set multiple Augeas nodes
Change multiple Augeas nodes (matching "sub" relative to "base") to "val".
"""
return self.inner_cmd("aug-setm %s %s %s" % (base, sub, val))
def aug_load(self):
"""
aug-load - load files into the tree
Load files into the tree.
See "aug_load" in the Augeas documentation for the full gory details.
"""
return self.inner_cmd("aug-load")
def aug_save(self):
"""
aug-save - write all pending Augeas changes to disk
This writes all pending changes to disk.
The flags which were passed to "aug_init" affect exactly how files are saved.
"""
return self.inner_cmd("aug-save")
def libguest_test_tool_cmd(qemuarg=None, qemudirarg=None,
timeoutarg=None, ignore_status=True,
debug=False, timeout=60):
"""
Execute libguestfs-test-tool command.
:param qemuarg: the qemu option
:param qemudirarg: the qemudir option
:param timeoutarg: the timeout option
:return: a CmdResult object
:raise: LibguestfsCmdError if ignore_status is False
"""
cmd = "libguestfs-test-tool"
if qemuarg is not None:
cmd += " --qemu '%s'" % qemuarg
if qemudirarg is not None:
cmd += " --qemudir '%s'" % qemudirarg
if timeoutarg is not None:
cmd += " --timeout %s" % timeoutarg
# May raise LibguestfsCmdError if ignore_status is False.
return lgf_command(cmd, ignore_status, debug, timeout)
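# Usage sketch (argument values are assumptions; lgf_command returns a
# CmdResult as described in the docstring):
#   result = libguest_test_tool_cmd(timeoutarg=120, debug=True)
#   if result.exit_status == 0:
#       print("libguestfs appliance boots correctly")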
def virt_edit_cmd(disk_or_domain, file_path, is_disk=False, disk_format=None,
options=None, extra=None, expr=None, connect_uri=None,
ignore_status=True, debug=False, timeout=60):
"""
Execute virt-edit command to check whether it works.
Since virt-edit needs user interaction, maintain and return
a session if no exception is raised after the command has been executed.
:param disk_or_domain: an image path or a domain name.
:param file_path: the file to be edited inside the image.
:param is_disk: whether disk_or_domain is disk or domain
:param disk_format: when is_disk is true, add a format if it is set.
:param options: the options of virt-edit.
:param extra: additional suffix of command.
:return: a session executing the virt-edit command.
"""
# disk_or_domain and file_path are necessary parameters.
cmd = "virt-edit"
if connect_uri is not None:
cmd += " -c %s" % connect_uri
if is_disk:
# In recent versions, --format must come before -a
if disk_format is not None:
cmd += " --format=%s" % disk_format
cmd += " -a %s" % disk_or_domain
else:
cmd += " -d %s" % disk_or_domain
cmd += " %s" % file_path
if options is not None:
cmd += " %s" % options
if extra is not None:
cmd += " %s" % extra
if expr is not None:
cmd += " -e '%s'" % expr
return lgf_command(cmd, ignore_status, debug, timeout)
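# Usage sketch (domain name and sed expression are illustrative):
#   session = virt_edit_cmd('guest1', '/etc/fstab',
#                           expr='s/defaults/defaults,noatime/')
# Using expr makes the edit non-interactive, which is what makes this
# helper suitable for automated tests.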
def virt_clone_cmd(original, newname=None, autoclone=False, **dargs):
"""
Clone existing virtual machine images.
:param original: Name of the original guest to be cloned.
:param newname: Name of the new guest virtual machine instance.
:param autoclone: Generate a new guest name, and paths for new storage.
:param dargs: Standardized function API keywords. There are many
options not listed; they can be passed in dargs.
"""
def storage_config(cmd, options):
"""Configure options for storage"""
# files should be a list
files = options.get("files", [])
if len(files):
for file in files:
cmd += " --file '%s'" % file
if options.get("nonsparse") is not None:
cmd += " --nonsparse"
return cmd
def network_config(cmd, options):
"""Configure options for network"""
mac = options.get("mac")
if mac is not None:
cmd += " --mac '%s'" % mac
return cmd
cmd = "virt-clone --original '%s'" % original
if newname is not None:
cmd += " --name '%s'" % newname
if autoclone is True:
cmd += " --auto-clone"
# Many more options can be added if necessary.
cmd = storage_config(cmd, dargs)
cmd = network_config(cmd, dargs)
ignore_status = dargs.get("ignore_status", True)
debug = dargs.get("debug", False)
timeout = dargs.get("timeout", 180)
return lgf_command(cmd, ignore_status, debug, float(timeout))
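# Usage sketch (guest name and image path are assumptions):
#   result = virt_clone_cmd('guest1', newname='guest1-clone',
#                           files=['/var/lib/libvirt/images/clone.img'],
#                           debug=True)
# Extra options such as "mac" travel through dargs and are applied by
# the nested storage_config/network_config helpers.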
def virt_sparsify_cmd(indisk, outdisk, compress=False, convert=None,
format=None, ignore_status=True, debug=False,
timeout=60):
"""
Make a virtual machine disk sparse.
:param indisk: The source disk to be sparsified.
:param outdisk: The destination disk.
"""
cmd = "virt-sparsify"
if compress is True:
cmd += " --compress"
if format is not None:
cmd += " --format '%s'" % format
cmd += " '%s'" % indisk
if convert is not None:
cmd += " --convert '%s'" % convert
cmd += " '%s'" % outdisk
# More options can be added if necessary.
return lgf_command(cmd, ignore_status, debug, timeout)
def virt_resize_cmd(indisk, outdisk, **dargs):
"""
Resize a virtual machine disk.
:param indisk: The source disk to be resized
:param outdisk: The destination disk.
"""
cmd = "virt-resize"
ignore_status = dargs.get("ignore_status", True)
debug = dargs.get("debug", False)
timeout = dargs.get("timeout", 60)
resize = dargs.get("resize")
resized_size = dargs.get("resized_size", "0")
expand = dargs.get("expand")
shrink = dargs.get("shrink")
ignore = dargs.get("ignore")
delete = dargs.get("delete")
if resize is not None:
cmd += " --resize %s=%s" % (resize, resized_size)
if expand is not None:
cmd += " --expand %s" % expand
if shrink is not None:
cmd += " --shrink %s" % shrink
if ignore is not None:
cmd += " --ignore %s" % ignore
if delete is not None:
cmd += " --delete %s" % delete
cmd += " %s %s" % (indisk, outdisk)
return lgf_command(cmd, ignore_status, debug, timeout)
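# Usage sketch (disk paths and partition name are illustrative):
#   result = virt_resize_cmd('old.img', 'new.img',
#                            expand='/dev/sda2', debug=True)
# Only the options present in dargs are appended to the command line.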
def virt_list_partitions_cmd(disk_or_domain, long=False, total=False,
human_readable=False, ignore_status=True,
debug=False, timeout=60):
"""
"virt-list-partitions" is a command line tool to list the partitions
that are contained in a virtual machine or disk image.
:param disk_or_domain: a disk or a domain to be mounted
"""
cmd = "virt-list-partitions %s" % disk_or_domain
if long is True:
cmd += " --long"
if total is True:
cmd += " --total"
if human_readable is True:
cmd += " --human-readable"
return lgf_command(cmd, ignore_status, debug, timeout)
def guestmount(disk_or_domain, mountpoint, inspector=False,
readonly=False, **dargs):
"""
guestmount - Mount a guest filesystem on the host using
FUSE and libguestfs.
:param disk_or_domain: a disk or a domain to be mounted
If you need to mount a disk, set is_disk to True in dargs
:param mountpoint: the mountpoint of filesystems
:param inspector: mount all filesystems automatically
:param readonly: whether to mount filesystems read-only
"""
def get_special_mountpoint(cmd, options):
special_mountpoints = options.get("special_mountpoints", [])
for mountpoint in special_mountpoints:
cmd += " -m %s" % mountpoint
return cmd
cmd = "guestmount"
ignore_status = dargs.get("ignore_status", True)
debug = dargs.get("debug", False)
timeout = dargs.get("timeout", 60)
# If you need to mount a disk, set is_disk to True
is_disk = dargs.get("is_disk", False)
if is_disk is True:
cmd += " -a %s" % disk_or_domain
else:
cmd += " -d %s" % disk_or_domain
if inspector is True:
cmd += " -i"
if readonly is True:
cmd += " --ro"
cmd = get_special_mountpoint(cmd, dargs)
cmd += " %s" % mountpoint
return lgf_command(cmd, ignore_status, debug, timeout)
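# Usage sketch (mountpoint is an assumption):
#   result = guestmount('guest1', '/mnt/guest1', inspector=True,
#                       readonly=True)
# With is_disk=True in dargs, disk_or_domain is treated as an image
# path (-a) instead of a libvirt domain name (-d).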
def virt_filesystems(disk_or_domain, **dargs):
"""
virt-filesystems - List filesystems, partitions, block devices,
LVM in a virtual machine or disk image
:param disk_or_domain: a disk or a domain to be mounted
If you need to mount a disk, set is_disk to True in dargs
"""
def get_display_type(cmd, options):
all = options.get("all", False)
filesystems = options.get("filesystems", False)
extra = options.get("extra", False)
partitions = options.get("partitions", False)
block_devices = options.get("block_devices", False)
logical_volumes = options.get("logical_volumes", False)
volume_groups = options.get("volume_groups", False)
physical_volumes = options.get("physical_volumes", False)
long_format = options.get("long_format", False)
human_readable = options.get("human_readable", False)
if all is True:
cmd += " --all"
if filesystems is True:
cmd += " --filesystems"
if extra is True:
cmd += " --extra"
if partitions is True:
cmd += " --partitions"
# Note: virt-filesystems long options are hyphenated on the CLI,
# e.g. --block-devices, not underscored variants.
if block_devices is True:
cmd += " --block-devices"
if logical_volumes is True:
cmd += " --logical-volumes"
if volume_groups is True:
cmd += " --volume-groups"
if physical_volumes is True:
cmd += " --physical-volumes"
if long_format is True:
cmd += " --long"
if human_readable is True:
cmd += " -h"
return cmd
cmd = "virt-filesystems"
# If you need to mount a disk, set is_disk to True
is_disk = dargs.get("is_disk", False)
ignore_status = dargs.get("ignore_status", True)
debug = dargs.get("debug", False)
timeout = dargs.get("timeout", 60)
if is_disk is True:
cmd += " -a %s" % disk_or_domain
else:
cmd += " -d %s" % disk_or_domain
cmd = get_display_type(cmd, dargs)
return lgf_command(cmd, ignore_status, debug, timeout)
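# Usage sketch (the domain name is illustrative; the keyword flags map
# onto the CLI options assembled in get_display_type above):
#   result = virt_filesystems('guest1', all=True, long_format=True,
#                             human_readable=True)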
def virt_list_partitions(disk_or_domain, long=False, total=False,
human_readable=False, ignore_status=True,
debug=False, timeout=60):
"""
"virt-list-partitions" is a command line tool to list the partitions
that are contained in a virtual machine or disk image.
:param disk_or_domain: a disk or a domain to be mounted
"""
cmd = "virt-list-partitions %s" % disk_or_domain
if long is True:
cmd += " --long"
if total is True:
cmd += " --total"
if human_readable is True:
cmd += " --human-readable"
return lgf_command(cmd, ignore_status, debug, timeout)
def virt_list_filesystems(disk_or_domain, format=None, long=False,
all=False, ignore_status=True, debug=False,
timeout=60):
"""
"virt-list-filesystems" is a command line tool to list the filesystems
that are contained in a virtual machine or disk image.
:param disk_or_domain: a disk or a domain to be mounted
"""
cmd = "virt-list-filesystems %s" % disk_or_domain
if format is not None:
cmd += " --format %s" % format
if long is True:
cmd += " --long"
if all is True:
cmd += " --all"
return lgf_command(cmd, ignore_status, debug, timeout)
def virt_df(disk_or_domain, ignore_status=True, debug=False, timeout=60):
"""
"virt-df" is a command line tool to display free space on
virtual machine filesystems.
"""
cmd = "virt-df %s" % disk_or_domain
return lgf_command(cmd, ignore_status, debug, timeout)
def virt_sysprep_cmd(disk_or_domain, options=None,
extra=None, ignore_status=True,
debug=False, timeout=600):
"""
Execute virt-sysprep command to reset or unconfigure a virtual machine.
:param disk_or_domain: an image path or a domain name.
:param options: the options of virt-sysprep.
:return: a CmdResult object.
"""
if os.path.isfile(disk_or_domain):
disk_or_domain = "-a " + disk_or_domain
else:
disk_or_domain = "-d " + disk_or_domain
cmd = "virt-sysprep %s" % (disk_or_domain)
if options is not None:
cmd += " %s" % options
if extra is not None:
cmd += " %s" % extra
return lgf_command(cmd, ignore_status, debug, timeout)
def virt_cat_cmd(disk_or_domain, file_path, options=None, ignore_status=True,
debug=False, timeout=60):
"""
Execute virt-cat command to print the contents of a guest file.
:param disk_or_domain: an image path or a domain name.
:param file_path: the file whose contents to print.
:param options: the options of virt-cat.
:return: a CmdResult object.
"""
# disk_or_domain and file_path are necessary parameters.
if os.path.isfile(disk_or_domain):
disk_or_domain = "-a " + disk_or_domain
else:
disk_or_domain = "-d " + disk_or_domain
cmd = "virt-cat %s '%s'" % (disk_or_domain, file_path)
if options is not None:
cmd += " %s" % options
return lgf_command(cmd, ignore_status, debug, timeout)
def virt_tar_in(disk_or_domain, tar_file, destination, is_disk=False,
ignore_status=True, debug=False, timeout=60):
"""
"virt-tar-in" unpacks an uncompressed tarball into a virtual machine
disk image or named libvirt domain.
"""
cmd = "virt-tar-in"
if is_disk is True:
cmd += " -a %s" % disk_or_domain
else:
cmd += " -d %s" % disk_or_domain
cmd += " %s %s" % (tar_file, destination)
return lgf_command(cmd, ignore_status, debug, timeout)
def virt_tar_out(disk_or_domain, directory, tar_file, is_disk=False,
ignore_status=True, debug=False, timeout=60):
"""
"virt-tar-out" packs a virtual machine disk image directory into a tarball.
"""
cmd = "virt-tar-out"
if is_disk is True:
cmd += " -a %s" % disk_or_domain
else:
cmd += " -d %s" % disk_or_domain
cmd += " %s %s" % (directory, tar_file)
return lgf_command(cmd, ignore_status, debug, timeout)
def virt_copy_in(disk_or_domain, file, destination, is_disk=False,
ignore_status=True, debug=False, timeout=60):
"""
"virt-copy-in" copies files and directories from the local disk into a
virtual machine disk image or named libvirt domain.
#TODO: expand file to files
"""
cmd = "virt-copy-in"
if is_disk is True:
cmd += " -a %s" % disk_or_domain
else:
cmd += " -d %s" % disk_or_domain
cmd += " %s %s" % (file, destination)
return lgf_command(cmd, ignore_status, debug, timeout)
def virt_copy_out(disk_or_domain, file_path, localdir, is_disk=False,
ignore_status=True, debug=False, timeout=60):
"""
"virt-copy-out" copies files and directories out of a virtual machine
disk image or named libvirt domain.
"""
cmd = "virt-copy-out"
if is_disk is True:
cmd += " -a %s" % disk_or_domain
else:
cmd += " -d %s" % disk_or_domain
cmd += " %s %s" % (file_path, localdir)
return lgf_command(cmd, ignore_status, debug, timeout)
def virt_format(disk, filesystem=None, image_format=None, lvm=None,
partition=None, wipe=False, ignore_status=False,
debug=False, timeout=60):
"""
Virt-format takes an existing disk file (or it can be a host partition,
LV etc), erases all data on it, and formats it as a blank disk.
"""
cmd = "virt-format"
if filesystem is not None:
cmd += " --filesystem=%s" % filesystem
if image_format is not None:
cmd += " --format=%s" % image_format
if lvm is not None:
cmd += " --lvm=%s" % lvm
if partition is not None:
cmd += " --partition=%s" % partition
if wipe is True:
cmd += " --wipe"
cmd += " -a %s" % disk
return lgf_command(cmd, ignore_status, debug, timeout)
def virt_inspector(disk_or_domain, is_disk=False, ignore_status=True,
debug=False, timeout=60):
"""
virt-inspector2 examines a virtual machine or disk image and tries to
determine the version of the operating system and other information
about the virtual machine.
"""
# virt-inspector has been replaced by virt-inspector2 in RHEL7
# Check it here to choose which one to be used.
cmd = lgf_cmd_check("virt-inspector2")
if cmd is None:
cmd = "virt-inspector"
# If you need to mount a disk, set is_disk to True
if is_disk is True:
cmd += " -a %s" % disk_or_domain
else:
cmd += " -d %s" % disk_or_domain
return lgf_command(cmd, ignore_status, debug, timeout)
def virt_sysprep_operations():
"""Get virt-sysprep support operation"""
sys_list_cmd = "virt-sysprep --list-operations"
result = lgf_command(sys_list_cmd, ignore_status=False)
oper_info = result.stdout_text.strip()
oper_dict = {}
for oper_item in oper_info.splitlines():
oper = oper_item.split("*")[0].strip()
desc = oper_item.split("*")[-1].strip()
oper_dict[oper] = desc
return oper_dict
def virt_cmd_contain_opt(virt_cmd, opt):
""" Check if opt is supported by virt-command"""
if lgf_cmd_check(virt_cmd) is None:
raise LibguestfsCmdError
if not opt.startswith('-'):
raise ValueError("Format should be '--a' or '-a', not '%s'" % opt)
virt_help_cmd = virt_cmd + " --help"
result = lgf_command(virt_help_cmd, ignore_status=False)
# "--add" will not equal to "--addxxx"
opt = " " + opt.strip() + " "
return (result.stdout_text.count(opt) != 0)
def virt_ls_cmd(disk_or_domain, file_dir_path, is_disk=False, options=None,
extra=None, connect_uri=None, ignore_status=True,
debug=False, timeout=60):
"""
Execute virt-ls command to check whether a file exists.
:param disk_or_domain: an image path or a domain name.
:param file_dir_path: the file or directory to check.
"""
# disk_or_domain and file_dir_path are necessary parameters.
cmd = "virt-ls"
if connect_uri is not None:
cmd += " -c %s" % connect_uri
if is_disk:
cmd += " -a %s" % disk_or_domain
else:
cmd += " -d %s" % disk_or_domain
cmd += " %s" % file_dir_path
if options is not None:
cmd += " %s" % options
if extra is not None:
cmd += " %s" % extra
return lgf_command(cmd, ignore_status, debug, timeout)
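# Usage sketch for virt_ls_cmd (domain and path are illustrative):
#   result = virt_ls_cmd('guest1', '/home', options='-lR')
#   if result.exit_status == 0:
#       print(result.stdout_text)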
|
clebergnu/avocado-vt
|
virttest/utils_libguestfs.py
|
Python
|
gpl-2.0
| 147,061
|
import fp
from fp import cfg
class bga(fp.base):
"""Generator for ball grid array footprints"""
def __init__(self, name, model, description, tags, package_width, package_height, pad_diameter, pad_grid, pad_distance, count_x, count_y):
super(bga, self).__init__(name, model, description, tags)
|
PiWare/kicad_library
|
script/fpgen/bga.py
|
Python
|
gpl-2.0
| 299
|
## begin license ##
#
# "Meresco Components" are components to build searchengines, repositories
# and archives, based on "Meresco Core".
#
# Copyright (C) 2007-2009 SURF Foundation. http://www.surf.nl
# Copyright (C) 2007 SURFnet. http://www.surfnet.nl
# Copyright (C) 2007-2010 Seek You Too (CQ2) http://www.cq2.nl
# Copyright (C) 2007-2009 Stichting Kennisnet Ict op school. http://www.kennisnetictopschool.nl
# Copyright (C) 2010, 2020-2021 Stichting Kennisnet https://www.kennisnet.nl
# Copyright (C) 2012, 2020-2021 Seecr (Seek You Too B.V.) https://seecr.nl
# Copyright (C) 2020-2021 Data Archiving and Network Services https://dans.knaw.nl
# Copyright (C) 2020-2021 SURF https://www.surf.nl
# Copyright (C) 2020-2021 The Netherlands Institute for Sound and Vision https://beeldengeluid.nl
#
# This file is part of "Meresco Components"
#
# "Meresco Components" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "Meresco Components" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "Meresco Components"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
from meresco.core import Transparent
from weightless.core import compose, Yield
from time import strftime, gmtime
from urllib.parse import urlsplit
class DevNull(object):
def write(self, *args, **kwargs):
pass
def flush(self, *args, **kwargs):
pass
logline = '%(ipaddress)s - %(user)s [%(timestamp)s] "%(Method)s %(path)s%(query)s HTTP/1.0" %(status)s %(responseSize)s "%(Referer)s" "%(UserAgent)s"\n'
class ApacheLogger(Transparent):
def __init__(self, outputStream=DevNull()):
Transparent.__init__(self)
self._outputStream = outputStream
def handleRequest(self, *args, **kwargs):
status = 0
for line in compose(self.all.handleRequest(*args, **kwargs)):
if line is Yield or callable(line):
yield line
continue
if not status and asBytes(line).startswith(b'HTTP/1.'):
status = str(asBytes(line)[len(b'HTTP/1.0 '):][:3], encoding='utf-8')
self._log(status, **kwargs)
yield line
def logHttpError(self, ResponseCode, RequestURI, *args, **kwargs):
scheme, netloc, path, query, fragments = urlsplit(RequestURI)
self._log(ResponseCode, path=path, query=query, **kwargs)
self.do.logHttpError(ResponseCode=ResponseCode, RequestURI=RequestURI, **kwargs)
def _log(self, status, Method, Client, query, Headers, path, **kwargs):
ipaddress = Client[0]
timestamp = strftime('%d/%b/%Y:%H:%M:%S +0000', gmtime())
responseSize = '??'
user = '-'
query = query and '?%s' % query or ''
Referer = Headers.get('Referer', '-')
UserAgent = Headers.get('User-Agent', '-')
self._outputStream.write(logline % locals())
self._outputStream.flush()
def asBytes(s):
if type(s) is str:
return bytes(s, encoding='utf-8')
return s
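# A minimal wiring sketch (illustrative only; in a real Meresco server
# ApacheLogger sits above the component that yields the HTTP response):
#   import sys
#   logger = ApacheLogger(outputStream=sys.stdout)
# Every handled request is then written in Apache combined-log style
# using the logline template above.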
|
seecr/meresco-components
|
meresco/components/http/apachelogger.py
|
Python
|
gpl-2.0
| 3,479
|
'''
doos: A multi-threaded server for running client-provided macros in OpenOffice.org
Copyright (C) 2008 - 2009 therudegesture and dustymugs
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, a copy is available at
http://www.gnu.org/licenses/gpl-3.0-standalone.html
'''
# used by requestHandler
class jobStatus:
'''
This is an enumeration for the status of jobs
'''
notFound, error, enqueued, dequeued, done = range(5)
def asText(id):
if id == 0:
rtn = 'Not found'
elif id == 1:
rtn = 'Error'
elif id == 2:
rtn = 'Enqueued'
elif id == 3:
rtn = 'Dequeued'
elif id == 4:
rtn = 'Done'
else:
rtn = False
return rtn
asText = staticmethod(asText)
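# Usage sketch:
#   jobStatus.asText(jobStatus.enqueued)   # -> 'Enqueued'
# Unknown ids return False.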
|
dustymugs/doos
|
server/jobStatus.py
|
Python
|
gpl-3.0
| 1,210
|
# -*- coding: utf-8 -*-
from openerp import api, models
from openerp.addons.training_management.models.model_names import ModelNames
from openerp.addons.training_management.utils.date_utils import DateUtils
class TrainerWorkloadAnalyzer(models.AbstractModel):
_name = ModelNames.TRAINER_WORKLOAD_ANALYZER
@api.model
def compute_trainer_workload_data(self, start_date, end_date):
start_date, end_date = DateUtils.convert_to_dates(start_date, end_date)
first_week = DateUtils.get_monday_of_week(start_date)
last_week = DateUtils.get_friday_of_week(end_date)
trainer_workload_data = {
"weeks_to_display": [],
"trainer_info": {},
"workloads": {},
"workload_totals": {},
}
current_week = first_week
while current_week <= last_week:
year_week = DateUtils.build_year_week_string_from_date(current_week)
trainer_workload_data["weeks_to_display"].append(year_week)
current_week += DateUtils.ONE_WEEK_TIME_DELTA
partner_model = self.env[ModelNames.PARTNER]
trainers = partner_model.search([("is_trainer", "=", True)])
for trainer in trainers:
trainer_id = str(trainer.id)
trainer_workload_data["workloads"][trainer_id] = {}
self._add_trainer_info(trainer_workload_data, trainer)
resources = self._find_resources_in_range_having_trainer(first_week, last_week, trainers)
self._update_trainer_workload_data_from_resources(resources, trainer_workload_data)
workloads = trainer_workload_data["workloads"]
for trainer_id, trainer_workload in workloads.iteritems():
lesson_total = sum(trainer_workload.values())
trainer_workload_data["workload_totals"][trainer_id] = lesson_total;
return trainer_workload_data
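# Shape of the returned structure (a hedged sketch; the keys mirror the
# assignments above, the values are purely illustrative):
#   {
#       "weeks_to_display": ["2016-01", "2016-02"],
#       "trainer_info": {"7": {"color_name": "blue", "name": "Doe, John"}},
#       "workloads": {"7": {"2016-01": 12}},
#       "workload_totals": {"7": 12},
#   }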
@staticmethod
def _add_trainer_info(trainer_workload_data, trainer):
trainer_info = trainer_workload_data["trainer_info"]
trainer_id = str(trainer.id)
if trainer_id not in trainer_info:
trainer_info[trainer_id] = {}
trainer_info[trainer_id].update({
"color_name": trainer.color_name,
"name": u"{surname}, {forename}".format(surname=trainer.surname, forename=trainer.forename),
})
def _update_trainer_workload_data_from_resources(self, resources, trainer_workload_data):
for resource in resources:
if not resource.trainer_id:
continue
trainer_id = str(resource.trainer_id.id)
year_week = resource.year_week_string
workloads = trainer_workload_data["workloads"]
if trainer_id not in workloads:
workloads[trainer_id] = {}
self._add_trainer_info(trainer_workload_data, resource.trainer_id)
trainer_workload = workloads[trainer_id]
if year_week not in trainer_workload:
trainer_workload[year_week] = 0
trainer_workload[year_week] += resource.get_lesson_count()
def _find_resources_in_range_having_trainer(self, start_date, end_date, trainers):
resource_model = self.env[ModelNames.RESOURCE]
domain = [
("date", ">=", DateUtils.convert_to_string(start_date)),
("date", "<=", DateUtils.convert_to_string(end_date)),
("trainer_id", "in", trainers.ids),
]
return resource_model.search(domain)
@api.model
@api.returns("self")
def find_trainers_with_main_location(self, main_location_id):
trainer_model = self.env[ModelNames.TRAINER]
domain = [
("is_trainer", "=", True),
("main_location_id", "=", main_location_id)
]
trainers = trainer_model.search(domain)
return trainers
def _find_trainers_for_user_locations(self):
location_model = self.env[ModelNames.LOCATION]
trainer_model = self.env[ModelNames.TRAINER]
user_locations = location_model.search([("user_ids", "in", [self.env.user.id])])
domain = [
("is_trainer", "=", True),
("main_location_id", "in", user_locations.ids)
]
trainers = trainer_model.search(domain)
return trainers
|
SNeuhausen/training_management
|
models/resource_analysis/trainer_workload_analyzer.py
|
Python
|
gpl-3.0
| 4,295
|
from common import primes
def main():
print(len(circularPrimesBelow(1000000)))
def circularPrimesBelow(limit):
primeList = []
for i in range(limit):
if isCircularPrime(i):
primeList.append(i)
return primeList
def isCircularPrime(n):
n = str(n)
for _ in range(len(n)):
if not primes.isPrime(int(n)):
return False
n = n[1:] + n[0]
return True
if __name__ == "__main__":
main()
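# Worked example: 197 is a circular prime because every digit rotation
# (197 -> 971 -> 719) is prime; isCircularPrime builds each rotation
# with n[1:] + n[0] and tests it with primes.isPrime.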
|
ZachOhara/Project-Euler
|
python/p031_p040/problem035.py
|
Python
|
gpl-3.0
| 398
|
#!/usr/bin/pythonw
# -*- coding: utf-8 -*-
in_file_directory = ['input/toy_data/']
in_file_name = ['data_test']
in_file_extension = ['txt']
in_file_delimiter = ['\t']
in_file_quote = ['off']
in_file_missing_value = ['NA']
raw_data_structure = [['From','str','To','str','IRI1','str','IRI2','str','Exposure','float','Exp1','float','Exp2','float','IRI3','str','IRI4','str','IRI5','str']]
clean_data_structure = [['From','str','To','str','Exposure','float']]
out_file_directory = ['clean_data_time_1']
out_file_name = ['output/']
out_file_extension = ['txt']
out_file_delimiter = [',']
out_file_missing_value = ['NaN']
out_file_single_file = ['off']
out_file_separate_line = ['']
mapping_file_directory = [['input/toy_data/']]
mapping_file_name = [['mapping']]
mapping_file_extension = [['txt']]
mapping_file_delimiter = [[',']]
mapping_out_file_name = [['output/']]
mapping_replace_ids = ['on']
mapping_kept_id_position = ['0']
mapping_lost_id_position = ['1']
mapping_target_position = [['From,To,Exposure']]
mapping_drop_unreferenced_entries = ['off']
mapping_target_unreferenced_entries = [['']]
mapping_drop_ghosts = ['off']
mapping_remove_duplicates = ['off']
mapping_target_duplicates_set = [['']]
mapping_merge_entries = ['off']
mapping_target_merge_set = [['']]
mapping_commands = [['']]
|
troukny/NetGen
|
src/config_web.py
|
Python
|
gpl-3.0
| 1,329
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, erpnext
from frappe.utils import cint, flt, cstr
from frappe import _
import frappe.defaults
from erpnext.accounts.utils import get_fiscal_year
from erpnext.accounts.general_ledger import make_gl_entries, delete_gl_entries, process_gl_map
from erpnext.controllers.accounts_controller import AccountsController
from erpnext.stock.stock_ledger import get_valuation_rate
from erpnext.stock import get_warehouse_account_map
class QualityInspectionRequiredError(frappe.ValidationError): pass
class QualityInspectionRejectedError(frappe.ValidationError): pass
class QualityInspectionNotSubmittedError(frappe.ValidationError): pass
class StockController(AccountsController):
def validate(self):
super(StockController, self).validate()
self.validate_inspection()
def make_gl_entries(self, gl_entries=None, repost_future_gle=True, from_repost=False):
if self.docstatus == 2:
delete_gl_entries(voucher_type=self.doctype, voucher_no=self.name)
if cint(erpnext.is_perpetual_inventory_enabled(self.company)):
warehouse_account = get_warehouse_account_map(self.company)
if self.docstatus==1:
if not gl_entries:
gl_entries = self.get_gl_entries(warehouse_account)
make_gl_entries(gl_entries, from_repost=from_repost)
if repost_future_gle:
items, warehouses = self.get_items_and_warehouses()
update_gl_entries_after(self.posting_date, self.posting_time, warehouses, items,
warehouse_account, company=self.company)
elif self.doctype in ['Purchase Receipt', 'Purchase Invoice'] and self.docstatus == 1:
gl_entries = []
gl_entries = self.get_asset_gl_entry(gl_entries)
make_gl_entries(gl_entries, from_repost=from_repost)
def get_gl_entries(self, warehouse_account=None, default_expense_account=None,
default_cost_center=None):
if not warehouse_account:
warehouse_account = get_warehouse_account_map(self.company)
sle_map = self.get_stock_ledger_details()
voucher_details = self.get_voucher_details(default_expense_account, default_cost_center, sle_map)
gl_list = []
warehouse_with_no_account = []
for item_row in voucher_details:
sle_list = sle_map.get(item_row.name)
if sle_list:
for sle in sle_list:
if warehouse_account.get(sle.warehouse):
# from warehouse account
self.check_expense_account(item_row)
# If the item does not have the allow zero valuation rate flag set
# and ( valuation rate not mentioned in an incoming entry
# or incoming entry not found while delivering the item),
# try to pick valuation rate from previous sle or Item master and update in SLE
# Otherwise, throw an exception
if not sle.stock_value_difference and self.doctype != "Stock Reconciliation" \
and not item_row.get("allow_zero_valuation_rate"):
sle = self.update_stock_ledger_entries(sle)
gl_list.append(self.get_gl_dict({
"account": warehouse_account[sle.warehouse]["account"],
"against": item_row.expense_account,
"cost_center": item_row.cost_center,
"remarks": self.get("remarks") or "Accounting Entry for Stock",
"debit": flt(sle.stock_value_difference, 2),
}, warehouse_account[sle.warehouse]["account_currency"]))
# to target warehouse / expense account
gl_list.append(self.get_gl_dict({
"account": item_row.expense_account,
"against": warehouse_account[sle.warehouse]["account"],
"cost_center": item_row.cost_center,
"remarks": self.get("remarks") or "Accounting Entry for Stock",
"credit": flt(sle.stock_value_difference, 2),
"project": item_row.get("project") or self.get("project")
}))
elif sle.warehouse not in warehouse_with_no_account:
warehouse_with_no_account.append(sle.warehouse)
if warehouse_with_no_account:
for wh in warehouse_with_no_account:
if frappe.db.get_value("Warehouse", wh, "company"):
frappe.throw(_("Warehouse {0} is not linked to any account, please mention the account in the warehouse record or set default inventory account in company {1}.").format(wh, self.company))
return process_gl_map(gl_list)
def update_stock_ledger_entries(self, sle):
sle.valuation_rate = get_valuation_rate(sle.item_code, sle.warehouse,
self.doctype, self.name, currency=self.company_currency, company=self.company)
sle.stock_value = flt(sle.qty_after_transaction) * flt(sle.valuation_rate)
sle.stock_value_difference = flt(sle.actual_qty) * flt(sle.valuation_rate)
if sle.name:
frappe.db.sql("""
update
`tabStock Ledger Entry`
set
stock_value = %(stock_value)s,
valuation_rate = %(valuation_rate)s,
stock_value_difference = %(stock_value_difference)s
where
name = %(name)s""", (sle))
return sle
def get_voucher_details(self, default_expense_account, default_cost_center, sle_map):
if self.doctype == "Stock Reconciliation":
return [frappe._dict({ "name": voucher_detail_no, "expense_account": default_expense_account,
"cost_center": default_cost_center }) for voucher_detail_no, sle in sle_map.items()]
else:
details = self.get("items")
if default_expense_account or default_cost_center:
for d in details:
if default_expense_account and not d.get("expense_account"):
d.expense_account = default_expense_account
if default_cost_center and not d.get("cost_center"):
d.cost_center = default_cost_center
return details
def get_items_and_warehouses(self):
items, warehouses = [], []
if hasattr(self, "items"):
item_doclist = self.get("items")
elif self.doctype == "Stock Reconciliation":
import json
item_doclist = []
data = json.loads(self.reconciliation_json)
for row in data[data.index(self.head_row)+1:]:
d = frappe._dict(zip(["item_code", "warehouse", "qty", "valuation_rate"], row))
item_doclist.append(d)
if item_doclist:
for d in item_doclist:
if d.item_code and d.item_code not in items:
items.append(d.item_code)
if d.get("warehouse") and d.warehouse not in warehouses:
warehouses.append(d.warehouse)
if self.doctype == "Stock Entry":
if d.get("s_warehouse") and d.s_warehouse not in warehouses:
warehouses.append(d.s_warehouse)
if d.get("t_warehouse") and d.t_warehouse not in warehouses:
warehouses.append(d.t_warehouse)
return items, warehouses
def get_stock_ledger_details(self):
stock_ledger = {}
stock_ledger_entries = frappe.db.sql("""
select
name, warehouse, stock_value_difference, valuation_rate,
voucher_detail_no, item_code, posting_date, posting_time,
actual_qty, qty_after_transaction
from
`tabStock Ledger Entry`
where
voucher_type=%s and voucher_no=%s
""", (self.doctype, self.name), as_dict=True)
for sle in stock_ledger_entries:
stock_ledger.setdefault(sle.voucher_detail_no, []).append(sle)
return stock_ledger
def make_batches(self, warehouse_field):
'''Create batches if required. Called before submit'''
for d in self.items:
if d.get(warehouse_field) and not d.batch_no:
has_batch_no, create_new_batch = frappe.db.get_value('Item', d.item_code, ['has_batch_no', 'create_new_batch'])
if has_batch_no and create_new_batch:
d.batch_no = frappe.get_doc(dict(
doctype='Batch',
item=d.item_code,
supplier=getattr(self, 'supplier', None),
reference_doctype=self.doctype,
reference_name=self.name)).insert().name
def make_adjustment_entry(self, expected_gle, voucher_obj):
from erpnext.accounts.utils import get_stock_and_account_difference
account_list = [d.account for d in expected_gle]
acc_diff = get_stock_and_account_difference(account_list,
expected_gle[0].posting_date, self.company)
cost_center = self.get_company_default("cost_center")
stock_adjustment_account = self.get_company_default("stock_adjustment_account")
gl_entries = []
for account, diff in acc_diff.items():
if diff:
gl_entries.append([
# stock in hand account
voucher_obj.get_gl_dict({
"account": account,
"against": stock_adjustment_account,
"debit": diff,
"remarks": "Adjustment Accounting Entry for Stock",
}),
# account against stock in hand
voucher_obj.get_gl_dict({
"account": stock_adjustment_account,
"against": account,
"credit": diff,
"cost_center": cost_center or None,
"remarks": "Adjustment Accounting Entry for Stock",
}),
])
if gl_entries:
from erpnext.accounts.general_ledger import make_gl_entries
make_gl_entries(gl_entries)
def check_expense_account(self, item):
if not item.get("expense_account"):
frappe.throw(_("Expense or Difference account is mandatory for Item {0} as it impacts overall stock value").format(item.item_code))
else:
is_expense_account = frappe.db.get_value("Account",
item.get("expense_account"), "report_type")=="Profit and Loss"
if self.doctype not in ("Purchase Receipt", "Purchase Invoice", "Stock Reconciliation", "Stock Entry") and not is_expense_account:
frappe.throw(_("Expense / Difference account ({0}) must be a 'Profit or Loss' account")
.format(item.get("expense_account")))
if is_expense_account and not item.get("cost_center"):
frappe.throw(_("{0} {1}: Cost Center is mandatory for Item {2}").format(
_(self.doctype), self.name, item.get("item_code")))
def get_sl_entries(self, d, args):
sl_dict = frappe._dict({
"item_code": d.get("item_code", None),
"warehouse": d.get("warehouse", None),
"posting_date": self.posting_date,
"posting_time": self.posting_time,
'fiscal_year': get_fiscal_year(self.posting_date, company=self.company)[0],
"voucher_type": self.doctype,
"voucher_no": self.name,
"voucher_detail_no": d.name,
"actual_qty": (self.docstatus==1 and 1 or -1)*flt(d.get("stock_qty")),
"stock_uom": frappe.db.get_value("Item", args.get("item_code") or d.get("item_code"), "stock_uom"),
"incoming_rate": 0,
"company": self.company,
"batch_no": cstr(d.get("batch_no")).strip(),
"serial_no": d.get("serial_no"),
"project": d.get("project") or self.get('project'),
"is_cancelled": self.docstatus==2 and "Yes" or "No"
})
sl_dict.update(args)
return sl_dict
def make_sl_entries(self, sl_entries, is_amended=None, allow_negative_stock=False,
via_landed_cost_voucher=False):
from erpnext.stock.stock_ledger import make_sl_entries
make_sl_entries(sl_entries, is_amended, allow_negative_stock, via_landed_cost_voucher)
def make_gl_entries_on_cancel(self, repost_future_gle=True):
if frappe.db.sql("""select name from `tabGL Entry` where voucher_type=%s
and voucher_no=%s""", (self.doctype, self.name)):
self.make_gl_entries(repost_future_gle=repost_future_gle)
def get_serialized_items(self):
serialized_items = []
item_codes = list(set([d.item_code for d in self.get("items")]))
if item_codes:
serialized_items = frappe.db.sql_list("""select name from `tabItem`
where has_serial_no=1 and name in ({})""".format(", ".join(["%s"]*len(item_codes))),
tuple(item_codes))
return serialized_items
def get_incoming_rate_for_sales_return(self, item_code, against_document):
incoming_rate = 0.0
if against_document and item_code:
incoming_rate = frappe.db.sql("""select abs(stock_value_difference / actual_qty)
from `tabStock Ledger Entry`
where voucher_type = %s and voucher_no = %s
and item_code = %s limit 1""",
(self.doctype, against_document, item_code))
incoming_rate = incoming_rate[0][0] if incoming_rate else 0.0
return incoming_rate
def validate_warehouse(self):
from erpnext.stock.utils import validate_warehouse_company
warehouses = list(set([d.warehouse for d in
self.get("items") if getattr(d, "warehouse", None)]))
for w in warehouses:
validate_warehouse_company(w, self.company)
def update_billing_percentage(self, update_modified=True):
self._update_percent_field({
"target_dt": self.doctype + " Item",
"target_parent_dt": self.doctype,
"target_parent_field": "per_billed",
"target_ref_field": "amount",
"target_field": "billed_amt",
"name": self.name,
}, update_modified)
def validate_inspection(self):
'''Checks if quality inspection is set for Items that require inspection.
On submit, an exception is raised.'''
inspection_required_fieldname = None
if self.doctype in ["Purchase Receipt", "Purchase Invoice"]:
inspection_required_fieldname = "inspection_required_before_purchase"
elif self.doctype in ["Delivery Note", "Sales Invoice"]:
inspection_required_fieldname = "inspection_required_before_delivery"
if ((not inspection_required_fieldname and self.doctype != "Stock Entry") or
(self.doctype == "Stock Entry" and not self.inspection_required) or
(self.doctype in ["Sales Invoice", "Purchase Invoice"] and not self.update_stock)):
return
for d in self.get('items'):
qa_required = False
if (inspection_required_fieldname and not d.quality_inspection and
frappe.db.get_value("Item", d.item_code, inspection_required_fieldname)):
qa_required = True
elif self.doctype == "Stock Entry" and not d.quality_inspection and d.t_warehouse:
qa_required = True
if self.docstatus == 1 and d.quality_inspection:
qa_doc = frappe.get_doc("Quality Inspection", d.quality_inspection)
if qa_doc.docstatus == 0:
link = frappe.utils.get_link_to_form('Quality Inspection', d.quality_inspection)
frappe.throw(_("Quality Inspection: {0} is not submitted for the item: {1} in row {2}").format(link, d.item_code, d.idx), QualityInspectionNotSubmittedError)
qa_failed = any([r.status=="Rejected" for r in qa_doc.readings])
if qa_failed:
frappe.throw(_("Row {0}: Quality Inspection rejected for item {1}")
.format(d.idx, d.item_code), QualityInspectionRejectedError)
elif qa_required:
frappe.msgprint(_("Quality Inspection required for Item {0}").format(d.item_code))
if self.docstatus==1:
raise QualityInspectionRequiredError
def update_blanket_order(self):
blanket_orders = list(set([d.blanket_order for d in self.items if d.blanket_order]))
for blanket_order in blanket_orders:
frappe.get_doc("Blanket Order", blanket_order).update_ordered_qty()
def update_gl_entries_after(posting_date, posting_time, for_warehouses=None, for_items=None,
warehouse_account=None, company=None):
def _delete_gl_entries(voucher_type, voucher_no):
frappe.db.sql("""delete from `tabGL Entry`
where voucher_type=%s and voucher_no=%s""", (voucher_type, voucher_no))
if not warehouse_account:
warehouse_account = get_warehouse_account_map(company)
future_stock_vouchers = get_future_stock_vouchers(posting_date, posting_time, for_warehouses, for_items)
gle = get_voucherwise_gl_entries(future_stock_vouchers, posting_date)
for voucher_type, voucher_no in future_stock_vouchers:
existing_gle = gle.get((voucher_type, voucher_no), [])
voucher_obj = frappe.get_doc(voucher_type, voucher_no)
expected_gle = voucher_obj.get_gl_entries(warehouse_account)
if expected_gle:
if not existing_gle or not compare_existing_and_expected_gle(existing_gle, expected_gle):
_delete_gl_entries(voucher_type, voucher_no)
voucher_obj.make_gl_entries(gl_entries=expected_gle, repost_future_gle=False, from_repost=True)
else:
_delete_gl_entries(voucher_type, voucher_no)
def compare_existing_and_expected_gle(existing_gle, expected_gle):
matched = True
for entry in expected_gle:
account_existed = False
for e in existing_gle:
if entry.account == e.account:
account_existed = True
if entry.account == e.account and entry.against_account == e.against_account \
and (not entry.cost_center or not e.cost_center or entry.cost_center == e.cost_center) \
and (entry.debit != e.debit or entry.credit != e.credit):
matched = False
break
if not account_existed:
matched = False
break
return matched
def get_future_stock_vouchers(posting_date, posting_time, for_warehouses=None, for_items=None):
future_stock_vouchers = []
values = []
condition = ""
if for_items:
condition += " and item_code in ({})".format(", ".join(["%s"] * len(for_items)))
values += for_items
if for_warehouses:
condition += " and warehouse in ({})".format(", ".join(["%s"] * len(for_warehouses)))
values += for_warehouses
for d in frappe.db.sql("""select distinct sle.voucher_type, sle.voucher_no
from `tabStock Ledger Entry` sle
where timestamp(sle.posting_date, sle.posting_time) >= timestamp(%s, %s) {condition}
order by timestamp(sle.posting_date, sle.posting_time) asc, creation asc""".format(condition=condition),
tuple([posting_date, posting_time] + values), as_dict=True):
future_stock_vouchers.append([d.voucher_type, d.voucher_no])
return future_stock_vouchers
def get_voucherwise_gl_entries(future_stock_vouchers, posting_date):
gl_entries = {}
if future_stock_vouchers:
for d in frappe.db.sql("""select * from `tabGL Entry`
where posting_date >= %s and voucher_no in (%s)""" %
('%s', ', '.join(['%s']*len(future_stock_vouchers))),
tuple([posting_date] + [d[1] for d in future_stock_vouchers]), as_dict=1):
gl_entries.setdefault((d.voucher_type, d.voucher_no), []).append(d)
return gl_entries
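# Repost flow in brief (a sketch of how the helpers above interact):
#   vouchers = get_future_stock_vouchers(posting_date, posting_time,
#                                        for_warehouses, for_items)
#   existing = get_voucherwise_gl_entries(vouchers, posting_date)
# update_gl_entries_after() then deletes and regenerates GL entries only
# for the vouchers whose expected entries differ from the existing ones.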
|
brownharryb/erpnext
|
erpnext/controllers/stock_controller.py
|
Python
|
gpl-3.0
| 17,415
|
from .base import BaseConfig
__all__ = ["ImmunizationRoute"]
class ImmunizationRoute(BaseConfig):
@classmethod
def build_fhir_object_from_health(cls, health):
# Health routes
# ('im', 'Intramuscular'),
# ('sc', 'Subcutaneous'),
# ('id', 'Intradermal'),
# ('nas', 'Intranasal'),
# ('po', 'Oral'),
if health == "im":
return cls.get_fhir_intramuscular()
elif health == "sc":
return cls.get_fhir_subcutaneous()
elif health == "id":
return cls.get_fhir_intradermal()
elif health == "nas":
return cls.get_fhir_intranasal()
elif health == "po":
return cls.get_fhir_oral()
else:
return cls.get_fhir_unknown()
@classmethod
def build_health_object_from_fhir(cls, fhir):
routes = {
"IM": "im",
"PO": "po",
"NASINHLC": "nas",
"TRNSDERM": None,
"IDINJ": "id",
"SQ": "sc",
"IVINJ": None,
}
return routes.get(fhir, None)
@classmethod
def get_fhir_intramuscular(cls):
return cls.build_codeable_concept(
code="IM",
system="http://terminology.hl7.org/CodeSystem/v3-RouteOfAdministration",
text="Intramuscular",
)
@classmethod
def get_fhir_oral(cls):
return cls.build_codeable_concept(
code="PO",
system="http://terminology.hl7.org/CodeSystem/v3-RouteOfAdministration",
text="Oral",
)
@classmethod
def get_fhir_intranasal(cls):
return cls.build_codeable_concept(
code="NASINHLC",
system="http://terminology.hl7.org/CodeSystem/v3-RouteOfAdministration",
text="Intranasal",
)
@classmethod
def get_fhir_intravenous(cls):
return cls.build_codeable_concept(
code="IVINJ",
system="http://terminology.hl7.org/CodeSystem/v3-RouteOfAdministration",
text="Intravenous",
)
@classmethod
def get_fhir_subcutaneous(cls):
return cls.build_codeable_concept(
code="SQ",
system="http://terminology.hl7.org/CodeSystem/v3-RouteOfAdministration",
text="Subcutaneous",
)
@classmethod
def get_fhir_intradermal(cls):
return cls.build_codeable_concept(
code="IDINJ",
system="http://terminology.hl7.org/CodeSystem/v3-RouteOfAdministration",
text="Intradermal",
)
@classmethod
def get_fhir_transdermal(cls):
return cls.build_codeable_concept(
code="TRNSDERM",
system="http://terminology.hl7.org/CodeSystem/v3-RouteOfAdministration",
text="Transdermal",
)
@classmethod
def get_fhir_unknown(cls):
return cls.build_codeable_concept(
code="UNK",
system="http://terminology.hl7.org/CodeSystem/v3-RouteOfAdministration",
text="Unkown",
)
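# Round-trip sketch (the codes follow the HL7 v3 RouteOfAdministration
# system referenced above):
#   concept = ImmunizationRoute.build_fhir_object_from_health('im')
#   # -> CodeableConcept with code "IM" (Intramuscular)
#   ImmunizationRoute.build_health_object_from_fhir('IM')  # -> 'im'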
|
teffalump/health_fhir
|
gnu_health_fhir/config/converters/config_immunization_route.py
|
Python
|
gpl-3.0
| 3,070
|
import pickle
import sys
from collections import Counter
# Get kmap it from:
# https://github.com/gaborgulyas/kmap -- get "kmap.py"
# Which will also need:
# https://github.com/gaborgulyas/SelectPoints -- get "selectpoints.py"
from kmap import plot_kmap
from common import *
config, outpath = load_config("config_999.json")
anonsetsizes = pickle.load(open(outpath.replace(".csv", ".p"), "r"))
ixs = []
for ix in range(len(anonsetsizes)):
if anonsetsizes[ix] == 0:
ixs.append(ix)
for ctr, ix in enumerate(ixs):
del anonsetsizes[ix-ctr]
xy = Counter(anonsetsizes)
plot_kmap([len(anonsetsizes), xy], data_raw=False, title="Anonymity set sizes", filename=outpath.replace(".csv", "_anon_set_sizes.png"), plot_heatmap=False, plot_annotation=[[1, 3], [10, 50000]], annotation_params=dict(radius=[.2, .1], distance=[.33, .75], linestyle=dict(color='r', width=1, style='--'), location=['right', 'top']), titlelabelsize=30, axlabelsize=30, textsize=28, annotationsize=26, plot_legend=False)
|
gaborgulyas/constrainted_fingerprinting
|
06_plot_anonsets.py
|
Python
|
gpl-3.0
| 996
|